| column | dtype | range / values |
|---|---|---|
| hexsha | stringlengths | 40 - 40 |
| size | int64 | 1 - 1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3 - 239 |
| max_stars_repo_name | stringlengths | 5 - 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 - 78 |
| max_stars_repo_licenses | sequencelengths | 1 - 10 |
| max_stars_count | int64 (⌀ nullable) | 1 - 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths (⌀ nullable) | 24 - 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths (⌀ nullable) | 24 - 24 |
| max_issues_repo_path | stringlengths | 3 - 239 |
| max_issues_repo_name | stringlengths | 5 - 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 - 78 |
| max_issues_repo_licenses | sequencelengths | 1 - 10 |
| max_issues_count | int64 (⌀ nullable) | 1 - 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths (⌀ nullable) | 24 - 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths (⌀ nullable) | 24 - 24 |
| max_forks_repo_path | stringlengths | 3 - 239 |
| max_forks_repo_name | stringlengths | 5 - 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 - 78 |
| max_forks_repo_licenses | sequencelengths | 1 - 10 |
| max_forks_count | int64 (⌀ nullable) | 1 - 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths (⌀ nullable) | 24 - 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths (⌀ nullable) | 24 - 24 |
| content | stringlengths | 1 - 1.03M |
| avg_line_length | float64 | 1 - 958k |
| max_line_length | int64 | 1 - 1.03M |
| alphanum_fraction | float64 | 0 - 1 |
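Each row below pairs these per-file summary statistics with the file's full `content`. A minimal sketch of filtering records on the numeric columns above (the parquet path is a placeholder assumption, not taken from this dump):

```python
import pandas as pd

# Placeholder path: substitute wherever these records are actually stored.
df = pd.read_parquet("code_files.parquet")

# Keep Python files that look like ordinary source code, using the
# summary columns described in the schema above.
mask = (
    (df["lang"] == "Python")
    & (df["max_line_length"] <= 1000)
    & (df["avg_line_length"] <= 100)
    & (df["alphanum_fraction"] >= 0.25)
)
print(df.loc[mask, ["hexsha", "max_stars_repo_name", "max_stars_repo_path", "size"]].head())
```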
7946eba0c1e55e4a1157a500dbd117640402cec4 | 3,256 | py | Python | viewport_experimental.py | Euclideon/vaultsdkpython | cea55dead80fc32618e72158c2eab783833e16c1 | [
"MIT"
] | 2 | 2020-07-14T19:35:13.000Z | 2020-08-13T08:18:42.000Z | viewport_experimental.py | Euclideon/vaultsdkpython | cea55dead80fc32618e72158c2eab783833e16c1 | [
"MIT"
] | 17 | 2020-05-13T04:56:44.000Z | 2020-08-12T05:09:24.000Z | viewport_experimental.py | Euclideon/vaultsdkpython | cea55dead80fc32618e72158c2eab783833e16c1 | [
"MIT"
] | null | null | null | """
module containing experimental features relating to extending viewports
"""
from pygletExample import *
class VDKViewPort3D(VDKViewPort):
"""
Viewport quad with 3D faces, used for constructing ViewPrisms
"""
def __init__(self, width, height, centreX, centreY, parent, horizontalDirection = [1,0,0], verticalDirection = [0,1,0]):
self._width = width
self._height = height
self._centre = [centreX, centreY, 0]
self.parent = parent
self.vec1 = horizontalDirection
self.vec2 = verticalDirection
#self.vec1 = [0.707, -0.707,0.01]
#self.vec2 = [0.707, 0.707,0.01]
super(VDKViewPort3D, self).__init__(width, height, centreX, centreY, parent)
def orient(self, centre, vec1, vec2):
#position the plane such that it is parallel to vectors 1 and 2 and centred at centre:
# these are the vertices at the corners of the quad, each line is the pixel coordinates of the
self._vertex_list.vertices = \
[
# bottom left
centre[0] - vec1[0] * self._width / 2 - vec2[0] * self._height / 2,
centre[1] - vec1[1] * self._width / 2 - vec2[1] * self._height / 2,
centre[2] - vec1[2] * self._width / 2 - vec2[2] * self._height / 2,
# bottom right
centre[0] + vec1[0] * self._width / 2 - vec2[0] * self._height / 2,
centre[1] + vec1[1] * self._width / 2 - vec2[1] * self._height / 2,
centre[2] + vec1[2] * self._width / 2 - vec2[2] * self._height / 2,
#top right
centre[0] + vec1[0] * self._width/2 + vec2[0] * self._height/2,
centre[1] + vec1[1] * self._width / 2 + vec2[1] * self._height / 2,
centre[2] + vec1[2] * self._width / 2 + vec2[2] * self._height / 2,
# top left
centre[0] - vec1[0] * self._width / 2 + vec2[0] * self._height / 2,
centre[1] - vec1[1] * self._width / 2 + vec2[1] * self._height / 2,
centre[2] - vec1[2] * self._width / 2 + vec2[2] * self._height / 2,
]
#position the camera such that it is a fixed distance from the
import numpy as np
v1 = np.array(vec1)
v2 = np.array(vec2)
normal = np.cross(v1, v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))
normal = normal.dot(np.array([
[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
]))
self.camera.look_at([0, 0, 0], normal)
#self._camera.set_view(normal[0], normal[1], normal[2])
def make_vertex_list(self):
self._vertex_list = pyglet.graphics.vertex_list(4,'v3f/stream','t2f/static')
self._vertex_list.tex_coords = \
[
0, 1,
1, 1,
1, 0,
0, 0,
]
self.orient(self._centre, self.vec1, self.vec2)
class VDKViewPrism:
"""
Class representing a sectional view of a model
it is a rectangular prism with a UD view for each face
"""
def __init__(self, width, height, depth):
self.height = height
self.width = width
self.depth = depth
self.viewPorts = []
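# Usage sketch (illustrative; assumes `parent` is the pyglet-based window object that
# pygletExample's VDKViewPort expects - it is not defined in this file):
#
#   prism = VDKViewPrism(width=200, height=200, depth=200)
#   face = VDKViewPort3D(200, 200, 0, 0, parent,
#                        horizontalDirection=[1, 0, 0], verticalDirection=[0, 0, 1])
#   prism.viewPorts.append(face)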
| 40.7 | 124 | 0.541769 |
7946ec7361437e45e755b6b33241ab9f2084317a | 1,330 | py | Python | omoide_index/tests/infra/test_memory_calculator.py | IgorZyktin/omoide-index | b64cdc9e661b0b3d3b25a460f8bb0ef689ea81ad | [
"MIT"
] | null | null | null | omoide_index/tests/infra/test_memory_calculator.py | IgorZyktin/omoide-index | b64cdc9e661b0b3d3b25a460f8bb0ef689ea81ad | [
"MIT"
] | 16 | 2021-12-22T02:27:24.000Z | 2022-03-31T02:26:07.000Z | omoide_index/tests/infra/test_memory_calculator.py | IgorZyktin/omoide-index | b64cdc9e661b0b3d3b25a460f8bb0ef689ea81ad | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Tests.
"""
import pytest
from omoide_index.infra import MemoryCalculator
@pytest.fixture
def infra_memory_calculator_instance():
return MemoryCalculator()
def test_memory_calculator_consumption_str(infra_memory_calculator_instance):
# arrange
inst = infra_memory_calculator_instance
# act
res = inst.get_process_memory_consumption_str()
# assert
assert res.endswith('MiB')
@pytest.mark.parametrize('size,string', [
(-1, '0 B'),
(0, '0 B'),
(5, '5 B'),
(29, '29 B'),
(60, '60 B'),
(1_000, '1000 B'),
(1_024, '1.0 KiB'),
(1_120, '1.1 KiB'),
(99_999, '97.7 KiB'),
(98_962_693, '94.4 MiB'),
(100_000_000_000, '93.1 GiB'),
(100_000_000_000_000, '90.9 TiB'),
])
def test_memory_calculator_human_readable_size(size, string):
# arrange
inst = MemoryCalculator()
# assert
assert inst.format_human_readable_size(size) == string
@pytest.mark.parametrize('target,string', [
(True, '32 B'),
(False, '24 B'),
(None, '16 B'),
(1, '32 B'),
((1, 2, 3), '160 B'),
((1, 2, (3, 4, (5, 6))), '376 B'),
])
def test_memory_calculator_object_consumption(target, string):
# arrange
inst = MemoryCalculator()
# assert
assert inst.get_object_memory_consumption_str(target) == string
| 21.111111 | 77 | 0.627068 |
7946ecb5eb7e85f6a542f25548362b5f4424ac41 | 8,534 | py | Python | telethon/events/inlinequery.py | bb010g/Telethon | 278f0e9e983d938589b6d541e71135ad5b6857c5 | [
"MIT"
] | null | null | null | telethon/events/inlinequery.py | bb010g/Telethon | 278f0e9e983d938589b6d541e71135ad5b6857c5 | [
"MIT"
] | null | null | null | telethon/events/inlinequery.py | bb010g/Telethon | 278f0e9e983d938589b6d541e71135ad5b6857c5 | [
"MIT"
] | null | null | null | import inspect
import re
import asyncio
from .common import EventBuilder, EventCommon, name_inner_event
from .. import utils
from ..tl import types, functions, custom
from ..tl.custom.sendergetter import SenderGetter
@name_inner_event
class InlineQuery(EventBuilder):
"""
Represents an inline query event (when someone writes ``'@my_bot query'``).
Args:
users (`entity`, optional):
May be one or more entities (username/peer/etc.), preferably IDs.
By default, only inline queries from these users will be handled.
blacklist_users (`bool`, optional):
Whether to treat the users as a blacklist instead of
as a whitelist (default). This means that every chat
will be handled *except* those specified in ``users``
which will be ignored if ``blacklist_users=True``.
pattern (`str`, `callable`, `Pattern`, optional):
If set, only queries matching this pattern will be handled.
You can specify a regex-like string which will be matched
against the message, a callable function that returns ``True``
if a message is acceptable, or a compiled regex pattern.
"""
def __init__(
self, users=None, *, blacklist_users=False, func=None, pattern=None):
super().__init__(users, blacklist_chats=blacklist_users, func=func)
if isinstance(pattern, str):
self.pattern = re.compile(pattern).match
elif not pattern or callable(pattern):
self.pattern = pattern
elif hasattr(pattern, 'match') and callable(pattern.match):
self.pattern = pattern.match
else:
raise TypeError('Invalid pattern type given')
@classmethod
def build(cls, update):
if isinstance(update, types.UpdateBotInlineQuery):
event = cls.Event(update)
else:
return
event._entities = update._entities
return event
def filter(self, event):
if self.pattern:
match = self.pattern(event.text)
if not match:
return
event.pattern_match = match
return super().filter(event)
class Event(EventCommon, SenderGetter):
"""
Represents the event of a new callback query.
Members:
query (:tl:`UpdateBotCallbackQuery`):
The original :tl:`UpdateBotCallbackQuery`.
Make sure to access the `text` of the query if
that's what you want instead working with this.
pattern_match (`obj`, optional):
The resulting object from calling the passed ``pattern``
function, which is ``re.compile(...).match`` by default.
"""
def __init__(self, query):
super().__init__(chat_peer=types.PeerUser(query.user_id))
self.query = query
self.pattern_match = None
self._answered = False
self._sender_id = query.user_id
self._input_sender = None
self._sender = None
def _set_client(self, client):
super()._set_client(client)
self._sender, self._input_sender = utils._get_entity_pair(
self.sender_id, self._entities, client._entity_cache)
@property
def id(self):
"""
Returns the unique identifier for the query ID.
"""
return self.query.query_id
@property
def text(self):
"""
Returns the text the user used to make the inline query.
"""
return self.query.query
@property
def offset(self):
"""
The string the user's client used as an offset for the query.
This will either be empty or equal to offsets passed to `answer`.
"""
return self.query.offset
@property
def geo(self):
"""
If the user location is requested when using inline mode
and the user's device is able to send it, this will return
the :tl:`GeoPoint` with the position of the user.
"""
            return self.query.geo
@property
def builder(self):
"""
Returns a new `InlineBuilder
<telethon.tl.custom.inlinebuilder.InlineBuilder>` instance.
"""
return custom.InlineBuilder(self._client)
async def answer(
self, results=None, cache_time=0, *,
gallery=False, next_offset=None, private=False,
switch_pm=None, switch_pm_param=''):
"""
Answers the inline query with the given results.
Args:
results (`list`, optional):
A list of :tl:`InputBotInlineResult` to use.
You should use `builder` to create these:
.. code-block:: python
builder = inline.builder
r1 = builder.article('Be nice', text='Have a nice day')
r2 = builder.article('Be bad', text="I don't like you")
await inline.answer([r1, r2])
You can send up to 50 results as documented in
https://core.telegram.org/bots/api#answerinlinequery.
Sending more will raise ``ResultsTooMuchError``,
and you should consider using `next_offset` to
paginate them.
cache_time (`int`, optional):
For how long this result should be cached on
the user's client. Defaults to 0 for no cache.
gallery (`bool`, optional):
Whether the results should show as a gallery (grid) or not.
next_offset (`str`, optional):
The offset the client will send when the user scrolls the
results and it repeats the request.
private (`bool`, optional):
Whether the results should be cached by Telegram
(not private) or by the user's client (private).
switch_pm (`str`, optional):
If set, this text will be shown in the results
to allow the user to switch to private messages.
switch_pm_param (`str`, optional):
Optional parameter to start the bot with if
`switch_pm` was used.
Example:
.. code-block:: python
@bot.on(events.InlineQuery)
async def handler(event):
builder = event.builder
rev_text = event.text[::-1]
await event.answer([
builder.article('Reverse text', text=rev_text),
builder.photo('/path/to/photo.jpg')
])
"""
if self._answered:
return
if results:
futures = [self._as_future(x, self._client.loop)
for x in results]
await asyncio.wait(futures, loop=self._client.loop)
# All futures will be in the `done` *set* that `wait` returns.
#
# Precisely because it's a `set` and not a `list`, it
# will not preserve the order, but since all futures
# completed we can use our original, ordered `list`.
results = [x.result() for x in futures]
else:
results = []
if switch_pm:
switch_pm = types.InlineBotSwitchPM(switch_pm, switch_pm_param)
return await self._client(
functions.messages.SetInlineBotResultsRequest(
query_id=self.query.query_id,
results=results,
cache_time=cache_time,
gallery=gallery,
next_offset=next_offset,
private=private,
switch_pm=switch_pm
)
)
@staticmethod
def _as_future(obj, loop):
if inspect.isawaitable(obj):
return asyncio.ensure_future(obj, loop=loop)
f = loop.create_future()
f.set_result(obj)
return f
| 36.008439 | 81 | 0.537263 |
7946ed067052491e80c247db87a53b2dccc8de94 | 9,054 | py | Python | electrum_mars/mnemonic.py | marscoin/electrum-mars | e95274b0ad959bdb02226c988303339a24acb8bf | [
"MIT"
] | 3 | 2021-08-15T08:05:00.000Z | 2021-11-21T21:35:10.000Z | electrum_mars/mnemonic.py | marscoin/electrum-mars | e95274b0ad959bdb02226c988303339a24acb8bf | [
"MIT"
] | 1 | 2021-12-02T08:04:05.000Z | 2021-12-02T08:04:05.000Z | electrum_mars/mnemonic.py | marscoin/electrum-mars | e95274b0ad959bdb02226c988303339a24acb8bf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import math
import hashlib
import unicodedata
import string
from typing import Sequence, Dict
from types import MappingProxyType
from .util import resource_path, bfh, bh2u, randrange
from .crypto import hmac_oneshot
from . import version
from .logging import Logger
# http://www.asahi-net.or.jp/~ax2s-kmtn/ref/unicode/e_asia.html
CJK_INTERVALS = [
(0x4E00, 0x9FFF, 'CJK Unified Ideographs'),
(0x3400, 0x4DBF, 'CJK Unified Ideographs Extension A'),
(0x20000, 0x2A6DF, 'CJK Unified Ideographs Extension B'),
(0x2A700, 0x2B73F, 'CJK Unified Ideographs Extension C'),
(0x2B740, 0x2B81F, 'CJK Unified Ideographs Extension D'),
(0xF900, 0xFAFF, 'CJK Compatibility Ideographs'),
(0x2F800, 0x2FA1D, 'CJK Compatibility Ideographs Supplement'),
(0x3190, 0x319F, 'Kanbun'),
(0x2E80, 0x2EFF, 'CJK Radicals Supplement'),
(0x2F00, 0x2FDF, 'CJK Radicals'),
(0x31C0, 0x31EF, 'CJK Strokes'),
(0x2FF0, 0x2FFF, 'Ideographic Description Characters'),
(0xE0100, 0xE01EF, 'Variation Selectors Supplement'),
(0x3100, 0x312F, 'Bopomofo'),
(0x31A0, 0x31BF, 'Bopomofo Extended'),
(0xFF00, 0xFFEF, 'Halfwidth and Fullwidth Forms'),
(0x3040, 0x309F, 'Hiragana'),
(0x30A0, 0x30FF, 'Katakana'),
(0x31F0, 0x31FF, 'Katakana Phonetic Extensions'),
(0x1B000, 0x1B0FF, 'Kana Supplement'),
(0xAC00, 0xD7AF, 'Hangul Syllables'),
(0x1100, 0x11FF, 'Hangul Jamo'),
(0xA960, 0xA97F, 'Hangul Jamo Extended A'),
(0xD7B0, 0xD7FF, 'Hangul Jamo Extended B'),
(0x3130, 0x318F, 'Hangul Compatibility Jamo'),
(0xA4D0, 0xA4FF, 'Lisu'),
(0x16F00, 0x16F9F, 'Miao'),
(0xA000, 0xA48F, 'Yi Syllables'),
(0xA490, 0xA4CF, 'Yi Radicals'),
]
def is_CJK(c):
n = ord(c)
for imin,imax,name in CJK_INTERVALS:
if n>=imin and n<=imax: return True
return False
def normalize_text(seed: str) -> str:
# normalize
seed = unicodedata.normalize('NFKD', seed)
# lower
seed = seed.lower()
# remove accents
seed = u''.join([c for c in seed if not unicodedata.combining(c)])
# normalize whitespaces
seed = u' '.join(seed.split())
# remove whitespaces between CJK
seed = u''.join([seed[i] for i in range(len(seed)) if not (seed[i] in string.whitespace and is_CJK(seed[i-1]) and is_CJK(seed[i+1]))])
return seed
_WORDLIST_CACHE = {} # type: Dict[str, Wordlist]
class Wordlist(tuple):
def __init__(self, words: Sequence[str]):
super().__init__()
index_from_word = {w: i for i, w in enumerate(words)}
self._index_from_word = MappingProxyType(index_from_word) # no mutation
def index(self, word, start=None, stop=None) -> int:
try:
return self._index_from_word[word]
except KeyError as e:
raise ValueError from e
def __contains__(self, word) -> bool:
try:
self.index(word)
except ValueError:
return False
else:
return True
@classmethod
def from_file(cls, filename) -> 'Wordlist':
path = resource_path('wordlist', filename)
if path not in _WORDLIST_CACHE:
with open(path, 'r', encoding='utf-8') as f:
s = f.read().strip()
s = unicodedata.normalize('NFKD', s)
lines = s.split('\n')
words = []
for line in lines:
line = line.split('#')[0]
line = line.strip(' \r')
assert ' ' not in line
if line:
words.append(line)
_WORDLIST_CACHE[path] = Wordlist(words)
return _WORDLIST_CACHE[path]
filenames = {
'en':'english.txt',
'es':'spanish.txt',
'ja':'japanese.txt',
'pt':'portuguese.txt',
'zh':'chinese_simplified.txt'
}
class Mnemonic(Logger):
# Seed derivation does not follow BIP39
# Mnemonic phrase uses a hash based checksum, instead of a wordlist-dependent checksum
def __init__(self, lang=None):
Logger.__init__(self)
lang = lang or 'en'
self.logger.info(f'language {lang}')
filename = filenames.get(lang[0:2], 'english.txt')
self.wordlist = Wordlist.from_file(filename)
self.logger.info(f"wordlist has {len(self.wordlist)} words")
@classmethod
def mnemonic_to_seed(self, mnemonic, passphrase) -> bytes:
PBKDF2_ROUNDS = 2048
mnemonic = normalize_text(mnemonic)
passphrase = passphrase or ''
passphrase = normalize_text(passphrase)
return hashlib.pbkdf2_hmac('sha512', mnemonic.encode('utf-8'), b'electrum' + passphrase.encode('utf-8'), iterations = PBKDF2_ROUNDS)
def mnemonic_encode(self, i):
n = len(self.wordlist)
words = []
while i:
x = i%n
i = i//n
words.append(self.wordlist[x])
return ' '.join(words)
def get_suggestions(self, prefix):
for w in self.wordlist:
if w.startswith(prefix):
yield w
def mnemonic_decode(self, seed):
n = len(self.wordlist)
words = seed.split()
i = 0
while words:
w = words.pop()
k = self.wordlist.index(w)
i = i*n + k
return i
def make_seed(self, *, seed_type=None, num_bits=None) -> str:
from .keystore import bip39_is_checksum_valid
if seed_type is None:
seed_type = 'standard'
if num_bits is None:
num_bits = 132
prefix = version.seed_prefix(seed_type)
# increase num_bits in order to obtain a uniform distribution for the last word
bpw = math.log(len(self.wordlist), 2)
num_bits = int(math.ceil(num_bits/bpw) * bpw)
self.logger.info(f"make_seed. prefix: '{prefix}', entropy: {num_bits} bits")
entropy = 1
while entropy < pow(2, num_bits - bpw):
# try again if seed would not contain enough words
entropy = randrange(pow(2, num_bits))
nonce = 0
while True:
nonce += 1
i = entropy + nonce
seed = self.mnemonic_encode(i)
if i != self.mnemonic_decode(seed):
raise Exception('Cannot extract same entropy from mnemonic!')
if is_old_seed(seed):
continue
# Make sure the mnemonic we generate is not also a valid bip39 seed
# by accident. Note that this test has not always been done historically,
# so it cannot be relied upon.
if bip39_is_checksum_valid(seed, wordlist=self.wordlist) == (True, True):
continue
if is_new_seed(seed, prefix):
break
self.logger.info(f'{len(seed.split())} words')
return seed
def is_new_seed(x: str, prefix=version.SEED_PREFIX) -> bool:
x = normalize_text(x)
s = bh2u(hmac_oneshot(b"Seed version", x.encode('utf8'), hashlib.sha512))
return s.startswith(prefix)
def is_old_seed(seed: str) -> bool:
from . import old_mnemonic
seed = normalize_text(seed)
words = seed.split()
try:
# checks here are deliberately left weak for legacy reasons, see #3149
old_mnemonic.mn_decode(words)
uses_electrum_words = True
except Exception:
uses_electrum_words = False
try:
seed = bfh(seed)
is_hex = (len(seed) == 16 or len(seed) == 32)
except Exception:
is_hex = False
return is_hex or (uses_electrum_words and (len(words) == 12 or len(words) == 24))
def seed_type(x: str) -> str:
num_words = len(x.split())
if is_old_seed(x):
return 'old'
elif is_new_seed(x, version.SEED_PREFIX):
return 'standard'
return ''
def is_seed(x: str) -> bool:
return bool(seed_type(x))
def is_any_2fa_seed_type(seed_type: str) -> bool:
return seed_type in ['2fa', '2fa_segwit']
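# Usage sketch (illustrative, not part of the original module): how the pieces above fit together.
#
#   m = Mnemonic('en')
#   words = m.make_seed(seed_type='standard')         # hash-based version prefix, not a BIP39 checksum
#   assert seed_type(words) == 'standard'             # checked via is_new_seed()'s HMAC-SHA512 prefix
#   root_seed = Mnemonic.mnemonic_to_seed(words, '')  # 2048-round PBKDF2-HMAC-SHA512, salt b'electrum' + passphrase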
| 34.295455 | 140 | 0.631434 |
7946f05d249527a7e7b3a7d0478153fea321a0eb | 295 | py | Python | pyscf/pbc/df/__init__.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | [
"BSD-2-Clause"
] | 1 | 2018-05-02T19:55:30.000Z | 2018-05-02T19:55:30.000Z | pyscf/pbc/df/__init__.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | [
"BSD-2-Clause"
] | null | null | null | pyscf/pbc/df/__init__.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | [
"BSD-2-Clause"
] | 1 | 2018-12-06T03:10:50.000Z | 2018-12-06T03:10:50.000Z | from . import incore
from . import outcore
from . import fft
from . import aft
from . import df
from . import mdf
from .df import DF, GDF
from .mdf import MDF
from .aft import AFTDF
from .fft import FFTDF
from pyscf.df.addons import aug_etb
# For backward compatibility
pwdf = aft
PWDF = AFTDF
| 18.4375 | 35 | 0.755932 |
7946f0b8e4c098dbe1e4ad5721b49aaa6ec77b3d | 93 | py | Python | build/lib/pyconfluent/kafka_streams/errors/kafka_streams_error.py | newellp2019/pyconfluent | b1de0b8255678c2e6a3a7d016df57b9f40cdc861 | [
"MIT"
] | null | null | null | build/lib/pyconfluent/kafka_streams/errors/kafka_streams_error.py | newellp2019/pyconfluent | b1de0b8255678c2e6a3a7d016df57b9f40cdc861 | [
"MIT"
] | null | null | null | build/lib/pyconfluent/kafka_streams/errors/kafka_streams_error.py | newellp2019/pyconfluent | b1de0b8255678c2e6a3a7d016df57b9f40cdc861 | [
"MIT"
] | null | null | null | """
Runtime exception raised on error
"""
class KafkaStreamsError(RuntimeError):
pass
| 11.625 | 38 | 0.72043 |
7946f1928208bba17eee8e65a266e0aa4268aa7e | 4,582 | py | Python | src/final_solution/param_search.py | federicoBetti/DataScienceCompetition_FutureSales | f78e38f39b5ecda2a67b4bfbf476cdc6a4a41460 | [
"MIT"
] | null | null | null | src/final_solution/param_search.py | federicoBetti/DataScienceCompetition_FutureSales | f78e38f39b5ecda2a67b4bfbf476cdc6a4a41460 | [
"MIT"
] | null | null | null | src/final_solution/param_search.py | federicoBetti/DataScienceCompetition_FutureSales | f78e38f39b5ecda2a67b4bfbf476cdc6a4a41460 | [
"MIT"
] | null | null | null | import os
import hyperopt
import pandas as pd
from hyperopt import fmin, tpe, STATUS_OK, Trials
from hyperopt import hp
from hyperopt.pyll import scope
from sklearn.linear_model import LinearRegression
from xgboost import XGBRegressor
from src.utils.memory_managment import save_object
def trainXGBoost(train_x, train_y, valid_x=None, valid_y=None, n_estimators=50):
model = XGBRegressor(
max_depth=10,
n_estimators=n_estimators,
min_child_weight=0.5,
colsample_bytree=0.8,
subsample=0.8,
eta=0.1,
# tree_method='gpu_hist',
seed=42)
if valid_x is None:
eval_set = None
early_stopping = None
else:
eval_set = [(train_x, train_y), (valid_x, valid_y)]
early_stopping = 10
model.fit(
train_x,
train_y,
eval_metric="rmse",
eval_set=eval_set,
verbose=True,
early_stopping_rounds=early_stopping)
return model
def trainLR(train_x, train_y):
lr = LinearRegression()
lr.fit(train_x.fillna(0).values, train_y.fillna(0))
return lr
from sklearn import svm
def trainSVM(train_x, train_y):
regr = svm.LinearSVR()
regr.fit(train_x.values, train_y)
return regr
from sklearn.neural_network import MLPRegressor
def trainNN(train_x, train_y):
regr = MLPRegressor(hidden_layer_sizes=(16, 8), learning_rate="adaptive", verbose=True, max_iter=8)
regr.fit(train_x.values, train_y)
return regr
from sklearn.metrics import mean_squared_error
def getRMSE(y_actual, y_predicted):
rms = mean_squared_error(y_actual.clip(upper=20), y_predicted.clip(max=20), squared=True)
return rms
# train_test_df.dropna(inplace=True)
# all_train_x = train_test_df[train_test_df.date_block_num < 34].drop(['item_cnt_month'], axis=1)
# all_train_y = train_test_df[train_test_df.date_block_num < 34]['item_cnt_month'].clip(lower=0, upper=20)
def get_data():
CUSTOM_DATA_FOLDER = '../../data_custom/'
train_test_df = pd.read_feather(
os.path.join(os.getcwd(), CUSTOM_DATA_FOLDER, 'all_data_preprocessed.feather')).set_index("index")
train_x = train_test_df[train_test_df.date_block_num < 33].drop(['item_cnt_month'], axis=1)
train_y = train_test_df[train_test_df.date_block_num < 33]['item_cnt_month'].clip(lower=0, upper=20)
valid_x = train_test_df[train_test_df.date_block_num == 33].drop(['item_cnt_month'], axis=1)
valid_y = train_test_df[train_test_df.date_block_num == 33]['item_cnt_month'].clip(lower=0, upper=20)
del train_test_df
# test_x = train_test_df[train_test_df.date_block_num == 34].drop(['item_cnt_month'], axis=1)
return train_x, train_y, valid_x, valid_y
def get_validation_score(args):
max_depth = args["max_depth"]
min_child_weight = args["min_child_weight"]
eta = args["eta"]
subsample = args["subsample"]
colsample_bytree = args["colsample_bytree"]
train_x, train_y, valid_x, valid_y = get_data()
model = XGBRegressor(
max_depth=max_depth,
n_estimators=100,
min_child_weight=min_child_weight,
colsample_bytree=colsample_bytree,
subsample=subsample,
eta=eta,
# tree_method='gpu_hist',
seed=42)
eval_set = [(train_x, train_y), (valid_x, valid_y)]
early_stopping = 15
model.fit(
train_x,
train_y,
eval_metric="rmse",
eval_set=eval_set,
verbose=False,
early_stopping_rounds=early_stopping)
rmse = getRMSE(valid_y, model.predict(valid_x, ntree_limit=model.best_ntree_limit))
    dict_to_ret = {
        "loss": rmse,  # fmin minimizes the loss, so the RMSE must not be negated here
"status": STATUS_OK,
"best_tree_number": model.best_ntree_limit
}
return dict_to_ret
space = {
"max_depth": scope.int(hp.quniform("max_depth", 5, 40, 2)),
"min_child_weight": hp.uniform("min_child_weight", 0.3, 1),
"eta": hp.choice("eta", [0.1, 0.01, 0.001]),
"subsample": hp.uniform("subsample", 0.6, 1),
"colsample_bytree": hp.uniform("colsample_bytree", 0.6, 1),
}
trials = Trials()
best = fmin(get_validation_score, space, algo=tpe.suggest, max_evals=10, trials=trials)
print(best)
# -> {'a': 1, 'c2': 0.01420615366247227}
print(hyperopt.space_eval(space, best))
print(trials)
CUSTOM_DATA_FOLDER = '../../data_custom/'  # needed at module scope; get_data() only defines it locally
best_path = os.path.join(os.getcwd(), CUSTOM_DATA_FOLDER, 'best_opt.pkl')
trials_path = os.path.join(os.getcwd(), CUSTOM_DATA_FOLDER, 'trials.pkl')
space_path = os.path.join(os.getcwd(), CUSTOM_DATA_FOLDER, 'space.pkl')
save_object(best, best_path)
save_object(trials, trials_path)
save_object(space, space_path)
| 29.371795 | 106 | 0.693147 |
7946f1be1d664cd8ee5a6ae2688f723c400d759b | 7,155 | py | Python | d3rlpy/algos/torch/awr_impl.py | YangRui2015/d3rlpy | da778b2a2b0afbafe25395296baecd0d4d0cd0d5 | [
"MIT"
] | 1 | 2021-05-08T06:21:05.000Z | 2021-05-08T06:21:05.000Z | d3rlpy/algos/torch/awr_impl.py | YangRui2015/d3rlpy | da778b2a2b0afbafe25395296baecd0d4d0cd0d5 | [
"MIT"
] | null | null | null | d3rlpy/algos/torch/awr_impl.py | YangRui2015/d3rlpy | da778b2a2b0afbafe25395296baecd0d4d0cd0d5 | [
"MIT"
] | null | null | null | from abc import ABCMeta, abstractmethod
from typing import Any, Optional, Sequence
import numpy as np
import torch
from torch.optim import Optimizer
from ...augmentation import AugmentationPipeline
from ...gpu import Device
from ...models.builders import (
create_categorical_policy,
create_squashed_normal_policy,
create_value_function,
)
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.torch import (
CategoricalPolicy,
Policy,
SquashedNormalPolicy,
ValueFunction,
squash_action,
)
from ...preprocessing import ActionScaler, Scaler
from ...torch_utility import augmentation_api, eval_api, torch_api, train_api
from .base import TorchImplBase
class AWRBaseImpl(TorchImplBase, metaclass=ABCMeta):
_actor_learning_rate: float
_critic_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_use_gpu: Optional[Device]
_v_func: Optional[ValueFunction]
_policy: Optional[Policy]
_critic_optim: Optional[Optimizer]
_actor_optim: Optional[Optimizer]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
augmentation: AugmentationPipeline,
):
super().__init__(
observation_shape, action_size, scaler, action_scaler, augmentation
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._actor_encoder_factory = actor_encoder_factory
self._critic_encoder_factory = critic_encoder_factory
self._use_gpu = use_gpu
# initialized in build
self._v_func = None
self._policy = None
self._critic_optim = None
self._actor_optim = None
def build(self) -> None:
# setup torch models
self._build_critic()
self._build_actor()
if self._use_gpu:
self.to_gpu(self._use_gpu)
else:
self.to_cpu()
# setup optimizer after the parameters move to GPU
self._build_critic_optim()
self._build_actor_optim()
def _build_critic(self) -> None:
self._v_func = create_value_function(
self._observation_shape, self._critic_encoder_factory
)
def _build_critic_optim(self) -> None:
assert self._v_func is not None
self._critic_optim = self._critic_optim_factory.create(
self._v_func.parameters(), lr=self._critic_learning_rate
)
@abstractmethod
def _build_actor(self) -> None:
pass
def _build_actor_optim(self) -> None:
assert self._policy is not None
self._actor_optim = self._actor_optim_factory.create(
self._policy.parameters(), lr=self._actor_learning_rate
)
@train_api
@torch_api(scaler_targets=["observation"])
def update_critic(
self, observation: torch.Tensor, value: torch.Tensor
) -> np.ndarray:
assert self._critic_optim is not None
self._critic_optim.zero_grad()
loss = self.compute_critic_loss(observation, value)
loss.backward()
self._critic_optim.step()
return loss.cpu().detach().numpy()
@augmentation_api(targets=["observation"])
def compute_critic_loss(
self, observation: torch.Tensor, value: torch.Tensor
) -> torch.Tensor:
assert self._v_func is not None
return self._v_func.compute_error(observation, value)
@train_api
@torch_api(scaler_targets=["observation"], action_scaler_targets=["action"])
def update_actor(
self,
observation: torch.Tensor,
action: torch.Tensor,
weight: torch.Tensor,
) -> np.ndarray:
assert self._actor_optim is not None
self._actor_optim.zero_grad()
loss = self.compute_actor_loss(observation, action, weight)
loss.backward()
self._actor_optim.step()
return loss.cpu().detach().numpy()
@augmentation_api(targets=["observation"])
def compute_actor_loss(
self,
observation: torch.Tensor,
action: torch.Tensor,
weight: torch.Tensor,
) -> torch.Tensor:
return self._compute_actor_loss(observation, action, weight)
@abstractmethod
def _compute_actor_loss(
self,
observation: torch.Tensor,
action: torch.Tensor,
weight: torch.Tensor,
) -> torch.Tensor:
pass
def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._policy is not None
return self._policy.best_action(x)
def _sample_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._policy is not None
return self._policy.sample(x)
@eval_api
@torch_api(scaler_targets=["x"])
def predict_value(
self, x: torch.Tensor, *args: Any, **kwargs: Any
) -> np.ndarray:
assert self._v_func is not None
with torch.no_grad():
return self._v_func(x).view(-1).cpu().detach().numpy()
class AWRImpl(AWRBaseImpl):
_policy: Optional[SquashedNormalPolicy]
def _build_actor(self) -> None:
self._policy = create_squashed_normal_policy(
self._observation_shape,
self._action_size,
self._actor_encoder_factory,
)
def _compute_actor_loss(
self,
observation: torch.Tensor,
action: torch.Tensor,
weight: torch.Tensor,
) -> torch.Tensor:
assert self._policy is not None
dist = self._policy.dist(observation)
# unnormalize action via inverse tanh function
unnormalized_action = torch.atanh(action.clamp(-0.999999, 0.999999))
# compute log probability
_, log_probs = squash_action(dist, unnormalized_action)
return -(weight * log_probs).mean()
class DiscreteAWRImpl(AWRBaseImpl):
_policy: Optional[CategoricalPolicy]
def _build_actor(self) -> None:
self._policy = create_categorical_policy(
self._observation_shape,
self._action_size,
self._actor_encoder_factory,
)
def _compute_actor_loss(
self,
observation: torch.Tensor,
action: torch.Tensor,
weight: torch.Tensor,
) -> torch.Tensor:
assert self._policy is not None
dist = self._policy.dist(observation)
log_probs = dist.log_prob(action).view(observation.shape[0], -1)
return -(weight * log_probs.sum(dim=1, keepdim=True)).mean()
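# Note (illustrative, not part of the original file): in advantage-weighted regression the
# `weight` passed to update_actor() is typically exp(advantage / beta), computed by the
# caller, so the losses above are a weighted negative log-likelihood of the logged actions.
# Hypothetical caller code:
#
#   weight = torch.exp(advantage / beta).clamp(max=20.0)
#   actor_loss = impl.update_actor(observation, action, weight)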
| 29.937238 | 80 | 0.667785 |
7946f1d56291481d29bb3c995f90d24a25290ffc | 4,321 | py | Python | libs/base/lblogs.py | wildscsi/ecopos | 9922bb5160227777401eb33fa9a01cfba5730781 | [
"MIT"
] | null | null | null | libs/base/lblogs.py | wildscsi/ecopos | 9922bb5160227777401eb33fa9a01cfba5730781 | [
"MIT"
] | 1 | 2021-11-04T20:43:03.000Z | 2021-11-04T20:43:03.000Z | libs/base/lblogs.py | wildscsi/ecopos | 9922bb5160227777401eb33fa9a01cfba5730781 | [
"MIT"
] | 1 | 2021-11-04T19:43:53.000Z | 2021-11-04T19:43:53.000Z | __author__ = 'BM'
# -*- coding: utf-8 -*-
import os
from datetime import *
class WrLog:
def __init__(self):
self.nolog = False
self.logsize = 1024*1024 # 1Mb = 1024*1024
self.filename = 'app'
self.path = ''
self.errmsg = ''
def writelog(self, cmsg='', ch='-'):
        # logging is disabled, nothing to write
self.errmsg = ''
if self.nolog: return
        # check whether the log file already exists
if os.path.exists(self.path + self.filename + '.log'):
            # if the file has grown past the size limit, rename it to .old
            if os.path.getsize(self.path + self.filename + '.log') > self.logsize:  # use self.path consistently with the exists/rename calls
try:
os.replace(self.path + self.filename + '.log', self.path + self.filename + '.old')
except Exception as e:
return self.errbox('Ошибка переименовывания файла\n%s' %e)
        # open the file for appending
ctxt = datetime.today().strftime("%d-%m-%Y %H:%M") + '\n' + cmsg + '\n' + ch * 50
        return self.datatofile(ctxt, self.path + self.filename + '.log', 'LOG')  # keep the same path as the rotation logic above
def datatofile(self,cmsg='',cfile='',atr='',cfld=None):
self.errmsg = ''; atr = atr.upper()
if 'NEW' in atr: self.delfile(cfile)
if 'LOG' in atr: return self.fsave(cmsg,cfile)
if len(cfile) == 0: return self.errbox('Имя файла не определено')
tpv = type(cmsg)
ctxt = '-' * 50 + '\n' + datetime.today().strftime("%d-%m-%Y %H:%M") + '\n'
if tpv is str:
if 'TXT' in atr: return self.fsave(cmsg,cfile)
ctxt += 'ТИП STRING\n' + cmsg
elif tpv is dict:
ctxt += 'СЛОВАРЬ ДАННЫХ\n'
if len(cmsg) == 0: return self.fsave('Словарь пуст',cfile)
ctxt += self.getdict(cmsg,cfld)
elif tpv is list:
ipos = 0
ctxt += 'СПИСОК ДАННЫХ\n'
if len(cmsg) == 0: return self.fsave('Список пуст',cfile)
cmsg = list(cmsg)
for c1 in cmsg:
if type(c1) is dict:
ipos += 1
ctxt += '< ' + str(ipos) + ' > ' + '-' * 10 + '\n' + self.getdict(c1,cfld)
else:
ctxt += str(c1) + '\n'
else:
ctxt += 'ТИП ' + str(tpv).upper() + '\n' + str(cmsg)
return self.fsave(ctxt,cfile)
@staticmethod
def getdict(dic1,cfld):
if len(dic1) == 0: return 'Словарь пуст'
ctxt = ''
if not cfld is None: cfld += ','
try:
dic1 = dict(dic1)
lst = dic1.keys()
for ckey in lst:
if not cfld is None:
if not ckey + ',' in cfld: continue
ctxt += ckey + '=' + str(dic1[ckey]) + '\n'
except Exception as e:
ctxt = 'ОШИБКА DATATOFILE: запись словаря\n%s' %e
return ctxt
def delfile(self,cfile=''):
if len(cfile) == 0: return self.errbox('Имя файла не определено')
try:
os.remove(cfile)
except Exception as e:
return self.errbox('ОШИБКА DELFILE: удаления файла\n%s' %e)
return True
    # Writes data to a file
def fsave(self,cmsg='',fname='',cp='utf-8'):
fname = fname.strip(' ')
if len(fname) == 0:
return self.errbox('Имя файла неопределено')
try:
ofile = open(fname, 'a', encoding=cp)
ofile.write(cmsg + '\n')
ofile.close()
except Exception as e:
return self.errbox('Ошибка записи в файл\n%s' %e)
return True
    # Reads data from a file
def fload(self,fname='',cp='utf-8'):
fname = fname.strip(' '); lret = True; han = None; cresult = ''
if len(fname) == 0:
return self.errbox('Имя файла неопределено'),''
if not os.path.isfile(fname):
return self.errbox('Файл ' + fname.upper() + ' не найден'),''
try:
han = open(fname,'r',encoding=cp)
except Exception as e:
lret = self.errbox('Ошибка чтения из файла\n' + fname.upper() + '\n%s' %e),''
if not lret: return False,''
for line in han:
cresult += line
han.close()
return True, cresult
def errbox(self,cmsg):
self.errmsg = 'WRLOG: ' + cmsg
return False
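# Usage sketch (illustrative, not part of the original module):
#
#   log = WrLog()
#   log.writelog('application started')           # appends to app.log, rotated to app.old above 1 MiB
#   ok = log.datatofile({'user': 1}, 'dump.txt')  # dict/list/str payloads are formatted before saving
#   ok, text = log.fload('dump.txt')              # on failure ok is False and log.errmsg holds the reason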
| 37.25 | 102 | 0.505439 |
7946f51e65f446569c19e6ee3d32b5ee4cbb9c07 | 2,570 | py | Python | frameworks/kafka/tests/test_overlay.py | smush618/dcos-commons | 3c69642c79f197f4b28deb8d4e2df9160e77906b | [
"Apache-2.0"
] | null | null | null | frameworks/kafka/tests/test_overlay.py | smush618/dcos-commons | 3c69642c79f197f4b28deb8d4e2df9160e77906b | [
"Apache-2.0"
] | null | null | null | frameworks/kafka/tests/test_overlay.py | smush618/dcos-commons | 3c69642c79f197f4b28deb8d4e2df9160e77906b | [
"Apache-2.0"
] | null | null | null | import pytest
import sdk_install as install
import sdk_networks
import sdk_tasks
import sdk_utils
from tests import config, test_utils
@pytest.fixture(scope="module", autouse=True)
def configure_package(configure_security):
try:
install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
config.install(
config.PACKAGE_NAME,
config.SERVICE_NAME,
config.DEFAULT_BROKER_COUNT,
additional_options=sdk_networks.ENABLE_VIRTUAL_NETWORKS_OPTIONS,
)
yield # let the test session execute
finally:
install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.mark.overlay
@pytest.mark.smoke
@pytest.mark.sanity
@pytest.mark.dcos_min_version("1.9")
def test_service_overlay_health():
"""Installs SDK based Kafka on with virtual networks set to True. Tests that the deployment completes
and the service is healthy, then checks that all of the service tasks (brokers) are on the overlay network
"""
tasks = sdk_tasks.check_task_count(config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)
for task in tasks:
sdk_networks.check_task_network(task.name)
@pytest.mark.smoke
@pytest.mark.sanity
@pytest.mark.overlay
@pytest.mark.dcos_min_version("1.9")
def test_overlay_network_deployment_and_endpoints():
endpoint_names = sdk_networks.get_endpoint_names(config.PACKAGE_NAME, config.SERVICE_NAME)
assert set(["broker", "zookeeper"]) == set(endpoint_names)
sdk_networks.check_endpoint_on_overlay(config.PACKAGE_NAME, config.SERVICE_NAME, "broker", config.DEFAULT_BROKER_COUNT)
zookeeper = sdk_networks.get_endpoint_string(
config.PACKAGE_NAME, config.SERVICE_NAME, "zookeeper"
)
assert zookeeper == "master.mesos:2181/{}".format(sdk_utils.get_zk_path(config.SERVICE_NAME))
@pytest.mark.sanity
@pytest.mark.overlay
@pytest.mark.dcos_min_version("1.9")
def test_pod_restart_on_overlay():
test_utils.restart_broker_pods()
test_overlay_network_deployment_and_endpoints()
@pytest.mark.sanity
@pytest.mark.overlay
@pytest.mark.dcos_min_version("1.9")
def test_pod_replace_on_overlay():
test_utils.replace_broker_pod()
test_overlay_network_deployment_and_endpoints()
@pytest.mark.sanity
@pytest.mark.overlay
@pytest.mark.dcos_min_version("1.9")
def test_topic_create_overlay():
test_utils.create_topic(config.EPHEMERAL_TOPIC_NAME)
@pytest.mark.sanity
@pytest.mark.overlay
@pytest.mark.dcos_min_version("1.9")
def test_topic_delete_overlay():
test_utils.delete_topic(config.EPHEMERAL_TOPIC_NAME)
| 31.341463 | 123 | 0.770428 |
7946f5382ff287a8d517671bf9e58f49f64afe21 | 26 | py | Python | urlkeyword/__init__.py | jleeothon/urlkeyword | ef745e08a90cba56038d508667fb0f6acc3e41ce | [
"MIT"
] | 3 | 2021-05-21T03:45:59.000Z | 2022-01-23T18:26:45.000Z | urlkeyword/__init__.py | jleeothon/urlkeyword | ef745e08a90cba56038d508667fb0f6acc3e41ce | [
"MIT"
] | 13 | 2021-04-03T19:56:35.000Z | 2022-01-23T18:39:47.000Z | urlkeyword/__init__.py | jleeothon/urlkeyword | ef745e08a90cba56038d508667fb0f6acc3e41ce | [
"MIT"
] | 2 | 2022-02-02T03:15:51.000Z | 2022-03-07T10:00:41.000Z | from .validators import *
| 13 | 25 | 0.769231 |
7946f5ff3699e956e0f938fb9e77a062d3fe66d5 | 3,266 | py | Python | structural_model/constants.py | zibneuro/udvary-et-al-2022 | 8b456c41e72958677cb6035028d9c23013cb7c7e | [
"MIT"
] | 1 | 2022-03-11T13:43:50.000Z | 2022-03-11T13:43:50.000Z | structural_model/constants.py | zibneuro/udvary-et-al-2022 | 8b456c41e72958677cb6035028d9c23013cb7c7e | [
"MIT"
] | null | null | null | structural_model/constants.py | zibneuro/udvary-et-al-2022 | 8b456c41e72958677cb6035028d9c23013cb7c7e | [
"MIT"
] | null | null | null | import numpy as np
def getLayerDepths():
name_depthRange = {}
name_depthRange["L1"] = [0, 157]
name_depthRange["L2"] = [157, 367]
name_depthRange["L23"] = [157, 576]
name_depthRange["L3"] = [367, 576]
name_depthRange["L4"] = [576, 855]
name_depthRange["L5A"] = [855, 1102]
name_depthRange["L5B"] = [1102, 1349]
name_depthRange["L5"] = [855, 1349]
name_depthRange["L6A"] = [1349, 1620]
name_depthRange["L6"] = [1349, 1973]
return name_depthRange
def getLaminarLocations():
return ["L1", "L23", "L4", "L5", "L6"]
def getColumns():
return ["A1", "A2", "A3", "A4",
"B1", "B2", "B3", "B4",
"C1", "C2", "C3", "C4",
"D1", "D2", "D3", "D4",
"E1", "E2", "E3", "E4",
"Alpha", "Beta", "Gamma", "Delta"]
def getRegionsForColumn(column, includeSurrounding = True):
regions = [
column,
"S1_Septum_{}".format(column),
"{}_Barreloid".format(column)
]
if(includeSurrounding):
regions.append("S1_Surrounding_{}".format(column))
return regions
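# Example (derived from the code above): getRegionsForColumn("C2", includeSurrounding=True)
# returns ["C2", "S1_Septum_C2", "C2_Barreloid", "S1_Surrounding_C2"].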
def getCellTypes(includeVPM = True):
if(includeVPM):
return ["L2PY", "L3PY", "L4PY", "L4sp", "L4ss",
"L5IT", "L5PT", "L6CC", "L6INV", "L6CT", "INH", "VPM"]
else:
return ["L2PY", "L3PY", "L4PY", "L4sp", "L4ss",
"L5IT", "L5PT", "L6CC", "L6INV", "L6CT", "INH"]
def getCellTypesExc(includeVPM = True):
if(includeVPM):
allCelltypes = getCellTypes(includeVPM=True)
allCelltypes.remove("INH")
return allCelltypes
else:
return getCellTypes()[0:10]
def getNetworkIndex(network):
if(network == "RBC" or "Truncated" in network):
return getNetworks().index(network)
else:
network = network.replace("-", "RBCTruncated")
return getNetworks().index(network)
def getReferenceVolume():
boxMin = np.array([-200, 300, -1000])
boxMax = np.array([0, 500, 600])
return boxMin, boxMax
def getL4Volume():
boxMin = np.array([-400, 100, -200])
boxMax = np.array([200, 700, 150])
return boxMin, boxMax
def getC2Volume():
boxMin = np.array([-550, -50, -1400])
boxMax = np.array([400, 850, 700])
return boxMin, boxMax
def getC2VolumeExt():
boxMin = np.array([-700, -200, -1600])
boxMax = np.array([600, 1100, 800])
return boxMin, boxMax
def getD2Volume():
boxMin = np.array([-500, -500, -1600])
boxMax = np.array([500, 500, 700])
return boxMin, boxMax
def getCellularConnectivityVolume():
boxMin = np.array([-700, -1200, -1600])
boxMax = np.array([600, 1900, 800])
return boxMin, boxMax
def getModelVolume():
boxMin = np.array([-1600, -1200, -1600])
boxMax = np.array([1800, 1900, 800])
return boxMin, boxMax
def getSelectedCubeVolume():
boxMin = np.array([-150, 250, 350])
boxMax = np.array([-50, 350, 400])
return boxMin, boxMax
def getReferenceVolumeL5():
# grid size: 8 x 8 x 24
boxMin = np.array([-128, 400, -408])
boxMax = np.array([-48, 480, -360])
return boxMin, boxMax
def getSelectedCellIds():
# L5PT: 301854 (ct 6)
# L2PY: 748854 (ct 0)
# L6CC: 199678 (ct 7)
return [301854, 748854, 199678] | 25.716535 | 70 | 0.578996 |
7946f6b1c8f826896cff47686312fe45dc991a35 | 52,772 | py | Python | torch/quantization/observer.py | rodrigoberriel/pytorch | 97e86cf3197823cc598e6380ed47bcc385e50077 | [
"Intel"
] | null | null | null | torch/quantization/observer.py | rodrigoberriel/pytorch | 97e86cf3197823cc598e6380ed47bcc385e50077 | [
"Intel"
] | null | null | null | torch/quantization/observer.py | rodrigoberriel/pytorch | 97e86cf3197823cc598e6380ed47bcc385e50077 | [
"Intel"
] | null | null | null | import re
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from functools import partial
from typing import Any, List, Tuple, Optional, Dict, Union
import torch
import torch.nn as nn
from .utils import check_min_max_valid, calculate_qmin_qmax
class _PartialWrapper(object):
def __init__(self, p):
self.p = p
self.callable_args = {}
def __call__(self, *args, **keywords):
# call each arg in callable_args and add them partial, then run with keywords
# skip if arg_name in keywords so its possible to overwrite
for arg_name in self.callable_args:
if arg_name not in keywords:
keywords = {**keywords, **{arg_name: self.callable_args[arg_name]()}}
return self.p(*args, **keywords)
def __repr__(self):
return self.p.__repr__() + self.callable_args.__repr__()
def with_args(self, **kwargs):
return _with_args(self, **kwargs)
def with_callable_args(self, **kwargs):
result = _PartialWrapper(p=self.p)
result.callable_args = {**self.callable_args, **kwargs}
return result
def _with_args(cls_or_self, **kwargs):
r"""Wrapper that allows creation of class factories.
This can be useful when there is a need to create classes with the same
constructor arguments, but different instances. Can be used in conjunction with
_callable_args
Example::
>>> Foo.with_args = classmethod(_with_args)
>>> foo_builder = Foo.with_args(a=3, b=4).with_args(answer=42)
>>> foo_instance1 = foo_builder()
>>> foo_instance2 = foo_builder()
>>> id(foo_instance1) == id(foo_instance2)
False
"""
r = _PartialWrapper(partial(cls_or_self, **kwargs))
return r
def _with_callable_args(cls_or_self, **kwargs):
r"""Wrapper that allows creation of class factories args that need to be
called at construction time.
This can be useful when there is a need to create classes with the same
constructor arguments, but different instances and those arguments should only
be calculated at construction time. Can be used in conjunction with _with_args
Example::
>>> Foo.with_callable_args = classmethod(_with_callable_args)
>>> Foo.with_args = classmethod(_with_args)
>>> foo_builder = Foo.with_callable_args(cur_time=get_time_func).with_args(name="dan")
>>> foo_instance1 = foo_builder()
>>> wait 50
>>> foo_instance2 = foo_builder()
>>> id(foo_instance1.creation_time) == id(foo_instance2.creation_time)
False
"""
r = _PartialWrapper(partial(cls_or_self))
return r.with_callable_args(**kwargs)
ABC: Any = ABCMeta(str("ABC"), (object,), {}) # compatible with Python 2 *and* 3:
class ObserverBase(ABC, nn.Module):
r"""Base observer Module.
Any observer implementation should derive from this class.
Concrete observers should follow the same API. In forward, they will update
the statistics of the observed Tensor. And they should provide a
`calculate_qparams` function that computes the quantization parameters given
the collected statistics.
Args:
dtype: Quantized data type
"""
def __init__(self, dtype):
super(ObserverBase, self).__init__()
self.dtype = dtype
@abstractmethod
def forward(self, x):
pass
@abstractmethod
def calculate_qparams(self, **kwargs):
pass
with_args = classmethod(_with_args)
with_callable_args = classmethod(_with_callable_args)
class _ObserverBase(ObserverBase):
r"""Internal common base for all qint/quint8 observers.
This base is for commonly used parameters used internally.
Users should use `~torch.quantization.observer.ObserverBase` as a base class
for custom observers.
Args:
dtype: Quantized data type.
qscheme: Quantization scheme to be used.
reduce_range: Reduces the range of the quantized data type by 1 bit.
This is sometimes required to avoid instruction overflow.
quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
.. warning::
:attr:`dtype` can only take ``torch.qint8`` or ``torch.quint8``.
.. warning::
:attr:`qscheme` can only take one of the following options:
- ``torch.per_tensor_affine``
- ``torch.per_tensor_symmetric``
- ``torch.per_channel_affine``
- ``torch.per_channel_symmetric``
"""
# Note: the version is shared by all observer types
#
# Version 1/None
# self
#
# Version 2 (base class only, does not include child class buffers)
# self
# |--- eps : Tensor
#
# Version 3
# for HistogramObserver only, changed the shape of uninitialized
# min_val and max_val buffers from torch.Size([0]) to torch.Size([])
# for PerChannelObservers, changed the name of the buffers from min_vals
# to min_val and from max_vals to max_val.
_version = 3
eps: torch.Tensor
def __init__(
self,
dtype=torch.quint8,
qscheme=torch.per_tensor_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
factory_kwargs=None,
) -> None:
factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
super(_ObserverBase, self).__init__(dtype=dtype)
self.qscheme = qscheme
if reduce_range:
warnings.warn(
"Please use quant_min and quant_max to specify the range for observers. \
reduce_range will be deprecated in a future release of PyTorch."
)
self.reduce_range = reduce_range
self.register_buffer(
"eps", torch.tensor([torch.finfo(torch.float32).eps], **factory_kwargs)
)
assert self.qscheme in (
torch.per_tensor_affine,
torch.per_tensor_symmetric,
torch.per_channel_affine,
torch.per_channel_symmetric,
torch.per_channel_affine_float_qparams,
), "Default Observer only works for per_tensor_affine, \
per_tensor_symmetric, per_channel_affine, \
per_channel_symmetric and per_channel_float_qparams quantization scheme"
assert self.dtype in (
torch.qint8,
torch.quint8,
torch.quint4x2,
), "Default Observer only works for qint8, quint8 and quint4x2 data type"
self.has_customized_qrange = (quant_min is not None) and (quant_max is not None)
if self.has_customized_qrange:
self._validate_qmin_qmax(quant_min, quant_max)
self.quant_min, self.quant_max = \
calculate_qmin_qmax(quant_min, quant_max, self.has_customized_qrange, self.dtype, self.reduce_range)
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
if version is None or version == 1:
# eps was moved to a buffer in version 2
eps = torch.tensor([torch.finfo(torch.float32).eps])
state_dict[prefix + "eps"] = eps
super(ObserverBase, self)._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
@torch.jit.export
def _validate_qmin_qmax(self, quant_min: int, quant_max: int) -> None:
r"""Validates that the user-specified quantization range is properly initialized
and within the given bound supported by the observer dtype.
To accommodate lower-bit quantization with respect to the existing torch.qint8 and
torch.quint8 datatypes, the user can choose to use dynamic quantization range by passing
in a tuple of initial qmin and qmax values. One use case is these customized qmin and qmax
values are used to calculate static estimates of the scale and zero point for aggressive lower-bit
fake quantization. These estimates are compared against parameters learned through backpropagation.
The related literatures for scale and zero point via backpropagation are as follows:
Learned Step Size Quantization: https://openreview.net/pdf?id=rkgO66VKDS
Trained Quantization Thresholds: https://arxiv.org/pdf/1903.08066.pdf
"""
# The variable names are prefixed with "initial" because their values (qmin and qmax) might be adjusted
# based on whether quantization range is reduced and the datatype (signed/unsigned) used by the observer.
assert (
quant_min <= 0 <= quant_max
), "Used-specified quantization range must include 0."
assert (
quant_min < quant_max
), "qmin must be strictly less than qmax for user-specified quantization range."
@torch.jit.export
def _calculate_qparams(
self, min_val: torch.Tensor, max_val: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Calculates the quantization parameters, given min and max
value tensors. Works for both per tensor and per channel cases
Args:
min_val: Minimum values per channel
max_val: Maximum values per channel
Returns:
scales: Scales tensor of shape (#channels,)
zero_points: Zero points tensor of shape (#channels,)
"""
if not check_min_max_valid(min_val, max_val):
return torch.tensor([1.0], device=min_val.device.type), torch.tensor([0], device=min_val.device.type)
quant_min, quant_max = self.quant_min, self.quant_max
min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
device = min_val_neg.device
scale = torch.ones(min_val_neg.size(), dtype=torch.float32, device=device)
zero_point = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device)
if (
self.qscheme == torch.per_tensor_symmetric
or self.qscheme == torch.per_channel_symmetric
):
max_val_pos = torch.max(-min_val_neg, max_val_pos)
scale = max_val_pos / (float(quant_max - quant_min) / 2)
scale = torch.max(scale, self.eps)
if self.dtype == torch.quint8:
if self.has_customized_qrange:
# When customized quantization range is used, down-rounded midpoint of the range is chosen.
zero_point = zero_point.new_full(
zero_point.size(), (quant_min + quant_max) // 2
)
else:
zero_point = zero_point.new_full(zero_point.size(), 128)
elif self.qscheme == torch.per_channel_affine_float_qparams:
scale = (max_val - min_val) / float(quant_max - quant_min)
scale = torch.where(scale > self.eps, scale, torch.ones_like(scale))
# We use the quantize function
# xq = Round(Xf * inv_scale + zero_point),
# setting zero_point to (-1 * min *inv_scale) we get
# Xq = Round((Xf - min) * inv_scale)
zero_point = -1 * min_val / scale
else:
scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min)
scale = torch.max(scale, self.eps)
zero_point = quant_min - torch.round(min_val_neg / scale).to(torch.int)
zero_point = torch.clamp(zero_point, quant_min, quant_max)
# For scalar values, cast them to Tensors of size 1 to keep the shape
# consistent with default values in FakeQuantize.
if len(scale.shape) == 0:
# TODO: switch to scale.item() after adding JIT support
scale = torch.tensor([float(scale)], dtype=scale.dtype, device=device)
if len(zero_point.shape) == 0:
# TODO: switch to zero_point.item() after adding JIT support
zero_point = torch.tensor(
[int(zero_point)], dtype=zero_point.dtype, device=device
)
if self.qscheme == torch.per_channel_affine_float_qparams:
zero_point = torch.tensor(
[float(zero_point)], dtype=zero_point.dtype, device=device
)
return scale, zero_point
@torch.jit.export
def reset_min_max_vals(self):
raise NotImplementedError("Cannot reset min/max values in the given observer.")
class MinMaxObserver(_ObserverBase):
r"""Observer module for computing the quantization parameters based on the
running min and max values.
This observer uses the tensor min/max statistics to compute the quantization
parameters. The module records the running minimum and maximum of incoming
tensors, and uses this statistic to compute the quantization parameters.
Args:
dtype: Quantized data type
qscheme: Quantization scheme to be used
reduce_range: Reduces the range of the quantized data type by 1 bit
quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
Given running min/max as :math:`x_\text{min}` and :math:`x_\text{max}`,
scale :math:`s` and zero point :math:`z` are computed as:
The running minimum/maximum :math:`x_\text{min/max}` is computed as:
.. math::
\begin{array}{ll}
x_\text{min} &= \begin{cases}
\min(X) & \text{if~}x_\text{min} = \text{None} \\
\min\left(x_\text{min}, \min(X)\right) & \text{otherwise}
\end{cases}\\
x_\text{max} &= \begin{cases}
\max(X) & \text{if~}x_\text{max} = \text{None} \\
\max\left(x_\text{max}, \max(X)\right) & \text{otherwise}
\end{cases}\\
\end{array}
where :math:`X` is the observed tensor.
The scale :math:`s` and zero point :math:`z` are then computed as:
.. math::
\begin{aligned}
\text{if Symmetric:}&\\
&s = 2 \max(|x_\text{min}|, x_\text{max}) /
\left( Q_\text{max} - Q_\text{min} \right) \\
&z = \begin{cases}
0 & \text{if dtype is qint8} \\
128 & \text{otherwise}
\end{cases}\\
\text{Otherwise:}&\\
&s = \left( x_\text{max} - x_\text{min} \right ) /
\left( Q_\text{max} - Q_\text{min} \right ) \\
&z = Q_\text{min} - \text{round}(x_\text{min} / s)
\end{aligned}
where :math:`Q_\text{min}` and :math:`Q_\text{max}` are the minimum and
maximum of the quantized data type.
.. warning:: Only works with ``torch.per_tensor_symmetric`` quantization scheme
.. warning:: :attr:`dtype` can only take ``torch.qint8`` or ``torch.quint8``.
    .. note:: If the running minimum equals the running maximum, the scale
and zero_point are set to 1.0 and 0.
"""
min_val: torch.Tensor
max_val: torch.Tensor
def __init__(
self,
dtype=torch.quint8,
qscheme=torch.per_tensor_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
factory_kwargs=None,
) -> None:
# For x86 quantized kernels, we need to ensure that the vpmaddubsw
# instruction does not overflow. We allow for a reduce_range argument to
# observers that reduces the quantized range to (0,127) or (-64, 63).
# For more details see aten/src/ATen/native/quantized/cpu/qconv.cpp
# This is not an optimal choice for non x86 backends as it loses a bit
# of precision for activations.
super(MinMaxObserver, self).__init__(
dtype=dtype,
qscheme=qscheme,
reduce_range=reduce_range,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs,
)
factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
self.register_buffer("min_val", torch.tensor(float("inf"), **factory_kwargs))
self.register_buffer("max_val", torch.tensor(float("-inf"), **factory_kwargs))
if (
self.qscheme == torch.per_tensor_symmetric
and self.reduce_range
and self.dtype == torch.quint8
):
            raise NotImplementedError(
                "Cannot reduce range for symmetric quantization for quint8"
            )
def forward(self, x_orig):
r"""Records the running minimum and maximum of ``x``."""
if x_orig.numel() == 0:
return x_orig
x = x_orig.detach() # avoid keeping autograd tape
x = x.to(self.min_val.dtype)
min_val_cur, max_val_cur = torch._aminmax(x)
min_val = torch.min(min_val_cur, self.min_val)
max_val = torch.max(max_val_cur, self.max_val)
self.min_val.copy_(min_val)
self.max_val.copy_(max_val)
return x_orig
@torch.jit.export
def calculate_qparams(self):
r"""Calculates the quantization parameters."""
return self._calculate_qparams(self.min_val, self.max_val)
@torch.jit.export
def extra_repr(self):
return "min_val={}, max_val={}".format(self.min_val, self.max_val)
@torch.jit.export
def reset_min_max_vals(self):
"""Resets the min/max values."""
self.min_val = torch.tensor(float("inf"))
self.max_val = torch.tensor(float("-inf"))
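# Illustrative usage sketch (the helper name and tensor values below are
# arbitrary, not part of the upstream API): observe one batch, then read back
# the affine quantization parameters.
def _example_minmax_observer_usage():
    obs = MinMaxObserver(dtype=torch.quint8, qscheme=torch.per_tensor_affine)
    obs(torch.tensor([-1.0, 0.0, 2.0]))  # records min_val=-1.0, max_val=2.0
    scale, zero_point = obs.calculate_qparams()
    # Affine case: scale = (2.0 - (-1.0)) / 255 ~= 0.0118,
    # zero_point = 0 - round(-1.0 / scale) = 85, clamped to [0, 255].
    return scale, zero_point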
class MovingAverageMinMaxObserver(MinMaxObserver):
r"""Observer module for computing the quantization parameters based on the
moving average of the min and max values.
This observer computes the quantization parameters based on the moving
averages of minimums and maximums of the incoming tensors. The module
records the average minimum and maximum of incoming tensors, and uses this
statistic to compute the quantization parameters.
Args:
averaging_constant: Averaging constant for min/max.
dtype: Quantized data type
qscheme: Quantization scheme to be used
reduce_range: Reduces the range of the quantized data type by 1 bit
quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
The moving average min/max is computed as follows
.. math::
\begin{array}{ll}
x_\text{min} = \begin{cases}
\min(X) & \text{if~}x_\text{min} = \text{None} \\
(1 - c) x_\text{min} + c \min(X) & \text{otherwise}
\end{cases}\\
x_\text{max} = \begin{cases}
\max(X) & \text{if~}x_\text{max} = \text{None} \\
(1 - c) x_\text{max} + c \max(X) & \text{otherwise}
\end{cases}\\
\end{array}
    where :math:`x_\text{min/max}` is the running average min/max, :math:`X` is
    the incoming tensor, and :math:`c` is the ``averaging_constant``.
The scale and zero point are then computed as in
:class:`~torch.quantization.observer.MinMaxObserver`.
.. note:: Only works with ``torch.per_tensor_affine`` quantization scheme.
    .. note:: If the running minimum equals the running maximum, the scale
and zero_point are set to 1.0 and 0.
"""
def __init__(
self,
averaging_constant=0.01,
dtype=torch.quint8,
qscheme=torch.per_tensor_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
**kwargs
) -> None:
self.averaging_constant = averaging_constant
super(MovingAverageMinMaxObserver, self).__init__(
dtype=dtype,
qscheme=qscheme,
reduce_range=reduce_range,
quant_min=quant_min,
quant_max=quant_max,
**kwargs
)
def forward(self, x_orig):
if x_orig.numel() == 0:
return x_orig
x = x_orig.detach() # avoid keeping autograd tape
x = x.to(self.min_val.dtype)
min_val = self.min_val
max_val = self.max_val
if min_val == float("inf") and max_val == float("-inf"):
min_val, max_val = torch._aminmax(x)
else:
min_val_cur, max_val_cur = torch._aminmax(x)
min_val = min_val + self.averaging_constant * (min_val_cur - min_val)
max_val = max_val + self.averaging_constant * (max_val_cur - max_val)
self.min_val.copy_(min_val)
self.max_val.copy_(max_val)
return x_orig
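# Illustrative sketch of the exponential moving average update used in
# forward() above, written out for plain floats (helper name is arbitrary).
def _example_moving_average_update(running_min, running_max, batch_min, batch_max, c=0.01):
    # x_min <- x_min + c * (min(X) - x_min), and likewise for the maximum
    new_min = running_min + c * (batch_min - running_min)
    new_max = running_max + c * (batch_max - running_max)
    return new_min, new_max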
class PerChannelMinMaxObserver(_ObserverBase):
r"""Observer module for computing the quantization parameters based on the
running per channel min and max values.
This observer uses the tensor min/max statistics to compute the per channel
quantization parameters. The module records the running minimum and maximum
of incoming tensors, and uses this statistic to compute the quantization
parameters.
Args:
ch_axis: Channel axis
dtype: Quantized data type
qscheme: Quantization scheme to be used
reduce_range: Reduces the range of the quantized data type by 1 bit
quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
The quantization parameters are computed the same way as in
:class:`~torch.quantization.observer.MinMaxObserver`, with the difference
that the running min/max values are stored per channel.
Scales and zero points are thus computed per channel as well.
    .. note:: If the running minimum equals the running maximum, the scales
and zero_points are set to 1.0 and 0.
"""
min_val: torch.Tensor
max_val: torch.Tensor
def __init__(
self,
ch_axis=0,
dtype=torch.quint8,
qscheme=torch.per_channel_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
factory_kwargs=None,
) -> None:
super(PerChannelMinMaxObserver, self).__init__(
dtype=dtype,
qscheme=qscheme,
reduce_range=reduce_range,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs,
)
factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
self.ch_axis = ch_axis
self.register_buffer("min_val", torch.tensor([], **factory_kwargs))
self.register_buffer("max_val", torch.tensor([], **factory_kwargs))
if (
self.qscheme == torch.per_channel_symmetric
and self.reduce_range
and self.dtype == torch.quint8
):
raise NotImplementedError(
"Cannot reduce range for symmetric quantization for quint8"
)
def forward(self, x_orig):
return self._forward(x_orig)
def _forward(self, x_orig):
if x_orig.numel() == 0:
return x_orig
x = x_orig.detach() # avoid keeping autograd tape
min_val = self.min_val
max_val = self.max_val
x_dim = x.size()
new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
new_axis_list[self.ch_axis] = 0
new_axis_list[0] = self.ch_axis
y = x.permute(new_axis_list)
# Need to match dtype of min/max because the updates to buffers
# are done in place and types need to match for comparisons
y = y.to(self.min_val.dtype)
y = torch.flatten(y, start_dim=1)
if min_val.numel() == 0 or max_val.numel() == 0:
min_val, max_val = torch._aminmax(y, 1)
else:
min_val_cur, max_val_cur = torch._aminmax(y, 1)
min_val = torch.min(min_val_cur, min_val)
max_val = torch.max(max_val_cur, max_val)
self.min_val.resize_(min_val.shape)
self.max_val.resize_(max_val.shape)
self.min_val.copy_(min_val)
self.max_val.copy_(max_val)
return x_orig
@torch.jit.export
def calculate_qparams(self):
return self._calculate_qparams(self.min_val, self.max_val)
def extra_repr(self):
return "min_val={}, max_val={}".format(self.min_val, self.max_val)
def _load_from_state_dict(
self,
state_dict: Union[Dict[str, torch.Tensor], Dict[str, torch.Tensor]],
prefix: str,
local_metadata: Dict[str, torch.Tensor],
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str],
):
version = local_metadata.get("version", None)
if version is None or version < 3:
local_state = ["min_vals", "max_vals"]
expected_min_name = "min_vals"
expected_max_name = "max_vals"
else:
local_state = ["min_val", "max_val"]
expected_min_name = "min_val"
expected_max_name = "max_val"
for name in local_state:
key = prefix + name
if key in state_dict:
val = state_dict[key]
# Custom handling to allow loading min_val or max_val
# of size N into uninitialized buffers of size 0. The
# buffers are resized here, and the values are copied in
# the default state_dict loading code of the parent.
if name == expected_min_name:
self.min_val.resize_(val.shape)
elif name == expected_max_name:
self.max_val.resize_(val.shape)
else:
warnings.warn("Observer load_from_state_dict got unexpected name {}".format(name))
# For torchscript module we need to update the attributes here since we do not
                # call the `_load_from_state_dict` function defined in module.py
if torch.jit.is_scripting():
if name == expected_min_name:
self.min_val.copy_(val)
elif name == expected_max_name:
self.max_val.copy_(val)
else:
warnings.warn("Observer load_from_state_dict got unexpected name {}".format(name))
elif strict:
missing_keys.append(key)
if not torch.jit.is_scripting():
super(PerChannelMinMaxObserver, self)._load_from_state_dict(
state_dict,
prefix,
local_metadata,
False,
missing_keys,
unexpected_keys,
error_msgs,
)
def _load_from_state_dict_script(
self,
state_dict: Union[Dict[str, torch.Tensor], Dict[str, torch.Tensor]],
prefix: str,
local_metadata: Dict[str, torch.Tensor],
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str],
):
self._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
@torch.jit.export
def reset_min_max_vals(self):
"""Resets the min/max values."""
self.min_val = torch.tensor([])
self.max_val = torch.tensor([])
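# Illustrative per-channel usage sketch (random stand-in weight tensor, helper
# name arbitrary): with ch_axis=0 each of the 8 output channels gets its own
# scale and zero point.
def _example_per_channel_observer_usage():
    obs = PerChannelMinMaxObserver(
        ch_axis=0, dtype=torch.qint8, qscheme=torch.per_channel_symmetric
    )
    obs(torch.randn(8, 16))
    scales, zero_points = obs.calculate_qparams()
    return scales.shape, zero_points.shape  # both torch.Size([8])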
class MovingAveragePerChannelMinMaxObserver(PerChannelMinMaxObserver):
r"""Observer module for computing the quantization parameters based on the
running per channel min and max values.
This observer uses the tensor min/max statistics to compute the per channel
quantization parameters. The module records the running minimum and maximum
of incoming tensors, and uses this statistic to compute the quantization
parameters.
Args:
averaging_constant: Averaging constant for min/max.
ch_axis: Channel axis
dtype: Quantized data type
qscheme: Quantization scheme to be used
reduce_range: Reduces the range of the quantized data type by 1 bit
quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
The quantization parameters are computed the same way as in
:class:`~torch.quantization.observer.MovingAverageMinMaxObserver`, with the
difference that the running min/max values are stored per channel.
Scales and zero points are thus computed per channel as well.
    .. note:: If the running minimum equals the running maximum, the scales
and zero_points are set to 1.0 and 0.
"""
def __init__(
self,
averaging_constant=0.01,
ch_axis=0,
dtype=torch.quint8,
qscheme=torch.per_channel_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
**kwargs
) -> None:
super(MovingAveragePerChannelMinMaxObserver, self).__init__(
ch_axis=ch_axis,
dtype=dtype,
qscheme=qscheme,
reduce_range=reduce_range,
quant_min=quant_min,
quant_max=quant_max,
**kwargs
)
self.averaging_constant = averaging_constant
def forward(self, x_orig):
if x_orig.numel() == 0:
return x_orig
x = x_orig.detach() # avoid keeping autograd tape
x = x.to(self.min_val.dtype)
min_val = self.min_val
max_val = self.max_val
x_dim = x.size()
new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
new_axis_list[self.ch_axis] = 0
new_axis_list[0] = self.ch_axis
y = x.permute(new_axis_list)
y = torch.flatten(y, start_dim=1)
if min_val.numel() == 0 or max_val.numel() == 0:
min_val, max_val = torch._aminmax(y, 1)
else:
min_val_cur, max_val_cur = torch._aminmax(y, 1)
min_val = min_val + self.averaging_constant * (min_val_cur - min_val)
max_val = max_val + self.averaging_constant * (max_val_cur - max_val)
self.min_val.resize_(min_val.shape)
self.max_val.resize_(max_val.shape)
self.min_val.copy_(min_val)
self.max_val.copy_(max_val)
return x_orig
class HistogramObserver(_ObserverBase):
r"""
The module records the running histogram of tensor values along with
min/max values. ``calculate_qparams`` will calculate scale and zero_point.
Args:
bins: Number of bins to use for the histogram
upsample_rate: Factor by which the histograms are upsampled, this is
used to interpolate histograms with varying ranges across observations
dtype: Quantized data type
qscheme: Quantization scheme to be used
reduce_range: Reduces the range of the quantized data type by 1 bit
The scale and zero point are computed as follows:
1. Create the histogram of the incoming inputs.
The histogram is computed continuously, and the ranges per bin change
with every new tensor observed.
2. Search the distribution in the histogram for optimal min/max values.
The search for the min/max values ensures the minimization of the
quantization error with respect to the floating point model.
3. Compute the scale and zero point the same way as in the
:class:`~torch.quantization.MinMaxObserver`
"""
histogram: torch.Tensor
min_val: torch.Tensor
max_val: torch.Tensor
def __init__(
self,
bins: int = 2048,
upsample_rate: int = 128,
dtype: torch.dtype = torch.quint8,
qscheme=torch.per_tensor_affine,
reduce_range=False,
factory_kwargs=None,
) -> None:
# bins: The number of bins used for histogram calculation.
super(HistogramObserver, self).__init__(
dtype=dtype,
qscheme=qscheme,
reduce_range=reduce_range,
factory_kwargs=factory_kwargs,
)
factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
self.bins = bins
self.register_buffer("histogram", torch.zeros(self.bins, **factory_kwargs))
self.register_buffer("min_val", torch.tensor(float("inf"), **factory_kwargs))
self.register_buffer("max_val", torch.tensor(float("-inf"), **factory_kwargs))
self.dst_nbins = 2 ** torch.iinfo(self.dtype).bits
self.upsample_rate = upsample_rate
def _get_norm(
self, delta_begin: torch.Tensor, delta_end: torch.Tensor, density: torch.Tensor
) -> torch.Tensor:
r"""
        Compute the norm of the values uniformly distributed between
delta_begin and delta_end.
Currently only L2 norm is supported.
norm = density * (integral_{begin, end} x^2)
= density * (end^3 - begin^3) / 3
"""
norm = (
delta_end * delta_end * delta_end - delta_begin * delta_begin * delta_begin
) / 3
return density * norm
def _compute_quantization_error(self, next_start_bin: int, next_end_bin: int):
r"""
Compute the quantization error if we use start_bin to end_bin as the
min and max to do the quantization.
"""
bin_width = (self.max_val.item() - self.min_val.item()) / self.bins
dst_bin_width = bin_width * (next_end_bin - next_start_bin + 1) / self.dst_nbins
if dst_bin_width == 0.0:
return 0.0
src_bin = torch.arange(self.bins, device=self.histogram.device)
# distances from the beginning of first dst_bin to the beginning and
# end of src_bin
src_bin_begin = (src_bin - next_start_bin) * bin_width
src_bin_end = src_bin_begin + bin_width
# which dst_bins the beginning and end of src_bin belong to?
dst_bin_of_begin = torch.clamp(
src_bin_begin // dst_bin_width, 0, self.dst_nbins - 1
)
dst_bin_of_begin_center = (dst_bin_of_begin + 0.5) * dst_bin_width
dst_bin_of_end = torch.clamp(
src_bin_end // dst_bin_width, 0, self.dst_nbins - 1
)
dst_bin_of_end_center = (dst_bin_of_end + 0.5) * dst_bin_width
density = self.histogram / bin_width
norm = torch.zeros(self.bins, device=self.histogram.device)
delta_begin = src_bin_begin - dst_bin_of_begin_center
delta_end = dst_bin_width / 2
norm += self._get_norm(delta_begin,
torch.ones(self.bins, device=self.histogram.device) * delta_end,
density)
norm += (dst_bin_of_end - dst_bin_of_begin - 1) * self._get_norm(
torch.tensor(-dst_bin_width / 2), torch.tensor(dst_bin_width / 2), density
)
dst_bin_of_end_center = dst_bin_of_end * dst_bin_width + dst_bin_width / 2
delta_begin = -dst_bin_width / 2
delta_end = src_bin_end - dst_bin_of_end_center
norm += self._get_norm(torch.tensor(delta_begin), delta_end, density)
return norm.sum().item()
def _non_linear_param_search(self) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Non-linear parameter search.
An approximation for L2 error minimization for selecting min/max.
By selecting new min/max, we filter out outliers in input distribution.
This follows the implementation of NormMinimization::NonlinearQuantizationParamsSearch in
caffe2/quantization/server/norm_minimization.cc
"""
        assert self.histogram.size()[0] == self.bins, "bins mismatch"
bin_width = (self.max_val - self.min_val) / self.bins
# cumulative sum
total = torch.sum(self.histogram).item()
cSum = torch.cumsum(self.histogram, dim=0)
stepsize = 1e-5 # granularity
alpha = 0.0 # lower bound
beta = 1.0 # upper bound
start_bin = 0
end_bin = self.bins - 1
norm_min = float("inf")
while alpha < beta:
# Find the next step
next_alpha = alpha + stepsize
next_beta = beta - stepsize
# find the left and right bins between the quantile bounds
l = start_bin
r = end_bin
while l < end_bin and cSum[l] < next_alpha * total:
l = l + 1
while r > start_bin and cSum[r] > next_beta * total:
r = r - 1
# decide the next move
next_start_bin = start_bin
next_end_bin = end_bin
if (l - start_bin) > (end_bin - r):
# move the start bin
next_start_bin = l
alpha = next_alpha
else:
# move the end bin
next_end_bin = r
beta = next_beta
if next_start_bin == start_bin and next_end_bin == end_bin:
continue
# calculate the quantization error using next_start_bin and next_end_bin
norm = self._compute_quantization_error(next_start_bin, next_end_bin)
if norm > norm_min:
break
norm_min = norm
start_bin = next_start_bin
end_bin = next_end_bin
new_min = self.min_val + bin_width * start_bin
new_max = self.min_val + bin_width * (end_bin + 1)
return new_min, new_max
def _adjust_min_max(
self, combined_min: torch.Tensor, combined_max: torch.Tensor, upsample_rate: int
) -> Tuple[torch.Tensor, torch.Tensor, int, int]:
# We ensure that:
# (combined_max - combined_min)/(downsample_rate*Nbins) = (max - min)/(upsample_rate*Nbins)
# This allows us to have a common grid of resolution s, where we can align
# the input histogram
# start_idx maps min_val to the histogram bin index.
hist_bin_width = (self.max_val - self.min_val) / (self.bins * upsample_rate)
downsample_rate = int(
torch.ceil(
(combined_max - combined_min) / (self.bins * hist_bin_width)
).item()
)
e = downsample_rate * (self.bins * hist_bin_width) - (
combined_max - combined_min
)
# Relax only the max, not the min, so that for one sided distributions, min stays at zero
combined_max = combined_max + e
combined_min = combined_min
start_idx = int(
torch.round((self.min_val - combined_min) / hist_bin_width).item()
)
return combined_min, combined_max, downsample_rate, start_idx
def _combine_histograms(
self,
orig_hist: torch.Tensor,
new_hist: torch.Tensor,
upsample_rate: int,
downsample_rate: int,
start_idx: int,
Nbins: int,
) -> torch.Tensor:
# First up-sample the histogram with new data by a factor of L
        # This creates an approximate probability density that's piecewise constant
upsampled_histogram = new_hist.repeat_interleave(upsample_rate)
# Now insert the upsampled histogram into the output
# histogram, which is initialized with zeros.
# The offset at which the histogram is introduced is determined
# by the start index as the output histogram can cover a wider range
histogram_with_output_range = torch.zeros(
(Nbins * downsample_rate), device=orig_hist.device
)
histogram_with_output_range[
start_idx : Nbins * upsample_rate + start_idx
] = upsampled_histogram
# Compute integral histogram, double precision is needed to ensure
# that there are no overflows
integral_histogram = torch.cumsum(
histogram_with_output_range, 0, dtype=torch.double
)[downsample_rate - 1 :: downsample_rate]
# Finally perform interpolation
shifted_integral_histogram = torch.zeros((Nbins), device=orig_hist.device)
shifted_integral_histogram[1:Nbins] = integral_histogram[0:-1]
interpolated_histogram = (
integral_histogram - shifted_integral_histogram
) / upsample_rate
orig_hist = orig_hist + interpolated_histogram.to(torch.float)
return orig_hist
def forward(self, x_orig: torch.Tensor) -> torch.Tensor:
if x_orig.numel() == 0:
return x_orig
x = x_orig.detach()
min_val = self.min_val
max_val = self.max_val
same_values = min_val.item() == max_val.item()
is_uninitialized = min_val == float("inf") and max_val == float("-inf")
if is_uninitialized or same_values:
min_val, max_val = torch._aminmax(x)
self.min_val.resize_(min_val.shape)
self.min_val.copy_(min_val)
self.max_val.resize_(max_val.shape)
self.max_val.copy_(max_val)
assert (
min_val.numel() == 1 and max_val.numel() == 1
), "histogram min/max values must be scalar."
torch.histc(
x, self.bins, min=int(min_val), max=int(max_val), out=self.histogram
)
else:
new_min, new_max = torch._aminmax(x)
combined_min = torch.min(new_min, min_val)
combined_max = torch.max(new_max, max_val)
# combine the existing histogram and new histogram into 1 histogram
# We do this by first upsampling the histogram to a dense grid
# and then downsampling the histogram efficiently
(
combined_min,
combined_max,
downsample_rate,
start_idx,
) = self._adjust_min_max(combined_min, combined_max, self.upsample_rate)
assert (
combined_min.numel() == 1 and combined_max.numel() == 1
), "histogram min/max values must be scalar."
combined_histogram = torch.histc(
x, self.bins, min=int(combined_min), max=int(combined_max)
)
if combined_min == min_val and combined_max == max_val:
combined_histogram += self.histogram
else:
combined_histogram = self._combine_histograms(
combined_histogram,
self.histogram,
self.upsample_rate,
downsample_rate,
start_idx,
self.bins,
)
self.histogram.detach_().resize_(combined_histogram.shape)
self.histogram.copy_(combined_histogram)
self.min_val.detach_().resize_(combined_min.shape)
self.min_val.copy_(combined_min)
self.max_val.detach_().resize_(combined_max.shape)
self.max_val.copy_(combined_max)
return x_orig
@torch.jit.export
def calculate_qparams(self):
is_uninitialized = self.min_val == float("inf") and self.max_val == float(
"-inf"
)
if is_uninitialized:
            warnings.warn(
                "must run observer before calling calculate_qparams. "
                "Returning default scale and zero point"
            )
return torch.tensor([1.0], device=self.min_val.device.type), torch.tensor([0], device=self.min_val.device.type)
assert self.bins == len(self.histogram), (
"The number of bins in histogram should be equal to the number of bins "
"supplied while making this observer"
)
new_min, new_max = self._non_linear_param_search()
return self._calculate_qparams(new_min, new_max)
def _save_to_state_dict(self, destination, prefix, keep_vars):
super(HistogramObserver, self)._save_to_state_dict(
destination, prefix, keep_vars
)
destination[prefix + "min_val"] = self.min_val
destination[prefix + "max_val"] = self.max_val
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
if version is None or version < 3:
# if min_val and max_val are not initialized, update their shape
# to account for the differences between v2 and v3
min_val_name, max_val_name = prefix + "min_val", prefix + "max_val"
if min_val_name in state_dict:
if state_dict[min_val_name].shape == torch.Size([0]):
state_dict[min_val_name] = torch.tensor(float("inf"))
if max_val_name in state_dict:
if state_dict[max_val_name].shape == torch.Size([0]):
state_dict[max_val_name] = torch.tensor(float("-inf"))
local_state = ["min_val", "max_val"]
for name in local_state:
key = prefix + name
if key in state_dict:
val = state_dict[key]
setattr(self, name, val)
elif strict:
missing_keys.append(key)
super(HistogramObserver, self)._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
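# Illustrative calibration sketch (random data stands in for real calibration
# batches; helper name is arbitrary): accumulate a histogram over several
# forward passes, then read back the error-minimizing qparams.
def _example_histogram_observer_usage():
    obs = HistogramObserver(bins=2048, dtype=torch.quint8, reduce_range=True)
    for _ in range(4):
        obs(torch.randn(32, 10))
    return obs.calculate_qparams()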
class PlaceholderObserver(ObserverBase):
r"""
Observer that doesn't do anything and just passes its configuration to the
quantized module's ``.from_float()``.
Can be used for quantization to float16 which doesn't require determining
ranges.
Args:
dtype: Quantized data type
custom_op_name: (temporary) specify this observer for an operator that doesn't require any observation
(Can be used in Graph Mode Passes for special case ops).
"""
def __init__(
self, dtype=torch.float32, custom_op_name="", compute_dtype=None
) -> None:
super(PlaceholderObserver, self).__init__(dtype=dtype)
# dtype of input of the target operator, e.g. for dynamic quantization
# ops, the dtype will be float32
self.dtype = dtype
self.custom_op = custom_op_name
# used for configuration of computation type for dynamic quantization
if compute_dtype:
self.compute_dtype = compute_dtype
def forward(self, x):
return x
@torch.jit.export
def calculate_qparams(self):
raise Exception(
"calculate_qparams should not be called for PlaceholderObserver"
)
class RecordingObserver(_ObserverBase):
r"""
    The module is mainly for debugging and records the tensor values during runtime.
Args:
dtype: Quantized data type
qscheme: Quantization scheme to be used
reduce_range: Reduces the range of the quantized data type by 1 bit
"""
__annotations__ = {"tensor_val": List[Optional[torch.Tensor]]}
def __init__(self, **kwargs):
super(RecordingObserver, self).__init__(**kwargs)
self.tensor_val = []
def forward(self, x):
self.tensor_val.append(x.clone())
return x
@torch.jit.export
def calculate_qparams(self):
raise Exception("calculate_qparams should not be called for RecordingObserver")
@torch.jit.export
def get_tensor_value(self):
return self.tensor_val
class NoopObserver(ObserverBase):
r"""
Observer that doesn't do anything and just passes its configuration to the
quantized module's ``.from_float()``.
Primarily used for quantization to float16 which doesn't require determining
ranges.
Args:
dtype: Quantized data type
custom_op_name: (temporary) specify this observer for an operator that doesn't require any observation
(Can be used in Graph Mode Passes for special case ops).
"""
def __init__(self, dtype=torch.float16, custom_op_name="") -> None:
super(NoopObserver, self).__init__(dtype=dtype)
self.dtype = dtype
self.custom_op = custom_op_name
def forward(self, x):
return x
@torch.jit.export
def calculate_qparams(self):
raise Exception("calculate_qparams should not be called for NoopObserver")
def _is_observer_script_module(mod, obs_type_name):
"""Returns true if given mod is an instance of Observer script module."""
if isinstance(mod, torch.jit.RecursiveScriptModule):
# qualified name looks like '__torch__.torch.quantization.observer.___torch_mangle_2.MinMaxObserver'
suffix = mod._c.qualified_name.split(".", 1)[1]
name = re.sub(r"\.___torch_mangle_\d+", "", suffix)
return obs_type_name in name
return False
def _is_activation_post_process(module):
return (
isinstance(module, torch.quantization.ObserverBase)
or isinstance(module, torch.quantization.FakeQuantize)
or _is_observer_script_module(module, "quantization.observer")
)
def _is_per_channel_script_obs_instance(module):
if isinstance(module, torch.jit.RecursiveScriptModule):
return _is_observer_script_module(
module, "quantization.observer.PerChannelMinMaxObserver"
) or _is_observer_script_module(
module, "quantization.observer.MovingAveragePerChannelMinMaxObserver"
)
return False
def get_observer_state_dict(mod):
r"""
Returns the state dict corresponding to the observer stats.
Traverse the model state_dict and extract out the stats.
"""
od = OrderedDict()
if isinstance(mod, torch.jit.RecursiveScriptModule):
for k, v in mod.state_dict().items():
if "observer" in k:
od[k] = v
else:
# path for GraphModule and nn.Module (eager mode)
for k, v in mod.state_dict().items():
if "activation_post_process" in k:
od[k] = v
od._metadata = mod.state_dict()._metadata # type: ignore[attr-defined]
return od
def load_observer_state_dict(mod, obs_dict):
r"""
Given input model and a state_dict containing model observer stats,
load the stats back into the model. The observer state_dict can be saved
using torch.quantization.get_observer_state_dict
"""
missing_keys: List[str] = []
unexpected_keys: List[str] = []
for name, module in mod.named_modules():
prefix = name + "."
if _is_activation_post_process(module):
if _is_per_channel_script_obs_instance(module):
# For per-channel observers we need to call a custom load_from_state_dict to resize the tensor.
# However this is not called when the module is scripted and we end up calling the default one in module.py
module._load_from_state_dict_script(
obs_dict, prefix, {}, True, missing_keys, unexpected_keys, []
)
else:
module._load_from_state_dict(
obs_dict, prefix, {}, False, missing_keys, unexpected_keys, []
)
for k in missing_keys:
if "observer" in k or "activation_post_process" in k:
raise Exception("Missing keys for observer {} in state_dict".format(k))
for k in unexpected_keys:
if "observer" in k or "activation_post_process" in k:
raise Exception("Unexpected keys for observer {} in state_dict".format(k))
# Restrict activations to be in the range (0,127)
default_observer = MinMaxObserver.with_args(reduce_range=True)
default_placeholder_observer = PlaceholderObserver
default_debug_observer = RecordingObserver
default_weight_observer = MinMaxObserver.with_args(
dtype=torch.qint8, qscheme=torch.per_tensor_symmetric
)
default_histogram_observer = HistogramObserver.with_args(reduce_range=True)
default_per_channel_weight_observer = PerChannelMinMaxObserver.with_args(
dtype=torch.qint8, qscheme=torch.per_channel_symmetric
)
default_dynamic_quant_observer = PlaceholderObserver.with_args(
dtype=torch.float, compute_dtype=torch.quint8
)
default_float_qparams_observer = PerChannelMinMaxObserver.with_args(
dtype=torch.quint8, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0
)
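# Illustrative sketch (helper name arbitrary): the partials above are typically
# paired into a QConfig for static post-training quantization.
def _example_static_qconfig():
    return torch.quantization.QConfig(
        activation=default_histogram_observer,
        weight=default_per_channel_weight_observer,
    )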
| 38.974889 | 123 | 0.62914 |
7946f9458a8f7140073a29465eb3912be5b48b1a | 75 | py | Python | demo.py | radroid/AI-in-Enterprise | e46d0af47823b9fedf9ae5788c8d742c0c191a57 | [
"MIT"
] | null | null | null | demo.py | radroid/AI-in-Enterprise | e46d0af47823b9fedf9ae5788c8d742c0c191a57 | [
"MIT"
] | null | null | null | demo.py | radroid/AI-in-Enterprise | e46d0af47823b9fedf9ae5788c8d742c0c191a57 | [
"MIT"
] | null | null | null | # This is a simple program to be uploaded to GitHub.
print('Hello World')
| 18.75 | 52 | 0.733333 |
7946fa5cdd1268d423a8a01f7ff812ad95e292a0 | 241 | py | Python | Aniyom Ebenezer/Phase 2/LIST/Day_40_Challenge_Solution/Question 4 Solution.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | Aniyom Ebenezer/Phase 2/LIST/Day_40_Challenge_Solution/Question 4 Solution.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | Aniyom Ebenezer/Phase 2/LIST/Day_40_Challenge_Solution/Question 4 Solution.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | """
Write a Python program to print the numbers of specified list after removing even numbers from it.
"""
numbers = [1, 2, 4, 3, 5, 4, 5, 7, 89, 32, 40]
specified_list = [x for x in numbers if x % 2 != 0]
print(specified_list) | 40.166667 | 98 | 0.680498 |
7946fb38fa24260cdb59e40d66acccebed52e887 | 1,781 | py | Python | models/networks/simple_nn.py | Piko-Piko-Pon-Taro/navict-recommender | 7eeaf0f77e500c1c0ecb15f9613aa08c2ef5c83c | [
"MIT"
] | null | null | null | models/networks/simple_nn.py | Piko-Piko-Pon-Taro/navict-recommender | 7eeaf0f77e500c1c0ecb15f9613aa08c2ef5c83c | [
"MIT"
] | 9 | 2021-05-03T01:38:46.000Z | 2021-07-14T13:13:25.000Z | models/networks/simple_nn.py | Piko-Piko-Pon-Taro/navict-recommender | 7eeaf0f77e500c1c0ecb15f9613aa08c2ef5c83c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""CBOW Embedding"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from models.base_model import BaseModel
from models.networks.cbow_embedder import Net as CBOW
class Net(nn.Module):
"""Network for CBOW"""
""" CBOW """
def __init__(self, embedder):
super().__init__()
"""
Args:
vocab_size
emb_size
"""
self.embedding = embedder.embedding
self.embedding.weight.requires_grad = False
self.emb_size = embedder.emb_size
self.vocab_size = embedder.vocab_size
self.net = nn.Sequential(
nn.Linear(self.emb_size, 128, bias=False),
nn.Dropout(p=0.2, inplace=False),
nn.Linear(128, self.vocab_size, bias=False),
nn.Softmax(dim=-1)
)
def forward(self, x):
x = self.embedding(x)
x = torch.sum(x, dim=1)
x = self.net(x)
return x
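# Note (illustrative): Net consumes index tensors of shape (batch, context_size)
# and returns a probability distribution of shape (batch, vocab_size); the
# embedder only needs to expose `embedding`, `emb_size` and `vocab_size`,
# e.g. a CBOW model restored from a checkpoint.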
class SimpleNN(BaseModel):
"""SimpleNN"""
def __init__(self, cfg: object) -> None:
"""Initialization
Build model.
Args:
cfg: Config.
"""
super().__init__(cfg)
self.embedder = CBOW(vocab_size=self.cfg.model.embedder.vocab_size, emb_size=self.cfg.model.embedder.emb_size)
ckpt_path = self.cfg.model.embedder.initial_ckpt
if torch.cuda.is_available():
ckpt = torch.load(ckpt_path)
else:
ckpt = torch.load(ckpt_path, torch.device('cpu'))
self.embedder.load_state_dict(ckpt['model_state_dict'])
self.num_class = self.cfg.data.dataset.num_class
self.network = Net(embedder=self.embedder)
self.build() | 24.067568 | 118 | 0.588995 |
7946fbecb0ad2a597ac5afe18f58f64781f6dee3 | 3,004 | py | Python | 2.DataPrepare/split_c18.py | NIA-Adp-Healthcare/test-project | 4ee172cdff14827b6946e672144ee381647d82ad | [
"MIT"
] | null | null | null | 2.DataPrepare/split_c18.py | NIA-Adp-Healthcare/test-project | 4ee172cdff14827b6946e672144ee381647d82ad | [
"MIT"
] | null | null | null | 2.DataPrepare/split_c18.py | NIA-Adp-Healthcare/test-project | 4ee172cdff14827b6946e672144ee381647d82ad | [
"MIT"
] | null | null | null |
import sys
import pandas as pd
import os
import glob
from sklearn.model_selection import train_test_split
# split_dataset(img_list, ALL_TEST, ALL_VALID, ALL_TRAIN)
def split_dataset(file_list, test_rid_list, valid_rid_list, train_rid_list):
print("FILES", len(file_list))
print("TEST LIST", len(test_rid_list))
print("VALID LIST", len(valid_rid_list))
print("TRAIN LIST", len(train_rid_list))
    all = pd.DataFrame({"path": file_list})  # build a DataFrame of label file paths
all["rid"] = all["path"].apply(lambda x: x.split('/')[-1].split('_')[1].lstrip('txt')[1:].rstrip('.txt'))
print(all["rid"][:10])
all["rid"] = all["rid"].apply(lambda x: x.lstrip("0"))
# all["rid"] = all['path']
# print(all)
print(all["rid"])
all["isTest"] = all["rid"].apply(lambda x: x in test_rid_list)
all["isValid"] = all["rid"].apply(lambda x: x in valid_rid_list)
all["isTrain"] = all["rid"].apply(lambda x: x in train_rid_list)
print(all["isTest"])
forTest = all[all["isTest"] == True]
forValid = all[all["isValid"] == True]
forTrain = all[all["isTrain"] == True]
#x_train , x_valid = train_test_split(forTrain, test_size=(1-train_ratio), random_state=1234)
A = list(forTrain["path"])
B = list(forValid["path"])
C = list(forTest["path"])
return A, B, C
####################
df = pd.read_csv('\\Users\\user\\Desktop\\modeling\\daejang_data\\full\\test_list.csv', dtype={'RID': str})
df_test = df[df["GUBUN"] == "TEST"]
ALL_TEST = list(df_test["rid"].unique())
ALL_TEST.sort()
print(ALL_TEST)
print()
df2 = pd.read_csv('\\Users\\user\\Desktop\\modeling\\daejang_data\\full\\valid_list.csv', dtype={'RID': str})
df_valid = df2[df2["GUBUN"] == "VALID"]
df_train = df2[df2["GUBUN"] == "TRAIN"]
ALL_VALID = list(df_valid["rid"].unique())
ALL_VALID.sort()
print(ALL_VALID)
print()
ALL_TRAIN = list(df_train["rid"].unique())
ALL_TRAIN.sort()
print(ALL_TRAIN)
print()
img_list = glob.glob('/Users/user/Desktop/modeling/daejang_data/full/label_txt/*.txt')
img_list.sort()
print(len(img_list))
print(img_list[:10])
train_img_list, val_img_list, test_list = split_dataset(img_list, ALL_TEST, ALL_VALID, ALL_TRAIN)
print(len(train_img_list), len(val_img_list), len(test_list))
with open('\\Users\\user\\Desktop\\modeling\\daejang_data\\full\\anewtrainByRid18.txt', 'w') as f:
f.write('\n'.join(train_img_list) + '\n')
with open('\\Users\\user\\Desktop\\modeling\\daejang_data\\full\\anewvalByRid18.txt', 'w') as f:
f.write('\n'.join(val_img_list) + '\n')
with open('\\Users\\user\\Desktop\\modeling\\daejang_data\\full\\anewtestByRid18.txt', 'w') as f:
f.write('\n'.join(test_list) + '\n')
"""
import yaml
with open('./data.yaml', 'r') as f:
data = yaml.load(f)
print(data)
data['train'] = '/home/joyhyuk/python/y2/train.txt'
data['val'] = '/home/joyhyuk/python/y2/val.txt'
with open('./data.yaml', 'w') as f:
yaml.dump(data, f)
print(data)
"""
| 31.291667 | 110 | 0.641811 |
7946fc2212cd78f528c0bff5184424dbd2cd4c88 | 1,777 | py | Python | src/framat/_util.py | airinnova/FramAT | 4177a95b4ed8d95a8330365e32ca13ac9ef24640 | [
"Apache-2.0"
] | 9 | 2019-08-13T18:49:22.000Z | 2022-03-31T08:40:29.000Z | src/framat/_util.py | airinnova/FramAT | 4177a95b4ed8d95a8330365e32ca13ac9ef24640 | [
"Apache-2.0"
] | 5 | 2020-01-08T08:38:25.000Z | 2022-02-07T17:01:45.000Z | src/framat/_util.py | airinnova/FramAT | 4177a95b4ed8d95a8330365e32ca13ac9ef24640 | [
"Apache-2.0"
] | 5 | 2019-09-20T18:47:20.000Z | 2022-01-11T13:08:55.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright 2019-2020 Airinnova AB and the FramAT authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------
# Author: Aaron Dettmann
"""
Solving
"""
import itertools
from numbers import Number
def enumerate_with_step(iterable, start=0, step=1):
"""
TODO
https://stackoverflow.com/questions/24290025/python-enumerate-downwards-or-with-a-custom-step
"""
for x in iterable:
yield (start, x)
start += step
def pairwise(iterable):
"""
Return a new iterator which yields pairwise items
s --> (s0,s1), (s1,s2), (s2, s3), ...
See: https://docs.python.org/3/library/itertools.html#itertools-recipes
"""
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
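# Quick illustration of the two helpers above (values are arbitrary):
#   list(enumerate_with_step("abc", start=10, step=5)) -> [(10, 'a'), (15, 'b'), (20, 'c')]
#   list(pairwise([1, 2, 3]))                          -> [(1, 2), (2, 3)]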
class Schemas:
any_int = {'type': int}
any_num = {'type': Number}
pos_int = {'type': int, '>': 0}
pos_number = {'type': Number, '>': 0}
string = {'type': str, '>': 0}
vector3x1 = {'type': list, 'min_len': 3, 'max_len': 3, 'item_types': Number}
vector6x1 = {'type': list, 'min_len': 6, 'max_len': 6, 'item_types': Number}
| 27.765625 | 97 | 0.605515 |
7946fcfce808e38cd3ff96968412ed809d88663b | 1,223 | py | Python | smacha/test/smacha_test_examples/nesting_params.py | ReconCell/smacha | 253215a35d2d091bf50c28c1ba876209b82d2400 | [
"BSD-3-Clause"
] | 16 | 2019-04-16T07:44:30.000Z | 2022-03-10T08:04:45.000Z | smacha/test/smacha_test_examples/nesting_params.py | ReconCell/smacha | 253215a35d2d091bf50c28c1ba876209b82d2400 | [
"BSD-3-Clause"
] | 2 | 2019-07-18T09:11:00.000Z | 2019-09-26T10:21:26.000Z | smacha/test/smacha_test_examples/nesting_params.py | ReconCell/smacha | 253215a35d2d091bf50c28c1ba876209b82d2400 | [
"BSD-3-Clause"
] | 2 | 2019-08-21T20:14:54.000Z | 2019-09-19T13:26:34.000Z | #!/usr/bin/env python
import smach
# define state Foo
class Foo(smach.State):
def __init__(self, name, outcome):
smach.State.__init__(self, outcomes=['outcome_a','outcome_b'])
self._name = name
self._outcome = outcome
def execute(self, userdata):
smach.loginfo('Executing state {}'.format(self._name))
smach.loginfo('Returning {}'.format(self._outcome))
return self._outcome
def main():
sm = smach.StateMachine(outcomes=['final_outcome'])
with sm:
sm_sub = smach.StateMachine(outcomes=['outcome_c'])
with sm_sub:
smach.StateMachine.add('FOO_0', Foo('FOO_0', 'outcome_a'),
transitions={'outcome_a':'FOO_1',
'outcome_b':'outcome_c'})
smach.StateMachine.add('FOO_1', Foo('FOO_1', 'outcome_b'),
transitions={'outcome_a':'FOO_1',
'outcome_b':'outcome_c'})
smach.StateMachine.add('SUB', sm_sub,
transitions={'outcome_c':'final_outcome'})
outcome = sm.execute()
if __name__ == '__main__':
main() | 27.795455 | 73 | 0.537204 |
7946fd3dcc568389ee67d29cba0fd77e6dcfdea4 | 73,301 | py | Python | libutil.py | dwlee08/dnfp-analyzer | 4ae4ec4d32c08288b997c83655a0c97c7d347216 | [
"Apache-2.0"
] | null | null | null | libutil.py | dwlee08/dnfp-analyzer | 4ae4ec4d32c08288b997c83655a0c97c7d347216 | [
"Apache-2.0"
] | null | null | null | libutil.py | dwlee08/dnfp-analyzer | 4ae4ec4d32c08288b997c83655a0c97c7d347216 | [
"Apache-2.0"
] | 1 | 2020-12-10T06:24:34.000Z | 2020-12-10T06:24:34.000Z | #!/usr/bin/python3
#-*- coding:utf-8 -*-
import os
from json import loads
import json
import urllib.request
from urllib import parse as urlparse
import time
import numpy as np
import random
from io import BytesIO
from parse import compile
import sys
import subprocess
import math
import re
import datetime
from time import sleep
from time import (
process_time,
perf_counter,
sleep,
)
#item_stat_type = ["이동속도", "공격속도", "물리크리티컬히트", "마법크리티컬히트", "모든속성강화", "모든속성저항", "캐스트속도"]
class LibUtil():
parser = [
compile("물리공격력+{깡물공:g}("),
compile("마법공격력+{깡마공:g}("),
compile("독립공격력+{깡독공:g}("),
compile("모든스탯+{깡스탯:g}("),
compile("모든스탯+{깡스탯:g}증가("),
compile("힘지능체력정신력{깡스탯:g}증가"),
compile("모든직업{}Lv스킬공격력{}%증가"),
compile("{}레벨액티브스킬공격력{}%증가"),
compile("도적{}레벨모든스킬공격력{}%증가"),
compile("물리크리티컬히트{물리크리티컬:g}%마법크리티컬히트{마법크리티컬:g}%증가"),
compile("크리티컬공격시데미지{크증댐:g}%증가"),
compile("크리티컬공격시데미지증가{}{크증추:g}%추가증가"),
compile("크리티컬공격시데{}지{크증추:g}%추가증가"),
compile("공격시데미지{증댐:g}%증가"),
compile("공격시데미지증가{}{증추:g}%추가증가"),
compile("공격시{추댐:g}%추가데미지"),
compile("모든공격력{모공:g}%증가"),
compile("모든직업{:d}~{:d}레벨모든스킬쿨타임{:d}%감소({}제외"),
compile("모든직업{minLevel:d}~{maxLevel:d}레벨모든스킬쿨타임{스킬쿨감:g}%감소"),
compile("모든직업{레벨:d}레벨모든스킬공격력{스킬증댐:g}%증가"),
compile("모든직업{:d}~{:d}레벨모든스킬Lv+{:d}({}제외"),
compile("모든직업{minLevel:d}~{maxLevel:d}레벨모든스킬Lv+{스킬레벨:d}"),
compile("스킬공격력{스공:g}%{}가"),
compile("스킬공격력+{스공:g}%"),
compile("물리마법독립공격력{물마독공:g}%"),
compile("물리마법독립공격력+{물마독깡:g}증가"),
compile("물리마법독립공격력{물마독깡:g}증가"),
compile("물리마법독립공격력증가량{물마독공:g}%"),
compile("{속추댐:g}%속성추가데미지"),
compile("{속추댐:g}%{속성종류}속성추가데미지"),
compile("적에게입힌피해의{지속댐:g}%만큼{지속:g}초동안지속피해발생"),
compile("공격시{지속:g}초동안적에게입힌피해의{지속댐:g}%만큼지속피해발생"),
compile("피격시데미지감소{피격뎀감소:g}%"),
compile("피격시데미지{피격뎀:g}%{증감}"),
compile("피격데미지{피격뎀:g}%증가"),
compile("물리마법크리티컬히트{물마크:g}증가"),
compile("힘지능{힘지:g}%공격속도{공속:g}%증가"),
compile("힘지능+{힘지:g}%증가"),
compile("힘지능+{힘지깡:g}"),
compile("힘지능{힘지깡:g}증가"),
compile("힘지능{힘지:g}%"),
compile("모든속도{공이캐속:g}%"),
compile("공격속도이동속도캐스트속도{공이캐속:g}%증가"),
compile("공격속도+{공속:g}%이동속도+{이속:g}%캐스트속도+{캐속:g}%"),
compile("공격속도{공속:g}%이동속도{이속:g}%캐스트속도{캐속:g}%증가"),
compile("공격속도{공속:g}%이동속도{이속:g}%캐스트속도{캐속:g}%{증감}"),
compile("공격속도{공속:g}%이동속도{이속:g}%증가"),
compile("공격속도{공속:g}%캐스트속도{캐속:g}%증가"),
compile("공격속도{공속:g}%증가캐스트속도{캐속:g}%증가"),
compile("공격속도{공속:g}%증가및캐스트속도{캐속:g}%증가"),
compile("공격속도이동속도{공이속:g}%증가"),
compile("공격속도{공속:g}%증가"),
compile("공격속도+{공속:g}%"),
compile("Y축이동속도{:g}%증가"),
compile("이동속도{이속:g}%증가"),
compile("이동속도+{이속:g}%"),
compile("공격속도-{공속감소:g}%"),
compile("적이동속도{:g}%감소"),
compile("이동속도-{이속감소:g}%"),
compile("캐스트속도{캐속:g}%증가"),
compile("공격속도{:g}%감소"),
compile("이동속도{:g}%감소"),
compile("캐스트속도{캐속감소:g}%감소"),
compile("캐스트속도+{캐속:g}%"),
compile("캐스트속도-{캐속감소:g}%"),
compile("물리크리티컬히트{물리크리티컬:g}%증가"),
compile("마법크리티컬히트{마법크리티컬:g}%증가"),
compile("모든속성강화{모속강:g}증가"),
compile("모든속성저항{모속저:g}증가"),
compile("모든속성저항{모속저감소:g}감소"),
compile("모든속성저항{증감}{모속저:g}"),
compile("힘지능증가량{힘지:g}%증가"),
compile("{속성종류1}속성저항{속성종류2}속성저항{속성종류3}속성저항{속저감소:g}감소"),
compile("{속성종류}속성저항{증감}{속저:g}"),
compile("5초마다단일속성강화+{수문장}"),
compile("{속성종류}속성강화+{속강:g}"),
compile("마을적용옵션+{깡모속:g}")
]
# 30Lv버프스킬힘지능증가량{}%증가
# 30Lv버프스킬물리마법독립공격력증가량{}%증가
# 30Lv버프스킬물리공격력증가량{}%증가
# 30Lv버프스킬마법공격력증가량{}%증가
# 30Lv버프스킬독립공격력증가량{}%증가
# 50Lv액티브스킬힘지능증가량{}증가
# 50Lv액티브스킬힘지능증가량{}%증가
#수호의 은총 체력, 정신력 250 증가
#계시 : 아리아, 퍼페티어 지능 173 증가
b_parser = [
compile("30Lv버프스킬힘지능증가량{축힘지:g}%증가"),
compile("30Lv버프스킬물리마법독립공격력증가량{축물마독:g}%증가"),
compile("30Lv버프스킬물리공격력증가량{축물공:g}%증가"),
compile("30Lv버프스킬마법공격력증가량{축마공:g}%증가"),
compile("30Lv버프스킬독립공격력증가량{축독공:g}%증가"),
compile("50Lv액티브스킬힘지능증가량{포계수:g}증가"),
compile("50Lv액티브스킬힘지{}{포힘지:g}%증가"),
#compile("50Lv액티브스킬힘지능{포힘지:g}%증가"),
compile("수호의은총체력정신력{체력:g}증가"),
compile("계시:아리아퍼페티어지능{지능:g}증가"),
compile("계시:아리아지능{라핌지능:g}증가"),
compile("퍼페티어지능{카테지능:g}증가"),
compile("수호의은총계시:아리아퍼페티어스킬Lv+{패시브레벨:g}"),
compile("신념의오라체력정신력증가량{체력오라:g}증가"),
compile("신실한열정소악마힘지능증가량{지능오라:g}증가"),
compile("모든직업30레벨모든스킬Lv+{축레벨:g}"),
compile("모든직업50레벨모든스킬Lv+{포레벨:g}"),
compile("50Lv모든스킬+{포레벨:g}"),
compile("30Lv모든스킬+{축레벨:g}"),
compile("30Lv버프스킬레벨+{축레벨:g}"),
compile("모든직업{min:g}~{max:g}레벨모든스킬Lv+{lv:g}({}제외"),
compile("모든스탯+{모든스탯:g}(+")
]
s_parser = {}
s_parser['암속조건'] = [
compile("암속성저항{v:d}당{option}(최대{max}증가)"),
compile("암속성저항{v:d}당{option}최대{max}중첩"),
compile("암속성저항{v:d}이상일때{option:S}")
]
s_parser['개조조건'] = compile("장비개조단계가{step:d}증가할때마다{option:S}(")
s_parser['강화조건'] = compile("강화증폭수치가{v:d}증가할때마다{option}(최대{max}까지증가)")
s_parser['착용조건'] = [
compile("{item}착용시"),
compile("{item}장착시"),
compile("{item1}과{item2}장착시")
]
s_parser['주사위'] = compile("주사위눈이{v}일경우{option:S}")
s_parser['중첩'] = compile("최대{v:d}중첩")
s_parser['최대'] = compile("(최대{v:g}{}증가)")
myth_db = {}
weapon_tree = {}
set_tree = {}
item_tree = {}
convert_list = {}
@staticmethod
def load_api(URL):
apikey = 'apikey=NqzICVeo3FesBuq3Gw1CmYhiOiFdYcHr'
#print('https://api.neople.co.kr/df/'+ URL + apikey)
max_try = 5
while True:
try:
api_load=urllib.request.urlopen('https://api.neople.co.kr/df/'+ URL + apikey)
api_dic=loads(api_load.read().decode("utf-8"))
break
except:
max_try -= 1
if max_try == 0:
raise
sleep(0.5)
continue
return api_dic
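    # Usage note (illustrative): callers are expected to end URL with '?' or '&'
    # so that the api key concatenates into a valid query string, e.g.
    # LibUtil.load_api('servers?') should return the parsed JSON server list.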
@classmethod
def parse_buff(cls, explain, io, name, skill_db, step = 0):
#print ("#################################################")
#print (name)
explain = explain.replace(' ', '').replace(',','').replace('\n\n', '\n')
e_list = explain.split('\n')
for exp in e_list:
#print(exp)
if len(exp) <= 0:
continue
opt = {}
for p in cls.b_parser:
try:
result = p.search(exp)
except:
raise
if result is not None:
if step > 0:
if step == 10:
opt['per-step'] = 1
else:
opt['step'] = step
if len(result.fixed) > 0 and result[0] == '특성스킬':
min_lv = int(result['min'])
max_lv = int(result['max'])
lvup = int(result['lv'])
data = {'min':min_lv, 'max':max_lv, 'lvup':lvup}
opt['스킬구간'] = data
continue
for key in result.named.keys():
#print(key, result[key])
opt[key] = result[key]
break
if len(opt) >= 1:
io.append(opt)
"""
opt = {}
if name == '운명을 가르는 함성 세트' and step == 3:
data = {'min':30, 'max':50, 'lvup':2}
opt['스킬구간'] = data
io.append(opt)
elif name == '운명의 주사위 세트' and step == 2:
data = {'min':30, 'max':48, 'lvup':1}
opt['스킬구간'] = data
io.append(opt)
elif name == '운명의 주사위 세트' and step == 3:
data = {'min':30, 'max':50, 'lvup':2}
opt['스킬구간'] = data
io.append(opt)
elif name == '영보 : 세상의 진리 세트' and step == 2:
data = {'min':30, 'max':50, 'lvup':1}
opt['스킬구간'] = data
io.append(opt)
elif name == '시간전쟁의 잔해 세트' and step == 2:
data = {'min':1, 'max':30, 'lvup':1}
opt['스킬구간'] = data
io.append(opt)
elif name == '전설의 대장장이 - 역작 세트' and step == 3:
data = {'min':30, 'max':50, 'lvup':2}
opt['스킬구간'] = data
io.append(opt)
elif name == '전설의 대장장이 - 역작 세트' and step == 5:
data = {'min':30, 'max':48, 'lvup':2}
opt['스킬구간'] = data
io.append(opt)
opt = {}
opt['축힘지'] = 6
io.append(opt)
opt = {}
opt['포힘지'] = 7
io.append(opt)
opt = {}
opt['포계수'] = 20
io.append(opt)
elif name == '메마른 사막의 유산 세트' and step == 2:
data = {'min':1, 'max':30, 'lvup':1}
opt['스킬구간'] = data
io.append(opt)
elif name == '메마른 사막의 유산 세트' and step == 3:
data = {'min':30, 'max':48, 'lvup':2}
opt['스킬구간'] = data
io.append(opt)
elif name == '메마른 사막의 유산 세트' and step == 5:
data = {'min':30, 'max':50, 'lvup':2}
opt['스킬구간'] = data
io.append(opt)
elif name == '열대의 트로피카 세트' and step == 3:
data = {'min':1, 'max':48, 'lvup':2}
opt['스킬구간'] = data
io.append(opt)
elif name == 'A.D. P 슈트 세트' and step == 5:
data = {'min':1, 'max':50, 'lvup':2}
opt['스킬구간'] = data
io.append(opt)
elif name == '죽음을 자아내는 그림자 세트' and step == 5:
data = {'min':1, 'max':48, 'lvup':2}
opt['스킬구간'] = data
io.append(opt)
elif name == '천상의 무희 세트' and step == 5:
data = {'min':1, 'max':48, 'lvup':2}
opt['스킬구간'] = data
io.append(opt)
"""
return io;
@classmethod
def parse_explain(cls, explain, io, name, skill_db, step = 0, iid = None):
if explain is None:
return
explain = explain.replace('힘, 지능', '힘/지능')
explain = explain.replace('물리, 마법, 독립', '물리/마법/독립')
explain = explain.replace('물리, 마법', '물리/마법')
explain = explain.replace('레벨,', '레벨/')
explain = explain.replace('\n(', '(')
explain = explain.replace('/','').replace(',','').replace(' ','')
explain = explain.replace('캐스팅','캐스트').replace('피격시받는','피격시')
explain = explain.replace('크리티컬데미지','크리티컬공격시데미지')
explain = explain.replace('불카누스의힘으로','')
e_list = explain.split('\n')
condition = {}
step_fixed = False
for exp in e_list:
e_matched = False
if len(exp) <= 0:
continue
if exp.find("해당효과는화수암명순서로순환됩니다") >= 0:
break
if exp.find("던전입장시파티원이2명이") >= 0:
break
if exp[0] != '-':
if step_fixed is False:
condition = {}
else:
exp = exp[1:]
if exp.find('주사위') >= 0:
p = cls.s_parser['주사위']
result = p.search(exp)
if result is not None:
condition['조건'] = {'type':'주사위', 'cond':result['v']}
exp = result['option']
if exp.find('암속성') >= 0:
for p in cls.s_parser['암속조건']:
result = p.search(exp)
#print(exp, result)
if result is not None:
limit = result.named.get('max')
condition['조건'] = {'type':'암속저', 'per-val':result['v'], 'max':limit}
exp = result['option']
if exp.find('최대') >= 0:
p = cls.s_parser['중첩']
result = p.search(exp)
if result is not None:
condition['중첩'] = result['v']
elif exp.find('강화증폭') >= 0:
p = cls.s_parser['강화조건']
result = p.search(exp)
if result is not None:
condition['조건'] = {'type':'강화증폭', 'per-val':result['v'], 'max':result['max']}
else:
p = cls.s_parser['최대']
#print(exp)
result = p.search(exp)
if result is not None:
condition['최대'] = result['v']
#print(condition)
            if exp.find('착용') >= 0 or exp.find('장착') >= 0:
if exp.find('보조무기로') < 0:
for p in cls.s_parser['착용조건']:
result = p.search(exp)
if result is not None:
required = []
for r in result.named:
required.append(result[r])
condition['조건'] = {'type':'착용', 'required':required}
if exp.find('개조') >= 0:
if exp == '[개조단계별옵션]':
condition['조건'] = {'type':'개조', 'per-step':1}
step_fixed = True
else:
p = cls.s_parser['개조조건']
result = p.search(exp)
if result is not None:
condition['조건'] = {'type':'개조', 'per-step':result['step']}
exp = result['option']
if exp == '[검은마물의정원전용옵션]':
break
elif exp.find('캐릭터이동속도에따라다음효과') >= 0:
break
opt = {}
for p in cls.parser:
try:
result = p.search(exp)
except:
raise
if result is not None:
for key in result.named.keys():
if '스킬증댐' in result.named.keys():
#print ('스킬증댐', name)
v = result['스킬증댐']
lvl = result['레벨']
opt['스킬'] = [{'job': '공통', 'jid': None},
[{'minLevel':lvl, 'maxLevel':lvl,'damup':v}]
]
break
elif '스킬쿨감' in result.named.keys():
#print ('스킬쿨감', name)
minlvl = result['minLevel']
maxlvl = result['maxLevel']
v = result['스킬쿨감']
opt['스킬'] = [{'job': '공통', 'jid': None},
[{'minLevel':minlvl, 'maxLevel':maxlvl,'cooldown':v}]
]
break
elif '스킬레벨' in result.named.keys():
#print ('스킬레벨', name)
minlvl = result['minLevel']
maxlvl = result['maxLevel']
v = result['스킬레벨']
opt['스킬'] = [{'job': '공통', 'jid': None},
[{'minLevel':minlvl, 'maxLevel':maxlvl,'value':v}]
]
break
v = result[key]
if '중첩' in condition:
f = condition['중첩']
                            # correction when the stack count is high
"""
if f > 10:
df = f - 10
df = int(df*0.5)
f = df + 10
"""
v *= f
elif '최대' in condition:
f = condition['최대']
v = f
#e = result.named.get('e')
opt[key] = v
break;
            # per-item customizations
if len(opt) >= 1:
if '조건' in condition:
opt['condition'] = condition['조건']
io.append(opt)
"""
if step == -2:
convert = cls.convert_list.get(name)
if convert is not None:
io.append({'변환': { 'opts': convert['options'], 'type': convert['type']}})
"""
if step == -2:
if name == '데파르망' and iid is not None:
#print (io, iid, explain)
try:
io.pop(0)
except:
pass
return io
if step == -3:
if name == '데파르망':
#print (io, iid, explain)
try:
io.pop(1)
except:
pass
if name.find('사도 강림 플래티넘') >= 0 or name.find('위대한 의지') >= 0 or name.find('강인한 사도') >= 0 or name.find('기사의 위대한 긍지') >= 0:
p = compile("{}[{lv:d}Lv]")
result = p.parse(name)
if result is not None:
lv = result['lv']
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':lv, 'maxLevel':lv, 'damup':10},
]
)})
elif step == 0 and name == '퍼펙트 컨트롤':
"""
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':85, 'value':1},
{'minLevel':100, 'maxLevel':100, 'value':1},
]
)})
"""
pass
elif step == 4 and name == '선지자의 목걸이':
for opts in io:
if '속추댐' in opts:
opts['속추댐'] *= 0.35
elif '스공' in opts:
if opts['스공'] == 10:
opts['스공'] = 10 * 0.35 + 15 * 0.3
else:
#opts['스공'] *= 0.3
del opts['스공']
elif step == 1 and name == '청면수라의 가면':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':85, 'value':2},
{'minLevel':100, 'maxLevel':100, 'value':2},
]
)})
elif step == 1 and name == '무념의 의복':
io.append({'스킬':({'job':'공통','jid':None},
[
{'minLevel':50, 'maxLevel':50, 'value':2},
{'minLevel':85, 'maxLevel':85, 'value':2},
{'minLevel':100, 'maxLevel':100, 'value':2},
]
)})
elif step == 1 and name == '무형의 절개':
io.append({'스킬':({'job':'공통','jid':None},
[
{'minLevel':50, 'maxLevel':85, 'value':1},
{'minLevel':100, 'maxLevel':100, 'value':1},
]
)})
elif step == 1 and name == '무의식의 꽃':
io.append({'스킬':({'job':'공통','jid':None},
[
{'minLevel':50, 'maxLevel':50, 'damup':30},
{'minLevel':85, 'maxLevel':85, 'damup':25},
{'minLevel':100, 'maxLevel':100, 'damup':16},
]
)})
elif (name == '태극천제검'):
for opts in io:
if '모공' in opts:
opts['모공'] *= 0
"""
elif '스공' in opts:
if opts['스공'] != 30:
opts['스공'] *= 1
elif '공속' in opts:
if '증감' in opts:
opts['공속'] *= 1
opts['이속'] *= 1
opts['캐속'] *= 1
else:
opts['공속'] *= 0
opts['이속'] *= 0
opts['캐속'] *= 0
"""
elif (name == '천장군 : 전승의 빛'):
io.append({'모공':18})
elif (name == '푸른 생명의 이면'):
for opts in io:
if '모공' in opts:
                    opts['모공'] -= 3  # account for the 60s cooldown / 20s uptime of this option
elif '모속저' in opts:
opts['모속저'] = int(opts['모속저'] * 0.66)
elif '캐속' in opts:
opts['캐속'] *= 0.4
elif (name == '프로젝트 : 오버코어'):
io.append({'스킬':({'job':'총검사','jid':cls.get_jobid('총검사', skill_db)},
[{'skillId':cls.get_skillid('총검사', '코어 블레이드 마스터리', skill_db),
'name':'코어 블레이드 마스터리',
'damup':100,
'extra':'마법 공격력'
}]
)})
elif (name == '핏빛 무도회'):
io.append({'스킬':({'job':'도적','jid':cls.get_jobid('도적', skill_db)},
[
{'skillId':cls.get_skillid('도적', '히트엔드', skill_db),
'name':'히트엔드',
'value':'연계 점수당 공격력 비율'
}]
)})
elif (name == '화려한 눈속임'):
io.append({'스킬':({'job':'도적','jid':cls.get_jobid('도적', skill_db)},
[
{'minLevel':40, 'maxLevel':40, 'damup':32},
{'minLevel':45, 'maxLevel':45, 'damup':32},
{'minLevel':70, 'maxLevel':70, 'damup':32},
{'skillId':cls.get_skillid('도적', '인법 : 허물 벗기', skill_db),
'name':'인법 : 허물 벗기',
'cooldown':32,
'damdown':32,
},
{'skillId':cls.get_skillid('도적', '샤이닝컷', skill_db),
'name':'샤이닝컷',
'cooldown':32,
'damdown':32,
},
{'skillId':cls.get_skillid('도적', '브레이킹 러시', skill_db),
'name':'브레이킹 러시',
'cooldown':32,
},
{'skillId':cls.get_skillid('도적', '사이드 스텝', skill_db),
'name':'사이드 스텝',
'cooldown':32,
},
]
)})
elif (name == '도화선'):
io.append({'스킬':({'job':'도적','jid':cls.get_jobid('도적', skill_db)},
[{'skillId':cls.get_skillid('도적', '흉멸인법진', skill_db),
'name':'흉멸인법진',
'value':2
}]
)})
elif (name == '라스트 인파이팅'):
io.append({'스킬':({'job':'프리스트(남)', 'jid':cls.get_jobid('프리스트(남)', skill_db)},
[{'skillId':cls.get_skillid('프리스트(남)', '드라이아웃', skill_db),
'name':'드라이아웃',
'cooldown':30
}]
)})
elif (name == '레볼루션 차지'):
io.append({'스킬':({'job':'거너(남)', 'jid':cls.get_jobid('거너(남)', skill_db)},
[{'skillId':cls.get_skillid('거너(남)', '레이저 라이플', skill_db),
'name':'레이저 라이플',
'cooldown':30,
'damup':20,
}]
)})
io.append({'스킬':({'job':'거너(여)', 'jid':cls.get_jobid('거너(여)', skill_db)},
[{'skillId':cls.get_skillid('거너(여)', '레이저 라이플', skill_db),
'name':'레이저 라이플',
'cooldown':30,
'damup':20,
}]
)})
"""
elif (name == '루나 베네딕티오'):
io.append({'스킬':({'job':'마법사(남)','jid':cls.get_jobid('마법사(남)', skill_db)},
[
{'minLevel':50, 'maxLevel':50, 'value':2},
{'minLevel':85, 'maxLevel':85, 'value':2},
{'minLevel':100, 'maxLevel':100, 'value':2},
]
)})
io.append({'스킬':({'job':'마법사(여)','jid':cls.get_jobid('마법사(여)', skill_db)},
[
{'minLevel':50, 'maxLevel':50, 'value':2},
{'minLevel':85, 'maxLevel':85, 'value':2},
{'minLevel':100, 'maxLevel':100, 'value':2},
]
)})
"""
elif (name == '메가쇼크 런처'):
io.append({'스킬':({'job':'거너(남)', 'jid':cls.get_jobid('거너(남)', skill_db)},
[{'skillId':cls.get_skillid('거너(남)', '솔라 모듈 시스템', skill_db),
'name':'솔라 모듈 시스템',
'damup':20,
}]
)})
io.append({'스킬':({'job':'거너(여)', 'jid':cls.get_jobid('거너(여)', skill_db)},
[{'skillId':cls.get_skillid('거너(여)', '솔라 모듈 시스템', skill_db),
'name':'솔라 모듈 시스템',
'damup':20,
}]
)})
elif (name == '백호의 울음소리'):
io.append({'스킬':({'job':'격투가(남)', 'jid':cls.get_jobid('격투가(남)', skill_db)},
[{'skillId':cls.get_skillid('격투가(남)', '사자후', skill_db),
'name':'사자후',
'cooldown':30,
'damup':20,
}]
)})
io.append({'스킬':({'job':'격투가(여)', 'jid':cls.get_jobid('격투가(여)', skill_db)},
[{'skillId':cls.get_skillid('격투가(여)', '사자후', skill_db),
'name':'사자후',
'cooldown':30,
'damup':20,
}]
)})
elif (name == '불카누스의 두번째 흔적'):
io.append({'스킬':({'job':'프리스트(남)', 'jid':cls.get_jobid('프리스트(남)', skill_db)},
[{'skillId':cls.get_skillid('프리스트(남)', '무쌍격', skill_db),
'name':'무쌍격',
'damup':40,
}]
)})
io.append({'스킬':({'job':'프리스트(여)', 'jid':cls.get_jobid('프리스트(여)', skill_db)},
[{'skillId':cls.get_skillid('프리스트(여)', '참수', skill_db),
'name':'참수',
'damup':40,
}]
)})
elif (name == '블러드 샷 부스터'):
io.append({'스킬':({'job':'거너(여)', 'jid':cls.get_jobid('거너(여)', skill_db)},
[{'skillId':cls.get_skillid('거너(여)', '베일드 컷', skill_db),
'name':'베일드 컷',
'damup':50,
'extra':'출혈'
}]
)})
elif (name == '사암주극'):
io.append({'스킬':({'job':'마창사', 'jid':cls.get_jobid('마창사', skill_db)},
[
{'minLevel':1, 'maxLevel':48, 'value':2, 'cooldown':20},
{'minLevel':60, 'maxLevel':80, 'value':2, 'cooldown':20},
{'minLevel':90, 'maxLevel':95, 'cooldown':20},
{'skillId':cls.get_skillid('마창사', '임팩트 스매쉬', skill_db),
'name':'임팩트 스매쉬',
'cooldown':15,
'extra':'스택'}
]
)})
elif (name == '사일런트 베놈'):
io.append({'스킬':({'job':'마창사', 'jid':cls.get_jobid('마창사', skill_db)},
[
{'skillId':cls.get_skillid('마창사', '멸광천투', skill_db),
'name':'멸광천투',
'damup':11.4,
'extra':'폭발'
}
]
)})
elif (name == '기가 드릴러'):
io.append({'스킬':({'job':'마창사', 'jid':cls.get_jobid('마창사', skill_db)},
[
{'skillId':cls.get_skillid('마창사', '스파이럴 러쉬', skill_db),
'name':'스파이럴 러쉬',
'damup':31.5,
'extra':'다단히트'
},
{'skillId':cls.get_skillid('마창사', '흑광폭살', skill_db),
'name':'흑광폭살',
'damup':14.4,
'extra':'꿰뚫는'
},
{'skillId':cls.get_skillid('마창사', '광폭 : 흑화연창', skill_db),
'name':'광폭 : 흑화연창',
'damup':13.5,
'extra':'어둠의 창'
}
]
)})
elif (name == '끊임없는 환영'):
io.append({'스킬':({'job':'마창사', 'jid':cls.get_jobid('마창사', skill_db)},
[
{'skillId':cls.get_skillid('마창사', '미라지 스탠스', skill_db),
'name':'미라지 스탠스',
'cooldown':50,
}
]
)})
elif (name == '세계수의 뿌리'):
io.append({'스킬':({'job':'마법사(남)','jid':cls.get_jobid('마법사(남)', skill_db)},
[
{'minLevel':1, 'maxLevel':100, 'cooldown':10},
]
)})
io.append({'스킬':({'job':'마법사(여)','jid':cls.get_jobid('마법사(여)', skill_db)},
[
{'minLevel':1, 'maxLevel':100, 'cooldown':10},
]
)})
io.append({'스킬':({'job':'크리에이터','jid':cls.get_jobid('크리에이터', skill_db)},
[
{'minLevel':1, 'maxLevel':100, 'cooldown':10},
]
)})
"""
elif (name == '야천도'):
io.append({'스킬':({'job':'총검사','jid':cls.get_jobid('총검사', skill_db)},
[
{'minLevel':50, 'maxLevel':50, 'value':2, 'extra':'히트맨'},
{'minLevel':85, 'maxLevel':85, 'value':2, 'extra':'히트맨'},
{'minLevel':100, 'maxLevel':100, 'value':2, 'extra':'히트맨'},
]
)})
"""
elif (name == '어나이얼레이터'):
io.append({'스킬':({'job':'마법사(여)', 'jid':cls.get_jobid('마법사(여)', skill_db)},
[
{'skillId':cls.get_skillid('마법사(여)', '쇄패', skill_db),
'name':'쇄패',
'damup':50,
}
]
)})
io.append({'스킬':({'job':'마법사(남)', 'jid':cls.get_jobid('마법사(남)', skill_db)},
[
{'skillId':cls.get_skillid('마법사(남)', '팽', skill_db),
'name':'팽',
'damup':50,
}
]
)})
elif (name == '윤회의 고리 : 환룡'):
io.append({'스킬':({'job':'프리스트(남)', 'jid':cls.get_jobid('프리스트(남)', skill_db)},
[
{'minLevel':1, 'maxLevel':100, 'cooldown':10},
{'minLevel':48, 'maxLevel':80, 'value':1},
]
)})
io.append({'스킬':({'job':'프리스트(여)', 'jid':cls.get_jobid('프리스트(여)', skill_db)},
[
{'minLevel':1, 'maxLevel':100, 'cooldown':10},
{'minLevel':48, 'maxLevel':80, 'value':1},
]
)})
elif (name == '카심의 대검'):
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':48, 'cooldown':20},
{'minLevel':60, 'maxLevel':80, 'cooldown':20},
{'minLevel':90, 'maxLevel':95, 'cooldown':20},
]
)})
"""
elif (name == '통곡의 수문장'):
io.append({'스킬':({'job':'마창사','jid':cls.get_jobid('마창사', skill_db)},
[
{'minLevel':50, 'maxLevel':50, 'value':2, 'extra':'워로드'},
{'minLevel':85, 'maxLevel':85, 'value':2, 'extra':'워로드'},
{'minLevel':100, 'maxLevel':100, 'value':2, 'extra':'워로드'},
]
)})
elif (name == '대 마법사 [???]의 로브'):
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':45, 'value':1},
]
)})
elif (name == '마법사 [???]의 로브'):
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':45, 'value':1},
]
)})
"""
elif step == 3 and name == '개악 : 지옥의 길 세트':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':85, 'value':1},
{'minLevel':100, 'maxLevel':100, 'value':1},
]
)})
elif step == 5 and name == '열대의 트로피카 세트':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':100, 'cooldown':15},
]
)})
io.append({'공속':5, 'condition':{'type':'착용', 'required':['트로피카:리치']}})
io.append({'공속':5, 'condition':{'type':'착용', 'required':['트로피카:드레이크']}})
elif step == 5 and name == '잊혀진 마법사의 유산 세트':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':85, 'value':2},
{'minLevel':100, 'maxLevel':100, 'value':2},
]
)})
elif step == 5 and name == 'A.D. P 슈트 세트':
for opts in io:
if '스공' in opts:
#opts['스공'] *= 0.5
pass
elif '공속' in opts:
opts['공속'] *= 0.5
opts['이속'] *= 0.5
opts['캐속'] *= 0.5
"""
elif name == '낭만적인 선율의 왈츠' or name == '우아한 선율의 왈츠':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':45, 'cooldown':10},
]
)})
elif name == '격렬한 스텝의 자이브':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':30, 'cooldown':15},
]
)})
elif name == '즉흥적인 감각의 탱고':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':75, 'maxLevel':80, 'cooldown':15},
]
)})
elif name == '매혹적인 리듬의 룸바':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':35, 'maxLevel':45, 'cooldown':15},
]
)})
elif name == '정열적인 흐름의 삼바':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':60, 'maxLevel':70, 'cooldown':15},
]
)})
"""
elif step == 5 and name == '베테랑 군인의 정복 세트':
io.pop(1)
io[1] = {'추댐': 29}
elif step == 3 and name == '전설의 대장장이 - 역작 세트':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':48, 'cooldown':20},
{'minLevel':60, 'maxLevel':80, 'cooldown':20},
]
)})
elif step == 5 and name == '전설의 대장장이 - 역작 세트':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':50, 'maxLevel':50, 'cooldown':30},
{'minLevel':85, 'maxLevel':85, 'cooldown':30},
{'minLevel':100, 'maxLevel':100, 'cooldown':17},
]
)})
elif step == 3 and name == '구속의 가시덩굴 세트':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':48, 'cooldown':15},
{'minLevel':60, 'maxLevel':80, 'cooldown':15},
{'minLevel':90, 'maxLevel':95, 'cooldown':15},
]
)})
elif step == 5 and name == '구속의 가시덩굴 세트':
io.append({'이속':-2})
elif step == 3 and name == '선택의 기로 세트':
for opts in io:
if '공속' in opts.keys():
if '증감' in opts.keys():
opts['공속'] = opts['이속'] = opts['캐속'] = 0
else:
opts['공속'] = opts['이속'] = 14
opts['캐속'] = 21
elif (name == '지체없는 흐름의 한뉘' or name == '영명한 세상의 순환') and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':45, 'maxLevel':45, 'damdown':30, 'coolrecover':100},
]
)})
io.append({'스킬':({'job':'크리에이터', 'jid':cls.get_jobid('크리에이터', skill_db)},
[
{'skillId':cls.get_skillid('크리에이터', '웜홀', skill_db),
'name':'웜홀',
'coolrecover':100, 'damdown':30
}
]
)})
elif name == '지체없는 흐름의 미리내' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':25, 'maxLevel':25, 'damdown':30, 'coolrecover':100}
]
)})
elif name == '지체없는 흐름의 마루' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':35, 'maxLevel':35, 'damdown':30, 'coolrecover':100}
]
)})
elif name == '지체없는 흐름의 가람' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':40, 'maxLevel':40, 'damdown':30, 'coolrecover':100}
]
)})
elif name == '지체없는 흐름의 바람' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':30, 'maxLevel':30, 'damdown':30, 'coolrecover':100}
]
)})
"""
elif step == 2 and name == '영원한 흐름의 길 세트':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':60, 'maxLevel':60, 'damup':20, 'coolup':30}
]
)})
elif step == 3 and name == '영원한 흐름의 길 세트':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':70, 'maxLevel':70, 'damup':20, 'coolup':30}
]
)})
"""
elif name == '임의 선택' and step < 0:
for opts in io:
for key in opts:
if key in ['증추', '크증추', '모공', '물마독공', '스공']:
opts[key] *= 0.2
elif name == '합리적 선택' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':50, 'maxLevel':50, 'damup':25},
{'minLevel':85, 'maxLevel':85, 'damup':45},
{'minLevel':100, 'maxLevel':100, 'damup':13},
]
)})
elif step == 3 and name == '먼동 틀 무렵 세트':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':100, 'maxLevel':100, 'value':1},
]
)})
elif step == 3 and name == '행운의 트라이앵글 세트':
for opts in io:
if '스공' in opts.keys():
if opts['스공'] == 27:
opts['스공'] = 27*0.5 + 31*0.45 + 34*0.05
elif opts['스공'] == 31:
#opts['스공'] = 0
del opts['스공']
else:
#opts['스공'] = 0
del opts['스공']
elif step == 2 and name == '고대의 술식 세트':
for opts in io:
if '이속' in opts.keys():
opts['이속'] /= 12
elif (name == '새벽을 녹이는 따스함' or name == '새벽을 감싸는 따스함') and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
#{'minLevel':1, 'maxLevel':48, 'value':1},
{'minLevel':15, 'maxLevel':30, 'coolrecover':30},
]
)})
elif name == '달빛을 가두는 여명' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
#{'minLevel':50, 'maxLevel':70, 'value':1},
{'minLevel':35, 'maxLevel':45, 'coolrecover':30},
]
)})
elif name == '고요를 머금은 이슬' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
#{'minLevel':75, 'maxLevel':85, 'value':1},
{'minLevel':60, 'maxLevel':80, 'coolrecover':30},
]
)})
elif step == 3 and name == '정령사의 장신구 세트':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':100, 'cooldown':10},
]
)})
elif step == 3 and name == '영보 : 세상의 진리 세트':
io.append({'스킬':({'job':'공통', 'jid':None},
[
#{'minLevel':1, 'maxLevel':85, 'value':1},
{'minLevel':100, 'maxLevel':100, 'value':1},
]
)})
elif name == '종말의 시간' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':100, 'cooldown':12},
]
)})
elif name == '전자기 진공관' and step < 0:
for opts in io:
if '추댐' in opts.keys():
opts['condition'] = {'type':'착용', 'required':['제어회로모듈']}
elif '모속강' in opts.keys():
opts['condition'] = {'type':'착용', 'required':['에너지분배제어기']}
elif name == '플라즈마 초 진공관' and step < 0:
for opts in io:
if '추댐' in opts.keys():
opts['이속'] = 10
opts['condition'] = {'type':'착용', 'required':['제어회로모듈']}
opts['모속저'] = 20
elif '모속강' in opts.keys():
opts['condition'] = {'type':'착용', 'required':['에너지분배제어기']}
elif step == 2 and name == '심연을 엿보는 자 세트':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':48, 'value':1},
]
),
'condition':{'type':'암속저','per-val':28, 'max':2}
})
elif step == 3 and name == '심연을 엿보는 자 세트':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':60, 'maxLevel':80, 'value':1},
]
),
'condition':{'type':'암속저','per-val':30, 'max':2}
})
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':50, 'maxLevel':50, 'value':1},
{'minLevel':85, 'maxLevel':85, 'value':1},
{'minLevel':100, 'maxLevel':100, 'value':1},
]
),
'condition':{'type':'암속저','per-val':61, 'max':None}
})
elif name == '길 안내자의 계절' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':48, 'cooldown':10},
{'minLevel':60, 'maxLevel':80, 'cooldown':10},
{'minLevel':90, 'maxLevel':95, 'cooldown':10},
]
),
'이속':10,
})
elif step == 3 and name == '황혼의 여행자 세트':
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':48, 'cooldown':10},
{'minLevel':60, 'maxLevel':80, 'cooldown':10},
{'minLevel':90, 'maxLevel':95, 'cooldown':10},
]
)})
elif name == '시간에 휩쓸린 물소 각반' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':60, 'maxLevel':80, 'cooldown':10},
]
)})
elif name == '시간을 거스르는 자침' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':50, 'maxLevel':50, 'cooldown':15},
{'minLevel':85, 'maxLevel':85, 'cooldown':15},
]
)})
elif name == '시간을 가리키는 지침' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':50, 'maxLevel':50, 'cooldown':10},
{'minLevel':85, 'maxLevel':85, 'cooldown':10},
]
)})
elif name == '시간에 갇혀버린 모래' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':45, 'cooldown':10},
]
)})
elif name == '나락으로 빠진 발' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':90, 'value':1},
{'minLevel':100, 'maxLevel':100, 'value':1},
]
),
'condition':{'type':'암속저','per-val':16, 'max':None}
})
elif name == '차원을 걷는 물소 부츠' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':1, 'maxLevel':45, 'value':1},
]
)})
elif name == '차원을 지나는 자의 인장' and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':60, 'maxLevel':80, 'value':1},
]
)})
elif (name == '차원을 관통하는 초신성' or name == '차원을 맴도는 혜성') and step < 0:
io.append({'스킬':({'job':'공통', 'jid':None},
[
{'minLevel':50, 'maxLevel':50, 'value':1},
{'minLevel':85, 'maxLevel':85, 'value':1},
{'minLevel':100, 'maxLevel':100, 'value':1},
]
)})
elif (name == '무너진 세상의 슬픔' or name == '광란을 품은 자의 종막' or name == '슬픔을 담은 운명') and step < 0:
for opts in io:
if '추댐' in opts.keys():
if opts['추댐'] == 8 or opts['추댐'] == 12:
opts['추댐'] = 0
elif name == '아린 고통의 비극' and step < 0:
for opts in io:
if '추댐' in opts.keys():
opts['추댐'] = 5.5
elif name == '천상의 날개' and step < 0:
io.append({'이속':25})
elif step == 3 and name == "열대의 트로피카 세트":
io.append({'이속':5, 'condition':{'type':'착용', 'required':['트로피카:리치']}})
elif step == 5 and name == "메마른 사막의 유산 세트":
for opts in io:
if '스공' in opts.keys() and opts['스공'] == 4:
opts['스공'] = 1
elif name in ['전쟁의 시작', '오퍼레이션 델타', '퀘이크 프론', '전장의 매', '데파르망'] and iid is not None:
try:
io.pop(0)
except:
#print (name, io, explain, iid)
pass
elif name == '종말의 역전' and step < 0:
for opts in io:
if '물마독공' in opts.keys():
opts['물마독공'] *= -1
return io
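    # Shape of the option entries collected in `io` (inferred from the branches
    # above, shown only as an illustration): plain stat dicts such as
    # {'모공': 18} or {'이속': 25}, optionally with a 'condition' dict, and
    # skill entries of the form
    #   {'스킬': ({'job': <job name or '공통'>, 'jid': <job id or None>},
    #             [{'minLevel': 50, 'maxLevel': 50, 'value': 1}, ...])}
    # where each range/skill dict may also carry 'cooldown', 'coolrecover',
    # 'damup'/'damdown', 'skillId'/'name' and an optional 'extra' tag.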
@classmethod
    def get_jobid(cls, name, skill_db):
for jid in skill_db.keys():
if skill_db[jid]['name'] == name:
return jid
@classmethod
    def get_skillid(cls, jobname, skillname, skill_db):
for jid in skill_db.keys():
if skill_db[jid]['name'] == jobname:
break
for gid in skill_db[jid].keys():
if gid == 'name':
continue
for skill in skill_db[jid][gid]['skills']:
if skill['name'] == skillname:
return skill['skillId']
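    # get_jobid/get_skillid assume a skill_db layout roughly like the sketch
    # below (inferred from the lookups above, not an authoritative schema):
    #   skill_db[jid] = {'name': <job name>,
    #                    <group id>: {'skills': [{'name': ..., 'skillId': ...}, ...]}}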
"""
@classmethod
def parse_stats(cls, stats, io):
for stat in stats:
s = stat['name'].replace(' ','').replace('캐스팅', '캐스트')
if s in item_stat_type:
v = stat['value']
print (s, ":", v, None)
io.append({s:(v, None)})
#else:
#io.append({'미분류s':s})
return io
"""
@classmethod
def build_single_item(cls, ids, skill_db, item_db, runtime = True):
item_ids = ','.join(ids)
url = "multi/items?itemIds=" + item_ids + "&"
item_dict = cls.load_api(url)
#with open("item_dict.json", "w") as f:
#json.dump(item_dict, f)
for cur in item_dict['rows']:
item_id = cur['itemId']
name = cur['itemName']
itype = cur['itemType']
ityped = cur['itemTypeDetail']
igrade = cur['itemRarity']
remodel = cur.get('remodelInfo')
transform = cur.get('transformInfo')
siroco = cur.get('sirocoInfo')
status = cur.get('itemStatus')
if runtime is False:
if itype == '무기':
if cls.weapon_tree.get(itype) is None:
cls.weapon_tree[itype] = {}
cls.weapon_tree[itype][ityped] = {}
else:
if cls.weapon_tree[itype].get(ityped) is None:
cls.weapon_tree[itype][ityped] = {}
cls.weapon_tree[itype][ityped][item_id] = {'name': name, 'rarity': igrade, 'status': status, 'type': ityped}
if remodel is not None:
cls.weapon_tree[itype][ityped][item_id]['remodel'] = True
if transform is not None:
cls.weapon_tree[itype][ityped][item_id]['upgrade'] = True
else:
cls.weapon_tree[itype][ityped][item_id]['upgrade'] = False
else:
cls.weapon_tree[itype][ityped][item_id]['remodel'] = False
cls.weapon_tree[itype][ityped][item_id]['upgrade'] = False
elif cur.get('setItemId') is None and remodel is not None:
if ityped[0] == '천':
if cls.item_tree.get(ityped) is None:
cls.item_tree[ityped] = {}
if transform is not None:
upgr = True
else:
upgr = False
cls.item_tree[ityped][item_id] = {'name': name, 'rarity': igrade, 'remodel': True, 'upgr': upgr}
elif cur.get('setItemId') is None and siroco is not None:
_name = name.replace(' ', '').split(':')[1]
setName = _name.split('의')[0]
if ityped.find(' ') >= 0:
slot = ityped.split(' ')[1]
else:
slot = ityped
if cls.set_tree.get(setName) is None:
cls.set_tree[setName] = {'name': setName, 'itemList': {}}
cls.set_tree[setName]['itemList'][slot] = {'name': name, 'rarity': igrade, 'id': item_id}
elif ityped == '칭호':
if cls.set_tree.get(ityped) is None:
cls.set_tree[ityped] = {'name': ityped, 'itemList':[]}
cls.set_tree[ityped]['itemList'].append({'name': name, 'rarity': igrade, 'id': item_id})
item = {}
item['name'] = name
item['options'] = []
item['buffopts'] = []
#print(name)
explain = cur['itemExplainDetail']
#e_origin_list = explain.split('\n')
#item['origin'] = e_origin_list
"""
if remodel is not None:
step_mode = -1
else:
step_mode = -2
"""
cls.parse_explain(explain, item['options'], name, skill_db, step = -1, iid = item_id)
if explain.find("파티원이 2명") >= 0:
item['synergy'] = {'깡스탯': 10}
buffopt = cur.get('itemBuff')
if buffopt is not None:
buffexplain = buffopt['explain']
cls.parse_buff(buffexplain, item['buffopts'], name, skill_db, step = -1)
skills = buffopt.get('reinforceSkill')
if skills is not None and len(skills) > 0:
for skill in skills[0]['skills']:
if skill['name'] in ['마리오네트', '아포칼립스', '크럭스 오브 빅토리아']:
odata = {'포레벨':skill['value']}
elif skill['name'] in ['영광의 축복', '용맹의 축복', '금단의 저주']:
odata = {'축레벨':skill['value']}
elif skill['name'] in ['소악마', '신실한 열정', '신념의 오라']:
odata = {'오라레벨':skill['value']}
else:
print(skills)
odata = None
#raise Exception
if odata is not None:
item['buffopts'].append(odata)
skills = cur.get('itemReinforceSkill')
if skills is not None:
for s in skills:
e = []
try:
v = {'job':s['jobName'], 'jid':s['jobId']}
if 'levelRange' in s.keys():
for r in s['levelRange']:
e.append(r)
if 'skills' in s.keys():
for r in s['skills']:
e.append(r)
except:
#print(item_id)
#print(skills)
raise
item['options'].append({'스킬':(v, e)})
#remodel = cur.get('remodelInfo')
if remodel is not None:
_explain = remodel['explain'].split('버퍼 전용')
explain = _explain[0]
if len(_explain) == 2:
buffExplain = _explain[1]
else:
buffExplain = None
cls.parse_explain(explain, item['options'], name, skill_db)
if buffExplain is not None:
cls.parse_buff(buffExplain, item['buffopts'], name, skill_db, step = 10)
explain = explain.replace('\n(', '(')
#e_origin_list = explain.split('\n')
#item['remodel_origin'] = e_origin_list
if remodel['stepInfo'] is not None:
for step in remodel['stepInfo']:
_explain = step.get('explainDetail')
if _explain is None:
_explain = step.get('explain')
_explain = _explain.split("버퍼 전용")
explain = _explain[0]
if len(_explain) == 2:
buffExplain = _explain[1]
else:
buffExplain = None
#print(explain)
stepinfo = {}
"""
if step.get('transform') is True:
#_explain = explain.replace('%', '')
#print(explain, name, item_id)
try:
expRange = re.findall(r'\(.*?\)', explain)[0][1:-1]
_explain_prefix = explain.split('(')[0]
_explain_postfix = explain.split(')')[1]
if expRange.find('~') < 0:
raise Exception
except:
__explain_prefix = []
__explain_postfix = []
expRange_list = explain.split(' ')
pre = True
for expr in expRange_list:
if expr.find('~') >= 0 and pre is True:
expRange = expr
pre = False
elif pre is True:
__explain_prefix.append(expr)
else:
__explain_postfix.append(expr)
_explain_prefix = ' '.join(__explain_prefix)
_explain_postfix = ' '.join(__explain_postfix)
#print(expRange)
expRange = expRange.split('~')
#range_min = int(expRange[0])
#range_max = int(expRange[1])
range_max = expRange[1]
explain = _explain_prefix + range_max + _explain_postfix
stepinfo['transform'] = True
"""
if step.get('transform') is None:
stepinfo['step'] = step['step']
stepinfo['options'] = []
cls.parse_explain(explain, stepinfo['options'], name, skill_db, step = step['step'])
#e_origin_list = explain.split('\n')
#stepinfo['origin'] = e_origin_list
if buffExplain is not None:
stepinfo['buffopts'] = []
cls.parse_buff(buffExplain, stepinfo['buffopts'], name, skill_db, step = step['step'])
item['options'].append(stepinfo)
"""
transform = cur.get('transformInfo')
if transform is not None:
explain = transform['explain']
topt = []
if explain.find('모든 직업') >= 0:
topt.append({'각성기':2})
else:
if name == '데파르망':
cls.parse_explain(transform['explainDetail'], topt, name, skill_db, step = -2, iid = item_id)
else:
cls.parse_explain(explain, topt, name, skill_db, step = -2, iid = item_id)
item['options'].append({'transform': topt})
"""
itemStatus = cur.get('itemStatus')
if itemStatus is not None and len(itemStatus) > 0:
item['status'] = itemStatus
mythInfo = cur.get('mythologyInfo')
if mythInfo is not None:
cls.myth_db[item_id] = {'name':name, 'options':[], 'buffOptions':[]}
mopt = mythInfo['options']
for o in mopt:
mexp = o['explain']
                fexp = re.sub(r'\d', '*', mexp)
mexpd = o['explainDetail']
expRange = re.findall(r'\(.*?\)', mexpd)[0][1:-1]
expRange = expRange.split('~')
range_min = expRange[0]
range_max = expRange[1]
cls.myth_db[item_id]['options'].append({'explain':fexp, 'min':range_min, 'max':range_max})
mexp = o['buffExplain']
                fexp = mexp[:2] + re.sub(r'\d', '*', mexp[2:])
mexpd = o['buffExplainDetail']
expRange = re.findall(r'\(.*?\)', mexpd)[0][1:-1]
expRange = expRange.split('~')
range_min = expRange[0]
range_max = expRange[1]
cls.myth_db[item_id]['buffOptions'].append({'explain':fexp, 'min':range_min, 'max':range_max})
item_db[item_id] = item
@classmethod
def build_set_option(cls, sid, sname, options, skill_db, set_db):
sopt = {}
sopt['name'] = sname
#print(sname)
for option in options:
n = option['optionNo']
sopt[str(n)] = {}
sopt[str(n)]['options'] = []
sopt[str(n)]['buffopts'] = []
if 'detailExplain' in option.keys():
explain = option['detailExplain']
else:
explain = option.get('explain')
#print(n, explain)
if explain is not None:
cls.parse_explain(explain, sopt[str(n)]['options'], sname, skill_db, step = n)
if explain.find("2명 이상인 경우") >= 0:
#explain = explain.split("2명 이상인 경우")[1]
sopt[str(n)]['synergy'] = sname + '|' + str(n)
itemStatus = option.get('status')
if itemStatus is not None and len(itemStatus) > 0:
sopt[str(n)]['status'] = itemStatus
#for stat in itemStatus:
# if stat['name'] in ['지능', '체력', '정신력', '암속성저항']:
# sopt[str(n)]['status'].append(stat)
skill = option.get('reinforceSkill')
#if skill is not None:
# print ('스킬옵션있음', skill)
buffopt = option.get('itemBuff')
if buffopt is not None or (sname == '전설의 대장장이 - 역작 세트' and n == 5):
try:
buffexplain = buffopt['explain']
isreturn = False
except:
buffexplain = ""
isreturn = True
cls.parse_buff(buffexplain, sopt[str(n)]['buffopts'], sname, skill_db, step = n)
if isreturn is True:
continue
skills = buffopt.get('reinforceSkill')
if skills is not None and len(skills) > 0:
lv30 = False
lv50 = False
lv45 = False
for skill in skills:
if skill.get('skills') is not None:
for skill in skill['skills']:
if skill['name'] in ['마리오네트', '아포칼립스', '크럭스 오브 빅토리아']:
if lv50 is False:
sopt[str(n)]['buffopts'].append({'포레벨':skill['value']})
lv50 = True
elif skill['name'] in ['영광의 축복', '용맹의 축복', '금단의 저주']:
if lv30 is False:
sopt[str(n)]['buffopts'].append({'축레벨':skill['value']})
lv30 = True
elif skill['name'] in ['소악마', '신실한 열정', '신념의 오라']:
if lv45 is False:
sopt[str(n)]['buffopts'].append({'오라레벨':skill['value']})
lv45 = True
else:
print(skills)
raise Exception
elif skill.get('levelRange') is not None:
for lvRange in skill['levelRange']:
min_lv = int(lvRange['minLevel'])
max_lv = int(lvRange['maxLevel'])
lvup = int(lvRange['value'])
data = {'min':min_lv, 'max':max_lv, 'lvup':lvup}
sopt[str(n)]['buffopts'].append({'스킬구간': data})
else:
pass
#print(sname)
#e_origin_list = explain.split('\n')
#sopt[str(n)].append({'origin':e_origin_list})
#stats = option.get('status')
#if stats is not None:
# self.parse_stats(stats, sopt[str(n)])
#print("")
set_db[sid] = sopt
    # 수동 작업 목록 (manual follow-up list):
    # - 아린 혈관파열
    # - 시간자침(신화) 쿨초 (cooldown reset)
    # - 세계수의 뿌리 쿨초 (cooldown reset)
    #print(self.set_db)
@classmethod
def do_build_set_item(cls, setId, name, skill_db, item_db, set_db, runtime = True):
url = "setitems/" + setId + "?"
s_info = cls.load_api(url)
sitems = s_info['setItems']
soptions = s_info['setItemOption']
if runtime is False:
cls.set_tree[setId] = {'name': name, 'itemList': {}}
ids = []
for cur in sitems:
#print (item['itemName'])
if runtime is False:
iname = cur['itemName']
islot = cur['slotName']
url = "items?itemName="+urlparse.quote(iname)+"&"
try:
i_search = cls.load_api(url)
except:
raise
try:
if len(i_search['rows']) > 5:
mat_count = {'천':0, '가죽':0, '중갑':0, '경갑':0, '판금':0}
for ilist in i_search['rows']:
_ityped = ilist['itemTypeDetail']
ityped = _ityped.split(' ')[0]
mat_count[ityped] += 1
if max(mat_count.values()) == 2:
oritype = '판'
else:
for k, v in mat_count.items():
if v == 3:
oritype = k[0]
else:
oritype = None
for ilist in i_search['rows']:
itemId = ilist['itemId']
igrade = ilist['itemRarity']
ityped = ilist['itemTypeDetail']
if igrade == '신화':
url = 'items/' + itemId + '?'
itemDetail = cls.load_api(url)
status = itemDetail.get('itemStatus')
cls.set_tree[setId]['itemList']['신화'] = {'name': iname, 'rarity': igrade, 'id':itemId, 'slot':islot}
else:
if oritype is not None:
if oritype == ityped[0]:
url = 'items/' + itemId + '?'
itemDetail = cls.load_api(url)
remodel = itemDetail.get('remodelInfo')
transform = itemDetail.get('transformInfo')
status = itemDetail.get('itemStatus')
if remodel is not None:
if transform is not None:
cls.set_tree[setId]['itemList']['업글산물-' + islot] = {'name': iname, 'rarity': igrade, 'id':itemId, 'status':status}
else:
cls.set_tree[setId]['itemList']['산물-' + islot] = {'name': iname, 'rarity': igrade, 'id':itemId, 'status':status}
else:
cls.set_tree[setId]['itemList'][islot] = {'name': iname, 'rarity': igrade, 'id':itemId, 'status':status}
else:
url = 'items/' + itemId + '?'
itemDetail = cls.load_api(url)
remodel = itemDetail.get('remodelInfo')
transform = itemDetail.get('transformInfo')
status = itemDetail.get('itemStatus')
if remodel is not None:
if transform is not None:
cls.set_tree[setId]['itemList']['업글산물-' + islot] = {'name': iname, 'rarity': igrade, 'id':itemId, 'status':status}
else:
cls.set_tree[setId]['itemList']['산물-' + islot] = {'name': iname, 'rarity': igrade, 'id':itemId, 'status':status}
else:
cls.set_tree[setId]['itemList'][islot] = {'name': iname, 'rarity': igrade, 'id':itemId, 'status':status}
#print(ilist['itemName'], itemId)
ids.append(itemId)
if len(ids) >= 15:
cls.build_single_item(ids, skill_db, item_db, runtime = False)
ids = []
except:
print(ilist)
raise
if len(ids) > 0:
cls.build_single_item(ids, skill_db, item_db, runtime = False)
cls.build_set_option(setId, name, soptions, skill_db, set_db)
retId = setId
return retId
@classmethod
def build_set_item(cls, name, skill_db, item_db, set_db, runtime = True):
url = "setitems?setItemName="+urlparse.quote(name)+"&"
#print(url)
retId = None
try:
s_search = cls.load_api(url)
print(s_search)
except:
url = "setitems?setItemName="+urlparse.quote(name)+"&wordType=full&"
s_search = cls.load_api(url)
#print(s_search)
raise
try:
for slist in s_search['rows']:
setId = slist['setItemId']
retId = cls.do_build_set_item(setId, name, skill_db, item_db, set_db, runtime)
except:
#print(slist)
raise
return retId
| 40.927415 | 158 | 0.366339 |
7946fe2aeb47cc09a8bde3d964fb24ca01ff4a2d | 6,562 | py | Python | modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py | Aexyn/webrtc2 | daea5bf2deb843567a792f22ea2047a037e09d78 | [
"DOC",
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py | modulesio/webrtc | ea143e774b4c00a74b617f272f5a8f71169cf24e | [
"DOC",
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py | modulesio/webrtc | ea143e774b4c00a74b617f272f5a8f71169cf24e | [
"DOC",
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | #!/usr/bin/env python
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Finds the APM configuration that maximizes a provided metric by
parsing the output generated apm_quality_assessment.py.
"""
from __future__ import division
import collections
import logging
import os
import quality_assessment.data_access as data_access
import quality_assessment.collect_data as collect_data
def _InstanceArgumentsParser():
"""Arguments parser factory. Extends the arguments from 'collect_data'
with a few extra for selecting what parameters to optimize for.
"""
parser = collect_data.InstanceArgumentsParser()
parser.description = (
      'Rudimentary optimization of a function over different parameter '
'combinations.')
parser.add_argument('-n', '--config_dir', required=False,
help=('path to the folder with the configuration files'),
default='apm_configs')
parser.add_argument('-p', '--params', required=True, nargs='+',
                      help=('parameters to parse from the config files in '
'config_dir'))
parser.add_argument('-z', '--params_not_to_optimize', required=False,
nargs='+', default=[],
help=('parameters from `params` not to be optimized for'))
return parser
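# Illustrative invocation (the parameter names below are placeholders; any flags
# inherited from collect_data.InstanceArgumentsParser are omitted here):
#   apm_quality_assessment_optimize.py -p level gain_suppression -z level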
def _ConfigurationAndScores(data_frame, params,
params_not_to_optimize, config_dir):
"""Returns a list of all configurations and scores.
Args:
data_frame: A pandas data frame with the scores and config name
returned by _FindScores.
params: The parameter names to parse from configs the config
directory
params_not_to_optimize: The parameter names which shouldn't affect
the optimal parameter
selection. E.g., fixed settings and not
tunable parameters.
config_dir: Path to folder with config files.
Returns:
Dictionary of the form
{param_combination: [{params: {param1: value1, ...},
scores: {score1: value1, ...}}]}.
The key `param_combination` runs over all parameter combinations
of the parameters in `params` and not in
`params_not_to_optimize`. A corresponding value is a list of all
param combinations for params in `params_not_to_optimize` and
their scores.
"""
results = collections.defaultdict(list)
config_names = data_frame['apm_config'].drop_duplicates().values.tolist()
score_names = data_frame['eval_score_name'].drop_duplicates().values.tolist()
# Normalize the scores
normalization_constants = {}
for score_name in score_names:
scores = data_frame[data_frame.eval_score_name == score_name].score
normalization_constants[score_name] = max(scores)
params_to_optimize = [p for p in params if p not in params_not_to_optimize]
param_combination = collections.namedtuple("ParamCombination",
params_to_optimize)
for config_name in config_names:
config_json = data_access.AudioProcConfigFile.Load(
os.path.join(config_dir, config_name + ".json"))
scores = {}
data_cell = data_frame[data_frame.apm_config == config_name]
for score_name in score_names:
data_cell_scores = data_cell[data_cell.eval_score_name ==
score_name].score
scores[score_name] = sum(data_cell_scores) / len(data_cell_scores)
scores[score_name] /= normalization_constants[score_name]
result = {'scores': scores, 'params': {}}
config_optimize_params = {}
for param in params:
if param in params_to_optimize:
config_optimize_params[param] = config_json['-' + param]
else:
result['params'][param] = config_json['-' + param]
current_param_combination = param_combination( # pylint: disable=star-args
**config_optimize_params)
results[current_param_combination].append(result)
return results
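# Illustrative shape of the returned mapping (parameter/score names are
# placeholders that depend on the configs and eval scores actually used):
#   {ParamCombination(gain='5'): [{'params': {'level': '-10'},
#                                  'scores': {'polqa': 0.9, 'levels': 0.8}}, ...]}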
def _FindOptimalParameter(configs_and_scores, score_weighting):
"""Finds the config producing the maximal score.
Args:
configs_and_scores: structure of the form returned by
_ConfigurationAndScores
score_weighting: a function to weight together all score values of
the form [{params: {param1: value1, ...}, scores:
{score1: value1, ...}}] into a numeric
value
Returns:
the config that has the largest values of |score_weighting| applied
to its scores.
"""
min_score = float('+inf')
best_params = None
for config in configs_and_scores:
scores_and_params = configs_and_scores[config]
current_score = score_weighting(scores_and_params)
if current_score < min_score:
min_score = current_score
best_params = config
logging.debug("Score: %f", current_score)
logging.debug("Config: %s", str(config))
return best_params
def _ExampleWeighting(scores_and_configs):
"""Example argument to `_FindOptimalParameter`
Args:
scores_and_configs: a list of configs and scores, in the form
described in _FindOptimalParameter
Returns:
numeric value, the sum of all scores
"""
res = 0
for score_config in scores_and_configs:
res += sum(score_config['scores'].values())
return res
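# A hypothetical alternative weighting with the same signature, aggregating by
# the worst individual score instead of the sum:
#
#   def _WorstScoreWeighting(scores_and_configs):
#     return min(min(sc['scores'].values()) for sc in scores_and_configs)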
def main():
# Init.
# TODO(alessiob): INFO once debugged.
logging.basicConfig(level=logging.DEBUG)
parser = _InstanceArgumentsParser()
args = parser.parse_args()
# Get the scores.
src_path = collect_data.ConstructSrcPath(args)
logging.debug('Src path <%s>', src_path)
scores_data_frame = collect_data.FindScores(src_path, args)
all_scores = _ConfigurationAndScores(scores_data_frame,
args.params,
args.params_not_to_optimize,
args.config_dir)
opt_param = _FindOptimalParameter(all_scores, _ExampleWeighting)
logging.info('Optimal parameter combination: <%s>', opt_param)
  logging.info('Its score values: <%s>', all_scores[opt_param])
if __name__ == "__main__":
main()
| 36.455556 | 80 | 0.674489 |
7946fffaba9c93c4ec0138fc131478b28bfaf59f | 89 | py | Python | run.py | rabramley/telomere | e0246c0be18ce0b7e9d4ca88999626a005fede80 | [
"MIT"
] | null | null | null | run.py | rabramley/telomere | e0246c0be18ce0b7e9d4ca88999626a005fede80 | [
"MIT"
] | null | null | null | run.py | rabramley/telomere | e0246c0be18ce0b7e9d4ca88999626a005fede80 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from app import telomere
telomere.run(host='0.0.0.0', debug=True)
| 17.8 | 40 | 0.719101 |
7947013d8121bc25db6fbfdeec8607b6be530f9c | 409 | py | Python | iioy/movies/models/movie_rating.py | tizz98/iioy-v4 | 64d12fafb3c758f8291c6f577aba203932c44cbd | [
"MIT"
] | null | null | null | iioy/movies/models/movie_rating.py | tizz98/iioy-v4 | 64d12fafb3c758f8291c6f577aba203932c44cbd | [
"MIT"
] | 2 | 2020-06-05T18:23:54.000Z | 2021-03-19T22:02:23.000Z | iioy/movies/models/movie_rating.py | tizz98/iioy-v4 | 64d12fafb3c758f8291c6f577aba203932c44cbd | [
"MIT"
] | null | null | null | from django.db import models
from django_extensions.db.models import TimeStampedModel
class MovieRating(TimeStampedModel):
source = models.TextField()
value = models.TextField()
movie = models.ForeignKey(
to='movies.Movie',
related_name='ratings',
on_delete=models.CASCADE,
)
def __str__(self):
return f'{self.value} via {self.source} for {self.movie}'
| 24.058824 | 65 | 0.679707 |
79470150321b5821c928d9445069d9847b8e8d1d | 4,631 | py | Python | test/functional/combine_logs.py | anandhu-here/chuckrum | f2a734745e752cda50f5556cded7a713d969f4bc | [
"MIT"
] | null | null | null | test/functional/combine_logs.py | anandhu-here/chuckrum | f2a734745e752cda50f5556cded7a713d969f4bc | [
"MIT"
] | 1 | 2021-07-12T07:38:58.000Z | 2021-07-12T07:38:58.000Z | test/functional/combine_logs.py | anandhu-here/chuckrum | f2a734745e752cda50f5556cded7a713d969f4bc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Combine logs from multiple chuckrum nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}Z")
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
"""Main function. Parses args, reads the log files and renders them as text or html."""
parser = argparse.ArgumentParser(usage='%(prog)s [options] <test temporary directory>', description=__doc__)
parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
args, unknown_args = parser.parse_known_args()
if args.color and os.name != 'posix':
print("Color output requires posix terminal colors.")
sys.exit(1)
if args.html and args.color:
print("Only one out of --color or --html should be specified")
sys.exit(1)
# There should only be one unknown argument - the path of the temporary test directory
if len(unknown_args) != 1:
print("Unexpected arguments" + str(unknown_args))
sys.exit(1)
log_events = read_logs(unknown_args[0])
print_logs(log_events, color=args.color, html=args.html)
def read_logs(tmp_dir):
"""Reads log files.
Delegates to generator function get_log_events() to provide individual log events
for each of the input log files."""
files = [("test", "%s/test_framework.log" % tmp_dir)]
for i in itertools.count():
logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, i)
if not os.path.isfile(logfile):
break
files.append(("node%d" % i, logfile))
return heapq.merge(*[get_log_events(source, f) for source, f in files])
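# heapq.merge yields a single time-ordered stream because each per-file
# generator already emits events in timestamp order and LogEvent tuples compare
# by their first field, the ISO-8601 timestamp, which sorts lexicographically in
# chronological order.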
def get_log_events(source, logfile):
"""Generator function that returns individual log events.
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
try:
with open(logfile, 'r', encoding='utf-8') as infile:
event = ''
timestamp = ''
for line in infile:
# skip blank lines
if line == '\n':
continue
# if this line has a timestamp, it's the start of a new log event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
event = line
timestamp = time_match.group()
# if it doesn't have a timestamp, it's a continuation line of the previous log.
else:
event += "\n" + line
# Flush the final event
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
"""Renders the iterator of log events into text or html."""
if not html:
colors = defaultdict(lambda: '')
if color:
colors["test"] = "\033[0;36m" # CYAN
colors["node0"] = "\033[0;34m" # BLUE
colors["node1"] = "\033[0;32m" # GREEN
colors["node2"] = "\033[0;31m" # RED
colors["node3"] = "\033[0;33m" # YELLOW
colors["reset"] = "\033[0m" # Reset font color
for event in log_events:
print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, event.event, colors["reset"]))
else:
try:
import jinja2
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
.get_template('combined_log_template.html')
.render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
| 40.269565 | 196 | 0.618657 |
7947034c6b0d8b6a09cf2c5f960cd4da1240c1a9 | 2,925 | py | Python | driving_kalman.py | mihaigalos/DrivingKalman | c6bff74d1c7cd1994fe941a5a95a3a0e260c00d1 | [
"MIT"
] | null | null | null | driving_kalman.py | mihaigalos/DrivingKalman | c6bff74d1c7cd1994fe941a5a95a3a0e260c00d1 | [
"MIT"
] | null | null | null | driving_kalman.py | mihaigalos/DrivingKalman | c6bff74d1c7cd1994fe941a5a95a3a0e260c00d1 | [
"MIT"
] | null | null | null | # Adapted from https://stackoverflow.com/questions/47210512/using-pykalman-on-raw-acceleration-data-to-calculate-position
# on dataset from https://github.com/mmalekzadeh/motion-sense
from pykalman import KalmanFilter
import numpy as np
import matplotlib.pyplot as plt
from numpy import genfromtxt
AccX_HP = genfromtxt('driving_accelerometer_x_data.csv', delimiter=',')
Time = [i for i in range(len(AccX_HP))]
# Data description
# Time
# AccX_HP - high precision acceleration signal
# AccX_LP - low precision acceleration signal
# RefPosX - real position (ground truth)
# RefVelX - real velocity (ground truth)
# switch between two acceleration signals (note: only the high-precision AccX_HP
# is loaded above; the AccX_LP and ground-truth signals from the original
# example are not defined in this adaptation)
use_HP_signal = 1
if use_HP_signal:
AccX_Value = AccX_HP
AccX_Variance = 0.0007
else:
AccX_Value = AccX_LP
AccX_Variance = 0.0020
# time step
dt = 0.01
# transition_matrix
F = [[1, dt, 0.5 * dt**2],
[0, 1, dt],
[0, 0, 1]]
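# F encodes constant-acceleration kinematics for the state [position, velocity,
# acceleration]:
#   x_{k+1} = x_k + v_k*dt + 0.5*a_k*dt^2
#   v_{k+1} = v_k + a_k*dt
#   a_{k+1} = a_k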
# observation_matrix
H = [0, 0, 1]
# transition_covariance
Q = [[0.2, 0, 0],
[0, 0.1, 0],
[0, 0, 10e-4]]
# observation_covariance
R = AccX_Variance
# initial_state_mean
X0 = [0,
0,
AccX_Value[0]]
# initial_state_covariance
P0 = [[0, 0, 0],
[0, 0, 0],
[0, 0, AccX_Variance]]
n_timesteps = AccX_Value.shape[0]
n_dim_state = 3
filtered_state_means = np.zeros((n_timesteps, n_dim_state))
filtered_state_covariances = np.zeros((n_timesteps, n_dim_state, n_dim_state))
kf = KalmanFilter(transition_matrices=F,
observation_matrices=H,
transition_covariance=Q,
observation_covariance=R,
initial_state_mean=X0,
initial_state_covariance=P0)
# iterative estimation for each new measurement
for t in range(n_timesteps):
if t == 0:
filtered_state_means[t] = X0
filtered_state_covariances[t] = P0
else:
filtered_state_means[t], filtered_state_covariances[t] = (
kf.filter_update(
filtered_state_means[t - 1],
filtered_state_covariances[t - 1],
AccX_Value[t]
)
)
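# Note: the incremental filter_update() loop mirrors how samples would arrive in
# real time; for purely offline data the same forward pass could presumably be
# obtained in one call with kf.filter(AccX_Value).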
f, axarr = plt.subplots(3, sharex=True)
axarr[0].plot(Time, AccX_Value, label="Input AccX")
axarr[0].plot(Time, filtered_state_means[:, 2], "r-", label="Estimated AccX")
axarr[0].set_title('Acceleration X')
axarr[0].grid()
axarr[0].legend()
axarr[0].set_ylim([-4, 4])
#
# axarr[1].plot(Time, RefVelX, label="Reference VelX")
axarr[1].plot(Time, filtered_state_means[:, 1], "r-", label="Estimated VelX")
axarr[1].set_title('Velocity X')
axarr[1].grid()
axarr[1].legend()
axarr[1].set_ylim([-1, 20])
#
# axarr[2].plot(Time, RefPosX, label="Reference PosX")
axarr[2].plot(Time, filtered_state_means[:, 0], "r-", label="Estimated PosX")
axarr[2].set_title('Position X')
axarr[2].grid()
axarr[2].legend()
axarr[2].set_ylim([-10, 1000])
plt.show()
| 26.351351 | 121 | 0.648547 |
7947034e8d769a6a2c756434a17ff141700c941d | 11,654 | py | Python | src/training/network_training/nnUNetTrainerV2_DP.py | YZArren/ETCI2021 | 6dffb759b3439bc597e835f8dbd610ab4706e269 | [
"Apache-2.0"
] | 5 | 2021-07-26T12:19:08.000Z | 2022-01-18T07:50:12.000Z | src/training/network_training/nnUNetTrainerV2_DP.py | YZArren/ETCI2021 | 6dffb759b3439bc597e835f8dbd610ab4706e269 | [
"Apache-2.0"
] | null | null | null | src/training/network_training/nnUNetTrainerV2_DP.py | YZArren/ETCI2021 | 6dffb759b3439bc597e835f8dbd610ab4706e269 | [
"Apache-2.0"
] | 1 | 2022-03-02T15:19:07.000Z | 2022-03-02T15:19:07.000Z | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from batchgenerators.utilities.file_and_folder_operations import *
from src.network_architecture.generic_UNet_DP import Generic_UNet_DP
from src.training.data_augmentation.data_augmentation_moreDA import get_moreDA_augmentation
from src.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from src.utilities.to_torch import maybe_to_torch, to_cuda
from src.network_architecture.initialization import InitWeights_He
from src.network_architecture.neural_network import SegmentationNetwork
from src.training.dataloading.dataset_loading import unpack_dataset
from src.training.network_training.nnUNetTrainer import nnUNetTrainer
from src.utilities.nd_softmax import softmax_helper
from torch import nn
from torch.cuda.amp import autocast
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.utils import clip_grad_norm_
class nnUNetTrainerV2_DP(nnUNetTrainerV2):
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, num_gpus=1, distribute_batch_size=False, fp16=False):
super(nnUNetTrainerV2_DP, self).__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage,
unpack_data, deterministic, fp16)
self.init_args = (plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
deterministic, num_gpus, distribute_batch_size, fp16)
self.num_gpus = num_gpus
self.distribute_batch_size = distribute_batch_size
self.dice_smooth = 1e-5
self.dice_do_BG = False
self.loss = None
self.loss_weights = None
def setup_DA_params(self):
super(nnUNetTrainerV2_DP, self).setup_DA_params()
self.data_aug_params['num_threads'] = 8 * self.num_gpus
def process_plans(self, plans):
super(nnUNetTrainerV2_DP, self).process_plans(plans)
if not self.distribute_batch_size:
self.batch_size = self.num_gpus * self.plans['plans_per_stage'][self.stage]['batch_size']
else:
if self.batch_size < self.num_gpus:
print("WARNING: self.batch_size < self.num_gpus. Will not be able to use the GPUs well")
elif self.batch_size % self.num_gpus != 0:
print("WARNING: self.batch_size % self.num_gpus != 0. Will not be able to use the GPUs well")
def initialize(self, training=True, force_load_plans=False):
"""
- replaced get_default_augmentation with get_moreDA_augmentation
- only run this code once
- loss function wrapper for deep supervision
:param training:
:param force_load_plans:
:return:
"""
if not self.was_initialized:
maybe_mkdir_p(self.output_folder)
if force_load_plans or (self.plans is None):
self.load_plans_file()
self.process_plans(self.plans)
self.setup_DA_params()
################# Here configure the loss for deep supervision ############
net_numpool = len(self.net_num_pool_op_kernel_sizes)
weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)])
weights[~mask] = 0
weights = weights / weights.sum()
self.loss_weights = weights
################# END ###################
self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
"_stage%d" % self.stage)
if training:
self.dl_tr, self.dl_val = self.get_basic_generators()
if self.unpack_data:
print("unpacking dataset")
unpack_dataset(self.folder_with_preprocessed_data)
print("done")
else:
print(
"INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
"will wait all winter for your model to finish!")
self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val,
self.data_aug_params[
'patch_size_for_spatialtransform'],
self.data_aug_params,
deep_supervision_scales=self.deep_supervision_scales,
pin_memory=self.pin_memory)
self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
also_print_to_console=False)
self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
also_print_to_console=False)
else:
pass
self.initialize_network()
self.initialize_optimizer_and_scheduler()
assert isinstance(self.network, (SegmentationNetwork, DataParallel))
else:
self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
self.was_initialized = True
def initialize_network(self):
"""
replace genericUNet with the implementation of above for super speeds
"""
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.InstanceNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.InstanceNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
self.network = Generic_UNet_DP(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, True, False, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
def initialize_optimizer_and_scheduler(self):
assert self.network is not None, "self.initialize_network must be called first"
self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
momentum=0.99, nesterov=True)
self.lr_scheduler = None
def run_training(self):
self.maybe_update_lr(self.epoch)
# amp must be initialized before DP
ds = self.network.do_ds
self.network.do_ds = True
self.network = DataParallel(self.network, tuple(range(self.num_gpus)), )
ret = nnUNetTrainer.run_training(self)
self.network = self.network.module
self.network.do_ds = ds
return ret
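    # The network is wrapped in DataParallel only for the duration of the
    # training loop and unwrapped again afterwards, presumably so that
    # attributes such as do_ds, checkpointing and inference keep operating on
    # the underlying Generic_UNet_DP module rather than the wrapper.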
def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
data_dict = next(data_generator)
data = data_dict['data']
target = data_dict['target']
data = maybe_to_torch(data)
target = maybe_to_torch(target)
if torch.cuda.is_available():
data = to_cuda(data)
target = to_cuda(target)
self.optimizer.zero_grad()
if self.fp16:
with autocast():
ret = self.network(data, target, return_hard_tp_fp_fn=run_online_evaluation)
if run_online_evaluation:
ces, tps, fps, fns, tp_hard, fp_hard, fn_hard = ret
self.run_online_evaluation(tp_hard, fp_hard, fn_hard)
else:
ces, tps, fps, fns = ret
del data, target
l = self.compute_loss(ces, tps, fps, fns)
if do_backprop:
self.amp_grad_scaler.scale(l).backward()
self.amp_grad_scaler.unscale_(self.optimizer)
torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
self.amp_grad_scaler.step(self.optimizer)
self.amp_grad_scaler.update()
else:
ret = self.network(data, target, return_hard_tp_fp_fn=run_online_evaluation)
if run_online_evaluation:
ces, tps, fps, fns, tp_hard, fp_hard, fn_hard = ret
self.run_online_evaluation(tp_hard, fp_hard, fn_hard)
else:
ces, tps, fps, fns = ret
del data, target
l = self.compute_loss(ces, tps, fps, fns)
if do_backprop:
l.backward()
torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
self.optimizer.step()
return l.detach().cpu().numpy()
def run_online_evaluation(self, tp_hard, fp_hard, fn_hard):
tp_hard = tp_hard.detach().cpu().numpy().mean(0)
fp_hard = fp_hard.detach().cpu().numpy().mean(0)
fn_hard = fn_hard.detach().cpu().numpy().mean(0)
self.online_eval_foreground_dc.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8)))
self.online_eval_tp.append(list(tp_hard))
self.online_eval_fp.append(list(fp_hard))
self.online_eval_fn.append(list(fn_hard))
def compute_loss(self, ces, tps, fps, fns):
# we now need to effectively reimplement the loss
loss = None
for i in range(len(ces)):
if not self.dice_do_BG:
tp = tps[i][:, 1:]
fp = fps[i][:, 1:]
fn = fns[i][:, 1:]
else:
tp = tps[i]
fp = fps[i]
fn = fns[i]
if self.batch_dice:
tp = tp.sum(0)
fp = fp.sum(0)
fn = fn.sum(0)
else:
pass
nominator = 2 * tp + self.dice_smooth
denominator = 2 * tp + fp + fn + self.dice_smooth
dice_loss = (- nominator / denominator).mean()
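            # Soft-Dice term: dice = (2*TP + s) / (2*TP + FP + FN + s) with
            # smoothing s; it is negated so that minimizing the total loss
            # maximizes the Dice score.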
if loss is None:
loss = self.loss_weights[i] * (ces[i].mean() + dice_loss)
else:
loss += self.loss_weights[i] * (ces[i].mean() + dice_loss)
###########
        return loss
| 45.346304 | 124 | 0.596276 |
79470405ae200ae24d0ad0c23c149bf223d866e8 | 3,964 | py | Python | saleor/account/utils.py | sebasgoldberg/saleor | 2e94e8df80f305889434f549a4da6abc1257b598 | [
"CC-BY-4.0"
] | 1 | 2020-04-08T14:24:43.000Z | 2020-04-08T14:24:43.000Z | saleor/account/utils.py | sebasgoldberg/saleor | 2e94e8df80f305889434f549a4da6abc1257b598 | [
"CC-BY-4.0"
] | 10 | 2021-03-19T04:33:44.000Z | 2022-03-12T00:45:59.000Z | saleor/account/utils.py | sebasgoldberg/saleor | 2e94e8df80f305889434f549a4da6abc1257b598 | [
"CC-BY-4.0"
] | 4 | 2020-05-08T07:17:03.000Z | 2020-05-16T12:34:57.000Z | import os
import random
import jwt
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.files import File
from django.utils import timezone
from ..account.error_codes import AccountErrorCode
from ..checkout import AddressType
from ..core.utils import create_thumbnails
from ..extensions.manager import get_extensions_manager
from .models import User
AVATARS_PATH = os.path.join(
settings.PROJECT_ROOT, "saleor", "static", "images", "avatars"
)
def store_user_address(user, address, address_type):
"""Add address to user address book and set as default one."""
address = get_extensions_manager().change_user_address(address, address_type, user)
address_data = address.as_data()
address = user.addresses.filter(**address_data).first()
if address is None:
address = user.addresses.create(**address_data)
if address_type == AddressType.BILLING:
if not user.default_billing_address:
set_user_default_billing_address(user, address)
elif address_type == AddressType.SHIPPING:
if not user.default_shipping_address:
set_user_default_shipping_address(user, address)
def set_user_default_billing_address(user, address):
user.default_billing_address = address
user.save(update_fields=["default_billing_address"])
def set_user_default_shipping_address(user, address):
user.default_shipping_address = address
user.save(update_fields=["default_shipping_address"])
def change_user_default_address(user, address, address_type):
address = get_extensions_manager().change_user_address(address, address_type, user)
if address_type == AddressType.BILLING:
if user.default_billing_address:
user.addresses.add(user.default_billing_address)
set_user_default_billing_address(user, address)
elif address_type == AddressType.SHIPPING:
if user.default_shipping_address:
user.addresses.add(user.default_shipping_address)
set_user_default_shipping_address(user, address)
def create_superuser(credentials):
user, created = User.objects.get_or_create(
email=credentials["email"],
defaults={"is_active": True, "is_staff": True, "is_superuser": True},
)
if created:
user.avatar = get_random_avatar()
user.set_password(credentials["password"])
user.save()
create_thumbnails(
pk=user.pk, model=User, size_set="user_avatars", image_attr="avatar"
)
msg = "Superuser - %(email)s/%(password)s" % credentials
else:
msg = "Superuser already exists - %(email)s" % credentials
return msg
def get_random_avatar():
"""Return random avatar picked from a pool of static avatars."""
avatar_name = random.choice(os.listdir(AVATARS_PATH))
avatar_path = os.path.join(AVATARS_PATH, avatar_name)
return File(open(avatar_path, "rb"), name=avatar_name)
def remove_staff_member(staff):
"""Remove staff member account only if it has no orders placed.
Otherwise, switches is_staff status to False.
"""
if staff.orders.exists():
staff.is_staff = False
staff.user_permissions.clear()
staff.save()
else:
staff.delete()
def create_jwt_token(token_data):
expiration_date = timezone.now() + timezone.timedelta(hours=1)
token_kwargs = {"exp": expiration_date}
token_kwargs.update(token_data)
token = jwt.encode(token_kwargs, settings.SECRET_KEY, algorithm="HS256").decode()
return token
def decode_jwt_token(token):
try:
decoded_token = jwt.decode(
token.encode(), settings.SECRET_KEY, algorithms=["HS256"]
)
except jwt.PyJWTError:
raise ValidationError(
{
"token": ValidationError(
"Invalid or expired token.", code=AccountErrorCode.INVALID
)
}
)
return decoded_token
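# Illustrative round-trip (assumes Django settings, in particular SECRET_KEY,
# are configured):
#   token = create_jwt_token({"email": "user@example.com"})
#   payload = decode_jwt_token(token)   # -> {"exp": ..., "email": ...}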
| 32.760331 | 87 | 0.703078 |
794706b3c7e50240edce37cb61c48a2b9be260d1 | 1,064 | py | Python | gravityspytools/collection_to_subjectset/forms.py | Gravity-Spy/gravityspytools | 23ef83e36ed934f7c39440bf43f4d5c7b7b4abb0 | [
"BSD-3-Clause"
] | 4 | 2019-03-11T12:32:24.000Z | 2020-12-01T06:31:39.000Z | gravityspytools/collection_to_subjectset/forms.py | johnwick211/gravityspytools | 23ef83e36ed934f7c39440bf43f4d5c7b7b4abb0 | [
"BSD-3-Clause"
] | 19 | 2018-01-29T21:28:39.000Z | 2020-07-14T18:38:23.000Z | gravityspytools/collection_to_subjectset/forms.py | johnwick211/gravityspytools | 23ef83e36ed934f7c39440bf43f4d5c7b7b4abb0 | [
"BSD-3-Clause"
] | 4 | 2018-02-02T16:47:16.000Z | 2020-12-01T06:31:49.000Z |
from django import forms
import panoptes_client
class SearchForm(forms.Form):
username = forms.CharField(label='The Zoo username of the collection owner', max_length=100)
    collection_display_name = forms.CharField(label='The display name of the collection as it is in the URL. If the collection URL is https://www.zooniverse.org/collections/sbc538/45hz then 45hz would go here')
workflow_name = forms.CharField(label='The name of the workflow. Should include the name of the category')
def clean(self):
cleaned_data = super(SearchForm, self).clean()
username = cleaned_data.get('username')
collection_display_name = cleaned_data.get('collection_display_name')
tmp = panoptes_client.Collection.where(slug='{0}/{1}'.format(username, collection_display_name))
try:
tmp.next()
except:
raise forms.ValidationError("Either this collection does not "
"exist or the form was filled out in correctly."
)
| 50.666667 | 209 | 0.668233 |
7947086a836610c8280f78fa44fe081e98cb729c | 12,826 | py | Python | pontoon/sync/tests/test_core.py | zbraniecki/pontoon | d559316783938bc66d5d9a52dcdb01137475c259 | [
"BSD-3-Clause"
] | 1 | 2017-04-04T06:55:46.000Z | 2017-04-04T06:55:46.000Z | pontoon/sync/tests/test_core.py | zbraniecki/pontoon | d559316783938bc66d5d9a52dcdb01137475c259 | [
"BSD-3-Clause"
] | null | null | null | pontoon/sync/tests/test_core.py | zbraniecki/pontoon | d559316783938bc66d5d9a52dcdb01137475c259 | [
"BSD-3-Clause"
] | null | null | null |
import os.path
from django_nose.tools import (
assert_equal,
assert_false,
assert_not_equal,
assert_raises,
assert_true
)
from mock import ANY, Mock, patch, PropertyMock, MagicMock
from pontoon.base.models import (
Entity,
Repository,
Resource,
TranslatedResource,
)
from pontoon.base.tests import (
CONTAINS,
NOT,
UserFactory,
)
from pontoon.sync.core import (
commit_changes,
entity_key,
pull_changes,
update_entities,
update_resources,
update_translated_resources,
update_translations,
)
from pontoon.sync.tests import FAKE_CHECKOUT_PATH, FakeCheckoutTestCase
class UpdateEntityTests(FakeCheckoutTestCase):
def call_update_entities(self, collected):
with patch('pontoon.sync.core.collect_entities') as mock_collect_entities:
mock_collect_entities.return_value = collected
return update_entities(self.db_project, self.vcs_project, self.changeset)
def test_none(self):
"""
If both the db_entity and vcs_entity are None, raise a
CommandError, as that should never happen.
"""
with assert_raises(ValueError):
self.call_update_entities([('key', None, None)])
def test_obsolete(self):
"""If VCS is missing the entity in question, obsolete it."""
self.changeset.obsolete_db_entity = Mock()
self.call_update_entities([('key', self.main_db_entity, None)])
self.changeset.obsolete_db_entity.assert_called_with(self.main_db_entity)
def test_create(self):
"""If the DB is missing an entity in VCS, create it."""
self.changeset.create_db_entity = Mock()
self.call_update_entities([('key', None, self.main_vcs_entity)])
self.changeset.create_db_entity.assert_called_with(self.main_vcs_entity)
class UpdateTranslationsTests(FakeCheckoutTestCase):
def call_update_translations(self, collected):
with patch('pontoon.sync.core.collect_entities') as mock_collect_entities:
mock_collect_entities.return_value = collected
return update_translations(self.db_project, self.vcs_project,
self.translated_locale, self.changeset)
def test_missing_entities(self):
"""If either of the entities is missing, skip it."""
self.changeset.update_vcs_entity = Mock()
self.changeset.update_db_entity = Mock()
self.call_update_translations([
('one', None, self.main_vcs_entity),
('other', self.main_db_entity, None),
('both', None, None),
])
assert_false(self.changeset.update_vcs_entity.called)
assert_false(self.changeset.update_db_entity.called)
def test_no_translation(self):
"""If no translation exists for a specific locale, skip it."""
self.changeset.update_vcs_entity = Mock()
self.changeset.update_db_entity = Mock()
self.main_vcs_entity.has_translation_for = Mock(return_value=False)
self.call_update_translations([('key', self.main_db_entity, self.main_vcs_entity)])
assert_false(self.changeset.update_vcs_entity.called)
assert_false(self.changeset.update_db_entity.called)
def test_db_changed(self):
"""
If the DB entity has changed since the last sync, update the
VCS.
"""
self.changeset.update_vcs_entity = Mock()
with patch.object(Entity, 'has_changed', return_value=True):
self.call_update_translations([('key', self.main_db_entity, self.main_vcs_entity)])
self.changeset.update_vcs_entity.assert_called_with(
self.translated_locale, self.main_db_entity, self.main_vcs_entity
)
def test_vcs_changed(self):
"""
If the DB entity has not changed since the last sync, update the DB with
the latest changes from VCS.
"""
self.changeset.update_db_entity = Mock()
with patch.object(Entity, 'has_changed', return_value=False):
self.call_update_translations([('key', self.main_db_entity, self.main_vcs_entity)])
self.changeset.update_db_entity.assert_called_with(
self.translated_locale, self.main_db_entity, self.main_vcs_entity
)
class UpdateResourcesTests(FakeCheckoutTestCase):
def test_basic(self):
# Check for self.main_db_resource to be updated and
# self.other_db_resource to be created.
self.main_db_resource.total_strings = 5000
self.main_db_resource.save()
self.other_db_resource.delete()
update_resources(self.db_project, self.vcs_project)
self.main_db_resource.refresh_from_db()
assert_equal(self.main_db_resource.total_strings, len(self.main_vcs_resource.entities))
other_db_resource = Resource.objects.get(path=self.other_vcs_resource.path)
assert_equal(other_db_resource.total_strings, len(self.other_vcs_resource.entities))
class UpdateTranslatedResourcesTests(FakeCheckoutTestCase):
def test_basic(self):
"""
Create/update the TranslatedResource object on all resources
available in the current locale.
"""
update_translated_resources(self.db_project, self.vcs_project,
self.changeset, self.translated_locale)
assert_true(TranslatedResource.objects.filter(
resource=self.main_db_resource, locale=self.translated_locale
).exists())
assert_true(TranslatedResource.objects.filter(
resource=self.other_db_resource, locale=self.translated_locale
).exists())
assert_false(TranslatedResource.objects.filter(
resource=self.missing_db_resource, locale=self.translated_locale
).exists())
def test_asymmetric(self):
"""
Create/update the TranslatedResource object on asymmetric resources
even if they don't exist in the target locale.
"""
with patch.object(Resource, 'is_asymmetric', new_callable=PropertyMock) as is_asymmetric:
is_asymmetric.return_value = True
update_translated_resources(self.db_project, self.vcs_project,
self.changeset, self.translated_locale)
assert_true(TranslatedResource.objects.filter(
resource=self.main_db_resource, locale=self.translated_locale
).exists())
assert_true(TranslatedResource.objects.filter(
resource=self.other_db_resource, locale=self.translated_locale
).exists())
assert_true(TranslatedResource.objects.filter(
resource=self.missing_db_resource, locale=self.translated_locale
).exists())
def test_extra_locales(self):
"""
Only create/update the TranslatedResource object for active locales,
even if the inactive locale has a resource.
"""
update_translated_resources(self.db_project, self.vcs_project,
self.changeset, self.translated_locale)
assert_true(TranslatedResource.objects.filter(
resource=self.main_db_resource, locale=self.translated_locale
).exists())
assert_true(TranslatedResource.objects.filter(
resource=self.other_db_resource, locale=self.translated_locale
).exists())
assert_false(TranslatedResource.objects.filter(
resource=self.main_db_resource, locale=self.inactive_locale
).exists())
assert_false(TranslatedResource.objects.filter(
resource=self.other_db_resource, locale=self.inactive_locale
).exists())
class EntityKeyTests(FakeCheckoutTestCase):
def test_entity_key_common_string(self):
"""
Entities with the same string from different resources must not get the
same key from entity_key.
"""
assert_not_equal(
entity_key(self.main_vcs_resource.entities['Common String']),
entity_key(self.other_vcs_resource.entities['Common String'])
)
class CommitChangesTests(FakeCheckoutTestCase):
def setUp(self):
super(CommitChangesTests, self).setUp()
self.mock_repo_commit = self.patch_object(Repository, 'commit')
def test_multiple_authors(self):
"""
Tests if multiple authors are passed to commit message. The
author with the most occurrences for the locale should be set as
the commit author.
"""
first_author, second_author = UserFactory.create_batch(2)
self.changeset.commit_authors_per_locale = {
self.translated_locale.code: [first_author, first_author, second_author]
}
self.db_project.repository_for_path = Mock(return_value=self.repository)
commit_changes(self.db_project, self.vcs_project,
self.changeset, self.translated_locale)
self.repository.commit.assert_called_with(
CONTAINS(first_author.display_name_and_email, second_author.display_name_and_email),
first_author,
os.path.join(FAKE_CHECKOUT_PATH, self.translated_locale.code)
)
def test_author_with_multiple_contributions(self):
"""
Tests if author with multiple contributions occurs once in commit message.
"""
author = UserFactory.create()
self.changeset.commit_authors_per_locale = {
self.translated_locale.code: [author, author]
}
self.db_project.repository_for_path = Mock(return_value=self.repository)
commit_changes(self.db_project, self.vcs_project,
self.changeset, self.translated_locale)
self.repository.commit.assert_called_with(
CONTAINS(author.display_name_and_email),
author,
os.path.join(FAKE_CHECKOUT_PATH, self.translated_locale.code)
)
commit_message = self.repository.commit.mock_calls[0][1][0]
assert_equal(commit_message.count(author.display_name_and_email), 1)
def test_no_authors(self):
"""
If no authors are found in the changeset, default to a fake
"Mozilla Pontoon" user.
"""
self.changeset.commit_authors_per_locale = {
self.translated_locale.code: []
}
self.db_project.repository_for_path = Mock(return_value=self.repository)
commit_changes(self.db_project, self.vcs_project,
self.changeset, self.translated_locale)
self.repository.commit.assert_called_with(
NOT(CONTAINS('Authors:')), # Don't list authors in commit
ANY,
os.path.join(FAKE_CHECKOUT_PATH, self.translated_locale.code)
)
user = self.mock_repo_commit.call_args[0][1]
assert_equal(user.first_name, 'Mozilla Pontoon')
assert_equal(user.email, '[email protected]')
class PullChangesTests(FakeCheckoutTestCase):
def setUp(self):
super(PullChangesTests, self).setUp()
self.mock_repo_pull = self.patch_object(Repository, 'pull')
def test_basic(self):
"""
Pull_changes should call repo.pull for each repo for the
project, save the return value to repo.last_synced_revisions,
and return whether any changes happened in VCS.
"""
mock_db_project = MagicMock()
mock_db_project.repositories.all.return_value = [self.repository]
self.mock_repo_pull.return_value = {'single_locale': 'asdf'}
has_changed, revisions = pull_changes(self.db_project)
assert_true(has_changed)
assert_equal(revisions, {self.repository.pk: {'single_locale': 'asdf'}})
self.repository.last_synced_revisions = revisions[self.repository.pk]
self.repository.save()
self.repository.refresh_from_db()
assert_equal(self.repository.last_synced_revisions, {'single_locale': 'asdf'})
def test_unsure_changes(self):
"""
If any of the repos returns None as a revision number, consider
the VCS as changed even if the revisions match the last sync.
"""
self.mock_repo_pull.return_value = {'single_locale': None}
self.repository.last_synced_revisions = {'single_locale': None}
self.repository.save()
assert_true(pull_changes(self.db_project))
def test_unchanged(self):
"""
If the revisions returned by repo.pull match those from the last
sync, consider the VCS unchanged and return False.
"""
self.mock_repo_pull.return_value = {'single_locale': 'asdf'}
self.repository.last_synced_revisions = {'single_locale': 'asdf'}
self.repository.save()
has_changed, _ = pull_changes(self.db_project)
assert_false(has_changed)
| 39.223242 | 97 | 0.677608 |
794709794a8012f56bb9469cb256b06877905b67 | 31,821 | py | Python | ansible_navigator/actions/run.py | ptoal/ansible-navigator | 47d51e4c2861903e002f6ad58707a2126af18f00 | [
"Apache-2.0",
"MIT"
] | null | null | null | ansible_navigator/actions/run.py | ptoal/ansible-navigator | 47d51e4c2861903e002f6ad58707a2126af18f00 | [
"Apache-2.0",
"MIT"
] | null | null | null | ansible_navigator/actions/run.py | ptoal/ansible-navigator | 47d51e4c2861903e002f6ad58707a2126af18f00 | [
"Apache-2.0",
"MIT"
] | null | null | null | """ :run
"""
import curses
import datetime
import json
import logging
import os
import re
import shlex
import shutil
import uuid
from math import floor
from queue import Queue
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from . import run_action
from . import _actions as actions
from ..runner.api import CommandRunnerAsync
from ..app import App
from ..app_public import AppPublic
from ..configuration_subsystem import ApplicationConfiguration
from ..steps import Step
from ..ui_framework import CursesLinePart
from ..ui_framework import CursesLines
from ..ui_framework import Interaction
from ..ui_framework import dict_to_form
from ..ui_framework import form_to_dict
from ..ui_framework import warning_notification
from ..utils import abs_user_path
from ..utils import human_time
from ..utils import remove_ansi
RESULT_TO_COLOR = [
("(?i)^failed$", 9),
("(?i)^ok$", 10),
("(?i)^ignored$", 13),
("(?i)^skipped$", 14),
("(?i)^in_progress$", 8),
]
get_color = lambda word: next( # noqa: E731
(x[1] for x in RESULT_TO_COLOR if re.match(x[0], word)), 0
)
def color_menu(_colno: int, colname: str, entry: Dict[str, Any]) -> Tuple[int, int]:
# pylint: disable=too-many-branches
"""Find matching color for word
:param word: A word to match
:type word: str(able)
"""
colval = entry[colname]
color = 0
decoration = 0
if "__play_name" in entry:
if not colval:
color = 8
elif colname in ["__task_count", "__play_name", "__progress"]:
failures = entry["__failed"] + entry["__unreachable"]
if failures:
color = 9
elif entry["__ok"]:
color = 10
else:
color = 8
elif colname == "__changed":
color = 11
else:
color = get_color(colname[2:])
if colname == "__progress" and entry["__progress"].strip().lower() == "complete":
decoration = curses.A_BOLD
elif "task" in entry:
if entry["__result"].lower() == "__in_progress":
color = get_color(entry["__result"])
elif colname in ["__result", "__host", "__number", "__task", "__task_action"]:
color = get_color(entry["__result"])
elif colname == "__changed":
if colval is True:
color = 11
else:
color = get_color(entry["__result"])
elif colname == "__duration":
color = 12
return color, decoration
def content_heading(obj: Any, screen_w: int) -> Union[CursesLines, None]:
"""create a heading for some piece fo content showing
:param obj: The content going to be shown
:type obj: Any
:param screen_w: The current screen width
:type screen_w: int
:return: The heading
:rtype: Union[CursesLines, None]
"""
if isinstance(obj, dict) and "task" in obj:
heading = []
detail = "PLAY [{play}:{tnum}] ".format(play=obj["play"], tnum=obj["__number"])
stars = "*" * (screen_w - len(detail))
heading.append(
tuple([CursesLinePart(column=0, string=detail + stars, color=0, decoration=0)])
)
detail = "TASK [{task}] ".format(task=obj["task"])
stars = "*" * (screen_w - len(detail))
heading.append(
tuple([CursesLinePart(column=0, string=detail + stars, color=0, decoration=0)])
)
if obj["__changed"] is True:
color = 11
res = "CHANGED"
else:
color = next((x[1] for x in RESULT_TO_COLOR if re.match(x[0], obj["__result"])), 0)
res = obj["__result"]
if "res" in obj and "msg" in obj["res"]:
msg = str(obj["res"]["msg"]).replace("\n", " ").replace("\r", "")
else:
msg = ""
string = "{res}: [{host}] {msg}".format(res=res, host=obj["__host"], msg=msg)
string = string + (" " * (screen_w - len(string) + 1))
heading.append(
tuple(
[
CursesLinePart(
column=0,
string=string,
color=color,
decoration=curses.A_UNDERLINE,
)
]
)
)
return tuple(heading)
return None
def filter_content_keys(obj: Dict[Any, Any]) -> Dict[Any, Any]:
"""when showing content, filter out some keys"""
return {k: v for k, v in obj.items() if not (k.startswith("_") or k.endswith("uuid"))}
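# Example (added for clarity; values are made up): given
#     {"task": "ping", "__result": "OK", "task_uuid": "..."}
# only {"task": "ping"} survives, because keys starting with "_" or ending in
# "uuid" are treated as internal bookkeeping fields and hidden from the UI.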
PLAY_COLUMNS = [
"__play_name",
"__ok",
"__changed",
"__unreachable",
"__failed",
"__skipped",
"__ignored",
"__in_progress",
"__task_count",
"__progress",
]
TASK_LIST_COLUMNS = [
"__result",
"__host",
"__number",
"__changed",
"__task",
"__task_action",
"__duration",
]
@actions.register
class Action(App):
# pylint: disable=too-many-instance-attributes
""":run"""
KEGEX = r"""(?x)
^
(?P<run>r(?:un)?
(\s(?P<params_run>.*))?)
$"""
def __init__(
self,
args: ApplicationConfiguration,
play_columns: List = PLAY_COLUMNS,
task_list_columns: List = TASK_LIST_COLUMNS,
content_key_filter: Callable = filter_content_keys,
):
# pylint: disable=dangerous-default-value
# for display purposes use the 4: of the uuid
super().__init__(args=args, logger_name=__name__, name="run")
self._subaction_type: str
self._msg_from_plays: Tuple[Optional[str], Optional[int]] = (None, None)
self._queue: Queue = Queue()
self.runner: CommandRunnerAsync
self._runner_finished: bool
self._auto_scroll = False
self._plays = Step(
name="plays",
tipe="menu",
columns=play_columns,
value=[],
show_func=self._play_stats,
select_func=self._task_list_for_play,
)
self._task_list_columns = task_list_columns
self._content_key_filter = content_key_filter
@property
def mode(self):
"""if mode == stdout and playbook artifact creation is enabled
run in interactive mode, but print stdout"""
if all(
(
self._args.mode == "stdout",
self._args.playbook_artifact_enable,
self._args.app != "replay",
)
):
return "stdout_w_artifact"
return self._args.mode
def run_stdout(self) -> int:
"""Run in oldschool mode, just stdout
:param args: The parsed args from the cli
:type args: Namespace
"""
if self._args.app == "replay":
successful: bool = self._init_replay()
return 0 if successful else 1
self._logger.debug("playbook requested in interactive mode")
self._subaction_type = "playbook"
self._logger = logging.getLogger(f"{__name__}_{self._subaction_type}")
self._run_runner()
while True:
self._dequeue()
if self.runner.finished:
if self._args.playbook_artifact_enable:
self.write_artifact()
self._logger.debug("runner finished")
break
return self.runner.ansible_runner_instance.rc
def run(self, interaction: Interaction, app: AppPublic) -> Union[Interaction, None]:
# pylint: disable=too-many-branches
"""run :run or :replay
:param interaction: The interaction from the user
:type interaction: Interaction
:param app: The app instance
:type app: App
"""
self._prepare_to_run(app, interaction)
if interaction.action.match.groupdict().get("run"):
self._logger.debug("run requested in interactive mode")
self._subaction_type = "run"
str_uuid = str(uuid.uuid4())
self._logger = logging.getLogger(f"{__name__}_{str_uuid[-4:]}")
self._name = f"run_{str_uuid[-4:]}"
initialized = self._init_run()
elif interaction.action.match.groupdict().get("replay"):
self._logger.debug("replay requested in interactive mode")
self._subaction_type = "replay"
self._name = "replay"
self._logger = logging.getLogger(f"{__name__}_{self._subaction_type}")
initialized = self._init_replay()
if not initialized:
self._prepare_to_exit(interaction)
return None
self.steps.append(self._plays)
while True:
self.update()
self._take_step()
if not self.steps:
if not self._runner_finished:
self._logger.error("Can not step back while playbook in progress, :q! to exit")
self.steps.append(self._plays)
else:
self._logger.debug(
"No steps remaining for '%s' returning to calling app", self._name
)
break
if self.steps.current.name == "quit":
if self._args.app == "replay":
self._prepare_to_exit(interaction)
return self.steps.current
done = self._prepare_to_quit(self.steps.current)
if done:
self._prepare_to_exit(interaction)
return self.steps.current
self.steps.back_one()
self._prepare_to_exit(interaction)
return None
# pylint: disable=too-many-branches
def _init_run(self) -> bool:
"""in the case of :run, check the user input"""
# Ensure the playbook and inventory are valid
self._update_args(
["run"] + shlex.split(self._interaction.action.match.groupdict()["params_run"] or "")
)
if isinstance(self._args.playbook, str):
playbook_valid = os.path.exists(self._args.playbook)
else:
playbook_valid = False
if isinstance(self._args.inventory, list):
inventory_valid = all((os.path.exists(inv) for inv in self._args.inventory))
else:
# Permit running without an inventory
inventory_valid = True
if not all((playbook_valid, inventory_valid)):
populated_form = self._prompt_for_playbook()
if populated_form["cancelled"]:
return False
new_cmd = ["run"]
new_cmd.append(populated_form["fields"]["playbook"]["value"])
for field in populated_form["fields"].values():
if field["name"].startswith("inv_") and field["value"] != "":
new_cmd.extend(["-i", field["value"]])
if populated_form["fields"]["cmdline"]["value"]:
new_cmd.extend(shlex.split(populated_form["fields"]["cmdline"]["value"]))
# Parse as if provided from the cmdline
self._update_args(new_cmd)
self._run_runner()
self._logger.info("Run initialized and playbook started.")
return True
def _init_replay(self) -> bool:
"""in the case of :replay, replay the artifact
check for a version, to be safe
        copy the calling app args as our own so they can be updated safely
with a uuid attached to the name
"""
self._logger.debug("Starting replay artifact request with mode %s", self.mode)
if self.mode == "interactive":
self._update_args(
["replay"]
+ shlex.split(self._interaction.action.match.groupdict()["params_replay"] or "")
)
artifact_file = self._args.playbook_artifact_replay
if isinstance(self._args.playbook_artifact_replay, str):
artifact_valid = os.path.exists(self._args.playbook_artifact_replay)
else:
artifact_valid = False
if not artifact_valid and self.mode == "interactive":
populated_form = self._prompt_for_artifact(artifact_file=artifact_file)
if populated_form["cancelled"]:
return False
artifact_file = populated_form["fields"]["artifact_file"]["value"]
try:
with open(artifact_file) as json_file:
data = json.load(json_file)
except json.JSONDecodeError as exc:
self._logger.debug("json decode error: %s", str(exc))
self._logger.error("Unable to parse artifact file")
return False
version = data.get("version", "")
if version.startswith("1."):
try:
stdout = data["stdout"]
if self.mode == "interactive":
self._plays.value = data["plays"]
self._interaction.ui.update_status(data["status"], data["status_color"])
self.stdout = stdout
else:
for line in data["stdout"]:
if self._args.display_color is True:
print(line)
else:
print(remove_ansi(line))
except KeyError as exc:
self._logger.debug("missing keys from artifact file")
self._logger.debug("error was: %s", str(exc))
return False
else:
self._logger.error(
"Incompatible artifact version, got '%s', compatible = '1.y.z'", version
)
return False
self._runner_finished = True
self._logger.debug("Completed replay artifact request with mode %s", self.mode)
return True
def _prompt_for_artifact(self, artifact_file: str) -> Dict[Any, Any]:
"""prompt for a valid artifact file"""
if not isinstance(artifact_file, str):
artifact_file = ""
FType = Dict[str, Any]
form_dict: FType = {
"title": "Artifact file not found, please confirm the following",
"fields": [],
}
form_field = {
"name": "artifact_file",
"prompt": "Path to artifact file",
"type": "text_input",
"validator": {"name": "valid_file_path"},
"pre_populate": artifact_file,
}
form_dict["fields"].append(form_field)
form = dict_to_form(form_dict)
self._interaction.ui.show(form)
populated_form = form_to_dict(form, key_on_name=True)
return populated_form
def _prompt_for_playbook(self) -> Dict[Any, Any]:
"""prepopulate a form to confirm the playbook details"""
self._logger.debug("Inventory/Playbook not set, provided, or valid, prompting")
if isinstance(self._args.playbook, str):
playbook = self._args.playbook
else:
playbook = ""
if isinstance(self._args.inventory, list):
inventory = self._args.inventory
else:
inventory = ["", "", ""]
if isinstance(self._args.cmdline, list):
cmdline = " ".join(self._args.cmdline)
else:
cmdline = ""
FType = Dict[str, Any]
form_dict: FType = {
"title": "Inventory and/or playbook not found, please confirm the following",
"fields": [],
}
form_field = {
"name": "playbook",
"pre_populate": playbook,
"prompt": "Path to playbook",
"type": "text_input",
"validator": {"name": "valid_file_path"},
}
form_dict["fields"].append(form_field)
for idx, inv in enumerate(inventory):
form_field = {
"name": f"inv_{idx}",
"pre_populate": inv,
"prompt": "Inventory source",
"type": "text_input",
"validator": {"name": "valid_path_or_none"},
}
form_dict["fields"].append(form_field)
form_field = {
"name": "cmdline",
"pre_populate": cmdline,
"prompt": "Additional command line parameters",
"type": "text_input",
"validator": {"name": "none"},
}
form_dict["fields"].append(form_field)
form = dict_to_form(form_dict)
self._interaction.ui.show(form)
populated_form = form_to_dict(form, key_on_name=True)
return populated_form
def _take_step(self) -> None:
"""run the current step on the stack"""
result = None
if isinstance(self.steps.current, Interaction):
result = run_action(self.steps.current.name, self.app, self.steps.current)
elif isinstance(self.steps.current, Step):
if self.steps.current.show_func:
self.steps.current.show_func()
if self.steps.current.type == "menu":
new_scroll = len(self.steps.current.value)
if self._auto_scroll:
self._interaction.ui.scroll(new_scroll)
result = self._interaction.ui.show(
obj=self.steps.current.value,
columns=self.steps.current.columns,
color_menu_item=color_menu,
)
if self._interaction.ui.scroll() < new_scroll and self._auto_scroll:
self._logger.debug("autoscroll disabled")
self._auto_scroll = False
elif self._interaction.ui.scroll() >= new_scroll and not self._auto_scroll:
self._logger.debug("autoscroll enabled")
self._auto_scroll = True
elif self.steps.current.type == "content":
result = self._interaction.ui.show(
obj=self.steps.current.value,
index=self.steps.current.index,
content_heading=content_heading,
filter_content_keys=self._content_key_filter,
)
if result is None:
self.steps.back_one()
else:
self.steps.append(result)
def _run_runner(self) -> None:
"""spin up runner"""
executable_cmd: Optional[str]
if self.mode == "stdout_w_artifact":
mode = "interactive"
else:
mode = self.mode
if isinstance(self._args.set_environment_variable, dict):
set_envvars = {**self._args.set_environment_variable}
else:
set_envvars = {}
if self._args.display_color is False:
set_envvars["ANSIBLE_NOCOLOR"] = "1"
kwargs = {
"container_engine": self._args.container_engine,
"host_cwd": os.getcwd(),
"execution_environment_image": self._args.execution_environment_image,
"execution_environment": self._args.execution_environment,
"inventory": self._args.inventory,
"navigator_mode": mode,
"pass_environment_variable": self._args.pass_environment_variable,
"set_environment_variable": set_envvars,
}
if isinstance(self._args.playbook, str):
kwargs.update({"playbook": self._args.playbook})
if isinstance(self._args.execution_environment_volume_mounts, list):
kwargs.update(
{"container_volume_mounts": self._args.execution_environment_volume_mounts}
)
if self._args.execution_environment:
executable_cmd = "ansible-playbook"
else:
executable_cmd = shutil.which("ansible-playbook")
if not executable_cmd:
self._logger.error("'ansible-playbook' executable not found")
return
pass_through_arg = []
if self._args.help_playbook is True:
pass_through_arg.append("--help")
if isinstance(self._args.cmdline, list):
pass_through_arg.extend(self._args.cmdline)
kwargs.update({"cmdline": pass_through_arg})
self.runner = CommandRunnerAsync(executable_cmd=executable_cmd, queue=self._queue, **kwargs)
self.runner.run()
self._runner_finished = False
self._logger.debug("runner requested to start")
def _dequeue(self) -> None:
"""Drain the runner queue"""
drain_count = 0
while not self._queue.empty():
message = self._queue.get()
self._handle_message(message)
drain_count += 1
if drain_count:
self._logger.debug("Drained %s events", drain_count)
def _handle_message(self, message: dict) -> None:
# pylint: disable=too-many-branches
# pylint: disable=too-many-nested-blocks
# pylint: disable=too-many-statements
"""Handle a runner message
:param message: The message from runner
:type message: dict
"""
try:
event = message["event"]
except KeyError:
error = f"Unhandled message from runner queue, discarded: {message}"
self._logger.critical(error)
else:
if "stdout" in message and message["stdout"]:
self.stdout.extend(message["stdout"].splitlines())
if self.mode == "stdout_w_artifact":
print(message["stdout"])
if event in ["verbose", "error"]:
if "ERROR!" in message["stdout"]:
self._msg_from_plays = ("ERROR", 9)
if self.mode == "interactive":
self._notify_error(message["stdout"])
elif "WARNING" in message["stdout"]:
self._msg_from_plays = ("WARNINGS", 13)
if event == "playbook_on_play_start":
play = message["event_data"]
play["__play_name"] = play["name"]
play["tasks"] = []
self._plays.value.append(play)
if event.startswith("runner_on_"):
runner_event = event.split("_")[2]
task = message["event_data"]
play_id = next(
idx for idx, p in enumerate(self._plays.value) if p["uuid"] == task["play_uuid"]
)
if runner_event in ["ok", "skipped", "unreachable", "failed"]:
if runner_event == "failed" and task["ignore_errors"]:
result = "ignored"
else:
result = runner_event
task["__result"] = result.upper()
task["__changed"] = task.get("res", {}).get("changed", False)
task["__duration"] = human_time(seconds=round(task["duration"], 2))
task_id = None
for idx, play_task in enumerate(self._plays.value[play_id]["tasks"]):
if task["task_uuid"] == play_task["task_uuid"]:
if task["host"] == play_task["host"]:
task_id = idx
break
if task_id is not None:
self._plays.value[play_id]["tasks"][task_id].update(task)
elif runner_event == "start":
task["__host"] = task["host"]
task["__result"] = "IN_PROGRESS"
task["__changed"] = "unknown"
task["__duration"] = None
task["__number"] = len(self._plays.value[play_id]["tasks"])
task["__task"] = task["task"]
task["__task_action"] = task["task_action"]
self._plays.value[play_id]["tasks"].append(task)
def _play_stats(self) -> None:
"""Calculate the play's stats based
        on its tasks
"""
for idx, play in enumerate(self._plays.value):
total = ["__ok", "__skipped", "__failed", "__unreachable", "__ignored", "__in_progress"]
self._plays.value[idx].update(
{
tot: len([t for t in play["tasks"] if t["__result"].lower() == tot[2:]])
for tot in total
}
)
self._plays.value[idx]["__changed"] = len(
[t for t in play["tasks"] if t["__changed"] is True]
)
task_count = len(play["tasks"])
self._plays.value[idx]["__task_count"] = task_count
completed = task_count - self._plays.value[idx]["__in_progress"]
if completed:
new = floor((completed / task_count * 100))
current = self._plays.value[idx].get("__pcomplete", 0)
self._plays.value[idx]["__pcomplete"] = max(new, current)
self._plays.value[idx]["__progress"] = str(max(new, current)) + "%"
else:
self._plays.value[idx]["__progress"] = "0%"
def _prepare_to_quit(self, interaction: Interaction) -> bool:
"""Looks like we're headed out of here
:param interaction: the quit interaction
:type interaction: Interaction
        :return: a bool indicating whether or not it's safe to exit
:rtype: bool
"""
self.update()
if self.runner is not None and not self.runner.finished:
if interaction.action.match.groupdict()["exclamation"]:
self._logger.debug("shutting down runner")
self.runner.cancelled = True
while not self.runner.finished:
pass
self.write_artifact()
return True
self._logger.warning("Quit requested but playbook running, try q! or quit!")
return False
self._logger.debug("runner not running")
return True
def _task_list_for_play(self) -> Step:
"""generate a menu of task for the currently selected play
:return: The menu step
:rtype: Step
"""
value = self.steps.current.selected["tasks"]
step = Step(
name="task_list",
tipe="menu",
columns=self._task_list_columns,
select_func=self._task_from_task_list,
value=value,
)
return step
def _task_from_task_list(self) -> Step:
"""generate task content for the selected task
        :return: content which shows a task
:rtype: Step
"""
value = self.steps.current.value
index = self.steps.current.index
step = Step(name="task", tipe="content", index=index, value=value)
return step
def update(self) -> None:
"""Drain the queue, set the status and write the artifact if needed"""
# let the calling app update as well
self._calling_app.update()
if hasattr(self, "runner"):
self._dequeue()
self._set_status()
if self.runner.finished and not self._runner_finished:
# self._interaction.ui.disable_refresh()
self._logger.debug("runner finished")
self._logger.info("Playbook complete")
self.write_artifact()
self._runner_finished = True
def _get_status(self) -> Tuple[str, int]:
"""Get the status and color
:return: status string, status color
:rtype: tuple of str and int
"""
status = ""
status_color = 0
if self.runner.status:
if self.runner and self.runner.finished and self.runner.status:
status = self.runner.status
if self.runner.status == "failed":
status_color = 9
else:
status_color = self._msg_from_plays[1] or 10
else:
if self._msg_from_plays[0] is not None and self._msg_from_plays[1] is not None:
status = self._msg_from_plays[0]
status_color = self._msg_from_plays[1]
else:
status = self.runner.status
status_color = 10
return status, status_color
def _set_status(self) -> None:
"""Set the ui status"""
status, status_color = self._get_status()
self._interaction.ui.update_status(status, status_color)
def write_artifact(self, filename: Optional[str] = None) -> None:
"""Write the artifact
:param filename: The file to write to
:type filename: str
"""
if (
filename
or self._args.playbook_artifact_enable is True
and self._args.help_playbook is not True
):
filename = filename or self._args.playbook_artifact_save_as
filename = filename.format(
playbook_dir=os.path.dirname(self._args.playbook),
playbook_name=os.path.splitext(os.path.basename(self._args.playbook))[0],
ts_utc=datetime.datetime.now(tz=datetime.timezone.utc).isoformat(),
)
self._logger.debug("Formatted artifact file name set to %s", filename)
filename = abs_user_path(filename)
self._logger.debug("Resolved artifact file name set to %s", filename)
status, status_color = self._get_status()
try:
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as outfile:
artifact = {
"version": "1.0.0",
"plays": self._plays.value,
"stdout": self.stdout,
"status": status,
"status_color": status_color,
}
json.dump(artifact, outfile, indent=4)
self._logger.info("Saved artifact as %s", filename)
except (IOError, OSError) as exc:
error = (
f"Saving the artifact file failed, resulted in the following error: f{str(exc)}"
)
self._logger.error(error)
def rerun(self) -> None:
"""rerun the current playbook
since we're not reinstantiating run,
drain the queue, clear the steps, reset the index, etc
"""
if self._subaction_type == "run":
if self.runner.finished:
self._plays.value = []
self._plays.index = None
self._msg_from_plays = (None, None)
self._queue.queue.clear()
self.stdout = []
self._run_runner()
self.steps.clear()
self.steps.append(self._plays)
self._logger.debug("Playbook rerun triggered")
else:
self._logger.warning("Playbook rerun ignored, current playbook not complete")
elif self._subaction_type == "replay":
self._logger.error("No rerun available when artifact is loaded")
else:
self._logger.error("sub-action type '%s' is invalid", self._subaction_type)
def _notify_error(self, message: str):
"""show a blocking warning"""
warn_msg = ["Errors were encountered while running the playbook:"]
messages = remove_ansi(message).splitlines()
messages[-1] += "..."
warn_msg.extend(messages)
warn_msg += ["[HINT] To see the full error message try ':stdout'"]
warn_msg += ["[HINT] After it's fixed, try to ':rerun' the playbook"]
warning = warning_notification(warn_msg)
self._interaction.ui.show(warning)
| 36.119183 | 100 | 0.554602 |
7947099da5d94978d65208650463b1519b38bf4e | 3,183 | py | Python | tv.boxeeplay.svtplay3/settings.py | BoxeePlay/svtplay3 | 8bbaf5af0ec6927417ff1172bd0ea10b61fab605 | [
"MIT"
] | 1 | 2015-03-03T14:59:02.000Z | 2015-03-03T14:59:02.000Z | tv.boxeeplay.svtplay3/settings.py | BoxeePlay/svtplay3 | 8bbaf5af0ec6927417ff1172bd0ea10b61fab605 | [
"MIT"
] | null | null | null | tv.boxeeplay.svtplay3/settings.py | BoxeePlay/svtplay3 | 8bbaf5af0ec6927417ff1172bd0ea10b61fab605 | [
"MIT"
] | null | null | null |
#encoding:utf-8
#author:Andreas Pehrson
#project:boxeeplay.tv
import mc
from logger import BPLog, Level
USE_PIRATEPLAY_KEY = "use_pirateplay"
BITRATE_LIMIT_KEY = "bitrate_limit"
def conf():
return mc.GetApp().GetLocalConfig()
def activate():
decision = mc.ShowDialogSelect("Jag vill ändra...", [get_option_stream_source(), get_option_bitrate_limit()])
if decision == 0:
activate_stream_source_selection()
if decision == 1:
activate_bitrate_limit_selection()
def activate_stream_source_selection():
cont = mc.ShowDialogConfirm("Ändra Uppspelningskälla", "Traditionellt öppnas videor i den inbyggda webbläsaren. Med pirateplay kan vi istället öppna videoströmmarna direkt i Boxee. Då startar de snabbare men med låst bitrate/kvalitet.", "Avbryt", "Fortsätt")
if not cont:
return
opt_pirateplay = "Pirateplay om möjligt"
opt_webonly = "Endast webbläsaren"
if use_pirateplay(): opt_pirateplay = "[valt] " + opt_pirateplay
else: opt_webonly = "[valt] " + opt_webonly
decision = mc.ShowDialogSelect("Ändra Uppspelningskälla", [opt_pirateplay, opt_webonly])
if decision == 0:
set_use_pirateplay(True)
elif decision == 1:
set_use_pirateplay(False)
else:
BPLog("Stream source dialog cancelled")
return
def activate_bitrate_limit_selection():
cont = mc.ShowDialogConfirm("Ändra bandbreddsbegränsning", "När Pirateplay används spelar vi normalt upp strömmarna med högsta möjliga kvalitet. Har du då problem med hackig och buffrande uppspelning kan du här ställa in en gräns för hur hög bitrate vi får välja.", "Avbryt", "Fortsätt")
if not cont:
return
options = [ "Obegränsat", "2500 Kbps", "2000 Kbps", "1500 Kbps", "1000 Kbps", "500 Kbps" ]
option_values = [ -1, 2500, 2000, 1500, 1000, 500 ]
limit = bitrate_limit()
active_value_index = 0
try: active_value_index = option_values.index(limit)
except: BPLog("Value %d not found in list of bitrate limit options" %limit, Level.WARNING)
options[active_value_index] = "[valt] " + options[active_value_index]
decision = mc.ShowDialogSelect("Begränsa bandbredd", options)
if decision == -1:
BPLog("Bitrate limit dialog cancelled")
return
chosen_limit = option_values[decision]
set_bitrate_limit(chosen_limit)
BPLog("Bitrate limit set to %d kbps (%s)" %(chosen_limit, options[decision]))
def use_pirateplay():
return conf().GetValue(USE_PIRATEPLAY_KEY) == "True"
def set_use_pirateplay(use_pirateplay):
conf().SetValue(USE_PIRATEPLAY_KEY, str(use_pirateplay))
def bitrate_limit():
limit = conf().GetValue(BITRATE_LIMIT_KEY)
if limit == "": return -1
else: return int(limit)
def set_bitrate_limit(limit):
conf().SetValue(BITRATE_LIMIT_KEY, str(limit))
def get_option_stream_source():
opt = "Uppspelningskälla: "
if use_pirateplay():
opt += "Pirateplay"
else:
opt += "Web"
return opt
def get_option_bitrate_limit():
opt = "Bandbreddsbegränsning: "
limit = bitrate_limit()
if limit == -1:
opt += "Obegränsat"
else:
opt += "%d kbps" %limit
return opt
| 34.225806 | 291 | 0.700283 |
79470a3e7e9fc34968d9340c4c032e16fd48e191 | 11,524 | py | Python | scripts/test_globalF.py | Tarekbouamer/Image-Retrieval-for-Image-Based-Localization | fcad9af4f558bebb3cbec1d08e49603a452f439d | [
"BSD-3-Clause"
] | 3 | 2021-01-15T13:58:22.000Z | 2021-01-22T00:03:34.000Z | scripts/test_globalF.py | Tarekbouamer/Image-Retrieval-for-Image-Based-Localization | fcad9af4f558bebb3cbec1d08e49603a452f439d | [
"BSD-3-Clause"
] | null | null | null | scripts/test_globalF.py | Tarekbouamer/Image-Retrieval-for-Image-Based-Localization | fcad9af4f558bebb3cbec1d08e49603a452f439d | [
"BSD-3-Clause"
] | null | null | null |
import argparse
import os
import time
import pickle
import numpy as np
import torch
from torch.utils.model_zoo import load_url
from torchvision import transforms
from cirtorch.models.GF_net import init_network, extract_vectors
from cirtorch.datasets.datahelpers import cid2filename
from cirtorch.datasets.testdataset import configdataset
from cirtorch.utils.download import download_train, download_test
from cirtorch.utils.whiten import whitenlearn, whitenapply
from cirtorch.utils.evaluate import compute_map_and_print
from cirtorch.utils.general import get_data_root, htime
PRETRAINED = {
'retrievalSfM120k-vgg16-gem' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/retrieval-SfM-120k/retrievalSfM120k-vgg16-gem-b4dcdc6.pth',
'retrievalSfM120k-resnet101-gem' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/retrieval-SfM-120k/retrievalSfM120k-resnet101-gem-b80fb85.pth',
# new modules with whitening learned end-to-end
'rSfM120k-tl-resnet50-gem-w' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/retrieval-SfM-120k/rSfM120k-tl-resnet50-gem-w-97bf910.pth',
'rSfM120k-tl-resnet101-gem-w' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/retrieval-SfM-120k/rSfM120k-tl-resnet101-gem-w-a155e54.pth',
'rSfM120k-tl-resnet152-gem-w' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/retrieval-SfM-120k/rSfM120k-tl-resnet152-gem-w-f39cada.pth',
'gl18-tl-resnet50-gem-w' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/gl18/gl18-tl-resnet50-gem-w-83fdc30.pth',
'gl18-tl-resnet101-gem-w' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/gl18/gl18-tl-resnet101-gem-w-a4d43db.pth',
'gl18-tl-resnet152-gem-w' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/gl18/gl18-tl-resnet152-gem-w-21278d5.pth',
}
datasets_names = ['oxford5k', 'paris6k', 'roxford5k', 'rparis6k']
whitening_names = ['retrieval-SfM-30k', 'retrieval-SfM-120k']
parser = argparse.ArgumentParser(description='PyTorch CNN Image Retrieval Testing')
# network
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--network-path', '-npath', metavar='NETWORK',
help="pretrained network or network path (destination where network is saved)")
group.add_argument('--network-offtheshelf', '-noff', metavar='NETWORK',
help="off-the-shelf network, in the format 'ARCHITECTURE-POOLING' or 'ARCHITECTURE-POOLING-{reg-lwhiten-whiten}'," +
" examples: 'resnet101-gem' | 'resnet101-gem-reg' | 'resnet101-gem-whiten' | 'resnet101-gem-lwhiten' | 'resnet101-gem-reg-whiten'")
# test options
parser.add_argument('--datasets', '-d', metavar='DATASETS', default='oxford5k,paris6k',
help="comma separated list of test datasets: " +
" | ".join(datasets_names) +
" (default: 'oxford5k,paris6k')")
parser.add_argument('--image-size', '-imsize', default=1024, type=int, metavar='N',
help="maximum size of longer image side used for testing (default: 1024)")
parser.add_argument('--multiscale', '-ms', metavar='MULTISCALE', default='[1]',
help="use multiscale vectors for testing, " +
" examples: '[1]' | '[1, 1/2**(1/2), 1/2]' | '[1, 2**(1/2), 1/2**(1/2)]' (default: '[1]')")
parser.add_argument('--whitening', '-w', metavar='WHITENING', default=None, choices=whitening_names,
help="dataset used to learn whitening for testing: " +
" | ".join(whitening_names) +
" (default: None)")
# GPU ID
parser.add_argument('--gpu-id', '-g', default='0', metavar='N',
help="gpu id used for testing (default: '0')")
def main():
args = parser.parse_args()
# check if there are unknown datasets
for dataset in args.datasets.split(','):
if dataset not in datasets_names:
raise ValueError('Unsupported or unknown dataset: {}!'.format(dataset))
# check if test dataset are downloaded
# and download if they are not
download_train(get_data_root())
download_test(get_data_root())
# setting up the visible GPU
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
# loading network from path
if args.network_path is not None:
print(">> Loading network:\n>>>> '{}'".format(args.network_path))
if args.network_path in PRETRAINED:
# pretrained modules (downloaded automatically)
state = load_url(PRETRAINED[args.network_path], model_dir=os.path.join(get_data_root(), 'modules'))
else:
# fine-tuned network from path
state = torch.load(args.network_path)
# parsing net params from meta
# architecture, pooling, mean, std required
# the rest has default values, in case that is doesnt exist
net_params = {}
net_params['architecture'] = state['meta']['architecture']
net_params['pooling'] = state['meta']['pooling']
net_params['local_whitening'] = state['meta'].get('local_whitening', False)
net_params['regional'] = state['meta'].get('regional', False)
net_params['whitening'] = state['meta'].get('whitening', False)
net_params['mean'] = state['meta']['mean']
net_params['std'] = state['meta']['std']
net_params['pretrained'] = False
# load network
net = init_network(net_params)
net.load_state_dict(state['state_dict'])
# if whitening is precomputed
if 'Lw' in state['meta']:
net.meta['Lw'] = state['meta']['Lw']
print(">>>> loaded network: ")
print(net.meta_repr())
# loading offtheshelf network
elif args.network_offtheshelf is not None:
# parse off-the-shelf parameters
offtheshelf = args.network_offtheshelf.split('-')
net_params = {}
net_params['architecture'] = offtheshelf[0]
net_params['pooling'] = offtheshelf[1]
net_params['local_whitening'] = 'lwhiten' in offtheshelf[2:]
net_params['regional'] = 'reg' in offtheshelf[2:]
net_params['whitening'] = 'whiten' in offtheshelf[2:]
net_params['pretrained'] = True
# load off-the-shelf network
print(">> Loading off-the-shelf network:\n>>>> '{}'".format(args.network_offtheshelf))
net = init_network(net_params)
print(">>>> loaded network: ")
print(net.meta_repr())
# setting up the multi-scale parameters
ms = list(eval(args.multiscale))
if len(ms)>1 and net.meta['pooling'] == 'gem' and not net.meta['regional'] and not net.meta['whitening']:
msp = net.pool.p.item()
print(">> Set-up multiscale:")
print(">>>> ms: {}".format(ms))
print(">>>> msp: {}".format(msp))
else:
msp = 1
# moving network to gpu and eval mode
net.cuda()
net.eval()
# set up the transform
normalize = transforms.Normalize(
mean=net.meta['mean'],
std=net.meta['std']
)
transform = transforms.Compose([
transforms.ToTensor(),
normalize
])
# compute whitening
if args.whitening is not None:
start = time.time()
if 'Lw' in net.meta and args.whitening in net.meta['Lw']:
print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
if len(ms)>1:
Lw = net.meta['Lw'][args.whitening]['ms']
else:
Lw = net.meta['Lw'][args.whitening]['ss']
else:
# if we evaluate modules from path we should save/load whitening
# not to compute it every time
if args.network_path is not None:
whiten_fn = args.network_path + '_{}_whiten'.format(args.whitening)
if len(ms) > 1:
whiten_fn += '_ms'
whiten_fn += '.pth'
else:
whiten_fn = None
if whiten_fn is not None and os.path.isfile(whiten_fn):
print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
Lw = torch.load(whiten_fn)
else:
print('>> {}: Learning whitening...'.format(args.whitening))
# loading db
db_root = os.path.join(get_data_root(), 'train', args.whitening)
ims_root = os.path.join(db_root, 'ims')
db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening))
with open(db_fn, 'rb') as f:
db = pickle.load(f)
images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]
# extract whitening vectors
print('>> {}: Extracting...'.format(args.whitening))
wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
# learning whitening
print('>> {}: Learning...'.format(args.whitening))
wvecs = wvecs.numpy()
m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
Lw = {'m': m, 'P': P}
# saving whitening if whiten_fn exists
if whiten_fn is not None:
print('>> {}: Saving to {}...'.format(args.whitening, whiten_fn))
torch.save(Lw, whiten_fn)
print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time()-start)))
else:
Lw = None
# evaluate on test datasets
datasets = args.datasets.split(',')
for dataset in datasets:
start = time.time()
print('>> {}: Extracting...'.format(dataset))
# prepare config structure for the test dataset
cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
images = [cfg['im_fname'](cfg,i) for i in range(cfg['n'])]
qimages = [cfg['qim_fname'](cfg,i) for i in range(cfg['nq'])]
try:
bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
except:
bbxs = None # for holidaysmanrot and copydays
# extract database and query vectors
print('>> {}: database images...'.format(dataset))
vecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
print('>> {}: query images...'.format(dataset))
qvecs = extract_vectors(net, qimages, args.image_size, transform, bbxs=bbxs, ms=ms, msp=msp)
print('>> {}: Evaluating...'.format(dataset))
# convert to numpy
vecs = vecs.numpy()
qvecs = qvecs.numpy()
# search, rank, and print
scores = np.dot(vecs.T, qvecs)
ranks = np.argsort(-scores, axis=0)
compute_map_and_print(dataset, ranks, cfg['gnd'])
if Lw is not None:
# whiten the vectors
vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])
# search, rank, and print
scores = np.dot(vecs_lw.T, qvecs_lw)
ranks = np.argsort(-scores, axis=0)
compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd'])
print('>> {}: elapsed time: {}'.format(dataset, htime(time.time()-start)))
if __name__ == '__main__':
    main()
| 43.486792 | 162 | 0.599098 |
79470abaee1d412287761485b77daf98d263b262 | 2,734 | py | Python | poc.py | hcongvan/emotion-recognition-neural-networks | e98b1ce21599e83fccb97a31386ecd8ebfc6d3b3 | [
"MIT"
] | null | null | null | poc.py | hcongvan/emotion-recognition-neural-networks | e98b1ce21599e83fccb97a31386ecd8ebfc6d3b3 | [
"MIT"
] | null | null | null | poc.py | hcongvan/emotion-recognition-neural-networks | e98b1ce21599e83fccb97a31386ecd8ebfc6d3b3 | [
"MIT"
] | null | null | null |
# Proof-of-concept
import cv2
import sys
from constants import *
from emotion_recognition import EmotionRecognition
import numpy as np
import pdb
cascade_classifier = cv2.CascadeClassifier(CASC_PATH)
def brighten(data,b):
datab = data * b
return datab
def format_image(image):
if len(image.shape) > 2 and image.shape[2] == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
image = cv2.imdecode(image, cv2.CV_LOAD_IMAGE_GRAYSCALE)
faces = cascade_classifier.detectMultiScale(
image,
scaleFactor = 1.3,
minNeighbors = 5
)
# pdb.set_trace()
# faces = cv2.resize(faces,(SIZE_FACE,SIZE_FACE))
# None is we don't found an image
if not len(faces) > 0:
return None
max_area_face = faces[0]
for face in faces:
if face[2] * face[3] > max_area_face[2] * max_area_face[3]:
max_area_face = face
# Chop image to face
face = max_area_face
image = image[face[1]:(face[1] + face[2]), face[0]:(face[0] + face[3])]
# Resize image to network size
try:
image = cv2.resize(image, (SIZE_FACE, SIZE_FACE), interpolation = cv2.INTER_CUBIC) / 255.
except Exception:
print("[+] Problem during resize")
return None
#cv2.imshow("Lol", image)
#cv2.waitKey(0)
return image
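# Note added for clarity: format_image() returns a SIZE_FACE x SIZE_FACE grayscale
# face crop scaled to [0, 1], or None when no face is detected (or the resize
# fails); the prediction result is checked for None in the capture loop below.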
# Load Model
network = EmotionRecognition()
network.build_network()
video_capture = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX
feelings_faces = []
for index, emotion in enumerate(EMOTIONS):
feelings_faces.append(cv2.imread('./emojis/' + emotion + '.png', -1))
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
#pdb.set_trace()
# Predict result with network
result = network.predict(format_image(frame))
# Draw face in frame
# for (x,y,w,h) in faces:
# cv2.rectangle(frame, (x,y), (x+w,y+h), (255,0,0), 2)
# Write results in frame
if result is not None:
for index, emotion in enumerate(EMOTIONS):
cv2.putText(frame, emotion, (10, index * 20 + 20), cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 255, 0), 1);
cv2.rectangle(frame, (130, index * 20 + 10), (130 + int(result[0][index] * 100), (index + 1) * 20 + 4), (255, 0, 0), -1)
# m = 0
# i = 0
# for c in result[0]:
# if m < c:
# m = c
# i = i +1;
face_image = feelings_faces[np.argmax(result[0])]
# Ugly transparent fix
for c in range(0, 3):
frame[200:320, 10:130, c] = face_image[:,:,c] * (face_image[:, :, 3] / 255.0) + frame[200:320, 10:130, c] * (1.0 - face_image[:, :, 3] / 255.0)
# Display the resulting frame
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
| 28.778947 | 150 | 0.644111 |
79470bc632bfdd5e3ab8a407df89043379e127a5 | 1,632 | py | Python | SRWLIB_ExampleViewDataFile.py | mrakitin/SRW-gui | 9c603d5a407dfa36ba92e8646d1a57151de825ed | [
"Apache-2.0"
] | null | null | null | SRWLIB_ExampleViewDataFile.py | mrakitin/SRW-gui | 9c603d5a407dfa36ba92e8646d1a57151de825ed | [
"Apache-2.0"
] | null | null | null | SRWLIB_ExampleViewDataFile.py | mrakitin/SRW-gui | 9c603d5a407dfa36ba92e8646d1a57151de825ed | [
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#############################################################################
# SRWLIB Example View Data File: View data stored in a file
# v 0.01
#############################################################################
from __future__ import print_function #Python 2.7 compatibility
from uti_plot import *
import optparse
if __name__=='__main__':
p = optparse.OptionParser()
p.add_option('-f', '--infile', dest='infile', metavar='FILE', default='', help='input file name')
p.add_option('-e', '--e', dest='e', metavar='NUMBER', type='float', default=0, help='photon energy')
p.add_option('-x', '--x', dest='x', metavar='NUMBER', type='float', default=0, help='horizontal position')
p.add_option('-y', '--y', dest='y', metavar='NUMBER', type='float', default=0, help='vertical position')
p.add_option('-l', '--readlab', dest='readlab', metavar='NUMBER', type='int', nargs=0, default=0, help='read labels from the file header (1) or not (0)')
p.add_option('-j', '--joined', dest='joined', metavar='NUMBER', type='int', nargs=0, default=0, help='place different graphs jointly into one figure (1) or into separate figures (0)')
opt, args = p.parse_args()
if opt.readlab != 0: opt.readlab = 1
if opt.joined != 0: opt.joined = 1
if len(opt.infile) == 0:
print('File name was not specified. Use -f option to specify the file name with path.')
quit()
#print(opt.joined)
uti_data_file_plot(opt.infile, opt.readlab, opt.e, opt.x, opt.y, opt.joined)
uti_plot_show()
#print(opt.infile, opt.readlab, opt.e, opt.x, opt.y, opt.joined)
| 49.454545 | 187 | 0.588848 |
79470bece896f0caea5cb18cf4b22ee0b32b2bd1 | 602 | py | Python | If_and_circulation.py | Becavalier/Python-Practice-Cases | 23139e8bab0f5c712d38f378364842262d17e520 | [
"MIT"
] | 1 | 2016-08-18T00:53:51.000Z | 2016-08-18T00:53:51.000Z | If_and_circulation.py | Becavalier/Python-Practice-Cases | 23139e8bab0f5c712d38f378364842262d17e520 | [
"MIT"
] | null | null | null | If_and_circulation.py | Becavalier/Python-Practice-Cases | 23139e8bab0f5c712d38f378364842262d17e520 | [
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# If
age = 3;
if age >= 18:
print('adult');
elif age >= 6:
print('teenager');
else:
print('kid');
s = input('birth: ');
birth = int(s);
if birth < 2000:
print('00前');
else:
print('00后');
# Circulation
names = ['Michael', 'Bob', 'Tracy'];
for name in names:
print(name);
sum = 0;
for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
sum = sum + x;
print(sum);
# Python method: range()
sum = 0;
for x in range(101):
sum = sum + x;
print(sum);
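# Note (added): the built-in sum() would give the same result, e.g. sum(range(101)) == 5050,
# but it is shadowed here because the accumulator variable above is also named "sum".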
# While
sum = 0;
n = 99;
while n > 0:
sum = sum + n;
n = n - 2;
print(sum);
| 13.377778 | 41 | 0.51495 |
79470c218eb93beb3cbe517362d3c67eafa14117 | 1,446 | py | Python | chameleon/legacy/skfeature/utility/mutual_information.py | diviyat/chameleon | 12c212ef3ecaab73e6b2a4ae378b25495926ad75 | [
"MIT"
] | 2 | 2021-10-21T23:09:00.000Z | 2021-12-14T07:55:43.000Z | chameleon/legacy/skfeature/utility/mutual_information.py | diviyat/chameleon | 12c212ef3ecaab73e6b2a4ae378b25495926ad75 | [
"MIT"
] | null | null | null | chameleon/legacy/skfeature/utility/mutual_information.py | diviyat/chameleon | 12c212ef3ecaab73e6b2a4ae378b25495926ad75 | [
"MIT"
] | null | null | null |
import chameleon.legacy.skfeature.utility.entropy_estimators as ee
def information_gain(f1, f2):
"""
This function calculates the information gain, where ig(f1,f2) = H(f1) - H(f1|f2)
Input
-----
f1: {numpy array}, shape (n_samples,)
f2: {numpy array}, shape (n_samples,)
Output
------
ig: {float}
"""
ig = ee.entropyd(f1) - conditional_entropy(f1, f2)
return ig
def conditional_entropy(f1, f2):
"""
This function calculates the conditional entropy, where ce = H(f1) - I(f1;f2)
Input
-----
f1: {numpy array}, shape (n_samples,)
f2: {numpy array}, shape (n_samples,)
Output
------
ce: {float}
ce is conditional entropy of f1 and f2
"""
ce = ee.entropyd(f1) - ee.midd(f1, f2)
return ce
def su_calculation(f1, f2):
"""
This function calculates the symmetrical uncertainty, where su(f1,f2) = 2*IG(f1,f2)/(H(f1)+H(f2))
Input
-----
f1: {numpy array}, shape (n_samples,)
f2: {numpy array}, shape (n_samples,)
Output
------
su: {float}
su is the symmetrical uncertainty of f1 and f2
"""
# calculate information gain of f1 and f2, t1 = ig(f1,f2)
t1 = information_gain(f1, f2)
# calculate entropy of f1, t2 = H(f1)
t2 = ee.entropyd(f1)
# calculate entropy of f2, t3 = H(f2)
t3 = ee.entropyd(f2)
# su(f1,f2) = 2*t1/(t2+t3)
su = 2.0*t1/(t2+t3)
return su
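# Illustrative sketch (not part of the original module), assuming the entropy_estimators
# helpers accept plain lists of discrete labels as they do in scikit-feature:
if __name__ == '__main__':
    f1 = [0, 0, 1, 1, 2, 2]
    f2 = [0, 0, 1, 1, 2, 2]
    # identical features: H(f1|f2) = 0, so ig(f1,f2) = H(f1) and su(f1,f2) = 1
    print(information_gain(f1, f2), su_calculation(f1, f2))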
| 21.58209 | 101 | 0.584371 |
79470c6ffe2f20b3049cbc386160009ef026da3d | 1,716 | py | Python | xlsxwriter/test/comparison/test_table08.py | shareablee/XlsxWriter | 3cfcbe18fbc4526158ffbb5e7bb5227f78e3f5f9 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/comparison/test_table08.py | shareablee/XlsxWriter | 3cfcbe18fbc4526158ffbb5e7bb5227f78e3f5f9 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/comparison/test_table08.py | shareablee/XlsxWriter | 3cfcbe18fbc4526158ffbb5e7bb5227f78e3f5f9 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'table08.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = ['xl/calcChain.xml', '[Content_Types].xml', 'xl/_rels/workbook.xml.rels']
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column('C:F', 10.288)
worksheet.write_string('A1', 'Column1')
worksheet.write_string('B1', 'Column2')
worksheet.write_string('C1', 'Column3')
worksheet.write_string('D1', 'Column4')
worksheet.write_string('E1', 'Total')
worksheet.add_table('C3:F14', {'total_row': 1,
'columns': [{'total_string': 'Total'},
{},
{},
{'total_function': 'count'},
]})
workbook.close()
self.assertExcelEqual()
| 31.2 | 101 | 0.52331 |
79470c836165dc8858da71bf43bd6b14c83a53dd | 68 | py | Python | bci_learning_studio/__init__.py | hellomoto-ai/bci-learning-studio | f37256bd0e2d85590ff258f14d70f09afcd9609f | [
"MIT"
] | null | null | null | bci_learning_studio/__init__.py | hellomoto-ai/bci-learning-studio | f37256bd0e2d85590ff258f14d70f09afcd9609f | [
"MIT"
] | 13 | 2018-11-28T05:00:00.000Z | 2019-01-07T04:06:37.000Z | bci_learning_studio/__init__.py | hellomoto-ai/bci-learning-studio | f37256bd0e2d85590ff258f14d70f09afcd9609f | [
"MIT"
] | null | null | null | __all__ = [
'__version__',
]
from ._version import __version__
| 11.333333 | 33 | 0.691176 |
79470f8e4c770d0b26e1dd0a0fc2b31f65659d51 | 9,021 | py | Python | src/python/ilp_pipeline_config.py | hhelm10/distPURL | 735480bceff38b7a10ea618c13fe93a5b3d26910 | [
"MIT"
] | 2 | 2021-04-27T15:26:00.000Z | 2021-09-12T23:15:02.000Z | src/python/ilp_pipeline_config.py | hhelm10/distPURL | 735480bceff38b7a10ea618c13fe93a5b3d26910 | [
"MIT"
] | null | null | null | src/python/ilp_pipeline_config.py | hhelm10/distPURL | 735480bceff38b7a10ea618c13fe93a5b3d26910 | [
"MIT"
] | 2 | 2021-04-27T15:26:33.000Z | 2021-09-13T12:32:24.000Z | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
"""
import logging
import os
import sys
from typing import *
from typing import List, Any, Dict
from multiprocessing import Lock
from datetime import datetime
from pytz import timezone
from ilp_common import *
from ilp_common_classes import *
from ilp_manual_input import *
from ilp_voi_experiment import *
from ilp_pre_proc import *
class ILPPipelineParameter:
def __init__(self, name: str, function_handler: Callable[[str],Any], value: Any = None, default: Any = None):
self.name: str = name
self.function_handler: Callable[[str],Any] = function_handler
self.value: Any = value
self.default: Any = default
class ILPPipelineConfig:
def __init__(self):
self.ilp_params: ILPParams
self.cmd_options: Dict[str, ILPPipelineParameter]
self.voi_indices: List[int] = []
self.pipeline_driver_input_path: str = "../json/pipeline_driver_input_template.json"
self.run_id_input_flag: bool = False
self.run_id_dir_path = '../../.ilp_code/'
self.run_id: str = '100'
self.sub_run_id: int = 1
self.run_output_dir_path: str = ''
self.experiment_dicts: Dict[str,List[ILPVoiExperiment]] = {}
self.output_dir_paths: List[str] = []
        self.all_training_similar_sets_dict: Dict[str, Dict[str, int]] = {}
self.logger: str = ''
self.test_successful: bool = False
def reset_run_id(self):
#increment run_id
with open(os.path.join(self.run_id_dir_path, 'run_id_file.txt'), 'w') as run_id_file:
run_id_file.write(str(int(self.run_id) + 1))
def create_output_dirs(self):
'''
Create an output directory
'''
create_dir((self.ilp_params.path_to_root+'/src/output_data'),False)
create_dir(self.ilp_params.path_to_output,False)
if self.run_id_input_flag == False:
get_run_id_lock = Lock()
create_dir(self.run_id_dir_path, False)
runid_file_path = os.path.join(self.run_id_dir_path, 'run_id_file.txt')
if os.path.exists(runid_file_path) == False:
with open(runid_file_path, 'w') as f:
f.write(str(self.run_id))
with get_run_id_lock:
with open(runid_file_path, 'r') as run_id_file:
self.run_id = run_id_file.read()
self.reset_run_id()
self.run_output_dir_path = os.path.join(self.ilp_params.path_to_output, '{d}_{r}_{t}'.format(d=self.ilp_params.dataset, r=self.run_id, t=(datetime.now(timezone('US/Pacific')).strftime('_%m%d%Y_%H:%M:%S_%z'))))
create_dir(self.run_output_dir_path, True)
def save_log_preproc_data_debug_mode(self, pre_processed_data:ILPPreProcData):
'''
Configure logger
'''
self.logger = logging.getLogger('ilp_pipeline_driver')
self.logger.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
'''
Save logging to file in debug mode
'''
if self.ilp_params.mode == RunMode.DEBUG:
# create file handler and set level to debug
fh = logging.FileHandler(os.path.join(self.run_output_dir_path,'ilp_pipeline_logging.log'),'w')
fh.setLevel(logging.DEBUG)
# add formatter to fh
fh.setFormatter(formatter)
# add fh to logger
self.logger.addHandler(fh)
'''
Save preprocessed data in debug mode
'''
if self.ilp_params.persist_data == True and self.ilp_params.mode == RunMode.DEBUG:
try:
dump_json(pre_processed_data.__dict__, os.path.join(self.run_output_dir_path,'pre_processed_data.json'))
except ValueError:
raise ILPError('Output directory needs to be created before saving pre-processed data.')
self.logger.info("pre-processed data loaded to JSON")
def help_handler(self, arg: List[str]):
print('\n-- use default values for all testing parameters --')
print('ilp_pipeline_driver.py\n')
print("-- modify specific parameters --")
print("ilp_pipeline_driver.py --dataset --root-path --metadata-path --output-path --num-vois --training-sets-sizes --minimum-ranking-thresholds --solvers-apis --weight-approx-resolutions --num-cores --persist-data --mode --eval-data-mode --training-data-mode --gurobi-outputflag --time-limit --num-threads --eval-size --eval-param\n")
print('-- create a new json input file using current testing parameters: [-c or --create] (avoid spaces in list params) --')
print('ilp_pipeline_driver.py -c <inputfile.json>\n')
print('-- use a pre-written json input file: [-i or --ifile] --')
print('ilp_pipeline_driver.py -i <inputfile.json>\n')
print('-- enter parameters one by one (for users unfamiliar with required input): [--manual] --')
print('ilp_pipeline_driver.py --manual\n')
print('-- specify your run label: [--runid] --')
print('ilp_pipeline_driver.py --runid <run_label>\n')
print('-- specify your vertices of interest: [--indices] --')
print('ilp_pipeline_driver.py --indices <[voi1,voi2,..,voi_n]>\n')
sys.exit(0)
def ifile_handler(self, arg: List[str]):
ilp_params = ILPParams()
ilp_params.load_data(arg[0])
self.ilp_params = ilp_params
self.pipeline_driver_input_path = arg[0]
def runid_handler(self, arg: List[str]):
self.run_id_input_flag = True
self.run_id = arg[0]
def dataset_handler(self, arg: List[str]):
self.ilp_params.dataset = arg[0]
def path_to_root_handler(self, arg: List[str]):
self.ilp_params.path_to_root = arg[0]
def path_to_metadata_handler(self, arg: List[str]):
self.ilp_params.path_to_metadata = arg[0]
def path_to_output_handler(self, arg: List[str]):
self.ilp_params.path_to_output = arg[0]
def num_vois_handler(self, arg: List[str]):
self.ilp_params.num_vois = int(arg[0])
def training_sizes_handler(self, arg: List[str]):
self.ilp_params.training_sets_sizes = [int(i) for i in arg[0].strip().strip('[]').split(',')]
def threshold_handler(self, arg: List[str]):
self.ilp_params.minimum_ranking_thresholds = [int(i) if (i != 'None') else None for i in arg[0].strip().strip('[]').split(',')]
def solver_api_handler(self, arg: List[str]):
self.ilp_params.solvers_and_apis = [[i for i in j.split(',')] for j in arg[0].strip().strip('[]').split('],[')]
def weight_approx_resolution_handler(self, arg: List[str]):
self.ilp_params.weight_approx_resolutions = [float(i) if (i != 'None') else None for i in arg[0].strip().strip('[]').split(',')]
def num_cores_handler(self, arg: List[str]):
self.ilp_params.num_cores = int(arg[0])
def persist_data_handler(self, arg: List[str]):
self.ilp_params.persist_data = True if arg[0].lower()=='true' else False
def mode_handler(self, arg: List[str]):
self.ilp_params.mode = RunMode(arg[0].upper())
def eval_data_mode_handler(self, arg: List[str]):
self.ilp_params.eval_data_mode = DataInputMode(arg[0].upper())
def training_data_mode_handler(self, arg: List[str]):
self.ilp_params.training_data_mode = DataInputMode(arg[0].upper())
def gurobi_outputflag_handler(self, arg: List[str]):
self.ilp_params.gurobi_outputflag = int(arg[0])
def time_limit_handler(self, arg: List[str]):
self.ilp_params.time_limit = float(arg[0])
def num_threads_handler(self, arg: List[str]):
self.ilp_params.num_threads = int(arg[0])
def eval_size_handler(self, arg: List[str]):
self.ilp_params.eval_size = int(arg[0])
def eval_param_handler(self, arg: List[str]):
self.ilp_params.eval_param = EvalParam(arg[0])
def create_handler(self, arg: List[str]):
drive_archive_dir = os.path.join(self.ilp_params.path_to_root, 'src/driver_input_temp')
create_dir(drive_archive_dir,False)
self.pipeline_driver_input_path = os.path.join(drive_archive_dir, arg[0])
with open(self.pipeline_driver_input_path, 'w') as json_file:
json_file.write(self.ilp_params.__repr__())
def manual_input_handler(self, arg: List[str]):
ilp_params = ILPParams()
pipeline_driver_input_path = input_from_cmd(ilp_params)
self.ilp_params = ilp_params
self.pipeline_driver_input_path = pipeline_driver_input_path
def indices_handler(self, arg: List[str]):
voi_indices = [int(i) for i in arg[0].strip().strip('[]').split(',')]
self.voi_indices = voi_indices
self.ilp_params.num_vois = len(voi_indices)
def cmd_update(self,opt:str,arg: str):
self.cmd_options[opt].function_handler([arg])
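# Illustrative sketch (not part of the original module): one way the cmd_options table could
# be wired to the handlers above; the option names are assumptions based on help_handler, and
# ifile_handler (or manual_input_handler) must run first so that self.ilp_params exists.
#   config = ILPPipelineConfig()
#   config.cmd_options = {
#       '-i': ILPPipelineParameter('ifile', config.ifile_handler),
#       '--dataset': ILPPipelineParameter('dataset', config.dataset_handler),
#   }
#   config.cmd_update('-i', 'pipeline_driver_input.json')
#   config.cmd_update('--dataset', 'my_dataset')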
| 42.551887 | 342 | 0.651923 |
79471059b35427cd96ca20cc6bb060b2e13e059e | 2,290 | py | Python | lncrawl/sources/tapread.py | betabeast12/lightnovel-crawler | 215b20846a71ad37ec893799cf684c76af707e62 | [
"Apache-2.0"
] | 2 | 2021-05-01T23:21:35.000Z | 2022-01-15T20:05:17.000Z | lncrawl/sources/tapread.py | dragonroad99/lncrawlod | a2f5e8fb115822d855baa992ecc65741822e315b | [
"Apache-2.0"
] | 4 | 2021-03-31T20:08:34.000Z | 2021-12-13T20:49:55.000Z | lncrawl/sources/tapread.py | dragonroad99/lncrawlod | a2f5e8fb115822d855baa992ecc65741822e315b | [
"Apache-2.0"
] | 1 | 2020-05-21T23:40:47.000Z | 2020-05-21T23:40:47.000Z | # -*- coding: utf-8 -*-
import logging
from urllib.parse import urlparse
from ..utils.crawler import Crawler
logger = logging.getLogger(__name__)
chapter_list_url = 'https://www.tapread.com/book/contents?bookId=%s'
chapter_url = 'https://www.tapread.com/book/chapter?bookId=%s&chapterId=%s'
class TapreadCrawler(Crawler):
base_url = 'https://www.tapread.com/'
def read_novel_info(self):
        '''Get novel title, author, cover etc'''
logger.debug('Visiting %s', self.novel_url)
soup = self.get_soup(self.novel_url)
self.novel_title = soup.select_one('.book-name').text.strip()
logger.info('Novel title: %s', self.novel_title)
try:
self.novel_cover = self.absolute_url(
soup.select_one('img.bg-img, img.cover-img, .book-img img')['src'])
except Exception:
pass
# end try
logger.info('Novel cover: %s', self.novel_cover)
try:
possible_authors = []
for div in soup.select('.author, .translator'):
possible_authors.append(
': '.join([x.strip() for x in div.text.split(':')]))
# end for
self.novel_author = ', '.join(possible_authors)
except Exception:
pass
# end try
logger.info(self.novel_author)
path = urlparse(self.novel_url).path
book_id = path.split('/')[3]
data = self.get_json(chapter_list_url % book_id)
volumes = set()
for chap in data['result']['chapterList']:
chap_id = chap['chapterNo']
vol_id = (chap_id - 1) // 100 + 1
volumes.add(vol_id)
self.chapters.append({
'id': chap_id,
'volume': vol_id,
'title': chap['chapterName'],
'url': chapter_url % (chap['bookId'], chap['chapterId']),
})
# end for
self.volumes = [{'id': x} for x in volumes]
# end def
def download_chapter_body(self, chapter):
'''Download body of a single chapter and return as clean html format'''
logger.info('Downloading %s', chapter['url'])
data = self.get_json(chapter['url'])
return data['result']['content']
# end def
# end class
| 32.714286 | 83 | 0.567686 |
794711790c51b1f818edfafbd0bdc0ffd8bf4765 | 10,925 | py | Python | compound_disease/compound_treats_disease/edge_prediction_experiment/edge_prediction_experiment.py | ajlee21/snorkeling | 93ca5269199a55ed2093334cb32b6d3120ae3535 | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | compound_disease/compound_treats_disease/edge_prediction_experiment/edge_prediction_experiment.py | ajlee21/snorkeling | 93ca5269199a55ed2093334cb32b6d3120ae3535 | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | compound_disease/compound_treats_disease/edge_prediction_experiment/edge_prediction_experiment.py | ajlee21/snorkeling | 93ca5269199a55ed2093334cb32b6d3120ae3535 | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Compound Treats Disease Edge Prediction
# This notebook is designed to take the next step moving from predicted sentences to edge predictions. After training the discriminator model, each sentences contains a confidence score for the likelihood of mentioning a relationship. Multiple relationships contain multiple sentences, which makes establishing an edge unintuitive. Is taking the max score appropiate for determining existence of an edge? Does taking the mean of each relationship make more sense? The answer towards these questions are shown below.
# In[1]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('matplotlib', 'inline')
import math
import os
import sys
import pandas as pd
from sklearn.metrics import precision_recall_curve, roc_curve, auc
import matplotlib.pyplot as plt
import plotnine as p9
import seaborn as sns
sys.path.append(os.path.abspath('../../../modules'))
from utils.notebook_utils.dataframe_helper import mark_sentence, tag_sentence
# In[2]:
#Set up the environment
username = "danich1"
password = "snorkel"
dbname = "pubmeddb"
#Path subject to change for different os
database_str = "postgresql+psycopg2://{}:{}@/{}?host=/var/run/postgresql".format(username, password, dbname)
os.environ['SNORKELDB'] = database_str
from snorkel import SnorkelSession
session = SnorkelSession()
# In[3]:
from snorkel.learning.pytorch.rnn.utils import candidate_to_tokens
from snorkel.models import Candidate, candidate_subclass
# In[4]:
CompoundDisease = candidate_subclass('CompoundDisease', ['Compound', 'Disease'])
# In[5]:
total_candidates_df = (
pd
.read_csv("input/all_ctd_candidates.tsv.xz", sep="\t")
.sort_values("candidate_id")
)
total_candidates_df.head(2)
# In[6]:
sentence_prediction_df = (
pd
.read_csv("input/all_predicted_ctd_sentences.tsv.xz", sep="\t")
.sort_values("candidate_id")
)
sentence_prediction_df.head(2)
# In[7]:
# DataFrame that combines likelihood scores with each candidate sentence
total_candidates_pred_df = (
total_candidates_df[[
"doid_id", "doid_name",
"drugbank_id", "drug_name",
"text", "hetionet",
"candidate_id", "split"
]]
.merge(sentence_prediction_df, on="candidate_id")
)
#total_candidates_pred_df.to_csv(
# "output/combined_predicted_ctd_sentences.tsv.xz",
# sep="\t", index=False, compression="xz"
#)
total_candidates_pred_df.head(2)
# In[8]:
# DataFrame that groups disease and compound mentions together and takes
# the max, median and mean of each group
grouped_candidates_pred_df=(
total_candidates_pred_df
.groupby(["doid_id", "drugbank_id"], as_index=False)
.agg({
"pred": ['max', 'mean', 'median'],
'hetionet': 'max',
"drug_name": 'first',
"doid_name": 'first',
"split": 'first'
})
)
grouped_candidates_pred_df.head(2)
# In[9]:
grouped_candidates_pred_df.columns = [
"_".join(col)
if col[1] != '' and col[0] not in ['hetionet', 'drug_name', 'doid_name', 'split'] else col[0]
for col in grouped_candidates_pred_df.columns.values
]
grouped_candidates_pred_df.head(2)
# In[10]:
grouped_candidates_pred_subset_df = (
grouped_candidates_pred_df
.query("split==11")
.drop("split", axis=1)
)
grouped_candidates_pred_subset_df.head(2)
# In[11]:
grouped_candidates_pred_subset_df.hetionet.value_counts()
# # Best Sentence Representation Metric
# This section aims to answer the question: What metric (Mean, Max, Median) best predicts Hetionet Edges?
# In[12]:
performance_map = {}
# In[13]:
precision, recall, pr_threshold = precision_recall_curve(
grouped_candidates_pred_subset_df.hetionet,
grouped_candidates_pred_subset_df.pred_max,
)
fpr, tpr, roc_threshold = roc_curve(
grouped_candidates_pred_subset_df.hetionet,
grouped_candidates_pred_subset_df.pred_max,
)
performance_map['max'] = {
"precision":precision, "recall":recall,
"pr_threshold":pr_threshold, "false_pos":fpr,
"true_pos":tpr, "roc_threshold":roc_threshold,
}
# In[14]:
precision, recall, pr_threshold = precision_recall_curve(
grouped_candidates_pred_subset_df.hetionet,
grouped_candidates_pred_subset_df.pred_mean,
)
fpr, tpr, roc_threshold = roc_curve(
grouped_candidates_pred_subset_df.hetionet,
grouped_candidates_pred_subset_df.pred_mean,
)
performance_map['mean'] = {
"precision":precision, "recall":recall,
"pr_threshold":pr_threshold, "false_pos":fpr,
"true_pos":tpr, "roc_threshold":roc_threshold,
}
# In[15]:
precision, recall, pr_threshold = precision_recall_curve(
grouped_candidates_pred_subset_df.hetionet,
grouped_candidates_pred_subset_df.pred_median,
)
fpr, tpr, roc_threshold = roc_curve(
grouped_candidates_pred_subset_df.hetionet,
grouped_candidates_pred_subset_df.pred_median,
)
performance_map['median'] = {
"precision":precision, "recall":recall,
"pr_threshold":pr_threshold, "false_pos":fpr,
"true_pos":tpr, "roc_threshold":roc_threshold,
}
# In[16]:
for key in performance_map:
plt.plot(
performance_map[key]['false_pos'],
performance_map[key]['true_pos'],
label=f"{key}:AUC ({auc(performance_map[key]['false_pos'], performance_map[key]['true_pos']):.3f})"
)
plt.plot([0,1], [0,1], linestyle='--', color='black')
plt.legend()
plt.show()
# In[17]:
for key in performance_map:
plt.plot(
performance_map[key]['recall'],
performance_map[key]['precision'],
label=f"{key}:AUC ({auc(performance_map[key]['recall'], performance_map[key]['precision']):.3f})"
)
plt.legend()
plt.show()
# # Optimal Cutoff Using PR-Curve
# In[18]:
threshold_df = (
pd.DataFrame(
list(
zip(
performance_map['max']['precision'],
performance_map['max']['recall'],
performance_map['max']['pr_threshold']
)
),
columns=["precision", "recall", "pr_threshold"]
)
.sort_values("precision", ascending=False)
)
threshold_df.head(2)
# In[19]:
#precision_thresholds = pd.np.linspace(0,1,num=5)
precision_thresholds = threshold_df.round(2).drop_duplicates("precision").precision.values
# Add the lowest precision rather than
# Keep it zero
precision_thresholds = (
pd.np.where(
precision_thresholds==0,
threshold_df.precision.min(),
precision_thresholds
)
)
performance_records = []
for precision_cutoff in precision_thresholds:
cutoff = (
threshold_df
.query("precision>=@precision_cutoff")
.pr_threshold
.min()
)
values_added = (
grouped_candidates_pred_subset_df
.query("pred_max >= @cutoff")
.hetionet
.value_counts()
)
series_keys = list(values_added.keys())
for key in series_keys:
performance_records.append(
{
"edges": values_added[key],
"in_hetionet": "Existing" if key == 1 else "Novel",
"precision": precision_cutoff,
"sen_cutoff": cutoff
}
)
edges_added_df = (
pd
.DataFrame
.from_records(performance_records)
)
edges_added_df.head(10)
# In[20]:
ax = sns.scatterplot(x="precision", y="edges", hue="in_hetionet", data=edges_added_df)
ax.set(yscale="log")
# In[21]:
edges_added_df.to_csv("output/precision_ctd_edges_added.tsv", index=False, sep="\t")
# # Total Recalled Edges
# How many edges of hetionet can we recall using an equal error rate cutoff score?
# In[23]:
gen_pred_df = (
pd.read_csv("../label_sampling_experiment/results/CtD/marginals/train/22_sampled_train.tsv.xz", sep="\t")
.iloc[:, [0,-1]]
.append(
pd.read_csv("../label_sampling_experiment/results/CtD/marginals/tune/22_sampled_dev.tsv", sep="\t")
.iloc[:, [0,-1]]
)
.append(
pd.read_csv("../label_sampling_experiment/results/CtD/marginals/test/22_sampled_test.tsv", sep="\t")
.iloc[:, [0,-1]]
)
)
gen_pred_df.columns = ["gen_pred", "candidate_id"]
gen_pred_df.head(2)
# In[24]:
(
total_candidates_pred_df.iloc[
total_candidates_pred_df
.groupby(["drugbank_id", "doid_id"], as_index=False)
.agg({
"pred": 'idxmax'
})
.pred
]
.merge(gen_pred_df, on=["candidate_id"])
.assign(edge_type="CtD")
.sort_values("pred", ascending=False)
.head(10)
.sort_values("candidate_id")
.assign(text=lambda x: tag_sentence(x, CompoundDisease))
.merge(total_candidates_df[["n_sentences", "candidate_id"]], on="candidate_id")
.sort_values("pred", ascending=False)
.drop_duplicates()
.assign(hetionet=lambda x: x.hetionet.apply(lambda x: "Existing" if x == 1 else "Novel"))
[["edge_type", "drug_name", "doid_name", "gen_pred", "pred", "n_sentences", "hetionet", "text"]]
.to_csv("output/top_ten_edge_predictions.tsv", sep="\t", index=False, float_format="%.3g")
)
# In[25]:
datarows = []
fpr, tpr, threshold = roc_curve(
grouped_candidates_pred_df.hetionet.values,
grouped_candidates_pred_df.pred_max.values
)
fnr = 1 - tpr
optimal_threshold = threshold[pd.np.nanargmin(pd.np.absolute((fnr - fpr)))]
datarows.append({
"recall":(
grouped_candidates_pred_df
.query("pred_max > @optimal_threshold")
.hetionet
.value_counts()[1] /
grouped_candidates_pred_df
.hetionet.
value_counts()[1]
),
"edges":(
grouped_candidates_pred_df
.query("pred_max > @optimal_threshold")
.hetionet
.value_counts()[1]
),
"in_hetionet": "Existing",
"total": int(grouped_candidates_pred_df.hetionet.value_counts()[1]),
"relation":"CtD"
})
datarows.append({
"edges":(
grouped_candidates_pred_df
.query("pred_max > @optimal_threshold")
.hetionet
.value_counts()[0]
),
"in_hetionet": "Novel",
"relation":"CtD"
})
edges_df = pd.DataFrame.from_records(datarows)
edges_df
# In[26]:
g = (
p9.ggplot(edges_df, p9.aes(x="relation", y="edges", fill="in_hetionet"))
+ p9.geom_col(position="dodge")
+ p9.geom_text(
p9.aes(
label=(
edges_df
.apply(
lambda x:
f"{x['edges']} ({x['recall']*100:.0f}%)"
if not math.isnan(x['recall']) else
f"{x['edges']}",
axis=1
)
)
),
position=p9.position_dodge(width=1),
size=9,
va="bottom"
)
+ p9.scale_y_log10()
+ p9.theme(
axis_text_y=p9.element_blank(),
axis_ticks_major = p9.element_blank(),
rect=p9.element_blank()
)
)
print(g)
| 23.596112 | 515 | 0.660137 |
794712af13447b27948a2be27b8ed594d55572aa | 9,275 | py | Python | src/geneflow/extend/data_manager_contexts.py | 82ndAirborneDiv/geneflow2 | 521544adbae1221d2d80496016548670ce5391c1 | [
"Apache-2.0"
] | null | null | null | src/geneflow/extend/data_manager_contexts.py | 82ndAirborneDiv/geneflow2 | 521544adbae1221d2d80496016548670ce5391c1 | [
"Apache-2.0"
] | null | null | null | src/geneflow/extend/data_manager_contexts.py | 82ndAirborneDiv/geneflow2 | 521544adbae1221d2d80496016548670ce5391c1 | [
"Apache-2.0"
] | null | null | null | """
This module contains data management extension functions for various contexts.
"""
import glob
import os
import shutil
from geneflow.log import Log
from geneflow.uri_parser import URIParser
from geneflow.extend.agave_wrapper import AgaveWrapper
### Local data management functions and move/copy with Local as source
def _list_local(uri, local=None):
"""
List contents of local URI.
Args:
uri: parsed URI to list.
local: local context options.
Returns:
On success: a list of filenames (basenames only).
On failure: False.
"""
try:
file_list = [
os.path.basename(item) for item in glob.glob(
uri['chopped_path']+'/*'
)
]
except OSError as err:
Log.an().error(
'cannot get file list for uri: %s [%s]',
uri['chopped_uri'], str(err)
)
return False
return file_list
def _exists_local(uri, local=None):
"""
Check if local URI exists.
Args:
uri: parsed URI to check.
local: local context options.
Returns:
True if the URI exists, False otherwise.
"""
return os.path.exists(uri['chopped_path'])
def _mkdir_local(uri, local=None):
"""
Create local directory specified by URI.
Args:
uri: parsed URI to create.
local: local context options.
Returns:
On success: True.
On failure: False.
"""
try:
os.makedirs(uri['chopped_path'])
except OSError as err:
Log.an().error(
'cannot create uri: %s [%s]', uri['chopped_uri'], str(err)
)
return False
return True
def _mkdir_recursive_local(uri, local=None):
"""
Recursively create local directory specified by URI.
Args:
uri: parsed URI to create.
local: local context options.
Returns:
On success: True.
On failure: False.
"""
# same as the non-recursive call
return _mkdir_local(uri, local)
def _delete_local(uri, local=None):
"""
Delete local file/folder specified by URI.
Args:
uri: parsed URI to delete.
local: local context options.
Returns:
On success: True.
On failure: False.
"""
try:
shutil.rmtree(uri['chopped_path'])
except OSError as err:
Log.an().error(
'cannot delete uri: %s [%s]', uri['chopped_uri'], str(err)
)
return False
return True
def _copy_local_local(src_uri, dest_uri, local=None):
"""
Copy local data with system shell.
Args:
src_uri: Source URI parsed into dict with URIParser.
dest_uri: Destination URI parsed into dict with URIParser.
local: local context options.
Returns:
On success: True.
On failure: False.
"""
try:
shutil.copytree(
src_uri['path'],
dest_uri['path']
)
except OSError as err:
Log.an().error(
'cannot copy from %s to %s [%s]',
src_uri['uri'],
dest_uri['uri'],
str(err)
)
return False
return True
def _move_local_local(src_uri, dest_uri, local=None):
"""
Move local data with system shell.
Args:
src_uri: Source URI parsed into dict with URIParser.
dest_uri: Destination URI parsed into dict with URIParser.
local: local context options.
Returns:
On success: True.
On failure: False.
"""
try:
shutil.move(
src_uri['path'],
dest_uri['path']
)
except OSError as err:
Log.an().error(
'cannot move from %s to %s [%s]',
src_uri['uri'],
dest_uri['uri'],
str(err)
)
return False
return True
### Agave data management functions and move/copy with Agave as source
def _list_agave(uri, agave):
"""
List contents of agave URI.
Args:
uri: parsed URI to list.
agave: dict that contains:
agave_wrapper: Agave wrapper object.
Returns:
On success: a list of filenames (basenames only).
On failure: False.
"""
file_list = agave['agave_wrapper'].files_list(uri['authority'], uri['chopped_path'])
if file_list is False:
Log.an().error(
'cannot get file list for uri: %s', uri['chopped_uri']
)
return False
return [file['name'] for file in file_list]
def _exists_agave(uri, agave):
"""
Check if agave URI exists.
Args:
uri: parsed URI to check.
agave: dict that contains:
agave_wrapper: Agave wrapper object.
Returns:
True if the URI exists, False if it doesn't or if there's an error.
"""
if agave['agave_wrapper'].files_exist(uri['authority'], uri['chopped_path']) is False:
return False
return True
def _mkdir_agave(uri, agave):
"""
Create agave directory specified by URI.
Args:
uri: parsed URI to create.
agave: dict that contains:
agave_wrapper: Agave wrapper object.
Returns:
On success: True.
On failure: False.
"""
if not agave['agave_wrapper'].files_mkdir(uri['authority'], uri['folder'], uri['name']):
Log.an().error(
'cannot create folder at uri: %s', uri['chopped_uri']
)
return False
return True
def _mkdir_recursive_agave(uri, agave):
"""
Recursively create agave directory specified by URI.
Args:
uri: parsed URI to create.
agave: dict that contains:
agave_wrapper: Agave wrapper object.
Returns:
On success: True.
On failure: False.
"""
if uri['folder'] != '/':
# make sure parent folder exists first
parent_uri = URIParser.parse(
'{}://{}{}'.format(
uri['scheme'], uri['authority'], uri['folder']
)
)
if not _exists_agave(parent_uri, agave):
# parent folder does not exist, create
if not _mkdir_recursive_agave(parent_uri, agave):
Log.an().error(
'cannot create parent folder at uri: %s',
parent_uri['chopped_uri']
)
return False
return _mkdir_agave(uri, agave)
def _delete_agave(uri, agave):
"""
Delete agave file/folder specified by URI.
Args:
uri: parsed URI to delete.
agave: dict that contains:
agave_wrapper: Agave wrapper object.
Returns:
On success: True.
On failure: False.
"""
if not agave['agave_wrapper'].files_delete(uri['authority'], uri['chopped_path']):
Log.an().error('cannot delete uri: %s', uri['chopped_path'])
return False
return True
def _copy_agave_agave(src_uri, dest_uri, agave):
"""
Copy Agave data using AgavePy Wrapper.
Args:
src_uri: Source URI parsed into dict with URIParser.
dest_uri: Destination URI parsed into dict with URIParser.
agave: dict that contains:
agave_wrapper: Agave wrapper object.
Returns:
On success: True.
On failure: False.
"""
if not agave['agave_wrapper'].files_import_from_agave(
dest_uri['authority'],
dest_uri['folder'],
dest_uri['name'],
src_uri['uri']
):
Log.an().error(
'cannot copy from %s to %s',
src_uri['uri'],
dest_uri['uri']
)
return False
return True
# local/Agave mixed methods
def _copy_local_agave(src_uri, dest_uri, agave, local=None):
"""
Copy local data to Agave using AgavePy Wrapper.
Args:
src_uri: Source URI parsed into dict with URIParser.
dest_uri: Destination URI parsed into dict with URIParser.
agave: dict that contains:
agave_wrapper: Agave wrapper object.
Returns:
On success: True.
On failure: False.
"""
if not agave['agave_wrapper'].files_import_from_local(
dest_uri['authority'],
dest_uri['folder'],
dest_uri['name'],
src_uri['chopped_path']
):
Log.an().error(
'cannot copy from %s to %s',
src_uri['uri'],
dest_uri['uri']
)
return False
return True
def _copy_agave_local(src_uri, dest_uri, agave, local=None):
"""
Copy Agave data to a local destination using AgavePy Wrapper.
Args:
src_uri: Source URI parsed into dict with URIParser.
dest_uri: Destination URI parsed into dict with URIParser.
agave: dict that contains:
agave_wrapper: Agave wrapper object.
Returns:
On success: True.
On failure: False.
"""
if not agave['agave_wrapper'].files_download(
src_uri['authority'],
src_uri['chopped_path'],
dest_uri['chopped_path'],
-1
):
Log.an().error(
'cannot copy from %s to %s',
src_uri['uri'],
dest_uri['uri']
)
return False
return True
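# Illustrative sketch (not part of the original module), assuming URIParser.parse() accepts a
# plain local path and fills the fields these helpers read ('path', 'chopped_path', 'chopped_uri'):
#   src = URIParser.parse('/tmp/geneflow-example/src')
#   dest = URIParser.parse('/tmp/geneflow-example/dest')
#   if not _exists_local(src):
#       _mkdir_recursive_local(src)
#   _copy_local_local(src, dest)
#   print(_list_local(dest))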
| 22.788698 | 92 | 0.570135 |
7947148c3dc373d9f56992cc048c15d158fab7b0 | 742 | py | Python | hello_app/plotly/__init__.py | agaitskellwork/flask2 | d1de6baf721f3a4d7ccf8afbc39e08683eb7642d | [
"MIT"
] | null | null | null | hello_app/plotly/__init__.py | agaitskellwork/flask2 | d1de6baf721f3a4d7ccf8afbc39e08683eb7642d | [
"MIT"
] | null | null | null | hello_app/plotly/__init__.py | agaitskellwork/flask2 | d1de6baf721f3a4d7ccf8afbc39e08683eb7642d | [
"MIT"
] | null | null | null | """Initialize Flask app."""
from ddtrace import patch_all
from flask import Flask
from flask_assets import Environment
patch_all()
def init_app():
"""Construct core Flask application with embedded Dash app."""
app = Flask(__name__, instance_relative_config=False)
app.config.from_object("config.Config")
assets = Environment()
assets.init_app(app)
with app.app_context():
# Import parts of our core Flask app
from . import routes
from .assets import compile_static_assets
# Import Dash application
from .plotlydash.dashboard import init_dashboard
app = init_dashboard(app)
# Compile static assets
compile_static_assets(assets)
return app
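# Illustrative usage sketch (not part of the original module), assuming a standard WSGI entry
# point; the module path and port below are assumptions based on this repo layout:
#   from hello_app.plotly import init_app
#   app = init_app()
#   app.run(host="0.0.0.0", port=8050)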
| 24.733333 | 66 | 0.692722 |
7947159b76871693b28d5bca513dbd85fdfa6e77 | 2,220 | py | Python | odds/scraper.py | nik849/Odds | a2403e5f5428fcf826322b59410471ec97a6aa26 | [
"MIT"
] | 1 | 2017-11-05T20:41:12.000Z | 2017-11-05T20:41:12.000Z | odds/scraper.py | nik849/Odds | a2403e5f5428fcf826322b59410471ec97a6aa26 | [
"MIT"
] | 2 | 2021-03-31T18:43:15.000Z | 2021-12-13T19:46:28.000Z | odds/scraper.py | nik849/Odds | a2403e5f5428fcf826322b59410471ec97a6aa26 | [
"MIT"
] | null | null | null | import pandas
import requests
class scrape:
"""
    Scraping class, built on the requests library.
"""
def __init__(self, url):
"""
Initialiser for scrape class.
"""
self.URL = url
def _get(self, port=None):
"""
Method to scrape a single page and return a json object.
:param port: Port for access Optional
:return: text object containing scraped data
"""
req_str = self.URL
response = requests.get(url=req_str)
assert response.status_code == 200
return response.text
def get_odds_html(self, config=None):
"""
Method to return a Dataframe object of scraped results.
:param config: search criteria, type dict
:return: Pandas Dataframe object
"""
scr_data = pandas.read_html(self._get())[4]
if config:
if config['query'] is not None:
data = scr_data
return data
else:
return scr_data.fillna(value='').to_html()
def get_odds_obj(self, config=None):
"""
Method to return a Dataframe object of scraped results.
:param config: search criteria, type dict
:return: Pandas Dataframe object
"""
df = pandas.read_html(self._get())[4]
indexes = df[df[0].str.contains(' - ', na=False) |
df[0].isnull()].index.tolist()
df_dict = {}
for i in list(range(len(indexes)-1)):
current = indexes[i]
nex = indexes[i + 1]
df_dict[str(df.ix[indexes[i], 0])] = df[current:nex]
if config:
if config['query'] is not None:
data = df_dict
return data
else:
return df_dict
def download(self, config=None):
"""
Method to download .csv file of scraped results
:param config: search criteria, type dict
:return: Pandas Dataframe object
"""
scr_data = pandas.read_html(self._get())[4]
if config:
if config['query'] is not None:
data = scr_data
return data
else:
return scr_data
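# Illustrative usage sketch (not part of the original module); the URL is a placeholder and
# must point at a page whose fifth HTML table holds the odds, as _get()/read_html above assume:
#   s = scrape('https://example.com/odds-page')
#   tables_by_match = s.get_odds_obj()
#   html_snippet = s.get_odds_html()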
| 29.6 | 64 | 0.541441 |
794715a9a7dee6c86ae10497f790c75abeafa000 | 2,589 | py | Python | caffe2/python/modifier_context.py | KevinKecc/caffe2 | a2b6c6e2f0686358a84277df65e9489fb7d9ddb2 | [
"Apache-2.0"
] | 585 | 2015-08-10T02:48:52.000Z | 2021-12-01T08:46:59.000Z | caffe2/python/modifier_context.py | mingzhe09088/caffe2 | 8f41717c46d214aaf62b53e5b3b9b308b5b8db91 | [
"Apache-2.0"
] | 27 | 2018-04-14T06:44:22.000Z | 2018-08-01T18:02:39.000Z | caffe2/python/modifier_context.py | mingzhe09088/caffe2 | 8f41717c46d214aaf62b53e5b3b9b308b5b8db91 | [
"Apache-2.0"
] | 183 | 2015-08-10T02:49:04.000Z | 2021-12-01T08:47:13.000Z | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# @package modifier_context
# Module caffe2.python.modifier_context
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
DEFAULT_MODIFIER = 'DEFAULT'
class ModifierContext(object):
"""
provide context to allow param_info to have different modifiers
"""
def __init__(self):
self._modifiers = {}
self._modifiers_list = []
def _rebuild_modifiers(self):
self._modifiers = {}
for m in self._modifiers_list:
self._modifiers.update(m)
def _has_modifier(self, name):
return name in self._modifiers
def _get_modifier(self, name):
return self._modifiers.get(name)
def push_modifiers(self, modifiers):
# modifier override is allowed
self._modifiers_list.append(modifiers)
self._modifiers.update(modifiers)
def pop_modifiers(self):
assert len(self._modifiers_list) > 0
self._modifiers_list.pop()
self._rebuild_modifiers()
class UseModifierBase(object):
'''
context class to allow setting the current context.
    Example usage with layer:
modifiers = {'modifier1': modifier1, 'modifier2': modifier2}
with Modifiers(modifiers):
modifier = ModifierContext.current().get_modifier('modifier1')
layer(modifier=modifier)
'''
def __init__(self, modifier_or_dict):
if isinstance(modifier_or_dict, dict):
self._modifiers = modifier_or_dict
else:
self._modifiers = {DEFAULT_MODIFIER: modifier_or_dict}
def _context_class(self):
raise NotImplementedError
def __enter__(self):
self._context_class().current().push_modifiers(self._modifiers)
return self
def __exit__(self, type, value, traceback):
self._context_class().current().pop_modifiers()
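# Illustrative sketch (not part of the original module): a minimal concrete pair built on the
# two base classes above. Real users subclass these the way caffe2's regularizer/optimizer
# contexts do; the names below are made up for illustration only.
class _DemoModifierContext(ModifierContext):
    _instance = None
    @classmethod
    def current(cls):
        # lazily create a single shared context instance
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance
class UseDemoModifiers(UseModifierBase):
    def _context_class(self):
        return _DemoModifierContext
# Hypothetical usage:
#   with UseDemoModifiers({'m': object()}):
#       assert _DemoModifierContext.current()._has_modifier('m')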
| 31.192771 | 78 | 0.677868 |
79471627f96f2d3da751e1335163ae36d5daae5a | 195 | py | Python | app/api/__init__.py | typ0520/flask-blog | 198898c2576962fb635e518dd32147b56cc3ba5f | [
"Apache-2.0"
] | 1 | 2017-08-07T10:49:49.000Z | 2017-08-07T10:49:49.000Z | app/api/__init__.py | typ0520/flask-blog | 198898c2576962fb635e518dd32147b56cc3ba5f | [
"Apache-2.0"
] | null | null | null | app/api/__init__.py | typ0520/flask-blog | 198898c2576962fb635e518dd32147b56cc3ba5f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'typ0520'
from flask import Blueprint
api = Blueprint('api', __name__)
from . import authentication, posts, users, comments, errors
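# Illustrative sketch (not part of the original module): the blueprint defined above is
# typically registered on the Flask application elsewhere in the package; the URL prefix
# below is an assumption:
#   app.register_blueprint(api, url_prefix='/api/v1.0')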
| 19.5 | 60 | 0.707692 |
7947162fae6fa8268965ea38631a7f9a631f0b3f | 686 | py | Python | app/core/migrations/0003_ingredient.py | arona504/recipe-api | 27acd3d1b1908248cb5e53d78e911f8abaec8fc4 | [
"MIT"
] | null | null | null | app/core/migrations/0003_ingredient.py | arona504/recipe-api | 27acd3d1b1908248cb5e53d78e911f8abaec8fc4 | [
"MIT"
] | null | null | null | app/core/migrations/0003_ingredient.py | arona504/recipe-api | 27acd3d1b1908248cb5e53d78e911f8abaec8fc4 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2020-04-21 17:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 28.583333 | 118 | 0.618076 |
79471647145c001f33f3289666fe54f4eb1e45aa | 444 | py | Python | raiden/messages/cmdid.py | luehrsFred/raiden | a1b118ebe14badb1acd0744b2d7f2b39f8ba5313 | [
"MIT"
] | null | null | null | raiden/messages/cmdid.py | luehrsFred/raiden | a1b118ebe14badb1acd0744b2d7f2b39f8ba5313 | [
"MIT"
] | 69 | 2020-07-21T05:49:21.000Z | 2022-03-08T18:09:44.000Z | raiden/messages/cmdid.py | luehrsFred/raiden | a1b118ebe14badb1acd0744b2d7f2b39f8ba5313 | [
"MIT"
] | null | null | null | import enum
@enum.unique
class CmdId(enum.Enum):
""" Identifier for off-chain messages.
These magic numbers are used to identify the type of a message.
"""
PROCESSED = 0
PING = 1
PONG = 2
SECRETREQUEST = 3
UNLOCK = 4
LOCKEDTRANSFER = 7
REFUNDTRANSFER = 8
REVEALSECRET = 11
DELIVERED = 12
LOCKEXPIRED = 13
WITHDRAW_REQUEST = 15
WITHDRAW_CONFIRMATION = 16
WITHDRAW_EXPIRED = 17
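    # Illustrative note (not part of the original module): the enum maps both ways,
    # e.g. CmdId.PROCESSED.value == 0 and CmdId(11) is CmdId.REVEALSECRET.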
| 18.5 | 67 | 0.644144 |
794716c12e2c51044e5d52eb20108781b83aa94a | 981 | py | Python | sample_project/env/lib/python3.9/site-packages/qtpy/Qt3DInput.py | Istiakmorsalin/ML-Data-Science | 681e68059b146343ef55b0671432dc946970730d | [
"MIT"
] | 4 | 2021-11-19T03:25:13.000Z | 2022-02-24T15:32:30.000Z | sample_project/env/lib/python3.9/site-packages/qtpy/Qt3DInput.py | Istiakmorsalin/ML-Data-Science | 681e68059b146343ef55b0671432dc946970730d | [
"MIT"
] | null | null | null | sample_project/env/lib/python3.9/site-packages/qtpy/Qt3DInput.py | Istiakmorsalin/ML-Data-Science | 681e68059b146343ef55b0671432dc946970730d | [
"MIT"
] | 3 | 2020-08-04T02:48:32.000Z | 2020-08-17T01:20:09.000Z | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © 2009- The Spyder Development Team
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
# -----------------------------------------------------------------------------
"""Provides Qt3DInput classes and functions."""
# Local imports
from . import PYQT5, PYSIDE2, PythonQtError, PYSIDE_VERSION
from .py3compat import PY2
if PYQT5:
from PyQt5.Qt3DInput import *
elif PYSIDE2:
if not PY2 or (PY2 and PYSIDE_VERSION < '5.12.4'):
# https://bugreports.qt.io/projects/PYSIDE/issues/PYSIDE-1026
import PySide2.Qt3DInput as __temp
import inspect
for __name in inspect.getmembers(__temp.Qt3DInput):
globals()[__name[0]] = __name[1]
else:
raise PythonQtError('A bug in Shiboken prevents this')
else:
raise PythonQtError('No Qt bindings could be found')
| 36.333333 | 80 | 0.561672 |
794717b6ae039d7359e14e4b2ff18feac4c29ec6 | 8,265 | py | Python | bert-sst2/bert_sst2.py | yyxx1997/pytorch | 3d9fca2119c260317c06b11aa8bdb9b29b4c8cca | [
"MIT"
] | 1 | 2022-02-17T11:34:02.000Z | 2022-02-17T11:34:02.000Z | bert-sst2/bert_sst2.py | yyxx1997/pytorch | 3d9fca2119c260317c06b11aa8bdb9b29b4c8cca | [
"MIT"
] | null | null | null | bert-sst2/bert_sst2.py | yyxx1997/pytorch | 3d9fca2119c260317c06b11aa8bdb9b29b4c8cca | [
"MIT"
] | 1 | 2022-03-17T10:36:58.000Z | 2022-03-17T10:36:58.000Z | # -*- coding: utf-8 -*-
# @Time : 2021/1/11 9:09
# @Author : yx
# @File : bert_sst2.py
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
from transformers import BertModel
from tqdm import tqdm
import os
import time
from transformers import BertTokenizer
from transformers import logging
# Set the logging level of the transformers module to suppress unnecessary warnings; it has no effect on training
logging.set_verbosity_error()
# Environment variable: choose which GPUs the program may use. For example,
# if the server has 8 GPUs and you want to use cards 2, 5 and 8, set:
# os.environ["CUDA_VISIBLE_DEVICES"] = "1,4,7"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Define a model tailored to our needs by subclassing nn.Module
class BertSST2Model(nn.Module):
    # Class initializer
def __init__(self, class_size, pretrained_name='bert-base-chinese'):
"""
Args:
            class_size :number of output classes, which fixes the projection size of the final linear classifier
            pretrained_name :name of the pretrained BERT model to load
"""
        # Initialize the parent class (standard boilerplate)
super(BertSST2Model, self).__init__()
        # Load the HuggingFace BertModel
        # The final output dimension of BertModel defaults to 768
        # return_dict=True makes the BERT output behave like a dict, e.g. bert_output['last_hidden_state']
self.bert = BertModel.from_pretrained(pretrained_name,
return_dict=True)
        # A linear layer maps the [CLS] representation from 768 -> class_size
        # class_size is 2 for the SST-2 sentiment classification task
self.classifier = nn.Linear(768, class_size)
def forward(self, inputs):
        # Unpack the inputs already prepared by the DataLoader:
        # input_ids       :tensor, shape=batch_size*max_len, where max_len is the longest sentence in the batch
        # input_tyi       :tensor, the token type ids
        # input_attn_mask :tensor; input_ids contains many [PAD] tokens, so the attention mask zeroes them out and the model only attends to real tokens
input_ids, input_tyi, input_attn_mask = inputs['input_ids'], inputs[
'token_type_ids'], inputs['attention_mask']
        # Feed all three tensors into the model; how BERT works internally is left for later exploration
output = self.bert(input_ids, input_tyi, input_attn_mask)
        # bert_output has two parts:
        # last_hidden_state: hidden states of the final layer
        # pooler_output: the output for [CLS], used for classification tasks
        # The linear layer maps the dimension 768 -> 2
        # categories_numberic: tensor, shape=batch_size*class_size, used later by the CrossEntropy loss
categories_numberic = self.classifier(output.pooler_output)
return categories_numberic
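        # Illustrative shape note (not part of the original script), under the settings used
        # below: for a batch of 8 sentences, model(inputs) returns raw logits of shape
        # [8, class_size]; no softmax is applied here because nn.CrossEntropyLoss expects logits.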
def save_pretrained(model, path):
    # Save the model: create the folder with os, then write the model file with torch.save()
os.makedirs(path, exist_ok=True)
torch.save(model, os.path.join(path, 'model.pth'))
def load_sentence_polarity(data_path, train_ratio=0.8):
    # For this task the data is only split into train and test (no dev set);
    # the train fraction is given by train_ratio, e.g. train_ratio=0.8 means 80% train and 20% test
    # This function only reads the specified file format; it is a demonstration, not a general loader
all_data = []
    # categories collects the set of distinct class labels (a set removes duplicates)
categories = set()
with open(data_path, 'r', encoding="utf8") as file:
for sample in file.readlines():
            # polar is the sentiment label, currently one of two values:
            # --0: positive
            # --1: negative
            # sent is the corresponding sentence
polar, sent = sample.strip().split("\t")
categories.add(polar)
all_data.append((polar, sent))
length = len(all_data)
train_len = int(length * train_ratio)
train_data = all_data[:train_len]
test_data = all_data[train_len:]
return train_data, test_data, categories
"""
torch provides the DataLoader class to load data automatically.
1. To use torch's DataLoader for the training data, the data must be wrapped in a torch Dataset
2. The Dataset must implement __len__ and __getitem__, otherwise the DataLoader does not know how to fetch the data
This is standard boilerplate required by the API and works for most tasks
"""
class BertDataset(Dataset):
def __init__(self, dataset):
self.dataset = dataset
self.data_size = len(dataset)
def __len__(self):
return self.data_size
def __getitem__(self, index):
        # This can be customized; the DataLoader calls __getitem__(self, index) to fetch samples
        # Returning self.dataset[index] means samples are fetched by position; the DataLoader decides which indices to request
return self.dataset[index]
def coffate_fn(examples):
inputs, targets = [], []
for polar, sent in examples:
inputs.append(sent)
targets.append(int(polar))
inputs = tokenizer(inputs,
padding=True,
truncation=True,
return_tensors="pt",
max_length=512)
targets = torch.tensor(targets)
return inputs, targets
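# Illustrative sketch (not part of the original script): for a batch such as
# [("1", "a fine film"), ("0", "a dull film")], coffate_fn returns (inputs, targets)
# where inputs carries input_ids / token_type_ids / attention_mask, each of shape
# [2, max_len_in_batch], and targets == tensor([1, 0]).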
# Training setup: hyperparameters and global variables
batch_size = 8
num_epoch = 5 # number of training epochs
check_step = 1 # evaluate and save the model every check_step epochs during training
data_path = "./sst2_shuffled.tsv" # path to the data file
train_ratio = 0.8 # fraction of the data used for training
learning_rate = 1e-5 # learning rate for the optimizer
# Load the training data, test data, and the set of class labels
train_data, test_data, categories = load_sentence_polarity(
data_path=data_path, train_ratio=train_ratio)
# Wrap the training and test lists in Dataset objects so the DataLoader can consume them
train_dataset = BertDataset(train_data)
test_dataset = BertDataset(test_data)
"""
DataLoader takes the following main arguments:
Args:
dataset (Dataset): dataset from which to load the data.
batch_size (int, optional): how many samples per batch to load(default: ``1``).
shuffle (bool, optional): set to ``True`` to have the data reshuffled at every epoch (default: ``False``).
    collate_fn : a callback that turns a raw batch into model-ready inputs
DataLoader workflow:
1. Take batch_size samples from the dataset
2. For each batch, run the collate_fn callback to convert it into suitable model input
3. Before the next epoch, shuffle the dataset so the model cannot memorize sample order and overfit
"""
train_dataloader = DataLoader(train_dataset,
batch_size=batch_size,
collate_fn=coffate_fn,
shuffle=True)
test_dataloader = DataLoader(test_dataset,
batch_size=1,
collate_fn=coffate_fn)
# Standard boilerplate; "cuda" means the GPU
# torch.cuda.is_available() reports whether a GPU is currently available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the pretrained model; since this is an English dataset we need an English pretrained model: bert-base-uncased
# "uncased" means the model's vocabulary does not distinguish upper and lower case
# See https://huggingface.co/bert-base-uncased for details
pretrained_model_name = 'bert-base-uncased'
# Build the BertSST2Model
model = BertSST2Model(len(categories), pretrained_model_name)
# Standard boilerplate: move the model onto the device;
# when running on a GPU you can see its memory usage grow at this point
model.to(device)
# Load the tokenizer that matches the pretrained model
tokenizer = BertTokenizer.from_pretrained(pretrained_model_name)
# Training
# Adam is a widely used optimizer; see e.g. https://www.jianshu.com/p/aebcaf8af76e
optimizer = Adam(model.parameters(), learning_rate) # use the Adam optimizer
CE_loss = nn.CrossEntropyLoss() # cross entropy as the loss for this binary classification task
# Record the current time, used for logging and for naming saved checkpoints
timestamp = time.strftime("%m_%d_%H_%M", time.localtime())
# Start training; model.train() switches the model into training mode
model.train()
for epoch in range(1, num_epoch + 1):
    # accumulate the total loss of the current epoch
total_loss = 0
    # tqdm prints a progress bar on the console so training progress is visible
for batch in tqdm(train_dataloader, desc=f"Training Epoch {epoch}"):
        # Iterating tqdm(train_dataloader, desc=f"Training Epoch {epoch}") drives the DataLoader workflow;
        # to see how it works internally, set a breakpoint inside coffate_fn and inspect the processing
        # Call .to(device) on every tensor in the batch,
        # because the model and the data must live on the same device
inputs, targets = [x.to(device) for x in batch]
        # clear any existing gradients
optimizer.zero_grad()
        # forward pass; model(inputs) is equivalent to model.forward(inputs)
bert_output = model(inputs)
        # compute the loss; see e.g. https://zhuanlan.zhihu.com/p/159477597 for notes on cross entropy
loss = CE_loss(bert_output, targets)
        # back-propagate the gradients
loss.backward()
        # update the model parameters from the back-propagated gradients
optimizer.step()
        # accumulate the total loss; .item() extracts the Python number from a tensor
total_loss += loss.item()
    # Evaluation
    # acc counts how many test examples the model classifies correctly
acc = 0
for batch in tqdm(test_dataloader, desc=f"Testing"):
inputs, targets = [x.to(device) for x in batch]
        # with torch.no_grad(): is standard boilerplate;
        # tensor operations inside this block produce no gradients, saving time and memory (evaluation also works without it)
with torch.no_grad():
bert_output = model(inputs)
"""
            .argmax() returns, along the given dim, the index of the largest value in a tensor.
            Suppose bert_output is a 3*2 tensor:
tensor
[
[3.2,1.1],
[0.4,0.6],
[-0.1,0.2]
]
            then bert_output.argmax(dim=1) yields: tensor[0,1,1]
"""
acc += (bert_output.argmax(dim=1) == targets).sum().item()
    # print the accuracy on the test set
print(f"Acc: {acc / len(test_dataloader):.2f}")
if epoch % check_step == 0:
        # save the model
checkpoints_dirname = "bert_sst2_" + timestamp
os.makedirs(checkpoints_dirname, exist_ok=True)
save_pretrained(model,
checkpoints_dirname + '/checkpoints-{}/'.format(epoch))
| 32.667984 | 110 | 0.66824 |
794717b6cf3b92d6c4748c894de5e5a27422406e | 1,078 | py | Python | LC/74.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | 2 | 2018-02-24T17:20:02.000Z | 2018-02-24T17:25:43.000Z | LC/74.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | null | null | null | LC/74.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | null | null | null | class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
m = len(matrix)
n = len(matrix[0]) if m else 0
if m * n == 0:
return False
if target < matrix[0][0] or target > matrix[-1][-1]:
return False
i = 0
j = len(matrix) - 1
if target>=matrix[-1][0]:
return self.searchList(matrix[-1], target)
while i < j - 1:
mid = (i + j) // 2
if matrix[mid][0] == target:
return True
if matrix[mid][0] < target:
i = mid
else:
j = mid
return self.searchList(matrix[i], target)
def searchList(self, l, t):
i = 0
j = len(l) - 1
while i <= j:
m = (i + j) // 2
if l[m] == t:
return True
if l[m] > t:
j = m - 1
else:
i = m + 1
return False
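# Illustrative usage sketch (not part of the original solution); rows are sorted and each
# row starts after the previous one ends, as the binary searches above assume:
#   Solution().searchMatrix([[1, 3, 5], [7, 10, 11], [16, 20, 34]], 10)  # -> True
#   Solution().searchMatrix([[1, 3, 5], [7, 10, 11], [16, 20, 34]], 13)  # -> False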
| 26.292683 | 60 | 0.396104 |
7947187f8c809c12d5b883cb598cfa7083adde50 | 649 | py | Python | snakepit/adapters/pip.py | kk6/pip-require | 09ef6da8a13ec65cdaea653b6d69f8218ee1da2e | [
"MIT"
] | 1 | 2018-08-27T10:11:21.000Z | 2018-08-27T10:11:21.000Z | snakepit/adapters/pip.py | kk6/pip-require | 09ef6da8a13ec65cdaea653b6d69f8218ee1da2e | [
"MIT"
] | 2 | 2015-11-20T06:55:37.000Z | 2015-11-22T17:03:14.000Z | snakepit/adapters/pip.py | kk6/pip-require | 09ef6da8a13ec65cdaea653b6d69f8218ee1da2e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Adapter of pip module."""
from __future__ import absolute_import, division, print_function, unicode_literals
import pip
def install(packages, options):
"""Execute `pip install`.
:param packages: Package name list.
:return: If `pip install` is successful, then return 0 else 1.
"""
return pip.main(["install"] + options + [pkg for pkg in packages])
def uninstall(packages):
"""Execute `pip uninstall`.
:param packages: Package name list.
    :return: If `pip uninstall` is successful, then return 0 else 1.
"""
return pip.main(["uninstall"] + ['-y'] + [pkg for pkg in packages])
| 24.037037 | 82 | 0.657935 |
794718f0f368c542ad42a45410bc21c8376ac316 | 6,003 | py | Python | docs/conf.py | MorrisMA/py65 | bda1553ff88fc577944bde3d7cb3e75a3b83ccfa | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | MorrisMA/py65 | bda1553ff88fc577944bde3d7cb3e75a3b83ccfa | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | MorrisMA/py65 | bda1553ff88fc577944bde3d7cb3e75a3b83ccfa | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# test documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 30 14:39:06 2008.
#
# This file is execfile()d with the current dir set to its containing
# directory.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from datetime import date
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Py65'
year = date.today().year
copyright = u'2008-%d, Mike Naberezny and contributors' % year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.25.dev0'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'testdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('index', 'test.tex', ur'test Documentation',
ur'foo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| 30.784615 | 79 | 0.729635 |
79471a236af4e0e4a170b71a7efd197a63ece32a | 27,131 | py | Python | wgail_info_0/models.py | liubaoryol/InfoGAIL | 89f553da1ed415f8383c0f9e8c3774debc628dcd | [
"MIT"
] | null | null | null | wgail_info_0/models.py | liubaoryol/InfoGAIL | 89f553da1ed415f8383c0f9e8c3774debc628dcd | [
"MIT"
] | null | null | null | wgail_info_0/models.py | liubaoryol/InfoGAIL | 89f553da1ed415f8383c0f9e8c3774debc628dcd | [
"MIT"
] | null | null | null | from utils import *
import numpy as np
import time
import math
import argparse
from keras.initializers import normal, identity, uniform
from keras.models import model_from_json
from keras.models import Sequential, Model
from keras.layers import Dense, BatchNormalization, Activation, Convolution2D, MaxPooling2D, Flatten, Input, merge, Lambda
from keras.layers.advanced_activations import LeakyReLU
from tensorflow.keras.optimizers import Adam, RMSprop
import tensorflow as tf
import keras.backend as K
import json
from tensorflow.keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input
from keras.utils.np_utils import to_categorical
parser = argparse.ArgumentParser(description="TRPO")
parser.add_argument("--paths_per_collect", type=int, default=10)
parser.add_argument("--max_step_limit", type=int, default=300)
parser.add_argument("--min_step_limit", type=int, default=100)
parser.add_argument("--pre_step", type=int, default=100)
parser.add_argument("--n_iter", type=int, default=1000)
parser.add_argument("--gamma", type=float, default=.95)
parser.add_argument("--lam", type=float, default=.97)
parser.add_argument("--max_kl", type=float, default=0.01)
parser.add_argument("--cg_damping", type=float, default=0.1)
parser.add_argument("--lr_discriminator", type=float, default=5e-5)
parser.add_argument("--d_iter", type=int, default=100)
parser.add_argument("--clamp_lower", type=float, default=-0.01)
parser.add_argument("--clamp_upper", type=float, default=0.01)
parser.add_argument("--lr_baseline", type=float, default=1e-4)
parser.add_argument("--b_iter", type=int, default=25)
parser.add_argument("--lr_posterior", type=float, default=1e-4)
parser.add_argument("--p_iter", type=int, default=50)
parser.add_argument("--buffer_size", type=int, default=75)
parser.add_argument("--sample_size", type=int, default=50)
parser.add_argument("--batch_size", type=int, default=500)
args = parser.parse_args()
tf.compat.v1.disable_eager_execution()
class TRPOAgent(object):
config = dict2(paths_per_collect = args.paths_per_collect,
max_step_limit = args.max_step_limit,
min_step_limit = args.min_step_limit,
pre_step = args.pre_step,
n_iter = args.n_iter,
gamma = args.gamma,
lam = args.lam,
max_kl = args.max_kl,
cg_damping = args.cg_damping,
lr_discriminator = args.lr_discriminator,
d_iter = args.d_iter,
clamp_lower = args.clamp_lower,
clamp_upper = args.clamp_upper,
lr_baseline = args.lr_baseline,
b_iter = args.b_iter,
lr_posterior = args.lr_posterior,
p_iter = args.p_iter,
buffer_size = args.buffer_size,
sample_size = args.sample_size,
batch_size = args.batch_size)
def __init__(self, env, sess, feat_dim, aux_dim, encode_dim, action_dim,
img_dim, pre_actions):
self.env = env
self.sess = sess
self.buffer = ReplayBuffer(self.config.buffer_size)
self.feat_dim = feat_dim
self.aux_dim = aux_dim
self.encode_dim = encode_dim
self.action_dim = action_dim
self.img_dim = img_dim
self.pre_actions = pre_actions
self.feats = feats = tf.compat.v1.placeholder(
dtype, shape=[None, feat_dim[0], feat_dim[1], feat_dim[2]]
)
self.auxs = auxs = tf.compat.v1.placeholder(dtype, shape=[None, aux_dim])
self.encodes = encodes = tf.compat.v1.placeholder(dtype, shape=[None, encode_dim])
self.actions = actions = tf.compat.v1.placeholder(dtype, shape=[None, action_dim])
self.advants = advants = tf.compat.v1.placeholder(dtype, shape=[None])
self.oldaction_dist_mu = oldaction_dist_mu = \
tf.compat.v1.placeholder(dtype, shape=[None, action_dim])
self.oldaction_dist_logstd = oldaction_dist_logstd = \
tf.compat.v1.placeholder(dtype, shape=[None, action_dim])
# Create neural network.
print("Now we build trpo generator")
self.generator = self.create_generator(feats, auxs, encodes)
print("Now we build discriminator")
self.discriminator, self.discriminate = \
self.create_discriminator(img_dim, aux_dim, action_dim)
print("Now we build posterior")
self.posterior = \
self.create_posterior(img_dim, aux_dim, action_dim, encode_dim)
self.posterior_target = \
self.create_posterior(img_dim, aux_dim, action_dim, encode_dim)
self.demo_idx = 0
action_dist_mu = self.generator.outputs[0]
# self.action_dist_logstd_param = action_dist_logstd_param = \
# tf.compat.v1.placeholder(dtype, shape=[1, action_dim])
# action_dist_logstd = tf.tile(action_dist_logstd_param,
# tf.pack((tf.shape(action_dist_mu)[0], 1)))
action_dist_logstd = tf.compat.v1.placeholder(dtype, shape=[None, action_dim])
eps = 1e-8
self.action_dist_mu = action_dist_mu
self.action_dist_logstd = action_dist_logstd
N = tf.shape(feats)[0]
# compute probabilities of current actions and old actions
log_p_n = gauss_log_prob(action_dist_mu, action_dist_logstd, actions)
log_oldp_n = gauss_log_prob(oldaction_dist_mu, oldaction_dist_logstd, actions)
ratio_n = tf.exp(log_p_n - log_oldp_n)
Nf = tf.cast(N, dtype)
surr = -tf.reduce_mean(ratio_n * advants) # Surrogate loss
var_list = self.generator.trainable_weights
kl = gauss_KL(oldaction_dist_mu, oldaction_dist_logstd,
action_dist_mu, action_dist_logstd) / Nf
ent = gauss_ent(action_dist_mu, action_dist_logstd) / Nf
self.losses = [surr, kl, ent]
self.pg = flatgrad(surr, var_list)
# KL divergence where first arg is fixed
kl_firstfixed = gauss_selfKL_firstfixed(action_dist_mu,
action_dist_logstd) / Nf
grads = tf.gradients(kl_firstfixed, var_list)
self.flat_tangent = tf.compat.v1.placeholder(dtype, shape=[None])
shapes = list(map(var_shape, var_list))
start = 0
tangents = []
for shape in shapes:
size = np.prod(shape)
param = tf.reshape(self.flat_tangent[start:(start + size)], shape)
tangents.append(param)
start += size
gvp = [tf.reduce_sum(g * t) for (g, t) in zip(grads, tangents)]
self.fvp = flatgrad(gvp, var_list)
self.gf = GetFlat(self.sess, var_list)
self.sff = SetFromFlat(self.sess, var_list)
self.baseline = NNBaseline(sess, feat_dim, aux_dim, encode_dim,
self.config.lr_baseline, self.config.b_iter,
self.config.batch_size)
        self.sess.run(tf.compat.v1.global_variables_initializer())
# Create feature extractor
self.base_model = ResNet50(weights='imagenet', include_top=False)
self.feat_extractor = Model(
input=self.base_model.input,
output=self.base_model.get_layer('activation_40').output
)
def create_generator(self, feats, auxs, encodes):
feats = Input(tensor=feats)
x = Convolution2D(256, 3, 3)(feats)
x = LeakyReLU()(x)
x = Convolution2D(256, 3, 3, subsample=(2, 2))(x)
x = LeakyReLU()(x)
x = Flatten()(x)
auxs = Input(tensor=auxs)
h = merge([x, auxs], mode='concat')
h = Dense(256)(h)
h = LeakyReLU()(h)
h = Dense(128)(h)
encodes = Input(tensor=encodes)
c = Dense(128)(encodes)
h = merge([h, c], mode='sum')
h = LeakyReLU()(h)
steer = Dense(1, activation='tanh', init=lambda shape, name:
normal(shape, scale=1e-4, name=name))(h)
accel = Dense(1, activation='sigmoid', init=lambda shape, name:
normal(shape, scale=1e-4, name=name))(h)
brake = Dense(1, activation='sigmoid', init=lambda shape, name:
normal(shape, scale=1e-4, name=name))(h)
actions = merge([steer, accel, brake], mode='concat')
model = Model(input=[feats, auxs, encodes], output=actions)
return model
def create_discriminator(self, img_dim, aux_dim, action_dim):
imgs = Input(shape=[img_dim[0], img_dim[1], img_dim[2]])
x = Convolution2D(32, 3, 3, subsample=(2, 2))(imgs)
x = LeakyReLU()(x)
x = Convolution2D(64, 3, 3, subsample=(2, 2))(x)
x = LeakyReLU()(x)
x = Convolution2D(128, 3, 3, subsample=(2, 2))(x)
x = LeakyReLU()(x)
x = Flatten()(x)
auxs = Input(shape=[aux_dim])
actions = Input(shape=[action_dim])
h = merge([x, auxs, actions], mode='concat')
h = Dense(256)(h)
h = LeakyReLU()(h)
h = Dense(128)(h)
h = LeakyReLU()(h)
p = Dense(1)(h)
discriminate = Model(input=[imgs, auxs, actions], output=p)
imgs_n = Input(shape=[img_dim[0], img_dim[1], img_dim[2]])
imgs_d = Input(shape=[img_dim[0], img_dim[1], img_dim[2]])
auxs_n = Input(shape=[aux_dim])
auxs_d = Input(shape=[aux_dim])
actions_n = Input(shape=[action_dim])
actions_d = Input(shape=[action_dim])
p_n = discriminate([imgs_n, auxs_n, actions_n])
p_d = discriminate([imgs_d, auxs_d, actions_d])
p_d = Lambda(lambda x: -x)(p_d)
p_output = merge([p_n, p_d], mode='sum')
model = Model(input=[imgs_n, auxs_n, actions_n,
imgs_d, auxs_d, actions_d],
output=p_output)
rmsprop = RMSprop(lr=self.config.lr_discriminator)
model.compile(
# little trick to use Keras predefined lambda loss function
loss=lambda y_pred, p_true: K.mean(y_pred * p_true), optimizer=rmsprop
)
return model, discriminate
def create_posterior(self, img_dim, aux_dim, action_dim, encode_dim):
imgs = Input(shape=[img_dim[0], img_dim[1], img_dim[2]])
x = Convolution2D(32, 3, 3, subsample=(2, 2))(imgs)
x = LeakyReLU()(x)
x = Convolution2D(64, 3, 3, subsample=(2, 2))(x)
x = LeakyReLU()(x)
x = Convolution2D(128, 3, 3, subsample=(2, 2))(x)
x = LeakyReLU()(x)
x = Flatten()(x)
auxs = Input(shape=[aux_dim])
actions = Input(shape=[action_dim])
h = merge([x, auxs, actions], mode='concat')
h = Dense(256)(h)
h = LeakyReLU()(h)
h = Dense(128)(h)
h = LeakyReLU()(h)
c = Dense(encode_dim, activation='softmax')(h)
model = Model(input=[imgs, auxs, actions], output=c)
adam = Adam(lr=self.config.lr_posterior)
model.compile(loss='categorical_crossentropy', optimizer=adam,
metrics=['accuracy'])
return model
def act(self, feats, auxs, encodes, logstds, *args):
action_dist_mu = \
self.sess.run(
self.action_dist_mu,
{self.feats: feats, self.auxs: auxs, self.encodes: encodes}
)
act = action_dist_mu + np.exp(logstds) * \
np.random.randn(*logstds.shape)
act[:, 0] = np.clip(act[:, 0], -1, 1)
act[:, 1] = np.clip(act[:, 1], 0, 1)
act[:, 2] = np.clip(act[:, 2], 0, 1)
return act
def learn(self, demo):
config = self.config
start_time = time.time()
numeptotal = 0
        # Set up for training the discriminator
print("Loading data ...")
imgs_d, auxs_d, actions_d = demo["imgs"], demo["auxs"], demo["actions"]
numdetotal = imgs_d.shape[0]
idx_d = np.arange(numdetotal)
np.random.shuffle(idx_d)
imgs_d = imgs_d[idx_d]
auxs_d = auxs_d[idx_d]
actions_d = actions_d[idx_d]
print("Resizing img for demo ...")
imgs_reshaped_d = []
for i in range(numdetotal):
imgs_reshaped_d.append(np.expand_dims(cv2.resize(imgs_d[i],
(self.img_dim[0], self.img_dim[1])), axis=0))
imgs_d = np.concatenate(imgs_reshaped_d, axis=0).astype(np.float32)
imgs_d = (imgs_d - 128.) / 128.
print("Shape of resized demo images:", imgs_d.shape)
for i in range(38, config.n_iter):
# Generating paths.
# if i == 1:
if i == 38:
paths_per_collect = 30
else:
paths_per_collect = 10
rollouts = rollout_contin(
self.env,
self,
self.feat_extractor,
self.feat_dim,
self.aux_dim,
self.encode_dim,
config.max_step_limit,
config.min_step_limit,
config.pre_step,
paths_per_collect,
self.pre_actions,
self.discriminate,
self.posterior_target)
for path in rollouts:
self.buffer.add(path)
print("Buffer count:", self.buffer.count())
paths = self.buffer.get_sample(config.sample_size)
print("Calculating actions ...")
for path in paths:
path["mus"] = self.sess.run(
self.action_dist_mu,
{self.feats: path["feats"],
self.auxs: path["auxs"],
self.encodes: path["encodes"]}
)
mus_n = np.concatenate([path["mus"] for path in paths])
logstds_n = np.concatenate([path["logstds"] for path in paths])
feats_n = np.concatenate([path["feats"] for path in paths])
auxs_n = np.concatenate([path["auxs"] for path in paths])
encodes_n = np.concatenate([path["encodes"] for path in paths])
actions_n = np.concatenate([path["actions"] for path in paths])
imgs_n = np.concatenate([path["imgs"] for path in paths])
print("Epoch:", i, "Total sampled data points:", feats_n.shape[0])
# Train discriminator
numnototal = feats_n.shape[0]
batch_size = config.batch_size
start_d = self.demo_idx
start_n = 0
if i <= 5:
d_iter = 120 - i * 20
else:
d_iter = 10
for k in range(d_iter):
loss = self.discriminator.train_on_batch(
[imgs_n[start_n:start_n + batch_size],
auxs_n[start_n:start_n + batch_size],
actions_n[start_n:start_n + batch_size],
imgs_d[start_d:start_d + batch_size],
auxs_d[start_d:start_d + batch_size],
actions_d[start_d:start_d + batch_size]],
np.ones(batch_size)
)
# print self.discriminator.summary()
for l in self.discriminator.layers:
weights = l.get_weights()
weights = [np.clip(w, config.clamp_lower, config.clamp_upper)
for w in weights]
l.set_weights(weights)
start_d = self.demo_idx = self.demo_idx + batch_size
start_n = start_n + batch_size
if start_d + batch_size >= numdetotal:
start_d = self.demo_idx = (start_d + batch_size) % numdetotal
if start_n + batch_size >= numnototal:
start_n = (start_n + batch_size) % numnototal
print("Discriminator step:", k, "loss:", loss)
idx = np.arange(numnototal)
np.random.shuffle(idx)
train_val_ratio = 0.7
# Training data for posterior
numno_train = int(numnototal * train_val_ratio)
imgs_train = imgs_n[idx][:numno_train]
auxs_train = auxs_n[idx][:numno_train]
actions_train = actions_n[idx][:numno_train]
encodes_train = encodes_n[idx][:numno_train]
# Validation data for posterior
imgs_val = imgs_n[idx][numno_train:]
auxs_val = auxs_n[idx][numno_train:]
actions_val = actions_n[idx][numno_train:]
encodes_val = encodes_n[idx][numno_train:]
start_n = 0
for j in range(config.p_iter):
loss = self.posterior.train_on_batch(
[imgs_train[start_n:start_n + batch_size],
auxs_train[start_n:start_n + batch_size],
actions_train[start_n:start_n + batch_size]],
encodes_train[start_n:start_n + batch_size]
)
start_n += batch_size
if start_n + batch_size >= numno_train:
start_n = (start_n + batch_size) % numno_train
posterior_weights = self.posterior.get_weights()
posterior_target_weights = self.posterior_target.get_weights()
for k in range(len(posterior_weights)):
posterior_target_weights[k] = 0.5 * posterior_weights[k] +\
0.5 * posterior_target_weights[k]
self.posterior_target.set_weights(posterior_target_weights)
output_p = self.posterior_target.predict(
[imgs_val, auxs_val, actions_val])
val_loss = -np.average(
np.sum(np.log(output_p) * encodes_val, axis=1))
print("Posterior step:", j, "loss:", loss, val_loss)
# Computing returns and estimating advantage function.
path_idx = 0
for path in paths:
file_path = "C:/Users/HRI/Documents/GitHub/InfoGAIL/log/iter_%d_path_%d.txt" % (i, path_idx)
f = open(file_path, "w")
path["baselines"] = self.baseline.predict(path)
output_d = self.discriminate.predict(
[path["imgs"], path["auxs"], path["actions"]])
output_p = self.posterior_target.predict(
[path["imgs"], path["auxs"], path["actions"]])
path["rewards"] = np.ones(path["raws"].shape[0]) * 2 + \
output_d.flatten() * 0.1 + \
np.sum(np.log(output_p) * path["encodes"], axis=1)
path_baselines = np.append(path["baselines"], 0 if
path["baselines"].shape[0] == 100 else
path["baselines"][-1])
deltas = path["rewards"] + config.gamma * path_baselines[1:] -\
path_baselines[:-1]
# path["returns"] = discount(path["rewards"], config.gamma)
# path["advants"] = path["returns"] - path["baselines"]
path["advants"] = discount(deltas, config.gamma * config.lam)
path["returns"] = discount(path["rewards"], config.gamma)
f.write("Baseline:\n" + np.array_str(path_baselines) + "\n")
f.write("Returns:\n" + np.array_str(path["returns"]) + "\n")
f.write("Advants:\n" + np.array_str(path["advants"]) + "\n")
f.write("Mus:\n" + np.array_str(path["mus"]) + "\n")
f.write("Actions:\n" + np.array_str(path["actions"]) + "\n")
f.write("Logstds:\n" + np.array_str(path["logstds"]) + "\n")
path_idx += 1
# Standardize the advantage function to have mean=0 and std=1
advants_n = np.concatenate([path["advants"] for path in paths])
# advants_n -= advants_n.mean()
advants_n /= (advants_n.std() + 1e-8)
# Computing baseline function for next iter.
self.baseline.fit(paths)
feed = {self.feats: feats_n,
self.auxs: auxs_n,
self.encodes: encodes_n,
self.actions: actions_n,
self.advants: advants_n,
self.action_dist_logstd: logstds_n,
self.oldaction_dist_mu: mus_n,
self.oldaction_dist_logstd: logstds_n}
thprev = self.gf()
def fisher_vector_product(p):
feed[self.flat_tangent] = p
return self.sess.run(self.fvp, feed) + p * config.cg_damping
g = self.sess.run(self.pg, feed_dict=feed)
stepdir = conjugate_gradient(fisher_vector_product, -g)
shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
assert shs > 0
lm = np.sqrt(shs / config.max_kl)
fullstep = stepdir / lm
neggdotstepdir = -g.dot(stepdir)
def loss(th):
self.sff(th)
return self.sess.run(self.losses[0], feed_dict=feed)
theta = linesearch(loss, thprev, fullstep, neggdotstepdir / lm)
self.sff(theta)
surrafter, kloldnew, entropy = self.sess.run(
self.losses, feed_dict=feed
)
episoderewards = np.array([path["rewards"].sum() for path in paths])
stats = {}
numeptotal += len(episoderewards)
stats["Total number of episodes"] = numeptotal
stats["Average sum of rewards per episode"] = episoderewards.mean()
stats["Entropy"] = entropy
stats["Time elapsed"] = "%.2f mins" % ((time.time() - start_time) / 60.0)
stats["KL between old and new distribution"] = kloldnew
stats["Surrogate loss"] = surrafter
print(("\n********** Iteration {} **********".format(i)))
for k, v in stats.items():
print((k + ": " + " " * (40 - len(k)) + str(v)))
if entropy != entropy:
exit(-1)
param_dir = "C:/Users/HRI/Documents/GitHub/InfoGAIL/params/"
print("Now we save model")
self.generator.save_weights(
param_dir + "generator_model_%d.h5" % i, overwrite=True)
with open(param_dir + "generator_model_%d.json" % i, "w") as outfile:
json.dump(self.generator.to_json(), outfile)
self.discriminator.save_weights(
param_dir + "discriminator_model_%d.h5" % i, overwrite=True)
with open(param_dir + "discriminator_model_%d.json" % i, "w") as outfile:
json.dump(self.discriminator.to_json(), outfile)
self.baseline.model.save_weights(
param_dir + "baseline_model_%d.h5" % i, overwrite=True)
with open(param_dir + "baseline_model_%d.json" % i, "w") as outfile:
json.dump(self.baseline.model.to_json(), outfile)
self.posterior.save_weights(
param_dir + "posterior_model_%d.h5" % i, overwrite=True)
with open(param_dir + "posterior_model_%d.json" % i, "w") as outfile:
json.dump(self.posterior.to_json(), outfile)
self.posterior_target.save_weights(
param_dir + "posterior_target_model_%d.h5" % i, overwrite=True)
with open(param_dir + "posterior_target_model_%d.json" % i, "w") as outfile:
json.dump(self.posterior_target.to_json(), outfile)
class Generator(object):
def __init__(self, sess, feat_dim, aux_dim, encode_dim, action_dim):
self.sess = sess
self.lr = tf.compat.v1.placeholder(tf.float32, shape=[])
K.set_session(sess)
self.model, self.weights, self.feats, self.auxs, self.encodes = \
self.create_generator(feat_dim, aux_dim, encode_dim)
self.action_gradient = tf.compat.v1.placeholder(tf.float32, [None, action_dim])
self.params_grad = tf.gradients(self.model.output, self.weights,
self.action_gradient)
grads = list(zip(self.params_grad, self.weights))
        self.optimize = tf.compat.v1.train.AdamOptimizer(self.lr).apply_gradients(grads)
        self.sess.run(tf.compat.v1.global_variables_initializer())
def train(self, feats, auxs, encodes, action_grads, lr):
self.sess.run(self.optimize, feed_dict={
self.feats: feats,
self.auxs: auxs,
self.encodes: encodes,
self.lr: lr,
self.action_gradient: action_grads,
K.learning_phase(): 1
})
def create_generator(self, feat_dim, aux_dim, encode_dim):
feats = Input(shape=[feat_dim[0], feat_dim[1], feat_dim[2]])
x = Convolution2D(256, 3, 3)(feats)
x = LeakyReLU()(x)
x = Convolution2D(256, 3, 3, subsample=(2, 2))(x)
x = LeakyReLU()(x)
x = Flatten()(x)
auxs = Input(shape=[aux_dim])
h = merge([x, auxs], mode='concat')
h = Dense(256)(h)
h = LeakyReLU()(h)
h = Dense(128)(h)
encodes = Input(shape=[encode_dim])
c = Dense(128)(encodes)
h = merge([h, c], mode='sum')
h = LeakyReLU()(h)
steer = Dense(1, activation='tanh', init=lambda shape, name:
normal(shape, scale=1e-4, name=name))(h)
accel = Dense(1, activation='sigmoid', init=lambda shape, name:
normal(shape, scale=1e-4, name=name))(h)
brake = Dense(1, activation='sigmoid', init=lambda shape, name:
normal(shape, scale=1e-4, name=name))(h)
actions = merge([steer, accel, brake], mode='concat')
model = Model(input=[feats, auxs, encodes], output=actions)
return model, model.trainable_weights, feats, auxs, encodes
class Posterior(object):
def __init__(self, sess, img_dim, aux_dim, action_dim, encode_dim):
self.sess = sess
self.lr = tf.compat.v1.placeholder(tf.float32, shape=[])
K.set_session(sess)
self.model = self.create_posterior(img_dim, aux_dim, action_dim, encode_dim)
def create_posterior(self, img_dim, aux_dim, action_dim, encode_dim):
imgs = Input(shape=[img_dim[0], img_dim[1], img_dim[2]])
x = Convolution2D(32, 3, 3, subsample=(2, 2))(imgs)
x = LeakyReLU()(x)
x = Convolution2D(64, 3, 3, subsample=(2, 2))(x)
x = LeakyReLU()(x)
x = Convolution2D(128, 3, 3, subsample=(2, 2))(x)
x = LeakyReLU()(x)
x = Flatten()(x)
auxs = Input(shape=[aux_dim])
actions = Input(shape=[action_dim])
h = merge([x, auxs, actions], mode='concat')
h = Dense(256)(h)
h = LeakyReLU()(h)
h = Dense(128)(h)
h = LeakyReLU()(h)
c = Dense(encode_dim, activation='softmax')(h)
model = Model(input=[imgs, auxs, actions], output=c)
return model
| 43.618971 | 122 | 0.570454 |
79471ac969dd0b15dc004a68049eb9673c928b2c | 2,582 | py | Python | river/metrics/fowlkes_mallows.py | online-ml/creme | 60872844e6052b5ef20e4075aea30f9031377136 | [
"BSD-3-Clause"
] | 1,105 | 2019-01-24T15:15:30.000Z | 2020-11-10T18:27:00.000Z | river/metrics/fowlkes_mallows.py | online-ml/creme | 60872844e6052b5ef20e4075aea30f9031377136 | [
"BSD-3-Clause"
] | 328 | 2019-01-25T13:48:43.000Z | 2020-11-11T11:41:44.000Z | river/metrics/fowlkes_mallows.py | online-ml/creme | 60872844e6052b5ef20e4075aea30f9031377136 | [
"BSD-3-Clause"
] | 150 | 2019-01-29T19:05:21.000Z | 2020-11-11T11:50:14.000Z | import math
from river import metrics
__all__ = ["FowlkesMallows"]
class FowlkesMallows(metrics.base.MultiClassMetric):
r"""Fowlkes-Mallows Index.
The Fowlkes-Mallows Index [^1] [^2] is an external evaluation method that is
used to determine the similarity between two clusterings, and also a metric
to measure confusion matrices. The measure of similarity could be either between
two hierarchical clusterings or a clustering and a benchmark classification. A
higher value for the Fowlkes-Mallows index indicates a greater similarity between
the clusters and the benchmark classifications.
The Fowlkes-Mallows Index, for two cluster algorithms, is defined as:
$$
FM = \sqrt{PPV \times TPR} = \sqrt{\frac{TP}{TP+FP} \times \frac{TP}{TP+FN}}
$$
where
* TP, FP, FN are respectively the number of true positives, false positives and
false negatives;
    * TPR is the True Positive Rate (or Sensitivity/Recall), PPV is the Positive Predictive
    Value (or Precision).
Parameters
----------
cm
This parameter allows sharing the same confusion
matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage
and computation time.
Examples
--------
>>> from river import metrics
>>> y_true = [0, 0, 0, 1, 1, 1]
>>> y_pred = [0, 0, 1, 1, 2, 2]
>>> metric = metrics.FowlkesMallows()
>>> for yt, yp in zip(y_true, y_pred):
... print(metric.update(yt, yp))
FowlkesMallows: 0.00%
FowlkesMallows: 100.00%
FowlkesMallows: 57.74%
FowlkesMallows: 40.82%
FowlkesMallows: 35.36%
FowlkesMallows: 47.14%
References
----------
[^1]: Wikipedia contributors. (2020, December 22).
Fowlkes–Mallows index. In Wikipedia, The Free Encyclopedia,
from https://en.wikipedia.org/w/index.php?title=Fowlkes%E2%80%93Mallows_index&oldid=995714222
[^2]: E. B. Fowkles and C. L. Mallows (1983).
“A method for comparing two hierarchical clusterings”.
Journal of the American Statistical Association
"""
@property
def works_with_weights(self):
return False
def get(self):
n = self.cm.n_samples
tk = sum(c * c for row in self.cm.data.values() for c in row.values()) - n
pk = sum(sc * sc for sc in self.cm.sum_col.values()) - n
qk = sum(sr * sr for sr in self.cm.sum_row.values()) - n
try:
return math.sqrt(tk / pk) * math.sqrt(tk / qk)
except ZeroDivisionError:
return 0.0
| 31.108434 | 103 | 0.647947 |
79471b7e8c70b597c18a2028023b4e0bd4bf6540 | 17,810 | py | Python | elasticsearch/transport.py | shub1095/elasticsearch-py | 778c7e4ac000b51ced7c9a1a588200ec395e40ca | [
"Apache-2.0"
] | null | null | null | elasticsearch/transport.py | shub1095/elasticsearch-py | 778c7e4ac000b51ced7c9a1a588200ec395e40ca | [
"Apache-2.0"
] | null | null | null | elasticsearch/transport.py | shub1095/elasticsearch-py | 778c7e4ac000b51ced7c9a1a588200ec395e40ca | [
"Apache-2.0"
] | 1 | 2020-08-04T11:42:43.000Z | 2020-08-04T11:42:43.000Z | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
from itertools import chain
from .connection import Urllib3HttpConnection
from .connection_pool import ConnectionPool, DummyConnectionPool, EmptyConnectionPool
from .serializer import JSONSerializer, Deserializer, DEFAULT_SERIALIZERS
from .exceptions import (
ConnectionError,
TransportError,
SerializationError,
ConnectionTimeout,
)
def get_host_info(node_info, host):
"""
Simple callback that takes the node info from `/_cluster/nodes` and a
parsed connection information and return the connection information. If
`None` is returned this node will be skipped.
Useful for filtering nodes (by proximity for example) or if additional
information needs to be provided for the :class:`~elasticsearch.Connection`
class. By default master only nodes are filtered out since they shouldn't
typically be used for API operations.
:arg node_info: node information from `/_cluster/nodes`
:arg host: connection information (host, port) extracted from the node info
"""
# ignore master only nodes
if node_info.get("roles", []) == ["master"]:
return None
return host
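# --- Illustrative sketch (not part of the original file) --------------------
# Transport (defined below) accepts a ``host_info_callback`` with the same
# signature as get_host_info() above: it receives the raw per-node dictionary
# from ``/_nodes/_all/http`` plus the already-parsed ``host`` dict and returns
# either a host dict or ``None`` to skip that node.  The callback below only
# sketches one possible policy (keep data nodes); the exact role names found
# in ``node_info`` depend on the cluster version.
def _only_data_nodes(node_info, host):
    roles = node_info.get("roles", [])
    if roles and "data" not in roles:
        return None  # skip master/ingest/coordinating-only nodes
    return host
# Typical wiring (kept as a comment so importing this module has no side
# effects); the address is a placeholder:
#   transport = Transport(
#       [{"host": "localhost", "port": 9200}],
#       host_info_callback=_only_data_nodes,
#   )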
class Transport(object):
"""
    Encapsulation of transport-related logic. Handles instantiation of the
individual connections as well as creating a connection pool to hold them.
Main interface is the `perform_request` method.
"""
DEFAULT_CONNECTION_CLASS = Urllib3HttpConnection
def __init__(
self,
hosts,
connection_class=None,
connection_pool_class=ConnectionPool,
host_info_callback=get_host_info,
sniff_on_start=False,
sniffer_timeout=None,
sniff_timeout=0.1,
sniff_on_connection_fail=False,
serializer=JSONSerializer(),
serializers=None,
default_mimetype="application/json",
max_retries=3,
retry_on_status=(502, 503, 504),
retry_on_timeout=False,
send_get_body_as="GET",
**kwargs
):
"""
:arg hosts: list of dictionaries, each containing keyword arguments to
create a `connection_class` instance
:arg connection_class: subclass of :class:`~elasticsearch.Connection` to use
:arg connection_pool_class: subclass of :class:`~elasticsearch.ConnectionPool` to use
:arg host_info_callback: callback responsible for taking the node information from
`/_cluster/nodes`, along with already extracted information, and
producing a list of arguments (same as `hosts` parameter)
:arg sniff_on_start: flag indicating whether to obtain a list of nodes
from the cluster at startup time
:arg sniffer_timeout: number of seconds between automatic sniffs
:arg sniff_on_connection_fail: flag controlling if connection failure triggers a sniff
:arg sniff_timeout: timeout used for the sniff request - it should be a
fast api call and we are talking potentially to more nodes so we want
to fail quickly. Not used during initial sniffing (if
``sniff_on_start`` is on) when the connection still isn't
initialized.
:arg serializer: serializer instance
:arg serializers: optional dict of serializer instances that will be
used for deserializing data coming from the server. (key is the mimetype)
:arg default_mimetype: when no mimetype is specified by the server
response assume this mimetype, defaults to `'application/json'`
:arg max_retries: maximum number of retries before an exception is propagated
:arg retry_on_status: set of HTTP status codes on which we should retry
on a different node. defaults to ``(502, 503, 504)``
:arg retry_on_timeout: should timeout trigger a retry on different
node? (default `False`)
:arg send_get_body_as: for GET requests with body this option allows
you to specify an alternate way of execution for environments that
don't support passing bodies with GET requests. If you set this to
'POST' a POST method will be used instead, if to 'source' then the body
will be serialized and passed as a query parameter `source`.
Any extra keyword arguments will be passed to the `connection_class`
when creating and instance unless overridden by that connection's
options provided as part of the hosts parameter.
"""
if connection_class is None:
connection_class = self.DEFAULT_CONNECTION_CLASS
# serialization config
_serializers = DEFAULT_SERIALIZERS.copy()
# if a serializer has been specified, use it for deserialization as well
_serializers[serializer.mimetype] = serializer
# if custom serializers map has been supplied, override the defaults with it
if serializers:
_serializers.update(serializers)
# create a deserializer with our config
self.deserializer = Deserializer(_serializers, default_mimetype)
self.max_retries = max_retries
self.retry_on_timeout = retry_on_timeout
self.retry_on_status = retry_on_status
self.send_get_body_as = send_get_body_as
# data serializer
self.serializer = serializer
# store all strategies...
self.connection_pool_class = connection_pool_class
self.connection_class = connection_class
# ...save kwargs to be passed to the connections
self.kwargs = kwargs
self.hosts = hosts
# Start with an empty pool specifically for `AsyncTransport`.
# It should never be used, will be replaced on first call to
# .set_connections()
self.connection_pool = EmptyConnectionPool()
if hosts:
# ...and instantiate them
self.set_connections(hosts)
# retain the original connection instances for sniffing
self.seed_connections = list(self.connection_pool.connections[:])
else:
self.seed_connections = []
# Don't enable sniffing on Cloud instances.
if kwargs.get("cloud_id", False):
sniff_on_start = False
sniff_on_connection_fail = False
# sniffing data
self.sniffer_timeout = sniffer_timeout
self.sniff_on_start = sniff_on_start
self.sniff_on_connection_fail = sniff_on_connection_fail
self.last_sniff = time.time()
self.sniff_timeout = sniff_timeout
# callback to construct host dict from data in /_cluster/nodes
self.host_info_callback = host_info_callback
if sniff_on_start:
self.sniff_hosts(True)
def add_connection(self, host):
"""
Create a new :class:`~elasticsearch.Connection` instance and add it to the pool.
:arg host: kwargs that will be used to create the instance
"""
self.hosts.append(host)
self.set_connections(self.hosts)
def set_connections(self, hosts):
"""
Instantiate all the connections and create new connection pool to hold them.
Tries to identify unchanged hosts and re-use existing
:class:`~elasticsearch.Connection` instances.
:arg hosts: same as `__init__`
"""
# construct the connections
def _create_connection(host):
# if this is not the initial setup look at the existing connection
# options and identify connections that haven't changed and can be
# kept around.
if hasattr(self, "connection_pool"):
for (connection, old_host) in self.connection_pool.connection_opts:
if old_host == host:
return connection
# previously unseen params, create new connection
kwargs = self.kwargs.copy()
kwargs.update(host)
return self.connection_class(**kwargs)
connections = map(_create_connection, hosts)
connections = list(zip(connections, hosts))
if len(connections) == 1:
self.connection_pool = DummyConnectionPool(connections)
else:
# pass the hosts dicts to the connection pool to optionally extract parameters from
self.connection_pool = self.connection_pool_class(
connections, **self.kwargs
)
def get_connection(self):
"""
Retrieve a :class:`~elasticsearch.Connection` instance from the
:class:`~elasticsearch.ConnectionPool` instance.
"""
if self.sniffer_timeout:
if time.time() >= self.last_sniff + self.sniffer_timeout:
self.sniff_hosts()
return self.connection_pool.get_connection()
def _get_sniff_data(self, initial=False):
"""
Perform the request to get sniffing information. Returns a list of
dictionaries (one per node) containing all the information from the
cluster.
It also sets the last_sniff attribute in case of a successful attempt.
In rare cases it might be possible to override this method in your
        custom Transport class to serve data from an alternative source, such as
configuration management.
"""
previous_sniff = self.last_sniff
try:
# reset last_sniff timestamp
self.last_sniff = time.time()
# go through all current connections as well as the
# seed_connections for good measure
for c in chain(self.connection_pool.connections, self.seed_connections):
try:
# use small timeout for the sniffing request, should be a fast api call
_, headers, node_info = c.perform_request(
"GET",
"/_nodes/_all/http",
timeout=self.sniff_timeout if not initial else None,
)
node_info = self.deserializer.loads(
node_info, headers.get("content-type")
)
break
except (ConnectionError, SerializationError):
pass
else:
raise TransportError("N/A", "Unable to sniff hosts.")
except Exception:
# keep the previous value on error
self.last_sniff = previous_sniff
raise
return list(node_info["nodes"].values())
def _get_host_info(self, host_info):
host = {}
address = host_info.get("http", {}).get("publish_address")
# malformed or no address given
if not address or ":" not in address:
return None
if "/" in address:
# Support 7.x host/ip:port behavior where http.publish_host has been set.
fqdn, ipaddress = address.split("/", 1)
host["host"] = fqdn
_, host["port"] = ipaddress.rsplit(":", 1)
host["port"] = int(host["port"])
else:
host["host"], host["port"] = address.rsplit(":", 1)
host["port"] = int(host["port"])
return self.host_info_callback(host_info, host)
def sniff_hosts(self, initial=False):
"""
Obtain a list of nodes from the cluster and create a new connection
pool using the information retrieved.
To extract the node connection parameters use the ``nodes_to_host_callback``.
:arg initial: flag indicating if this is during startup
(``sniff_on_start``), ignore the ``sniff_timeout`` if ``True``
"""
node_info = self._get_sniff_data(initial)
hosts = list(filter(None, (self._get_host_info(n) for n in node_info)))
# we weren't able to get any nodes or host_info_callback blocked all -
# raise error.
if not hosts:
raise TransportError(
"N/A", "Unable to sniff hosts - no viable hosts found."
)
self.set_connections(hosts)
def mark_dead(self, connection):
"""
Mark a connection as dead (failed) in the connection pool. If sniffing
on failure is enabled this will initiate the sniffing process.
:arg connection: instance of :class:`~elasticsearch.Connection` that failed
"""
# mark as dead even when sniffing to avoid hitting this host during the sniff process
self.connection_pool.mark_dead(connection)
if self.sniff_on_connection_fail:
self.sniff_hosts()
def perform_request(self, method, url, headers=None, params=None, body=None):
"""
Perform the actual request. Retrieve a connection from the connection
        pool, pass all the information to its perform_request method and
return the data.
If an exception was raised, mark the connection as failed and retry (up
to `max_retries` times).
If the operation was successful and the connection used was previously
        marked as dead, mark it as live, resetting its failure count.
:arg method: HTTP method to use
:arg url: absolute url (without host) to target
:arg headers: dictionary of headers, will be handed over to the
underlying :class:`~elasticsearch.Connection` class
:arg params: dictionary of query parameters, will be handed over to the
underlying :class:`~elasticsearch.Connection` class for serialization
:arg body: body of the request, will be serialized using serializer and
passed to the connection
"""
method, params, body, ignore, timeout = self._resolve_request_args(
method, params, body
)
for attempt in range(self.max_retries + 1):
connection = self.get_connection()
try:
status, headers_response, data = connection.perform_request(
method,
url,
params,
body,
headers=headers,
ignore=ignore,
timeout=timeout,
)
except TransportError as e:
if method == "HEAD" and e.status_code == 404:
return False
retry = False
if isinstance(e, ConnectionTimeout):
retry = self.retry_on_timeout
elif isinstance(e, ConnectionError):
retry = True
elif e.status_code in self.retry_on_status:
retry = True
if retry:
try:
# only mark as dead if we are retrying
self.mark_dead(connection)
except TransportError:
# If sniffing on failure, it could fail too. Catch the
# exception not to interrupt the retries.
pass
# raise exception on last retry
if attempt == self.max_retries:
raise e
else:
raise e
else:
# connection didn't fail, confirm it's live status
self.connection_pool.mark_live(connection)
if method == "HEAD":
return 200 <= status < 300
if data:
data = self.deserializer.loads(
data, headers_response.get("content-type")
)
return data
def close(self):
"""
Explicitly closes connections
"""
self.connection_pool.close()
def _resolve_request_args(self, method, params, body):
"""Resolves parameters for .perform_request()"""
if body is not None:
body = self.serializer.dumps(body)
# some clients or environments don't support sending GET with body
if method in ("HEAD", "GET") and self.send_get_body_as != "GET":
# send it as post instead
if self.send_get_body_as == "POST":
method = "POST"
# or as source parameter
elif self.send_get_body_as == "source":
if params is None:
params = {}
params["source"] = body
body = None
if body is not None:
try:
body = body.encode("utf-8", "surrogatepass")
except (UnicodeDecodeError, AttributeError):
# bytes/str - no need to re-encode
pass
ignore = ()
timeout = None
if params:
timeout = params.pop("request_timeout", None)
ignore = params.pop("ignore", ())
if isinstance(ignore, int):
ignore = (ignore,)
return method, params, body, ignore, timeout
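# --- Illustrative sketch (not part of the original file) --------------------
# Minimal end-to-end usage of the Transport class above.  The host address is
# a placeholder; constructing the object performs no network I/O (sniffing is
# off by default), while perform_request() does issue HTTP calls and raises a
# ConnectionError (a TransportError subclass) if nothing is listening.
if __name__ == "__main__":
    _transport = Transport(
        [{"host": "localhost", "port": 9200}],
        max_retries=2,
        retry_on_timeout=True,
    )
    try:
        print(_transport.perform_request("GET", "/"))
    except TransportError as exc:
        print("request failed:", exc)
    finally:
        _transport.close()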
| 39.8434 | 95 | 0.616788 |
79471bce507a8a833060d8eb8f6f34c58aa6bb7d | 3,016 | py | Python | submission_patch.py | doublechenching/ship_detection | 1ba4926e0d28043863df05ae8afc3d5b336b350d | [
"Apache-2.0"
] | 8 | 2019-03-12T08:47:37.000Z | 2021-05-13T05:28:20.000Z | submission_patch.py | doublechenching/ship_detection | 1ba4926e0d28043863df05ae8afc3d5b336b350d | [
"Apache-2.0"
] | null | null | null | submission_patch.py | doublechenching/ship_detection | 1ba4926e0d28043863df05ae8afc3d5b336b350d | [
"Apache-2.0"
] | null | null | null | #encoding: utf-8
from __future__ import print_function
from utils import init_env
init_env('1')
from config import config as cfg
from models.patch_dense_unet import dense121_unet
from dataset.data import compose_ndcube, decompose_ndimage
import os
import glob
import numpy as np
from dataset.data import rle_encode
from skimage import morphology as m
import pandas as pd
from skimage.io import imread
from tqdm import tqdm
cfg.patch_shape = (256, 256, 3)
def multi_rle_encode(img, **kwargs):
'''
Encode connected regions as separated masks
'''
labels = m.label(img[0, :, :, :], connectivity=2)
if img.ndim > 2:
return [rle_encode(np.sum(labels==k, axis=2), **kwargs) for k in np.unique(labels[labels>0])]
else:
return [rle_encode(labels==k, **kwargs) for k in np.unique(labels[labels>0])]
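# --- Illustrative sketch (not part of the original file) --------------------
# ``rle_encode`` is imported from ``dataset.data`` and is not shown here; it
# is assumed to produce the Kaggle-style run-length encoding used by the
# Airbus ship-detection challenge (column-major pixel order, 1-based start
# indices, "start length" pairs).  The helper below is only a reference
# illustration of that format, not the project's actual implementation.
def _rle_encode_sketch(mask):
    pixels = mask.T.flatten()                      # column-major pixel order
    padded = np.concatenate([[0], pixels, [0]])
    runs = np.where(padded[1:] != padded[:-1])[0] + 1
    runs[1::2] -= runs[::2]                        # convert run ends to lengths
    return " ".join(str(x) for x in runs)
# Example: a 3x3 mask whose middle column is filled encodes as "4 3"
# (pixels 4, 5 and 6 in column-major order form one run of length 3):
#   _rle_encode_sketch(np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]])) -> "4 3"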
def load_model(weigths_path):
model = dense121_unet(cfg.patch_shape)
model.load_weights(weigths_path)
return model
def submission(model, test_img_dir, opt_threshold=0.5, tta=True):
test_img_paths = glob.glob(os.path.join(test_img_dir, '*.jpg'))
pred_rows = []
for path in tqdm(test_img_paths):
test_img = imread(path) / 255.0
test_patches = decompose_ndimage(test_img, cfg.patch_shape[:2])
test_patches = np.stack(test_patches, axis=0)
if tta:
pred_prob1 = model.predict(test_patches)
pred_prob2 = model.predict(np.flip(test_patches, axis=1))
pred_prob2 = np.flip(pred_prob2, axis=1)
pred_prob3 = model.predict(np.flip(test_patches, axis=2))
pred_prob3 = np.flip(pred_prob3, axis=2)
test_img4 = np.flip(test_patches, axis=1)
test_img4 = np.flip(test_img4, axis=2)
pred_prob4 = model.predict(test_img4)
pred_prob4 = np.flip(pred_prob4, axis=2)
pred_prob4 = np.flip(pred_prob4, axis=1)
pred_prob = (pred_prob1 + pred_prob2 + pred_prob3 + pred_prob4) / 4
else:
pred_prob = model.predict(test_patches)
pred_patches = [patches for patches in pred_prob]
com_pred = compose_ndcube(pred_patches, list(test_img.shape[:2]) + [1])
pred_mask = com_pred > opt_threshold
pred_mask = np.expand_dims(pred_mask, axis=0) # (b, h, w, c)
rles = multi_rle_encode(pred_mask)
name = os.path.split(path)[-1]
if len(rles)>0:
for rle in rles:
pred_rows += [{'ImageId': name, 'EncodedPixels': rle}]
else:
pred_rows += [{'ImageId': name, 'EncodedPixels': None}]
submission_df = pd.DataFrame(pred_rows)[['ImageId', 'EncodedPixels']]
submission_df.to_csv('submission_patch_28.csv', index=False)
if __name__ == "__main__":
cfg.task_name = 'dense_unet_patch'
epoch = 28
log_dir = os.path.join(cfg.log_dir, cfg.task_name)
weights_path = os.path.join(log_dir, cfg.weigts_file.format(epoch=epoch))
model = load_model(weights_path)
submission(model, cfg.test_dir) | 35.482353 | 101 | 0.66313 |
79471c8306b7078fd9c52c359a3fc526d7e31533 | 5,223 | py | Python | scripts/auto_nav_guidance.py | vanttec/rb_missions | f05384f84a256b9f64defaef625bee689b79701f | [
"MIT"
] | null | null | null | scripts/auto_nav_guidance.py | vanttec/rb_missions | f05384f84a256b9f64defaef625bee689b79701f | [
"MIT"
] | null | null | null | scripts/auto_nav_guidance.py | vanttec/rb_missions | f05384f84a256b9f64defaef625bee689b79701f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
from std_msgs.msg import Float64
from std_msgs.msg import Int32
from custom_msgs.msg import obj_detected
from custom_msgs.msg import obj_detected_list
from geometry_msgs.msg import Pose2D
import numpy as np
import math
import time
#import matplotlib.pyplot as plt
class AutoNav:
def __init__(self):
self.yaw = 0
self.obj_list = []
self.activated = True
self.state = -1
self.ang = 0
self.desired_speed = 0
self.distance = 0
self.InitTime = rospy.Time.now().secs
self.desired_heading = 0
rospy.Subscriber("/vectornav/ins_2d/ins_pose", Pose2D, self.ins_pose_callback)
rospy.Subscriber('/usv_perception/yolo_zed/objects_detected', obj_detected_list, self.objs_callback)
self.d_speed_pub = rospy.Publisher("/guidance/desired_speed", Float64, queue_size=10)
self.d_heading_pub = rospy.Publisher("/guidance/desired_heading", Float64, queue_size=10)
self.status_pub = rospy.Publisher("/status", Int32, queue_size=10)
self.test = rospy.Publisher("/test", Int32, queue_size=10)
def ins_pose_callback(self,pose):
self.yaw = pose.theta
def objs_callback(self,data):
#print("a")
self.obj_list = []
for i in range(data.len):
if str(data.objects[i].clase) == 'marker':
self.obj_list.append({'X' : data.objects[i].X + 0.55, 'Y' : data.objects[i].Y, 'color' : data.objects[i].color, 'class' : data.objects[i].clase})
def punto_medio(self):
distances_list = []
y_list = []
class_list = []
for i in range(len(self.obj_list)):
distances_list.append(self.obj_list[i]['X'])
y_list.append(self.obj_list[i]['Y'])
class_list.append(self.obj_list[i]['class'])
ind_x1 = np.argsort(distances_list)[0]
ind_x2 = np.argsort(distances_list)[1]
x1 = distances_list[ind_x1]
y1 = -1*y_list[ind_x1]
x2 = distances_list[ind_x2]
y2 = -1*y_list[ind_x2]
xc = min([x1,x2]) + abs(x1 - x2)/2
yc = min([y1,y2]) + abs(y1 - y2)/2
self.distance = xc
offset = .55
yc = 0.00001 if yc == 0 else yc
relative_heading = math.atan((xc+offset)/yc)
if relative_heading < 0:
relative_heading = -1*(relative_heading + math.pi/2)
else:
relative_heading = math.pi/2 - relative_heading
self.ang = -1 if relative_heading < 0 else 1
self.desired_heading = relative_heading + self.yaw
self.desired_speed = 1
if self.distance < 7:
self.desired_speed = 0.6
if self.distance < 0.2:
self.desired_speed = 0
self.desired(self.desired_speed, self.desired_heading)
'''plt.clf()
plt.plot(y1,x1, 'go',markersize=5)
plt.plot(y2,x2, 'go',markersize=5)
plt.plot(0,0,'ro')
plt.plot(yc,xc,'r*')
plt.axis([-15, 15, 0, 20])
plt.pause(0.0001)
plt.draw()'''
def straight(self):
self.desired_speed = 0.7
self.desired(self.desired_speed, self.yaw)
def desired(self, speed, heading):
self.d_speed_pub.publish(speed)
self.d_heading_pub.publish(heading)
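# --- Illustrative sketch (not part of the original file) --------------------
# punto_medio() above steers toward the midpoint of the two nearest markers:
# it averages their (x, y) positions, converts the midpoint into a relative
# bearing with atan, and adds the current yaw.  The standalone helper below
# repeats that arithmetic for points already expressed in the method's sign
# convention (y negated), so the geometry can be checked without ROS; the
# 0.55 m offset mirrors the constant used above and the function itself is
# only an illustration, not part of the node.
def _midpoint_heading_sketch(p1, p2, yaw, offset=0.55):
    xc = min(p1[0], p2[0]) + abs(p1[0] - p2[0]) / 2
    yc = min(p1[1], p2[1]) + abs(p1[1] - p2[1]) / 2
    yc = 0.00001 if yc == 0 else yc
    rel = math.atan((xc + offset) / yc)
    rel = -(rel + math.pi / 2) if rel < 0 else math.pi / 2 - rel
    return rel + yaw
# Example: markers at (6, -1.5) and (6, 1.5) with yaw = 0 give a heading of
# roughly 0 rad, i.e. the midpoint is dead ahead, as expected.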
def main():
rospy.init_node('auto_nav_guidance', anonymous=True)
rate = rospy.Rate(100)
E = AutoNav()
while not rospy.is_shutdown() and E.activated:
if E.state == -1:
while not rospy.is_shutdown() and len(E.obj_list) < 2:
E.test.publish(E.state)
rate.sleep()
E.state = 0
if E.state == 0:
E.test.publish(E.state)
if len(E.obj_list) >= 2:
E.punto_medio()
else:
initTime = rospy.Time.now().secs
while not rospy.is_shutdown() and len(E.obj_list) < 2:
if rospy.Time.now().secs - initTime > 5:
E.state = 1
rate.sleep()
break
if E.state == 1:
E.test.publish(E.state)
E.straight()
time.sleep(2)
angle = E.yaw
E.state = 2
if E.state == 2:
E.test.publish(E.state)
if len(E.obj_list) >= 2:
E.state = 3
else:
E.desired(0.4,E.yaw)
if E.state == 3:
E.test.publish(E.state)
if len(E.obj_list) >= 2:
E.punto_medio()
else:
initTime = rospy.Time.now().secs
while not rospy.is_shutdown() and len(E.obj_list) < 2:
if rospy.Time.now().secs - initTime > 5:
E.state = 4
rate.sleep()
break
if E.state == 4:
E.test.publish(E.state)
E.desired(0,E.yaw)
E.activated = False
time.sleep(1)
E.status_pub.publish(1)
rate.sleep()
rospy.spin()
if __name__ == "__main__":
try:
main()
except rospy.ROSInterruptException:
pass
| 30.905325 | 161 | 0.548152 |
79471d2c8ff431311b7675ff0e4d1680dff8526c | 810 | py | Python | gallery/urls.py | Nyota254/my-gallery | 956262b746ea65bccb7842e54e76a1d7474c643a | [
"MIT"
] | null | null | null | gallery/urls.py | Nyota254/my-gallery | 956262b746ea65bccb7842e54e76a1d7474c643a | [
"MIT"
] | 6 | 2020-06-05T23:32:54.000Z | 2021-06-09T18:28:32.000Z | gallery/urls.py | Nyota254/my-gallery | 956262b746ea65bccb7842e54e76a1d7474c643a | [
"MIT"
] | 1 | 2019-10-13T10:05:40.000Z | 2019-10-13T10:05:40.000Z | """gallery URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,re_path,include
urlpatterns = [
re_path(r'^admin/', admin.site.urls),
re_path(r'',include('images.urls'))
]
| 35.217391 | 77 | 0.708642 |
79471d57640fa4b9231c1ff021231c8e7d4f1086 | 23,312 | py | Python | Wrapping/Python/vtk/wx/wxVTKRenderWindow.py | Lin1225/vtk_v5.10.0 | b54ac74f4716572862365fbff28cd0ecb8d08c3d | [
"BSD-3-Clause"
] | 1 | 2021-10-13T01:57:14.000Z | 2021-10-13T01:57:14.000Z | Wrapping/Python/vtk/wx/wxVTKRenderWindow.py | Lin1225/vtk_v5.10.0 | b54ac74f4716572862365fbff28cd0ecb8d08c3d | [
"BSD-3-Clause"
] | null | null | null | Wrapping/Python/vtk/wx/wxVTKRenderWindow.py | Lin1225/vtk_v5.10.0 | b54ac74f4716572862365fbff28cd0ecb8d08c3d | [
"BSD-3-Clause"
] | 5 | 2015-03-23T21:13:19.000Z | 2022-01-03T11:15:39.000Z | """
A simple VTK widget for wxPython.
Find wxPython info at http://wxPython.org
Created by David Gobbi, December 2001
Based on vtkTkRenderWidget.py
Updated to new wx namespace and some cleaning by Andrea Gavana,
December 2006
"""
"""
Please see the example at the end of this file.
----------------------------------------
Creation:
wxVTKRenderWindow(parent, ID, stereo=0, [wx keywords]):
You should create a wx.PySimpleApp() or some other wx**App
before creating the window.
----------------------------------------
Methods:
Render()
AddRenderer(ren)
GetRenderers()
GetRenderWindow()
----------------------------------------
Methods to override (all take a wx.Event):
OnButtonDown(event) default: propagate event to Left, Right, Middle
OnLeftDown(event) default: set _Mode to 'Rotate'
OnRightDown(event) default: set _Mode to 'Zoom'
OnMiddleDown(event) default: set _Mode to 'Pan'
OnButtonUp(event) default: propagate event to L, R, M and unset _Mode
OnLeftUp(event)
OnRightUp(event)
OnMiddleUp(event)
OnMotion(event) default: call appropriate handler for _Mode
OnEnterWindow(event) default: set focus to this window
OnLeaveWindow(event) default: release focus
OnKeyDown(event) default: [R]eset, [W]ireframe, [S]olid, [P]ick
OnKeyUp(event)
OnChar(event)
OnSetFocus(event)
OnKillFocus(event)
OnSize(event)
OnMove(event)
OnPaint(event) default: Render()
----------------------------------------
Protected Members:
_Mode: Current mode: 'Rotate', 'Zoom', 'Pan'
_LastX, _LastY: The (x,y) coordinates of the previous event
_CurrentRenderer: The renderer that was most recently clicked in
_CurrentCamera: The camera for the current renderer
----------------------------------------
Private Members:
__Handle: Handle to the window containing the vtkRenderWindow
"""
# import usual libraries
import math, os, sys
import wx
import vtk
# a few configuration items, see what works best on your system
# Use GLCanvas as base class instead of wx.Window.
# This is sometimes necessary under wxGTK or the image is blank.
# (in wxWindows 2.3.1 and earlier, the GLCanvas had scroll bars)
baseClass = wx.Window
if wx.Platform == "__WXGTK__":
import wx.glcanvas
baseClass = wx.glcanvas.GLCanvas
# Keep capturing mouse after mouse is dragged out of window
# (in wxGTK 2.3.2 there is a bug that keeps this from working,
# but it is only relevant in wxGTK if there are multiple windows)
_useCapture = (wx.Platform == "__WXMSW__")
# end of configuration items
class wxVTKRenderWindow(baseClass):
"""
A wxRenderWindow for wxPython.
Use GetRenderWindow() to get the vtkRenderWindow.
Create with the keyword stereo=1 in order to
generate a stereo-capable window.
"""
def __init__(self, parent, ID, *args, **kw):
"""Default class constructor.
@param parent: parent window
@param ID: window id
@param **kw: wxPython keywords (position, size, style) plus the
'stereo' keyword
"""
# miscellaneous protected variables
self._CurrentRenderer = None
self._CurrentCamera = None
self._CurrentZoom = 1.0
self._CurrentLight = None
self._ViewportCenterX = 0
self._ViewportCenterY = 0
self._Picker = vtk.vtkCellPicker()
self._PickedActor = None
self._PickedProperty = vtk.vtkProperty()
self._PickedProperty.SetColor(1,0,0)
self._PrePickedProperty = None
# these record the previous mouse position
self._LastX = 0
self._LastY = 0
# the current interaction mode (Rotate, Pan, Zoom, etc)
self._Mode = None
self._ActiveButton = None
# private attributes
self.__OldFocus = None
# used by the LOD actors
self._DesiredUpdateRate = 15
self._StillUpdateRate = 0.0001
# First do special handling of some keywords:
# stereo, position, size, width, height, style
stereo = 0
if kw.has_key('stereo'):
if kw['stereo']:
stereo = 1
del kw['stereo']
position = wx.DefaultPosition
if kw.has_key('position'):
position = kw['position']
del kw['position']
try:
size = parent.GetSize()
except AttributeError:
size = wx.DefaultSize
if kw.has_key('size'):
size = kw['size']
del kw['size']
# wx.WANTS_CHARS says to give us e.g. TAB
# wx.NO_FULL_REPAINT_ON_RESIZE cuts down resize flicker under GTK
style = wx.WANTS_CHARS | wx.NO_FULL_REPAINT_ON_RESIZE
if kw.has_key('style'):
style = style | kw['style']
del kw['style']
# the enclosing frame must be shown under GTK or the windows
# don't connect together properly
l = []
p = parent
while p: # make a list of all parents
l.append(p)
p = p.GetParent()
l.reverse() # sort list into descending order
for p in l:
p.Show(1)
# initialize the wx.Window
if baseClass.__name__ == 'GLCanvas':
# Set the doublebuffer attribute of the GL canvas.
baseClass.__init__(self, parent, ID, position, size, style,
attribList=[wx.glcanvas.WX_GL_DOUBLEBUFFER])
else:
baseClass.__init__(self, parent, ID, position, size, style)
# create the RenderWindow and initialize it
self._RenderWindow = vtk.vtkRenderWindow()
self._RenderWindow.SetSize(size.width, size.height)
if stereo:
self._RenderWindow.StereoCapableWindowOn()
self._RenderWindow.SetStereoTypeToCrystalEyes()
self.__handle = None
# refresh window by doing a Render
self.Bind(wx.EVT_PAINT, self.OnPaint)
# turn off background erase to reduce flicker
self.Bind(wx.EVT_ERASE_BACKGROUND, lambda e: None)
# Bind the events to the event converters
self.Bind(wx.EVT_RIGHT_DOWN, self._OnButtonDown)
self.Bind(wx.EVT_LEFT_DOWN, self._OnButtonDown)
self.Bind(wx.EVT_MIDDLE_DOWN, self._OnButtonDown)
self.Bind(wx.EVT_RIGHT_UP, self._OnButtonUp)
self.Bind(wx.EVT_LEFT_UP, self._OnButtonUp)
self.Bind(wx.EVT_MIDDLE_UP, self._OnButtonUp)
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_ENTER_WINDOW, self._OnEnterWindow)
self.Bind(wx.EVT_LEAVE_WINDOW, self._OnLeaveWindow)
self.Bind(wx.EVT_CHAR, self.OnChar)
# If we use EVT_KEY_DOWN instead of EVT_CHAR, capital versions
# of all characters are always returned. EVT_CHAR also performs
# other necessary keyboard-dependent translations.
self.Bind(wx.EVT_CHAR, self.OnKeyDown)
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.Bind(wx.EVT_SIZE, self._OnSize)
self.Bind(wx.EVT_MOVE, self.OnMove)
self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
def SetDesiredUpdateRate(self, rate):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor.
"""
self._DesiredUpdateRate = rate
def GetDesiredUpdateRate(self):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor.
"""
return self._DesiredUpdateRate
def SetStillUpdateRate(self, rate):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor.
"""
self._StillUpdateRate = rate
def GetStillUpdateRate(self):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor.
"""
return self._StillUpdateRate
def OnPaint(self, event):
"""Handles the wx.EVT_PAINT event for wxVTKRenderWindow.
"""
dc = wx.PaintDC(self)
self.Render()
def _OnSize(self, event):
"""Handles the wx.EVT_SIZE event for wxVTKRenderWindow.
"""
if wx.Platform != '__WXMSW__':
width, height = event.GetSize()
self._RenderWindow.SetSize(width, height)
self.OnSize(event)
self.Render()
def OnSize(self, event):
"""Overridable event.
"""
pass
def OnMove(self, event):
"""Overridable event.
"""
pass
def _OnEnterWindow(self, event):
"""Handles the wx.EVT_ENTER_WINDOW event for
wxVTKRenderWindow.
"""
self.UpdateRenderer(event)
self.OnEnterWindow(event)
def OnEnterWindow(self, event):
"""Overridable event.
"""
if self.__OldFocus == None:
self.__OldFocus = wx.Window.FindFocus()
self.SetFocus()
def _OnLeaveWindow(self, event):
"""Handles the wx.EVT_LEAVE_WINDOW event for
wxVTKRenderWindow.
"""
self.OnLeaveWindow(event)
def OnLeaveWindow(self, event):
"""Overridable event.
"""
if self.__OldFocus:
self.__OldFocus.SetFocus()
self.__OldFocus = None
def OnSetFocus(self, event):
"""Overridable event.
"""
pass
def OnKillFocus(self, event):
"""Overridable event.
"""
pass
def _OnButtonDown(self, event):
"""Handles the wx.EVT_LEFT/RIGHT/MIDDLE_DOWN events for
wxVTKRenderWindow.
"""
# helper function for capturing mouse until button released
self._RenderWindow.SetDesiredUpdateRate(self._DesiredUpdateRate)
if event.RightDown():
button = "Right"
elif event.LeftDown():
button = "Left"
elif event.MiddleDown():
button = "Middle"
else:
button = None
# save the button and capture mouse until the button is released
if button and not self._ActiveButton:
self._ActiveButton = button
if _useCapture:
self.CaptureMouse()
self.OnButtonDown(event)
def OnButtonDown(self, event):
"""Overridable event.
"""
if not self._Mode:
# figure out what renderer the mouse is over
self.UpdateRenderer(event)
if event.LeftDown():
self.OnLeftDown(event)
elif event.RightDown():
self.OnRightDown(event)
elif event.MiddleDown():
self.OnMiddleDown(event)
def OnLeftDown(self, event):
"""Overridable event.
"""
if not self._Mode:
if event.ControlDown():
self._Mode = "Zoom"
elif event.ShiftDown():
self._Mode = "Pan"
else:
self._Mode = "Rotate"
def OnRightDown(self, event):
"""Overridable event.
"""
if not self._Mode:
self._Mode = "Zoom"
def OnMiddleDown(self, event):
"""Overridable event.
"""
if not self._Mode:
self._Mode = "Pan"
def _OnButtonUp(self, event):
"""Handles the wx.EVT_LEFT/RIGHT/MIDDLE_UP events for
wxVTKRenderWindow.
"""
# helper function for releasing mouse capture
self._RenderWindow.SetDesiredUpdateRate(self._StillUpdateRate)
if event.RightUp():
button = "Right"
elif event.LeftUp():
button = "Left"
elif event.MiddleUp():
button = "Middle"
else:
button = None
        # if the ActiveButton is released, then release mouse capture
if self._ActiveButton and button == self._ActiveButton:
if _useCapture:
self.ReleaseMouse()
self._ActiveButton = None
self.OnButtonUp(event)
def OnButtonUp(self, event):
"""Overridable event.
"""
if event.LeftUp():
self.OnLeftUp(event)
elif event.RightUp():
self.OnRightUp(event)
elif event.MiddleUp():
self.OnMiddleUp(event)
# if not interacting, then do nothing more
if self._Mode:
if self._CurrentRenderer:
self.Render()
self._Mode = None
def OnLeftUp(self, event):
"""Overridable event.
"""
pass
def OnRightUp(self, event):
"""Overridable event.
"""
pass
def OnMiddleUp(self, event):
"""Overridable event.
"""
pass
def OnMotion(self, event):
"""Overridable event.
"""
if self._Mode == "Pan":
self.Pan(event)
elif self._Mode == "Rotate":
self.Rotate(event)
elif self._Mode == "Zoom":
self.Zoom(event)
def OnChar(self, event):
"""Overridable event.
"""
pass
def OnKeyDown(self, event):
"""Handles the wx.EVT_KEY_DOWN events for wxVTKRenderWindow.
"""
if event.GetKeyCode() == ord('r'):
self.Reset(event)
if event.GetKeyCode() == ord('w'):
self.Wireframe()
if event.GetKeyCode() == ord('s'):
self.Surface()
if event.GetKeyCode() == ord('p'):
self.PickActor(event)
if event.GetKeyCode() < 256:
self.OnChar(event)
def OnKeyUp(self, event):
"""Overridable event.
"""
pass
def GetZoomFactor(self):
"""Returns the current zoom factor.
"""
return self._CurrentZoom
def GetRenderWindow(self):
"""Returns the render window (vtkRenderWindow).
"""
return self._RenderWindow
def GetPicker(self):
"""Returns the current picker (vtkCellPicker).
"""
return self._Picker
def Render(self):
"""Actually renders the VTK scene on screen.
"""
if self._CurrentLight:
light = self._CurrentLight
light.SetPosition(self._CurrentCamera.GetPosition())
light.SetFocalPoint(self._CurrentCamera.GetFocalPoint())
if not self.GetUpdateRegion().IsEmpty() or self.__handle:
if self.__handle and self.__handle == self.GetHandle():
self._RenderWindow.Render()
elif self.GetHandle():
# this means the user has reparented us
# let's adapt to the new situation by doing the WindowRemap
# dance
self._RenderWindow.SetNextWindowInfo(str(self.GetHandle()))
self._RenderWindow.WindowRemap()
# store the new situation
self.__handle = self.GetHandle()
self._RenderWindow.Render()
def UpdateRenderer(self, event):
"""
UpdateRenderer will identify the renderer under the mouse and set
up _CurrentRenderer, _CurrentCamera, and _CurrentLight.
"""
x = event.GetX()
y = event.GetY()
windowX, windowY = self._RenderWindow.GetSize()
renderers = self._RenderWindow.GetRenderers()
numRenderers = renderers.GetNumberOfItems()
self._CurrentRenderer = None
renderers.InitTraversal()
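        # Walk the renderer collection and pick the first renderer whose
        # viewport (in normalized 0..1 coordinates, y measured upwards)
        # contains the mouse position.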
for i in range(0,numRenderers):
renderer = renderers.GetNextItem()
vx,vy = (0,0)
if (windowX > 1):
vx = float(x)/(windowX-1)
if (windowY > 1):
vy = (windowY-float(y)-1)/(windowY-1)
(vpxmin,vpymin,vpxmax,vpymax) = renderer.GetViewport()
if (vx >= vpxmin and vx <= vpxmax and
vy >= vpymin and vy <= vpymax):
self._CurrentRenderer = renderer
self._ViewportCenterX = float(windowX)*(vpxmax-vpxmin)/2.0\
+vpxmin
self._ViewportCenterY = float(windowY)*(vpymax-vpymin)/2.0\
+vpymin
self._CurrentCamera = self._CurrentRenderer.GetActiveCamera()
lights = self._CurrentRenderer.GetLights()
lights.InitTraversal()
self._CurrentLight = lights.GetNextItem()
break
self._LastX = x
self._LastY = y
def GetCurrentRenderer(self):
"""Returns the current renderer.
"""
return self._CurrentRenderer
def Rotate(self, event):
"""Rotates the scene (camera).
"""
if self._CurrentRenderer:
x = event.GetX()
y = event.GetY()
self._CurrentCamera.Azimuth(self._LastX - x)
self._CurrentCamera.Elevation(y - self._LastY)
self._CurrentCamera.OrthogonalizeViewUp()
self._LastX = x
self._LastY = y
self._CurrentRenderer.ResetCameraClippingRange()
self.Render()
def Pan(self, event):
"""Pans the scene (camera).
"""
if self._CurrentRenderer:
x = event.GetX()
y = event.GetY()
renderer = self._CurrentRenderer
camera = self._CurrentCamera
(pPoint0,pPoint1,pPoint2) = camera.GetPosition()
(fPoint0,fPoint1,fPoint2) = camera.GetFocalPoint()
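            # Parallel (orthographic) camera: pan in display space by shifting
            # both the focal point and the camera position by the mouse delta,
            # converted back to world coordinates.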
if camera.GetParallelProjection():
renderer.SetWorldPoint(fPoint0,fPoint1,fPoint2,1.0)
renderer.WorldToDisplay()
fx,fy,fz = renderer.GetDisplayPoint()
renderer.SetDisplayPoint(fx-x+self._LastX,
fy+y-self._LastY,
fz)
renderer.DisplayToWorld()
fx,fy,fz,fw = renderer.GetWorldPoint()
camera.SetFocalPoint(fx,fy,fz)
renderer.SetWorldPoint(pPoint0,pPoint1,pPoint2,1.0)
renderer.WorldToDisplay()
fx,fy,fz = renderer.GetDisplayPoint()
renderer.SetDisplayPoint(fx-x+self._LastX,
fy+y-self._LastY,
fz)
renderer.DisplayToWorld()
fx,fy,fz,fw = renderer.GetWorldPoint()
camera.SetPosition(fx,fy,fz)
else:
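                # Perspective camera: project the focal point into display
                # space to find its depth, offset by the mouse delta at that
                # depth, and move the focal point and camera by the same
                # world-space shift.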
(fPoint0,fPoint1,fPoint2) = camera.GetFocalPoint()
# Specify a point location in world coordinates
renderer.SetWorldPoint(fPoint0,fPoint1,fPoint2,1.0)
renderer.WorldToDisplay()
# Convert world point coordinates to display coordinates
dPoint = renderer.GetDisplayPoint()
focalDepth = dPoint[2]
aPoint0 = self._ViewportCenterX + (x - self._LastX)
aPoint1 = self._ViewportCenterY - (y - self._LastY)
renderer.SetDisplayPoint(aPoint0,aPoint1,focalDepth)
renderer.DisplayToWorld()
(rPoint0,rPoint1,rPoint2,rPoint3) = renderer.GetWorldPoint()
if (rPoint3 != 0.0):
rPoint0 = rPoint0/rPoint3
rPoint1 = rPoint1/rPoint3
rPoint2 = rPoint2/rPoint3
camera.SetFocalPoint((fPoint0 - rPoint0) + fPoint0,
(fPoint1 - rPoint1) + fPoint1,
(fPoint2 - rPoint2) + fPoint2)
camera.SetPosition((fPoint0 - rPoint0) + pPoint0,
(fPoint1 - rPoint1) + pPoint1,
(fPoint2 - rPoint2) + pPoint2)
self._LastX = x
self._LastY = y
self.Render()
def Zoom(self, event):
"""Zooms the scene (camera).
"""
if self._CurrentRenderer:
x = event.GetX()
y = event.GetY()
renderer = self._CurrentRenderer
camera = self._CurrentCamera
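            # Exponential zoom: each pixel of vertical mouse motion scales the
            # view by a constant factor, so the zoom feels smooth and is
            # exactly reversible.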
zoomFactor = math.pow(1.02,(0.5*(self._LastY - y)))
self._CurrentZoom = self._CurrentZoom * zoomFactor
if camera.GetParallelProjection():
parallelScale = camera.GetParallelScale()/zoomFactor
camera.SetParallelScale(parallelScale)
else:
camera.Dolly(zoomFactor)
renderer.ResetCameraClippingRange()
self._LastX = x
self._LastY = y
self.Render()
def Reset(self, event=None):
"""Resets the camera.
"""
if self._CurrentRenderer:
self._CurrentRenderer.ResetCamera()
self.Render()
def Wireframe(self):
"""Sets the current actor representation as wireframe.
"""
actors = self._CurrentRenderer.GetActors()
numActors = actors.GetNumberOfItems()
actors.InitTraversal()
for i in range(0,numActors):
actor = actors.GetNextItem()
actor.GetProperty().SetRepresentationToWireframe()
self.Render()
def Surface(self):
"""Sets the current actor representation as surface.
"""
actors = self._CurrentRenderer.GetActors()
numActors = actors.GetNumberOfItems()
actors.InitTraversal()
for i in range(0,numActors):
actor = actors.GetNextItem()
actor.GetProperty().SetRepresentationToSurface()
self.Render()
def PickActor(self, event):
"""Picks an actor.
"""
if self._CurrentRenderer:
x = event.GetX()
y = event.GetY()
renderer = self._CurrentRenderer
picker = self._Picker
windowX, windowY = self._RenderWindow.GetSize()
picker.Pick(x,(windowY - y - 1),0.0,renderer)
actor = picker.GetActor()
if (self._PickedActor != None and
self._PrePickedProperty != None):
self._PickedActor.SetProperty(self._PrePickedProperty)
# release hold of the property
self._PrePickedProperty.UnRegister(self._PrePickedProperty)
self._PrePickedProperty = None
if (actor != None):
self._PickedActor = actor
self._PrePickedProperty = self._PickedActor.GetProperty()
# hold onto the property
self._PrePickedProperty.Register(self._PrePickedProperty)
self._PickedActor.SetProperty(self._PickedProperty)
self.Render()
#----------------------------------------------------------------------------
def wxVTKRenderWindowConeExample():
"""Like it says, just a simple example.
"""
# every wx app needs an app
app = wx.PySimpleApp()
# create the widget
frame = wx.Frame(None, -1, "wxVTKRenderWindow", size=(400,400))
widget = wxVTKRenderWindow(frame, -1)
ren = vtk.vtkRenderer()
widget.GetRenderWindow().AddRenderer(ren)
cone = vtk.vtkConeSource()
cone.SetResolution(8)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren.AddActor(coneActor)
# show the window
frame.Show()
app.MainLoop()
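# A minimal subclassing sketch (illustrative only, not part of the original
# widget): the "Overridable event" hooks above are intended to be customised
# this way.  The class name and the 'c' key binding are arbitrary choices.
class ResetOnKeyRenderWindow(wxVTKRenderWindow):
    def OnChar(self, event):
        # 'c' re-centres the camera on the scene; other keys keep the
        # default handling provided by the base class.
        if event.GetKeyCode() == ord('c'):
            self.Reset()
        else:
            event.Skip()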
if __name__ == "__main__":
wxVTKRenderWindowConeExample()
| 30.43342 | 77 | 0.575026 |
79471da6604539c7e590e8eda7fab5640057d702 | 1,985 | py | Python | src/api/StockService.py | webclinic017/stocksim | bdc658eb7d08b228eb47a92964f474cf33c2f226 | [
"MIT"
] | 3 | 2019-10-16T16:27:06.000Z | 2019-12-30T16:16:32.000Z | src/api/StockService.py | webclinic017/stocksim | bdc658eb7d08b228eb47a92964f474cf33c2f226 | [
"MIT"
] | 20 | 2019-10-10T20:01:54.000Z | 2019-12-10T05:03:12.000Z | src/api/StockService.py | webclinic017/stocksim | bdc658eb7d08b228eb47a92964f474cf33c2f226 | [
"MIT"
] | 2 | 2020-11-11T15:29:26.000Z | 2020-12-02T01:06:33.000Z | import json
from flask import render_template
from flask_restful import Resource, reqparse
from flask_jwt_extended import (create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt)
from controllers.StockController import StockController
from models.assets import Stock
from Resources.MarketProvider import MarketProvider
parser = reqparse.RequestParser()
parser.add_argument("stock_symbol", help="", required=False)
parser.add_argument("user_id", help="", required=False)
parser.add_argument("stock_id", help="", required=False)
# Test function for MarketProvider
class GetStock(Resource):
def post(self):
data = parser.parse_args()
mp = MarketProvider()
stock = mp.getStock(data["stock_symbol"])
controller = StockController()
lastid = controller.insertStock(stock)
return json.loads(stock.to_json())
# Arguments: username, stock symbol
class PurchaseAsset(Resource):
@jwt_required
def post(self):
data = parser.parse_args()
mp = MarketProvider()
stock = mp.getStock(data["stock_symbol"])
# This is gonna be a hefty boi
class WatchListAsset(Resource):
@jwt_required
def post(self):
data = parser.parse_args()
marketProvider = MarketProvider()
        stockController = StockController()
        stock = marketProvider.getStock(data["stock_symbol"])
        return stockController.addWatch(stock, data["user_id"])
class WatchAsset(Resource):
@jwt_required
def post(self):
data = parser.parse_args()
marketProvider = MarketProvider()
stockController = StockController()
stock = marketProvider.getStock(data["stock_symbol"])
return stockController.addWatch(stock, data["user_id"])
class RemoveWatchedAsset(Resource):
@jwt_required
def post(self):
data = parser.parse_args()
stockController = StockController()
return stockController.removeWatch(data["stock_id"], data["user_id"])
| 30.538462 | 147 | 0.713854 |
79471f91c34e8d7ace449201d6c823778ec68a43 | 5,538 | py | Python | setup.py | mos3abof/dir2html | fe8f09a43ebbcf9913e31ea3bf799eefc8654ea8 | [
"MIT"
] | 1 | 2018-11-21T12:55:47.000Z | 2018-11-21T12:55:47.000Z | setup.py | mos3abof/dir2html | fe8f09a43ebbcf9913e31ea3bf799eefc8654ea8 | [
"MIT"
] | 17 | 2018-11-21T12:33:55.000Z | 2018-11-21T14:41:58.000Z | setup.py | mos3abof/dir2html | fe8f09a43ebbcf9913e31ea3bf799eefc8654ea8 | [
"MIT"
] | null | null | null | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import
from io import open
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
name='dir2html', # Required
version='0.2.3', # Required
description='Generate an html album from a given directory', # Optional
long_description=long_description, # Optional
long_description_content_type='text/markdown', # Optional (see note above)
url='https://github.com/mos3abof/dir2html', # Optional
author='Mosab Ibrahim', # Optional
author_email='[email protected]', # Optional
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
# keywords='html album generator', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(exclude=['contrib', 'docs', 'tests']), # Required
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
# install_requires=['peppercorn'], # Optional
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
# extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
package_data={ # Optional
'': [
'resources/*.html',
'resources/assets/*.css',
'resources/assets/*.js',
],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
entry_points={ # Optional
'console_scripts': [
'dir2html=dir2html.__main__:main',
],
},
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
project_urls={ # Optional
'Bug Reports': 'https://github.com/mos3abof/dir2html/issues',
# 'Funding': 'https://donate.pypi.org',
# 'Say Thanks!': 'http://saythanks.io/to/example',
'Source': 'https://github.com/mos3abof/dir2html/',
},
)
| 39.276596 | 89 | 0.665222 |
79472016f82c670ec23409193dda7bbc698e9c52 | 2,657 | py | Python | test/data_processing/base_data_processor_test.py | spencerking/qiskit-experiments | 11a254b010afe35933aaabac70de12b5b5a244bf | [
"Apache-2.0"
] | null | null | null | test/data_processing/base_data_processor_test.py | spencerking/qiskit-experiments | 11a254b010afe35933aaabac70de12b5b5a244bf | [
"Apache-2.0"
] | 1 | 2021-06-01T01:43:52.000Z | 2021-06-01T01:43:52.000Z | test/data_processing/base_data_processor_test.py | spencerking/qiskit-experiments | 11a254b010afe35933aaabac70de12b5b5a244bf | [
"Apache-2.0"
] | 2 | 2021-05-17T10:13:20.000Z | 2021-06-01T01:34:34.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Base class for data processor tests."""
from test.fake_experiment import FakeExperiment
from typing import Any, List
from qiskit.result import Result
from qiskit.test import QiskitTestCase
from qiskit.qobj.common import QobjExperimentHeader
from qiskit.result.models import ExperimentResultData, ExperimentResult
from qiskit_experiments.framework import ExperimentData
class BaseDataProcessorTest(QiskitTestCase):
"""Define some basic setup functionality for data processor tests."""
def setUp(self):
"""Define variables needed for most tests."""
super().setUp()
self.base_result_args = dict(
backend_name="test_backend",
backend_version="1.0.0",
qobj_id="id-123",
job_id="job-123",
success=True,
)
self.header = QobjExperimentHeader(
memory_slots=2,
metadata={"experiment_type": "fake_test_experiment"},
)
def create_experiment(self, iq_data: List[Any], single_shot: bool = False):
"""Populate avg_iq_data to use it for testing.
Args:
iq_data: A List of IQ data.
single_shot: Indicates if the data is single-shot or not.
"""
results = []
if not single_shot:
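            # Averaged data: build one ExperimentResult per circuit with
            # meas_return="avg".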
for circ_data in iq_data:
res = ExperimentResult(
success=True,
meas_level=1,
meas_return="avg",
data=ExperimentResultData(memory=circ_data),
header=self.header,
shots=1024,
)
results.append(res)
else:
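            # Single-shot data: pack all shots into a single ExperimentResult
            # with meas_return="single".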
res = ExperimentResult(
success=True,
meas_level=1,
meas_return="single",
data=ExperimentResultData(memory=iq_data),
header=self.header,
shots=1024,
)
results.append(res)
# pylint: disable=attribute-defined-outside-init
self.iq_experiment = ExperimentData(FakeExperiment())
self.iq_experiment.add_data(Result(results=results, **self.base_result_args))
| 33.632911 | 85 | 0.621001 |
7947205d7b124abc0236cf35ac8f0d0f3f4d2756 | 5,950 | py | Python | test.py | czella/poker-player-theempiredidnothingwrong | fd064967b9ff37c81a22cb22e72ff086a752e158 | [
"MIT"
] | null | null | null | test.py | czella/poker-player-theempiredidnothingwrong | fd064967b9ff37c81a22cb22e72ff086a752e158 | [
"MIT"
] | null | null | null | test.py | czella/poker-player-theempiredidnothingwrong | fd064967b9ff37c81a22cb22e72ff086a752e158 | [
"MIT"
] | null | null | null | import player
import json
testJSon_one_high = json.loads("""{"community_cards": [],
"minimum_raise": 2,
"big_blind": 4,
"orbits": 0,
"in_action": 3,
"bet_index": 2,
"current_buy_in": 4,
"round": 0,
"players": [{"id": 0, "bet": 0, "version": "Pony 1.0.0", "time_used": 0, "stack": 1000, "status": "active", "name": "Bright Pony"},
{"id": 1, "bet": 2, "version": "1.0", "time_used": 0, "stack": 998, "status": "active", "name": "PokerMasters"},
{"id": 2, "bet": 4, "version": "ERROR: Unreachable", "time_used": 0, "stack": 996, "status": "active", "name": "NADagascar"},
{"hole_cards": [{"suit": "hearts", "rank": "4"},
{"suit": "diamonds", "rank": "Q"}],
"bet": 0,
"version": "DS.1.0.0",
"time_used": 0,
"id": 3,
"stack": 1000,
"status": "active",
"name": "TheEmpireDidNothingWrong"},
{"id": 4,
"bet": 0,
"version": "1.0",
"time_used": 0,
"stack": 1000,
"status": "active",
"name": "Hive"},
{"id": 5, "bet": 0, "version": "Gopnik_FM_ver_1.0", "time_used": 0, "stack": 1000, "status": "active", "name": "Gopnik FM"}],
"small_blind": 2,
"game_id": "5c5d4b96a972e80004000021",
"dealer": 0,
"pot": 6,
"tournament_id": "5c38a553b0fea40004000003"}
""")
testJSon_two_high = json.loads("""{"community_cards": [],
"minimum_raise": 2,
"big_blind": 4,
"orbits": 0,
"in_action": 3,
"bet_index": 2,
"current_buy_in": 4,
"round": 0,
"players": [{"id": 0, "bet": 0, "version": "Pony 1.0.0", "time_used": 0, "stack": 1000, "status": "out", "name": "Bright Pony"},
{"id": 1, "bet": 2, "version": "1.0", "time_used": 0, "stack": 998, "status": "active", "name": "PokerMasters"},
{"id": 2, "bet": 4, "version": "ERROR: Unreachable", "time_used": 0, "stack": 996, "status": "active", "name": "NADagascar"},
{"hole_cards": [{"suit": "hearts", "rank": "J"},
{"suit": "diamonds", "rank": "Q"}],
"bet": 0,
"version": "DS.1.0.0",
"time_used": 0,
"id": 3,
"stack": 1000,
"status": "active",
"name": "TheEmpireDidNothingWrong"},
{"id": 4,
"bet": 0,
"version": "1.0",
"time_used": 0,
"stack": 1000,
"status": "active",
"name": "Hive"},
{"id": 5, "bet": 0, "version": "Gopnik_FM_ver_1.0", "time_used": 0, "stack": 1000, "status": "active", "name": "Gopnik FM"}],
"small_blind": 2,
"game_id": "5c5d4b96a972e80004000021",
"dealer": 0,
"pot": 6,
"tournament_id": "5c38a553b0fea40004000003"}
""")
testJSon_three_out_one_high = json.loads("""{"community_cards": [],
"minimum_raise": 2,
"big_blind": 4,
"orbits": 0,
"in_action": 3,
"bet_index": 2,
"current_buy_in": 4,
"round": 0,
"players": [{"id": 0, "bet": 0, "version": "Pony 1.0.0", "time_used": 0, "stack": 1000, "status": "out", "name": "Bright Pony"},
{"id": 1, "bet": 2, "version": "1.0", "time_used": 0, "stack": 998, "status": "out", "name": "PokerMasters"},
{"id": 2, "bet": 4, "version": "ERROR: Unreachable", "time_used": 0, "stack": 996, "status": "out", "name": "NADagascar"},
{"hole_cards": [{"suit": "hearts", "rank": "2"},
{"suit": "diamonds", "rank": "Q"}],
"bet": 0,
"version": "DS.1.0.0",
"time_used": 0,
"id": 3,
"stack": 1000,
"status": "active",
"name": "TheEmpireDidNothingWrong"},
{"id": 4,
"bet": 0,
"version": "1.0",
"time_used": 0,
"stack": 1000,
"status": "active",
"name": "Hive"},
{"id": 5, "bet": 0, "version": "Gopnik_FM_ver_1.0", "time_used": 0, "stack": 1000, "status": "active", "name": "Gopnik FM"}],
"small_blind": 2,
"game_id": "5c5d4b96a972e80004000021",
"dealer": 0,
"pot": 6,
"tournament_id": "5c38a553b0fea40004000003"}
""")
testJSon_three_out_two_high = json.loads("""{"community_cards": [],
"minimum_raise": 2,
"big_blind": 4,
"orbits": 0,
"in_action": 3,
"bet_index": 2,
"current_buy_in": 4,
"round": 0,
"players": [{"id": 0, "bet": 0, "version": "Pony 1.0.0", "time_used": 0, "stack": 1000, "status": "out", "name": "Bright Pony"},
{"id": 1, "bet": 2, "version": "1.0", "time_used": 0, "stack": 998, "status": "out", "name": "PokerMasters"},
{"id": 2, "bet": 4, "version": "ERROR: Unreachable", "time_used": 0, "stack": 996, "status": "out", "name": "NADagascar"},
{"hole_cards": [{"suit": "hearts", "rank": "Q"},
{"suit": "diamonds", "rank": "Q"}],
"bet": 0,
"version": "DS.1.0.0",
"time_used": 0,
"id": 3,
"stack": 1000,
"status": "active",
"name": "TheEmpireDidNothingWrong"},
{"id": 4,
"bet": 0,
"version": "1.0",
"time_used": 0,
"stack": 1000,
"status": "active",
"name": "Hive"},
{"id": 5, "bet": 0, "version": "Gopnik_FM_ver_1.0", "time_used": 0, "stack": 1000, "status": "active", "name": "Gopnik FM"}],
"small_blind": 2,
"game_id": "5c5d4b96a972e80004000021",
"dealer": 0,
"pot": 6,
"tournament_id": "5c38a553b0fea40004000003"}
""")
if __name__ == "__main__":
player = player.Player()
print(player.betRequest(testJSon_one_high))
print(player.betRequest(testJSon_two_high))
print(player.betRequest(testJSon_three_out_one_high))
print(player.betRequest(testJSon_three_out_two_high))
| 39.666667 | 138 | 0.482857 |
7947227eb14292f59667dcd8dae977259e513856 | 4,933 | py | Python | scripts/pyqtgraph-develop/examples/__main__.py | kuldeepaman/tf-pose | 8050912c52a7b4f3c8a2656f267d47ba21d093f6 | [
"Apache-2.0"
] | null | null | null | scripts/pyqtgraph-develop/examples/__main__.py | kuldeepaman/tf-pose | 8050912c52a7b4f3c8a2656f267d47ba21d093f6 | [
"Apache-2.0"
] | null | null | null | scripts/pyqtgraph-develop/examples/__main__.py | kuldeepaman/tf-pose | 8050912c52a7b4f3c8a2656f267d47ba21d093f6 | [
"Apache-2.0"
] | null | null | null | import sys, os
if __name__ == "__main__" and (__package__ is None or __package__==''):
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parent_dir)
import examples
__package__ = "examples"
import pyqtgraph as pg
import subprocess
from pyqtgraph.python2_3 import basestring
from pyqtgraph.Qt import QtGui, USE_PYSIDE, USE_PYQT5
from .utils import buildFileList, testFile, path, examples
if USE_PYSIDE:
from .exampleLoaderTemplate_pyside import Ui_Form
elif USE_PYQT5:
from .exampleLoaderTemplate_pyqt5 import Ui_Form
else:
from .exampleLoaderTemplate_pyqt import Ui_Form
class ExampleLoader(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.ui = Ui_Form()
self.cw = QtGui.QWidget()
self.setCentralWidget(self.cw)
self.ui.setupUi(self.cw)
self.codeBtn = QtGui.QPushButton('Run Edited Code')
self.codeLayout = QtGui.QGridLayout()
self.ui.codeView.setLayout(self.codeLayout)
self.codeLayout.addItem(QtGui.QSpacerItem(100,100,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding), 0, 0)
self.codeLayout.addWidget(self.codeBtn, 1, 1)
self.codeBtn.hide()
global examples
self.itemCache = []
self.populateTree(self.ui.exampleTree.invisibleRootItem(), examples)
self.ui.exampleTree.expandAll()
self.resize(1000,500)
self.show()
self.ui.splitter.setSizes([250,750])
self.ui.loadBtn.clicked.connect(self.loadFile)
self.ui.exampleTree.currentItemChanged.connect(self.showFile)
self.ui.exampleTree.itemDoubleClicked.connect(self.loadFile)
self.ui.codeView.textChanged.connect(self.codeEdited)
self.codeBtn.clicked.connect(self.runEditedCode)
def populateTree(self, root, examples):
for key, val in examples.items():
item = QtGui.QTreeWidgetItem([key])
self.itemCache.append(item) # PyQt 4.9.6 no longer keeps references to these wrappers,
# so we need to make an explicit reference or else the .file
# attribute will disappear.
if isinstance(val, basestring):
item.file = val
else:
self.populateTree(item, val)
root.addChild(item)
def currentFile(self):
item = self.ui.exampleTree.currentItem()
if hasattr(item, 'file'):
global path
return os.path.join(path, item.file)
return None
def loadFile(self, edited=False):
extra = []
qtLib = str(self.ui.qtLibCombo.currentText())
gfxSys = str(self.ui.graphicsSystemCombo.currentText())
if qtLib != 'default':
extra.append(qtLib.lower())
elif gfxSys != 'default':
extra.append(gfxSys)
if edited:
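            # Run the edited text by piping it to a fresh Python process, so a
            # crash in user-modified example code cannot take down the loader.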
path = os.path.abspath(os.path.dirname(__file__))
proc = subprocess.Popen([sys.executable, '-'] + extra, stdin=subprocess.PIPE, cwd=path)
code = str(self.ui.codeView.toPlainText()).encode('UTF-8')
proc.stdin.write(code)
proc.stdin.close()
else:
fn = self.currentFile()
if fn is None:
return
if sys.platform.startswith('win'):
os.spawnl(os.P_NOWAIT, sys.executable, '"'+sys.executable+'"', '"' + fn + '"', *extra)
else:
os.spawnl(os.P_NOWAIT, sys.executable, sys.executable, fn, *extra)
def showFile(self):
fn = self.currentFile()
if fn is None:
self.ui.codeView.clear()
return
if os.path.isdir(fn):
fn = os.path.join(fn, '__main__.py')
text = open(fn).read()
self.ui.codeView.setPlainText(text)
self.ui.loadedFileLabel.setText(fn)
self.codeBtn.hide()
def codeEdited(self):
self.codeBtn.show()
def runEditedCode(self):
self.loadFile(edited=True)
def run():
app = QtGui.QApplication([])
loader = ExampleLoader()
app.exec_()
if __name__ == '__main__':
args = sys.argv[1:]
if '--test' in args:
# get rid of orphaned cache files first
pg.renamePyc(path)
files = buildFileList(examples)
if '--pyside' in args:
lib = 'PySide'
elif '--pyqt' in args or '--pyqt4' in args:
lib = 'PyQt4'
elif '--pyqt5' in args:
lib = 'PyQt5'
else:
lib = ''
exe = sys.executable
print("Running tests:", lib, sys.executable)
for f in files:
testFile(f[0], f[1], exe, lib)
else:
run()
| 34.256944 | 122 | 0.579769 |
794722eb5e6a8a8c2eaeba7de981d8963f81ab68 | 1,907 | py | Python | practiceCR/restAPI/models.py | faisaladisoe/learn-python-rest-API | d597f38eea4aff54889c241303df5136dc9278ae | [
"MIT"
] | null | null | null | practiceCR/restAPI/models.py | faisaladisoe/learn-python-rest-API | d597f38eea4aff54889c241303df5136dc9278ae | [
"MIT"
] | null | null | null | practiceCR/restAPI/models.py | faisaladisoe/learn-python-rest-API | d597f38eea4aff54889c241303df5136dc9278ae | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Biodata(models.Model):
nickname = models.CharField(max_length = 20, null = True)
fullname = models.CharField(max_length = 100, null = True)
CLASSROOM_CHOICES = (
('12 IPA 1', '12 IPA 1'),
('12 IPA 2', '12 IPA 2'),
('12 IPA 3', '12 IPA 3'),
('12 IPA 4', '12 IPA 4'),
('12 IPA 5', '12 IPA 5'),
('12 IPA 6', '12 IPA 6'),
('12 IPA 7', '12 IPA 7'),
('12 IPA 8', '12 IPA 8'),
('12 IPS', '12 IPS'),
)
classroom = models.CharField(
max_length = 10,
choices = CLASSROOM_CHOICES,
default = '12 IPA 1',
null = True
)
randomThought = models.CharField(max_length = 100, null = True)
email = models.EmailField(max_length = 100, null = True)
instagram = models.CharField(max_length = 50, null = True)
linkedin = models.CharField(max_length = 50, null = True)
def __str__(self):
return self.fullname
class Layouting(models.Model):
search = models.CharField(max_length = 50, null = True)
GROUP_CHOICES = (
('All', 'All'),
('12 IPA 1', '12 IPA 1'),
('12 IPA 2', '12 IPA 2'),
('12 IPA 3', '12 IPA 3'),
('12 IPA 4', '12 IPA 4'),
('12 IPA 5', '12 IPA 5'),
('12 IPA 6', '12 IPA 6'),
('12 IPA 7', '12 IPA 7'),
('12 IPA 8', '12 IPA 8'),
('12 IPS', '12 IPS'),
)
groupby = models.CharField(
max_length = 10,
choices = GROUP_CHOICES,
default = 'All',
null = True
)
SORTING_CHOICES = (
('Ascending', 'Ascending'),
('Descending', 'Descending'),
)
sorting = models.CharField(
max_length = 10,
choices = SORTING_CHOICES,
default = 'Ascending',
null = True
) | 31.262295 | 68 | 0.505506 |
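# Illustrative query sketch (not part of the original models): field names as
# defined above, values are examples only.
#
#   Biodata.objects.filter(classroom="12 IPA 1").order_by("fullname")
#   Layouting.objects.create(search="Adi", groupby="All", sorting="Ascending")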
79472384747a0ca864b420aaaf702e46b4c64004 | 6,191 | py | Python | src/test_workflow/integ_test/integ_test_suite.py | VijayanB/opensearch-build | c84c06cffee2396360c0ae8f41f0027982e0b2d8 | [
"Apache-2.0"
] | null | null | null | src/test_workflow/integ_test/integ_test_suite.py | VijayanB/opensearch-build | c84c06cffee2396360c0ae8f41f0027982e0b2d8 | [
"Apache-2.0"
] | null | null | null | src/test_workflow/integ_test/integ_test_suite.py | VijayanB/opensearch-build | c84c06cffee2396360c0ae8f41f0027982e0b2d8 | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import glob
import logging
import os
from git.git_repository import GitRepository
from paths.script_finder import ScriptFinder
from paths.tree_walker import walk
from system.execute import execute
from test_workflow.dependency_installer import DependencyInstaller
from test_workflow.integ_test.local_test_cluster import LocalTestCluster
from test_workflow.test_recorder.test_result_data import TestResultData
from test_workflow.test_result.test_component_results import \
TestComponentResults
from test_workflow.test_result.test_result import TestResult
class IntegTestSuite:
"""
    Kicks off integration tests for a component based on test configurations provided in
test_support_matrix.yml
"""
def __init__(
self,
component,
test_config,
bundle_manifest,
build_manifest,
work_dir,
s3_bucket_name,
test_recorder
):
self.component = component
self.bundle_manifest = bundle_manifest
self.build_manifest = build_manifest
self.work_dir = work_dir
self.test_config = test_config
self.s3_bucket_name = s3_bucket_name
self.additional_cluster_config = None
self.test_recorder = test_recorder
self.repo = GitRepository(
self.component.repository,
self.component.commit_id,
os.path.join(self.work_dir, self.component.name),
test_config.working_directory
)
self.save_logs = test_recorder.test_results_logs
def execute(self):
test_results = TestComponentResults()
self.__install_build_dependencies()
for config in self.test_config.integ_test["test-configs"]:
status = self.__setup_cluster_and_execute_test_config(config)
test_results.append(TestResult(self.component.name, config, status))
return test_results
def __install_build_dependencies(self):
if "build-dependencies" in self.test_config.integ_test:
dependency_list = self.test_config.integ_test["build-dependencies"]
if len(dependency_list) == 1 and "job-scheduler" in dependency_list:
self.__copy_job_scheduler_artifact()
else:
raise InvalidTestConfigError(
"Integration test job only supports job-scheduler build dependency at present."
)
def __copy_job_scheduler_artifact(self):
custom_local_path = os.path.join(
self.repo.dir, "src/test/resources/job-scheduler"
)
for file in glob.glob(
os.path.join(custom_local_path, "opensearch-job-scheduler-*.zip")
):
os.unlink(file)
job_scheduler = self.build_manifest.get_component("job-scheduler")
DependencyInstaller(self.build_manifest.build).install_build_dependencies(
{"opensearch-job-scheduler": job_scheduler.version}, custom_local_path
)
@staticmethod
def __is_security_enabled(config):
if config in ["with-security", "without-security"]:
return True if config == "with-security" else False
else:
raise InvalidTestConfigError("Unsupported test config: " + config)
def __setup_cluster_and_execute_test_config(self, config):
security = self.__is_security_enabled(config)
if "additional-cluster-configs" in self.test_config.integ_test.keys():
self.additional_cluster_config = self.test_config.integ_test.get(
"additional-cluster-configs"
)
logging.info(f"Additional config found: {self.additional_cluster_config}")
with LocalTestCluster.create(
self.work_dir,
self.component.name,
self.additional_cluster_config,
self.bundle_manifest,
security,
config,
self.test_recorder,
self.s3_bucket_name) as (test_cluster_endpoint, test_cluster_port):
self.__pretty_print_message(
"Running integration tests for " + self.component.name
)
os.chdir(self.work_dir)
return self.__execute_integtest_sh(
test_cluster_endpoint, test_cluster_port, security, config
)
def __execute_integtest_sh(self, endpoint, port, security, test_config):
script = ScriptFinder.find_integ_test_script(
self.component.name, self.repo.working_directory
)
if os.path.exists(script):
cmd = f"{script} -b {endpoint} -p {port} -s {str(security).lower()} -v {self.bundle_manifest.build.version}"
work_dir = (
os.path.join(self.repo.dir, self.test_config.working_directory)
if self.test_config.working_directory is not None
else self.repo.dir
)
(status, stdout, stderr) = execute(cmd, work_dir, True, False)
results_dir = os.path.join(
work_dir, "build", "reports", "tests", "integTest"
)
test_result_data = TestResultData(self.component.name, test_config, status, stdout, stderr,
walk(results_dir))
self.save_logs.save_test_result_data(test_result_data)
if stderr:
logging.info(
"Integration test run failed for component " + self.component.name
)
logging.info(stderr)
return status
else:
logging.info(
f"{script} does not exist. Skipping integ tests for {self.name}"
)
@staticmethod
def __pretty_print_message(message):
logging.info("===============================================")
logging.info(message)
logging.info("===============================================")
class InvalidTestConfigError(Exception):
pass
| 39.941935 | 120 | 0.633177 |
794723b171c2cb1868e1c2891e709109b0f92ddf | 3,238 | py | Python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2019_01_01/_configuration.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2019_01_01/_configuration.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2019_01_01/_configuration.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class PolicyClientConfiguration(Configuration):
"""Configuration for PolicyClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(PolicyClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2019-01-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| 44.972222 | 129 | 0.68252 |
7947242267b9a3c24f97e667faebe3d047946bcd | 2,263 | py | Python | src/emoji_prediction/config/ABCDM_config.py | Ehsan-Tavan/Emoji-Prediction | f79e19ac3b975b7d43bb1971f12afeffda1a072d | [
"MIT"
] | 1 | 2020-11-11T04:35:33.000Z | 2020-11-11T04:35:33.000Z | src/emoji_prediction/config/ABCDM_config.py | Ehsan-Tavan/Emoji-Prediction | f79e19ac3b975b7d43bb1971f12afeffda1a072d | [
"MIT"
] | null | null | null | src/emoji_prediction/config/ABCDM_config.py | Ehsan-Tavan/Emoji-Prediction | f79e19ac3b975b7d43bb1971f12afeffda1a072d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable-msg=no-member
"""
ABCDM_config.py is a module for ABCDM model config
"""
import torch
__author__ = "Ehsan Tavan"
__project__ = "Persian Emoji Prediction"
__credits__ = ["Ehsan Tavan"]
__license__ = "Public Domain"
__version__ = "1.0.0"
__maintainer__ = "Ehsan Tavan"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "01/11/2021"
RAW_NO_MENTION_DATA_PATH = "../data/Raw/first_categori_no_mention_tweets.csv"
RAW_DATA_PATH = "../data/Raw/first_categori_tweets.csv"
TRAIN_NORMAL_NO_MENTION_DATA_PATH = "../data/Processed/" \
"train_first_categori_no_mention_tweets_normal.csv"
TEST_NORMAL_NO_MENTION_DATA_PATH = "../data/Processed/" \
"test_first_categori_no_mention_tweets_normal.csv"
TRAIN_NORMAL_DATA_PATH = "../data/Processed/" \
"train_first_categori_tweets_normal_5.csv"
TEST_NORMAL_DATA_PATH = "../data/Processed/" \
"train_first_categori_tweets_normal_5.csv"
VALIDATION_NORMAL_DATA_PATH = "../data/Processed/" \
"train_first_categori_tweets_normal_5.csv"
GLOVE_NEWS_300D = "../data/Embeddings/news_glove_300d_e10.txt"
SKIPGRAM_NEWS_300D = "../data/Embeddings/skipgram_news_300d_30e.txt"
CBOW_NEWS_300D = "../data/Embeddings/cbow_news_300d_30e.txt"
EMOTION_EMBEDDING_PATH = "../data/Embeddings/word_emotion_dict.pkl"
LOSS_CURVE_PATH = "../models/tmp/Curves/loss_curve.png"
ACC_CURVE_PATH = "../models/tmp/Curves/accuracy_curve.png"
TEXT_FIELD_PATH = "../models/tmp/Fields/text_field"
LABEL_FIELD_PATH = "../models/tmp/Fields/label_field"
LOG_PATH = "../models/tmp/Logs/log.txt"
TEST_AUG_LOG_PATH = "../models/tmp/Logs/test_aug_log.txt"
MODEL_PATH = "../models/tmp/"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
N_EPOCHS = 10
BATCH_SIZE = 512
EMBEDDING_DIM = 300
EMOTION_EMBEDDING_DIM = 10
LSTM_HIDDEN_DIM = 128
LSTM_LAYERS = 1
BIDIRECTIONAL = True
DROPOUT = 0.2
N_FILTERS = 32
FILTER_SIZE = [4, 6]
DENSE_UNITS = 64
MIN_FREQ = 20
MAX_LENGTH = None
ADDING_NOISE = False
LR_DECAY = False
TRAIN_AUGMENTATION = False
TEST_AUGMENTATION = True
USE_EMOTION = False
| 31.873239 | 87 | 0.714538 |
7947278a711fa0097a7a51f1aa722134292f1353 | 27,513 | py | Python | nrm_analysis/misctools/mask_definitions.py | vandalt/ImPlaneIA | 72b22e487ef45a8a665e4a6a88a91e99e382fdd0 | [
"BSD-3-Clause"
] | null | null | null | nrm_analysis/misctools/mask_definitions.py | vandalt/ImPlaneIA | 72b22e487ef45a8a665e4a6a88a91e99e382fdd0 | [
"BSD-3-Clause"
] | null | null | null | nrm_analysis/misctools/mask_definitions.py | vandalt/ImPlaneIA | 72b22e487ef45a8a665e4a6a88a91e99e382fdd0 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
from __future__ import print_function
import numpy as np
import math
import sys
import time
from nrm_analysis.misctools.utils import makedisk, rotate2dccw
from astropy.io import fits
from copy import copy
"""
================
NRM_mask_definitions
================
A module defining mask geometry in pupil space.
Mask names (str):
* gpi_g10s40
* jwst_g7s6c
* jwst_g7s6
* p1640
* keck_nirc2
* pharo
Code by
Alexandra Greenbaum <[email protected]> and
Anand Sivaramakrishnan <[email protected]>
Dec 2012
"""
m = 1.0
mm = 1.0e-3 * m
um = 1.0e-6 * m
class NRM_mask_definitions():
def __init__(self, maskname=None, rotdeg=None, holeshape="circ", rescale=False,\
verbose=False, chooseholes=None):
if verbose: print("NRM_mask_definitions(maskname,...:" + maskname)
if maskname not in ["gpi_g10s40", "jwst_g7s6", "jwst_g7s6c", "visir_sam", \
"p1640", "keck_nirc2", "pharo", "NIRC2_9NRM"]:
raise ValueError("mask not supported")
if holeshape == None:
holeshape = 'circ'
if verbose: print(holeshape)
if holeshape not in ["circ", "hex",]:
raise ValueError("Unsupported mask holeshape" + maskname)
self.maskname = maskname
if verbose:
print("\n\t=====================================")
print("Mask being created" + self.maskname)
if self.maskname == "gpi_g10s40":
self.hdia, self.ctrs = gpi_g10s40(rescale=rescale)
#self.rotdeg = 115.0 # By inspection of I&T data Dec 2012
#if rotdeg is not None:
# self.rotdeg = rotdeg
# print("rotating by {0} deg".format(rotdeg))
#else:
# print("rotating by 115.0 deg -- hard coded.")
#self.ctrs = rotate2dccw(self.ctrs, self.rotdeg*np.pi/180.0)
#self.rotate = self.rotdeg
self.activeD = self.showmask() # calculates circle dia including all holes
self.OD = 7770.1 * mm # DGS = 7770.1 * mm with M2 cardboard baffle
# GS OD from GPI fundamental values
self.ID = 1023.75 * mm # Formed by M2 diameter
# (Bauman GPI_Optical_Fundamental_Values.doc)
elif self.maskname == "jwst_g7s6c":
""" activeD and D taken from webbpsf-data/NIRISS/coronagraph/MASK_NRM.fits"""
if verbose: print('self.maskname = "jwst_g7s6c"')
self.hdia, self.ctrs = jwst_g7s6c(chooseholes=chooseholes) #
self.activeD = 6.559*m # webbpsf kwd DIAM - not a 'circle including all holes'
self.OD = 6.610645669291339*m # Full pupil file size, incl padding, webbpsf kwd PUPLDIAM
if rotdeg is not None:
self.rotdeg = rotdeg
elif self.maskname == "jwst_g7s6":
print("\tnot finished")
elif self.maskname == "visir_sam":
"""Mask dimensions from Eric Pantin"""
self.hdia, self.ctrs = visir_sam(rescale=rescale)
self.rotdeg = 16.5 # By inspection of data
if rotdeg is not None:
self.rotdeg += rotdeg
print("rotating by {0} + 9 (hardcoded) deg".format(rotdeg))
else:
print("rotating by 9 deg -- hard coded")
self.ctrs = rotate2dccw(self.ctrs, self.rotdeg*np.pi/180.0)
self.rotate = self.rotdeg
self.activeD = self.showmask() # calculated circle dia including all holes
self.OD = 8115.0 * mm # DVLT = 8115.0 * mm -- but what is the M2 dia??
# From Eric Pantin Document
self.ID = 1000.0 * mm # Don't know this, but this number shouldn't matter
elif self.maskname == "NIRC2_9NRM":
"""Mask dimensions from Steph Sallum?"""
self.hdia, self.ctrs = keck_nrm()
self.rotdeg = 28.0 # By inspection of data
if rotdeg is not None:
self.rotdeg += rotdeg
print("rotating by {0} + 9 (hardcoded) deg".format(rotdeg))
else:
print("rotating by 9 deg -- hard coded")
self.ctrs = rotate2dccw(self.ctrs, self.rotdeg*np.pi/180.0)
self.rotate = self.rotdeg
self.activeD = self.showmask() # calculated circle dia including all holes
self.OD = 10.0 # DVLT = 8115.0 * mm -- but what is the M2 dia??
# From Eric Pantin Document
self.ID = 1.0 # Don't know this, but this number shouldn't matter
else:
print("\tmask_definitions: Unknown maskname: check back later")
# make image at angular pixel scale, at given wavelength/bandpass
# choose pupil pixel scale
# image oversampling before rebinning
# possibly put in OPD
def get_scale(self, band='Hband', date='2013_Jan15', slc=0, fldr = ''):
        # right now the file covers J through H; can we somehow specify the band and use that to get this info by just putting in a slice?
if band == 'Hband':
start = 0
end = 37
elif band == 'Yband':
start = 37
end = 74
elif band == 'Jband':
start = 74
end = None
else:
print('Must specify valid band: Yband, Jband, or Hband')
fh='wlcorrection_'
info = np.loadtxt(fldr+fh+date+'.txt')
print('Returning scaled wavelengths from '+date)
return info[start:end, 2]
def get_rotation(self, band = 'Hband', date='2013_Jan15', slc=None, fldr=''):
if band == 'Hband':
start = 0
end = 37
elif band == 'Yband':
start = 37
end = 74
elif band == 'Jband':
start = 74
end = None
else:
print('Must specify valid band: Yband, Jband, or Hband')
fh='rotcorrection_'
info = np.loadtxt(fldr+fh+date+'.txt')
return info[start:end,1]
def createpupilarray(self, puplscal=None, fitsfile=None):
pupil = np.zeros((int(np.ceil(self.OD/puplscal)), int(np.ceil(self.OD/puplscal))))
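        # The pupil is an annulus: a filled disk of the outer (primary)
        # diameter minus a filled disk of the inner (secondary) diameter,
        # both centred on the array.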
pupil = pupil + \
makedisk(s=pupil.shape, c=(pupil.shape[0]/2.0 - 0.5, pupil.shape[1]/2.0 - 0.5),
r=0.5*self.OD/puplscal, t=np.float64, grey=0) - \
makedisk(s=pupil.shape, c=(pupil.shape[0]/2.0 - 0.5, pupil.shape[1]/2.0 - 0.5),
r=0.5*self.ID/puplscal, t=np.float64, grey=0)
hdu = fits.PrimaryHDU ()
hdu.data = pupil.astype(np.uint8)
hdu.header.update("PUPLSCAL", puplscal, "Pupil pixel scale in m/pixels DL")
hdu.header.update("PIXSCALE", puplscal, "Pupil pixel scale in m/pixels MDP")
hdu.header.update("PUPLDIAM", self.OD, "Full pupil file size, incl padding in m")
hdu.header.update("DIAM", self.activeD, "Active pupil diameter in m") # changed from OD - AS Feb 13
hdu.header.update("ROTATE", self.rotdeg, "Mask counterclockwise rotation (deg)")
if fitsfile is not None:
hdu.writeto(fitsfile, clobber=True)
self.fullpupil = pupil.copy()
self.fullpuplscale = puplscal
hdulist = fits.HDUList([hdu])
return hdulist
def createnrmarray(self, puplscal=None, fitsfile=None, holeid=None, fullpupil=False):
""" fullpupil is a possibly oversized array, in meters using puplscal """
if fullpupil:
D = self.OD # array side size, m
else:
D = self.activeD # light-transmitting diameter, m
pupil = np.zeros((int(np.ceil(D/puplscal)), int(np.ceil(D/puplscal))))
print("creating pupil array with shape ", pupil.shape)
factor=1
#modify to add hex holes later
for ctrn, ctr in enumerate(self.ctrs):
if holeid:
factor = ctrn +1
# convert to zero-at-corner, meters
center = (0.5*pupil.shape[0] + ctr[0]/puplscal - 0.5, 0.5*pupil.shape[1] + ctr[1]/puplscal - 0.5)
pupil = pupil + \
makedisk(s=pupil.shape, c=center,
r=0.5*self.hdia/puplscal, t=np.float64, grey=0)* factor
self.nrmpupil = pupil.copy()
self.puplscale = puplscal
hdu = fits.PrimaryHDU ()
hdu.data = pupil.astype(np.uint8)
hdu.header.update("PUPLSCAL", puplscal, "Pupil pixel scale in m/pixels MDP")
hdu.header.update("PIXSCALE", puplscal, "Pupil pixel scale in m/pixels DL")
hdu.header.update("PUPLDIAM", D, "Full pupil file size, incl padding in m")
hdu.header.update("DIAM", self.activeD, "Active pupil diameter in m")
if hasattr(self, 'rotate'):
hdu.header.update("ROTATE", self.rotdeg, "Mask counterclockwise rotation (deg)")
(year, month, day, hour, minute, second, weekday, DOY, DST) = time.gmtime()
hdu.header.update("CODESRC", "NRM_mask_definitions.py", "Anand S. and Alex G.")
hdu.header.update("DATE", "%4d-%02d-%02dT%02d:%02d:%02d" % \
(year, month, day, hour, minute, second), "Date of calculation")
if fitsfile is not None:
hdu.writeto(fitsfile, clobber=True)
hdulist = fits.HDUList([hdu])
return hdulist
def showmask(self):
"""
prints mask geometry,
returns diameter of smallest centered circle (D) enclosing live mask area
"""
print("\t%s" % self.maskname)
print("\tholeD\t%+6.3f" % self.hdia)
print("\t\t x/m \t y/m r/m r+h_rad/m 2(r+h)/m")
radii = []
for ctr in self.ctrs:
print("\t\t%+7.3f\t%+7.3f" % (ctr[0], -1.0*ctr[1]), end=' ')
radii.append(math.sqrt(ctr[0]*ctr[0] + ctr[1]*ctr[1]))
print(" %.3f " % radii[-1], end=' ')
print(" %.3f " % (radii[-1] + self.hdia/2.0), end=' ')
print(" %.3f " % (2.0*radii[-1] + self.hdia))
print("\t2X max (r+h) \t%.3f m" % (2.0*(max(radii) + 0.5*self.hdia)))
print()
return 2.0*(max(radii) + 0.5*self.hdia)
"""
[email protected]:172 ./gpipoppy.py
All X,Y dimensions in m from center of mask
All hole DIAMETERS are in m:
G10S40lenox holeD +0.596
x/m y/m r/m r+h_rad/m 2(r+h)/m
-0.687 +2.514 2.606 2.904 5.808
-0.252 +3.362 3.372 3.670 7.340
+1.822 +0.157 1.829 2.127 4.254
+2.342 -0.644 2.429 2.727 5.453
-2.862 -1.733 3.346 3.644 7.287
-0.869 -3.288 3.401 3.699 7.398
-3.026 +1.568 3.408 3.706 7.411
+2.692 +1.855 3.269 3.567 7.134
+3.297 -0.596 3.350 3.648 7.297
+1.036 -3.192 3.356 3.654 7.308
2X max (r+h) 7.411 m
"""
"[email protected] Feb 2011"
"""
Barnaby Norris <[email protected]>
to Peter Tuthill <[email protected]>
cc Anand Sivaramakrishnan <[email protected]>,
James Lloyd <[email protected]>,
"[email protected]" <[email protected]>,
"[email protected]" <[email protected]>,
"[email protected]" <[email protected]>,
Laurent Pueyo <[email protected]>,
David Lafreniere <[email protected]>
date Tue, Feb 8, 2011 at 6:24 PM
Hi Everyone,
I've been working on the mask designs for Peter, and we've whittled
down the various mask solutions to a 10, 12 and 15 hole mask -
diagrams and the corresponding Fourier coverage are attached. I've
used the diagram Anand sent (GPI_LyotBadActuators.png) as a guide and
avoided the bad actuators by the size of the blacked-out circles in
that diagram, which corresponds to 2 actuator spacings away (these
obstructions are shown in green on the diagrams).. The measurements
are in mm and based on the inside of the black filled area (in Anand's
png file) being the Lyot stop outer diameter (ie 9.532mm). The holes
are sized based on a slight relaxation of strict non-redundancy, with
the hole radius being 0.33 times the minimum baseline.
The measurements are included below - please let me know if you need
anything else.
Cheers
Barnaby
GPI 10 hole mask, soln 40
X Position Y Position
-0.580002 -3.13987
0.00000 -4.14446
2.32001 -0.126087
2.90001 0.878506
-3.48001 1.88310
-1.16000 3.89229
-3.48001 -2.13527
3.48001 -2.13527
4.06002 0.878506
1.16000 3.89229
Hole Radius: 0.382802
"""
# go from Barnaby's first LS design to cut-metal-coords in GPI PPM
# mag is slight expansion (~0.5%) then 1.2 mag factor to PPM
def gpi_g10s40_asmanufactured(mag):
""" In PPM metal space - measured cooordinates from sample G10S40"""
holedia = 0.920*mm
holectrs = [
[-1.061*mm, -3.882*mm],
[-0.389*mm, -5.192*mm],
[ 2.814*mm, -0.243*mm],
[ 3.616*mm, 0.995*mm],
[-4.419*mm, 2.676*mm],
[-1.342*mm, 5.077*mm],
[-4.672*mm, -2.421*mm],
[ 4.157*mm, -2.864*mm],
[ 5.091*mm, 0.920*mm],
[ 1.599*mm, 4.929*mm],
]
# design2metal mag
holedia = holedia * mag
print(mag)
ctrs = []
#REVERSE = -1 # Flip y dimensions to match I&T data Dec 2012
REVERSE = 1 # With new affine updates, need to reverse the reverse March 2019
for r in holectrs:
ctrs.append([r[0]*mag, r[1]*mag*REVERSE])
# return cut-the-metal coords per Lenox PPM mm spec in meters on PM
ctrs_asbuilt = copy(ctrs)
# March 2019:
# Rotate hole centers by 90 deg to match GPI detector with new coordinate handling
# no affine2d transformations 8/2018 AS
ctrs_asbuilt = rotate2dccw(ctrs_asbuilt, 160*np.pi/180.0) # overwrites attributes
return holedia, ctrs_asbuilt
def gpi_mag_asdesigned():
#logging.basicConfig(level=logging.DEBUG,format='%(name)-10s: %(levelname)-8s %(message)s')
datapath='/Users/anand/data/NRM/GPI/'
"""
returns demag (dimensionless)
pupil dimensions * demag gives manufactured dimensions
"""
"""
COR-SATR...
The coronagraph module shall achieve the specifications at an inner working distance
of 4lam/D (goal 3.5 lam/D), where D is the telescope entrance diameter of 7.7701 m.
"""
DGN = 7908.0
DGS = 7770.1 * mm # with M2 baffle GS OD Bauman http://dms.hia.nrc.ca/view.php?fDocumentId=2164
D = DGS
d = 11.998 * mm # with M2 baffle GS OD Bauman http://dms.hia.nrc.ca/view.php?fDocumentId=2164
#d = 11.68 * mm # March 2019 update based on manufactured values.
#demag = 0.99*d/D # about 1/800...
demag = d/D # about 1/800...
dppm = 11.671 * mm # Precision Optical or Aktiwave dapod 11.68
flip = "_flip"
print(""""
This program (gpipoppy.py) uses DGS = 7770.1 * mm with M2 cardboard baffle
GS OD from GPI Fundamental Values (Bauman GPI_Optical_Fundamental_Values.doc)
http://dms.hia.nrc.ca/view.php?fDocumentId=2164
and Lenox Laser measured hole diameter of G40S10 sample design,
with average hole size in
LenoxSTScI_delivery_APOD_NRM10.[pdf xlsx] (also on HIA's KT)
All X,Y dimensions in m from center of mask in PM space
All hole DIAMETERS are in m:\n""")
##### Preliminary set-up:- design-to-metal scale
##### Multiply design by MAG to get metal coords, origin at part center
####assumedLSOD = 9.532 * mm # Barnaby's email Feb 2011
####correctLSOD = 9.571 * mm # source - Anand's order of GS version 3 apodizers to CTM
####magLS2PPM = 11.790/9.825 # GSOD@PPM/GSOD@Lyot source - Remi's COR dimensions final designs.pdf, 1.2 exactly
####magBarnaby2LS = correctLSOD / assumedLSOD # slight mag, about 0.5%
####magBarnaby2PPM = magBarnaby2LS * magLS2PPM
####MAG = magBarnaby2PPM
####print "DESIGN to PPM magnification is %.4f\n" % MAG
print(demag)
return demag
def gpi_g10s40(rescale=False):
"""
Multiply by the 'rescale' factor to adjust hole sizes and centers in entrance pupil (PM)
(Magnify the physical mask coordinates up to the primary mirror size)
"""
demag = gpi_mag_asdesigned()
if rescale:
demag = demag/rescale # rescale 1.1 gives a bigger mask in PM pupil space
print ("gpi_g10s4...")
hdia, ctrs = gpi_g10s40_asmanufactured(1.0/demag) # meters
return hdia, ctrs
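# Minimal usage sketch (illustrative only, not part of the original pipeline):
#   hdia, ctrs = gpi_g10s40()               # hole diameter (m) and centers projected onto the PM
#   hdia2, ctrs2 = gpi_g10s40(rescale=1.1)  # ~10% larger mask in primary-mirror space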
""" From GPI FPRD 2008 http://dms.hia.nrc.ca/view.php?fDocumentId=1398
Filter 1/2 pwr bandwidth
name wavelen/um %
Y 0.95-1.14 18
J 1.12-1.35 19
H 1.50-1.80 18
K1 1.9-2.19 14
K2 2.13-2.4 12
Spectral Resolution 34-36 35-39 44-49 62-70 75-83
# spectral pixels 12-13 13-15 16-18 18-20
18-20
pixels 14mas are nyquist at 1.1
"""
""" Mathilde Beaulieu
eg. Thu, Jun 18, 2009 at 06:28:19PM
Thank you for the drawing. It really helps!
The distance between the center of the 2 segments in your drawing does not
match exactly with the distance I have (1.32 instead of 1.325).
Could you please check if I have the good center coordinates?
XY - PUPIL
0.00000 -2.64000
-2.28631 0.00000
2.28631 -1.32000
-2.28631 1.32000
-1.14315 1.98000
2.28631 1.32000
1.14315 1.98000
where y is the direction aligned with the spider which is not collinear with
any of pupil edges (it is not the same definition as Ball).
Thank you,
Regards,
Mathilde
n.b. This differs from the metal-mask-projected-to-PM-space with
Zheng Hai (Com Dev)'s mapping communicated by Mathilde Beaulieu to Anand.
This mapping has offset, rotation, shrink x, magnification.
Reference is a JWST Tech Report to be finalized 2013 by Anand.
jwst_g7s6_centers_asdesigned function superceded in LG++:
"""
def jwst_g7s6_centers_asbuilt(chooseholes=None): # was jwst_g7s6_centers_asdesigned
holedict = {} # as_built names, C2 open, C5 closed, but as designed coordinates
# Assemble holes by actual open segment names (as_built). Either the full mask or the
# subset-of-holes mask will be V2-reversed after the as_designed centers are defined
# Debug orientations with b4,c6,[c2]
allholes = ('b4','c2','b5','b2','c1','b6','c6')
b4,c2,b5,b2,c1,b6,c6 = ('b4','c2','b5','b2','c1','b6','c6')
# design built
holedict['b4'] = [ 0.00000000, -2.640000] #B4 -> B4
holedict['c2'] = [-2.2863100 , 0.0000000] #C5 -> C2
holedict['b5'] = [ 2.2863100 , -1.3200001] #B3 -> B5
holedict['b2'] = [-2.2863100 , 1.3200001] #B6 -> B2
holedict['c1'] = [-1.1431500 , 1.9800000] #C6 -> C1
holedict['b6'] = [ 2.2863100 , 1.3200001] #B2 -> B6
holedict['c6'] = [ 1.1431500 , 1.9800000] #C1 -> C6
# as designed MB coordinates (Mathilde Beaulieu, Peter, Anand).
# as designed: segments C5 open, C2 closed, meters V2V3 per Paul Lightsey def
# as built C5 closed, C2 open
#
# undistorted pupil coords on PM. These numbers are considered immutable.
# as designed seg -> as built seg in comments each ctr entry (no distortion)
if chooseholes: #holes B4 B5 C6 asbuilt for orientation testing
print("\n chooseholes creates mask with JWST as_built holes ", chooseholes)
#time.sleep(2)
holelist = []
for h in allholes:
if h in chooseholes:
holelist.append(holedict[h])
ctrs_asdesigned = np.array( holelist )
else:
# the REAL THING - as_designed 7 hole, m in PM space, no distortion shape (7,2)
ctrs_asdesigned = np.array( [
[ 0.00000000, -2.640000], #B4 -> B4 as-designed -> as-built mapping
[-2.2863100 , 0.0000000], #C5 -> C2
[ 2.2863100 , -1.3200001], #B3 -> B5
[-2.2863100 , 1.3200001], #B6 -> B2
[-1.1431500 , 1.9800000], #C6 -> C1
[ 2.2863100 , 1.3200001], #B2 -> B6
[ 1.1431500 , 1.9800000] ] ) #C1 -> C6
# Preserve ctrs.as-designed (treat as immutable)
# Reverse V2 axis coordinates to close C5 open C2, and others follow suit...
# preserve cts.as_built (treat as immutable)
ctrs_asbuilt = ctrs_asdesigned.copy()
# create 'live' hole centers in an ideal, orthogonal undistorted xy pupil space,
    # eg maps open hole C5 in as_designed to C2 as_built, eg C4 unaffected....
ctrs_asbuilt[:,0] *= -1
# LG++ rotate hole centers by 90 deg to match MAST o/p DMS PSF with
# no affine2d transformations 8/2018 AS
    # LG++ The above aligns the hole pattern with the hex analytic FT,
# flat top & bottom as seen in DMS data. 8/2018 AS
ctrs_asbuilt = rotate2dccw(ctrs_asbuilt, np.pi/2.0) # overwrites attributes
# create 'live' hole centers in an ideal, orthogonal undistorted xy pupil space,
return ctrs_asbuilt * m
def jwst_g7s6c(chooseholes=None):
# WARNING! JWST CHOOSEHOLES CODE NOW DUPLICATED IN LG_Model.py WARNING! ###
#return 0.80*m, jwst_g7s6_centers_asbuilt(chooseholes=chooseholes) #comment out 2019 Aug w/Joel to match webbpsf 0.8.something
f2f = 0.82 * m # m flat to flat
return f2f, jwst_g7s6_centers_asbuilt(chooseholes=chooseholes)
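# Usage sketch (hole names follow the as-built labels defined above; illustrative only):
#   f2f, ctrs = jwst_g7s6c()                                 # full 7-hole mask, meters on the PM
#   f2f, ctrs3 = jwst_g7s6c(chooseholes=['b4', 'c6', 'b5'])  # 3-hole subset for orientation tests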
def visir_sam_asmanufactured(mag):
"""VISIR has hexagonal holes
The SAM plate has a main external diameter of 26 mm
"""
holedia = 2.8*mm # (=side length) => 0.61 m in telescope pupil of 8.115 m
    # Equivalent diameter is thus 1.22m
# pupil footprint center (/= coordinates center in the drawing)
holectrs = [
[-5.707*mm, -2.885*mm],
[-5.834*mm, 3.804*mm],
[ 0.099*mm, 7.271*mm],
[ 7.989*mm, 0.422*mm],
[ 3.989*mm, -6.481*mm],
[-3.790*mm, -6.481*mm],
[-1.928*mm, -2.974*mm]
]
"""
xc, yc = 387., 294.75
    holectrs = [
[342.000-xc, 272.000-yc],
[341.000-xc, 324.750-yc],
[387.780-xc, 352.080-yc],
[449.992-xc, 298.080-yc],
[418.456-xc, 243.648-yc],
[357.112-xc, 243.648-yc],
[371.800-xc, 271.296-yc]
]
    There is an offset between the mechanical plate and the footprint of the
cold stop of - 0.636 mm, which probably explains the need for manual (filing)
increase of the size of the fixing holes on the size after mounting and
checking the optical alignment on the instrument.
"""
# design2metal mag
holedia = holedia * mag
print(mag)
ctrs = []
REVERSE = -1 # The pupil we measure with VISIR pupil imaging lens is
# [probably] point symmetry reversed w.r.t reality. => OK checked
for r in holectrs:
ctrs.append([r[0]*mag*REVERSE, r[1]*mag*REVERSE])
# return mask coords
return holedia, ctrs
def visir_mag_asdesigned():
#logging.basicConfig(level=logging.DEBUG,format='%(name)-10s: %(levelname)-8s %(message)s')
datapath='/Users/agreenba/data/visir/'
"""
returns demag (dimensionless)
pupil dimensions * demag gives manufactured dimensions
"""
"""
The hexagons have radii of 1.4 mm (=side length) => 0.61 m in telescope
    pupil of 8.115 m. Equivalent diameter is thus 1.22m
"""
DVLT = 8115. * mm #
DTH = 1220.*mm # diam of hex on primary
D = DTH
d = 2.8 * mm # diam of hex in pupil
demag = d/D # about 1/800...
print(""""
This program (VISIR.py) uses GVLT = 8115.0 * mm from SAM report c/o Eric Pantin
(report_SAM_pupil.pdf)
All X,Y dimensions in m from center of mask in PM space
All hole DIAMETERS are ... :\n""")
####print "DESIGN to PPM magnification is %.4f\n" % MAG
print(demag)
return demag
def visir_sam(rescale=False):
"""
Multiply by the 'rescale' factor to adjust hole sizes and centers in entrance pupil (PM)
(Magnify the physical mask coordinates up to the primary mirror size)
"""
demag = visir_mag_asdesigned()
if rescale:
demag = demag/rescale # rescale 1.1 gives a bigger mask in PM pupil space
print ("VISIR SAM")
hdia, ctrs = visir_sam_asmanufactured(1.0/demag) # meters
return hdia, ctrs
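# Arithmetic check of the scaling above (numbers taken from the docstrings; hedged):
#   demag = 2.8 mm / 1220 mm ~= 1/436, so visir_sam() returns hdia ~= 2.8 mm * 436 ~= 1.22 m
#   (hexagon "radii" of 1.4 mm map to ~0.61 m), matching the "equivalent diameter is thus
#   1.22 m" note in visir_sam_asmanufactured().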
""" From VISIR documentation
Filter 1/2 pwr bandwidth
name wavelen/um %
Spectral Resolution 34-36 35-39 44-49 62-70 75-83
# spectral pixels 12-13 13-15 16-18 18-20
18-20
pixels ??mas are nyquist at ??um
"""
def keck_nrm(rescale=False):
"""
Multiply by the 'rescale' factor to adjust hole sizes and centers in entrance pupil (PM)
(Magnify the physical mask coordinates up to the primary mirror size)
"""
hdia = 1.098 # m
# All projected onto the primary in m
holectrs = np.array([[-3.44, -2.22],
[-4.57 , -1.00],
[-2.01 , 2.52],
[-0.20 , 4.09],
[ 1.42 , 4.46],
[ 3.19 , 0.48],
[ 3.65 , -1.87],
[ 3.15 , -3.46],
[-1.18 , -3.01]])
ctrs = []
REVERSE = -1 # The pupil we measure with VISIR pupil imaging lens is
# [probably] point symmetry reversed w.r.t reality. => OK checked
for r in holectrs:
ctrs.append([r[0], r[1]*REVERSE])
return hdia, ctrs
if __name__ == "__main__":
# JWST G7S6 circular 0.8m dia holes...
nrm = NRM_mask_definitions("gpi_g10s40")
PUPLSCAL= 0.006455708661417323 # scale (m/pixels) from webbpsf-data/NIRISS/coronagraph/MASK_NRM.fits
# for jwst-g7s6c fullpupil=True gets us a file like that in webbpsf-data...
#maskobj = nrm.createnrmarray(puplscal=PUPLSCAL,fitsfile='g7s6c.fits' % r, fullpupil=True)
print(nrm.activeD)
sys.exit()
maskobj = nrm.createnrmarray(puplscal=PUPLSCAL,
fitsfile='/Users/anand/Desktop/jwst_g7s6c_which.fits',
fullpupil=True,
holeid=True)
maskobj = nrm.createnrmarray(puplscal=PUPLSCAL,
fitsfile='/Users/anand/Desktop/jwst_g7s6c.fits',
fullpupil=True,
holeid=False)
# GPI explorations
##for r in (0.99, 1.0, 1.01):
## nrm = NRM_mask_definitions("gpi_g10s40", rescale = r)
## maskobj = nrm.createnrmarray(puplscal=1.0e-2,fitsfile='g10s40Fr%.2f.fits' % r, fullpupil=True)
##pupobj = nrm.createpupilarray(puplscal=1.0e-2)
##print maskobj
#nrm.createnrmarray(puplscal=1.0e-2,fitsfile='/Users/anand/Desktop/g10s40id.fits', holeid=True)
#nrm.createpupilarray(puplscal=1.0e-2,fitsfile='/Users/anand/Desktop/gpipupil.fits')
#pupil = NRM_mask_definitions("gpi_g10s40")
#pupil.createpupilarray(puplscal)
# make image at angular pixel scale, at given wavelength/bandpass
# choose pupil pixel scale
# image oversampling before rebinning
# possibly put in OPD
| 38.587658 | 130 | 0.589285 |
794727abf7142ffbc261e4a1a49b5f0c79b14daa | 1,742 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/sensu/sensu_go/tests/unit/plugins/filter/test_package_name.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | ["MIT"] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/sensu/sensu_go/tests/unit/plugins/filter/test_package_name.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | ["MIT"] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/sensu/sensu_go/tests/unit/plugins/filter/test_package_name.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | ["MIT"] | null | null | null |
# Copyright: (c) 2020, XLAB Steampunk <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import sys
import pytest
from ansible_collections.sensu.sensu_go.plugins.filter import package_name
pytestmark = pytest.mark.skipif(
sys.version_info < (2, 7), reason="requires python2.7 or higher"
)
class TestPackageName:
def test_yum_latest_version(self):
assert "package" == package_name.package_name(
"yum", "package", "latest", "latest",
)
def test_yum_latest_build(self):
assert "package-123" == package_name.package_name(
"yum", "package", "123", "latest",
)
def test_yum_selected_build(self):
assert "package-123-456" == package_name.package_name(
"yum", "package", "123", "456",
)
def test_yum_ignore_build_if_latest_version(self):
assert "package" == package_name.package_name(
"yum", "package", "latest", "456",
)
def test_apt_latest_version(self):
assert "package" == package_name.package_name(
"apt", "package", "latest", "latest",
)
def test_apt_latest_build(self):
assert "package=123-*" == package_name.package_name(
"apt", "package", "123", "latest",
)
def test_apt_selected_build(self):
assert "package=123-456" == package_name.package_name(
"apt", "package", "123", "456",
)
def test_apt_ignore_build_if_latest_version(self):
assert "package" == package_name.package_name(
"apt", "package", "latest", "456",
)
| 29.525424 | 92 | 0.630884 |
7947284b67f1da002bca200aa1819af6491a4871 | 66 | py | Python | addons/checks/__init__.py | enterspeed/kurisu | 48163391d1861fe92125f59dad63884d5e767ab2 | ["MIT"] | 10 | 2017-06-10T21:22:21.000Z | 2021-01-22T13:45:31.000Z | addons/checks/__init__.py | enterspeed/kurisu | 48163391d1861fe92125f59dad63884d5e767ab2 | ["MIT"] | 1 | 2018-06-08T18:18:17.000Z | 2018-06-08T21:12:23.000Z | addons/checks/__init__.py | enterspeed/makisekurisu | 48163391d1861fe92125f59dad63884d5e767ab2 | ["MIT"] | 3 | 2017-06-10T21:22:22.000Z | 2021-01-29T00:23:10.000Z |
from addons.checks import checks
from addons.checks import errors
| 22 | 32 | 0.848485 |
7947284bab43037b9b043e76a4c98c1aab80d885 | 432 | py | Python | award/api/urls.py | margaret254/Awwards | 9af3de00c4200b86c92b16b141a4642fe8f02c87 | ["MIT"] | null | null | null | award/api/urls.py | margaret254/Awwards | 9af3de00c4200b86c92b16b141a4642fe8f02c87 | ["MIT"] | 9 | 2020-02-12T03:24:03.000Z | 2022-02-10T14:10:21.000Z | award/api/urls.py | margaret254/Awwards | 9af3de00c4200b86c92b16b141a4642fe8f02c87 | ["MIT"] | 1 | 2020-06-05T17:47:06.000Z | 2020-06-05T17:47:06.000Z |
from django.urls import path
from award.api.views import(
api_detail_award_view,
api_update_award_view,
api_delete_award_view,
api_create_award_view
)
app_name = 'award'
urlpatterns = [
path('<slug>/', api_detail_award_view, name="detail"),
path('<slug>/update', api_update_award_view, name="update"),
path('<slug>/delete', api_delete_award_view, name="delete"),
path('create', api_create_award_view, name="create")
]
| 21.6 | 61 | 0.752315 |
79472870ae23492e959b3e728a3208fd1fa720fa | 514 | py | Python | env/lib/python3.8/site-packages/plotly/validators/mesh3d/_visible.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | ["MIT", "Unlicense"] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/mesh3d/_visible.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | ["MIT", "Unlicense"] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/mesh3d/_visible.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | ["MIT", "Unlicense"] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z |
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="visible", parent_name="mesh3d", **kwargs):
super(VisibleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", [True, False, "legendonly"]),
**kwargs
)
| 36.714286 | 78 | 0.643969 |
794728abe39bf42e1e0e72d5c6382d9f34f070f5 | 3,609 | py | Python | py/simulator/game.py | coderanger/farmrpg-ext | bade8519fcd0481b08ecba3f8554fde2de5828ed | ["Apache-2.0"] | 8 | 2022-02-04T05:57:47.000Z | 2022-03-25T08:02:42.000Z | py/simulator/game.py | coderanger/farmrpg-ext | bade8519fcd0481b08ecba3f8554fde2de5828ed | ["Apache-2.0"] | null | null | null | py/simulator/game.py | coderanger/farmrpg-ext | bade8519fcd0481b08ecba3f8554fde2de5828ed | ["Apache-2.0"] | 2 | 2022-02-07T05:38:33.000Z | 2022-02-07T05:54:51.000Z |
from __future__ import annotations
import datetime
from io import StringIO
from typing import Optional
import attrs
import structlog
from .ai import AI
from .buildings import HayField, RaptorPen, Sawmill
from .farm import Farm
from .player import Player
from .steak_market import SteakMarket
from .utils import format_number
def SelfFactory(type: type):
return attrs.Factory(lambda self: type(game=self), takes_self=True)
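# Note: with takes_self=True the attrs factory receives the partially-initialised Game
# instance, so e.g. `player: Player = SelfFactory(Player)` below resolves to
# Player(game=<this Game>) at construction time.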
@attrs.define
class Game:
player: Player = SelfFactory(Player)
farm: Farm = SelfFactory(Farm)
sawmill: Sawmill = SelfFactory(Sawmill)
hay_field: HayField = SelfFactory(HayField)
raptor_pen: RaptorPen = SelfFactory(RaptorPen)
steak_market: SteakMarket = SelfFactory(SteakMarket)
ai_class: Optional[type] = None
ai: Optional[AI] = None
time: datetime.datetime = datetime.datetime.now()
log: structlog.stdlib.BoundLogger = structlog.stdlib.get_logger(mod="farm")
def __attrs_post_init__(self):
if self.ai is None and self.ai_class is not None:
self.ai = self.ai_class(self)
def tick(self, seconds: int) -> None:
self.time += datetime.timedelta(seconds=seconds)
self.player.tick(seconds)
self.farm.tick(seconds)
self.sawmill.tick(seconds)
self.hay_field.tick(seconds)
self.raptor_pen.tick(seconds)
self.steak_market.tick(seconds)
def process_ai(self):
if self.ai is None:
return
if self.farm.can_harvest:
self.log.debug("AI harvest")
self.farm.harvest_all()
if self.farm.can_plant:
seed = self.ai.plant()
if seed is not None:
self.log.debug("AI plant", seed=seed.name)
self.farm.plant_all(seed)
if self.raptor_pen.can_pet:
self.raptor_pen.pet_all()
while self.player.can_fish:
loc = self.ai.fish()
if loc is not None:
self.log.debug("AI fish", location=loc.name)
items = loc.net(self.player)
self.player.sell_all(items)
else:
break
while self.player.can_explore:
loc = self.ai.explore()
if loc is not None:
self.log.debug("AI explore", location=loc.name)
loc.explore(self.player)
else:
break
self.ai.process()
def run(self, iterations: int = 60, interval: int = 60) -> None:
"""Run a simulation for the given number of iterations."""
# ???: Should this run for a given length of simulated time instead?
for _ in range(iterations):
self.log.debug(
"AI state", silver=self.player.silver, stamina=self.player.stamina
)
self.process_ai()
self.tick(interval)
# Finalize the AI.
if self.ai:
self.ai.finish()
def summary(self) -> str:
"""Render a string summary of the game state."""
out = StringIO()
out.write(f"Silver: {format_number(self.player.silver)}\n")
out.write(f"Stamina Used: {self.player.stamina_used}\n")
out.write(f"Explore Count: {self.player.explore_count}\n")
out.write("Inventory:\n")
for item, count in self.player.inventory.items():
out.write(f"\t{item.name}: {count}\n")
out.write("Overflow:\n")
for item, count in self.player.overflow_items.items():
out.write(f"\t{item.name}: {count}\n")
out.write(self.steak_market.summary())
return out.getvalue()
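# Minimal usage sketch (FarmAI is a hypothetical AI subclass implementing the
# plant/fish/explore/process/finish hooks exercised by Game.process_ai above):
#
#   game = Game(ai_class=FarmAI)
#   game.run(iterations=60, interval=60)  # simulate one hour in 60-second steps
#   print(game.summary())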
| 33.728972 | 82 | 0.610973 |
794729e152b3f51acb5a858812fa29d853af4f73 | 8,792 | py | Python | kivy/input/providers/probesysfs.py | CharaD7/kivy | 85065fe6633f5ac831c193dc84e3f636b789cc3a | ["MIT"] | 2 | 2021-05-16T09:46:14.000Z | 2021-11-17T11:23:15.000Z | kivy/input/providers/probesysfs.py | CharaD7/kivy | 85065fe6633f5ac831c193dc84e3f636b789cc3a | ["MIT"] | 1 | 2016-11-11T13:45:42.000Z | 2016-11-11T13:45:42.000Z | kivy/input/providers/probesysfs.py | CharaD7/kivy | 85065fe6633f5ac831c193dc84e3f636b789cc3a | ["MIT"] | 2 | 2020-03-28T10:18:00.000Z | 2021-02-13T06:34:14.000Z |
'''
Auto Create Input Provider Config Entry for Available MT Hardware (linux only).
===============================================================================
Thanks to Marc Tardif for the probing code, taken from scan-for-mt-device.
The device discovery is done by this provider. However, the reading of
input can be performed by other providers like: hidinput, mtdev and
linuxwacom. mtdev is used prior to other providers. For more
information about mtdev, check :py:class:`~kivy.input.providers.mtdev`.
Here is an example of auto creation::
[input]
# using mtdev
device_%(name)s = probesysfs,provider=mtdev
# using hidinput
device_%(name)s = probesysfs,provider=hidinput
# using mtdev with a match on name
device_%(name)s = probesysfs,provider=mtdev,match=acer
# using hidinput with custom parameters to hidinput (all on one line)
%(name)s = probesysfs,
provider=hidinput,param=min_pressure=1,param=max_pressure=99
# you can also match your wacom touchscreen
touch = probesysfs,match=E3 Finger,provider=linuxwacom,
select_all=1,param=mode=touch
# and your wacom pen
pen = probesysfs,match=E3 Pen,provider=linuxwacom,
select_all=1,param=mode=pen
By default, ProbeSysfs module will enumerate hardware from the /sys/class/input
device, and configure hardware with ABS_MT_POSITION_X capability. But for
example, the wacom screen doesn't support this capability. You can prevent this
behavior by putting select_all=1 in your config line.
'''
__all__ = ('ProbeSysfsHardwareProbe', )
import os
from os.path import sep
if 'KIVY_DOC' in os.environ:
ProbeSysfsHardwareProbe = None
else:
from re import match, IGNORECASE
from glob import glob
from subprocess import Popen, PIPE
from kivy.logger import Logger
from kivy.input.provider import MotionEventProvider
from kivy.input.providers.mouse import MouseMotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.config import _is_rpi
EventLoop = None
# See linux/input.h
ABS_MT_POSITION_X = 0x35
_cache_input = None
_cache_xinput = None
class Input(object):
def __init__(self, path):
query_xinput()
self.path = path
@property
def device(self):
base = os.path.basename(self.path)
return os.path.join("/dev", "input", base)
@property
def name(self):
path = os.path.join(self.path, "device", "name")
return read_line(path)
def get_capabilities(self):
path = os.path.join(self.path, "device", "capabilities", "abs")
line = read_line(path)
capabilities = []
long_bit = getconf("LONG_BIT")
for i, word in enumerate(line.split(" ")):
word = int(word, 16)
subcapabilities = [bool(word & 1 << i)
for i in range(long_bit)]
capabilities[:0] = subcapabilities
return capabilities
def has_capability(self, capability):
capabilities = self.get_capabilities()
return len(capabilities) > capability and capabilities[capability]
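        # Worked example of the decoding above (hypothetical sysfs value): with
        # LONG_BIT == 64, an "abs" capabilities line such as "263800000000003"
        # is a single 64-bit hex word; it is expanded LSB-first into 64 booleans
        # and prepended to the list, so has_capability(ABS_MT_POSITION_X) ends
        # up testing bit 0x35 of the last word listed in
        # .../device/capabilities/abs (set in this example value).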
@property
def is_mouse(self):
return self.device in _cache_xinput
def getout(*args):
try:
return Popen(args, stdout=PIPE).communicate()[0]
except OSError:
return ''
def getconf(var):
output = getout("getconf", var)
return int(output)
def query_xinput():
global _cache_xinput
if _cache_xinput is None:
_cache_xinput = []
devids = getout('xinput', '--list', '--id-only')
for did in devids.splitlines():
devprops = getout('xinput', '--list-props', did)
evpath = None
for prop in devprops.splitlines():
prop = prop.strip()
if (prop.startswith(b'Device Enabled') and
prop.endswith(b'0')):
evpath = None
break
if prop.startswith(b'Device Node'):
try:
evpath = prop.split('"')[1]
except Exception:
evpath = None
if evpath:
_cache_xinput.append(evpath)
def get_inputs(path):
global _cache_input
if _cache_input is None:
event_glob = os.path.join(path, "event*")
_cache_input = [Input(x) for x in glob(event_glob)]
return _cache_input
def read_line(path):
f = open(path)
try:
return f.readline().strip()
finally:
f.close()
class ProbeSysfsHardwareProbe(MotionEventProvider):
def __new__(self, device, args):
# hack to not return an instance of this provider.
# :)
instance = super(ProbeSysfsHardwareProbe, self).__new__(self)
instance.__init__(device, args)
def __init__(self, device, args):
super(ProbeSysfsHardwareProbe, self).__init__(device, args)
self.provider = 'mtdev'
self.match = None
self.input_path = '/sys/class/input'
self.select_all = True if _is_rpi else False
self.use_regex = False
self.args = []
args = args.split(',')
for arg in args:
if arg == '':
continue
arg = arg.split('=', 1)
# ensure it's a key = value
if len(arg) != 2:
Logger.error('ProbeSysfs: invalid parameters %s, not'
' key=value format' % arg)
continue
key, value = arg
if key == 'match':
self.match = value
elif key == 'provider':
self.provider = value
elif key == 'use_regex':
self.use_regex = bool(value)
elif key == 'select_all':
self.select_all = bool(value)
elif key == 'param':
self.args.append(value)
else:
Logger.error('ProbeSysfs: unknown %s option' % key)
continue
self.probe()
def should_use_mouse(self):
return not any(p for p in EventLoop.input_providers
if isinstance(p, MouseMotionEventProvider))
def probe(self):
global EventLoop
from kivy.base import EventLoop
inputs = get_inputs(self.input_path)
Logger.debug('ProbeSysfs: using probesysfs!')
use_mouse = self.should_use_mouse()
if not self.select_all:
inputs = [x for x in inputs if
x.has_capability(ABS_MT_POSITION_X)
and (use_mouse or not x.is_mouse)]
for device in inputs:
Logger.debug('ProbeSysfs: found device: %s at %s' % (
device.name, device.device))
# must ignore ?
if self.match:
if self.use_regex:
if not match(self.match, device.name, IGNORECASE):
Logger.debug('ProbeSysfs: device not match the'
' rule in config, ignoring.')
continue
else:
if self.match not in device.name:
continue
Logger.info('ProbeSysfs: device match: %s' % device.device)
d = device.device
devicename = self.device % dict(name=d.split(sep)[-1])
provider = MotionEventFactory.get(self.provider)
if provider is None:
                    Logger.info('ProbeSysfs: unable to find provider %s' %
self.provider)
Logger.info('ProbeSysfs: fallback on hidinput')
provider = MotionEventFactory.get('hidinput')
if provider is None:
Logger.critical('ProbeSysfs: no input provider found'
' to handle this device !')
continue
instance = provider(devicename, '%s,%s' % (
device.device, ','.join(self.args)))
if instance:
EventLoop.add_input_provider(instance)
MotionEventFactory.register('probesysfs', ProbeSysfsHardwareProbe)
| 35.451613 | 79 | 0.540833 |
79472ac373fdba478d4002e0cab601b3e723ab0d | 5,079 | py | Python | CV19Dash/_notebooks/covid_overview.py | Ehnryu/archives | 36f5d1b8b0168ada3a1a25fbe3112709f4134652 | ["MIT"] | 1,740 | 2020-03-19T17:26:24.000Z | 2022-03-30T08:04:55.000Z | CV19Dash/_notebooks/covid_overview.py | Ehnryu/archives | 36f5d1b8b0168ada3a1a25fbe3112709f4134652 | ["MIT"] | 3,565 | 2020-03-29T04:44:21.000Z | 2022-03-31T20:29:54.000Z | CV19Dash/_notebooks/covid_overview.py | Ehnryu/archives | 36f5d1b8b0168ada3a1a25fbe3112709f4134652 | ["MIT"] | 516 | 2020-03-19T16:28:36.000Z | 2022-03-28T15:22:19.000Z |
import pandas as pd
base_url = 'https://raw.githubusercontent.com/pratapvardhan/notebooks/master/covid19/'
paths = {
'mapping': base_url + 'mapping_countries.csv',
'overview': base_url + 'overview.tpl'
}
def get_mappings(url):
df = pd.read_csv(url, encoding='utf-8')
return {
'df': df,
'replace.country': dict(df.dropna(subset=['Name']).set_index('Country')['Name']),
'map.continent': dict(df.set_index('Name')['Continent'])
}
mapping = get_mappings(paths['mapping'])
def get_template(path):
from urllib.parse import urlparse
if bool(urlparse(path).netloc):
from urllib.request import urlopen
return urlopen(path).read().decode('utf8')
return open(path, encoding='utf8').read()
def get_frame(name):
url = (
'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/'
f'csse_covid_19_time_series/time_series_covid19_{name}_global.csv')
df = pd.read_csv(url, encoding='utf-8')
# rename countries
df['Country/Region'] = df['Country/Region'].replace(mapping['replace.country'])
return df
def get_dates(df):
dt_cols = df.columns[~df.columns.isin(['Province/State', 'Country/Region', 'Lat', 'Long'])]
latest_date_idx = -1
# sometimes last column may be empty, then go backwards
for i in range(-1, -len(dt_cols), -1):
if not df[dt_cols[i]].fillna(0).eq(0).all():
latest_date_idx = i
break
return latest_date_idx, dt_cols
def gen_data(region='Country/Region', filter_frame=lambda x: x, add_table=[], kpis_info=[]):
col_region = region
df = get_frame('confirmed')
dft_cases = df.pipe(filter_frame)
dft_deaths = get_frame('deaths').pipe(filter_frame)
latest_date_idx, dt_cols = get_dates(df)
dt_today = dt_cols[latest_date_idx]
dt_5ago = dt_cols[latest_date_idx - 5]
dfc_cases = dft_cases.groupby(col_region)[dt_today].sum()
dfc_deaths = dft_deaths.groupby(col_region)[dt_today].sum()
dfp_cases = dft_cases.groupby(col_region)[dt_5ago].sum()
dfp_deaths = dft_deaths.groupby(col_region)[dt_5ago].sum()
df_table = (pd.DataFrame(dict(
Cases=dfc_cases, Deaths=dfc_deaths,
PCases=dfp_cases, PDeaths=dfp_deaths))
.sort_values(by=['Cases', 'Deaths'], ascending=[False, False])
.reset_index())
for c in 'Cases, Deaths'.split(', '):
df_table[f'{c} (+)'] = (df_table[c] - df_table[f'P{c}']).clip(0) # DATABUG
df_table['Fatality Rate'] = (100 * df_table['Deaths'] / df_table['Cases']).round(1)
for rule in add_table:
df_table[rule['name']] = df_table.pipe(rule['apply'])
def kpi_of(name, prefix, pipe):
df_f = df_table.pipe(pipe or (lambda x: x[x[col_region].eq(name)]))
return df_f[metrics].sum().add_prefix(prefix)
metrics = ['Cases', 'Deaths', 'Cases (+)', 'Deaths (+)']
s_kpis = pd.concat([
kpi_of(x['title'], f'{x["prefix"]} ', x.get('pipe'))
for x in kpis_info])
summary = {'updated': pd.to_datetime(dt_today), 'since': pd.to_datetime(dt_5ago)}
summary = {**summary, **df_table[metrics].sum(), **s_kpis}
dft_ct_cases = dft_cases.groupby(col_region)[dt_cols].sum()
dft_ct_new_cases = dft_ct_cases.diff(axis=1).fillna(0).astype(int)
return {
'summary': summary, 'table': df_table, 'newcases': dft_ct_new_cases,
'dt_last': latest_date_idx, 'dt_cols': dt_cols}
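# Minimal usage sketch (the KPI spec below is illustrative, not from this notebook):
#   data = gen_data(kpis_info=[
#       {'title': 'US', 'prefix': 'US'},
#       {'title': 'Top 5', 'prefix': 'Top 5', 'pipe': lambda t: t.head(5)},
#   ])
#   data['summary']['Cases'], data['table'].head()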
def gen_data_us(region='Province/State', kpis_info=[]):
col_region = region
df = pd.read_csv(
'https://raw.githubusercontent.com/nytimes/covid-19-data'
'/master/us-states.csv')
dt_today = df['date'].max()
dt_5ago = (pd.to_datetime(dt_today) - pd.Timedelta(days=5)).strftime('%Y-%m-%d')
cols = ['cases', 'deaths']
df_table = df[df['date'].eq(dt_today)].set_index('state')[cols].join(
df[df['date'].eq(dt_5ago)].set_index('state')[cols], rsuffix='Ago'
).reset_index().fillna(0)
for col in cols:
df_table[f'{col.title()} (+)'] = df_table[col] - df_table[f'{col}Ago']
df_table = df_table.rename(
columns={'state': col_region, 'cases': 'Cases', 'deaths': 'Deaths'})
df_table['Fatality Rate'] = (100 * df_table['Deaths'] / df_table['Cases']).round(1)
df_table = df_table.sort_values(by='Cases', ascending=False)
dft_ct_cases = df.set_index(['state', 'date'])['cases'].unstack(1, fill_value=0)
metrics = ['Cases', 'Deaths', 'Cases (+)', 'Deaths (+)']
def kpi_of(name, prefix, pipe):
df_f = df_table.pipe(pipe or (lambda x: x[x[col_region].eq(name)]))
return df_f[metrics].sum().add_prefix(prefix)
s_kpis = pd.concat([
kpi_of(x['title'], f'{x["prefix"]} ', x.get('pipe'))
for x in kpis_info])
summary = {'updated': pd.to_datetime(dt_today), 'since': pd.to_datetime(dt_5ago)}
summary = {**summary, **df_table[metrics].sum(), **s_kpis}
dft_ct_new_cases = dft_ct_cases.diff(axis=1).fillna(0).astype(int)
data = {'summary': summary, 'table': df_table, 'newcases': dft_ct_new_cases}
    return data
| 39.992126 | 95 | 0.642646 |
79472ad2586ec0ebbc5692bb8e966c704385cdfa | 62,484 | py | Python | manta/cmdln.py | joyent/python-manta | f68ef142bdbac058c981e3b28e18d77612f5b7c6 | ["MIT"] | 16 | 2015-01-24T22:52:08.000Z | 2020-04-12T20:41:54.000Z | manta/cmdln.py | joyent/python-manta | f68ef142bdbac058c981e3b28e18d77612f5b7c6 | ["MIT"] | 42 | 2015-01-21T23:38:40.000Z | 2020-09-30T20:53:12.000Z | manta/cmdln.py | joyent/python-manta | f68ef142bdbac058c981e3b28e18d77612f5b7c6 | ["MIT"] | 18 | 2015-01-21T23:39:25.000Z | 2020-11-28T17:32:08.000Z |
#!/usr/bin/env python
# Copyright (c) 2012 Trent Mick
# Copyright (c) 2002-2009 ActiveState Software Inc.
# License: MIT (see LICENSE.txt for license details)
# Author: Trent Mick
"""An improvement on Python's standard cmd.py module.
As with cmd.py, this module provides "a simple framework for writing
line-oriented command interpreters." This module provides a 'RawCmdln'
class that fixes some design flaws in cmd.Cmd, making it more scalable
and nicer to use for good 'cvs'- or 'svn'-style command line interfaces
or simple shells. And it provides a 'Cmdln' class that adds
optparse-based option processing. Basically you use it like this:
import cmdln
class MySVN(cmdln.Cmdln):
name = "svn"
@cmdln.alias('stat', 'st')
        @cmdln.option('-v', '--verbose', action='store_true',
                      help='print verbose information')
def do_status(self, subcmd, opts, *paths):
print "handle 'svn status' command"
#...
if __name__ == "__main__":
shell = MySVN()
retval = shell.main()
sys.exit(retval)
See the README.txt or <http://trentm.com/projects/cmdln/> for more
details.
"""
from __future__ import absolute_import
from __future__ import print_function
__version_info__ = (2, 0, 0)
__version__ = '.'.join(map(str, __version_info__))
import os
import sys
import re
import cmd
import optparse
from pprint import pprint
import datetime
#---- globals
LOOP_ALWAYS, LOOP_NEVER, LOOP_IF_EMPTY = range(3)
# An unspecified optional argument when None is a meaningful value.
_NOT_SPECIFIED = ("Not", "Specified")
PY3 = (sys.version_info[0] >= 3)
if not PY3:
input = raw_input
#---- exceptions
class CmdlnError(Exception):
"""A cmdln.py usage error."""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class CmdlnUserError(Exception):
"""An error by a user of a cmdln-based tool/shell."""
pass
#---- public methods and classes
def alias(*aliases):
"""Decorator to add aliases for Cmdln.do_* command handlers.
Example:
class MyShell(cmdln.Cmdln):
@cmdln.alias("!", "sh")
def do_shell(self, argv):
#...implement 'shell' command
"""
def decorate(f):
if not hasattr(f, "aliases"):
f.aliases = []
f.aliases += aliases
return f
return decorate
class RawCmdln(cmd.Cmd):
"""An improved (on cmd.Cmd) framework for building multi-subcommand
scripts (think "svn" & "cvs") and simple shells (think "pdb" and
"gdb").
A simple example:
import cmdln
class MySVN(cmdln.RawCmdln):
name = "svn"
            @cmdln.alias('stat', 'st')
def do_status(self, argv):
print "handle 'svn status' command"
if __name__ == "__main__":
shell = MySVN()
retval = shell.main()
sys.exit(retval)
"""
name = None # if unset, defaults basename(sys.argv[0])
prompt = None # if unset, defaults to self.name+"> "
version = None # if set, default top-level options include --version
# Default messages for some 'help' command error cases.
# They are interpolated with one arg: the command.
nohelp = "no help on '%s'"
unknowncmd = "unknown command: '%s'"
helpindent = '' # string with which to indent help output
def __init__(self,
completekey='tab',
stdin=None,
stdout=None,
stderr=None):
"""Cmdln(completekey='tab', stdin=None, stdout=None, stderr=None)
The optional argument 'completekey' is the readline name of a
completion key; it defaults to the Tab key. If completekey is
not None and the readline module is available, command completion
is done automatically.
The optional arguments 'stdin', 'stdout' and 'stderr' specify
alternate input, output and error output file objects; if not
specified, sys.* are used.
If 'stdout' but not 'stderr' is specified, stdout is used for
error output. This is to provide least surprise for users used
to only the 'stdin' and 'stdout' options with cmd.Cmd.
"""
import sys
if self.name is None:
self.name = os.path.basename(sys.argv[0])
if self.prompt is None:
self.prompt = self.name + "> "
self._name_str = self._str(self.name)
if stdin is not None:
self.stdin = stdin
else:
self.stdin = sys.stdin
if stdout is not None:
self.stdout = stdout
else:
self.stdout = sys.stdout
if stderr is not None:
self.stderr = stderr
elif stdout is not None:
self.stderr = stdout
else:
self.stderr = sys.stderr
self.cmdqueue = []
self.completekey = completekey
self.cmdlooping = False
def get_optparser(self):
"""Hook for subclasses to set the option parser for the
top-level command/shell.
This option parser is used retrieved and used by `.main()' to
handle top-level options.
The default implements a single '-h|--help' option. Sub-classes
can return None to have no options at the top-level. Typically
an instance of CmdlnOptionParser should be returned.
"""
version = (self.version is not None and
"%s %s" % (self._name_str, self.version) or None)
return CmdlnOptionParser(self, version=version)
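    # A typical override (sketch only, not part of this class): call the base
    # implementation, then add shell-wide options that land in `self.options`
    # before any sub-command runs, e.g.
    #
    #     def get_optparser(self):
    #         parser = RawCmdln.get_optparser(self)
    #         parser.add_option("-v", "--verbose", action="store_true")
    #         return parser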
def postoptparse(self):
"""Hook method executed just after `.main()' parses top-level
options.
When called `self.options' holds the results of the option parse.
If this returns non-zero/non-None, then command processing is stopped
and this retval is returned from `main()`.
"""
pass
def main(self, argv=None, loop=LOOP_NEVER):
"""A possible mainline handler for a script, like so:
import cmdln
class MyCmd(cmdln.Cmdln):
name = "mycmd"
...
if __name__ == "__main__":
MyCmd().main()
By default this will use sys.argv to issue a single command to
'MyCmd', then exit. The 'loop' argument can be use to control
interactive shell behaviour.
Arguments:
"argv" (optional, default sys.argv) is the command to run.
It must be a sequence, where the first element is the
command name and subsequent elements the args for that
command.
"loop" (optional, default LOOP_NEVER) is a constant
indicating if a command loop should be started (i.e. an
interactive shell). Valid values (constants on this module):
LOOP_ALWAYS start loop and run "argv", if any
LOOP_NEVER run "argv" (or .emptyline()) and exit
LOOP_IF_EMPTY run "argv", if given, and exit;
otherwise, start loop
"""
if argv is None:
import sys
argv = sys.argv
else:
argv = argv[:] # don't modify caller's list
self.optparser = self.get_optparser()
if self.optparser: # i.e. optparser=None means don't process for opts
try:
self.options, args = self.optparser.parse_args(argv[1:])
except CmdlnUserError as ex:
msg = "%s: %s\nTry '%s help' for info.\n" % (self.name, ex,
self.name)
self.stderr.write(self._str(msg))
self.stderr.flush()
return 1
except StopOptionProcessing as ex:
return 0
else:
self.options, args = None, argv[1:]
retval = self.postoptparse()
if retval:
return retval
if loop == LOOP_ALWAYS:
if args:
self.cmdqueue.append(args)
return self.cmdloop()
elif loop == LOOP_NEVER:
if args:
return self.cmd(args)
else:
return self.emptyline()
elif loop == LOOP_IF_EMPTY:
if args:
return self.cmd(args)
else:
return self.cmdloop()
def cmd(self, argv):
"""Run one command and exit.
"argv" is the arglist for the command to run. argv[0] is the
command to run. If argv is an empty list then the
'emptyline' handler is run.
Returns the return value from the command handler.
"""
assert isinstance(argv, (list, tuple)), \
"'argv' is not a sequence: %r" % argv
retval = None
try:
argv = self.precmd(argv)
retval = self.onecmd(argv)
self.postcmd(argv)
except:
if not self.cmdexc(argv):
raise
retval = 1
return retval
def _str(self, s):
"""Safely convert the given str/unicode to a string for printing."""
try:
return str(s)
except UnicodeError:
#XXX What is the proper encoding to use here? 'utf-8' seems
# to work better than "getdefaultencoding" (usually
# 'ascii'), on OS X at least.
#import sys
#return s.encode(sys.getdefaultencoding(), "replace")
return s.encode("utf-8", "replace")
def cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse into an argv, and
dispatch (via .precmd(), .onecmd() and .postcmd()), passing them
the argv. In other words, start a shell.
"intro" (optional) is a introductory message to print when
starting the command loop. This overrides the class
"intro" attribute, if any.
"""
self.cmdlooping = True
self.preloop()
if self.use_rawinput and self.completekey:
try:
import readline
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
if sys.platform == "darwin":
readline.parse_and_bind("bind ^I rl_complete")
else:
readline.parse_and_bind(self.completekey + ": complete")
except ImportError:
pass
try:
if intro is None:
intro = self.intro
if intro:
intro_str = self._str(intro)
self.stdout.write(intro_str + '\n')
self.stop = False
retval = None
while not self.stop:
if self.cmdqueue:
argv = self.cmdqueue.pop(0)
assert isinstance(argv, (list, tuple)), \
"item on 'cmdqueue' is not a sequence: %r" % argv
else:
if self.use_rawinput:
try:
line = input(self._str(self._prompt_str))
except EOFError:
line = 'EOF'
except KeyboardInterrupt:
line = 'KeyboardInterrupt'
else:
self.stdout.write(self._str(self._prompt_str))
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = 'EOF'
else:
line = line[:-1] # chop '\n'
argv = line2argv(line)
try:
argv = self.precmd(argv)
retval = self.onecmd(argv)
self.postcmd(argv)
except:
if not self.cmdexc(argv):
raise
retval = 1
self.lastretval = retval
self.postloop()
finally:
if self.use_rawinput and self.completekey:
try:
import readline
readline.set_completer(self.old_completer)
except ImportError:
pass
self.cmdlooping = False
return retval
def precmd(self, argv):
"""Hook method executed just before the command argv is
interpreted, but after the input prompt is generated and issued.
"argv" is the cmd to run.
Returns an argv to run (i.e. this method can modify the command
to run).
"""
return argv
def postcmd(self, argv):
"""Hook method executed just after a command dispatch is finished.
"argv" is the command that was run.
"""
pass
def cmdexc(self, argv):
"""Called if an exception is raised in any of precmd(), onecmd(),
or postcmd(). If True is returned, the exception is deemed to have
been dealt with. Otherwise, the exception is re-raised.
The default implementation handles CmdlnUserError's, which
typically correspond to user error in calling commands (as
opposed to programmer error in the design of the script using
cmdln.py).
"""
import sys
type, exc, traceback = sys.exc_info()
if isinstance(exc, CmdlnUserError):
msg = "%s %s: %s\nTry '%s help %s' for info.\n"\
% (self.name, argv[0], exc, self.name, argv[0])
self.stderr.write(self._str(msg))
self.stderr.flush()
return True
def onecmd(self, argv):
if not argv:
return self.emptyline()
self.lastcmd = argv
cmdname = self._get_canonical_cmd_name(argv[0])
if cmdname:
handler = self._get_cmd_handler(cmdname)
if handler:
try:
return self._dispatch_cmd(handler, argv)
except KeyboardInterrupt:
return self.onecmd(["KeyboardInterrupt"])
except Exception as ex:
raise ex
return self.default(argv)
def _dispatch_cmd(self, handler, argv):
return handler(argv)
def default(self, argv):
"""Hook called to handle a command for which there is no handler.
"argv" is the command and arguments to run.
The default implementation writes an error message to stderr
and returns an error exit status.
Returns a numeric command exit status.
"""
errmsg = self._str(self.unknowncmd % (argv[0], ))
if self.cmdlooping:
self.stderr.write(errmsg + "\n")
else:
self.stderr.write("%s: %s\nTry '%s help' for info.\n" %
(self._name_str, errmsg, self._name_str))
self.stderr.flush()
return 1
def parseline(self, line):
# This is used by Cmd.complete (readline completer function) to
# massage the current line buffer before completion processing.
# We override to drop special '!' handling.
line = line.strip()
if not line:
return None, None, line
elif line[0] == '?':
line = 'help ' + line[1:]
i, n = 0, len(line)
while i < n and line[i] in self.identchars:
i = i + 1
cmd, arg = line[:i], line[i:].strip()
return cmd, arg, line
def helpdefault(self, cmd, known):
"""Hook called to handle help on a command for which there is no
help handler.
"cmd" is the command name on which help was requested.
"known" is a boolean indicating if this command is known
(i.e. if there is a handler for it).
Returns a return code.
"""
if known:
msg = self._str(self.nohelp % (cmd, ))
if self.cmdlooping:
self.stderr.write(msg + '\n')
else:
self.stderr.write("%s: %s\n" % (self.name, msg))
else:
msg = self.unknowncmd % (cmd, )
if self.cmdlooping:
self.stderr.write(msg + '\n')
else:
self.stderr.write("%s: %s\n"
"Try '%s help' for info.\n" %
(self.name, msg, self.name))
self.stderr.flush()
return 1
def do_help(self, argv):
"""${cmd_name}: give detailed help on a specific sub-command
Usage:
${name} help [COMMAND]
"""
if len(argv) > 1: # asking for help on a particular command
doc = None
cmdname = self._get_canonical_cmd_name(argv[1]) or argv[1]
if not cmdname:
return self.helpdefault(argv[1], False)
else:
helpfunc = getattr(self, "help_" + cmdname, None)
if helpfunc:
doc = helpfunc()
else:
handler = self._get_cmd_handler(cmdname)
if handler:
doc = handler.__doc__
if doc is None:
                        return self.helpdefault(argv[1], handler is not None)
else: # bare "help" command
doc = self.__class__.__doc__ # try class docstring
if doc is None:
# Try to provide some reasonable useful default help.
if self.cmdlooping: prefix = ""
else: prefix = self.name + ' '
doc = """Usage:
%sCOMMAND [ARGS...]
%shelp [COMMAND]
${option_list}
${command_list}
${help_list}
""" % (prefix, prefix)
cmdname = None
if doc: # *do* have help content, massage and print that
doc = self._help_reindent(doc)
doc = self._help_preprocess(doc, cmdname)
doc = doc.rstrip() + '\n' # trim down trailing space
self.stdout.write(self._str(doc))
self.stdout.flush()
do_help.aliases = ["?"]
def _help_reindent(self, help, indent=None):
"""Hook to re-indent help strings before writing to stdout.
"help" is the help content to re-indent
"indent" is a string with which to indent each line of the
help content after normalizing. If unspecified or None
then the default is use: the 'self.helpindent' class
attribute. By default this is the empty string, i.e.
no indentation.
By default, all common leading whitespace is removed and then
the lot is indented by 'self.helpindent'. When calculating the
common leading whitespace the first line is ignored -- hence
help content for Conan can be written as follows and have the
expected indentation:
def do_crush(self, ...):
'''${cmd_name}: crush your enemies, see them driven before you...
c.f. Conan the Barbarian'''
"""
if indent is None:
indent = self.helpindent
lines = help.splitlines(0)
_dedentlines(lines, skip_first_line=True)
lines = [(indent + line).rstrip() for line in lines]
return '\n'.join(lines)
def _help_preprocess(self, help, cmdname):
"""Hook to preprocess a help string before writing to stdout.
"help" is the help string to process.
"cmdname" is the canonical sub-command name for which help
is being given, or None if the help is not specific to a
command.
By default the following template variables are interpolated in
help content. (Note: these are similar to Python 2.4's
string.Template interpolation but not quite.)
${name}
The tool's/shell's name, i.e. 'self.name'.
${option_list}
A formatted table of options for this shell/tool.
${command_list}
A formatted table of available sub-commands.
${help_list}
A formatted table of additional help topics (i.e. 'help_*'
methods with no matching 'do_*' method).
${cmd_name}
The name (and aliases) for this sub-command formatted as:
"NAME (ALIAS1, ALIAS2, ...)".
${cmd_usage}
A formatted usage block inferred from the command function
signature.
${cmd_option_list}
A formatted table of options for this sub-command. (This is
only available for commands using the optparse integration,
i.e. using @cmdln.option decorators or manually setting the
'optparser' attribute on the 'do_*' method.)
Returns the processed help.
"""
preprocessors = {
"${name}": self._help_preprocess_name,
"${option_list}": self._help_preprocess_option_list,
"${command_list}": self._help_preprocess_command_list,
"${help_list}": self._help_preprocess_help_list,
"${cmd_name}": self._help_preprocess_cmd_name,
"${cmd_usage}": self._help_preprocess_cmd_usage,
"${cmd_option_list}": self._help_preprocess_cmd_option_list,
}
for marker, preprocessor in preprocessors.items():
if marker in help:
help = preprocessor(help, cmdname)
return help
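    # Example of a handler docstring that uses these template variables (sketch
    # only; 'status' is a hypothetical sub-command):
    #
    #     def do_status(self, subcmd, opts):
    #         """${cmd_name}: show working tree status
    #
    #         ${cmd_usage}
    #         ${cmd_option_list}
    #         """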
def _help_preprocess_name(self, help, cmdname=None):
return help.replace("${name}", self.name)
def _help_preprocess_option_list(self, help, cmdname=None):
marker = "${option_list}"
indent, indent_width = _get_indent(marker, help)
suffix = _get_trailing_whitespace(marker, help)
if self.optparser:
# Setup formatting options and format.
# - Indentation of 4 is better than optparse default of 2.
# C.f. Damian Conway's discussion of this in Perl Best
# Practices.
self.optparser.formatter.indent_increment = 4
self.optparser.formatter.current_indent = indent_width
block = self.optparser.format_option_help() + '\n'
else:
block = ""
help = help.replace(indent + marker + suffix, block, 1)
return help
def _get_cmds_data(self):
# Find any aliases for commands.
token2canonical = self._get_canonical_map()
aliases = {}
for token, cmdname in token2canonical.items():
if token == cmdname: continue
aliases.setdefault(cmdname, []).append(token)
# Get the list of (non-hidden) commands and their
# documentation, if any.
cmdnames = set()
for attr in self.get_names():
if attr.startswith("do_"):
cmdnames.add(attr[3:])
linedata = []
for cmdname in sorted(cmdnames):
if aliases.get(cmdname):
a = aliases[cmdname]
a.sort()
cmdstr = "%s (%s)" % (cmdname, ", ".join(a))
else:
cmdstr = cmdname
doc = None
try:
helpfunc = getattr(self, 'help_' + cmdname)
except AttributeError:
handler = self._get_cmd_handler(cmdname)
if handler:
doc = handler.__doc__
else:
doc = helpfunc()
# Strip "${cmd_name}: " from the start of a command's doc. Best
# practice dictates that command help strings begin with this, but
# it isn't at all wanted for the command list.
to_strip = "${cmd_name}:"
if doc and doc.startswith(to_strip):
#log.debug("stripping %r from start of %s's help string",
# to_strip, cmdname)
doc = doc[len(to_strip):].lstrip()
linedata.append((cmdstr, doc))
return linedata
def _help_preprocess_command_list(self, help, cmdname=None):
marker = "${command_list}"
indent, indent_width = _get_indent(marker, help)
suffix = _get_trailing_whitespace(marker, help)
linedata = self._get_cmds_data()
if linedata:
subindent = indent + ' ' * 4
lines = _format_linedata(linedata, subindent, indent_width + 4)
block = indent + "Commands:\n" \
+ '\n'.join(lines) + "\n\n"
help = help.replace(indent + marker + suffix, block, 1)
return help
def _gen_names_and_attrs(self):
# Inheritance says we have to look in class and
# base classes; order is not important.
names = []
classes = [self.__class__]
while classes:
aclass = classes.pop(0)
if aclass.__bases__:
classes = classes + list(aclass.__bases__)
for name in dir(aclass):
yield (name, getattr(aclass, name))
def _get_help_names(self):
"""Return a mapping of help topic name to `.help_*()` method."""
# Determine the additional help topics, if any.
help_names = {}
token2cmdname = self._get_canonical_map()
for attrname, attr in self._gen_names_and_attrs():
if not attrname.startswith("help_"): continue
help_name = attrname[5:]
if help_name not in token2cmdname:
help_names[help_name] = attr
return help_names
def _help_preprocess_help_list(self, help, cmdname=None):
marker = "${help_list}"
indent, indent_width = _get_indent(marker, help)
suffix = _get_trailing_whitespace(marker, help)
help_names = self._get_help_names()
if help_names:
linedata = [(n, a.__doc__ or "") for n, a in help_names.items()]
linedata.sort()
subindent = indent + ' ' * 4
lines = _format_linedata(linedata, subindent, indent_width + 4)
block = (indent + "Additional help topics (run `%s help TOPIC'):\n"
% self.name + '\n'.join(lines) + "\n\n")
else:
block = ''
help = help.replace(indent + marker + suffix, block, 1)
return help
def _help_preprocess_cmd_name(self, help, cmdname=None):
marker = "${cmd_name}"
handler = self._get_cmd_handler(cmdname)
if not handler:
raise CmdlnError("cannot preprocess '%s' into help string: "
"could not find command handler for %r" %
(marker, cmdname))
s = cmdname
if hasattr(handler, "aliases"):
s += " (%s)" % (", ".join(handler.aliases))
help = help.replace(marker, s)
return help
#TODO: this only makes sense as part of the Cmdln class.
# Add hooks to add help preprocessing template vars and put
# this one on that class.
def _help_preprocess_cmd_usage(self, help, cmdname=None):
marker = "${cmd_usage}"
handler = self._get_cmd_handler(cmdname)
if not handler:
raise CmdlnError("cannot preprocess '%s' into help string: "
"could not find command handler for %r" %
(marker, cmdname))
indent, indent_width = _get_indent(marker, help)
suffix = _get_trailing_whitespace(marker, help)
# Extract the introspection bits we need.
func = handler.__func__
if func.__defaults__:
func_defaults = list(func.__defaults__)
else:
func_defaults = []
co_argcount = func.__code__.co_argcount
co_varnames = func.__code__.co_varnames
co_flags = func.__code__.co_flags
CO_FLAGS_ARGS = 4
CO_FLAGS_KWARGS = 8
# Adjust argcount for possible *args and **kwargs arguments.
argcount = co_argcount
if co_flags & CO_FLAGS_ARGS: argcount += 1
if co_flags & CO_FLAGS_KWARGS: argcount += 1
# Determine the usage string.
usage = "%s %s" % (self.name, cmdname)
if argcount <= 2: # handler ::= do_FOO(self, argv)
usage += " [ARGS...]"
elif argcount >= 3: # handler ::= do_FOO(self, subcmd, opts, ...)
argnames = list(co_varnames[3:argcount])
tail = ""
if co_flags & CO_FLAGS_KWARGS:
name = argnames.pop(-1)
import warnings
# There is no generally accepted mechanism for passing
# keyword arguments from the command line. Could
# *perhaps* consider: arg=value arg2=value2 ...
warnings.warn("argument '**%s' on '%s.%s' command "
"handler will never get values" %
                              (name, self.__class__.__name__, func.__name__))
if co_flags & CO_FLAGS_ARGS:
name = argnames.pop(-1)
tail = "[%s...]" % name.upper()
while func_defaults:
func_defaults.pop(-1)
name = argnames.pop(-1)
tail = "[%s%s%s]" % (name.upper(), (tail and ' ' or ''), tail)
while argnames:
name = argnames.pop(-1)
tail = "%s %s" % (name.upper(), tail)
usage += ' ' + tail
block_lines = [
self.helpindent + "Usage:", self.helpindent + ' ' * 4 + usage
]
block = '\n'.join(block_lines) + '\n\n'
help = help.replace(indent + marker + suffix, block, 1)
return help
#TODO: this only makes sense as part of the Cmdln class.
# Add hooks to add help preprocessing template vars and put
# this one on that class.
def _help_preprocess_cmd_option_list(self, help, cmdname=None):
marker = "${cmd_option_list}"
handler = self._get_cmd_handler(cmdname)
if not handler:
raise CmdlnError("cannot preprocess '%s' into help string: "
"could not find command handler for %r" %
(marker, cmdname))
indent, indent_width = _get_indent(marker, help)
suffix = _get_trailing_whitespace(marker, help)
if hasattr(handler, "optparser"):
# Setup formatting options and format.
# - Indentation of 4 is better than optparse default of 2.
# C.f. Damian Conway's discussion of this in Perl Best
# Practices.
handler.optparser.formatter.indent_increment = 4
handler.optparser.formatter.current_indent = indent_width
block = handler.optparser.format_option_help() + '\n'
else:
block = ""
help = help.replace(indent + marker + suffix, block, 1)
return help
def _get_canonical_cmd_name(self, token):
map = self._get_canonical_map()
return map.get(token, None)
def _get_canonical_map(self):
"""Return a mapping of available command names and aliases to
their canonical command name.
"""
cacheattr = "_token2canonical"
if not hasattr(self, cacheattr):
# Get the list of commands and their aliases, if any.
token2canonical = {}
cmd2funcname = {} # use a dict to strip duplicates
for attr in self.get_names():
if attr.startswith("do_"): cmdname = attr[3:]
elif attr.startswith("_do_"): cmdname = attr[4:]
else:
continue
cmd2funcname[cmdname] = attr
token2canonical[cmdname] = cmdname
for cmdname, funcname in cmd2funcname.items(): # add aliases
func = getattr(self, funcname)
aliases = getattr(func, "aliases", [])
for alias in aliases:
if alias in cmd2funcname:
import warnings
warnings.warn("'%s' alias for '%s' command conflicts "
"with '%s' handler" %
(alias, cmdname, cmd2funcname[alias]))
continue
token2canonical[alias] = cmdname
setattr(self, cacheattr, token2canonical)
return getattr(self, cacheattr)
def _get_cmd_handler(self, cmdname):
handler = None
try:
handler = getattr(self, 'do_' + cmdname)
except AttributeError:
try:
# Private command handlers begin with "_do_".
handler = getattr(self, '_do_' + cmdname)
except AttributeError:
pass
return handler
def _do_EOF(self, argv):
# Default EOF handler
# TODO: A mechanism so "EOF" and "KeyboardInterrupt" work as handlers
# but are *not* real available commands.
self.stdout.write('\n')
self.stdout.flush()
self.stop = True
def _do_KeyboardInterrupt(self, argv):
# Default keyboard interrupt (i.e. <Ctrl+C>) handler.
# TODO: A mechanism so "EOF" and "KeyboardInterrupt" work as handlers
# but are *not* real available commands.
self.stdout.write('\n')
self.stdout.flush()
def emptyline(self):
# Different from cmd.Cmd: don't repeat the last command for an
# emptyline.
if self.cmdlooping:
pass
else:
return self.do_help(["help"])
#---- optparse.py extension to fix (IMO) some deficiencies
#
# See the class _OptionParserEx docstring for details.
#
class StopOptionProcessing(Exception):
"""Indicate that option *and argument* processing should stop
cleanly. This is not an error condition. It is similar in spirit to
StopIteration. This is raised by _OptionParserEx's default "help"
and "version" option actions and can be raised by custom option
callbacks too.
Hence the typical CmdlnOptionParser (a subclass of _OptionParserEx)
usage is:
parser = CmdlnOptionParser(mycmd)
parser.add_option("-f", "--force", dest="force")
...
try:
opts, args = parser.parse_args()
except StopOptionProcessing:
# normal termination, "--help" was probably given
sys.exit(0)
"""
class _OptionParserEx(optparse.OptionParser):
"""An optparse.OptionParser that uses exceptions instead of sys.exit.
This class is an extension of optparse.OptionParser that differs
as follows:
- Correct (IMO) the default OptionParser error handling to never
sys.exit(). Instead OptParseError exceptions are passed through.
- Add the StopOptionProcessing exception (a la StopIteration) to
indicate normal termination of option processing.
See StopOptionProcessing's docstring for details.
I'd also like to see the following in the core optparse.py, perhaps
as a RawOptionParser which would serve as a base class for the more
generally used OptionParser (that works as current):
- Remove the implicit addition of the -h|--help and --version
options. They can get in the way (e.g. if want '-?' and '-V' for
these as well) and it is not hard to do:
optparser.add_option("-h", "--help", action="help")
optparser.add_option("--version", action="version")
These are good practices, just not valid defaults if they can
get in the way.
"""
def error(self, msg):
raise optparse.OptParseError(msg)
def exit(self, status=0, msg=None):
if status == 0:
raise StopOptionProcessing(msg)
else:
#TODO: don't lose status info here
raise optparse.OptParseError(msg)
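def _demo_stop_option_processing():
    # Illustrative sketch only (not part of the original module): shows how
    # _OptionParserEx signals normal termination via StopOptionProcessing
    # instead of calling sys.exit() the way plain optparse.OptionParser does.
    parser = _OptionParserEx(prog="demo")
    parser.add_option("-f", "--force", action="store_true", dest="force")
    try:
        opts, args = parser.parse_args(["--help"])
    except StopOptionProcessing:
        # The built-in "help" action printed usage and then stopped cleanly.
        return "help requested, stopped cleanly"
    return opts.force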
#---- optparse.py-based option processing support
class CmdlnOptionParser(_OptionParserEx):
"""An optparse.OptionParser class more appropriate for top-level
Cmdln options. For parsing of sub-command options, see
SubCmdOptionParser.
Changes:
- disable_interspersed_args() by default, because a Cmdln instance
has sub-commands which may themselves have options.
- Redirect print_help() to the Cmdln.do_help() which is better
      equipped to handle the "help" action.
- error() will raise a CmdlnUserError: OptionParse.error() is meant
to be called for user errors. Raising a well-known error here can
make error handling clearer.
- Also see the changes in _OptionParserEx.
"""
def __init__(self, cmdln, **kwargs):
self.cmdln = cmdln
kwargs["prog"] = self.cmdln.name
_OptionParserEx.__init__(self, **kwargs)
self.disable_interspersed_args()
def print_help(self, file=None):
self.cmdln.onecmd(["help"])
def error(self, msg):
raise CmdlnUserError(msg)
class SubCmdOptionParser(_OptionParserEx):
def set_cmdln_info(self, cmdln, subcmd):
"""Called by Cmdln to pass relevant info about itself needed
for print_help().
"""
self.cmdln = cmdln
self.subcmd = subcmd
def print_help(self, file=None):
self.cmdln.onecmd(["help", self.subcmd])
def error(self, msg):
raise CmdlnUserError(msg)
def option(*args, **kwargs):
"""Decorator to add an option to the optparser argument of a Cmdln
subcommand.
Example:
class MyShell(cmdln.Cmdln):
@cmdln.option("-f", "--force", help="force removal")
def do_remove(self, subcmd, opts, *args):
#...
"""
#XXX Is there a possible optimization for many options to not have a
# large stack depth here?
def decorate(f):
if not hasattr(f, "optparser"):
f.optparser = SubCmdOptionParser()
f.optparser.add_option(*args, **kwargs)
return f
return decorate
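def _demo_option_decorator():
    # Illustrative sketch only (not part of the original module): the option()
    # decorator merely accumulates add_option() calls on a SubCmdOptionParser
    # stored as the handler's `optparser` attribute; nothing is parsed until
    # the sub-command is actually dispatched.
    @option("-v", "--verbose", action="store_true", help="verbose output")
    @option("-o", "--output", metavar="FILE", help="write results to FILE")
    def do_demo(self, subcmd, opts, *args):
        pass
    return [str(opt) for opt in do_demo.optparser.option_list]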
class Cmdln(RawCmdln):
"""An improved (on cmd.Cmd) framework for building multi-subcommand
scripts (think "svn" & "cvs") and simple shells (think "pdb" and
"gdb").
A simple example:
import cmdln
class MySVN(cmdln.Cmdln):
name = "svn"
@cmdln.aliases('stat', 'st')
            @cmdln.option('-v', '--verbose', action='store_true',
                          help='print verbose information')
            def do_status(self, subcmd, opts, *paths):
                print("handle 'svn status' command")
#...
if __name__ == "__main__":
shell = MySVN()
retval = shell.main()
sys.exit(retval)
'Cmdln' extends 'RawCmdln' by providing optparse option processing
integration. See this class' _dispatch_cmd() docstring and general
cmdln document for more information.
"""
def _dispatch_cmd(self, handler, argv):
"""Introspect sub-command handler signature to determine how to
dispatch the command. The raw handler provided by the base
'RawCmdln' class is still supported:
def do_foo(self, argv):
# 'argv' is the vector of command line args, argv[0] is
# the command name itself (i.e. "foo" or an alias)
pass
In addition, if the handler has more than 2 arguments option
processing is automatically done (using optparse):
@cmdln.option('-v', '--verbose', action='store_true')
def do_bar(self, subcmd, opts, *args):
# subcmd = <"bar" or an alias>
# opts = <an optparse.Values instance>
if opts.verbose:
print "lots of debugging output..."
# args = <tuple of arguments>
for arg in args:
bar(arg)
TODO: explain that "*args" can be other signatures as well.
The `cmdln.option` decorator corresponds to an `add_option()`
method call on an `optparse.OptionParser` instance.
You can declare a specific number of arguments:
@cmdln.option('-v', '--verbose', action='store_true')
def do_bar2(self, subcmd, opts, bar_one, bar_two):
#...
and an appropriate error message will be raised/printed if the
command is called with a different number of args.
"""
co_argcount = handler.__func__.__code__.co_argcount
if co_argcount == 2: # handler ::= do_foo(self, argv)
return handler(argv)
elif co_argcount >= 3: # handler ::= do_foo(self, subcmd, opts, ...)
try:
optparser = handler.optparser
except AttributeError:
optparser = handler.__func__.optparser = SubCmdOptionParser()
assert isinstance(optparser, SubCmdOptionParser)
optparser.set_cmdln_info(self, argv[0])
try:
opts, args = optparser.parse_args(argv[1:])
except StopOptionProcessing:
#TODO: this doesn't really fly for a replacement of
# optparse.py behaviour, does it?
return 0 # Normal command termination
try:
return handler(argv[0], opts, *args)
except TypeError as ex:
# Some TypeError's are user errors because of incorrect number
# of arguments. Raise CmdlnUserError for these with a suitably
# massaged error message.
import sys
tb = sys.exc_info()[2] # the traceback object
if tb.tb_next is not None:
# If the traceback is more than one level deep, then the
                    # TypeError did *not* happen on the "handler(...)" call
                    # above. In that case we don't want to handle it specially
# here: it would falsely mask deeper code errors.
raise
msg = ex.args[0]
userErr = self._userErrFromNumArgsErrmsg(msg)
if userErr:
raise userErr
else:
raise
else:
raise CmdlnError("incorrect argcount for %s(): takes %d, must "
"take 2 for 'argv' signature or 3+ for 'opts' "
"signature" % (handler.__name__, co_argcount))
def _userErrFromNumArgsErrmsg(self, msg):
if sys.version_info[0] < 3:
# Examples (see Python/getargs.c):
# do_foo() takes at least 4 arguments (3 given)
# do_foo() takes at most 5 arguments (6 given)
# do_foo() takes exactly 5 arguments (6 given)
pattern = re.compile(
r"(takes [\w ]+ )(\d+)( arguments? \()(\d+)( given\))")
match = pattern.search(msg)
if match:
msg = list(match.groups())
msg[1] = int(msg[1]) - 3
if msg[1] == 1:
msg[2] = msg[2].replace("arguments", "argument")
msg[3] = int(msg[3]) - 3
msg = ''.join(map(str, msg))
return CmdlnUserError(msg)
else:
# Examples:
# do_foo() missing 1 required positional argument: 'bar'
patterns = [
re.compile(r"missing \d+ required positional argument"),
]
for pat in patterns:
match = pat.search(msg)
if match:
return CmdlnUserError("incorrect number of arguments")
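class _DemoShell(Cmdln):
    # Illustrative sketch only (not part of the original module): a minimal
    # Cmdln subclass in the style of the class docstring above. Running
    # `_DemoShell().main()` would dispatch "greet" to do_greet() with parsed
    # options and positional arguments.
    name = "demo"
    version = "1.0"
    @option("-l", "--loud", action="store_true", help="shout the greeting")
    def do_greet(self, subcmd, opts, *names):
        """greet each of the given names"""
        for name in names:
            print(name.upper() if opts.loud else name)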
#---- support for generating `man` page output from a Cmdln class
def man_sections_from_cmdln(inst, summary=None, description=None, author=None):
"""Return man page sections appropriate for the given Cmdln instance.
Join these sections for man page content.
The man page sections generated are:
NAME
SYNOPSIS
DESCRIPTION (if `description` is given)
OPTIONS
COMMANDS
HELP TOPICS (if any)
@param inst {Cmdln} Instance of Cmdln subclass for which to generate
man page content.
@param summary {str} A one-liner summary of the command.
@param description {str} A description of the command. If given,
it will be used for a "DESCRIPTION" section.
    @param author {str} The author name and email for the AUTHOR section
of the man page.
@raises {ValueError} if man page content cannot be generated for the
given class.
"""
if not inst.__class__.name:
raise ValueError("cannot generate man page content: `name` is not "
"set on class %r" % inst.__class__)
data = {
"name": inst.name,
"ucname": inst.name.upper(),
"date": datetime.date.today().strftime("%b %Y"),
"cmdln_version": __version__,
"version_str": inst.version and " %s" % inst.version or "",
"summary_str": summary and r" \- %s" % summary or "",
}
sections = []
sections.append(
'.\\" Automatically generated by cmdln %(cmdln_version)s\n'
'.TH %(ucname)s "1" "%(date)s" "%(name)s%(version_str)s" "User Commands"\n'
% data)
sections.append(".SH NAME\n%(name)s%(summary_str)s\n" % data)
sections.append(_dedent(r"""
.SH SYNOPSIS
.B %(name)s
[\fIGLOBALOPTS\fR] \fISUBCOMMAND \fR[\fIOPTS\fR] [\fIARGS\fR...]
.br
.B %(name)s
\fIhelp SUBCOMMAND\fR
""") % data)
if description:
sections.append(".SH DESCRIPTION\n%s\n" % description)
section = ".SH OPTIONS\n"
    if getattr(inst, "optparser", None) is None:
#HACK: In case `.main()` hasn't been run.
inst.optparser = inst.get_optparser()
lines = inst._help_preprocess("${option_list}", None).splitlines(False)
for line in lines[1:]:
line = line.lstrip()
if not line:
continue
section += ".TP\n"
opts, desc = line.split(' ', 1)
section += ".B %s\n" % opts
section += "%s\n" % _dedent(desc.lstrip(), skip_first_line=True)
sections.append(section)
section = ".SH COMMANDS\n"
cmds = inst._get_cmds_data()
for cmdstr, doc in cmds:
cmdname = cmdstr.split(' ')[0] # e.g. "commit (ci)" -> "commit"
doc = inst._help_reindent(doc, indent="")
doc = inst._help_preprocess(doc, cmdname)
doc = doc.rstrip() + "\n" # trim down trailing space
section += '.PP\n.SS %s\n%s\n' % (cmdstr, doc)
sections.append(section)
help_names = inst._get_help_names()
if help_names:
section = ".SH HELP TOPICS\n"
for help_name, help_meth in sorted(help_names.items()):
help = help_meth(inst)
help = inst._help_reindent(help, indent="")
section += '.PP\n.SS %s\n%s\n' % (help_name, help)
sections.append(section)
if author:
sections.append(".SH AUTHOR\n%s\n" % author)
return sections
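def _demo_man_sections():
    # Illustrative sketch only (not part of the original module): render man
    # page sections for the _DemoShell sketch defined above and join them
    # into a single troff document string.
    shell = _DemoShell()
    sections = man_sections_from_cmdln(
        shell, summary="tiny demo shell",
        description="A toy shell used to illustrate man page generation.",
        author="Jane Doe <jane@example.com>")
    return "".join(sections)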
#---- internal support functions
def _format_linedata(linedata, indent, indent_width):
"""Format specific linedata into a pleasant layout.
"linedata" is a list of 2-tuples of the form:
(<item-display-string>, <item-docstring>)
"indent" is a string to use for one level of indentation
"indent_width" is a number of columns by which the
formatted data will be indented when printed.
The <item-display-string> column is held to 30 columns.
"""
lines = []
WIDTH = 78 - indent_width
SPACING = 2
NAME_WIDTH_LOWER_BOUND = 13
NAME_WIDTH_UPPER_BOUND = 30
NAME_WIDTH = max([len(s) for s, d in linedata])
if NAME_WIDTH < NAME_WIDTH_LOWER_BOUND:
NAME_WIDTH = NAME_WIDTH_LOWER_BOUND
elif NAME_WIDTH > NAME_WIDTH_UPPER_BOUND:
NAME_WIDTH = NAME_WIDTH_UPPER_BOUND
DOC_WIDTH = WIDTH - NAME_WIDTH - SPACING
for namestr, doc in linedata:
line = indent + namestr
if len(namestr) <= NAME_WIDTH:
line += ' ' * (NAME_WIDTH + SPACING - len(namestr))
else:
lines.append(line)
line = indent + ' ' * (NAME_WIDTH + SPACING)
line += _summarize_doc(doc, DOC_WIDTH)
lines.append(line.rstrip())
return lines
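def _demo_format_linedata():
    # Illustrative sketch only (not part of the original module): lay out
    # command names and their one-line summaries in the two-column format
    # used by the generated "help" listings.
    data = [
        ("status (stat, st)", "Show the working tree status."),
        ("commit (ci)", "Record changes to the repository."),
    ]
    return _format_linedata(data, indent="    ", indent_width=4)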
def _summarize_doc(doc, length=60):
r"""Parse out a short one line summary from the given doclines.
"doc" is the doc string to summarize.
"length" is the max length for the summary
>>> _summarize_doc("this function does this")
'this function does this'
>>> _summarize_doc("this function does this", 10)
'this fu...'
>>> _summarize_doc("this function does this\nand that")
'this function does this and that'
>>> _summarize_doc("this function does this\n\nand that")
'this function does this'
"""
import re
if doc is None:
return ""
assert length > 3, "length <= 3 is absurdly short for a doc summary"
doclines = doc.strip().splitlines(0)
if not doclines:
return ""
summlines = []
for i, line in enumerate(doclines):
stripped = line.strip()
if not stripped:
break
summlines.append(stripped)
if len(''.join(summlines)) >= length:
break
summary = ' '.join(summlines)
if len(summary) > length:
summary = summary[:length - 3] + "..."
return summary
def line2argv(line):
r"""Parse the given line into an argument vector.
"line" is the line of input to parse.
This may get niggly when dealing with quoting and escaping. The
current state of this parsing may not be completely thorough/correct
in this respect.
>>> from cmdln import line2argv
>>> line2argv("foo")
['foo']
>>> line2argv("foo bar")
['foo', 'bar']
>>> line2argv("foo bar ")
['foo', 'bar']
>>> line2argv(" foo bar")
['foo', 'bar']
Quote handling:
>>> line2argv("'foo bar'")
['foo bar']
>>> line2argv('"foo bar"')
['foo bar']
>>> line2argv(r'"foo\"bar"')
['foo"bar']
>>> line2argv("'foo bar' spam")
['foo bar', 'spam']
>>> line2argv("'foo 'bar spam")
['foo bar', 'spam']
>>> line2argv('some\tsimple\ttests')
['some', 'simple', 'tests']
>>> line2argv('a "more complex" test')
['a', 'more complex', 'test']
>>> line2argv('a more="complex test of " quotes')
['a', 'more=complex test of ', 'quotes']
>>> line2argv('a more" complex test of " quotes')
['a', 'more complex test of ', 'quotes']
>>> line2argv('an "embedded \\"quote\\""')
['an', 'embedded "quote"']
# Komodo bug 48027
>>> line2argv('foo bar C:\\')
['foo', 'bar', 'C:\\']
# Komodo change 127581
>>> line2argv(r'"\test\slash" "foo bar" "foo\"bar"')
['\\test\\slash', 'foo bar', 'foo"bar']
# Komodo change 127629
>>> if sys.platform == "win32":
... line2argv(r'\foo\bar') == ['\\foo\\bar']
... line2argv(r'\\foo\\bar') == ['\\\\foo\\\\bar']
... line2argv('"foo') == ['foo']
... else:
... line2argv(r'\foo\bar') == ['foobar']
... line2argv(r'\\foo\\bar') == ['\\foo\\bar']
... try:
... line2argv('"foo')
... except ValueError as ex:
... "not terminated" in str(ex)
True
True
True
"""
line = line.strip()
argv = []
state = "default"
arg = None # the current argument being parsed
i = -1
WHITESPACE = '\t\n\x0b\x0c\r ' # don't use string.whitespace (bug 81316)
while 1:
i += 1
if i >= len(line): break
ch = line[i]
if ch == "\\" and i + 1 < len(line):
# escaped char always added to arg, regardless of state
if arg is None: arg = ""
if (sys.platform == "win32" or
state in ("double-quoted", "single-quoted")
) and line[i + 1] not in tuple('"\''):
arg += ch
i += 1
arg += line[i]
continue
if state == "single-quoted":
if ch == "'":
state = "default"
else:
arg += ch
elif state == "double-quoted":
if ch == '"':
state = "default"
else:
arg += ch
elif state == "default":
if ch == '"':
if arg is None: arg = ""
state = "double-quoted"
elif ch == "'":
if arg is None: arg = ""
state = "single-quoted"
elif ch in WHITESPACE:
if arg is not None:
argv.append(arg)
arg = None
else:
if arg is None: arg = ""
arg += ch
if arg is not None:
argv.append(arg)
if not sys.platform == "win32" and state != "default":
raise ValueError("command line is not terminated: unfinished %s "
"segment" % state)
return argv
def argv2line(argv):
r"""Put together the given argument vector into a command line.
"argv" is the argument vector to process.
>>> from cmdln import argv2line
>>> argv2line(['foo'])
'foo'
>>> argv2line(['foo', 'bar'])
'foo bar'
>>> argv2line(['foo', 'bar baz'])
'foo "bar baz"'
>>> argv2line(['foo"bar'])
'foo"bar'
>>> print(argv2line(['foo" bar']))
'foo" bar'
>>> print(argv2line(["foo' bar"]))
"foo' bar"
>>> argv2line(["foo'bar"])
"foo'bar"
"""
escapedArgs = []
for arg in argv:
if ' ' in arg and '"' not in arg:
arg = '"' + arg + '"'
elif ' ' in arg and "'" not in arg:
arg = "'" + arg + "'"
elif ' ' in arg:
arg = arg.replace('"', r'\"')
arg = '"' + arg + '"'
escapedArgs.append(arg)
return ' '.join(escapedArgs)
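def _demo_argv_roundtrip():
    # Illustrative sketch only (not part of the original module): argv2line()
    # quotes arguments containing spaces so that line2argv() can recover the
    # original vector for typical, unescaped input.
    argv = ["commit", "-m", "fix parser bug", "src/main.py"]
    line = argv2line(argv)
    assert line2argv(line) == argv
    return line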
# Recipe: dedent (0.1) in /Users/trentm/tm/recipes/cookbook
def _dedentlines(lines, tabsize=8, skip_first_line=False):
"""_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
"lines" is a list of lines to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
Same as dedent() except operates on a sequence of lines. Note: the
lines list is modified **in-place**.
"""
DEBUG = False
if DEBUG:
print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
% (tabsize, skip_first_line))
indents = []
margin = None
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
indent = 0
for ch in line:
if ch == ' ':
indent += 1
elif ch == '\t':
indent += tabsize - (indent % tabsize)
elif ch in '\r\n':
continue # skip all-whitespace lines
else:
break
else:
continue # skip all-whitespace lines
if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if DEBUG: print("dedent: margin=%r" % margin)
if margin is not None and margin > 0:
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
removed = 0
for j, ch in enumerate(line):
if ch == ' ':
removed += 1
elif ch == '\t':
removed += tabsize - (removed % tabsize)
elif ch in '\r\n':
if DEBUG:
print("dedent: %r: EOL -> strip up to EOL" % line)
lines[i] = lines[i][j:]
break
else:
raise ValueError("unexpected non-whitespace char %r in "
"line %r while removing %d-space margin" %
(ch, line, margin))
if DEBUG:
print("dedent: %r: %r -> removed %d/%d"\
% (line, ch, removed, margin))
if removed == margin:
lines[i] = lines[i][j + 1:]
break
elif removed > margin:
lines[i] = ' ' * (removed - margin) + lines[i][j + 1:]
break
return lines
def _dedent(text, tabsize=8, skip_first_line=False):
"""_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
"text" is the text to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
textwrap.dedent(s), but don't expand tabs to spaces
"""
lines = text.splitlines(1)
_dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
return ''.join(lines)
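def _demo_dedent():
    # Illustrative sketch only (not part of the original module): the common
    # 8-space margin is stripped, but unlike textwrap.dedent() the embedded
    # tab on the second line is left unexpanded.
    text = "        first line\n        \tsecond line\n"
    return _dedent(text)  # -> "first line\n\tsecond line\n"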
def _get_indent(marker, s, tab_width=8):
"""_get_indent(marker, s, tab_width=8) ->
(<indentation-of-'marker'>, <indentation-width>)"""
# Figure out how much the marker is indented.
INDENT_CHARS = tuple(' \t')
start = s.index(marker)
i = start
while i > 0:
if s[i - 1] not in INDENT_CHARS:
break
i -= 1
indent = s[i:start]
indent_width = 0
for ch in indent:
if ch == ' ':
indent_width += 1
elif ch == '\t':
indent_width += tab_width - (indent_width % tab_width)
return indent, indent_width
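def _demo_get_indent():
    # Illustrative sketch only (not part of the original module): the marker
    # below is indented with one tab, which counts as 8 columns at the
    # default tab width.
    help_text = "Usage:\n\t${cmd_option_list}\n"
    return _get_indent("${cmd_option_list}", help_text)  # -> ("\t", 8)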
def _get_trailing_whitespace(marker, s):
"""Return the whitespace content trailing the given 'marker' in string 's',
up to and including a newline.
"""
suffix = ''
start = s.index(marker) + len(marker)
i = start
while i < len(s):
if s[i] in ' \t':
suffix += s[i]
elif s[i] in '\r\n':
suffix += s[i]
if s[i] == '\r' and i + 1 < len(s) and s[i + 1] == '\n':
suffix += s[i + 1]
break
else:
break
i += 1
return suffix
#---- bash completion support
# Note: This is still experimental. I expect to change this
# significantly.
#
# To get Bash completion for a cmdln.Cmdln class, run the following
# bash command:
# $ complete -C 'python -m cmdln /path/to/script.py CmdlnClass' cmdname
# For example:
# $ complete -C 'python -m cmdln ~/bin/svn.py SVN' svn
#
#TODO: Simplify the above so the path to the script need not be given (try to
# find it on PATH, if possible). Could also make class name
# optional if there is only one in the module (common case).
if __name__ == "__main__" and len(sys.argv) == 6:
def _log(s):
        return # no-op; comment out this return to enable the debug logging below
from os.path import expanduser
fout = open(expanduser("~/tmp/bashcpln.log"), 'a')
fout.write(str(s) + '\n')
fout.close()
# Recipe: module_from_path (1.0.1+)
def _module_from_path(path):
import imp, os, sys
path = os.path.expanduser(path)
dir = os.path.dirname(path) or os.curdir
name = os.path.splitext(os.path.basename(path))[0]
sys.path.insert(0, dir)
try:
iinfo = imp.find_module(name, [dir])
return imp.load_module(name, *iinfo)
finally:
sys.path.remove(dir)
def _get_bash_cplns(script_path, class_name, cmd_name, token,
preceding_token):
_log('--')
_log('get_cplns(%r, %r, %r, %r, %r)' %
(script_path, class_name, cmd_name, token, preceding_token))
comp_line = os.environ["COMP_LINE"]
comp_point = int(os.environ["COMP_POINT"])
_log("COMP_LINE: %r" % comp_line)
_log("COMP_POINT: %r" % comp_point)
try:
script = _module_from_path(script_path)
except ImportError as ex:
_log("error importing `%s': %s" % (script_path, ex))
return []
shell = getattr(script, class_name)()
cmd_map = shell._get_canonical_map()
del cmd_map["EOF"]
del cmd_map["KeyboardInterrupt"]
# Determine if completing the sub-command name.
parts = comp_line[:comp_point].split(None, 1)
_log(parts)
if len(parts) == 1 or not (' ' in parts[1] or '\t' in parts[1]):
#TODO: if parts[1].startswith('-'): handle top-level opts
_log("complete sub-command names")
matches = {}
for name, canon_name in cmd_map.items():
if name.startswith(token):
matches[name] = canon_name
if not matches:
return []
elif len(matches) == 1:
return matches.keys()
elif len(set(matches.values())) == 1:
                return [list(matches.values())[0]]
else:
return matches.keys()
# Otherwise, complete options for the given sub-command.
#TODO: refine this so it does the right thing with option args
if token.startswith('-'):
cmd_name = comp_line.split(None, 2)[1]
try:
cmd_canon_name = cmd_map[cmd_name]
except KeyError:
return []
handler = shell._get_cmd_handler(cmd_canon_name)
optparser = getattr(handler, "optparser", None)
if optparser is None:
optparser = SubCmdOptionParser()
opt_strs = []
for option in optparser.option_list:
for opt_str in option._short_opts + option._long_opts:
if opt_str.startswith(token):
opt_strs.append(opt_str)
return opt_strs
return []
for cpln in _get_bash_cplns(*sys.argv[1:]):
print(cpln)
| 36.097054 | 83 | 0.554014 |
79472afda37eabd0815e3f8c258ec1bcd55ef2cb | 10,153 | py | Python | MultiTool_src/Modules/proxy_checker.py | Reven8e/multitool | 7a70cc8200f08e917ecf538e7589a2eba97e38bb | [
"MIT"
] | 17 | 2020-11-17T17:40:26.000Z | 2022-03-13T07:05:43.000Z | MultiTool_src/Modules/proxy_checker.py | Reven8e/multitool | 7a70cc8200f08e917ecf538e7589a2eba97e38bb | [
"MIT"
] | null | null | null | MultiTool_src/Modules/proxy_checker.py | Reven8e/multitool | 7a70cc8200f08e917ecf538e7589a2eba97e38bb | [
"MIT"
] | 5 | 2020-11-17T17:55:04.000Z | 2021-12-15T20:59:37.000Z | import random, time, sys, subprocess, threading, pycurl, os, requests
from colorama import Fore
class Proxy_Checker():
def __init__(self):
subprocess.call('clear', shell=True)
sys.setrecursionlimit(10**6)
print(f"""{Fore.BLUE}
██▓███ ██▀███ ▒█████ ▒██ ██▓██ ██▓ ▄▄▄█████▓▒█████ ▒█████ ██▓
▓██░ ██▓██ ▒ ██▒██▒ ██▒▒ █ █ ▒░▒██ ██▒ ▓ ██▒ ▓▒██▒ ██▒██▒ ██▓██▒
▓██░ ██▓▓██ ░▄█ ▒██░ ██░░ █ ░ ▒██ ██░ ▒ ▓██░ ▒▒██░ ██▒██░ ██▒██░
▒██▄█▓▒ ▒██▀▀█▄ ▒██ ██░░ █ █ ▒ ░ ▐██▓░ ░ ▓██▓ ░▒██ ██▒██ ██▒██░
▒██▒ ░ ░██▓ ▒██░ ████▓▒▒██▒ ▒██▒ ░ ██▒▓░ ▒██▒ ░░ ████▓▒░ ████▓▒░██████▒
▒▓▒░ ░ ░ ▒▓ ░▒▓░ ▒░▒░▒░▒▒ ░ ░▓ ░ ██▒▒▒ ▒ ░░ ░ ▒░▒░▒░░ ▒░▒░▒░░ ▒░▓ ░
░▒ ░ ░▒ ░ ▒░ ░ ▒ ▒░░░ ░▒ ░▓██ ░▒░ ░ ░ ▒ ▒░ ░ ▒ ▒░░ ░ ▒ ░
░░ ░░ ░░ ░ ░ ▒ ░ ░ ▒ ▒ ░░ ░ ░ ░ ░ ▒ ░ ░ ░ ▒ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
░ ░
""")
self.yes = ["yes", "y", "ye", "Y", "YES", 'YE']
self.no = ["no", "n", "NO", "n"]
self.thr = 100
self.TARGET = input(f"{Fore.BLUE}[CONSOLE] Please enter full target url to check the proxies: ")
        self.verbose = input(f"{Fore.BLUE}[CONSOLE] Print bad proxies too? (y/n): ")
self.proxy_type = input(f"{Fore.BLUE}[CONSOLE] Proxy type (http, socks4, socks5): ")
get_proxies = input(f'{Fore.BLUE}[CONSOLE] Get the proxies or you already have http proxy list? (get/n):')
if get_proxies == 'get':
try:
os.remove("ProxyChecker/http_proxies.txt")
os.remove("ProxyChecker/good_proxies.txt")
except:
pass
proxylist = open(f'ProxyChecker/{self.proxy_type}_proxies.txt', 'a+')
try:
r1 = requests.get(f'https://api.proxyscrape.com?request=getproxies&proxytype={self.proxy_type}&ssl=yes')
proxylist.write(r1.text)
except:
pass
proxylist.close()
self.proxy_file = f'ProxyChecker/{self.proxy_type}_proxies.txt'
else:
self.proxy_file = input(f"{Fore.BLUE}[CONSOLE] Please enter the proxy filename: ")
self.timeout = int(input(f"{Fore.BLUE}[CONSOLE] Please enter proxy timeout (10-100): "))
self.pro = open("ProxyChecker/good_proxies.txt", "a+")
self.checked = 0
self.good = 0
self.headers= [
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36',
'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.3.4000',
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
]
def proxy_checker(self, proxy):
if self.proxy_type == "http":
try:
ip, port = proxy.split(":")[0].replace('\n', ''), proxy.split(":")[1].replace('\n', '')
c = pycurl.Curl()
c.setopt(pycurl.URL, self.TARGET)
c.setopt(pycurl.PROXY, ip)
c.setopt(pycurl.PROXYPORT, int(port))
c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_HTTP)
c.setopt(pycurl.HTTPHEADER, [f'user-agent: {random.choice(self.headers)}'])
c.setopt(pycurl.CONNECTTIMEOUT, self.timeout)
c.setopt(pycurl.WRITEFUNCTION, lambda x: None)
c.perform()
if c.getinfo(pycurl.HTTP_CODE) != 403:
print(f"{Fore.GREEN}Good Proxy: {proxy}")
self.good += 1
self.pro.write(f"{proxy}\n")
else:
if self.verbose in self.yes:
print(f"{Fore.RED}Bad Proxy: {proxy}")
except pycurl.error:
if self.verbose in self.yes:
print(f"{Fore.RED}Bad Proxy: {proxy}")
except Exception as e:
print(f'{Fore.RED}{e}')
elif self.proxy_type == "socks4":
try:
ip, port = proxy.split(":")[0].replace('\n', ''), proxy.split(":")[1].replace('\n', '')
c = pycurl.Curl()
c.setopt(pycurl.URL, self.TARGET)
c.setopt(pycurl.PROXY, ip)
c.setopt(pycurl.PROXYPORT, int(port))
c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS4)
c.setopt(pycurl.HTTPHEADER, [f'user-agent: {random.choice(self.headers)}'])
c.setopt(pycurl.CONNECTTIMEOUT, self.timeout)
c.setopt(pycurl.WRITEFUNCTION, lambda x: None)
c.perform()
print(f"{Fore.GREEN}Good Proxy: {proxy}")
self.good += 1
self.pro.write(f"{proxy}\n")
except pycurl.error:
if self.verbose in self.yes:
print(f"{Fore.RED}Bad Proxy: {proxy}")
except Exception as e:
print(f'{Fore.RED}{e}')
elif self.proxy_type == "socks5":
try:
ip, port = proxy.split(":")[0].replace('\n', ''), proxy.split(":")[1].replace('\n', '')
c = pycurl.Curl()
c.setopt(pycurl.URL, self.TARGET)
c.setopt(pycurl.PROXY, ip)
c.setopt(pycurl.PROXYPORT, int(port))
c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5)
c.setopt(pycurl.HTTPHEADER, [f'user-agent: {random.choice(self.headers)}'])
c.setopt(pycurl.CONNECTTIMEOUT, self.timeout)
c.setopt(pycurl.WRITEFUNCTION, lambda x: None)
c.perform()
print(f"{Fore.GREEN}Good Proxy: {proxy}")
self.good += 1
self.pro.write(f"{proxy}\n")
except pycurl.error:
if self.verbose in self.yes:
print(f"{Fore.RED}Bad Proxy: {proxy}")
except Exception as e:
print(f'{Fore.RED}{e}')
def start(self):
print(f"{Fore.YELLOW}[CONSOLE] Okay! I'm searching for the best proxies. It may take some time...")
proxys = open(f"{self.proxy_file}", "r", encoding="utf-8", errors='ignore')
proxies = [proxy.replace("\n", "") for proxy in proxys]
threads = []
        length = len(proxies)
while True:
if threading.active_count() <self.thr:
if self.checked < length:
t = threading.Thread(target=self.proxy_checker, args=(proxies[self.checked],))
threads.append(t)
t.start()
self.checked +=1
else:
print(f"\n\n{Fore.RED}[CONSOLE] Closing proxy threads.")
for th in threads:
th.join()
print(f"\n\n{Fore.YELLOW}[CONSOLE] Found {self.good} proxies out of {length}.")
proxys.close()
self.pro.close()
return | 54.005319 | 204 | 0.504087 |
79472b249a47ce17f0bf199424f6d6172c76bf52 | 6,164 | py | Python | sphinx_gallery/notebook.py | ksunden/sphinx-gallery | 7f26c1a6fc8f53169af2e5fa558d0efbfb32fb4f | [
"BSD-3-Clause"
] | null | null | null | sphinx_gallery/notebook.py | ksunden/sphinx-gallery | 7f26c1a6fc8f53169af2e5fa558d0efbfb32fb4f | [
"BSD-3-Clause"
] | null | null | null | sphinx_gallery/notebook.py | ksunden/sphinx-gallery | 7f26c1a6fc8f53169af2e5fa558d0efbfb32fb4f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
r"""
Parser for Jupyter notebooks
============================
Class that holds the Jupyter notebook information
"""
# Author: Óscar Nájera
# License: 3-clause BSD
from __future__ import division, absolute_import, print_function
from functools import partial
import argparse
import json
import re
import sys
import copy
from .py_source_parser import split_code_and_text_blocks
from .utils import replace_py_ipynb
def jupyter_notebook_skeleton():
"""Returns a dictionary with the elements of a Jupyter notebook"""
py_version = sys.version_info
notebook_skeleton = {
"cells": [],
"metadata": {
"kernelspec": {
"display_name": "Python " + str(py_version[0]),
"language": "python",
"name": "python" + str(py_version[0])
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": py_version[0]
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython" + str(py_version[0]),
"version": '{0}.{1}.{2}'.format(*sys.version_info[:3])
}
},
"nbformat": 4,
"nbformat_minor": 0
}
return notebook_skeleton
def directive_fun(match, directive):
"""Helper to fill in directives"""
directive_to_alert = dict(note="info", warning="danger")
return ('<div class="alert alert-{0}"><h4>{1}</h4><p>{2}</p></div>'
.format(directive_to_alert[directive], directive.capitalize(),
match.group(1).strip()))
def rst2md(text):
"""Converts the RST text from the examples docstrigs and comments
into markdown text for the Jupyter notebooks"""
top_heading = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M)
text = re.sub(top_heading, r'# \1', text)
math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^ .+)*)', flags=re.M)
text = re.sub(math_eq,
lambda match: r'\begin{{align}}{0}\end{{align}}'.format(
match.group(1).strip()),
text)
inline_math = re.compile(r':math:`(.+?)`', re.DOTALL)
text = re.sub(inline_math, r'$\1$', text)
directives = ('warning', 'note')
for directive in directives:
directive_re = re.compile(r'^\.\. %s::((?:.+)?(?:\n+^ .+)*)'
% directive, flags=re.M)
text = re.sub(directive_re,
partial(directive_fun, directive=directive), text)
links = re.compile(r'^ *\.\. _.*:.*$\n', flags=re.M)
text = re.sub(links, '', text)
refs = re.compile(r':ref:`')
text = re.sub(refs, '`', text)
contents = re.compile(r'^\s*\.\. contents::.*$(\n +:\S+: *$)*\n',
flags=re.M)
text = re.sub(contents, '', text)
images = re.compile(
r'^\.\. image::(.*$)(?:\n *:alt:(.*$)\n)?(?: +:\S+:.*$\n)*',
flags=re.M)
text = re.sub(
        images, lambda match: '![{1}]({0})\n'.format(
match.group(1).strip(), (match.group(2) or '').strip()), text)
return text
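def _demo_rst2md():
    # Illustrative sketch only (not part of the original module): a few
    # common reST constructs and the Markdown that rst2md() produces for
    # them (heading, note admonition, :ref: and :math: roles).
    rst = ("=====\nTitle\n=====\n\n"
           ".. note:: remember to install the package first\n\n"
           "See :ref:`plotting examples` and :math:`a^2 + b^2 = c^2`.\n")
    return rst2md(rst)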
def jupyter_notebook(script_blocks, gallery_conf):
"""Generate a Jupyter notebook file cell-by-cell
Parameters
----------
script_blocks : list
Script execution cells.
gallery_conf : dict
The sphinx-gallery configuration dictionary.
"""
first_cell = gallery_conf.get("first_notebook_cell", "%matplotlib inline")
work_notebook = jupyter_notebook_skeleton()
add_code_cell(work_notebook, first_cell)
fill_notebook(work_notebook, script_blocks)
return work_notebook
def add_code_cell(work_notebook, code):
"""Add a code cell to the notebook
Parameters
----------
code : str
Cell content
"""
code_cell = {
"cell_type": "code",
"execution_count": None,
"metadata": {"collapsed": False},
"outputs": [],
"source": [code.strip()]
}
work_notebook["cells"].append(code_cell)
def add_markdown_cell(work_notebook, text):
"""Add a markdown cell to the notebook
Parameters
----------
    text : str
Cell content
"""
markdown_cell = {
"cell_type": "markdown",
"metadata": {},
"source": [rst2md(text)]
}
work_notebook["cells"].append(markdown_cell)
def fill_notebook(work_notebook, script_blocks):
"""Writes the Jupyter notebook cells
Parameters
----------
script_blocks : list of tuples
"""
for blabel, bcontent, lineno in script_blocks:
if blabel == 'code':
add_code_cell(work_notebook, bcontent)
else:
add_markdown_cell(work_notebook, bcontent + '\n')
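def _demo_fill_notebook():
    # Illustrative sketch only (not part of the original module): assemble a
    # two-cell notebook from script blocks shaped like the (label, content,
    # line number) tuples produced by split_code_and_text_blocks().
    blocks = [
        ('text', 'Some description of the example.', 1),
        ('code', "print('hello gallery')", 3),
    ]
    nb = jupyter_notebook_skeleton()
    fill_notebook(nb, blocks)
    return nb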
def save_notebook(work_notebook, write_file):
"""Saves the Jupyter work_notebook to write_file"""
with open(write_file, 'w') as out_nb:
json.dump(work_notebook, out_nb, indent=2)
###############################################################################
# Notebook shell utility
def python_to_jupyter_cli(args=None, namespace=None):
"""Exposes the jupyter notebook renderer to the command line
Takes the same arguments as ArgumentParser.parse_args
"""
from . import gen_gallery # To avoid circular import
parser = argparse.ArgumentParser(
description='Sphinx-Gallery Notebook converter')
parser.add_argument('python_src_file', nargs='+',
help='Input Python file script to convert. '
'Supports multiple files and shell wildcards'
' (e.g. *.py)')
args = parser.parse_args(args, namespace)
for src_file in args.python_src_file:
file_conf, blocks = split_code_and_text_blocks(src_file)
print('Converting {0}'.format(src_file))
gallery_conf = copy.deepcopy(gen_gallery.DEFAULT_GALLERY_CONF)
example_nb = jupyter_notebook(blocks, gallery_conf)
save_notebook(example_nb, replace_py_ipynb(src_file))
| 30.666667 | 79 | 0.572193 |
79472b2a7262f4908984c2732bffbfcf4699d39a | 1,368 | py | Python | glue-scripts/process_marketing_data.py | biswas/aws-serverless-etl | 062c013d2025ad1be9cd65bcf5a4663be479396d | [
"Unlicense"
] | null | null | null | glue-scripts/process_marketing_data.py | biswas/aws-serverless-etl | 062c013d2025ad1be9cd65bcf5a4663be479396d | [
"Unlicense"
] | 1 | 2022-02-04T23:00:32.000Z | 2022-02-04T23:00:32.000Z | glue-scripts/process_marketing_data.py | biswas/aws-serverless-etl | 062c013d2025ad1be9cd65bcf5a4663be479396d | [
"Unlicense"
] | null | null | null | import sys
import pyspark.sql.functions as func
from awsglue.dynamicframe import DynamicFrame
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
args = getResolvedOptions(sys.argv, ['JOB_NAME', 's3_output_path', 'database_name', 'table_name'])
s3_output_path = args['s3_output_path']
database_name = args['database_name']
table_name = args['table_name']
sc = SparkContext.getOrCreate()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
mktg_DyF = glueContext.create_dynamic_frame\
.from_catalog(database=database_name, table_name=table_name)
mktg_DyF = ApplyMapping.apply(frame=mktg_DyF, mappings=[
('date', 'string', 'date', 'string'),
('new visitors seo', 'bigint', 'new_visitors_seo', 'bigint'),
('new visitors cpc', 'bigint', 'new_visitors_cpc', 'bigint'),
('new visitors social media', 'bigint', 'new_visitors_social_media', 'bigint'),
('return visitors', 'bigint', 'return_visitors', 'bigint'),
], transformation_ctx='applymapping1')
mktg_DyF.printSchema()
mktg_DF = mktg_DyF.toDF()
mktg_DF.write\
.format('parquet')\
.option('header', 'true')\
.mode('overwrite')\
.save(s3_output_path)
job.commit() | 31.813953 | 98 | 0.739766 |
79472c022a4eb58620afb086a13e6cf1fd6b7ca5 | 8,089 | py | Python | calc_metrics.py | maua-maua-maua/nvGAN | edea24c58646780c9fb8ea942e49708ce9d62421 | [
"MIT"
] | null | null | null | calc_metrics.py | maua-maua-maua/nvGAN | edea24c58646780c9fb8ea942e49708ce9d62421 | [
"MIT"
] | null | null | null | calc_metrics.py | maua-maua-maua/nvGAN | edea24c58646780c9fb8ea942e49708ce9d62421 | [
"MIT"
] | null | null | null | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Calculate quality metrics for previous training run or pretrained network pickle."""
import copy
import json
import os
import tempfile
import click
import torch
import dnnlib
import legacy
from metrics import metric_main, metric_utils
from torch_utils import custom_ops, misc, training_stats
from torch_utils.ops import conv2d_gradfix
#----------------------------------------------------------------------------
def subprocess_fn(rank, args, temp_dir):
dnnlib.util.Logger(should_flush=True)
# Init torch.distributed.
if args.num_gpus > 1:
init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
if os.name == 'nt':
init_method = 'file:///' + init_file.replace('\\', '/')
torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus)
else:
init_method = f'file://{init_file}'
torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus)
# Init torch_utils.
sync_device = torch.device('cuda', rank) if args.num_gpus > 1 else None
training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
if rank != 0 or not args.verbose:
custom_ops.verbosity = 'none'
# Configure torch.
device = torch.device('cuda', rank)
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
conv2d_gradfix.enabled = True
# Print network summary.
G = copy.deepcopy(args.G).eval().requires_grad_(False).to(device)
if rank == 0 and args.verbose:
z = torch.empty([1, G.z_dim], device=device)
c = torch.empty([1, G.c_dim], device=device)
misc.print_module_summary(G, dict(z=z, c=c))
# Calculate each metric.
for metric in args.metrics:
if rank == 0 and args.verbose:
print(f'Calculating {metric}...')
progress = metric_utils.ProgressMonitor(verbose=args.verbose)
result_dict = metric_main.calc_metric(metric=metric, G=G, dataset_kwargs=args.dataset_kwargs,
num_gpus=args.num_gpus, rank=rank, device=device, progress=progress, snapshot_pkl=args.network_pkl)
if rank == 0:
metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl)
if rank == 0 and args.verbose:
print()
# Done.
if rank == 0 and args.verbose:
print('Exiting...')
#----------------------------------------------------------------------------
def parse_comma_separated_list(s):
if isinstance(s, list):
return s
if s is None or s.lower() == 'none' or s == '':
return []
return s.split(',')
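def _demo_parse_comma_separated_list():
    # Illustrative sketch only (not part of the original script): how the
    # --metrics option string is normalized before validation.
    assert parse_comma_separated_list('fid50k_full,kid50k_full') == ['fid50k_full', 'kid50k_full']
    assert parse_comma_separated_list('none') == []
    assert parse_comma_separated_list(None) == []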
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
@click.option('network_pkl', '--network', help='Network pickle filename or URL', metavar='PATH', required=True)
@click.option('--metrics', help='Quality metrics', metavar='[NAME|A,B,C|none]', type=parse_comma_separated_list, default='fid50k_full', show_default=True)
@click.option('--data', help='Dataset to evaluate against [default: look up]', metavar='[ZIP|DIR]')
@click.option('--mirror', help='Enable dataset x-flips [default: look up]', type=bool, metavar='BOOL')
@click.option('--gpus', help='Number of GPUs to use', type=int, default=1, metavar='INT', show_default=True)
@click.option('--verbose', help='Print optional information', type=bool, default=True, metavar='BOOL', show_default=True)
def calc_metrics(ctx, network_pkl, metrics, data, mirror, gpus, verbose):
"""Calculate quality metrics for previous training run or pretrained network pickle.
Examples:
\b
# Previous training run: look up options automatically, save result to JSONL file.
python calc_metrics.py --metrics=eqt50k_int,eqr50k \\
--network=~/training-runs/00000-stylegan3-r-mydataset/network-snapshot-000000.pkl
\b
# Pre-trained network pickle: specify dataset explicitly, print result to stdout.
python calc_metrics.py --metrics=fid50k_full --data=~/datasets/ffhq-1024x1024.zip --mirror=1 \\
--network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-ffhq-1024x1024.pkl
\b
Recommended metrics:
fid50k_full Frechet inception distance against the full dataset.
kid50k_full Kernel inception distance against the full dataset.
      pr50k3_full  Precision and recall against the full dataset.
ppl2_wend Perceptual path length in W, endpoints, full image.
eqt50k_int Equivariance w.r.t. integer translation (EQ-T).
eqt50k_frac Equivariance w.r.t. fractional translation (EQ-T_frac).
eqr50k Equivariance w.r.t. rotation (EQ-R).
\b
Legacy metrics:
fid50k Frechet inception distance against 50k real images.
kid50k Kernel inception distance against 50k real images.
pr50k3 Precision and recall against 50k real images.
is50k Inception score for CIFAR-10.
"""
dnnlib.util.Logger(should_flush=True)
# Validate arguments.
args = dnnlib.EasyDict(metrics=metrics, num_gpus=gpus, network_pkl=network_pkl, verbose=verbose)
if not all(metric_main.is_valid_metric(metric) for metric in args.metrics):
ctx.fail('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))
if not args.num_gpus >= 1:
ctx.fail('--gpus must be at least 1')
# Load network.
if not dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl):
ctx.fail('--network must point to a file or URL')
if args.verbose:
print(f'Loading network from "{network_pkl}"...')
with dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f:
network_dict = legacy.load_network_pkl(f)
args.G = network_dict['G_ema'] # subclass of torch.nn.Module
# Initialize dataset options.
if data is not None:
args.dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data)
elif network_dict['training_set_kwargs'] is not None:
args.dataset_kwargs = dnnlib.EasyDict(network_dict['training_set_kwargs'])
else:
ctx.fail('Could not look up dataset options; please specify --data')
# Finalize dataset options.
args.dataset_kwargs.resolution = args.G.img_resolution
args.dataset_kwargs.use_labels = (args.G.c_dim != 0)
if mirror is not None:
args.dataset_kwargs.xflip = mirror
# Print dataset options.
if args.verbose:
print('Dataset options:')
print(json.dumps(args.dataset_kwargs, indent=2))
# Locate run dir.
args.run_dir = None
if os.path.isfile(network_pkl):
pkl_dir = os.path.dirname(network_pkl)
if os.path.isfile(os.path.join(pkl_dir, 'training_options.json')):
args.run_dir = pkl_dir
# Launch processes.
if args.verbose:
print('Launching processes...')
torch.multiprocessing.set_start_method('spawn')
with tempfile.TemporaryDirectory() as temp_dir:
if args.num_gpus == 1:
subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
else:
torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)
#----------------------------------------------------------------------------
if __name__ == "__main__":
calc_metrics() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 43.256684 | 154 | 0.664606 |
79472cdbb5787bbb74b76e1e8d236cdb5bfff36b | 4,151 | py | Python | 3d_classification/ignite/densenet_evaluation_dict.py | tommydino93/tutorials | aca1f3c44e6697029a8b81c86497ca19bf433698 | [
"Apache-2.0"
] | 535 | 2020-09-16T06:23:49.000Z | 2022-03-31T13:48:34.000Z | 3d_classification/ignite/densenet_evaluation_dict.py | tommydino93/tutorials | aca1f3c44e6697029a8b81c86497ca19bf433698 | [
"Apache-2.0"
] | 454 | 2020-09-16T02:11:17.000Z | 2022-03-31T20:00:09.000Z | 3d_classification/ignite/densenet_evaluation_dict.py | tommydino93/tutorials | aca1f3c44e6697029a8b81c86497ca19bf433698 | [
"Apache-2.0"
] | 289 | 2020-09-21T16:24:53.000Z | 2022-03-31T13:04:14.000Z | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import numpy as np
import torch
from ignite.engine import _prepare_batch, create_supervised_evaluator
from ignite.metrics import Accuracy
from torch.utils.data import DataLoader
import monai
from monai.handlers import CheckpointLoader, ClassificationSaver, StatsHandler
from monai.transforms import AddChanneld, Compose, LoadImaged, Resized, ScaleIntensityd, EnsureTyped
def main():
monai.config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
# IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
# the path of ixi IXI-T1 dataset
data_path = os.sep.join([".", "workspace", "data", "medical", "ixi", "IXI-T1"])
images = [
"IXI607-Guys-1097-T1.nii.gz",
"IXI175-HH-1570-T1.nii.gz",
"IXI385-HH-2078-T1.nii.gz",
"IXI344-Guys-0905-T1.nii.gz",
"IXI409-Guys-0960-T1.nii.gz",
"IXI584-Guys-1129-T1.nii.gz",
"IXI253-HH-1694-T1.nii.gz",
"IXI092-HH-1436-T1.nii.gz",
"IXI574-IOP-1156-T1.nii.gz",
"IXI585-Guys-1130-T1.nii.gz",
]
images = [os.sep.join([data_path, f]) for f in images]
# 2 binary labels for gender classification: man and woman
labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)
val_files = [{"img": img, "label": label} for img, label in zip(images, labels)]
# define transforms for image
val_transforms = Compose(
[
LoadImaged(keys=["img"]),
AddChanneld(keys=["img"]),
ScaleIntensityd(keys=["img"]),
Resized(keys=["img"], spatial_size=(96, 96, 96)),
EnsureTyped(keys=["img"]),
]
)
# create DenseNet121
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = monai.networks.nets.DenseNet121(spatial_dims=3, in_channels=1, out_channels=2).to(device)
def prepare_batch(batch, device=None, non_blocking=False):
return _prepare_batch((batch["img"], batch["label"]), device, non_blocking)
metric_name = "Accuracy"
# add evaluation metric to the evaluator engine
val_metrics = {metric_name: Accuracy()}
# Ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration,
# user can add output_transform to return other values
evaluator = create_supervised_evaluator(net, val_metrics, device, True, prepare_batch=prepare_batch)
# add stats event handler to print validation stats via evaluator
val_stats_handler = StatsHandler(
name="evaluator",
output_transform=lambda x: None, # no need to print loss value, so disable per iteration output
)
val_stats_handler.attach(evaluator)
# for the array data format, assume the 3rd item of batch data is the meta_data
prediction_saver = ClassificationSaver(
output_dir="tempdir",
name="evaluator",
batch_transform=lambda batch: batch["img_meta_dict"],
output_transform=lambda output: output[0].argmax(1),
)
prediction_saver.attach(evaluator)
# the model was trained by "densenet_training_dict" example
CheckpointLoader(load_path="./runs_dict/net_checkpoint_20.pt", load_dict={"net": net}).attach(evaluator)
# create a validation data loader
val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())
state = evaluator.run(val_loader)
print(state)
if __name__ == "__main__":
main()
| 39.160377 | 108 | 0.695736 |
79472db7498321532b0da88898cf94fe3736043a | 14,987 | py | Python | bundle/Conque-GDB/autoload/conque_term/conque_sole.py | wonwooddo/vim_setup | 7555031a846ceb2986f491fd1738a5b5263af2f1 | [
"MIT"
] | 1 | 2018-02-15T10:33:31.000Z | 2018-02-15T10:33:31.000Z | bundle/Conque-GDB/autoload/conque_term/conque_sole.py | wonwooddo/vim_setup | 7555031a846ceb2986f491fd1738a5b5263af2f1 | [
"MIT"
] | 2 | 2018-11-30T10:33:35.000Z | 2019-03-07T12:04:56.000Z | bundle/Conque-GDB/autoload/conque_term/conque_sole.py | wonwooddo/vim_setup | 7555031a846ceb2986f491fd1738a5b5263af2f1 | [
"MIT"
] | 2 | 2020-11-08T06:02:44.000Z | 2020-11-19T01:45:01.000Z | # FILE: autoload/conque_term/conque_sole.py
# AUTHOR: Nico Raffo <[email protected]>
# WEBSITE: http://conque.googlecode.com
# MODIFIED: 2011-09-12
# VERSION: 2.3, for Vim 7.0
# LICENSE:
# Conque - Vim terminal/console emulator
# Copyright (C) 2009-2011 Nico Raffo
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Windows Console Emulator
This is the main interface to the Windows emulator. It reads new output from the background console
and updates the Vim buffer.
"""
import vim
class ConqueSole(Conque):
window_top = None
window_bottom = None
color_cache = {}
attribute_cache = {}
color_mode = None
color_conceals = {}
buffer = None
encoding = None
# counters for periodic rendering
buffer_redraw_ct = 1
screen_redraw_ct = 1
# line offset, shifts output down
offset = 0
def open(self):
""" Start command and initialize this instance
Arguments:
command - Command string, e.g. "Powershell.exe"
options - Dictionary of config options
python_exe - Path to the python.exe executable. Usually C:\PythonXX\python.exe
communicator_py - Path to subprocess controller script in user's vimfiles directory
"""
# get arguments
command = vim.eval('command')
options = vim.eval('options')
python_exe = vim.eval('py_exe')
communicator_py = vim.eval('py_vim')
# init size
self.columns = vim.current.window.width
self.lines = vim.current.window.height
self.window_top = 0
self.window_bottom = vim.current.window.height - 1
# color mode
self.color_mode = vim.eval('g:ConqueTerm_ColorMode')
# line offset
self.offset = int(options['offset'])
# init color
self.enable_colors = options['color'] and not CONQUE_FAST_MODE
# open command
self.proc = ConqueSoleWrapper()
self.proc.open(command, self.lines, self.columns, python_exe, communicator_py, options)
self.buffer = vim.current.buffer
self.screen_encoding = vim.eval('&fileencoding')
def read(self, timeout=1, set_cursor=True, return_output=False, update_buffer=True):
""" Read from console and update Vim buffer. """
try:
stats = self.proc.get_stats()
if not stats:
return
# disable screen and buffer redraws in fast mode
if not CONQUE_FAST_MODE:
self.buffer_redraw_ct += 1
self.screen_redraw_ct += 1
update_top = 0
update_bottom = 0
lines = []
# full buffer redraw, our favorite!
#if self.buffer_redraw_ct == CONQUE_SOLE_BUFFER_REDRAW:
# self.buffer_redraw_ct = 0
# update_top = 0
# update_bottom = stats['top_offset'] + self.lines
# (lines, attributes) = self.proc.read(update_top, update_bottom)
# if return_output:
# output = self.get_new_output(lines, update_top, stats)
# if update_buffer:
# for i in range(update_top, update_bottom + 1):
# if CONQUE_FAST_MODE:
# self.plain_text(i, lines[i], None, stats)
# else:
# self.plain_text(i, lines[i], attributes[i], stats)
# full screen redraw
if stats['cursor_y'] + 1 != self.l or stats['top_offset'] != self.window_top or self.screen_redraw_ct >= CONQUE_SOLE_SCREEN_REDRAW:
self.screen_redraw_ct = 0
update_top = self.window_top
update_bottom = max([stats['top_offset'] + self.lines + 1, stats['cursor_y']])
(lines, attributes) = self.proc.read(update_top, update_bottom - update_top + 1)
if return_output:
output = self.get_new_output(lines, update_top, stats)
if update_buffer:
for i in range(update_top, update_bottom + 1):
if CONQUE_FAST_MODE:
self.plain_text(i, lines[i - update_top], None, stats)
else:
self.plain_text(i, lines[i - update_top], attributes[i - update_top], stats)
# single line redraw
else:
update_top = stats['cursor_y']
(lines, attributes) = self.proc.read(update_top, 1)
if return_output:
output = self.get_new_output(lines, update_top, stats)
if update_buffer:
if lines[0].rstrip() != u(self.buffer[update_top].rstrip()):
if CONQUE_FAST_MODE:
self.plain_text(update_top, lines[0], None, stats)
else:
self.plain_text(update_top, lines[0], attributes[0], stats)
# reset current position
self.window_top = stats['top_offset']
self.l = stats['cursor_y'] + 1
self.c = stats['cursor_x'] + 1
# reposition cursor if this seems plausible
if set_cursor:
self.set_cursor(self.l, self.c)
if return_output:
return output
except:
pass
def get_new_output(self, lines, update_top, stats):
""" Calculate the "new" output from this read. Fake but useful """
if not (stats['cursor_y'] + 1 > self.l or (stats['cursor_y'] + 1 == self.l and stats['cursor_x'] + 1 > self.c)):
return ""
try:
num_to_return = stats['cursor_y'] - self.l + 2
lines = lines[self.l - update_top - 1:]
new_output = []
# first line
new_output.append(lines[0][self.c - 1:].rstrip())
# the rest
for i in range(1, num_to_return):
new_output.append(lines[i].rstrip())
except:
pass
return "\n".join(new_output)
def plain_text(self, line_nr, text, attributes, stats):
""" Write plain text to Vim buffer. """
# handle line offset
line_nr += self.offset
self.l = line_nr + 1
# remove trailing whitespace
text = text.rstrip()
        # if we're using concealed text for color, the line needs extra escape markup
if self.color_mode == 'conceal':
text = self.add_conceal_color(text, attributes, stats, line_nr)
# deal with character encoding
if CONQUE_PYTHON_VERSION == 2:
val = text.encode(self.screen_encoding)
else:
# XXX / Vim's python3 interface doesn't accept bytes object
val = str(text)
# update vim buffer
if len(self.buffer) <= line_nr:
self.buffer.append(val)
else:
self.buffer[line_nr] = val
        if self.enable_colors and self.color_mode != 'conceal' and line_nr > self.l - CONQUE_MAX_SYNTAX_LINES:
relevant = attributes[0:len(text)]
if line_nr not in self.attribute_cache or self.attribute_cache[line_nr] != relevant:
self.do_color(attributes=relevant, stats=stats)
self.attribute_cache[line_nr] = relevant
def add_conceal_color(self, text, attributes, stats, line_nr):
""" Add 'conceal' color strings to output text """
# stop here if coloration is disabled
if not self.enable_colors:
return text
# if no colors for this line, clear everything out
if len(attributes) == 0 or attributes == u(chr(stats['default_attribute'])) * len(attributes):
return text
new_text = ''
self.color_conceals[line_nr] = []
attribute_chunks = CONQUE_WIN32_REGEX_ATTR.findall(attributes)
offset = 0
ends = []
for attr in attribute_chunks:
attr_num = ord(attr[1])
ends = []
if attr_num != stats['default_attribute']:
color = self.translate_color(attr_num)
new_text += chr(27) + 'sf' + color['fg_code'] + ';'
ends.append(chr(27) + 'ef' + color['fg_code'] + ';')
self.color_conceals[line_nr].append(offset)
if attr_num > 15:
new_text += chr(27) + 'sb' + color['bg_code'] + ';'
ends.append(chr(27) + 'eb' + color['bg_code'] + ';')
self.color_conceals[line_nr].append(offset)
new_text += text[offset:offset + len(attr[0])]
# close color regions
ends.reverse()
for i in range(0, len(ends)):
self.color_conceals[line_nr].append(len(new_text))
new_text += ends[i]
offset += len(attr[0])
return new_text
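    # Illustrative note on the markup produced above (the word and color code are
    # hypothetical): with foreground code 'c00', the text 'error' comes back as
    # ESC + 'sfc00;' + 'error' + ESC + 'efc00;', i.e. a 'start foreground' marker
    # before the text and an 'end foreground' marker after it; background runs use
    # 'sb'/'eb' in the same way. The offset of every marker is recorded in
    # self.color_conceals[line_nr] so set_cursor() can compensate for the extra
    # characters later.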
def do_color(self, start=0, end=0, attributes='', stats=None):
""" Convert Windows console attributes into Vim syntax highlighting """
# if no colors for this line, clear everything out
if len(attributes) == 0 or attributes == u(chr(stats['default_attribute'])) * len(attributes):
self.color_changes = {}
self.apply_color(1, len(attributes), self.l)
return
attribute_chunks = CONQUE_WIN32_REGEX_ATTR.findall(attributes)
offset = 0
for attr in attribute_chunks:
attr_num = ord(attr[1])
if attr_num != stats['default_attribute']:
self.color_changes = self.translate_color(attr_num)
self.apply_color(offset + 1, offset + len(attr[0]) + 1, self.l)
offset += len(attr[0])
def translate_color(self, attr):
""" Convert Windows console attributes into RGB colors """
# check for cached color
if attr in self.color_cache:
return self.color_cache[attr]
# convert attribute integer to bit string
bit_str = bin(attr)
bit_str = bit_str.replace('0b', '')
# slice foreground and background portions of bit string
fg = bit_str[-4:].rjust(4, '0')
bg = bit_str[-8:-4].rjust(4, '0')
        # first build the foreground #rgb value
red = int(fg[1]) * 204 + int(fg[0]) * int(fg[1]) * 51
green = int(fg[2]) * 204 + int(fg[0]) * int(fg[2]) * 51
blue = int(fg[3]) * 204 + int(fg[0]) * int(fg[3]) * 51
fg_str = "#%02x%02x%02x" % (red, green, blue)
fg_code = "%02x%02x%02x" % (red, green, blue)
fg_code = fg_code[0] + fg_code[2] + fg_code[4]
        # then build the background #rgb value the same way
red = int(bg[1]) * 204 + int(bg[0]) * int(bg[1]) * 51
green = int(bg[2]) * 204 + int(bg[0]) * int(bg[2]) * 51
blue = int(bg[3]) * 204 + int(bg[0]) * int(bg[3]) * 51
bg_str = "#%02x%02x%02x" % (red, green, blue)
bg_code = "%02x%02x%02x" % (red, green, blue)
bg_code = bg_code[0] + bg_code[2] + bg_code[4]
# build value for color_changes
color = {'guifg': fg_str, 'guibg': bg_str}
if self.color_mode == 'conceal':
color['fg_code'] = fg_code
color['bg_code'] = bg_code
self.color_cache[attr] = color
return color
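    # Worked example of the attribute math above (illustrative): for attr 0x1F
    # (bright white text on a blue background), bin(31) yields bit_str '11111',
    # so fg = '1111' (intensity + red + green + blue) and bg = '0001' (blue).
    # Every set foreground channel becomes 204 + 51 = 255, giving fg_str '#ffffff'
    # and the shortened fg_code 'fff'; the background keeps only blue = 204,
    # giving bg_str '#0000cc' and bg_code '00c'.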
def write_vk(self, vk_code):
""" write virtual key code to shared memory using proprietary escape seq """
self.proc.write_vk(vk_code)
def update_window(self, force=False):
# This magically works
vim.command("normal! i")
def update_window_size(self, tell_subprocess = True):
""" Resize underlying console if Vim buffer size has changed """
if vim.current.window.width != self.columns or vim.current.window.height != self.lines:
# reset all window size attributes to default
self.columns = vim.current.window.width
self.lines = vim.current.window.height
self.working_columns = vim.current.window.width
self.working_lines = vim.current.window.height
self.bottom = vim.current.window.height
if tell_subprocess:
self.proc.window_resize(vim.current.window.height, vim.current.window.width)
def set_cursor(self, line, column):
""" Update cursor position in Vim buffer """
# handle offset
line += self.offset
# shift cursor position to handle concealed text
if self.enable_colors and self.color_mode == 'conceal':
if line - 1 in self.color_conceals:
for c in self.color_conceals[line - 1]:
if c < column:
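                        # each conceal marker emitted by add_conceal_color() is
                        # 7 characters long (ESC + 2-char tag + 3-char color + ';')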
column += 7
else:
break
# figure out line
buffer_line = line
if buffer_line > len(self.buffer):
for l in range(len(self.buffer) - 1, buffer_line):
self.buffer.append('')
# figure out column
real_column = column
if len(self.buffer[buffer_line - 1]) < real_column:
self.buffer[buffer_line - 1] = self.buffer[buffer_line - 1] + ' ' * (real_column - len(self.buffer[buffer_line - 1]))
        # setting the cursor through the python API can fail; fall back to :call cursor()
try:
vim.current.window.cursor = (buffer_line, real_column - 1)
except:
vim.command('call cursor(' + str(buffer_line) + ', ' + str(real_column) + ')')
def idle(self):
""" go into idle mode """
self.proc.idle()
def resume(self):
""" resume from idle mode """
self.proc.resume()
def close(self):
""" end console subprocess """
self.proc.close()
def abort(self):
""" end subprocess forcefully """
self.proc.close()
def get_buffer_line(self, line):
""" get buffer line """
return line
# vim:foldmethod=marker
| 32.439394 | 143 | 0.575832 |
79472e5043ed044b0831d6e3d903523186f0f8dc | 788 | py | Python | leetcode/007_reverse-integer.py | heyf/cloaked-octo-adventure | 8180684a8a1859efb836edd48556b5f3088be398 | [
"MIT"
] | null | null | null | leetcode/007_reverse-integer.py | heyf/cloaked-octo-adventure | 8180684a8a1859efb836edd48556b5f3088be398 | [
"MIT"
] | null | null | null | leetcode/007_reverse-integer.py | heyf/cloaked-octo-adventure | 8180684a8a1859efb836edd48556b5f3088be398 | [
"MIT"
] | null | null | null | '''
7. Reverse Integer
https://leetcode.com/problems/reverse-integer/
'''
class Solution(object):
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
ret = 0
sign = 1
if x < 0:
x = -x
sign = -1
while x > 0:
digit = x % 10
            x //= 10
ret = ret * 10 + digit
return ret * sign
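# Worked example of the digit loop above (illustrative): reversing 123 proceeds
#   x=123 -> digit 3, ret 3;   x=12 -> digit 2, ret 32;   x=1 -> digit 1, ret 321
# and the saved sign is re-applied at the end, so reverse(-123) returns -321.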
def test():
cases = list()
cases.append( (0,0) )
cases.append( (123,321) )
cases.append( (-123,-321) )
    def verify(case):
        s = Solution()
        print(s.reverse(case[0]))
        return s.reverse(case[0]) == case[1]
    result = list(filter(verify, cases))
return len(result) == len(cases), len(result), len(cases)
test() | 21.297297 | 61 | 0.458122 |
79472ef1c0fb88dd174511473e3956ae8727e364 | 1,314 | py | Python | website/forms.py | Jason13201/Lets-tidy-up-this-space | ca815f1e8227e7b60c9047c7a36d05355616021b | [
"MIT"
] | 2 | 2021-03-15T04:34:29.000Z | 2021-03-15T04:34:56.000Z | website/forms.py | Jason13201/Lets-tidy-up-this-space | ca815f1e8227e7b60c9047c7a36d05355616021b | [
"MIT"
] | null | null | null | website/forms.py | Jason13201/Lets-tidy-up-this-space | ca815f1e8227e7b60c9047c7a36d05355616021b | [
"MIT"
] | 1 | 2021-03-15T04:35:18.000Z | 2021-03-15T04:35:18.000Z | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField, DecimalField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired, Length, Email, EqualTo
class RegistrationForm(FlaskForm):
username = StringField(
"Username",
validators=[DataRequired(), Length(max=16)],
render_kw={"placeholder": "Enter a nickname for your household"},
)
password = PasswordField(
"Password",
validators=[DataRequired()],
render_kw={"placeholder": "Choose a strong password"},
)
confirm_password = PasswordField(
"Confirm Password",
validators=[
DataRequired(),
EqualTo("password", message="Passwords do not match."),
],
render_kw={"placeholder": "Re-enter your password"},
)
submit = SubmitField("Create account")
class LoginForm(FlaskForm):
username = StringField(
"Household nickname",
validators=[DataRequired()],
render_kw={"placeholder": "Enter nick"},
)
password = PasswordField(
"Password",
validators=[DataRequired()],
render_kw={"placeholder": "Enter password"},
)
remember = BooleanField("Remember Me")
submit = SubmitField("Login")
| 30.55814 | 87 | 0.649163 |
79472f3d8a59a37784856594f2e3a95c926e8dcb | 268 | py | Python | Exercises/Exercise1.py | davidavg/OOP_Python | ca4e8376a50b9c81b5ac18c466bd8d147bdbe679 | [
"MIT"
] | null | null | null | Exercises/Exercise1.py | davidavg/OOP_Python | ca4e8376a50b9c81b5ac18c466bd8d147bdbe679 | [
"MIT"
] | null | null | null | Exercises/Exercise1.py | davidavg/OOP_Python | ca4e8376a50b9c81b5ac18c466bd8d147bdbe679 | [
"MIT"
] | null | null | null | '''
Created on Aug 13, 2018
@author: david avalos
'''
class Calculator:
def addition(self, a, b):
print(a+b)
def subtraction(self, a, b):
print(a-b)
obj = Calculator()
obj.addition(5,2)
obj.subtraction(5,2)
| 14.888889 | 32 | 0.533582 |
79472fcbf7f028caab53f1c890fa17d95483484a | 1,029 | py | Python | data-science_interview_problems/app/forms.py | Max1993Liu/data-science-interview-problems | e71eed547f46bc2652722ed1f006be1ec6acf03c | [
"MIT"
] | null | null | null | data-science_interview_problems/app/forms.py | Max1993Liu/data-science-interview-problems | e71eed547f46bc2652722ed1f006be1ec6acf03c | [
"MIT"
] | null | null | null | data-science_interview_problems/app/forms.py | Max1993Liu/data-science-interview-problems | e71eed547f46bc2652722ed1f006be1ec6acf03c | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired, EqualTo
from wtforms import ValidationError
from app.models import User
class LoginForm(FlaskForm):
username = StringField("Username", validators=[DataRequired()])
password = PasswordField("Password", validators=[DataRequired()])
remember_me = BooleanField("Remember Me")
submit = SubmitField("Sign In")
class RegistrationForm(FlaskForm):
username = StringField("Username", validators=[DataRequired()])
password = PasswordField("Password", validators=[DataRequired()])
password2 = PasswordField("Repeat Password", validators=[DataRequired(), EqualTo('password')])
submit = SubmitField("Register")
def validate_username(self, username):
        # methods named validate_<fieldname> are invoked automatically by WTForms
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('Please use a different username.')
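    # Illustrative usage sketch (the values below are hypothetical): WTForms treats
    # methods named validate_<fieldname> as inline validators, so they run as part
    # of form.validate() alongside the validators declared on each field, e.g.
    #   form = RegistrationForm(username='taken_name', password='x', password2='x')
    #   form.validate()  # also triggers validate_username for the username field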
| 36.75 | 95 | 0.781341 |
794730b5e6382952686cfa0e4cd6ef3754920115 | 2,209 | py | Python | modules/python/test/test_houghcircles.py | paleozogt/opencv | c4b158ff91b2acb33252f4169ef0b24e0427e3e9 | [
"BSD-3-Clause"
] | 6 | 2018-03-08T09:06:31.000Z | 2021-05-16T22:07:34.000Z | modules/python/test/test_houghcircles.py | LiangYue1981816/ComputeVision-opencv | 1f214d232daa6f6a4d0f297327e656f638e8e13a | [
"BSD-3-Clause"
] | 1 | 2019-10-10T22:25:52.000Z | 2019-10-10T22:25:52.000Z | modules/python/test/test_houghcircles.py | LiangYue1981816/ComputeVision-opencv | 1f214d232daa6f6a4d0f297327e656f638e8e13a | [
"BSD-3-Clause"
] | 11 | 2016-03-20T18:32:24.000Z | 2020-12-31T21:22:22.000Z | #!/usr/bin/python
'''
This example illustrates how to use the cv2.HoughCircles() function.
'''
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import numpy as np
import sys
from numpy import pi, sin, cos
from tests_common import NewOpenCVTests
def circleApproximation(circle):
nPoints = 30
dPhi = 2*pi / nPoints
contour = []
for i in range(nPoints):
contour.append(([circle[0] + circle[2]*cos(i*dPhi),
circle[1] + circle[2]*sin(i*dPhi)]))
return np.array(contour).astype(int)
def convContoursIntersectionRate(c1, c2):
s1 = cv2.contourArea(c1)
s2 = cv2.contourArea(c2)
s, _ = cv2.intersectConvexConvex(c1, c2)
return 2*s/(s1+s2)
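# The ratio above is the Dice coefficient 2*|A∩B| / (|A| + |B|) computed on two
# convex contours: identical circles score 1.0 and disjoint ones 0.0; the test
# below treats a score above 0.6 as a match.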
class houghcircles_test(NewOpenCVTests):
def test_houghcircles(self):
fn = "samples/data/board.jpg"
src = self.get_sample(fn, 1)
img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
img = cv2.medianBlur(img, 5)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)[0]
testCircles = [[38, 181, 17.6],
[99.7, 166, 13.12],
[142.7, 160, 13.52],
[223.6, 110, 8.62],
[79.1, 206.7, 8.62],
[47.5, 351.6, 11.64],
[189.5, 354.4, 11.64],
[189.8, 298.9, 10.64],
[189.5, 252.4, 14.62],
[252.5, 393.4, 15.62],
[602.9, 467.5, 11.42],
[222, 210.4, 9.12],
[263.1, 216.7, 9.12],
[359.8, 222.6, 9.12],
[518.9, 120.9, 9.12],
[413.8, 113.4, 9.12],
[489, 127.2, 9.12],
[448.4, 121.3, 9.12],
[384.6, 128.9, 8.62]]
matches_counter = 0
for i in range(len(testCircles)):
for j in range(len(circles)):
tstCircle = circleApproximation(testCircles[i])
circle = circleApproximation(circles[j])
                if convContoursIntersectionRate(tstCircle, circle) > 0.6:
matches_counter += 1
self.assertGreater(float(matches_counter) / len(testCircles), .5)
self.assertLess(float(len(circles) - matches_counter) / len(circles), .75)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
| 25.988235 | 99 | 0.574468 |
794730e959856d88fa1d09587f498ac7b3ec2ede | 76,997 | py | Python | tests/unit/gapic/compute_v1/test_backend_buckets.py | georgiyekkert/python-compute | d128efbb3bf10af9b41e55b20aaa8080b3221e77 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/compute_v1/test_backend_buckets.py | georgiyekkert/python-compute | d128efbb3bf10af9b41e55b20aaa8080b3221e77 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/compute_v1/test_backend_buckets.py | georgiyekkert/python-compute | d128efbb3bf10af9b41e55b20aaa8080b3221e77 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.backend_buckets import BackendBucketsClient
from google.cloud.compute_v1.services.backend_buckets import pagers
from google.cloud.compute_v1.services.backend_buckets import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert BackendBucketsClient._get_default_mtls_endpoint(None) is None
assert (
BackendBucketsClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
BackendBucketsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
BackendBucketsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
BackendBucketsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
BackendBucketsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize("client_class", [BackendBucketsClient,])
def test_backend_buckets_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[(transports.BackendBucketsRestTransport, "rest"),],
)
def test_backend_buckets_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [BackendBucketsClient,])
def test_backend_buckets_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
def test_backend_buckets_client_get_transport_class():
transport = BackendBucketsClient.get_transport_class()
available_transports = [
transports.BackendBucketsRestTransport,
]
assert transport in available_transports
transport = BackendBucketsClient.get_transport_class("rest")
assert transport == transports.BackendBucketsRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(BackendBucketsClient, transports.BackendBucketsRestTransport, "rest"),],
)
@mock.patch.object(
BackendBucketsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(BackendBucketsClient),
)
def test_backend_buckets_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(BackendBucketsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(BackendBucketsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(BackendBucketsClient, transports.BackendBucketsRestTransport, "rest", "true"),
(BackendBucketsClient, transports.BackendBucketsRestTransport, "rest", "false"),
],
)
@mock.patch.object(
BackendBucketsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(BackendBucketsClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_backend_buckets_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(BackendBucketsClient, transports.BackendBucketsRestTransport, "rest"),],
)
def test_backend_buckets_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(BackendBucketsClient, transports.BackendBucketsRestTransport, "rest"),],
)
def test_backend_buckets_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_add_signed_url_key_rest(
transport: str = "rest", request_type=compute.AddSignedUrlKeyBackendBucketRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "backend_bucket": "sample2"}
request_init["signed_url_key_resource"] = compute.SignedUrlKey(
key_name="key_name_value"
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.add_signed_url_key(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_add_signed_url_key_rest_bad_request(
transport: str = "rest", request_type=compute.AddSignedUrlKeyBackendBucketRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "backend_bucket": "sample2"}
request_init["signed_url_key_resource"] = compute.SignedUrlKey(
key_name="key_name_value"
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.add_signed_url_key(request)
def test_add_signed_url_key_rest_from_dict():
test_add_signed_url_key_rest(request_type=dict)
def test_add_signed_url_key_rest_flattened(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "backend_bucket": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
backend_bucket="backend_bucket_value",
signed_url_key_resource=compute.SignedUrlKey(key_name="key_name_value"),
)
mock_args.update(sample_request)
client.add_signed_url_key(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}/addSignedUrlKey"
% client.transport._host,
args[1],
)
def test_add_signed_url_key_rest_flattened_error(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.add_signed_url_key(
compute.AddSignedUrlKeyBackendBucketRequest(),
project="project_value",
backend_bucket="backend_bucket_value",
signed_url_key_resource=compute.SignedUrlKey(key_name="key_name_value"),
)
def test_delete_rest(
transport: str = "rest", request_type=compute.DeleteBackendBucketRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "backend_bucket": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_delete_rest_bad_request(
transport: str = "rest", request_type=compute.DeleteBackendBucketRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "backend_bucket": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.delete(request)
def test_delete_rest_from_dict():
test_delete_rest(request_type=dict)
def test_delete_rest_flattened(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "backend_bucket": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value", backend_bucket="backend_bucket_value",
)
mock_args.update(sample_request)
client.delete(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}"
% client.transport._host,
args[1],
)
def test_delete_rest_flattened_error(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete(
compute.DeleteBackendBucketRequest(),
project="project_value",
backend_bucket="backend_bucket_value",
)
def test_delete_signed_url_key_rest(
transport: str = "rest", request_type=compute.DeleteSignedUrlKeyBackendBucketRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "backend_bucket": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_signed_url_key(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_delete_signed_url_key_rest_bad_request(
transport: str = "rest", request_type=compute.DeleteSignedUrlKeyBackendBucketRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "backend_bucket": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.delete_signed_url_key(request)
def test_delete_signed_url_key_rest_from_dict():
test_delete_signed_url_key_rest(request_type=dict)
def test_delete_signed_url_key_rest_flattened(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "backend_bucket": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
backend_bucket="backend_bucket_value",
key_name="key_name_value",
)
mock_args.update(sample_request)
client.delete_signed_url_key(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}/deleteSignedUrlKey"
% client.transport._host,
args[1],
)
def test_delete_signed_url_key_rest_flattened_error(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_signed_url_key(
compute.DeleteSignedUrlKeyBackendBucketRequest(),
project="project_value",
backend_bucket="backend_bucket_value",
key_name="key_name_value",
)
def test_get_rest(
transport: str = "rest", request_type=compute.GetBackendBucketRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "backend_bucket": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.BackendBucket(
bucket_name="bucket_name_value",
creation_timestamp="creation_timestamp_value",
custom_response_headers=["custom_response_headers_value"],
description="description_value",
enable_cdn=True,
id=205,
kind="kind_value",
name="name_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.BackendBucket.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.BackendBucket)
assert response.bucket_name == "bucket_name_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.custom_response_headers == ["custom_response_headers_value"]
assert response.description == "description_value"
assert response.enable_cdn is True
assert response.id == 205
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.self_link == "self_link_value"
def test_get_rest_bad_request(
transport: str = "rest", request_type=compute.GetBackendBucketRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "backend_bucket": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.get(request)
def test_get_rest_from_dict():
test_get_rest(request_type=dict)
def test_get_rest_flattened(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.BackendBucket()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.BackendBucket.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "backend_bucket": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value", backend_bucket="backend_bucket_value",
)
mock_args.update(sample_request)
client.get(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}"
% client.transport._host,
args[1],
)
def test_get_rest_flattened_error(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetBackendBucketRequest(),
project="project_value",
backend_bucket="backend_bucket_value",
)
def test_insert_rest(
transport: str = "rest", request_type=compute.InsertBackendBucketRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request_init["backend_bucket_resource"] = compute.BackendBucket(
bucket_name="bucket_name_value"
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_insert_rest_bad_request(
transport: str = "rest", request_type=compute.InsertBackendBucketRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request_init["backend_bucket_resource"] = compute.BackendBucket(
bucket_name="bucket_name_value"
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.insert(request)
def test_insert_rest_from_dict():
test_insert_rest(request_type=dict)
def test_insert_rest_flattened(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
backend_bucket_resource=compute.BackendBucket(
bucket_name="bucket_name_value"
),
)
mock_args.update(sample_request)
client.insert(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/backendBuckets"
% client.transport._host,
args[1],
)
def test_insert_rest_flattened_error(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.insert(
compute.InsertBackendBucketRequest(),
project="project_value",
backend_bucket_resource=compute.BackendBucket(
bucket_name="bucket_name_value"
),
)
def test_list_rest(
transport: str = "rest", request_type=compute.ListBackendBucketsRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.BackendBucketList(
id="id_value",
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.BackendBucketList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPager)
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
def test_list_rest_bad_request(
transport: str = "rest", request_type=compute.ListBackendBucketsRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.list(request)
def test_list_rest_from_dict():
test_list_rest(request_type=dict)
def test_list_rest_flattened(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.BackendBucketList()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.BackendBucketList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(project="project_value",)
mock_args.update(sample_request)
client.list(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/backendBuckets"
% client.transport._host,
args[1],
)
def test_list_rest_flattened_error(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListBackendBucketsRequest(), project="project_value",
)
def test_list_rest_pager():
client = BackendBucketsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.BackendBucketList(
items=[
compute.BackendBucket(),
compute.BackendBucket(),
compute.BackendBucket(),
],
next_page_token="abc",
),
compute.BackendBucketList(items=[], next_page_token="def",),
compute.BackendBucketList(
items=[compute.BackendBucket(),], next_page_token="ghi",
),
compute.BackendBucketList(
items=[compute.BackendBucket(), compute.BackendBucket(),],
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.BackendBucketList.to_json(x) for x in response)
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
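        # The pager fetches lazily, consuming one mocked HTTP response per
        # page, so the four pages are queued twice: once for the item
        # iteration and once for the .pages iteration below.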
sample_request = {"project": "sample1"}
pager = client.list(request=sample_request)
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.BackendBucket) for i in results)
pages = list(client.list(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_patch_rest(
transport: str = "rest", request_type=compute.PatchBackendBucketRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "backend_bucket": "sample2"}
request_init["backend_bucket_resource"] = compute.BackendBucket(
bucket_name="bucket_name_value"
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.patch(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_patch_rest_bad_request(
transport: str = "rest", request_type=compute.PatchBackendBucketRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "backend_bucket": "sample2"}
request_init["backend_bucket_resource"] = compute.BackendBucket(
bucket_name="bucket_name_value"
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.patch(request)
def test_patch_rest_from_dict():
test_patch_rest(request_type=dict)
def test_patch_rest_flattened(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "backend_bucket": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
backend_bucket="backend_bucket_value",
backend_bucket_resource=compute.BackendBucket(
bucket_name="bucket_name_value"
),
)
mock_args.update(sample_request)
client.patch(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}"
% client.transport._host,
args[1],
)
def test_patch_rest_flattened_error(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.patch(
compute.PatchBackendBucketRequest(),
project="project_value",
backend_bucket="backend_bucket_value",
backend_bucket_resource=compute.BackendBucket(
bucket_name="bucket_name_value"
),
)
def test_update_rest(
transport: str = "rest", request_type=compute.UpdateBackendBucketRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "backend_bucket": "sample2"}
request_init["backend_bucket_resource"] = compute.BackendBucket(
bucket_name="bucket_name_value"
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.update(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_update_rest_bad_request(
transport: str = "rest", request_type=compute.UpdateBackendBucketRequest
):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "backend_bucket": "sample2"}
request_init["backend_bucket_resource"] = compute.BackendBucket(
bucket_name="bucket_name_value"
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.update(request)
def test_update_rest_from_dict():
test_update_rest(request_type=dict)
def test_update_rest_flattened(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "backend_bucket": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
backend_bucket="backend_bucket_value",
backend_bucket_resource=compute.BackendBucket(
bucket_name="bucket_name_value"
),
)
mock_args.update(sample_request)
client.update(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}"
% client.transport._host,
args[1],
)
def test_update_rest_flattened_error(transport: str = "rest"):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update(
compute.UpdateBackendBucketRequest(),
project="project_value",
backend_bucket="backend_bucket_value",
backend_bucket_resource=compute.BackendBucket(
bucket_name="bucket_name_value"
),
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.BackendBucketsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.BackendBucketsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = BackendBucketsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.BackendBucketsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = BackendBucketsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.BackendBucketsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = BackendBucketsClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize("transport_class", [transports.BackendBucketsRestTransport,])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_backend_buckets_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.BackendBucketsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_backend_buckets_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.backend_buckets.transports.BackendBucketsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.BackendBucketsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"add_signed_url_key",
"delete",
"delete_signed_url_key",
"get",
"insert",
"list",
"patch",
"update",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_backend_buckets_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.backend_buckets.transports.BackendBucketsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.BackendBucketsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_backend_buckets_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.backend_buckets.transports.BackendBucketsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.BackendBucketsTransport()
adc.assert_called_once()
def test_backend_buckets_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
BackendBucketsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_backend_buckets_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.BackendBucketsRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
def test_backend_buckets_host_no_port():
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
)
assert client.transport._host == "compute.googleapis.com:443"
def test_backend_buckets_host_with_port():
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
)
assert client.transport._host == "compute.googleapis.com:8000"
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = BackendBucketsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = BackendBucketsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = BackendBucketsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = BackendBucketsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = BackendBucketsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = BackendBucketsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = BackendBucketsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = BackendBucketsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = BackendBucketsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = BackendBucketsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = BackendBucketsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = BackendBucketsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = BackendBucketsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = BackendBucketsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = BackendBucketsClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.BackendBucketsTransport, "_prep_wrapped_messages"
) as prep:
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.BackendBucketsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = BackendBucketsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_transport_close():
transports = {
"rest": "_session",
}
for transport, close_name in transports.items():
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"rest",
]
for transport in transports:
client = BackendBucketsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
| 38.672526 | 116 | 0.689079 |
794732c144f962df5c34da9e89f7fe633ccfc5ce | 1,295 | py | Python | network.py | priyavrat-misra/fashion-mnist | 9e9d18612b7556dbff5849be87cb35c296993d9e | ["MIT"] | 1 | 2021-12-09T06:58:28.000Z | 2021-12-09T06:58:28.000Z | network.py | priyavrat-misra/fashion-mnist | 9e9d18612b7556dbff5849be87cb35c296993d9e | ["MIT"] | null | null | null | network.py | priyavrat-misra/fashion-mnist | 9e9d18612b7556dbff5849be87cb35c296993d9e | ["MIT"] | null | null | null |
import torch.nn as nn
class Network(nn.Module):
def __init__(self):
super().__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=8,
kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(num_features=8),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.layer2 = nn.Sequential(
nn.Conv2d(in_channels=8, out_channels=16,
kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(num_features=16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.fc1 = nn.Sequential(
nn.Linear(in_features=7*7*16, out_features=128),
nn.BatchNorm1d(num_features=128),
nn.Dropout(0.2),
nn.ReLU()
)
self.fc2 = nn.Sequential(
nn.Linear(in_features=128, out_features=64),
nn.BatchNorm1d(num_features=64),
nn.ReLU()
)
self.out = nn.Linear(in_features=64, out_features=10)
def forward(self, t):
t = self.layer1(t)
t = self.layer2(t)
t = t.reshape(t.size(0), -1)
t = self.fc1(t)
t = self.fc2(t)
t = self.out(t)
return t
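# Minimal usage sketch (illustrative; assumes 1x28x28 Fashion-MNIST inputs,
# which is why fc1 expects 7*7*16 features after two 2x2 max-pools):
#
#   import torch
#   net = Network()
#   net.eval()
#   logits = net(torch.randn(4, 1, 28, 28))  # -> torch.Size([4, 10])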
| 30.116279 | 61 | 0.522008 |
794733b463f811a61585dbc78228d7c99d4a57f7 | 1,400 | py | Python | custom_components/smartthinq/__init__.py | csirk51/hass-smartthinq | bd59f776b445c4c0d5626a836711d3fe1d85edc2 | ["MIT"] | null | null | null | custom_components/smartthinq/__init__.py | csirk51/hass-smartthinq | bd59f776b445c4c0d5626a836711d3fe1d85edc2 | ["MIT"] | null | null | null | custom_components/smartthinq/__init__.py | csirk51/hass-smartthinq | bd59f776b445c4c0d5626a836711d3fe1d85edc2 | ["MIT"] | null | null | null |
"""
Support for LG SmartThinQ devices.
This integration is made for the Korean service only.
If you want to use devices from another country, you will need to change the code a little.
"""
import logging
import wideq
import voluptuous as vol
from homeassistant.const import (
CONF_TOKEN, )
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['https://github.com/csirk51/wideq/archive/master.zip#wideq']
DOMAIN = 'smartthinq'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_TOKEN): cv.string,
})
}, extra=vol.ALLOW_EXTRA)
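# Illustrative configuration.yaml entry matching the schema above (the token
# value is a placeholder):
#
#   smartthinq:
#     token: "YOUR_WIDEQ_REFRESH_TOKEN"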
LGE_DEVICES = 'lge_devices'
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
import wideq
_LOGGER.info("Creating new LGE component")
if LGE_DEVICES not in hass.data:
hass.data[LGE_DEVICES] = []
refresh_token = config[DOMAIN][CONF_TOKEN]
client = wideq.Client.from_token(refresh_token)
hass.data[CONF_TOKEN] = refresh_token
for device in client.devices:
device_id = device.id
hass.data[LGE_DEVICES].append(device_id)
return True
class LGEDevice(Entity):
def __init__(self, client, device):
self._client = client
self._device = device
@property
def name(self):
return self._device.name
@property
def available(self):
return True
| 22.222222 | 81 | 0.682143 |
794733d7b532de6b215695d5b56304168386af8d | 14,510 | py | Python | lib/python2.7/site-packages/pelican/settings.py | drpaneas/linuxed.gr | 95676e9f18a234092656c61b73b9e6633f2e39ec | ["MIT"] | null | null | null | lib/python2.7/site-packages/pelican/settings.py | drpaneas/linuxed.gr | 95676e9f18a234092656c61b73b9e6633f2e39ec | ["MIT"] | null | null | null | lib/python2.7/site-packages/pelican/settings.py | drpaneas/linuxed.gr | 95676e9f18a234092656c61b73b9e6633f2e39ec | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import copy
import inspect
import os
import locale
import logging
try:
# SourceFileLoader is the recommended way in 3.3+
from importlib.machinery import SourceFileLoader
load_source = lambda name, path: SourceFileLoader(name, path).load_module()
except ImportError:
# but it does not exist in 3.2-, so fall back to imp
import imp
load_source = imp.load_source
from os.path import isabs
from pelican.log import LimitFilter
logger = logging.getLogger(__name__)
DEFAULT_THEME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'themes', 'notmyidea')
DEFAULT_CONFIG = {
'PATH': os.curdir,
'ARTICLE_PATHS': [''],
'ARTICLE_EXCLUDES': [],
'PAGE_PATHS': ['pages'],
'PAGE_EXCLUDES': [],
'THEME': DEFAULT_THEME,
'OUTPUT_PATH': 'output',
'READERS': {},
'STATIC_PATHS': ['images'],
'STATIC_EXCLUDES': [],
'STATIC_EXCLUDE_SOURCES': True,
'THEME_STATIC_DIR': 'theme',
'THEME_STATIC_PATHS': ['static', ],
'FEED_ALL_ATOM': os.path.join('feeds', 'all.atom.xml'),
'CATEGORY_FEED_ATOM': os.path.join('feeds', '%s.atom.xml'),
'AUTHOR_FEED_ATOM': os.path.join('feeds', '%s.atom.xml'),
'AUTHOR_FEED_RSS': os.path.join('feeds', '%s.rss.xml'),
'TRANSLATION_FEED_ATOM': os.path.join('feeds', 'all-%s.atom.xml'),
'FEED_MAX_ITEMS': '',
'SITEURL': '',
'SITENAME': 'A Pelican Blog',
'DISPLAY_PAGES_ON_MENU': True,
'DISPLAY_CATEGORIES_ON_MENU': True,
'DOCUTILS_SETTINGS': {},
'OUTPUT_SOURCES': False,
'OUTPUT_SOURCES_EXTENSION': '.text',
'USE_FOLDER_AS_CATEGORY': True,
'DEFAULT_CATEGORY': 'misc',
'WITH_FUTURE_DATES': True,
'CSS_FILE': 'main.css',
'NEWEST_FIRST_ARCHIVES': True,
'REVERSE_CATEGORY_ORDER': False,
'DELETE_OUTPUT_DIRECTORY': False,
'OUTPUT_RETENTION': (),
'ARTICLE_URL': '{slug}.html',
'ARTICLE_SAVE_AS': '{slug}.html',
'ARTICLE_ORDER_BY': 'slug',
'ARTICLE_LANG_URL': '{slug}-{lang}.html',
'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html',
'DRAFT_URL': 'drafts/{slug}.html',
'DRAFT_SAVE_AS': os.path.join('drafts', '{slug}.html'),
'DRAFT_LANG_URL': 'drafts/{slug}-{lang}.html',
'DRAFT_LANG_SAVE_AS': os.path.join('drafts', '{slug}-{lang}.html'),
'PAGE_URL': 'pages/{slug}.html',
'PAGE_SAVE_AS': os.path.join('pages', '{slug}.html'),
'PAGE_ORDER_BY': 'basename',
'PAGE_LANG_URL': 'pages/{slug}-{lang}.html',
'PAGE_LANG_SAVE_AS': os.path.join('pages', '{slug}-{lang}.html'),
'STATIC_URL': '{path}',
'STATIC_SAVE_AS': '{path}',
'PDF_GENERATOR': False,
'PDF_STYLE_PATH': '',
'PDF_STYLE': 'twelvepoint',
'CATEGORY_URL': 'category/{slug}.html',
'CATEGORY_SAVE_AS': os.path.join('category', '{slug}.html'),
'TAG_URL': 'tag/{slug}.html',
'TAG_SAVE_AS': os.path.join('tag', '{slug}.html'),
'AUTHOR_URL': 'author/{slug}.html',
'AUTHOR_SAVE_AS': os.path.join('author', '{slug}.html'),
'PAGINATION_PATTERNS': [
(0, '{name}{number}{extension}', '{name}{number}{extension}'),
],
'YEAR_ARCHIVE_SAVE_AS': '',
'MONTH_ARCHIVE_SAVE_AS': '',
'DAY_ARCHIVE_SAVE_AS': '',
'RELATIVE_URLS': False,
'DEFAULT_LANG': 'en',
'TAG_CLOUD_STEPS': 4,
'TAG_CLOUD_MAX_ITEMS': 100,
'DIRECT_TEMPLATES': ('index', 'tags', 'categories', 'authors', 'archives'),
'EXTRA_TEMPLATES_PATHS': [],
'PAGINATED_DIRECT_TEMPLATES': ('index', ),
'PELICAN_CLASS': 'pelican.Pelican',
'DEFAULT_DATE_FORMAT': '%a %d %B %Y',
'DATE_FORMATS': {},
'MD_EXTENSIONS': ['codehilite(css_class=highlight)', 'extra'],
'JINJA_EXTENSIONS': [],
'JINJA_FILTERS': {},
'LOG_FILTER': [],
'LOCALE': [''], # defaults to user locale
'DEFAULT_PAGINATION': False,
'DEFAULT_ORPHANS': 0,
'DEFAULT_METADATA': (),
'FILENAME_METADATA': '(?P<date>\d{4}-\d{2}-\d{2}).*',
'PATH_METADATA': '',
'EXTRA_PATH_METADATA': {},
'DEFAULT_STATUS': 'published',
'ARTICLE_PERMALINK_STRUCTURE': '',
'TYPOGRIFY': False,
'TYPOGRIFY_IGNORE_TAGS': [],
'SUMMARY_MAX_LENGTH': 50,
'PLUGIN_PATHS': [],
'PLUGINS': [],
'PYGMENTS_RST_OPTIONS': {},
'TEMPLATE_PAGES': {},
'IGNORE_FILES': ['.#*'],
'SLUG_SUBSTITUTIONS': (),
'INTRASITE_LINK_REGEX': '[{|](?P<what>.*?)[|}]',
'SLUGIFY_SOURCE': 'title',
'CACHE_CONTENT': True,
'CONTENT_CACHING_LAYER': 'reader',
'CACHE_PATH': 'cache',
'GZIP_CACHE': True,
'CHECK_MODIFIED_METHOD': 'mtime',
'LOAD_CONTENT_CACHE': True,
'AUTORELOAD_IGNORE_CACHE': False,
'WRITE_SELECTED': [],
}
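# Illustrative pelicanconf.py sketch (values are placeholders): any uppercase
# name defined in the settings file overrides the matching DEFAULT_CONFIG key
# via get_settings_from_module() below.
#
#   AUTHOR = 'Example Author'
#   SITENAME = 'Example Blog'
#   PATH = 'content'
#   DEFAULT_LANG = 'en'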
PYGMENTS_RST_OPTIONS = None
def read_settings(path=None, override=None):
if path:
local_settings = get_settings_from_file(path)
# Make the paths relative to the settings file
for p in ['PATH', 'OUTPUT_PATH', 'THEME', 'CACHE_PATH']:
if p in local_settings and local_settings[p] is not None \
and not isabs(local_settings[p]):
absp = os.path.abspath(os.path.normpath(os.path.join(
os.path.dirname(path), local_settings[p])))
if p not in ('THEME') or os.path.exists(absp):
local_settings[p] = absp
if 'PLUGIN_PATH' in local_settings:
logger.warning('PLUGIN_PATH setting has been replaced by '
'PLUGIN_PATHS, moving it to the new setting name.')
local_settings['PLUGIN_PATHS'] = local_settings['PLUGIN_PATH']
del local_settings['PLUGIN_PATH']
if isinstance(local_settings['PLUGIN_PATHS'], six.string_types):
logger.warning("Defining PLUGIN_PATHS setting as string "
"has been deprecated (should be a list)")
local_settings['PLUGIN_PATHS'] = [local_settings['PLUGIN_PATHS']]
elif local_settings['PLUGIN_PATHS'] is not None:
local_settings['PLUGIN_PATHS'] = [os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(path), pluginpath)))
if not isabs(pluginpath) else pluginpath for pluginpath in local_settings['PLUGIN_PATHS']]
else:
local_settings = copy.deepcopy(DEFAULT_CONFIG)
if override:
local_settings.update(override)
parsed_settings = configure_settings(local_settings)
# This is because there doesn't seem to be a way to pass extra
# parameters to docutils directive handlers, so we have to have a
# variable here that we'll import from within Pygments.run (see
# rstdirectives.py) to see what the user defaults were.
global PYGMENTS_RST_OPTIONS
PYGMENTS_RST_OPTIONS = parsed_settings.get('PYGMENTS_RST_OPTIONS', None)
return parsed_settings
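# Illustrative call (the settings file path is a placeholder):
#
#   settings = read_settings('pelicanconf.py', override={'RELATIVE_URLS': True})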
def get_settings_from_module(module=None, default_settings=DEFAULT_CONFIG):
"""Loads settings from a module, returns a dictionary."""
context = copy.deepcopy(default_settings)
if module is not None:
context.update(
(k, v) for k, v in inspect.getmembers(module) if k.isupper())
return context
def get_settings_from_file(path, default_settings=DEFAULT_CONFIG):
"""Loads settings from a file path, returning a dict."""
name, ext = os.path.splitext(os.path.basename(path))
module = load_source(name, path)
return get_settings_from_module(module, default_settings=default_settings)
def configure_settings(settings):
"""Provide optimizations, error checking, and warnings for the given
settings.
Also, specify the log messages to be ignored.
"""
if not 'PATH' in settings or not os.path.isdir(settings['PATH']):
raise Exception('You need to specify a path containing the content'
' (see pelican --help for more information)')
# specify the log messages to be ignored
LimitFilter._ignore.update(set(settings.get('LOG_FILTER',
DEFAULT_CONFIG['LOG_FILTER'])))
# lookup the theme in "pelican/themes" if the given one doesn't exist
if not os.path.isdir(settings['THEME']):
theme_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'themes',
settings['THEME'])
if os.path.exists(theme_path):
settings['THEME'] = theme_path
else:
raise Exception("Could not find the theme %s"
% settings['THEME'])
# make paths selected for writing absolute if necessary
settings['WRITE_SELECTED'] = [
os.path.abspath(path) for path in
settings.get('WRITE_SELECTED', DEFAULT_CONFIG['WRITE_SELECTED'])
]
# standardize strings to lowercase strings
for key in [
'DEFAULT_LANG',
]:
if key in settings:
settings[key] = settings[key].lower()
# standardize strings to lists
for key in [
'LOCALE',
]:
if key in settings and isinstance(settings[key], six.string_types):
settings[key] = [settings[key]]
# check settings that must be a particular type
for key, types in [
('OUTPUT_SOURCES_EXTENSION', six.string_types),
('FILENAME_METADATA', six.string_types),
]:
if key in settings and not isinstance(settings[key], types):
value = settings.pop(key)
logger.warn('Detected misconfigured %s (%s), '
'falling back to the default (%s)',
key, value, DEFAULT_CONFIG[key])
# try to set the different locales, fallback on the default.
locales = settings.get('LOCALE', DEFAULT_CONFIG['LOCALE'])
for locale_ in locales:
try:
locale.setlocale(locale.LC_ALL, str(locale_))
break # break if it is successful
except locale.Error:
pass
else:
logger.warning("LOCALE option doesn't contain a correct value")
if ('SITEURL' in settings):
# If SITEURL has a trailing slash, remove it and provide a warning
siteurl = settings['SITEURL']
if (siteurl.endswith('/')):
settings['SITEURL'] = siteurl[:-1]
logger.warning("Removed extraneous trailing slash from SITEURL.")
# If SITEURL is defined but FEED_DOMAIN isn't,
# set FEED_DOMAIN to SITEURL
if not 'FEED_DOMAIN' in settings:
settings['FEED_DOMAIN'] = settings['SITEURL']
# check content caching layer and warn of incompatibilities
if (settings.get('CACHE_CONTENT', False) and
settings.get('CONTENT_CACHING_LAYER', '') == 'generator' and
settings.get('WITH_FUTURE_DATES', DEFAULT_CONFIG['WITH_FUTURE_DATES'])):
logger.warning('WITH_FUTURE_DATES conflicts with '
"CONTENT_CACHING_LAYER set to 'generator', "
"use 'reader' layer instead")
# Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined
feed_keys = [
'FEED_ATOM', 'FEED_RSS',
'FEED_ALL_ATOM', 'FEED_ALL_RSS',
'CATEGORY_FEED_ATOM', 'CATEGORY_FEED_RSS',
'AUTHOR_FEED_ATOM', 'AUTHOR_FEED_RSS',
'TAG_FEED_ATOM', 'TAG_FEED_RSS',
'TRANSLATION_FEED_ATOM', 'TRANSLATION_FEED_RSS',
]
if any(settings.get(k) for k in feed_keys):
if not settings.get('SITEURL'):
logger.warning('Feeds generated without SITEURL set properly may'
' not be valid')
if not 'TIMEZONE' in settings:
logger.warning(
'No timezone information specified in the settings. Assuming'
' your timezone is UTC for feed generation. Check '
'http://docs.getpelican.com/en/latest/settings.html#timezone '
'for more information')
# fix up pagination rules
from pelican.paginator import PaginationRule
pagination_rules = [
PaginationRule(*r) for r in settings.get(
'PAGINATION_PATTERNS',
DEFAULT_CONFIG['PAGINATION_PATTERNS'],
)
]
settings['PAGINATION_PATTERNS'] = sorted(
pagination_rules,
key=lambda r: r[0],
)
# move {ARTICLE,PAGE}_DIR -> {ARTICLE,PAGE}_PATHS
for key in ['ARTICLE', 'PAGE']:
old_key = key + '_DIR'
new_key = key + '_PATHS'
if old_key in settings:
logger.warning('Deprecated setting %s, moving it to %s list',
old_key, new_key)
settings[new_key] = [settings[old_key]] # also make a list
del settings[old_key]
# Save people from accidentally setting a string rather than a list
path_keys = (
'ARTICLE_EXCLUDES',
'DEFAULT_METADATA',
'DIRECT_TEMPLATES',
'EXTRA_TEMPLATES_PATHS',
'FILES_TO_COPY',
'IGNORE_FILES',
'JINJA_EXTENSIONS',
'PAGINATED_DIRECT_TEMPLATES',
'PLUGINS',
'STATIC_EXCLUDES',
'STATIC_PATHS',
'THEME_STATIC_PATHS',
'ARTICLE_PATHS',
'PAGE_PATHS',
)
for PATH_KEY in filter(lambda k: k in settings, path_keys):
if isinstance(settings[PATH_KEY], six.string_types):
logger.warning("Detected misconfiguration with %s setting "
"(must be a list), falling back to the default",
PATH_KEY)
settings[PATH_KEY] = DEFAULT_CONFIG[PATH_KEY]
# Add {PAGE,ARTICLE}_PATHS to {ARTICLE,PAGE}_EXCLUDES
mutually_exclusive = ('ARTICLE', 'PAGE')
for type_1, type_2 in [mutually_exclusive, mutually_exclusive[::-1]]:
try:
includes = settings[type_1 + '_PATHS']
excludes = settings[type_2 + '_EXCLUDES']
for path in includes:
if path not in excludes:
excludes.append(path)
except KeyError:
continue # setting not specified, nothing to do
for old, new, doc in [
('LESS_GENERATOR', 'the Webassets plugin', None),
('FILES_TO_COPY', 'STATIC_PATHS and EXTRA_PATH_METADATA',
'https://github.com/getpelican/pelican/blob/master/docs/settings.rst#path-metadata'),
]:
if old in settings:
message = 'The {} setting has been removed in favor of {}'.format(
old, new)
if doc:
message += ', see {} for details'.format(doc)
logger.warning(message)
return settings
| 37.984293 | 132 | 0.617298 |
794734326ea80688441217658443c159a118c41b | 33,961 | py | Python | cinder/volume/drivers/rbd.py | oodrive/cinder | 40b43d6698128ab26b056586e0a5c5e818e21c11 | [
"Apache-2.0"
] | 1 | 2015-11-25T10:18:28.000Z | 2015-11-25T10:18:28.000Z | cinder/volume/drivers/rbd.py | oodrive/cinder | 40b43d6698128ab26b056586e0a5c5e818e21c11 | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/rbd.py | oodrive/cinder | 40b43d6698128ab26b056586e0a5c5e818e21c11 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RADOS Block Device Driver"""
from __future__ import absolute_import
import io
import json
import os
import tempfile
import urllib
from oslo.config import cfg
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import strutils
from cinder import units
from cinder.volume import driver
try:
import rados
import rbd
except ImportError:
rados = None
rbd = None
LOG = logging.getLogger(__name__)
rbd_opts = [
cfg.StrOpt('rbd_pool',
default='rbd',
help='the RADOS pool in which rbd volumes are stored'),
cfg.StrOpt('rbd_user',
default=None,
help='the RADOS client name for accessing rbd volumes '
'- only set when using cephx authentication'),
cfg.StrOpt('rbd_ceph_conf',
default='', # default determined by librados
help='path to the ceph configuration file to use'),
cfg.BoolOpt('rbd_flatten_volume_from_snapshot',
default=False,
help='flatten volumes created from snapshots to remove '
'dependency'),
cfg.StrOpt('rbd_secret_uuid',
default=None,
help='the libvirt uuid of the secret for the rbd_user'
'volumes'),
cfg.StrOpt('volume_tmp_dir',
default=None,
help='where to store temporary image files if the volume '
'driver does not write them directly to the volume'),
cfg.IntOpt('rbd_max_clone_depth',
default=5,
help='maximum number of nested clones that can be taken of a '
'volume before enforcing a flatten prior to next clone. '
'A value of zero disables cloning'),
cfg.IntOpt('rados_connect_timeout', default=-1,
help=_('Timeout value (in seconds) used when connecting to '
'ceph cluster. If value < 0, no timeout is set and '
'default librados value is used.'))]
CONF = cfg.CONF
CONF.register_opts(rbd_opts)
def ascii_str(string):
"""Convert a string to ascii, or return None if the input is None.
This is useful when a parameter is None by default, or a string. LibRBD
only accepts ascii, hence the need for conversion.
"""
if string is None:
return string
return str(string)
class RBDImageMetadata(object):
"""RBD image metadata to be used with RBDImageIOWrapper."""
def __init__(self, image, pool, user, conf):
self.image = image
self.pool = str(pool)
self.user = str(user)
self.conf = str(conf)
class RBDImageIOWrapper(io.RawIOBase):
"""Enables LibRBD.Image objects to be treated as Python IO objects.
Calling unimplemented interfaces will raise IOError.
"""
def __init__(self, rbd_meta):
super(RBDImageIOWrapper, self).__init__()
self._rbd_meta = rbd_meta
self._offset = 0
def _inc_offset(self, length):
self._offset += length
@property
def rbd_image(self):
return self._rbd_meta.image
@property
def rbd_user(self):
return self._rbd_meta.user
@property
def rbd_pool(self):
return self._rbd_meta.pool
@property
def rbd_conf(self):
return self._rbd_meta.conf
def read(self, length=None):
offset = self._offset
total = self._rbd_meta.image.size()
# NOTE(dosaboy): posix files do not barf if you read beyond their
# length (they just return nothing) but rbd images do so we need to
# return empty string if we have reached the end of the image.
if (offset >= total):
return ''
if length is None:
length = total
if (offset + length) > total:
length = total - offset
self._inc_offset(length)
return self._rbd_meta.image.read(int(offset), int(length))
def write(self, data):
self._rbd_meta.image.write(data, self._offset)
self._inc_offset(len(data))
def seekable(self):
return True
def seek(self, offset, whence=0):
if whence == 0:
new_offset = offset
elif whence == 1:
new_offset = self._offset + offset
elif whence == 2:
new_offset = self._rbd_meta.image.size()
new_offset += offset
else:
raise IOError(_("Invalid argument - whence=%s not supported") %
(whence))
if (new_offset < 0):
raise IOError(_("Invalid argument"))
self._offset = new_offset
def tell(self):
return self._offset
def flush(self):
try:
self._rbd_meta.image.flush()
except AttributeError:
LOG.warning(_("flush() not supported in this version of librbd"))
def fileno(self):
"""RBD does not have support for fileno() so we raise IOError.
        Raising IOError is the recommended way to notify the caller that the
        interface is not supported - see
        http://docs.python.org/2/library/io.html#io.IOBase
"""
raise IOError(_("fileno() not supported by RBD()"))
# NOTE(dosaboy): if IO object is not closed explicitly, Python auto closes
# it which, if this is not overridden, calls flush() prior to close which
# in this case is unwanted since the rbd image may have been closed prior
# to the autoclean - currently triggering a segfault in librbd.
def close(self):
pass
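# Illustrative pairing of the two classes above (pool/user/conf values are
# placeholders):
#
#   meta = RBDImageMetadata(image, pool='rbd', user='cinder',
#                           conf='/etc/ceph/ceph.conf')
#   rbd_fd = RBDImageIOWrapper(meta)
#   chunk = rbd_fd.read(4096)  # reads are clamped to the image size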
class RBDVolumeProxy(object):
"""Context manager for dealing with an existing rbd volume.
This handles connecting to rados and opening an ioctx automatically, and
otherwise acts like a librbd Image object.
The underlying librados client and ioctx can be accessed as the attributes
'client' and 'ioctx'.
"""
def __init__(self, driver, name, pool=None, snapshot=None,
read_only=False):
client, ioctx = driver._connect_to_rados(pool)
try:
self.volume = driver.rbd.Image(ioctx, str(name),
snapshot=ascii_str(snapshot),
read_only=read_only)
except driver.rbd.Error:
LOG.exception(_("error opening rbd image %s"), name)
driver._disconnect_from_rados(client, ioctx)
raise
self.driver = driver
self.client = client
self.ioctx = ioctx
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
try:
self.volume.close()
finally:
self.driver._disconnect_from_rados(self.client, self.ioctx)
def __getattr__(self, attrib):
return getattr(self.volume, attrib)
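# Illustrative use of the proxy above (the driver instance and volume dict are
# assumed to exist in the calling context):
#
#   with RBDVolumeProxy(self, volume['name'], read_only=True) as vol:
#       vol_size = vol.size()  # librbd Image methods are proxied via __getattr__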
class RADOSClient(object):
"""Context manager to simplify error handling for connecting to ceph."""
def __init__(self, driver, pool=None):
self.driver = driver
self.cluster, self.ioctx = driver._connect_to_rados(pool)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.driver._disconnect_from_rados(self.cluster, self.ioctx)
class RBDDriver(driver.VolumeDriver):
"""Implements RADOS block device (RBD) volume commands."""
VERSION = '1.1.0'
def __init__(self, *args, **kwargs):
super(RBDDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(rbd_opts)
self._stats = {}
# allow overrides for testing
self.rados = kwargs.get('rados', rados)
self.rbd = kwargs.get('rbd', rbd)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
if rados is None:
msg = _('rados and rbd python libraries not found')
raise exception.VolumeBackendAPIException(data=msg)
try:
with RADOSClient(self):
pass
except self.rados.Error:
msg = _('error connecting to ceph cluster')
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _ceph_args(self):
args = []
if self.configuration.rbd_user:
args.extend(['--id', self.configuration.rbd_user])
if self.configuration.rbd_ceph_conf:
args.extend(['--conf', self.configuration.rbd_ceph_conf])
return args
def _connect_to_rados(self, pool=None):
ascii_user = ascii_str(self.configuration.rbd_user)
ascii_conf = ascii_str(self.configuration.rbd_ceph_conf)
client = self.rados.Rados(rados_id=ascii_user, conffile=ascii_conf)
try:
if self.configuration.rados_connect_timeout >= 0:
client.connect(
timeout=self.configuration.rados_connect_timeout)
else:
client.connect()
pool_to_open = str(pool or self.configuration.rbd_pool)
ioctx = client.open_ioctx(pool_to_open)
return client, ioctx
except self.rados.Error:
# shutdown cannot raise an exception
client.shutdown()
raise
def _disconnect_from_rados(self, client, ioctx):
# closing an ioctx cannot raise an exception
ioctx.close()
client.shutdown()
def _get_backup_snaps(self, rbd_image):
"""Get list of any backup snapshots that exist on this volume.
There should only ever be one but accept all since they need to be
deleted before the volume can be.
"""
# NOTE(dosaboy): we do the import here otherwise we get import conflict
# issues between the rbd driver and the ceph backup driver. These
# issues only seem to occur when NOT using them together and are
# triggered when the ceph backup driver imports the rbd volume driver.
from cinder.backup.drivers import ceph
return ceph.CephBackupDriver.get_backup_snaps(rbd_image)
def _get_mon_addrs(self):
args = ['ceph', 'mon', 'dump', '--format=json']
args.extend(self._ceph_args())
out, _ = self._execute(*args)
lines = out.split('\n')
if lines[0].startswith('dumped monmap epoch'):
lines = lines[1:]
monmap = json.loads('\n'.join(lines))
addrs = [mon['addr'] for mon in monmap['mons']]
hosts = []
ports = []
for addr in addrs:
host_port = addr[:addr.rindex('/')]
host, port = host_port.rsplit(':', 1)
hosts.append(host.strip('[]'))
ports.append(port)
return hosts, ports
def _update_volume_stats(self):
stats = {
'vendor_name': 'Open Source',
'driver_version': self.VERSION,
'storage_protocol': 'ceph',
'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
}
backend_name = self.configuration.safe_get('volume_backend_name')
stats['volume_backend_name'] = backend_name or 'RBD'
try:
with RADOSClient(self) as client:
new_stats = client.cluster.get_cluster_stats()
stats['total_capacity_gb'] = new_stats['kb'] / units.MiB
stats['free_capacity_gb'] = new_stats['kb_avail'] / units.MiB
except self.rados.Error:
# just log and return unknown capacities
LOG.exception(_('error refreshing volume stats'))
self._stats = stats
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service.
If 'refresh' is True, run the update first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _supports_layering(self):
return hasattr(self.rbd, 'RBD_FEATURE_LAYERING')
def _get_clone_depth(self, client, volume_name, depth=0):
"""Returns the number of ancestral clones (if any) of the given volume.
"""
parent_volume = self.rbd.Image(client.ioctx, volume_name)
try:
pool, parent, snap = self._get_clone_info(parent_volume,
volume_name)
finally:
parent_volume.close()
if not parent:
return depth
# If clone depth was reached, flatten should have occurred so if it has
# been exceeded then something has gone wrong.
if depth > CONF.rbd_max_clone_depth:
raise Exception(_("clone depth exceeds limit of %s") %
(CONF.rbd_max_clone_depth))
return self._get_clone_depth(client, parent, depth + 1)
def create_cloned_volume(self, volume, src_vref):
"""Create a cloned volume from another volume.
Since we are cloning from a volume and not a snapshot, we must first
create a snapshot of the source volume.
The user has the option to limit how long a volume's clone chain can be
by setting rbd_max_clone_depth. If a clone is made of another clone
and that clone has rbd_max_clone_depth clones behind it, the source
volume will be flattened.
"""
src_name = str(src_vref['name'])
dest_name = str(volume['name'])
flatten_parent = False
# Do full copy if requested
if CONF.rbd_max_clone_depth <= 0:
with RBDVolumeProxy(self, src_name, read_only=True) as vol:
vol.copy(vol.ioctx, dest_name)
return
# Otherwise do COW clone.
with RADOSClient(self) as client:
depth = self._get_clone_depth(client, src_name)
# If source volume is a clone and rbd_max_clone_depth reached,
# flatten the source before cloning. Zero rbd_max_clone_depth means
# infinite is allowed.
if depth == CONF.rbd_max_clone_depth:
LOG.debug(_("maximum clone depth (%d) has been reached - "
"flattening source volume") %
(CONF.rbd_max_clone_depth))
flatten_parent = True
src_volume = self.rbd.Image(client.ioctx, src_name)
try:
# First flatten source volume if required.
if flatten_parent:
pool, parent, snap = self._get_clone_info(src_volume,
src_name)
# Flatten source volume
LOG.debug(_("flattening source volume %s") % (src_name))
src_volume.flatten()
# Delete parent clone snap
parent_volume = self.rbd.Image(client.ioctx, parent)
try:
parent_volume.unprotect_snap(snap)
parent_volume.remove_snap(snap)
finally:
parent_volume.close()
# Create new snapshot of source volume
clone_snap = "%s.clone_snap" % dest_name
LOG.debug(_("creating snapshot='%s'") % (clone_snap))
src_volume.create_snap(clone_snap)
src_volume.protect_snap(clone_snap)
except Exception as exc:
# Only close if exception since we still need it.
src_volume.close()
raise exc
# Now clone source volume snapshot
try:
LOG.debug(_("cloning '%(src_vol)s@%(src_snap)s' to "
"'%(dest)s'") %
{'src_vol': src_name, 'src_snap': clone_snap,
'dest': dest_name})
self.rbd.RBD().clone(client.ioctx, src_name, clone_snap,
client.ioctx, dest_name,
features=self.rbd.RBD_FEATURE_LAYERING)
except Exception as exc:
src_volume.unprotect_snap(clone_snap)
src_volume.remove_snap(clone_snap)
raise exc
finally:
src_volume.close()
LOG.debug(_("clone created successfully"))
def create_volume(self, volume):
"""Creates a logical volume."""
if int(volume['size']) == 0:
size = 100 * units.MiB
else:
size = int(volume['size']) * units.GiB
LOG.debug(_("creating volume '%s'") % (volume['name']))
old_format = True
features = 0
if self._supports_layering():
old_format = False
features = self.rbd.RBD_FEATURE_LAYERING
with RADOSClient(self) as client:
self.rbd.RBD().create(client.ioctx,
str(volume['name']),
size,
old_format=old_format,
features=features)
def _flatten(self, pool, volume_name):
LOG.debug(_('flattening %(pool)s/%(img)s') %
dict(pool=pool, img=volume_name))
with RBDVolumeProxy(self, volume_name, pool) as vol:
vol.flatten()
def _clone(self, volume, src_pool, src_image, src_snap):
LOG.debug(_('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s') %
dict(pool=src_pool, img=src_image, snap=src_snap,
dst=volume['name']))
with RADOSClient(self, src_pool) as src_client:
with RADOSClient(self) as dest_client:
self.rbd.RBD().clone(src_client.ioctx,
str(src_image),
str(src_snap),
dest_client.ioctx,
str(volume['name']),
features=self.rbd.RBD_FEATURE_LAYERING)
def _resize(self, volume, **kwargs):
size = kwargs.get('size', None)
if not size:
size = int(volume['size']) * units.GiB
with RBDVolumeProxy(self, volume['name']) as vol:
vol.resize(size)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
self._clone(volume, self.configuration.rbd_pool,
snapshot['volume_name'], snapshot['name'])
if self.configuration.rbd_flatten_volume_from_snapshot:
self._flatten(self.configuration.rbd_pool, volume['name'])
if int(volume['size']):
self._resize(volume)
def _delete_backup_snaps(self, rbd_image):
backup_snaps = self._get_backup_snaps(rbd_image)
if backup_snaps:
for snap in backup_snaps:
rbd_image.remove_snap(snap['name'])
else:
LOG.debug(_("volume has no backup snaps"))
def _get_clone_info(self, volume, volume_name, snap=None):
"""If volume is a clone, return its parent info.
Returns a tuple of (pool, parent, snap). A snapshot may optionally be
        provided for the case where a cloned volume has been flattened but its
snapshot still depends on the parent.
"""
try:
snap and volume.set_snap(snap)
pool, parent, parent_snap = tuple(volume.parent_info())
snap and volume.set_snap(None)
# Strip the tag off the end of the volume name since it will not be
# in the snap name.
if volume_name.endswith('.deleted'):
volume_name = volume_name[:-len('.deleted')]
# Now check the snap name matches.
if parent_snap == "%s.clone_snap" % volume_name:
return pool, parent, parent_snap
except self.rbd.ImageNotFound:
LOG.debug(_("volume %s is not a clone") % volume_name)
volume.set_snap(None)
return (None, None, None)
def _delete_clone_parent_refs(self, client, parent_name, parent_snap):
"""Walk back up the clone chain and delete references.
Deletes references i.e. deleted parent volumes and snapshots.
"""
parent_rbd = self.rbd.Image(client.ioctx, parent_name)
parent_has_snaps = False
try:
# Check for grandparent
_pool, g_parent, g_parent_snap = self._get_clone_info(parent_rbd,
parent_name,
parent_snap)
LOG.debug(_("deleting parent snapshot %s") % (parent_snap))
parent_rbd.unprotect_snap(parent_snap)
parent_rbd.remove_snap(parent_snap)
parent_has_snaps = bool(list(parent_rbd.list_snaps()))
finally:
parent_rbd.close()
# If parent has been deleted in Cinder, delete the silent reference and
# keep walking up the chain if it is itself a clone.
if (not parent_has_snaps) and parent_name.endswith('.deleted'):
LOG.debug(_("deleting parent %s") % (parent_name))
self.rbd.RBD().remove(client.ioctx, parent_name)
# Now move up to grandparent if there is one
if g_parent:
self._delete_clone_parent_refs(client, g_parent, g_parent_snap)
def delete_volume(self, volume):
"""Deletes a logical volume."""
# NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are
# utf-8 otherwise librbd will barf.
volume_name = strutils.safe_encode(volume['name'])
with RADOSClient(self) as client:
try:
rbd_image = self.rbd.Image(client.ioctx, volume_name)
except self.rbd.ImageNotFound:
LOG.info(_("volume %s no longer exists in backend")
% (volume_name))
return
clone_snap = None
parent = None
# Ensure any backup snapshots are deleted
self._delete_backup_snaps(rbd_image)
# If the volume has non-clone snapshots this delete is expected to
# raise VolumeIsBusy so do so straight away.
try:
snaps = rbd_image.list_snaps()
for snap in snaps:
if snap['name'].endswith('.clone_snap'):
LOG.debug(_("volume has clone snapshot(s)"))
# We grab one of these and use it when fetching parent
# info in case the volume has been flattened.
clone_snap = snap['name']
break
raise exception.VolumeIsBusy(volume_name=volume_name)
# Determine if this volume is itself a clone
pool, parent, parent_snap = self._get_clone_info(rbd_image,
volume_name,
clone_snap)
finally:
rbd_image.close()
if clone_snap is None:
LOG.debug(_("deleting rbd volume %s") % (volume_name))
try:
self.rbd.RBD().remove(client.ioctx, volume_name)
except self.rbd.ImageBusy:
msg = (_("ImageBusy error raised while deleting rbd "
"volume. This may have been caused by a "
"connection from a client that has crashed and, "
"if so, may be resolved by retrying the delete "
"after 30 seconds has elapsed."))
LOG.warn(msg)
# Now raise this so that volume stays available so that we
# delete can be retried.
raise exception.VolumeIsBusy(msg, volume_name=volume_name)
except self.rbd.ImageNotFound:
msg = (_("RBD volume %s not found, allowing delete "
"operation to proceed.") % volume_name)
LOG.info(msg)
return
# If it is a clone, walk back up the parent chain deleting
# references.
if parent:
LOG.debug(_("volume is a clone so cleaning references"))
self._delete_clone_parent_refs(client, parent, parent_snap)
else:
# If the volume has copy-on-write clones we will not be able to
# delete it. Instead we will keep it as a silent volume which
                # will be deleted when its snapshot and clones are deleted.
new_name = "%s.deleted" % (volume_name)
self.rbd.RBD().rename(client.ioctx, volume_name, new_name)
def create_snapshot(self, snapshot):
"""Creates an rbd snapshot."""
with RBDVolumeProxy(self, snapshot['volume_name']) as volume:
snap = str(snapshot['name'])
volume.create_snap(snap)
if self._supports_layering():
volume.protect_snap(snap)
def delete_snapshot(self, snapshot):
"""Deletes an rbd snapshot."""
# NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are
# utf-8 otherwise librbd will barf.
volume_name = strutils.safe_encode(snapshot['volume_name'])
snap_name = strutils.safe_encode(snapshot['name'])
with RBDVolumeProxy(self, volume_name) as volume:
if self._supports_layering():
try:
volume.unprotect_snap(snap_name)
except self.rbd.ImageBusy:
raise exception.SnapshotIsBusy(snapshot_name=snap_name)
volume.remove_snap(snap_name)
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a logical volume."""
pass
def create_export(self, context, volume):
"""Exports the volume."""
pass
def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
pass
def initialize_connection(self, volume, connector):
hosts, ports = self._get_mon_addrs()
data = {
'driver_volume_type': 'rbd',
'data': {
'name': '%s/%s' % (self.configuration.rbd_pool,
volume['name']),
'hosts': hosts,
'ports': ports,
'auth_enabled': (self.configuration.rbd_user is not None),
'auth_username': self.configuration.rbd_user,
'secret_type': 'ceph',
'secret_uuid': self.configuration.rbd_secret_uuid, }
}
LOG.debug(_('connection data: %s'), data)
return data
def terminate_connection(self, volume, connector, **kwargs):
pass
def _parse_location(self, location):
prefix = 'rbd://'
if not location.startswith(prefix):
reason = _('Not stored in rbd')
raise exception.ImageUnacceptable(image_id=location, reason=reason)
pieces = map(urllib.unquote, location[len(prefix):].split('/'))
if any(map(lambda p: p == '', pieces)):
reason = _('Blank components')
raise exception.ImageUnacceptable(image_id=location, reason=reason)
if len(pieces) != 4:
reason = _('Not an rbd snapshot')
raise exception.ImageUnacceptable(image_id=location, reason=reason)
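        # pieces is (fsid, pool, image, snapshot)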
return pieces
def _get_fsid(self):
with RADOSClient(self) as client:
return client.cluster.get_fsid()
def _is_cloneable(self, image_location, image_meta):
try:
fsid, pool, image, snapshot = self._parse_location(image_location)
except exception.ImageUnacceptable as e:
LOG.debug(_('not cloneable: %s'), e)
return False
if self._get_fsid() != fsid:
reason = _('%s is in a different ceph cluster') % image_location
LOG.debug(reason)
return False
if image_meta['disk_format'] != 'raw':
reason = _("rbd image clone requires image format to be "
"'raw' but image {0} is '{1}'").format(
image_location, image_meta['disk_format'])
LOG.debug(reason)
return False
# check that we can read the image
try:
with RBDVolumeProxy(self, image,
pool=pool,
snapshot=snapshot,
read_only=True):
return True
except self.rbd.Error as e:
LOG.debug(_('Unable to open image %(loc)s: %(err)s') %
dict(loc=image_location, err=e))
return False
def clone_image(self, volume, image_location, image_id, image_meta):
image_location = image_location[0] if image_location else None
if image_location is None or not self._is_cloneable(
image_location, image_meta):
return ({}, False)
prefix, pool, image, snapshot = self._parse_location(image_location)
self._clone(volume, pool, image, snapshot)
self._resize(volume)
return {'provider_location': None}, True
def _ensure_tmp_exists(self):
tmp_dir = self.configuration.volume_tmp_dir
if tmp_dir and not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
def copy_image_to_volume(self, context, volume, image_service, image_id):
self._ensure_tmp_exists()
tmp_dir = self.configuration.volume_tmp_dir
with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp:
image_utils.fetch_to_raw(context, image_service, image_id,
tmp.name,
self.configuration.volume_dd_blocksize,
size=volume['size'])
self.delete_volume(volume)
# keep using the command line import instead of librbd since it
# detects zeroes to preserve sparseness in the image
args = ['rbd', 'import',
'--pool', self.configuration.rbd_pool,
tmp.name, volume['name']]
if self._supports_layering():
args.append('--new-format')
args.extend(self._ceph_args())
self._try_execute(*args)
self._resize(volume)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
self._ensure_tmp_exists()
tmp_dir = self.configuration.volume_tmp_dir or '/tmp'
tmp_file = os.path.join(tmp_dir,
volume['name'] + '-' + image_meta['id'])
with fileutils.remove_path_on_error(tmp_file):
args = ['rbd', 'export',
'--pool', self.configuration.rbd_pool,
volume['name'], tmp_file]
args.extend(self._ceph_args())
self._try_execute(*args)
image_utils.upload_volume(context, image_service,
image_meta, tmp_file)
os.unlink(tmp_file)
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
pool = self.configuration.rbd_pool
with RBDVolumeProxy(self, volume['name'], pool) as rbd_image:
rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool,
self.configuration.rbd_user,
self.configuration.rbd_ceph_conf)
rbd_fd = RBDImageIOWrapper(rbd_meta)
backup_service.backup(backup, rbd_fd)
LOG.debug(_("volume backup complete."))
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
pool = self.configuration.rbd_pool
with RBDVolumeProxy(self, volume['name'], pool) as rbd_image:
rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool,
self.configuration.rbd_user,
self.configuration.rbd_ceph_conf)
rbd_fd = RBDImageIOWrapper(rbd_meta)
backup_service.restore(backup, volume['id'], rbd_fd)
LOG.debug(_("volume restore complete."))
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
old_size = volume['size']
try:
size = int(new_size) * units.GiB
self._resize(volume, size=size)
except Exception:
msg = _('Failed to Extend Volume '
'%(volname)s') % {'volname': volume['name']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug(_("Extend volume from %(old_size)s GB to %(new_size)s GB."),
{'old_size': old_size, 'new_size': new_size})
| 39.35226 | 79 | 0.576308 |
79473461c93af9057724f218135752c6c9a50273 | 26,904 | py | Python | src/typhoonae/redis/tests/test_datastore_redis.py | ukris/typhoonae.redis | ef43f4448bfc59e643b6344064759633dbbb1ab6 | [
"Apache-2.0"
] | null | null | null | src/typhoonae/redis/tests/test_datastore_redis.py | ukris/typhoonae.redis | ef43f4448bfc59e643b6344064759633dbbb1ab6 | [
"Apache-2.0"
] | null | null | null | src/typhoonae/redis/tests/test_datastore_redis.py | ukris/typhoonae.redis | ef43f4448bfc59e643b6344064759633dbbb1ab6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2010 Tobias Rodäbel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the Datastore Redis stub."""
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_index
from google.appengine.ext import db
import datetime
import google.appengine.api.apiproxy_stub
import google.appengine.api.apiproxy_stub_map
import google.appengine.api.datastore_admin
import google.appengine.api.datastore_errors
import google.appengine.api.users
import google.appengine.datastore.entity_pb
import google.appengine.runtime.apiproxy_errors
import os
import time
import threading
import typhoonae.redis.datastore_redis_stub
import unittest
class DatastoreRedisTestCaseBase(unittest.TestCase):
"""Base class for testing the TyphoonAE Datastore Redis API proxy stub."""
def setUp(self):
"""Sets up test environment and regisers stub."""
# Set required environment variables
os.environ['APPLICATION_ID'] = 'test'
os.environ['AUTH_DOMAIN'] = 'mydomain.local'
os.environ['USER_EMAIL'] = '[email protected]'
os.environ['USER_IS_ADMIN'] = '1'
# Read index definitions.
index_yaml = open(
os.path.join(os.path.dirname(__file__), 'index.yaml'), 'r')
try:
indexes = datastore_index.IndexDefinitionsToProtos(
'test',
datastore_index.ParseIndexDefinitions(index_yaml).indexes)
except TypeError:
indexes = []
index_yaml.close()
# Register API proxy stub.
google.appengine.api.apiproxy_stub_map.apiproxy = (
google.appengine.api.apiproxy_stub_map.APIProxyStubMap())
datastore = typhoonae.redis.datastore_redis_stub.DatastoreRedisStub(
'test', indexes)
try:
google.appengine.api.apiproxy_stub_map.apiproxy.RegisterStub(
'datastore_v3', datastore)
except google.appengine.runtime.apiproxy_errors.ApplicationError, e:
raise RuntimeError('These tests require a running Redis server '
'(%s)' % e)
self.stub = google.appengine.api.apiproxy_stub_map.apiproxy.GetStub(
'datastore_v3')
def tearDown(self):
"""Clears all data."""
self.stub.Clear()
class StoredEntityTestCase(DatastoreRedisTestCaseBase):
"""Testing entity wrapper class."""
def testStoredEntity(self):
"""Initializes a stored entity instance."""
class MyModel(db.Model):
contents = db.StringProperty()
key = MyModel(contents="Some contents.").save()
entity = db.get(key)
protobuf = db.model_to_protobuf(entity)
stored_entity = typhoonae.redis.datastore_redis_stub._StoredEntity(
protobuf)
self.assertEqual(protobuf, stored_entity.protobuf)
self.assertEqual(
'j\x15j\x04testr\r\x0b\x12\x07MyModel\x18\x01\x0cr\x1e\x1a\x08'
'contents \x00*\x10\x1a\x0eSome contents.\x82\x01\r\x0b\x12\x07'
'MyModel\x18\x01\x0c',
stored_entity.encoded_protobuf)
self.assertEqual({u'contents': u'Some contents.'}, stored_entity.native)
self.assertTrue(
isinstance(
stored_entity.key(),
google.appengine.datastore.entity_pb.Reference))
class DatastoreRedisTestCase(DatastoreRedisTestCaseBase):
"""Testing the TyphoonAE Datastore Redis API proxy stub."""
def testStub(self):
"""Tests whether our stub is registered."""
self.assertNotEqual(None, self.stub)
def testConnectionError(self):
"""Tries to connect to wrong host and port."""
self.assertRaises(
google.appengine.runtime.apiproxy_errors.ApplicationError,
typhoonae.redis.datastore_redis_stub.DatastoreRedisStub,
'test', [], host='nowhere', port=10987)
def test__ValidateAppId(self):
"""Validates an application id."""
self.assertRaises(
google.appengine.api.datastore_errors.BadRequestError,
self.stub._DatastoreRedisStub__ValidateAppId,
'foo')
def test_GetAppIdNamespaceKindForKey(self):
"""Gets encoded app and kind from given key."""
ref = google.appengine.datastore.entity_pb.Reference()
ref.set_app(u'test')
ref.set_name_space(u'namespace')
path = ref.mutable_path()
elem = path.add_element()
elem.set_type('Foo')
elem = path.add_element()
elem.set_type('Bar')
self.assertEqual(
u'test!namespace\x08Bar',
self.stub._GetAppIdNamespaceKindForKey(ref))
def test_GetKeyForRedisKey(self):
"""Inititalizes an entity_pb.Reference from a Redis key."""
key = self.stub._GetKeyForRedisKey(
u'test!Foo\x08\t0000000000002\x07Bar\x08bar')
self.assertEqual(
datastore_types.Key.from_path(
u'Foo', 2, u'Bar', u'bar', _app=u'test'),
key)
def test_GetRedisKeyForKey(self):
"""Creates a valid Redis key."""
ref = google.appengine.datastore.entity_pb.Reference()
ref.set_app(u'test')
ref.set_name_space(u'namespace')
path = ref.mutable_path()
elem = path.add_element()
elem.set_type('Foo')
elem.set_id(1)
elem = path.add_element()
elem.set_type('Bar')
elem.set_id(2)
self.assertEqual(
u'test!Foo\x08\t0000000000001\x07Bar\x08\t0000000000002',
self.stub._GetRedisKeyForKey(ref))
def testPutGetDelete(self):
"""Puts/gets/deletes entities into/from the datastore."""
class Author(db.Model):
name = db.StringProperty()
class Book(db.Model):
title = db.StringProperty()
a = Author(name='Mark Twain', key_name='marktwain')
a.put()
b = Book(parent=a, title="The Adventures Of Tom Sawyer")
b.put()
key = b.key()
del a, b
book = google.appengine.api.datastore.Get(key)
self.assertEqual(
"{u'title': u'The Adventures Of Tom Sawyer'}", str(book))
author = google.appengine.api.datastore.Get(book.parent())
self.assertEqual("{u'name': u'Mark Twain'}", str(author))
del book
google.appengine.api.datastore.Delete(key)
self.assertRaises(
google.appengine.api.datastore_errors.EntityNotFoundError,
google.appengine.api.datastore.Get,
key)
del author
mark_twain = Author.get_by_key_name('marktwain')
self.assertEqual('Author', mark_twain.kind())
self.assertEqual('Mark Twain', mark_twain.name)
mark_twain.delete()
def testGetEntitiesByNameAndID(self):
"""Tries to retrieve entities by name or numeric id."""
class Book(db.Model):
title = db.StringProperty()
Book(title="The Hitchhiker's Guide to the Galaxy").put()
book = Book.get_by_id(1)
self.assertEqual("The Hitchhiker's Guide to the Galaxy", book.title)
Book(key_name="solong",
title="So Long, and Thanks for All the Fish").put()
book = Book.get_by_key_name("solong")
self.assertEqual("So Long, and Thanks for All the Fish", book.title)
def testLocking(self):
"""Acquires and releases transaction locks."""
self.stub._AcquireLockForEntityGroup('foo', timeout=1)
self.stub._ReleaseLockForEntityGroup('foo')
self.stub._AcquireLockForEntityGroup('bar', timeout=2)
t = time.time()
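        # Re-acquiring the same entity group should block until the earlier
        # lock is released or times out.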
self.stub._AcquireLockForEntityGroup('bar', timeout=1)
assert time.time() > t + 1
self.stub._ReleaseLockForEntityGroup('bar')
def testTransactions(self):
"""Executes 1000 transactions in 10 concurrent threads."""
class Counter(db.Model):
value = db.IntegerProperty()
counter = Counter(key_name='counter', value=0)
counter.put()
del counter
class Incrementer(threading.Thread):
def run(self):
def tx():
counter = Counter.get_by_key_name('counter')
counter.value += 1
counter.put()
for i in range(100):
db.run_in_transaction(tx)
incrementers = []
for i in range(10):
incrementers.append(Incrementer())
incrementers[i].start()
for incr in incrementers:
incr.join()
counter = Counter.get_by_key_name('counter')
self.assertEqual(1000, counter.value)
def testLargerTransaction(self):
"""Executes multiple operations in one transaction."""
class Author(db.Model):
name = db.StringProperty()
class Book(db.Model):
title = db.StringProperty()
def tx():
a = Author(name='Mark Twain', key_name='marktwain')
a.put()
b = Book(parent=a, title="The Adventures Of Tom Sawyer")
b.put()
b.delete()
db.run_in_transaction(tx)
self.assertEqual(1, Author.all().count())
self.assertEqual(0, Book.all().count())
marktwain = Author.get_by_key_name('marktwain')
def query_tx():
query = db.Query()
query.filter('__key__ = ', marktwain.key())
author = query.get()
self.assertRaises(
google.appengine.api.datastore_errors.BadRequestError,
db.run_in_transaction, query_tx)
def testKindlessAncestorQueries(self):
"""Perform kindless queries for entities with a given ancestor."""
class Author(db.Model):
name = db.StringProperty()
class Book(db.Model):
title = db.StringProperty()
author = Author(name='Mark Twain', key_name='marktwain').put()
book = Book(parent=author, title="The Adventures Of Tom Sawyer").put()
query = db.Query()
query.ancestor(author)
query.filter('__key__ = ', book)
self.assertEqual(book, query.get().key())
book = query.get()
book.delete()
self.assertEqual(0, query.count())
def testRunQuery(self):
"""Runs some simple queries."""
class Employee(db.Model):
first_name = db.StringProperty(required=True)
last_name = db.StringProperty(required=True)
manager = db.SelfReferenceProperty()
manager = Employee(first_name='John', last_name='Dowe')
manager.put()
employee = Employee(
first_name=u'John', last_name='Appleseed', manager=manager.key())
employee.put()
# Perform a very simple query.
query = Employee.all()
self.assertEqual(set(['John Dowe', 'John Appleseed']),
set(['%s %s' % (e.first_name, e.last_name)
for e in query.run()]))
# Rename the manager.
manager.first_name = 'Clara'
manager.put()
# And perform the same query as above.
query = Employee.all()
self.assertEqual(set(['Clara Dowe', 'John Appleseed']),
set(['%s %s' % (e.first_name, e.last_name)
for e in query.run()]))
# Get only one entity.
query = Employee.all()
self.assertEqual(u'Dowe', query.get().last_name)
self.assertEqual(u'Dowe', query.fetch(1)[0].last_name)
# Delete our entities.
employee.delete()
manager.delete()
# Our query results should now be empty.
query = Employee.all()
self.assertEqual([], list(query.run()))
def testCount(self):
"""Counts query results."""
class Balloon(db.Model):
color = db.StringProperty()
Balloon(color='Red').put()
self.assertEqual(1, Balloon.all().count())
Balloon(color='Blue').put()
self.assertEqual(2, Balloon.all().count())
def testQueryWithFilter(self):
"""Tries queries with filters."""
class SomeKind(db.Model):
value = db.StringProperty()
foo = SomeKind(value="foo")
foo.put()
bar = SomeKind(value="bar")
bar.put()
class Artifact(db.Model):
description = db.StringProperty(required=True)
age = db.IntegerProperty()
vase = Artifact(description="Mycenaean stirrup vase", age=3300)
vase.put()
helmet = Artifact(description="Spartan full size helmet", age=2400)
helmet.put()
unknown = Artifact(description="Some unknown artifact")
unknown.put()
query = Artifact.all().filter('age =', 2400)
self.assertEqual(
['Spartan full size helmet'],
[artifact.description for artifact in query.run()])
query = db.GqlQuery("SELECT * FROM Artifact WHERE age = :1", 3300)
self.assertEqual(
['Mycenaean stirrup vase'],
[artifact.description for artifact in query.run()])
query = Artifact.all().filter('age IN', [2400, 3300])
self.assertEqual(
set(['Spartan full size helmet', 'Mycenaean stirrup vase']),
set([artifact.description for artifact in query.run()]))
vase.delete()
query = Artifact.all().filter('age IN', [2400])
self.assertEqual(
['Spartan full size helmet'],
[artifact.description for artifact in query.run()])
helmet.age = 2300
helmet.put()
query = Artifact.all().filter('age =', 2300)
self.assertEqual([2300], [artifact.age for artifact in query.run()])
query = Artifact.all()
self.assertEqual(
set([2300L, None]),
set([artifact.age for artifact in query.run()]))
def testQueryForKeysOnly(self):
"""Queries for entity keys instead of full entities."""
class Asset(db.Model):
name = db.StringProperty(required=True)
price = db.FloatProperty(required=True)
lamp = Asset(name="Bedside Lamp", price=10.45)
lamp.put()
towel = Asset(name="Large Towel", price=3.50)
towel.put()
query = Asset.all(keys_only=True)
self.assertEqual(
set([
datastore_types.Key.from_path(u'Asset', 1, _app=u'test'),
datastore_types.Key.from_path(u'Asset', 2, _app=u'test')]),
set(query.run()))
def testQueryWithOrder(self):
"""Tests queries with sorting."""
class Planet(db.Model):
name = db.StringProperty()
moon_count = db.IntegerProperty()
distance = db.FloatProperty()
earth = Planet(name="Earth", distance=93.0, moon_count=1)
earth.put()
saturn = Planet(name="Saturn", distance=886.7, moon_count=18)
saturn.put()
venus = Planet(name="Venus", distance=67.2, moon_count=0)
venus.put()
mars = Planet(name="Mars", distance=141.6, moon_count=2)
mars.put()
mercury = Planet(name="Mercury", distance=36.0, moon_count=0)
mercury.put()
query = (Planet.all()
.filter('moon_count <', 10)
.order('moon_count')
.order('-name')
.order('distance'))
self.assertEqual(
[u'Venus', u'Mercury', u'Earth', u'Mars'],
[planet.name for planet in query.run()]
)
query = Planet.all().filter('distance >', 100).order('-distance')
self.assertEqual(
['Saturn', 'Mars'],
[planet.name for planet in query.run()]
)
query = Planet.all().filter('distance <=', 93).order('distance')
self.assertEqual(
['Mercury', 'Venus', 'Earth'],
[planet.name for planet in query.run()]
)
query = (Planet.all()
.filter('distance >', 80.0)
.filter('distance <', 150)
.order('distance'))
self.assertEqual(
['Earth', 'Mars'],
[planet.name for planet in query.run()])
query = Planet.all().filter('distance >=', 93.0).order('distance')
self.assertEqual(
[u'Earth', u'Mars', u'Saturn'],
[planet.name for planet in query.run()])
query = Planet.all().filter('distance ==', 93.0)
self.assertEqual(
[u'Earth'], [planet.name for planet in query.run()])
def testQueriesWithMultipleFiltersAndOrders(self):
"""Tests queries with multiple filters and orders."""
class Artist(db.Model):
name = db.StringProperty()
class Album(db.Model):
title = db.StringProperty()
class Song(db.Model):
artist = db.ReferenceProperty(Artist)
album = db.ReferenceProperty(Album)
duration = db.StringProperty()
genre = db.CategoryProperty()
title = db.StringProperty()
beatles = Artist(name="The Beatles")
beatles.put()
abbeyroad = Album(title="Abbey Road")
abbeyroad.put()
herecomesthesun = Song(
artist=beatles.key(),
album=abbeyroad.key(),
duration="3:06",
genre=db.Category("Pop"),
title="Here Comes The Sun")
herecomesthesun.put()
query = (Song.all()
.filter('artist =', beatles)
.filter('album =', abbeyroad))
self.assertEqual(u'Here Comes The Sun', query.get().title)
cometogether = Song(
artist=beatles.key(),
album=abbeyroad.key(),
duration="4:21",
genre=db.Category("Pop"),
title="Come Together")
cometogether.put()
something = Song(
artist=beatles.key(),
album=abbeyroad.key(),
duration="3:03",
genre=db.Category("Pop"),
title="Something")
something.put()
because1 = Song(
key_name='because',
artist=beatles.key(),
album=abbeyroad.key(),
duration="2:46",
genre=db.Category("Pop"),
title="Because")
because1.put()
        because2 = Song(
artist=beatles.key(),
album=abbeyroad.key(),
duration="2:46",
genre=db.Category("Pop"),
title="Because")
because2.put()
query = (Song.all()
.filter('artist =', beatles)
.filter('album =', abbeyroad)
.order('title'))
self.assertEqual(
[u'Because', u'Because', u'Come Together', u'Here Comes The Sun',
u'Something'],
[song.title for song in query.run()])
query = Song.all().filter('title !=', 'Because').order('title')
self.assertEqual(
[u'Come Together', u'Here Comes The Sun', u'Something'],
[song.title for song in query.run()])
query = Song.all().filter('title >', 'Come').order('title')
self.assertEqual(
[u'Come Together', u'Here Comes The Sun', u'Something'],
[song.title for song in query.run()])
something.delete()
query = Song.all().filter('title >', 'Come').order('title')
self.assertEqual(
[u'Come Together', u'Here Comes The Sun'],
[song.title for song in query.run()])
def testUnicode(self):
"""Tests unicode."""
class Employee(db.Model):
first_name = db.StringProperty(required=True)
last_name = db.StringProperty(required=True)
employee = Employee(first_name=u'Björn', last_name=u'Müller')
employee.put()
query = Employee.all(keys_only=True).filter('first_name =', u'Björn')
self.assertEqual(
datastore_types.Key.from_path(u'Employee', 1, _app=u'test'),
query.get())
def testListProperties(self):
"""Tests list properties."""
class Numbers(db.Model):
values = db.ListProperty(int)
Numbers(values=[0, 1, 2, 3]).put()
Numbers(values=[4, 5, 6, 7]).put()
query = Numbers.all().filter('values =', 0)
self.assertEqual([0, 1, 2, 3], query.get().values)
query = db.GqlQuery(
"SELECT * FROM Numbers WHERE values > :1 AND values < :2", 4, 7)
self.assertEqual([4, 5, 6, 7], query.get().values)
class Issue(db.Model):
reviewers = db.ListProperty(db.Email)
me = db.Email('[email protected]')
you = db.Email('[email protected]')
issue = Issue(reviewers=[me, you])
issue.put()
query = db.GqlQuery(
"SELECT * FROM Issue WHERE reviewers = :1",
db.Email('[email protected]'))
self.assertEqual(1, query.count())
query = db.GqlQuery(
"SELECT * FROM Issue WHERE reviewers = :1",
'[email protected]')
self.assertEqual(1, query.count())
query = db.GqlQuery(
"SELECT * FROM Issue WHERE reviewers = :1",
db.Email('[email protected]'))
self.assertEqual(0, query.count())
def testStringListProperties(self):
"""Tests string list properties."""
class Pizza(db.Model):
topping = db.StringListProperty()
Pizza(topping=["tomatoe", "cheese"]).put()
Pizza(topping=["tomatoe", "cheese", "salami"]).put()
Pizza(topping=["tomatoe", "cheese", "prosciutto"]).put()
query = Pizza.all(keys_only=True).filter('topping =', "salami")
self.assertEqual(1, query.count())
query = Pizza.all(keys_only=True).filter('topping =', "cheese")
self.assertEqual(3, query.count())
query = Pizza.all().filter('topping IN', ["salami", "prosciutto"])
self.assertEqual(2, query.count())
key = datastore_types.Key.from_path('Pizza', 1)
query = db.GqlQuery("SELECT * FROM Pizza WHERE __key__ IN :1", [key])
pizza = query.get()
self.assertEqual(["tomatoe", "cheese"], pizza.topping)
pizza.delete()
query = db.GqlQuery("SELECT * FROM Pizza WHERE __key__ IN :1", [key])
self.assertEqual(0, query.count())
    def testVariousPropertyTypes(self):
"""Tests various property types."""
class Note(db.Model):
timestamp = db.DateTimeProperty(auto_now=True)
description = db.StringProperty()
author_email = db.EmailProperty()
location = db.GeoPtProperty()
user = db.UserProperty()
Note(
description="My first note.",
author_email="[email protected]",
location="52.518,13.408",
user=google.appengine.api.users.get_current_user()
).put()
query = db.GqlQuery("SELECT * FROM Note ORDER BY timestamp DESC")
self.assertEqual(1, query.count())
query = db.GqlQuery(
"SELECT * FROM Note WHERE timestamp <= :1", datetime.datetime.now())
self.assertEqual(1, query.count())
note = query.get()
self.assertEqual("My first note.", note.description)
self.assertEqual(db.Email("[email protected]"), note.author_email)
self.assertEqual("[email protected]", note.author_email)
self.assertEqual(
datastore_types.GeoPt(52.518000000000001, 13.407999999999999),
note.location)
self.assertEqual("52.518,13.408", note.location)
del note
query = Note.all().filter(
'location =',
datastore_types.GeoPt(52.518000000000001, 13.407999999999999))
self.assertEqual(1, query.count())
query = Note.all().filter('location =', "52.518,13.408")
self.assertEqual(1, query.count())
def testQueriesWithLimit(self):
"""Retrieves a limited number of results."""
class MyModel(db.Model):
property = db.StringProperty()
for i in range(100):
MyModel(property="Random data.").put()
self.assertEqual(50, MyModel.all().count(limit=50))
def testAllocateIds(self):
""" """
class EmptyModel(db.Model):
pass
for i in xrange(0, 1000):
key = EmptyModel().put()
query = db.GqlQuery("SELECT * FROM EmptyModel")
self.assertEqual(1000, query.count())
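        # Reserve a contiguous block of 2000 ids in this model's key range.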
start, end = db.allocate_ids(key, 2000)
self.assertEqual(start, 1000)
self.assertEqual(end, 2999)
def testCursors(self):
"""Tests the cursor API."""
class Integer(db.Model):
value = db.IntegerProperty()
for i in xrange(0, 2000):
Integer(value=i).put()
# Set up a simple query.
query = Integer.all()
# Fetch some results.
a = query.fetch(500)
self.assertEqual(0L, a[0].value)
self.assertEqual(499L, a[-1].value)
b = query.fetch(500, offset=500)
self.assertEqual(500L, b[0].value)
self.assertEqual(999L, b[-1].value)
# Perform several queries with a cursor.
cursor = query.cursor()
query.with_cursor(cursor)
c = query.fetch(200)
self.assertEqual(1000L, c[0].value)
self.assertEqual(1199L, c[-1].value)
query.with_cursor(query.cursor())
d = query.fetch(500)
self.assertEqual(1200L, d[0].value)
self.assertEqual(1699L, d[-1].value)
query.with_cursor(query.cursor())
self.assertEqual(1700L, query.get().value)
# Use a query with filters.
query = Integer.all().filter('value >', 500).filter('value <=', 1000)
e = query.fetch(100)
query.with_cursor(query.cursor())
e = query.fetch(50)
self.assertEqual(601, e[0].value)
self.assertEqual(650, e[-1].value)
def testGetSchema(self):
"""Infers an app's schema from the entities in the datastore."""
class Foo(db.Model):
foobar = db.IntegerProperty(default=42)
Foo().put()
entity_pbs = google.appengine.api.datastore_admin.GetSchema()
entity = google.appengine.api.datastore.Entity.FromPb(entity_pbs.pop())
self.assertEqual('Foo', entity.key().kind())
| 30.853211 | 80 | 0.583408 |
794734eceab228f17ef1907a17686ea853aa3b2c | 4,636 | py | Python | tests/tests_preprocessing/test_preprocessing_categorical_integers.py | michaelneale/mljar-supervised | 8d1b5fdd56e994a7f13ec5f6d2033830744f3d6f | [
"MIT"
] | 1 | 2020-03-13T09:44:41.000Z | 2020-03-13T09:44:41.000Z | tests/tests_preprocessing/test_preprocessing_categorical_integers.py | wambagilles/mljar-supervised | 3192c91979b31810b249767a63e60ee74068c668 | [
"MIT"
] | null | null | null | tests/tests_preprocessing/test_preprocessing_categorical_integers.py | wambagilles/mljar-supervised | 3192c91979b31810b249767a63e60ee74068c668 | [
"MIT"
] | 1 | 2021-03-12T05:48:45.000Z | 2021-03-12T05:48:45.000Z | import unittest
import tempfile
import numpy as np
import pandas as pd
from supervised.preprocessing.preprocessing_categorical import PreprocessingCategorical
class PreprocessingCategoricalIntegersTest(unittest.TestCase):
def test_constructor_preprocessing_categorical(self):
"""
Check if PreprocessingCategorical object is properly initialized
"""
categorical = PreprocessingCategorical(
[], PreprocessingCategorical.CONVERT_INTEGER
)
self.assertEqual(
categorical._convert_method, PreprocessingCategorical.CONVERT_INTEGER
)
self.assertEqual(categorical._convert_params, {})
def test_fit_integers(self):
# training data
d = {
"col1": [1, 2, 3],
"col2": ["a", "a", "c"],
"col3": [1, 1, 3],
"col4": ["a", "b", "c"],
}
df = pd.DataFrame(data=d)
categorical = PreprocessingCategorical(
df.columns, PreprocessingCategorical.CONVERT_INTEGER
)
categorical.fit(df)
self.assertTrue("col2" in categorical._convert_params)
self.assertTrue("col4" in categorical._convert_params)
self.assertTrue("a" in categorical._convert_params["col2"])
self.assertTrue("c" in categorical._convert_params["col2"])
self.assertTrue("b" not in categorical._convert_params["col2"])
self.assertTrue("a" in categorical._convert_params["col4"])
self.assertTrue("b" in categorical._convert_params["col4"])
self.assertTrue("c" in categorical._convert_params["col4"])
def test_fit_transform_integers(self):
# training data
d = {
"col1": [1, 2, 3],
"col2": ["a", "a", "c"],
"col3": [1, 1, 3],
"col4": ["a", "b", "c"],
}
df = pd.DataFrame(data=d)
categorical = PreprocessingCategorical(
df.columns, PreprocessingCategorical.CONVERT_INTEGER
)
categorical.fit(df)
df = categorical.transform(df)
for col in ["col1", "col2", "col3", "col4"]:
self.assertTrue(col in df.columns)
self.assertEqual(df["col2"][0], 0)
self.assertEqual(df["col2"][1], 0)
self.assertEqual(df["col2"][2], 1)
self.assertEqual(df["col4"][0], 0)
self.assertEqual(df["col4"][1], 1)
self.assertEqual(df["col4"][2], 2)
def test_fit_transform_integers_with_new_values(self):
# training data
d_train = {
"col1": [1, 2, 3],
"col2": ["a", "a", "c"],
"col3": [1, 1, 3],
"col4": ["a", "b", "c"],
}
df_train = pd.DataFrame(data=d_train)
categorical = PreprocessingCategorical(
df_train.columns, PreprocessingCategorical.CONVERT_INTEGER
)
categorical.fit(df_train)
# testing data
d = {
"col1": [1, 2, 3],
"col2": ["a", "d", "f"],
"col3": [1, 1, 3],
"col4": ["e", "b", "z"],
}
df = pd.DataFrame(data=d)
df = categorical.transform(df)
for col in ["col1", "col2", "col3", "col4"]:
self.assertTrue(col in df.columns)
self.assertEqual(df["col2"][0], 0)
self.assertEqual(df["col2"][1], 2) # new values get higher indexes
self.assertEqual(df["col2"][2], 3) # new values get higher indexes
self.assertEqual(df["col4"][0], 3) # new values get higher indexes
self.assertEqual(df["col4"][1], 1)
self.assertEqual(df["col4"][2], 4) # new values get higher indexes
def test_to_and_from_json_convert_integers(self):
# training data
d = {
"col1": [1, 2, 3],
"col2": ["a", "a", "c"],
"col3": [1, 1, 3],
"col4": ["a", "b", "c"],
}
df = pd.DataFrame(data=d)
cat1 = PreprocessingCategorical(
df.columns, PreprocessingCategorical.CONVERT_INTEGER
)
cat1.fit(df)
cat2 = PreprocessingCategorical(
df.columns, PreprocessingCategorical.CONVERT_INTEGER
)
cat2.from_json(cat1.to_json())
df = cat2.transform(df)
for col in ["col1", "col2", "col3", "col4"]:
self.assertTrue(col in df.columns)
self.assertEqual(df["col2"][0], 0)
self.assertEqual(df["col2"][1], 0)
self.assertEqual(df["col2"][2], 1)
self.assertEqual(df["col4"][0], 0)
self.assertEqual(df["col4"][1], 1)
self.assertEqual(df["col4"][2], 2)
if __name__ == "__main__":
unittest.main()
| 35.937984 | 87 | 0.561044 |
7947351288e440875dd9116b3060ca2dde6d9779 | 12,552 | py | Python | examples/mmt_train_kmeans.py | Dingyuan-Zheng/ctf-UDA | 3e3c67f68d7eb0b52a16a259e5a77e153062c4fd | [
"MIT"
] | null | null | null | examples/mmt_train_kmeans.py | Dingyuan-Zheng/ctf-UDA | 3e3c67f68d7eb0b52a16a259e5a77e153062c4fd | [
"MIT"
] | null | null | null | examples/mmt_train_kmeans.py | Dingyuan-Zheng/ctf-UDA | 3e3c67f68d7eb0b52a16a259e5a77e153062c4fd | [
"MIT"
] | null | null | null | from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
from sklearn.cluster import KMeans
from sklearn.preprocessing import normalize
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
from mmt import datasets
from mmt import models
from mmt.trainers import MMTTrainer
from mmt.evaluators import Evaluator, extract_features
from mmt.utils.data import IterLoader
from mmt.utils.data import transforms as T
from mmt.utils.data.sampler import RandomMultipleGallerySampler
from mmt.utils.data.preprocessor import Preprocessor
from mmt.utils.logging import Logger
from mmt.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict
import os
best_mAP = 0
def get_data(name, data_dir):
root = osp.join(data_dir, name)
dataset = datasets.create(name, root)
return dataset
def get_train_loader(dataset, height, width, batch_size, workers,
num_instances, iters):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
T.ToTensor(),
normalizer,
T.RandomErasing(probability=0.5, mean=[0.485, 0.456, 0.406])
])
train_set = sorted(dataset.train, key=lambda x:x[1])
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances)
else:
sampler = None
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir,
transform=train_transformer, mutual=True),
batch_size=batch_size, num_workers=workers, sampler=sampler,
shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
return train_loader
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
if (testset is None):
testset = list(set(dataset.query) | set(dataset.gallery))
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
else:
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer, mutual=False, cluster=True),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return test_loader
def create_model(args):
model_1 = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters)
model_2 = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters)
model_1_ema = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters)
model_2_ema = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters)
model_1.cuda()
model_2.cuda()
model_1_ema.cuda()
model_2_ema.cuda()
model_1 = nn.DataParallel(model_1)
model_2 = nn.DataParallel(model_2)
model_1_ema = nn.DataParallel(model_1_ema)
model_2_ema = nn.DataParallel(model_2_ema)
initial_weights = load_checkpoint(args.init_1)
copy_state_dict(initial_weights['state_dict'], model_1)
copy_state_dict(initial_weights['state_dict'], model_1_ema)
model_1_ema.module.classifier.weight.data.copy_(model_1.module.classifier.weight.data)
initial_weights = load_checkpoint(args.init_2)
copy_state_dict(initial_weights['state_dict'], model_2)
copy_state_dict(initial_weights['state_dict'], model_2_ema)
model_2_ema.module.classifier.weight.data.copy_(model_2.module.classifier.weight.data)
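    # The mean-teacher (EMA) models are updated by exponential moving average
    # only, so their parameters are detached from gradient computation.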
for param in model_1_ema.parameters():
param.detach_()
for param in model_2_ema.parameters():
param.detach_()
return model_1, model_2, model_1_ema, model_2_ema
def main():
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1' # CUDA environment
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
def main_worker(args):
global best_mAP
cudnn.benchmark = True
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
print("==========\nArgs:{}\n==========".format(args))
# Create data loaders
iters = args.iters if (args.iters>0) else None
dataset_target = get_data(args.dataset_target, args.data_dir)
test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)
cluster_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers, testset=dataset_target.train)
# Create model
model_1, model_2, model_1_ema, model_2_ema = create_model(args)
# Evaluator
evaluator_1_ema = Evaluator(model_1_ema)
evaluator_2_ema = Evaluator(model_2_ema)
for epoch in range(args.epochs):
dict_f, _ = extract_features(model_1_ema, cluster_loader, print_freq=50)
cf_1 = torch.stack(list(dict_f.values())).numpy()
dict_f, _ = extract_features(model_2_ema, cluster_loader, print_freq=50)
cf_2 = torch.stack(list(dict_f.values())).numpy()
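        # Average the features extracted by the two mean-teacher networks
        # before clustering.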
cf = (cf_1+cf_2)/2
print('\n Clustering into {} classes \n'.format(args.num_clusters))
km = KMeans(n_clusters=args.num_clusters, random_state=args.seed, n_jobs=2).fit(cf)
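        # Re-initialize the classifier weights of all four networks with the
        # L2-normalized cluster centroids, used as class prototypes.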
model_1.module.classifier.weight.data.copy_(torch.from_numpy(normalize(km.cluster_centers_, axis=1)).float().cuda())
model_2.module.classifier.weight.data.copy_(torch.from_numpy(normalize(km.cluster_centers_, axis=1)).float().cuda())
model_1_ema.module.classifier.weight.data.copy_(torch.from_numpy(normalize(km.cluster_centers_, axis=1)).float().cuda())
model_2_ema.module.classifier.weight.data.copy_(torch.from_numpy(normalize(km.cluster_centers_, axis=1)).float().cuda())
target_label = km.labels_
# change pseudo labels
for i in range(len(dataset_target.train)):
dataset_target.train[i] = list(dataset_target.train[i])
dataset_target.train[i][2] = int(target_label[i]) ## change dataset_target.train[i][1] --> [2]
dataset_target.train[i] = tuple(dataset_target.train[i])
        # This is where the cluster centers (e.g. 500) could be re-computed from
        # the re-assigned pseudo labels, based on the memory slot that stores
        # the features of each target training image.
train_loader_target = get_train_loader(dataset_target, args.height, args.width,
args.batch_size, args.workers, args.num_instances, iters)
# Optimizer
params = []
for key, value in model_1.named_parameters():
if not value.requires_grad:
continue
params += [{"params": [value], "lr": args.lr, "weight_decay": args.weight_decay}]
for key, value in model_2.named_parameters():
if not value.requires_grad:
continue
params += [{"params": [value], "lr": args.lr, "weight_decay": args.weight_decay}]
optimizer = torch.optim.Adam(params)
# Trainer
##
trainer = MMTTrainer(model_1, model_2, model_1_ema, model_2_ema,
num_cluster=args.num_clusters, alpha=args.alpha, cf=cf, f_memory_label=target_label)
##
train_loader_target.new_epoch()
trainer.train(epoch, train_loader_target, optimizer,
ce_soft_weight=args.soft_ce_weight, tri_soft_weight=args.soft_tri_weight,
print_freq=args.print_freq, train_iters=len(train_loader_target))
def save_model(model_ema, is_best, best_mAP, mid):
save_checkpoint({
'state_dict': model_ema.state_dict(),
'epoch': epoch + 1,
'best_mAP': best_mAP,
}, is_best, fpath=osp.join(args.logs_dir, 'model'+str(mid)+'_checkpoint.pth.tar'))
if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)):
mAP_1 = evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=False)
mAP_2 = evaluator_2_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=False)
is_best = (mAP_1>best_mAP) or (mAP_2>best_mAP)
best_mAP = max(mAP_1, mAP_2, best_mAP)
save_model(model_1_ema, (is_best and (mAP_1>mAP_2)), best_mAP, 1)
save_model(model_2_ema, (is_best and (mAP_1<=mAP_2)), best_mAP, 2)
print('\n * Finished epoch {:3d} model no.1 mAP: {:5.1%} model no.2 mAP: {:5.1%} best: {:5.1%}{}\n'.
format(epoch, mAP_1, mAP_2, best_mAP, ' *' if is_best else ''))
print ('Test on the best model.')
checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
model_1_ema.load_state_dict(checkpoint['state_dict'])
evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="MMT Training")
# data
parser.add_argument('-dt', '--dataset-target', type=str, default='market1501',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=64)
parser.add_argument('-j', '--workers', type=int, default=4)
parser.add_argument('--num-clusters', type=int, default=500)
parser.add_argument('--height', type=int, default=256,
help="input height")
parser.add_argument('--width', type=int, default=128,
help="input width")
parser.add_argument('--num-instances', type=int, default=4,
help="each minibatch consist of "
"(batch_size // num_instances) identities, and "
"each identity has num_instances instances, "
"default: 0 (NOT USE)")
# model
parser.add_argument('-a', '--arch', type=str, default='resnet50',
choices=models.names())
parser.add_argument('--features', type=int, default=0)
parser.add_argument('--dropout', type=float, default=0)
# optimizer
parser.add_argument('--lr', type=float, default=0.00035,
help="learning rate of new parameters, for pretrained "
"parameters it is 10 times smaller than this")
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--alpha', type=float, default=0.999)
parser.add_argument('--moving-avg-momentum', type=float, default=0)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--soft-ce-weight', type=float, default=0.5)
parser.add_argument('--soft-tri-weight', type=float, default=0.8)
parser.add_argument('--epochs', type=int, default=40)
parser.add_argument('--iters', type=int, default=800)
# training configs
parser.add_argument('--init-1', type=str, default='', metavar='PATH')
parser.add_argument('--init-2', type=str, default='', metavar='PATH')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print-freq', type=int, default=1)
parser.add_argument('--eval-step', type=int, default=1)
# path
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'data'))
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'logs'))
#parser.add_argument('--memory_size', type=int, default=12936)
main()
| 44.828571 | 138 | 0.661648 |
7947363d31f128ce3314e0feec37c2c15fa8503d | 7,881 | py | Python | tests/msg_hz_test.py | suet-lee/mycelium | db83cd3ab00697f28b2def2cebcdef52698fdd92 | [
"Apache-2.0"
] | 6 | 2021-05-23T17:36:02.000Z | 2022-01-21T20:34:17.000Z | tests/msg_hz_test.py | suet-lee/mycelium | db83cd3ab00697f28b2def2cebcdef52698fdd92 | [
"Apache-2.0"
] | null | null | null | tests/msg_hz_test.py | suet-lee/mycelium | db83cd3ab00697f28b2def2cebcdef52698fdd92 | [
"Apache-2.0"
] | 1 | 2021-06-17T20:35:10.000Z | 2021-06-17T20:35:10.000Z | #!/usr/bin/env python3
# Set MAVLink protocol to 2.
import os
os.environ["MAVLINK20"] = "1"
import time
import threading
import traceback
import logging
from apscheduler.schedulers.background import BackgroundScheduler
from mycelium.components import RedisBridge, Connector
from mycelium_utils import Scripter, utils, DefaultConfig
from pymavlink import mavutil
class RedisToAPScripterExt:
instance = None
i=0
def __init__(self, **kwargs):
if not RedisToAPScripterExt.instance:
RedisToAPScripterExt.instance = RedisToAPScripterExt.__RedisToAPScripterExt(**kwargs)
def __getattr__(self, name):
return getattr(self.instance, name)
class __RedisToAPScripterExt(Scripter):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.rb = RedisBridge(db=self.rd_cfg.databases['instruments'])
self.keys = self.rd_cfg.generate_flat_keys('instruments')
self.conn = mavutil.mavlink_connection(
self.cfg.redis_to_ap,
autoreconnect = True,
source_system = 1,
source_component = 93,
baud=self.cfg.connection_baudrate,
force_connected=True
)
self.lock = threading.Lock()
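            # Per-message send rates in Hz; message types not listed below fall
            # back to default_msg_hz.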
default_msg_hz = 30.0
msg_hz = {
'send_vision_position_estimate': 30.0,
'send_obstacle_distance': 15.0
}
self.mavlink_thread = threading.Thread(target=self.mavlink_loop, args=[self.conn])
self.mavlink_thread.start()
self.sched = BackgroundScheduler()
logging.getLogger('apscheduler').setLevel(logging.ERROR)
self.data = {}
for k, v in self.keys.items():
try:
if v in msg_hz.keys():
seconds = 1.0/msg_hz[v]
else:
seconds = 1.0/default_msg_hz
func = getattr(self, v)
self.sched.add_job(self.send_message,
'interval',
seconds=seconds,
args=[func, k],
max_instances=1
)
except:
utils.progress(traceback)
else:
self.data[k] = None
# https://mavlink.io/en/messages/common.html#VISION_POSITION_ESTIMATE
def send_vision_position_estimate(self, current_time_us, x, y, z,
roll, pitch, yaw, covariance, reset_counter):
# self.connect(self.connection_string, self.connection_baudrate, self.source_system, self.source_component)
self.conn.mav.vision_position_estimate_send(
current_time_us, # us Timestamp (UNIX time or time since system boot)
x, # Local X position
y, # Local Y position
z, # Local Z position
roll, # Roll angle
pitch, # Pitch angle
yaw, # Yaw angle
covariance, # Row-major representation of pose 6x6 cross-covariance matrix
reset_counter # Estimate reset counter. Increment every time pose estimate jumps.
)
# def send_vision_position_delta_message(self, current_time_us, delta_time_us, delta_angle_rad, delta_position_m, current_confidence_level):
# conn.mav.vision_position_delta_send(
# current_time_us, # us: Timestamp (UNIX time or time since system boot)
# delta_time_us, # us: Time since last reported camera frame
# delta_angle_rad, # float[3] in radian: Defines a rotation vector in body frame that rotates the vehicle from the previous to the current orientation
# delta_position_m, # float[3] in m: Change in position from previous to current frame rotated into body frame (0=forward, 1=right, 2=down)
# current_confidence_level # Normalized confidence value from 0 to 100.
# )
# def send_vision_speed_estimate(self, current):
# self.conn.mav.vision_speed_estimate_send(
# current_time_us, # us Timestamp (UNIX time or time since system boot)
# V_aeroRef_aeroBody[0][3], # Global X speed
# V_aeroRef_aeroBody[1][3], # Global Y speed
# V_aeroRef_aeroBody[2][3], # Global Z speed
# covariance, # covariance
# reset_counter # Estimate reset counter. Increment every time pose estimate jumps.
# )
# https://mavlink.io/en/messages/common.html#OBSTACLE_DISTANCE
def send_obstacle_distance(self, current_time_us, sensor_type, distances, increment,
min_distance, max_distance, increment_f, angle_offset, mav_frame):
# self.connect(self.connection_string, self.connection_baudrate, self.source_system, self.source_component)
self.conn.mav.obstacle_distance_send(
current_time_us, # us Timestamp (UNIX time or time since system boot)
sensor_type, # sensor_type, defined here: https://mavlink.io/en/messages/common.html#MAV_DISTANCE_SENSOR
distances, # distances, uint16_t[72], cm
increment, # increment, uint8_t, deg
min_distance, # min_distance, uint16_t, cm
max_distance, # max_distance, uint16_t, cm
increment_f, # increment_f, float, deg
angle_offset, # angle_offset, float, deg
mav_frame # MAV_FRAME, vehicle-front aligned: https://mavlink.io/en/messages/common.html#MAV_FRAME_BODY_FRD
)
def run_main(self):
self.sched.start()
while not self.exit_threads:
with self.lock:
for k, _ in self.keys.items():
self.data[k] = self.rb.get_key_by_string(k)
# time.sleep(0.3)
# self.conn.send_heartbeat()
# m = self.conn.get_callbacks(['HEARTBEAT'])
# if m is None:
# continue
# self.logger.log_debug("Received callback: %s" % m)
# # utils.progress(m)
def mavlink_loop(self, conn, callbacks=['HEARTBEAT']):
while not self.exit_threads:
self.conn.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_ONBOARD_CONTROLLER,
mavutil.mavlink.MAV_AUTOPILOT_GENERIC,
0,
0,
0)
m = self.conn.recv_match(type=callbacks, timeout=1, blocking=True)
if m is None:
continue
self.logger.log_debug("Received callback: %s" % m)
def send_message(self, func, key):
while not self.exit_threads:
with self.lock:
try:
value = self.data[key]
if value is not None:
func(*value)
except Exception as e:
self.logger.log_error("Could not send %s"%e)
def close_script(self):
try:
self.sched.shutdown()
self.mavlink_thread.join()
self.conn.close()
except:
pass
scripter = RedisToAPScripterExt(log_source="redis_to_ap")
scripter.run()
| 44.778409 | 169 | 0.544601 |
79473741de85ba6e6a2fc642ec790b1794a14534 | 5,944 | py | Python | nyflow/core_simple.py | namyoungkim/NyFlow | d6c749e5a88e1ef3b6c73a1f927ab83abe1d7a3f | [
"MIT"
] | null | null | null | nyflow/core_simple.py | namyoungkim/NyFlow | d6c749e5a88e1ef3b6c73a1f927ab83abe1d7a3f | [
"MIT"
] | 1 | 2021-09-22T15:34:31.000Z | 2021-09-22T15:34:31.000Z | nyflow/core_simple.py | namyoungkim/nyflow | d6c749e5a88e1ef3b6c73a1f927ab83abe1d7a3f | [
"MIT"
] | null | null | null | import weakref
import numpy as np
import contextlib
# =============================================================================
# Config
# =============================================================================
class Config:
enable_backprop = True
@contextlib.contextmanager
def using_config(name, value):
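    # Temporarily override a Config attribute; the previous value is restored
    # when the with-block exits.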
old_value = getattr(Config, name)
setattr(Config, name, value)
try:
yield
finally:
setattr(Config, name, old_value)
def no_grad():
return using_config('enable_backprop', False)
# =============================================================================
# Variable
# =============================================================================
class Variable:
__array_priority__ = 200
def __init__(self, data, name=None):
if data is not None:
if not isinstance(data, np.ndarray):
raise TypeError('{} is not supported'.format(type(data)))
self.data = data
self.name = name
self.grad = None
self.creator = None
self.generation = 0
@property
def shape(self):
return self.data.shape
@property
def ndim(self):
return self.data.ndim
@property
def size(self):
return self.data.size
@property
def dtype(self):
return self.data.dtype
def __len__(self):
return len(self.data)
def __repr__(self):
if self.data is None:
return 'variable(None)'
p = str(self.data).replace('\n', '\n' + ' ' * 9)
return 'variable(' + p + ')'
def set_creator(self, func):
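        # Remember the producing function and place this variable one
        # generation later, so backward() can process functions in order.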
self.creator = func
self.generation = func.generation + 1
def cleargrad(self):
self.grad = None
def backward(self, retain_grad=False):
if self.grad is None:
self.grad = np.ones_like(self.data)
funcs = []
seen_set = set()
def add_func(f):
if f not in seen_set:
funcs.append(f)
seen_set.add(f)
funcs.sort(key=lambda x: x.generation)
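        # Seed the traversal with this variable's creator; keeping funcs sorted
        # by generation means pop() returns the newest function first.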
add_func(self.creator)
while funcs:
f = funcs.pop()
gys = [output().grad for output in f.outputs] # output is weakref
gxs = f.backward(*gys)
if not isinstance(gxs, tuple):
gxs = (gxs,)
for x, gx in zip(f.inputs, gxs):
if x.grad is None:
x.grad = gx
else:
x.grad = x.grad + gx
if x.creator is not None:
add_func(x.creator)
if not retain_grad:
for y in f.outputs:
y().grad = None # y is weakref
def as_variable(obj):
if isinstance(obj, Variable):
return obj
return Variable(obj)
def as_array(x):
if np.isscalar(x):
return np.array(x)
return x
# =============================================================================
# Function
# =============================================================================
class Function:
def __call__(self, *inputs):
inputs = [as_variable(x) for x in inputs]
xs = [x.data for x in inputs]
ys = self.forward(*xs)
if not isinstance(ys, tuple):
ys = (ys,)
outputs = [Variable(as_array(y)) for y in ys]
if Config.enable_backprop:
self.generation = max([x.generation for x in inputs])
for output in outputs:
output.set_creator(self)
self.inputs = inputs
self.outputs = [weakref.ref(output) for output in outputs]
return outputs if len(outputs) > 1 else outputs[0]
def forward(self, xs):
raise NotImplementedError()
def backward(self, gys):
raise NotImplementedError()
# =============================================================================
# Arithmetic operators / operator overloading
# =============================================================================
class Add(Function):
def forward(self, x0, x1):
y = x0 + x1
return y
def backward(self, gy):
return gy, gy
def add(x0, x1):
x1 = as_array(x1)
return Add()(x0, x1)
class Mul(Function):
def forward(self, x0, x1):
y = x0 * x1
return y
def backward(self, gy):
x0, x1 = self.inputs[0].data, self.inputs[1].data
return gy * x1, gy * x0
def mul(x0, x1):
x1 = as_array(x1)
return Mul()(x0, x1)
class Neg(Function):
def forward(self, x):
return -x
def backward(self, gy):
return -gy
def neg(x):
return Neg()(x)
class Sub(Function):
def forward(self, x0, x1):
y = x0 - x1
return y
def backward(self, gy):
return gy, -gy
def sub(x0, x1):
x1 = as_array(x1)
return Sub()(x0, x1)
def rsub(x0, x1):
x1 = as_array(x1)
return sub(x1, x0)
class Div(Function):
def forward(self, x0, x1):
y = x0 / x1
return y
def backward(self, gy):
x0, x1 = self.inputs[0].data, self.inputs[1].data
gx0 = gy / x1
gx1 = gy * (-x0 / x1 ** 2)
return gx0, gx1
def div(x0, x1):
x1 = as_array(x1)
return Div()(x0, x1)
def rdiv(x0, x1):
x1 = as_array(x1)
return div(x1, x0)
class Pow(Function):
def __init__(self, c):
self.c = c
def forward(self, x):
y = x ** self.c
return y
def backward(self, gy):
x = self.inputs[0].data
c = self.c
gx = c * x ** (c - 1) * gy
return gx
def pow(x, c):
return Pow(c)(x)
def setup_variable():
Variable.__add__ = add
Variable.__radd__ = add
Variable.__mul__ = mul
Variable.__rmul__ = mul
Variable.__neg__ = neg
Variable.__sub__ = sub
Variable.__rsub__ = rsub
Variable.__truediv__ = div
Variable.__rtruediv__ = rdiv
Variable.__pow__ = pow | 22.096654 | 79 | 0.4857 |
794737a97c176c9f701f94c89a9d3fa6ea1cba13 | 601 | py | Python | python/cartpole1.py | lusing/mljs | 4c708bb8e0759803ed94ead3e9cfadc3a97d6ed8 | [
"MIT"
] | null | null | null | python/cartpole1.py | lusing/mljs | 4c708bb8e0759803ed94ead3e9cfadc3a97d6ed8 | [
"MIT"
] | null | null | null | python/cartpole1.py | lusing/mljs | 4c708bb8e0759803ed94ead3e9cfadc3a97d6ed8 | [
"MIT"
] | null | null | null | import gym
def cartpole():
environment = gym.make('CartPole-v1')
environment.reset()
for i in range(1000):
# environment.render()
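        # Sample a random action each step; this baseline uses no learned policy.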
action = environment.action_space.sample()
observation, reward, done, info = environment.step(action)
print("Step {}:".format(i))
print("action: {}:".format(action))
print('observation: {}'.format(observation))
print('reward: {}'.format(reward))
print('done: {}'.format(done))
print('info: {}'.format(info))
if done:
break
if __name__ == '__main__':
cartpole()
| 28.619048 | 66 | 0.577371 |