# -*- coding: utf-8 -*-
"""
/dms/pool/views_show.py
.. displays the contents of a material pool
Django content Management System
Hans Rauch
[email protected]
The programs of the dms system may be used freely and adapted
to suit specific needs.
0.01 28.03.2007 Work started
0.02 18.03.2008 is_file_by_item_container
"""
import string
from django.utils.encoding import smart_unicode
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from dms.settings import MY_DOMAINS
from dms.settings import DOWNLOAD_URL
from dms.queries import get_site_url
from dms.queries import is_file_by_item_container
from dms.queries import get_parent_container
from dms.queries import get_role_by_name
from dms.utils_form import get_folderish_vars_show
from dms.utils import get_footer_email
from dms.text_icons import FOLDER_ICON
from dms.text_icons import NEW_WINDOW_ICON
from dms.text_icons import FILE_DETAIL
from dms.text_icons import EXTERN_ICON
from dms.pool.utils import get_user_support
from dms.folder.utils import get_folder_content
from dms.file.utils import get_file_size
from dms.file.utils import get_file_url
from dms_ext.extension import * # override dms functions
# -----------------------------------------------------
def pool_show(request, item_container):
""" zeigt den Inhalt eines Materialpools """
def get_section_view(items, sections):
""" erzeugt die Section-Ansicht der im Ordner enthaltenen Objekte """
from django.template.loader import get_template
from django.template import Context
tSection = get_template('app/pool/section.html')
content = ''
unknown = _(u'Unbekannter Zwischentitel')
section_exist = False
section = '--START--'
links = []
for i in items :
if section != i.section :
if section != unknown :
if section != '--START--' and links != [] :
cSection = Context ( { 'section': section, 'links': links } )
content += tSection.render ( cSection)
if i.section in sections :
section = i.section
section_exist = True
else :
section = unknown
links = []
d = {}
d['title'] = i.item.title
d['text'] = i.item.text
d['text_more'] = i.item.text_more
if i.item.app.name == 'dmsFile':
d['size'] = '<br />' + i.item.name + ', ' + get_footer_email(i.item) + ', ' + \
smart_unicode(get_file_size(i, i.container.is_protected())) + ' Bytes'
else:
d['size'] = ''
if i.item.app.is_folderish :
d['folder_icon'] = FOLDER_ICON
else :
d['folder_icon'] = ''
# --- is this a file or a folder object?
if string.find(i.item.name, '.html') > 0 or is_file_by_item_container(i):
if i.item.app.name in ['dmsRedirect', 'dmsLinkItem', 'dmsEduLinkItem']:
d['url'] = i.item.url_more
if string.find(i.item.url_more, 'http://') >= 0:
is_my_domain = False
for domain in MY_DOMAINS:
if string.find(i.item.url_more, domain) >= 0:
is_my_domain = True
break
if is_my_domain:
if i.item.url_more_extern:
d['extern'] = '_extern'
d['extern_icon'] = NEW_WINDOW_ICON
else :
d['extern'] = '_extern'
d['extern_icon'] = EXTERN_ICON
elif i.item.app.name in ['dmsFile', 'dmsImage', 'dmsEduFileItem']:
if i.item.url_more_extern:
d['extern'] = '_extern'
d['extern_icon'] = NEW_WINDOW_ICON
else:
d['extern'] = ''
d['extern_icon'] = ''
d['url'] = get_file_url(i, i.container.is_protected())
else:
d['url'] = get_site_url(i, i.item.name)
else:
d['url'] = get_site_url(i, 'index.html')
#if i.item.app.name != 'dmsRedirect':
# d['folder_icon'] = FILE_DETAIL % get_site_url(i, i.item.name + '/show/')
links.append ( d )
if section != '--START--' and links != []:
if section == unknown and not section_exist:
section = ''
cSection = Context ( { 'section': section, 'links': links } )
content += tSection.render ( cSection)
return content
app_name = 'pool'
items, sections, d_sections = get_folder_content(item_container)
vars = get_folderish_vars_show(request, item_container, app_name,
get_section_view(items, sections),
get_user_support(item_container, request.user))
parent = get_parent_container(item_container.container)
vars['no_top_main_navigation'] = (parent.min_role_id < get_role_by_name('no_rights').id) or \
item_container.container.nav_name_left.find('webquest') >= 0
#l = item_container.container.nav_name_left
#p = parent.nav_name_left
#assert False
return render_to_response ( 'app/base_folderish.html', vars )
|
from django.apps import AppConfig
class InfraConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'infra'
|
import sys
import unicodedata
if len(sys.argv) < 2:
print("USAGE: python3 emojify.py CHARACTER")
print("Will output Markdown to render that character as an image.")
sys.exit(1)
if str is bytes: char = sys.argv[1].decode("UTF-8")[0] # Py2
else: char = sys.argv[1][0] # Py3
try:
print("U+%X %s" % (ord(char), unicodedata.name(char)))
print("")
except ValueError:
print("U+%X <unknown>" % ord(char))
print("**" %
(char, ord(char)))
|
from .base import Unit
from .prefix import (
Yotta, Zetta, Exa, Peta, Tera, Giga, Mega, Kilo, Hecto,
Deca, Base, Centi, Mili, Micro, Nano, Pico, Femto, Atto,
Zepto, Yocto )
class Metre( Unit ):
name = 'metre'
symbol = 'm'
valid_prefix = (
Yotta, Zetta, Exa, Peta, Tera, Giga, Mega, Kilo,
Hecto, Deca, Base, Centi, Mili, Micro, Nano, Pico,
Femto, Atto, Zepto, Yocto )
class Gram( Unit ):
name = 'gram'
symbol = 'g'
valid_prefix = (
Yotta, Zetta, Exa, Peta, Tera, Giga, Mega, Kilo,
Hecto, Deca, Base, Centi, Mili, Micro, Nano, Pico,
Femto, Atto, Zepto, Yocto )
|
from __future__ import print_function, division, absolute_import
import sys
import pytest
from distributed.protocol import (loads, dumps, msgpack, maybe_compress,
to_serialize)
from distributed.protocol.serialize import Serialize, Serialized, deserialize
from distributed.utils_test import slow
def test_protocol():
for msg in [1, 'a', b'a', {'x': 1}, {b'x': 1}, {'x': b''}, {}]:
assert loads(dumps(msg)) == msg
def test_compression_1():
pytest.importorskip('lz4')
np = pytest.importorskip('numpy')
x = np.ones(1000000)
frames = dumps({'x': Serialize(x.tobytes())})
assert sum(map(len, frames)) < x.nbytes
y = loads(frames)
assert {'x': x.tobytes()} == y
def test_compression_2():
pytest.importorskip('lz4')
np = pytest.importorskip('numpy')
x = np.random.random(10000)
header, payload = dumps(x.tobytes())
assert (not header or
not msgpack.loads(header, encoding='utf8').get('compression'))
def test_compression_without_deserialization():
pytest.importorskip('lz4')
np = pytest.importorskip('numpy')
x = np.ones(1000000)
frames = dumps({'x': Serialize(x)})
assert all(len(frame) < 1000000 for frame in frames)
msg = loads(frames, deserialize=False)
assert all(len(frame) < 1000000 for frame in msg['x'].frames)
def test_small():
assert sum(map(len, dumps(b''))) < 10
assert sum(map(len, dumps(1))) < 10
def test_small_and_big():
d = {'x': [1, 2, 3], 'y': b'0' * 10000000}
L = dumps(d)
assert loads(L) == d
# assert loads([small_header, small]) == {'x': [1, 2, 3]}
# assert loads([big_header, big]) == {'y': d['y']}
def test_maybe_compress():
import zlib
payload = b'123'
assert maybe_compress(payload, None) == (None, payload)
assert maybe_compress(payload, 'zlib') == (None, payload)
assert maybe_compress(b'111', 'zlib') == (None, b'111')
payload = b'0' * 10000
assert maybe_compress(payload, 'zlib') == ('zlib', zlib.compress(payload))
def test_maybe_compress_sample():
np = pytest.importorskip('numpy')
lz4 = pytest.importorskip('lz4')
payload = np.random.randint(0, 255, size=10000).astype('u1').tobytes()
fmt, compressed = maybe_compress(payload, 'lz4')
assert fmt is None
assert compressed == payload
def test_large_bytes():
msg = {'x': b'0' * 1000000, 'y': 1}
frames = dumps(msg)
assert loads(frames) == msg
assert len(frames[0]) < 1000
assert len(frames[1]) < 1000
assert loads(frames, deserialize=False) == msg
@slow
def test_large_messages():
np = pytest.importorskip('numpy')
psutil = pytest.importorskip('psutil')
pytest.importorskip('lz4')
if psutil.virtual_memory().total < 8e9:
return
if sys.version_info.major == 2:
return 2
x = np.random.randint(0, 255, size=200000000, dtype='u1')
msg = {'x': [Serialize(x), b'small_bytes'],
'y': {'a': Serialize(x), 'b': b'small_bytes'}}
b = dumps(msg)
msg2 = loads(b)
assert msg['x'][1] == msg2['x'][1]
assert msg['y']['b'] == msg2['y']['b']
assert (msg['x'][0].data == msg2['x'][0]).all()
assert (msg['y']['a'].data == msg2['y']['a']).all()
def test_loads_deserialize_False():
frames = dumps({'data': Serialize(123), 'status': 'OK'})
msg = loads(frames)
assert msg == {'data': 123, 'status': 'OK'}
msg = loads(frames, deserialize=False)
assert msg['status'] == 'OK'
assert isinstance(msg['data'], Serialized)
result = deserialize(msg['data'].header, msg['data'].frames)
assert result == 123
def test_loads_without_deserialization_avoids_compression():
pytest.importorskip('lz4')
b = b'0' * 100000
msg = {'x': 1, 'data': to_serialize(b)}
frames = dumps(msg)
assert sum(map(len, frames)) < 10000
msg2 = loads(frames, deserialize=False)
assert sum(map(len, msg2['data'].frames)) < 10000
msg3 = dumps(msg2)
msg4 = loads(msg3)
assert msg4 == {'x': 1, 'data': b'0' * 100000}
def eq_frames(a, b):
if b'headers' in a:
return (msgpack.loads(a, use_list=False)
== msgpack.loads(b, use_list=False))
else:
return a == b
def test_dumps_loads_Serialize():
msg = {'x': 1, 'data': Serialize(123)}
frames = dumps(msg)
assert len(frames) > 2
result = loads(frames)
assert result == {'x': 1, 'data': 123}
result2 = loads(frames, deserialize=False)
assert result2['x'] == 1
assert isinstance(result2['data'], Serialized)
assert any(a is b
for a in result2['data'].frames
for b in frames)
frames2 = dumps(result2)
assert all(map(eq_frames, frames, frames2))
result3 = loads(frames2)
assert result == result3
|
import pytest
import os
from .. import Relic
from ..metadata import Metadata
from ..storage import FileStorage
from unittest.mock import patch
import datetime as dt
import numpy as np
raw_config = """
{
"s3": {
"storage": {
"type": "S3",
"args": {
"s3_bucket": "Scratch-bucket",
"prefix": "scratch"
}
}
}
}"""
@pytest.fixture
def test_storage(tmp_path):
return FileStorage(str(tmp_path), "test-metadata")
def test_metadata_when_adding_arrays(test_storage):
rq = Relic(
name="test", relic_type="test", storage_name="tests", storage=test_storage
)
rq.add_array("test-array", np.zeros((100, 128, 128)))
metadata = rq.describe()
assert metadata["test"]["arrays"][0]["name"] == "test-array"
assert metadata["test"]["arrays"][0]["data_type"] == "arrays"
assert metadata["test"]["arrays"][0]["size"] > 0
assert metadata["test"]["arrays"][0]["shape"] == "(100, 128, 128)"
def test_metadata_when_adding_text(test_storage):
rq = Relic(
name="test", relic_type="test", storage_name="tests", storage=test_storage
)
test_text = "This is the test arena"
rq.add_text("test-text", test_text)
metadata = rq.describe()
assert metadata["test"]["text"][0]["name"] == "test-text"
assert metadata["test"]["text"][0]["data_type"] == "text"
assert metadata["test"]["text"][0]["size"] > 0
assert metadata["test"]["text"][0]["shape"] == len(test_text)
def test_metadata_when_adding_html(test_storage):
rq = Relic(
name="test", relic_type="test", storage_name="tests", storage=test_storage
)
rq.add_html_from_path(
"test-html.html", os.path.join(os.path.dirname(__file__), "test.html")
)
metadata = rq.describe()
assert metadata["test"]["html"][0]["name"] == "test-html.html"
assert metadata["test"]["html"][0]["data_type"] == "html"
@patch.dict(os.environ, {"RELIQUERY_CONFIG": raw_config})
@patch("reliquery.storage.S3Storage")
def test_relic_s3_storage_syncs_on_init(storage):
storage().put_text.return_value = "exists"
storage().get_metadata.return_value = {
"test": {
"arrays": [
{
"id": 1,
"name": "test-array",
"data_type": "arrays",
"relic_type": "test",
"size": 80.0,
"shape": "(100,100,100)",
"last_modified": dt.datetime.utcnow().strftime("%m/%d/%Y %H:%M:%S"),
}
],
"text": [],
"html": [],
}
}
storage().list_keys.return_value = ["test-array"]
rq = Relic(name="test", relic_type="test", storage_name="s3")
assert len(rq.describe()["test"]["arrays"]) == 1
@patch.dict(os.environ, {"RELIQUERY_CONFIG": raw_config})
@patch("reliquery.storage.S3Storage.put_text")
@patch("reliquery.storage.S3Storage.list_keys")
@patch("reliquery.storage.S3Storage.get_metadata")
def test_db_connection(get_metadata, list_keys, put_text):
# mock arguments are injected bottom-up, so the innermost @patch (get_metadata) is passed first
put_text.return_value = "exists"
list_keys.return_value = []
get_metadata.return_value = {}
rq = Relic(
name="test",
relic_type="test",
storage_name="s3",
check_exists=False,
)
db = rq.metadata_db
assert len(list(iter(db.get_all_metadata()))) == 0
db.add_metadata(
Metadata(
"name",
"data type",
rq._relic_data(),
last_modified=dt.datetime.utcnow().strftime("%m/%d/%Y %H:%M:%S"),
)
)
meta = [i for i in db.get_all_metadata()]
assert len(meta) == 1
meta = [i for i in db.get_all_metadata()]
assert meta[0].name == "name"
assert meta[0].data_type == "data type"
assert meta[0].relic.storage_name == "s3"
assert meta[0].last_modified is not None
|
import logging
from rbtools.api.errors import APIError
from rbtools.clients.errors import InvalidRevisionSpecError
from rbtools.commands import CommandError
from rbtools.utils.match_score import Score
from rbtools.utils.repository import get_repository_id
from rbtools.utils.users import get_user
def get_draft_or_current_value(field_name, review_request):
"""Returns the draft or current field value from a review request.
If a draft exists for the supplied review request, return the draft's
field value for the supplied field name, otherwise return the review
request's field value for the supplied field name.
"""
if review_request.draft:
fields = review_request.draft[0]
else:
fields = review_request
return fields[field_name]
def get_possible_matches(review_requests, summary, description, limit=5):
"""Returns a sorted list of tuples of score and review request.
Each review request is given a score based on the summary and
description provided. The result is a sorted list of tuples containing
the score and the corresponding review request, sorted by the highest
scoring review request first.
"""
candidates = []
# Get all potential matches.
for review_request in review_requests.all_items:
summary_pair = (get_draft_or_current_value('summary', review_request),
summary)
description_pair = (get_draft_or_current_value('description',
review_request),
description)
score = Score.get_match(summary_pair, description_pair)
candidates.append((score, review_request))
# Sort by summary and description on descending rank.
sorted_candidates = sorted(
candidates,
key=lambda m: (m[0].summary_score, m[0].description_score),
reverse=True
)
return sorted_candidates[:limit]
def get_revisions(tool, cmd_args):
"""Returns the parsed revisions from the command line arguments.
These revisions are used for diff generation and commit message
extraction. They will be cached for future calls.
"""
# Parse the provided revisions from the command line and generate
# a spec or set of specialized extra arguments that the SCMClient
# can use for diffing and commit lookups.
try:
revisions = tool.parse_revision_spec(cmd_args)
except InvalidRevisionSpecError:
if not tool.supports_diff_extra_args:
raise
revisions = None
return revisions
def find_review_request_by_change_id(api_client, api_root, repository_info,
repository_name, revisions):
"""Ask ReviewBoard for the review request ID for the tip revision.
Note that this function calls the ReviewBoard API with the only_fields
parameter; thus the returned review request will contain only the fields
specified by the only_fields variable.
If no review request is found, None will be returned instead.
"""
only_fields = 'id,commit_id,changenum,status,url,absolute_url'
change_id = revisions['tip']
logging.debug('Attempting to find review request from tip revision ID: %s'
% change_id)
# Strip off any prefix that might have been added by the SCM.
change_id = change_id.split(':', 1)[1]
optional_args = {}
if change_id.isdigit():
# Populate integer-only changenum field also for compatibility
# with older API versions
optional_args['changenum'] = int(change_id)
user = get_user(api_client, api_root, auth_required=True)
repository_id = get_repository_id(
repository_info, api_root, repository_name)
# Don't limit query to only pending requests because it's okay to stamp a
# submitted review.
review_requests = api_root.get_review_requests(repository=repository_id,
from_user=user.username,
commit_id=change_id,
only_links='self',
only_fields=only_fields,
**optional_args)
if review_requests:
count = review_requests.total_results
# Only one review can be associated with a specific commit ID.
if count > 0:
assert count == 1, '%d review requests were returned' % count
review_request = review_requests[0]
logging.debug('Found review request %s with status %s'
% (review_request.id, review_request.status))
if review_request.status != 'discarded':
return review_request
return None
def guess_existing_review_request(repository_info, repository_name,
api_root, api_client, tool, revisions,
guess_summary, guess_description,
is_fuzzy_match_func=None,
no_commit_error=None,
submit_as=None):
"""Try to guess the existing review request ID if it is available.
The existing review request is guessed by comparing the existing
summary and description to the current post's summary and description,
respectively. The current post's summary and description are guessed if
they are not provided.
If the summary and description exactly match those of an existing
review request, that request is immediately returned. Otherwise,
the user is prompted to select from a list of potential matches,
sorted by the highest ranked match first.
Note that this function calls the ReviewBoard API with the only_fields
parameter; thus the returned review request will contain only the fields
specified by the only_fields variable.
"""
only_fields = 'id,summary,description,draft,url,absolute_url'
if submit_as:
username = submit_as
else:
user = get_user(api_client, api_root, auth_required=True)
username = user.username
repository_id = get_repository_id(
repository_info, api_root, repository_name)
try:
# Get only pending requests by the current user for this
# repository.
review_requests = api_root.get_review_requests(
repository=repository_id,
from_user=username,
status='pending',
expand='draft',
only_fields=only_fields,
only_links='draft',
show_all_unpublished=True)
if not review_requests:
raise CommandError('No existing review requests to update for '
'user %s.'
% username)
except APIError as e:
raise CommandError('Error getting review requests for user '
'%s: %s' % (username, e))
summary = None
description = None
if not guess_summary or not guess_description:
try:
commit_message = tool.get_commit_message(revisions)
if commit_message:
if not guess_summary:
summary = commit_message['summary']
if not guess_description:
description = commit_message['description']
elif callable(no_commit_error):
no_commit_error()
except NotImplementedError:
raise CommandError('--summary and --description are required.')
if not summary and not description:
return None
possible_matches = get_possible_matches(review_requests, summary,
description)
exact_match_count = num_exact_matches(possible_matches)
for score, review_request in possible_matches:
# If the score is the only exact match, return the review request
# ID without confirmation, otherwise prompt.
if ((score.is_exact_match() and exact_match_count == 1) or
(callable(is_fuzzy_match_func) and
is_fuzzy_match_func(review_request))):
return review_request
return None
def num_exact_matches(possible_matches):
"""Returns the number of exact matches in the possible match list."""
count = 0
for score, request in possible_matches:
if score.is_exact_match():
count += 1
return count
|
#!/usr/bin/python
import numpy as np
import flask
from flask_restful import reqparse
from flask import request, make_response, current_app
from . import valueFromRequest, make_json_response
from ..database_support import APIDB, database_cursor
from .utils import selection_to_where
api_histogram_2d = flask.Blueprint("api_histogram_2d", __name__)
@api_histogram_2d.route("/histogram_2d", methods=['GET'])
def histogram2d():
'''
Parameters:
x_attribute (string) : column name
y_attribute (string) : column name
x_range (float, float) : range of values
y_range (float, float) : range of values
x_n_bins (integer) [optional] : number of bins, x axis
y_n_bins (integer) [optional] : number of bins, y axis
selection (string) [optional] : selection expression used to build the SQL WHERE clause
'''
x_attribute = valueFromRequest(key="x_attribute", request=request)
y_attribute = valueFromRequest(key="y_attribute", request=request)
x_range = valueFromRequest(key="x_range", request=request, asList=True)
y_range = valueFromRequest(key="y_range", request=request, asList=True)
x_n_bins = valueFromRequest(key="x_n_bins", request=request)
y_n_bins = valueFromRequest(key="y_n_bins", request=request)
selection = valueFromRequest(key="selection", request=request, asList=False)
where = selection_to_where(selection)
apidb = APIDB()
pool = apidb.pool()
with database_cursor(pool) as cursor:
query = "select * from pg_hist_2d('select {0},{1} from kic {2} LIMIT 1000000', ARRAY[{3},{4}], ARRAY[{5},{6}], ARRAY[{7},{8}]);".format(x_attribute, y_attribute, where, x_n_bins, y_n_bins, x_range[0], y_range[0], x_range[1], y_range[1])
cursor.execute(query)
values = np.zeros((int(y_n_bins), int(x_n_bins)))
# Only non-zero entries are returned
for row in cursor.fetchall():
x_id, y_id, count = row
values[y_id, x_id] = count
return make_json_response(values.tolist())
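# A minimal client-side sketch of calling this endpoint; the host, port,
# blueprint mount point and the column names are assumptions for illustration only.
def _example_histogram_2d_request():
    import requests  # assumed to be available on the client side
    params = {
        'x_attribute': 'teff', 'y_attribute': 'logg',    # hypothetical kic columns
        'x_range': '3000,8000', 'y_range': '0,5',
        'x_n_bins': 50, 'y_n_bins': 50,
    }
    # the endpoint returns a y_n_bins x x_n_bins nested list of counts as JSON
    return requests.get('http://localhost:5000/histogram_2d', params=params).json()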
|
import jnius_config
import os
import inspect
from itertools import chain
import numpy as np
global nd4j
nd4j = None
global INDArray
INDArray = None
global transforms
transforms = None
indexing = None
DataBuffer = None
system = None
Integer = None
Float = None
Double = None
nd4j_index = None
serde = None
native_ops_holder = None
native_ops = None
shape = None
serde = None
DoublePointer = None
FloatPointer = None
IntPointer = None
DataTypeUtil = None
MemoryManager = None
memory_manager = None
methods = None
global SameDiff
SameDiff = None
def _expand_directory(path):
if not path.endswith('*'):
return [path, ]
else:
# wildcard expansions like /somedir/* (needed because jnius cannot expand class path wildcards itself)
clean_path = path.rstrip('*')
return [os.path.join(clean_path, y) for y in os.listdir(clean_path)]
def get_classpath(base_path):
"""
Get the classpath of based on the given folder.
:param base_path: the directory to get the classpath for
:return:
"""
return ':'.join(chain.from_iterable(map(lambda x: _expand_directory(x), base_path.split(':'))))
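# e.g. get_classpath('/opt/nd4j/lib/*:/opt/extra.jar') expands the trailing '*'
# entry to every file inside /opt/nd4j/lib and joins everything with ':' again
# (the paths here are purely illustrative).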
def init():
jnius_config.add_options('-Dorg.bytedeco.javacpp.nopointergc=true')
try:
jnius_classpath = os.environ['JUMPY_CLASS_PATH']
except KeyError:
raise Exception('Please specify a jar or directory for JUMPY_CLASS_PATH in the environment')
jnius_config.set_classpath(get_classpath(jnius_classpath))
# after jnius is initialized with proper class path *then* we setup nd4j
from jnius import autoclass
global nd4j
nd4j = autoclass('org.nd4j.linalg.factory.Nd4j')
global INDArray
INDArray = autoclass('org.nd4j.linalg.api.ndarray.INDArray')
global transforms
transforms = autoclass('org.nd4j.linalg.ops.transforms.Transforms')
global indexing
indexing = autoclass('org.nd4j.linalg.indexing.NDArrayIndex')
global DataBuffer
DataBuffer = autoclass('org.nd4j.linalg.api.buffer.DataBuffer')
global system
system = autoclass('java.lang.System')
system.out.println(system.getProperty('org.bytedeco.javacpp.nopointergc'))
global Integer
Integer = autoclass('java.lang.Integer')
global Float
Float = autoclass('java.lang.Float')
global Double
Double = autoclass('java.lang.Double')
global nd4j_index
nd4j_index = autoclass('org.nd4j.linalg.indexing.NDArrayIndex')
global shape
shape = autoclass('org.nd4j.linalg.api.shape.Shape')
global serde
serde = autoclass('org.nd4j.serde.binary.BinarySerde')
global native_ops_holder
native_ops_holder = autoclass('org.nd4j.nativeblas.NativeOpsHolder')
global native_ops
native_ops = native_ops_holder.getInstance().getDeviceNativeOps()
global DoublePointer
DoublePointer = autoclass('org.bytedeco.javacpp.DoublePointer')
global FloatPointer
FloatPointer = autoclass('org.bytedeco.javacpp.FloatPointer')
global IntPointer
IntPointer = autoclass('org.bytedeco.javacpp.IntPointer')
global DataTypeUtil
DataTypeUtil = autoclass('org.nd4j.linalg.api.buffer.util.DataTypeUtil')
global MemoryManager
MemoryManager = autoclass('org.nd4j.linalg.memory.MemoryManager')
global memory_manager
memory_manager = nd4j.getMemoryManager()
global methods
methods = inspect.getmembers(INDArray, predicate=inspect.ismethod)
for name, method in methods:
setattr(Nd4jArray, name, method)
methods = inspect.getmembers(DataBuffer, predicate=inspect.ismethod)
for name, method in methods:
setattr(Nd4jBuffer, name, method)
global SameDiff
SameDiff = autoclass('org.nd4j.autodiff.samediff.SameDiff')
def disable_gc():
memory_manager.togglePeriodicGc(False)
def set_gc_interval(interval=5000):
memory_manager.setAutoGcWindow(interval)
def same_diff_create():
'''
Create a samediff instance.
:return:
'''
global SameDiff
return SameDiff.create()
def data_type():
"""
Returns the data type name
:return:
"""
return DataTypeUtil.getDTypeForName(DataTypeUtil.getDtypeFromContext())
def set_data_type(data_type):
"""
Set the data type for nd4j
:param data_type: the data type to set
one of:
float
double
:return:
"""
data_type_type = DataTypeUtil.getDtypeFromContext(data_type)
DataTypeUtil.setDTypeForContext(data_type_type)
def dot(array1, array2):
"""
The equivalent of numpy's "dot"
:param array1: the first Nd4jArray
:param array2: the second Nd4jArray
:return: an nd4j array with the matrix multiplication
result
"""
return Nd4jArray(array1.array.mmul(array2.array))
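# A minimal sketch of dot(); assumes init() has been called with a valid
# JUMPY_CLASS_PATH and uses from_np() (defined further below) to wrap numpy arrays.
def _dot_example():
    a = from_np(np.array([[1.0, 2.0], [3.0, 4.0]]))
    b = from_np(np.array([[5.0, 6.0], [7.0, 8.0]]))
    return dot(a, b)   # Nd4jArray holding the 2x2 matrix product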
def _get_numpy_buffer_reference(np_arr):
return np.asarray(np_arr, dtype=_numpy_datatype_from_nd4j_context())
def get_buffer_from_arr(np_arr):
"""
Create an nd4j data buffer from a numpy
array's pointer
:param np_arr: The input numpy array
:return: and nd4j data buffer based
on the numpy array's pointer
"""
pointer_address = get_array_address(np_arr)
pointer = native_ops.pointerForAddress(pointer_address)
size = np_arr.size
if np_arr.dtype == 'float64':
as_double = DoublePointer(pointer)
return Nd4jBuffer(nd4j.createBuffer(as_double, size))
elif np_arr.dtype == 'float32':
as_float = FloatPointer(pointer)
return Nd4jBuffer(nd4j.createBuffer(as_float, size))
elif np_arr.dtype == 'int64':
as_int = IntPointer(pointer)
return Nd4jBuffer(data_buffer=nd4j.createBuffer(as_int, size),
numpy_pointer=_get_numpy_buffer_reference(np_arr))
def _to_number(number):
"""
Convert a number to a scalar ndarray
:param number:
:return:
"""
return nd4j.scalar(number)
def get_array_address(np_arr):
"""
:param np_arr: The numpy array to get the pointer address for
:return: the pointer address as a long
"""
pointer, read_only_flag = np_arr.__array_interface__['data']
return pointer
class Nd4jArray(object):
"""
A small wrapper around nd4j's ndarray
in java.
"""
def __init__(self, nd4j_array=None,
numpy_array=None):
self.array = nd4j_array
self.numpy_array = numpy_array
def __add__(self, other):
if isinstance(other, Nd4jArray):
return Nd4jArray(self.array.add(other.array), numpy_array=self.numpy_array)
# scalar
return Nd4jArray(nd4j_array=self.array.add(_to_number(other)), numpy_array=self.numpy_array)
def __sub__(self, other):
if isinstance(other, Nd4jArray):
return Nd4jArray(nd4j_array=self.array.sub(other.array), numpy_array=self.numpy_array)
# scalar
return Nd4jArray(nd4j_array=self.array.sub(_to_number(other)), numpy_array=self.numpy_array)
def __div__(self, other):
if isinstance(other, Nd4jArray):
return Nd4jArray(nd4j_array=self.array.div(other.array), numpy_array=self.numpy_array)
# scalar
return Nd4jArray(nd4j_array=self.array.div(_to_number(other)), numpy_array=self.numpy_array)
def __mul__(self, other):
if isinstance(other, Nd4jArray):
return Nd4jArray(nd4j_array=self.array.mul(other.array), numpy_array=self.numpy_array)
# scalar
return Nd4jArray(nd4j_array=self.array.mul(_to_number(other)), numpy_array=self.numpy_array)
def __gt__(self, other):
if isinstance(other, Nd4jArray):
return Nd4jArray(nd4j_array=self.array.gt(other.array), numpy_array=self.numpy_array)
# scalar
return Nd4jArray(nd4j_array=self.array.gt(_to_number(other)), numpy_array=self.numpy_array)
def __lt__(self, other):
if isinstance(other, Nd4jArray):
return Nd4jArray(nd4j_array=self.array.lt(other.array), numpy_array=self.numpy_array)
# scalar
return Nd4jArray(nd4j_array=self.array.lt(_to_number(other)), numpy_array=self.numpy_array)
def __deepcopy__(self, memodict={}):
return Nd4jArray(nd4j_array=self.array.dup())
def __eq__(self, other):
if isinstance(other, Nd4jArray):
return Nd4jArray(nd4j_array=self.array.eq(other.array))
# scalar
return Nd4jArray(nd4j_array=self.array.eq(_to_number(other)), numpy_array=self.numpy_array)
def __imul__(self, other):
if isinstance(other, Nd4jArray):
return Nd4jArray(nd4j_array=self.array.muli(other.array), numpy_array=self.numpy_array)
# scalar
return Nd4jArray(nd4j_array=self.array.muli(_to_number(other)), numpy_array=self.numpy_array)
def __isub__(self, other):
if isinstance(other, Nd4jArray):
return Nd4jArray(nd4j_array=self.array.subi(other.array), numpy_array=self.numpy_array)
# scalar
return Nd4jArray(nd4j_array=self.array.subi(_to_number(other)), numpy_array=self.numpy_array)
def __iadd__(self, other):
if isinstance(other, Nd4jArray):
return Nd4jArray(nd4j_array=self.array.addi(other.array), numpy_array=self.numpy_array)
# scalar
return Nd4jArray(nd4j_array=self.array.addi(_to_number(other)), numpy_array=self.numpy_array)
def __idiv__(self, other):
if isinstance(other, Nd4jArray):
return Nd4jArray(nd4j_array=self.array.divi(other.array))
# scalar
return Nd4jArray(nd4j_array=self.array.divi(_to_number(other)), numpy_array=self.numpy_array)
def __getitem__(self, item):
if isinstance(item, int):
return self.array.getDouble(item)
else:
raise AssertionError("Only int types are supported for indexing right now")
def __setitem__(self, key, value):
if isinstance(key, int):
self.array.putScalar(key, value)
else:
raise AssertionError("Only int types are supported for indexing right now")
def rank(self):
return self.array.rank()
def length(self):
return self.array.length()
def shape(self):
return self.array.shape()
def stride(self):
return self.array.stride()
def data(self):
return self.array.data()
class Nd4jBuffer(object):
def __init__(self, data_buffer=None, numpy_pointer=None):
self.data_buffer = data_buffer
self.numpy_pointer = numpy_pointer
def __getitem__(self, item):
if isinstance(item, int):
return self.data_buffer.getDouble(item)
else:
raise AssertionError("Please ensure that item is of type int")
def __setitem__(self, key, value):
if isinstance(key, int):
self.data_buffer.put(key, value)
else:
raise AssertionError("Please ensure that item is of type int")
def length(self):
return self.data_buffer.length()
def element_size(self):
return self.data_buffer.getElementSize()
def _nd4j_datatype_from_np(np_datatype_name):
"""
:param np_datatype_name:
a numpy data type name.
1 of:
float64
float32
float16
:return: the equivalent nd4j data type name (double,float,half)
"""
if np_datatype_name == 'float64':
return 'double'
elif np_datatype_name == 'float32':
return 'float'
elif np_datatype_name == 'float16':
return 'half'
return None
def _nd4j_datatype_from_np_array(array):
"""
Gets the equivalent nd4j datatype
from the passed in numpy array
:param array:
:return:
"""
return _nd4j_datatype_from_np(array.dtype.name)
def _numpy_datatype_from_nd4j_context():
"""
Returns the appropriate
numpy data type
given the current nd4j context
for data type
:return:
"""
nd4j_datatype = data_type()
if nd4j_datatype == 'double':
return np.float64
elif nd4j_datatype == 'float':
return np.float32
elif nd4j_datatype == 'half':
return np.float16
def _align_np_datatype_for_array(array):
"""
Ensure the given numpy array
matches the current nd4j data type
:param array:
:return:
"""
return np.asarray(array, _numpy_datatype_from_nd4j_context())
def _assert_data_type_length(data_buffer):
data_type = _numpy_datatype_from_nd4j_context()
element_size = data_buffer.getElementSize()
if data_type == np.float32 and element_size != 4:
raise AssertionError("Data Type from nd4j is float. Data buffer size is not 4")
elif data_type == np.float64 and element_size != 8:
raise AssertionError("Data Type from nd4j is double. Data buffer size is not 8")
elif data_type == np.int and element_size != 4:
raise AssertionError("Data Type from nd4j is int. Data buffer size is not 4")
def from_np(np_arr):
"""
Create an nd4j ndarray from a numpy array (passing the
numpy pointer buffer by reference)
:param np_arr: a numpy array
:return:
"""
np_arr = _align_np_datatype_for_array(np_arr)
# nd4j doesn't have 1d arrays. Convert to a row vector
if np_arr.ndim == 1:
np_arr = np.reshape(np_arr, (1, np_arr.size))
data_buffer = get_buffer_from_arr(np_arr).data_buffer
_assert_data_type_length(data_buffer)
# note here we divide the strides by 8 for numpy
# the reason we do this is because numpy's strides are based on bytes rather than words
strides = map(lambda x: x / data_buffer.getElementSize(), np_arr.strides)
arr_shape = np_arr.shape
return Nd4jArray(nd4j_array=nd4j.create(data_buffer, arr_shape, strides, 0))
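# A small sketch of from_np(); per the docstring above, the returned Nd4jArray
# wraps the (dtype-aligned) numpy buffer rather than copying it. Assumes init()
# has already been called.
def _from_np_example():
    src = np.ones((2, 3))
    wrapped = from_np(src)
    return wrapped[0], wrapped.length()   # (1.0, 6)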
if __name__ == "__main__":
init()
|
class Solution:
# @return an integer
def uniquePathsWithObstacles(self, obstacleGrid):
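# Dynamic programming: pathcount[y_][x_] holds the number of distinct paths
# reaching cell (y_, x_); each cell sums the counts of its top and left
# neighbours, and any cell containing an obstacle is reset to 0.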
y = len(obstacleGrid)
x = len(obstacleGrid[0])
pathcount = [[0 for i in xrange(x)] for j in xrange(y)]
obstacleFound = False
for x_ in range(x):
if (obstacleGrid[0][x_]):
obstacleFound = True
if (not obstacleFound):
pathcount[0][x_] = 1
for x_ in xrange(x):
for y_ in range(1, y):
if y_ == 0:
top = 0
elif obstacleGrid[y_ - 1][x_] == 1:
top = 0
else:
top = pathcount[y_ - 1][x_]
if x_ == 0:
left = 0
elif obstacleGrid[y_][x_ - 1] == 1:
left = 0
else:
left = pathcount[y_][x_ - 1]
pathcount[y_][x_] = top + left
if (obstacleGrid[y_][x_]):
pathcount[y_][x_] = 0
return pathcount[y - 1][x - 1]
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Mark Koennecke <[email protected]>
#
# *****************************************************************************
import copy
from nicos.nexus.elements import ConstDataset, DetectorDataset, \
DeviceAttribute, DeviceDataset, ImageDataset, NexusSampleEnv, \
NXAttribute, NXLink, NXScanLink, NXTime
from nicos.nexus.nexussink import NexusTemplateProvider
from nicos_sinq.nexus.specialelements import ArrayParam, CellArray, FixedArray
class ZEBRATemplateProvider(NexusTemplateProvider):
_zebra_default = {"NeXus_Version": "4.3.0", "instrument": "ZEBRA",
"owner": DeviceAttribute('ZEBRA', 'responsible'),
"entry1:NXentry": {"title": DeviceDataset('Exp',
'title'),
"proposal_title":
DeviceDataset('Exp', 'title'),
"proposal_id":
DeviceDataset('Exp', 'proposal'),
"start_time": NXTime(),
"zebra_mode":
DeviceDataset('zebramode',
dtype='string'),
"end_time": NXTime(), "user:NXuser": {
"name": DeviceDataset('Exp', 'users'),
"email":
DeviceDataset('Exp',
'localcontact'),
},
"control:NXmonitor": {
"mode": DetectorDataset('mode',
"string"),
"Monitor": DetectorDataset(
'monitorval', 'float32',
units=NXAttribute('counts',
'string')),
"preset":
DetectorDataset('preset',
'float32'),
"time": DetectorDataset(
'elapsedtime', 'float32',
units=NXAttribute('seconds',
'string')),
},
"proton_beam:NXmonitor": {
"data": DetectorDataset(
'protoncurr',
'int32',
units=NXAttribute(
'counts',
'string'))},
"beam_monitor:NXmonitor": {
"data": DetectorDataset(
'monitorval',
'int32',
units=NXAttribute('counts',
'string'))},
"area_detector2:NXdata": {
"data": NXLink(
'/entry1/ZEBRA/'
'area_detector2/data'),
'None': NXScanLink(),
},
}
}
_zebra_instrument = {
"SINQ:NXsource": {
'name': ConstDataset('SINQ @ PSI', 'string'),
'type': ConstDataset('Continuous flux spallation source',
'string')
},
'collimator:NXcollimator': {
'cex1': DeviceDataset('cex1', units=NXAttribute('degree',
'string')),
'cex2': DeviceDataset('cex2', units=NXAttribute('degree',
'string')),
},
'monochromator:NXmonochromator': {
'description': ConstDataset('PG monochromator', 'string'),
'wavelength': DeviceDataset('wavelength'),
'mexz': DeviceDataset('mexz'),
'mcvl': DeviceDataset('mcvl'),
'mgvl': DeviceDataset('mgvl'),
'mgvu': DeviceDataset('mgvu'),
'theta': DeviceDataset('moml'),
'two_theta': ConstDataset(40.2, 'float',
units=NXAttribute('degree', 'string')),
},
'detector:NXslit': {
'top': DeviceDataset('s2vt'),
'bottom': DeviceDataset('s2vb'),
'left': DeviceDataset('s2hl'),
'right': DeviceDataset('s2hr'),
'y_gap': DeviceDataset('slit2_opening'),
'x_gap': DeviceDataset('slit2_width'),
},
'pre_sample:NXslit': {
'top': DeviceDataset('s1vt'),
'bottom': DeviceDataset('s1vb'),
'left': DeviceDataset('s1hl'),
'right': DeviceDataset('s1hr'),
'y_gap': DeviceDataset('slit1_opening'),
'x_gap': DeviceDataset('slit1_width'),
},
'nose:NXattenuator': {
'horizontal_position': DeviceDataset('snhm'),
'vertical_position': DeviceDataset('snvm'),
},
'area_detector2:NXdetector': {
'name': ConstDataset('EMBL-PSD', 'string'),
'distance': DeviceDataset('detdist'),
'polar_angle': DeviceDataset('stt'),
'tilt_angle': DeviceDataset('nu'),
'x_pixel_offset': ConstDataset(128, 'float'),
'y_pixel_offset': ConstDataset(64, 'float'),
'x_pixel_size': ConstDataset(0.734, 'float',
units=NXAttribute('mm', 'string')),
'y_pixel_size': ConstDataset(1.4809, 'float',
units=NXAttribute('mm', 'string')),
'data': ImageDataset(0, 0, signal=NXAttribute(1, 'int32')),
'x': FixedArray(-95, 0.734, 256),
'y': FixedArray(-95, 1.4809, 128),
},
}
_zebra_sample = {
'UB': ArrayParam('Sample', 'ubmatrix', 'float32'),
'cell': CellArray(),
'chi': DeviceDataset('sch'),
'phi': DeviceDataset('sph'),
'rotation_angle': DeviceDataset('som'),
'h': DeviceDataset('h'),
'k': DeviceDataset('k'),
'l': DeviceDataset('l'),
'name': DeviceDataset('Sample', 'samplename'),
"hugo": NexusSampleEnv(),
}
def getTemplate(self):
zebra_template = copy.deepcopy(self._zebra_default)
zebra_template['entry1:NXentry']['ZEBRA:NXinstrument'] = \
copy.deepcopy(self._zebra_instrument)
zebra_template['entry1:NXentry']['sample:NXsample'] = \
copy.deepcopy(self._zebra_sample)
return zebra_template
|
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Code starts here
data = np.genfromtxt(path,delimiter=",",skip_header=1)
print(data)
census = np.concatenate((data, new_record))
print(census)
# --------------
#Code starts here
age = np.array(census[:,0])
print(age)
max_age = age.max()
print("The maximum age observed is:",max_age)
min_age = age.min()
print("The minimum age observed is:",min_age)
age_mean = age.mean()
print("The mean age observed is:",age_mean)
age_std = np.std(age)
print(age_std)
# --------------
#Code starts here
import numpy as np
race = np.array(census[:,2])
race_0 = np.array(census[race==0])
race_1 = np.array(census[race==1])
race_2 = np.array(census[race==2])
race_3 = np.array(census[race==3])
race_4 = np.array(census[race==4])
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
minlength = min(len_0,len_1,len_2,len_3,len_4)
# race code with the fewest observations, computed instead of hard-coded
minority_race = [len_0, len_1, len_2, len_3, len_4].index(minlength)
# --------------
#Code starts here
import numpy as np
age = np.array(census[:,0])
senior_citizens = np.array(census[age>60])
working_hours = senior_citizens[:,6]
print(working_hours)
working_hours_sum = working_hours.sum()
print(working_hours_sum)
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum/senior_citizens_len
print(avg_working_hours)
# --------------
#Code starts here
import numpy as np
educationnum = np.array(census[:,1])
high = np.array(census[educationnum>10])
low = np.array(census[educationnum<=10])
income = np.array(census[:,7])
high_income = np.array(high[:,7])
low_income = np.array(low[:,7])
avg_pay_high = high_income.mean()
print(avg_pay_high)
avg_pay_low = low_income.mean()
print(avg_pay_low)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 28 18:05:37 2020
@author: Karan
"""
import numpy as np
import matplotlib.pyplot as pyplot
import pandas as pd
dataset=pd.read_csv('train.csv')
#Filling null values
null=dataset.isnull().sum(axis=0)
null2={}
for i in range(len(list(null.index))):
if list(null)[i]>0:
null2[str(list(null.index)[i])]=list(null)[i]
keys=list(null2.keys())
dataset2=dataset[keys]
dataset2.dtypes
mode_fill = ['MasVnrArea','LotFrontage','GarageYrBlt','Electrical']
None_fill= ['MasVnrType','Alley','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinType2','BsmtQual',
'FireplaceQu','Fence','GarageCond','GarageFinish','GarageQual','GarageType','PoolQC','MiscFeature']
for i in mode_fill:
mode=dataset[i].mode()[0]
dataset[i]=dataset[i].fillna(mode)
for i in None_fill:
dataset[i]=dataset[i].fillna('None')
print(dataset.isnull().sum().sum())
X=dataset.iloc[:,:-1]
X=X.iloc[:,1:]
y=dataset.iloc[:,-1].values
y=y.reshape(-1,1)
#Creating Dummy variables for categorical variables
obj=[]
for i in X.columns:
if str(X[i].dtype)=='object':
obj.append(i)
X=pd.get_dummies(X,columns=obj,drop_first=True)
X=X.values
#Scaling values
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
sc_y=StandardScaler()
X=sc_x.fit_transform(X)
y=sc_y.fit_transform(y)
#Train-test split
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
#Linear Regression
from sklearn.linear_model import LinearRegression
regressor=LinearRegression()
regressor.fit(X_train,y_train)
y_pred=regressor.predict(X_test)
n = len(y_pred)
rmse_lr = np.linalg.norm(y_pred - y_test) / np.sqrt(n)
#Random Forest Regression
from sklearn.ensemble import RandomForestRegressor
regressor2=RandomForestRegressor(n_estimators=500)
regressor2.fit(X_train,y_train)
y_pred2=regressor2.predict(X_test)
n = len(y_pred2)
# ravel y_test so the 1-d predictions and the (n, 1) target do not broadcast to an (n, n) matrix
rmse_rf = np.linalg.norm(y_pred2 - y_test.ravel()) / np.sqrt(n)
#Decision Tree
from sklearn.tree import DecisionTreeRegressor
regressor3=DecisionTreeRegressor()
regressor3.fit(X_train,y_train)
y_pred3=regressor3.predict(X_test)
n = len(y_pred3)
rmse_dt = np.linalg.norm(y_pred3 - y_test.ravel()) / np.sqrt(n)
#Ensemble Methods
#1. AdaBoost Regressor
from sklearn.ensemble import AdaBoostRegressor
regressor4=AdaBoostRegressor(regressor3)
regressor4.fit(X_train,y_train)
y_pred4=regressor4.predict(X_test)
n = len(y_pred4)
rmse_abr = np.linalg.norm(y_pred4 - y_test.ravel()) / np.sqrt(n)
#2. Gradient Boosting Regressor
from sklearn.ensemble import GradientBoostingRegressor
regressor5=GradientBoostingRegressor(learning_rate=0.001,n_estimators=400)
regressor5.fit(X_train,y_train)
y_pred5=regressor5.predict(X_test)
n = len(y_pred5)
rmse_gb = np.linalg.norm(y_pred5 - y_test.ravel()) / np.sqrt(n)
#Principal Component Analysis to reduce dimensionality in order to apply SVM efficiently
from sklearn.decomposition import PCA
#Calculating ratio of variance covered for 2 to 150 number of features
explained_variance=dict()
for i in range(2,150):
pca=PCA(n_components=i)
X_train_=pca.fit_transform(X_train)
X_test_=pca.transform(X_test)
explained_variance[i]=pca.explained_variance_ratio_.sum()
pca=PCA(n_components=100)
X_train_=pca.fit_transform(X_train)
X_test_=pca.transform(X_test)
#Support Vector Regressor
from sklearn.svm import SVR
regressor=SVR(kernel='rbf',C=0.5)
regressor.fit(X_train_,y_train)
y_pred6=regressor.predict(X_test_)
n = len(y_pred6)
rmse_svr = np.linalg.norm(y_pred6 - y_test.ravel()) / np.sqrt(n)
#Unscale the predictions to acquire actual predicted house prices
y_actual=sc_y.inverse_transform(y_pred5)
|
# mailstat.analyze
# Analysis module for the email analysis project
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Sun Dec 29 23:45:58 2013 -0600
#
# Copyright (C) 2013 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: __init__.py [] [email protected] $
"""
Analysis module for the email analysis project
"""
##########################################################################
## Imports
##########################################################################
from copy import deepcopy
from mailstat.metric import *
from mailstat.reader import *
from mailstat.exceptions import *
##########################################################################
## Module Constants
##########################################################################
METRICS = [DomainDistribution,]
##########################################################################
## Analysis Harness
##########################################################################
class Analysis(object):
"""
The analysis and data processing harness
"""
def __init__(self, csvfile, **kwargs):
self.csvfile = csvfile
self.metrics = kwargs.get('metrics', METRICS)
@property
def metrics(self):
return self._metrics
@metrics.setter
def metrics(self, metrics):
self._metrics = []
for metric in metrics:
self._metrics.append(metric())
@property
def dataset(self):
if not hasattr(self, '_dataset'):
self._dataset = M3Reader(self.csvfile)
return self._dataset
def before_analysis(self):
for metric in self.metrics:
metric.preprocess()
def after_analysis(self):
for metric in self.metrics:
metric.postprocess()
def analyze(self):
self.before_analysis()
for row in self.dataset:
for metric in self.metrics:
metric.process(deepcopy(row))
self.after_analysis()
def serialize(self):
"""
TODO: Check analysis state
"""
return dict((m.get_name(), m.get_value()) for m in self.metrics)
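# A brief usage sketch of the harness above; "mailbox_export.csv" is a
# placeholder path assumed to be a CSV export that M3Reader understands.
def _example_run(csvfile="mailbox_export.csv"):
    analysis = Analysis(csvfile)
    analysis.analyze()
    return analysis.serialize()   # {metric name: metric value, ...}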
|
import numpy as np
import os
import xml.etree.ElementTree as ET
import pickle
import json
from os.path import join, dirname
def parse_voc_annotation(ann_dir, img_dir, cache_name, labels=[]):
if os.path.exists(cache_name):
with open(cache_name, 'rb') as handle:
cache = pickle.load(handle)
all_insts, seen_labels = cache['all_insts'], cache['seen_labels']
else:
all_insts = []
seen_labels = {}
for ann in sorted(os.listdir(ann_dir)):
img = {'object':[]}
if ann.endswith('.xml'):
print('annotation in xml')
try:
tree = ET.parse(ann_dir + ann)
except Exception as e:
print(e)
print('Ignore this bad annotation: ' + ann_dir + ann)
continue
for elem in tree.iter():
if 'filename' in elem.tag:
img['filename'] = img_dir + elem.text
if 'width' in elem.tag:
img['width'] = int(elem.text)
if 'height' in elem.tag:
img['height'] = int(elem.text)
if 'object' in elem.tag or 'part' in elem.tag:
obj = {}
for attr in list(elem):
if 'name' in attr.tag:
obj['name'] = attr.text
if obj['name'] in seen_labels:
seen_labels[obj['name']] += 1
else:
seen_labels[obj['name']] = 1
if len(labels) > 0 and obj['name'] not in labels:
break
else:
img['object'] += [obj]
if 'bndbox' in attr.tag:
for dim in list(attr):
if 'xmin' in dim.tag:
obj['xmin'] = int(round(float(dim.text)))
if 'ymin' in dim.tag:
obj['ymin'] = int(round(float(dim.text)))
if 'xmax' in dim.tag:
obj['xmax'] = int(round(float(dim.text)))
if 'ymax' in dim.tag:
obj['ymax'] = int(round(float(dim.text)))
elif ann.endswith('.json'):
print('annotation in json')
annot_path = join(ann_dir, ann)
with open(annot_path) as f:
annot = json.load(f)
img = {}
#parent_dir = dirname(ann_dir)
filename = join(img_dir, annot['filename'])
print('filename', filename)
#img['filename'] = annot['filename']
img['filename'] = filename
img['width'] = annot['size']['width']
img['height'] = annot['size']['height']
img['object'] = []
for item in annot['object']:
if item['name'] in seen_labels:
seen_labels[item['name']] += 1
else:
seen_labels[item['name']] = 1
if len(labels) > 0 and item['name'] not in labels:
continue  # skip this object but keep scanning the remaining ones
obj = {}
obj['name'] = item['name']
obj['xmin'] = item['bndbox']['xmin']
obj['xmax'] = item['bndbox']['xmax']
obj['ymin'] = item['bndbox']['ymin']
obj['ymax'] = item['bndbox']['ymax']
img['object'].append(obj)
if len(img['object']) > 0:
all_insts += [img]
cache = {'all_insts': all_insts, 'seen_labels': seen_labels}
with open(cache_name, 'wb') as handle:
pickle.dump(cache, handle, protocol=pickle.HIGHEST_PROTOCOL)
return all_insts, seen_labels
|
"""
This is the Django template system.
How it works:
The Lexer.tokenize() method converts a template string (i.e., a string
containing markup with custom template tags) to tokens, which can be either
plain text (TokenType.TEXT), variables (TokenType.VAR), or block statements
(TokenType.BLOCK).
The Parser() class takes a list of tokens in its constructor, and its parse()
method returns a compiled template -- which is, under the hood, a list of
Node objects.
Each Node is responsible for creating some sort of output -- e.g. simple text
(TextNode), variable values in a given context (VariableNode), results of basic
logic (IfNode), results of looping (ForNode), or anything else. The core Node
types are TextNode, VariableNode, IfNode and ForNode, but plugin modules can
define their own custom node types.
Each Node has a render() method, which takes a Context and returns a string of
the rendered node. For example, the render() method of a Variable Node returns
the variable's value as a string. The render() method of a ForNode returns the
rendered output of whatever was inside the loop, recursively.
The Template class is a convenient wrapper that takes care of template
compilation and rendering.
"""
|
"""
AUTHOR: Juanjo
CREATION DATE: 17/01/2020
"""
|
import datetime
import numpy as np
from typing import Union
def snapshot_maker(param_dict, dir:str):
# record a snapshot of the <.pth> model information.
with open(dir, 'w') as file:
for key, value in param_dict.items():
file.write(key + ' : ' + str(value) + '\n')
time_now = datetime.datetime.now()
file.write('record time : ' + time_now.strftime('%Y-%m-%d %H:%M:%S'))
def write_line(dict_in:dict, dir:str):
# record loss in real time.
import os
import torch
os.makedirs(os.path.dirname(dir), exist_ok=True)
with open(dir, 'a') as file:
for key, value in dict_in.items():
if isinstance(key, torch.Tensor):
key = float(key)
if isinstance(value, torch.Tensor):
value = float(value)
if isinstance(key, float):
key = round(key, 4)
if isinstance(value, float):
value = round(value, 4)
file.write(str(key) + ' : ' + str(value) + '\n')
def cuda2np(tensor) -> np.ndarray:
# cuda tensor -> cpu numpy
arr = tensor.cpu()
arr = arr.detach().numpy()
return arr
def tensorview(Intensor, batch_idx):
# show target tensor
arr = cuda2np(Intensor)
print(arr[batch_idx])
def imgstore(Intensor, nums:int, save_dir:str, epoch:Union[int, str], filename='', cls='pred'):
# function for saving prediction image.
import os
import cv2
img_np = cuda2np(Intensor)
img_np = np.transpose(img_np, (0, 2, 3, 1))
os.makedirs(save_dir, exist_ok=True)
img_list = []
for i, img in enumerate(img_np):
if i == nums:
break
img_list.append(img)
if isinstance(filename, str): # stores only one image, batch == 1
if isinstance(epoch, str):
cv2.imwrite(os.path.join(save_dir, cls + '_' + epoch + '_[' + filename + '].png'), img_list[0])
else:
cv2.imwrite(os.path.join(save_dir, cls+'_'+'epoch_'+str(epoch)+'_['+filename+'].png'), img_list[0])
elif isinstance(filename, list): # stores <nums:int> images, batch > 1
for idx, unit in enumerate(img_list):
if isinstance(epoch, str):
cv2.imwrite(os.path.join(save_dir, cls + '_' + epoch + '_[' + filename[idx] + '].png'), unit)
print(f"{os.path.join(save_dir, cls+'_'+epoch+'_['+filename[idx]+'].png')} saved.")
else:
cv2.imwrite(os.path.join(save_dir, cls+'_'+'epoch_'+str(epoch)+'_['+filename[idx]+'].png'), unit)
|
import sys, os
from os.path import abspath, join, isdir, isfile, exists, dirname
import logging
from .tree import AstProvider
from .module import ModuleProvider, PackageResolver
from .watcher import DummyMonitor
from .calls import CallDB
class Project(object):
def __init__(self, root, config=None, monitor=None):
self.root = root
self.config = config or {}
self._refresh_paths()
self.monitor = monitor or DummyMonitor()
self.ast_provider = AstProvider()
self.module_providers = {
'default':ModuleProvider()
}
self.package_resolver = PackageResolver()
self.docstring_processors = []
self.registered_hooks = set()
for h in self.config.get('hooks', []):
self.register_hook(h)
self.calldb = CallDB(self)
def _refresh_paths(self):
self.sources = []
self.paths = []
if 'sources' in self.config:
for p in self.config['sources']:
p = join(abspath(self.root), p)
self.paths.append(p)
self.sources.append(p)
else:
self.paths.append(abspath(self.root))
self.sources.append(abspath(self.root))
for p in self.config.get('libs', []):
self.paths.append(p)
self.paths.extend(sys.path)
def get_module(self, name, filename=None):
assert name
ctx, sep, name = name.partition(':')
if not sep:
ctx, name = 'default', ctx
if filename:
return self.module_providers[ctx].get(self, name, filename)
else:
return self.module_providers[ctx].get(self, name)
def get_ast(self, module):
return self.ast_provider.get(module)
def get_possible_imports(self, start, filename=None):
result = set()
if not start:
paths = self.paths
result.update(r for r, m in sys.modules.items() if m)
else:
m = self.get_module(start, filename)
sub_package_prefix = m.module.__name__ + '.'
for name, module in sys.modules.items():
if module and name.startswith(sub_package_prefix):
result.add(name[len(sub_package_prefix):])
try:
paths = m.module.__path__
except AttributeError:
paths = []
for path in paths:
if not exists(path) or not isdir(path):
continue
path = abspath(path)
for name in os.listdir(path):
if name == '__init__.py':
continue
filename = join(path, name)
if isdir(filename):
if isfile(join(filename, '__init__.py')):
result.add(name)
else:
if any(map(name.endswith, ('.py', '.so'))):
result.add(name.rpartition('.')[0])
return result
def register_hook(self, name):
if name not in self.registered_hooks:
try:
__import__(name)
sys.modules[name].init(self)
except:
logging.getLogger(__name__).exception('[%s] hook register failed' % name)
else:
self.registered_hooks.add(name)
def add_docstring_processor(self, processor):
self.docstring_processors.append(processor)
def add_module_provider(self, ctx, provider):
self.module_providers[ctx] = provider
def add_override_processor(self, override):
self.module_providers['default'].add_override(override)
def process_docstring(self, docstring, obj):
for p in self.docstring_processors:
result = p(docstring, obj)
if result is not None:
return result
return obj
def get_filename(self, name, rel=None):
if name.startswith('/'):
return join(self.root, name[1:])
return join(dirname(rel), name)
|
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import datetime
import os
import random
import argparse
import sys
import numpy as np
import math
from os import listdir
from os.path import isfile, join
class Dataset:
def __init__(self, **kwargs):
self.dic = {}
self.distance = []
self.pair={}
if "text" in kwargs:
for line in kwargs["text"].split("\n"):
tokens=line.split( )
if len(tokens)>=2:
if tokens[1] in self.dic:
self.dic[tokens[1]].append(line)
else:
self.dic[tokens[1]] = [line]
for d in self.dic:
self.pair[d]=len(self.dic[d])
def output(data,address):
for d in data.dic:
index=d.split(":")
save_address = address + "/"
if not os.path.exists(save_address):
os.makedirs(save_address)
name=index[0]+index[1]+".txt"
log = open(save_address+name, "w")
print("write:"+save_address+name)
for line in data.dic[d]:
log.write(line)
log.write("\n")
log.close()
save_address = address + "/"
if not os.path.exists(save_address):
os.makedirs(save_address)
save_address = save_address + "info.txt"
print("write:" + save_address )
log = open(save_address, "w")
for d in data.pair:
log.write(d+" "+str(data.pair[d])+"\n")
log.close()
save_address = address + "/"
if not os.path.exists(save_address):
os.makedirs(save_address)
save_address = save_address + "info_sort.txt"
print("write:" + save_address)
log = open(save_address, "w")
pair_sort=sorted(data.pair.items(), key=lambda kv: (kv[1], kv[0]))
for d in pair_sort:
print("----------------------")
print(d)
log.write(d[0] + " " + str(d[1]) + "\n")
log.close()
def readData(path):
f = open(path, "r")
instance = f.read()
data=Dataset(text=instance)
f.close()
return data
def operate(args):
data=readData(args.read)
output(data,args.save)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
argparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
argparser.add_argument('-read', default='Data/original_data.txt',type=str)
argparser.add_argument('-save', help="save_result", default='Data',type=str)
args = argparser.parse_args()
operate(args)
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
from django.shortcuts import render
from django.db import transaction, IntegrityError
import pycountry
from apiv1.tasks import fetch_mfi_client
from haedrian.forms import NewUserForm, EmailUserForm
from haedrian.models import Wallet, UserData
def index(request):
return render(request, 'index.html')
@transaction.atomic
def _create_account(user_data):
"""
API friendly INTERNAL USE ONLY account registration end point
:param user_data - Dict that contains all the fields that are expected for the user to fill out.
Required keys in the dict are
["username", "email", "password1", "password2", "phone", "country"]
Optional fields are
['organization', 'org_id']
:returns True if the account creation was successful
"""
data_form = NewUserForm(user_data)
user_form = EmailUserForm(user_data)
if user_form.is_valid() and data_form.is_valid():
django_user = user_form.save(commit=False)
django_user.first_name = user_data.get('first_name', data_form.first_name)
django_user.last_name = user_data.get('last_name', data_form.last_name)
django_user.save()
haedrian_user = data_form.save(commit=False)
haedrian_user.user = django_user
haedrian_user.credit_score = 0
_country = pycountry.countries.get(alpha3=haedrian_user.country.alpha3)
haedrian_user.default_currency = pycountry.currencies.get(numeric=_country.numeric).letter
# TODO: fix what type of wallets get created rather than just all coins_ph
wallet = Wallet(user=django_user, type=Wallet.COINS_PH)
wallet.save()
try:
haedrian_user.save()
        except IntegrityError as e:
            return {'success': False, 'error': str(e)}
# TODO: send verification email or something
return {'success': True, 'data_form': data_form, 'user_form': user_form}
else:
# user has an account created by an external app so update instead of create
user = UserData.objects.filter(org_id=user_data['org_id'])
if len(user) == 1:
u = user[0]
# only update the user account if the person has a placeholder name
if u.user.username.startswith('placeholder') and len(u.user.username) == (len("placeholder") + 10):
u.user.username = user_form.cleaned_data['username']
u.user.set_password(user_form.cleaned_data['password1'])
u.user.email = user_form.cleaned_data['email']
u.user.save()
u.phone = data_form.cleaned_data['phone']
u.country = data_form.cleaned_data['country']
u.save()
return {'success': True, 'data_form': data_form, 'user_form': user_form}
error = 'Signup: {} - {}'.format(user_form.errors, data_form.errors['__all__'].as_text())
return {
'error': error,
'success': False
}
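# Illustrative call (a sketch only; the keys mirror the docstring above and the
# values are made up):
#
#   result = _create_account({
#       'username': 'jdoe',
#       'email': '[email protected]',
#       'password1': 's3cret-pass',
#       'password2': 's3cret-pass',
#       'phone': '+639171234567',
#       'country': 'PH',
#   })
#   if not result['success']:
#       print(result['error'])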
from django.contrib.auth import views
def login(request, *args, **kwargs):
if request.method == 'POST':
if not request.POST.get('remember_me', None):
request.session.set_expiry(0)
return views.login(request, *args, **kwargs)
|
# import osm2gmns as og
# net = og.getNetFromOSMFile('map.osm', network_type=('railway', 'auto', 'bike', 'walk'), POIs=True, default_lanes=True,default_speed=True)
# og.connectPOIWithNet(net)
# og.generateNodeActivityInfo(net)
# og.outputNetToCSV(net)
import grid2demand_0525a as gd
import os
os.chdir('./Norfolk_VA')
"Step 1: Read Input Network Data"
gd.ReadNetworkFiles()
# gd.ReadExternalPOI()
# users can give external customized_poi.csv
"Step 2: Partition Grid into cells"
gd.PartitionGrid(number_of_x_blocks=None, number_of_y_blocks=None, cell_width=500, cell_height=500, external_poi=True)
# users can customize number of grid cells or cell's width and height
# Also partition grid for external poi
"Step 3: Simplify the network for path4gmns"
gd.SimplifyNet(link_type_set = {'primary', 'secondary'}, criteria=10)
# users can customize 1) the link types in the simplified network
# and 2) maximum number of poi nodes in each zone by area
# we need to use the simplified network to define trip generation for boundary nodes
gd.GeneratePOIConnector()
# connect useful POI nodes to the network
"Step 4: Get Trip Generation Rates of Each Land Use Type"
gd.GetPoiTripRate()
# users can customize poi_trip_rate.csv for each land use type
"Step 5: Define Production/Attraction Value of Each Node According to POI Type and Activity Purpose"
gd.GetNodeDemand(residential_generation=200, boundary_generation=5000)
# users can customize the values of trip generation for residential nodes and boundary nodes
"Step 6: Calculate Zone-to-zone Accessibility Matrix by Centroid-to-centroid Straight Distance"
gd.ProduceAccessMatrix()
"Step 7: Generate Time-dependent Demand and Agent by Purpose-mode"
gd.GenerateDemand()
|
"""
Scaling tools
===================
"""
import numpy
def linearscaling(x, new_min, new_max, old_min=None, old_max=None, axis=None):
"""
Linearly rescale input from its original range to a new range.
:param scalar-or-array x: scalar or arrays of scalars in ``[old_min, old_max]`` of shape ``(n, *shape)``
:param scalar-or-array new_min: scalar or array of shape ``(*shape,)``
:param scalar-or-array new_max: scalar or array of shape ``(*shape,)``
:param scalar-or-array? old_min: (``default=x.min()``)
:param scalar-or-array? old_max: (``default=x.max()``)
:param int? axis: (``default=None``)
:return: scalar or array of scalars in ``[new_min, new_max]`` of shape ``(n, *shape)``
Example
_______
>>> linearscaling(0, -10, 10, 0, 1)
-10.0
    When the original range is not given, it defaults to the range of the input values.
>>> x = numpy.arange(0, 1, 0.1)
>>> linearscaling(x, 0, 10)
array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> linearscaling(x, 0, 10, 0, 2)
array([0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. , 4.5])
    Linear scaling can be performed on different series of data with different ranges, provided that the new minima/maxima are specified for each series.
>>> new_min = numpy.array([0, 0])
>>> new_max = numpy.array([1, 100])
>>> old_min = numpy.array([0, 0])
>>> old_max = numpy.array([10, 10])
>>> x = numpy.arange(0, 1, 0.1).reshape(5, 2)
>>> x
array([[0. , 0.1],
[0.2, 0.3],
[0.4, 0.5],
[0.6, 0.7],
[0.8, 0.9]])
>>> linearscaling(x, new_min, new_max, old_min, old_max)
array([[ 0.1, 10. ],
[ 0.2, 20. ],
[ 0.3, 30. ],
[ 0.4, 40. ],
[ 0.5, 50. ]])
"""
if not isinstance(x, numpy.ndarray):
# meant for scalars
assert old_min is not None
assert old_max is not None
assert old_min <= x <= old_max
else:
if x.size == 0:
return x
# set default old to current
if old_min is None:
old_min = x.min(axis=axis)
if old_max is None:
old_max = x.max(axis=axis)
if axis is not None:
old_min = numpy.expand_dims(old_min, axis)
if axis is not None:
old_max = numpy.expand_dims(old_max, axis)
tol = abs(x.max()) * 1e-06 # for float32 precision
assert (old_min - tol <= x).all(axis=None) & (x <= old_max + tol).all(axis=None), (
x.min(), x.max(), old_max, old_min)
# clipping because we allow a tolerance in the previous assert
return numpy.clip(
new_min + (x - old_min) * (new_max - new_min) / (old_max - old_min + 1e-32),
a_min=numpy.minimum(new_min, new_max), # we support new_max < new_min, for inversions
a_max=numpy.maximum(new_max, new_min)
)
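# Example (illustrative): per-column rescaling via the ``axis`` argument, so that
# each column of ``x`` is mapped from its own observed range onto [0, 1]:
#
#   x = numpy.random.rand(100, 3) * numpy.array([1.0, 10.0, 100.0])
#   scaled = linearscaling(x, 0.0, 1.0, axis=0)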
|
from Configs.app_config import app, db
from flask import render_template, request, flash, redirect
from Classes.Asset_Class import AssetsData
from Classes.People_Class import People
from Classes.Roles import Roles
customer_name = AssetsData.query.filter_by(name='test').all()
@app.route('/', methods=['GET', 'POST'])
def welcome():
return render_template('welcome.html')
@app.route('/All', methods=['GET', 'POST'])
def all_assets():
All_Assets = AssetsData.query.all()
return render_template('AllAsset.html', All_Assets=All_Assets)
@app.route('/Add', methods=['GET', 'POST'])
def add_assets():
if request.method == 'GET':
return render_template('AddAsset.html')
if request.form:
print(request.form)
if request.method == 'POST':
if not request.form['id'] or not request.form['name'] or not request.form['location']:
flash('Please enter all the fields', 'error')
else:
assets = AssetsData(request.form['id'], request.form['name'],
request.form['location'], request.form['dimensions'], request.form['history_id'],
request.form['status'],request.form['Actual_Value'], request.form['Selling_price'],
request.form['Payment_mode'], request.form['Pay_duration'],
request.form['Broker_charges'])
db.session.add(assets)
db.session.commit()
flash('Record was successfully added')
return redirect("All")
return render_template('AddAsset.html')
@app.route('/add_people', methods=['GET', 'POST'])
def add_people():
if request.method == 'POST':
if not request.form['id'] or not request.form['name']:
flash('Please enter all the fields', 'error')
else:
people = People(request.form['id'], request.form['name'], request.form['role'],
request.form['gender'], request.form['age'], request.form['mobile_num'],
request.form['alternate_num'], request.form['address'])
db.session.add(people)
db.session.commit()
flash('Record was successfully added')
return render_template('AddPeople.html')
@app.route('/update/<update_id>', methods=['GET', 'POST'])
def update(update_id):
update_asset = AssetsData.query.filter_by(id=update_id).first()
if request.method == 'GET':
return render_template('UpdateAsset.html', update_id=update_id, update_asset=update_asset)
elif request.method == 'POST':
if not request.form['id'] or not request.form['name']:
flash('Name cannot be blank, please enter name', 'error')
else:
update_assets = AssetsData(request.form['id'], request.form['name'],
request.form['location'], request.form['dimensions'], request.form['history_id'],
request.form['status'],request.form['Actual_Value'], request.form['Selling_price'],
request.form['Payment_mode'], request.form['Pay_duration'],
request.form['Broker_charges'])
update_asset.name = update_assets.name
update_asset.location = update_assets.location
update_asset.dimensions = update_assets.dimensions
update_asset.status = update_assets.status
update_asset.Actual_Value = update_assets.Actual_Value
update_asset.Selling_price = update_assets.Selling_price
update_asset.Payment_mode = update_assets.Payment_mode
update_asset.Pay_duration = update_assets.Pay_duration
update_asset.Broker_charges = update_assets.Broker_charges
db.session.commit()
flash('Record was successfully updated')
return redirect("All")
return render_template('UpdateAsset.html', update_id=update_id, update_asset=update_asset)
@app.route('/delete/<delete_id>', methods=['GET', 'POST'])
def delete(delete_id):
delete_asset = AssetsData.query.filter_by(id=delete_id).first()
db.session.delete(delete_asset)
db.session.commit()
flash('Record was successfully deleted')
return redirect("All")
if __name__ == '__main__':
app.run(debug=True) |
from django.conf.urls import patterns, url
from website import views
from website import feeds
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'rss/$', feeds.UltimasPublicacoes()),
url(r'^index/$', views.index, name='index'),
url(r'^empresa/(?P<slug>\S+)$', views.empresa, name='empresa'),
url(r'^depoimentos/$', views.depoimentos, name='depoimentos'),
url(r'^depoimento/$', views.depoimento, name='depoimento'),
url(r'^agradece-depoimento/$',
views.agradece_depoimento, name='agradece-depoimento'),
url(r'^matriculas/$', views.matriculas, name='matriculas'),
url(r'^uniforme/$', views.uniforme, name='uniforme'),
url(r'^determinacoes/$', views.determinacoes, name='determinacoes'),
url(r'^material-escolar/$', views.material_escolar,
name='material_escolar'),
url(r'^cardapio/$', views.cardapio, name='cardapio'),
url(r'^avisos-importantes/$', views.avisos_importantes,
name='avisos_importantes'),
url(r'^eventos/$', views.eventos, name='eventos'),
url(r'^noticias/$', views.noticias, name='noticias'),
url(r'^dicas/$', views.dicas, name='dicas'),
url(r'^dica/(?P<slug>\S+)$', views.publicacao, name='dica'),
url(r'^materiais-apoio/$', views.materiais_apoio, name='materiais_apoio'),
url(r'^material_apoio/(?P<slug>\S+)$', views.material_apoio,
name='material_apoio'),
url(r'^responsabilidade-social/$', views.responsabilidade_social,
name='responsabilidade_social'),
url(r'^publicacao/(?P<slug>\S+)$', views.publicacao, name='publicacao'),
url(r'^noticia/(?P<slug>\S+)$', views.publicacao, name='noticia'),
url(r'^evento/(?P<slug>\S+)$', views.publicacao, name='evento'),
url(r'^aviso/(?P<slug>\S+)$', views.publicacao, name='aviso'),
url(r'^galeria-fotos/$', views.galerias_foto, name='galerias_foto'),
url(r'^foto/(?P<slug>\S+)$', views.foto, name='foto'),
url(r'^galeria-videos/$', views.galerias_video, name='galerias_video'),
url(r'^video/(?P<slug>\S+)$', views.video, name='video'),
url(r'^servicos/(?P<slug>\S+)$', views.servicos, name='servicos'),
url(r'^downloads/(?P<slug>\S+)$', views.downloads, name='downloads'),
url(r'^contato/(?P<slug>\S+)$', views.contato, name='contato'),
)
|
import argparse
import numpy as np
import wandb
import torch
import csv
from torch import nn
from typing import List, Tuple
from typing_extensions import Literal
from perceptual_advex.utilities import add_dataset_model_arguments, \
get_dataset_model
from perceptual_advex.distances import LPIPSDistance, LinfDistance, SSIM, \
L2Distance
from perceptual_advex.models import FeatureModel
from perceptual_advex.perceptual_attacks import get_lpips_model
from perceptual_advex.perceptual_attacks import *
from perceptual_advex.attacks import *
from perceptual_advex.ci_attacks import *
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Distance measure analysis')
add_dataset_model_arguments(parser, include_checkpoint=True)
parser.add_argument('--batch_size', type=int, default=50)
parser.add_argument('--num_batches', type=int, required=False,
help='number of batches (default entire dataset)')
parser.add_argument('--per_example', action='store_true', default=False,
help='output per-example accuracy')
parser.add_argument('--output', type=str, help='output CSV')
parser.add_argument('attacks', metavar='attack', type=str, nargs='+',
help='attack names')
args = parser.parse_args()
wandb.init(config=args)
dist_models: List[Tuple[str, nn.Module]] = [
('l2', L2Distance()),
('linf', LinfDistance()),
('ssim', SSIM()),
]
dataset, model = get_dataset_model(args)
if not isinstance(model, FeatureModel):
raise TypeError('model must be a FeatureModel')
dist_models.append(('lpips_self', LPIPSDistance(model)))
alexnet_model_name: Literal['alexnet_cifar', 'alexnet']
if args.dataset.startswith('cifar'):
alexnet_model_name = 'alexnet_cifar'
else:
alexnet_model_name = 'alexnet'
dist_models.append((
'lpips_alexnet',
LPIPSDistance(get_lpips_model(alexnet_model_name, model)),
))
for _, dist_model in dist_models:
dist_model.eval()
if torch.cuda.is_available():
dist_model.cuda()
_, val_loader = dataset.make_loaders(1, args.batch_size, only_val=True)
model.eval()
if torch.cuda.is_available():
model.cuda()
attack_names: List[str] = args.attacks
with open(args.output, 'w') as out_file:
out_csv = csv.writer(out_file)
out_csv.writerow([
attack_name for attack_name in attack_names
for _ in dist_models
])
out_csv.writerow([
dist_model_name for _ in attack_names
for dist_model_name, _ in dist_models
])
for batch_index, (inputs, labels) in enumerate(val_loader):
if (
args.num_batches is not None and
batch_index >= args.num_batches
):
break
print(f'BATCH\t{batch_index:05d}')
if torch.cuda.is_available():
inputs = inputs.cuda()
labels = labels.cuda()
batch_distances = np.zeros((
inputs.shape[0],
len(attack_names) * len(dist_models),
))
for attack_index, attack_name in enumerate(attack_names):
print(f'ATTACK {attack_name}')
attack = eval(attack_name)
adv_inputs = attack(inputs, labels)
with torch.no_grad():
for dist_model_index, (dist_name, dist_model) in \
enumerate(dist_models):
batch_distances[
:,
attack_index * len(dist_models) + dist_model_index
] = dist_model(
inputs,
adv_inputs,
).detach().cpu().numpy()
wandb.log({f'val-{attack_name}-dist/{dist_name}':
wandb.Histogram(dist_model(
inputs,
adv_inputs,
).detach().cpu().numpy())})
for row in batch_distances:
out_csv.writerow(row.tolist())
|
#!/usr/bin/env python
# coding: utf8
from __future__ import division
import logging
import numpy as np
logger = logging.getLogger(__name__)
def basic_test(ndvi, ndsi, swir2, tirs1):
"""Fundamental test to identify Potential Cloud Pixels (PCPs)
Equation 1 (Zhu and Woodcock, 2012)
Note: all input arrays must be the same shape
Parameters
----------
ndvi: ndarray
ndsi: ndarray
swir2: ndarray
Shortwave Infrared Band TOA reflectance
Band 7 in Landsat 8, ~2.2 µm
tirs1: ndarray
Thermal band brightness temperature
Band 10 in Landsat 8, ~11 µm
        units are degrees Celsius
Output
------
ndarray: boolean
"""
# Thresholds
th_ndsi = 0.8 # index
th_ndvi = 0.8 # index
    th_tirs1 = 27.0  # degrees Celsius
th_swir2 = 0.03 # toa
return ((swir2 > th_swir2) &
(tirs1 < th_tirs1) &
(ndsi < th_ndsi) &
(ndvi < th_ndvi))
def whiteness_index(blue, green, red):
"""Index of "Whiteness" based on visible bands.
Parameters
----------
blue: ndarray
green: ndarray
red: ndarray
Output
------
ndarray:
whiteness index
"""
mean_vis = (blue + green + red) / 3
blue_absdiff = np.absolute((blue - mean_vis) / mean_vis)
green_absdiff = np.absolute((green - mean_vis) / mean_vis)
red_absdiff = np.absolute((red - mean_vis) / mean_vis)
return blue_absdiff + green_absdiff + red_absdiff
def whiteness_test(blue, green, red):
"""Whiteness test
Clouds appear white due to their "flat" reflectance in the visible bands
Equation 2 (Zhu and Woodcock, 2012)
Parameters
----------
blue: ndarray
green: ndarray
red: ndarray
Output
------
ndarray: boolean
"""
whiteness_threshold = 0.7
return whiteness_index(blue, green, red) < whiteness_threshold
def hot_test(blue, red):
"""Haze Optimized Transformation (HOT) test
Equation 3 (Zhu and Woodcock, 2012)
Based on the premise that the visible bands for most land surfaces
are highly correlated, but the spectral response to haze and thin cloud
is different between the blue and red wavelengths.
Zhang et al. (2002)
Parameters
----------
blue: ndarray
red: ndarray
Output
------
ndarray: boolean
"""
thres = 0.08
return blue - (0.5 * red) - thres > 0.0
def nirswir_test(nir, swir1):
"""Spectral test to exclude bright rock and desert
see (Irish, 2000)
Equation 4 (Zhu and Woodcock, 2012)
Note that Zhu and Woodcock 2015 refer to this as the "B4B5" test
due to the Landsat ETM+ band designations. In Landsat 8 OLI,
these are bands 5 and 6.
Parameters
----------
nir: ndarray
swir1: ndarray
Output
------
ndarray: boolean
"""
th_ratio = 0.75
return (nir / swir1) > th_ratio
def cirrus_test(cirrus):
"""Cirrus TOA test, see (Zhu and Woodcock, 2015)
The threshold is derived from (Wilson & Oreopoulos, 2013)
Parameters
----------
cirrus: ndarray
Output
------
ndarray: boolean
"""
th_cirrus = 0.01
return cirrus > th_cirrus
def water_test(ndvi, nir):
"""Water or Land?
Equation 5 (Zhu and Woodcock, 2012)
Parameters
----------
ndvi: ndarray
nir: ndarray
Output
------
ndarray: boolean
"""
th_ndvi_A = 0.01
th_nir_A = 0.11
th_ndvi_B = 0.1
th_nir_B = 0.05
return (((ndvi < th_ndvi_A) & (nir < th_nir_A)) |
((ndvi < th_ndvi_B) & (nir < th_nir_B)))
def potential_cloud_pixels(ndvi, ndsi, blue, green, red, nir,
swir1, swir2, cirrus, tirs1):
"""Determine potential cloud pixels (PCPs)
    Combine basic spectral tests to get a preliminary cloud mask
First pass, section 3.1.1 in Zhu and Woodcock 2012
Equation 6 (Zhu and Woodcock, 2012)
Parameters
----------
ndvi: ndarray
ndsi: ndarray
blue: ndarray
green: ndarray
red: ndarray
nir: ndarray
swir1: ndarray
swir2: ndarray
cirrus: ndarray
tirs1: ndarray
Output
------
ndarray:
potential cloud mask, boolean
"""
eq1 = basic_test(ndvi, ndsi, swir2, tirs1)
eq2 = whiteness_test(blue, green, red)
eq3 = hot_test(blue, red)
eq4 = nirswir_test(nir, swir1)
cir = cirrus_test(cirrus)
return (eq1 & eq2 & eq3 & eq4) | cir
def temp_water(is_water, swir2, tirs1):
"""Use water to mask tirs and find 82.5 pctile
Equation 7 and 8 (Zhu and Woodcock, 2012)
Parameters
----------
is_water: ndarray, boolean
water mask, water is True, land is False
swir2: ndarray
tirs1: ndarray
Output
------
float:
82.5th percentile temperature over water
"""
# eq7
th_swir2 = 0.03
clearsky_water = is_water & (swir2 < th_swir2)
# eq8
clear_water_temp = tirs1.copy()
clear_water_temp[~clearsky_water] = np.nan
return np.nanpercentile(clear_water_temp, 82.5)
def water_temp_prob(water_temp, tirs):
"""Temperature probability for water
Equation 9 (Zhu and Woodcock, 2012)
Parameters
----------
    water_temp: float
        82.5th percentile temperature over water
    tirs: ndarray
        Thermal band brightness temperature
Output
------
ndarray:
probability of cloud over water based on temperature
"""
temp_const = 4.0 # degrees C
return (water_temp - tirs) / temp_const
def brightness_prob(nir, clip=True):
"""The brightest water may have Band 5 reflectance
as high as 0.11
Equation 10 (Zhu and Woodcock, 2012)
Parameters
----------
nir: ndarray
Output
------
ndarray:
brightness probability, constrained 0..1
"""
thresh = 0.11
bp = np.minimum(thresh, nir) / thresh
if clip:
bp[bp > 1] = 1
bp[bp < 0] = 0
return bp
# Eq 11, water_cloud_prob
# wCloud_Prob = wTemperature_Prob x Brightness_Prob
def temp_land(pcps, water, tirs1):
"""Derive high/low percentiles of land temperature
    Equations 12 and 13 (Zhu and Woodcock, 2012)
Parameters
----------
pcps: ndarray
potential cloud pixels, boolean
water: ndarray
water mask, boolean
tirs1: ndarray
Output
------
tuple:
17.5 and 82.5 percentile temperature over clearsky land
"""
# eq 12
clearsky_land = ~(pcps | water)
# use clearsky_land to mask tirs1
clear_land_temp = tirs1.copy()
clear_land_temp[~clearsky_land] = np.nan
# take 17.5 and 82.5 percentile, eq 13
return np.nanpercentile(clear_land_temp, (17.5, 82.5))
def land_temp_prob(tirs1, tlow, thigh):
"""Temperature-based probability of cloud over land
Equation 14 (Zhu and Woodcock, 2012)
Parameters
----------
tirs1: ndarray
tlow: float
Low (17.5 percentile) temperature of land
thigh: float
High (82.5 percentile) temperature of land
Output
------
ndarray :
probability of cloud over land based on temperature
"""
    temp_diff = 4  # degrees
    return (thigh + temp_diff - tirs1) / ((thigh + temp_diff) - (tlow - temp_diff))
def variability_prob(ndvi, ndsi, whiteness):
"""Use the probability of the spectral variability
to identify clouds over land.
Equation 15 (Zhu and Woodcock, 2012)
Parameters
----------
ndvi: ndarray
ndsi: ndarray
whiteness: ndarray
Output
------
ndarray :
probability of cloud over land based on variability
"""
ndi_max = np.fmax(np.absolute(ndvi), np.absolute(ndsi))
f_max = 1.0 - np.fmax(ndi_max, whiteness)
return f_max
# Eq 16, land_cloud_prob
# lCloud_Prob = lTemperature_Prob x Variability_Prob
def land_threshold(land_cloud_prob, pcps, water):
"""Dynamic threshold for determining cloud cutoff
Equation 17 (Zhu and Woodcock, 2012)
Parameters
----------
land_cloud_prob: ndarray
probability of cloud over land
pcps: ndarray
potential cloud pixels
water: ndarray
water mask
Output
------
float:
land cloud threshold
"""
# eq 12
clearsky_land = ~(pcps | water)
# 82.5th percentile of lCloud_Prob(masked by clearsky_land) + 0.2
cloud_prob = land_cloud_prob.copy()
cloud_prob[~clearsky_land] = np.nan
# eq 17
th_const = 0.2
return np.nanpercentile(cloud_prob, 82.5) + th_const
def potential_cloud_layer(pcp, water, tirs1, tlow,
land_cloud_prob, land_threshold,
water_cloud_prob, water_threshold=0.5):
"""Final step of determining potential cloud layer
Equation 18 (Zhu and Woodcock, 2012)
Parameters
----------
    pcp: ndarray
potential cloud pixels
water: ndarray
water mask
tirs1: ndarray
tlow: float
low percentile of land temperature
land_cloud_prob: ndarray
probability of cloud over land
land_threshold: float
cutoff for cloud over land
water_cloud_prob: ndarray
probability of cloud over water
water_threshold: float
cutoff for cloud over water
Output
------
ndarray:
potential cloud layer, boolean
"""
# Using pcp and water as mask todo
part1 = (pcp & water & (water_cloud_prob > water_threshold))
part2 = (pcp & ~water & (land_cloud_prob > land_threshold))
    temptest = tirs1 < (tlow - 35)  # 35 degrees C colder
return part1 | part2 | temptest
def calc_ndsi(green, swir1):
"""NDSI calculation
normalized difference snow index
Parameters
----------
green: ndarray
swir1: ndarray
~1.62 µm
Band 6 in Landsat 8
Output
------
ndarray:
unitless index
"""
return (green - swir1) / (green + swir1)
def calc_ndvi(red, nir):
"""NDVI calculation
normalized difference vegetation index
Parameters
----------
red: ndarray
nir: ndarray
Output
------
ndarray:
unitless index
"""
return (nir - red) / (nir + red)
def potential_cloud_shadow_layer(nir, swir1, water):
"""Find low NIR/SWIR1 that is not classified as water
This differs from the Zhu Woodcock algorithm
but produces decent results without requiring a flood-fill
Parameters
----------
nir: ndarray
swir1: ndarray
water: ndarray
Output
------
ndarray
boolean, potential cloud shadows
"""
return (nir < 0.10) & (swir1 < 0.10) & ~water
def potential_snow_layer(ndsi, green, nir, tirs1):
"""Spectral test to determine potential snow
Uses the 9.85C (283K) threshold defined in Zhu, Woodcock 2015
Parameters
----------
ndsi: ndarray
green: ndarray
nir: ndarray
tirs1: ndarray
Output
------
ndarray:
boolean, True is potential snow
"""
return (ndsi > 0.15) & (tirs1 < 9.85) & (nir > 0.11) & (green > 0.1)
def cloudmask(blue, green, red, nir, swir1, swir2,
cirrus, tirs1, min_filter=(3, 3), max_filter=(21, 21)):
"""Calculate the potential cloud layer from source data
*This is the high level function which ties together all
the equations for generating potential clouds*
Parameters
----------
blue: ndarray
green: ndarray
red: ndarray
nir: ndarray
swir1: ndarray
swir2: ndarray
cirrus: ndarray
tirs1: ndarray
min_filter: 2-element tuple, default=(3,3)
Defines the window for the minimum_filter, for removing outliers
max_filter: 2-element tuple, default=(21, 21)
Defines the window for the maximum_filter, for "buffering" the edges
Output
------
ndarray, boolean:
potential cloud layer; True = cloud
ndarray, boolean
potential cloud shadow layer; True = cloud shadow
"""
logger.info("Running initial tests")
ndvi = calc_ndvi(red, nir)
ndsi = calc_ndsi(green, swir1)
whiteness = whiteness_index(blue, green, red)
water = water_test(ndvi, nir)
# First pass, potential clouds
pcps = potential_cloud_pixels(
ndvi, ndsi, blue, green, red, nir, swir1, swir2, cirrus, tirs1)
cirrus_prob = cirrus / 0.04
# Clouds over water
tw = temp_water(water, swir2, tirs1)
wtp = water_temp_prob(tw, tirs1)
bp = brightness_prob(nir)
water_cloud_prob = (wtp * bp) + cirrus_prob
wthreshold = 0.5
# Clouds over land
tlow, thigh = temp_land(pcps, water, tirs1)
ltp = land_temp_prob(tirs1, tlow, thigh)
vp = variability_prob(ndvi, ndsi, whiteness)
land_cloud_prob = (ltp * vp) + cirrus_prob
lthreshold = land_threshold(land_cloud_prob, pcps, water)
logger.info("Calculate potential clouds")
pcloud = potential_cloud_layer(
pcps, water, tirs1, tlow,
land_cloud_prob, lthreshold,
water_cloud_prob, wthreshold)
# Ignoring snow for now as it exhibits many false positives and negatives
# when used as a binary mask
# psnow = potential_snow_layer(ndsi, green, nir, tirs1)
# pcloud = pcloud & ~psnow
logger.info("Calculate potential cloud shadows")
pshadow = potential_cloud_shadow_layer(nir, swir1, water)
    # The remainder of the algorithm differs significantly from Fmask,
    # in an attempt to make a more visually appealing cloud mask
    # with fewer inclusions and broader shapes
if min_filter:
# Remove outliers
logger.info("Remove outliers with minimum filter")
        from scipy.ndimage import minimum_filter
        from scipy.ndimage import distance_transform_edt
# remove cloud outliers by nibbling the edges
pcloud = minimum_filter(pcloud, size=min_filter)
# crude, just look x pixels away for potential cloud pixels
dist = distance_transform_edt(~pcloud)
pixel_radius = 100.0
pshadow = (dist < pixel_radius) & pshadow
# remove cloud shadow outliers
pshadow = minimum_filter(pshadow, size=min_filter)
if max_filter:
# grow around the edges
logger.info("Buffer edges with maximum filter")
        from scipy.ndimage import maximum_filter
pcloud = maximum_filter(pcloud, size=max_filter)
pshadow = maximum_filter(pshadow, size=max_filter)
return pcloud, pshadow
def gdal_nodata_mask(pcl, pcsl, tirs_arr):
"""
Given a boolean potential cloud layer,
a potential cloud shadow layer and a thermal band
Calculate the GDAL-style uint8 mask
"""
tirs_mask = np.isnan(tirs_arr) | (tirs_arr == 0)
return ((~(pcl | pcsl | tirs_mask)) * 255).astype('uint8')
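# Usage sketch (illustrative only; real inputs would be Landsat 8 TOA reflectance
# and brightness-temperature arrays read with e.g. rasterio, here replaced by
# random placeholders of matching shape):
#
#   import numpy as np
#   shape = (512, 512)
#   bands = {name: np.random.rand(*shape)
#            for name in ('blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'cirrus')}
#   tirs1 = np.random.uniform(0.0, 30.0, shape)  # degrees Celsius
#   pcloud, pshadow = cloudmask(tirs1=tirs1, **bands)
#   nodata_mask = gdal_nodata_mask(pcloud, pshadow, tirs1)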
|
from trex.emu.api import *
import argparse
class Prof1():
def __init__(self):
self.def_ns_plugs = None
self.def_c_plugs = None
def get_template_261_fields(self):
return [
{
"name": "clientIPv4Address",
"type": 45004,
"length": 4,
"enterprise_number": 9,
"data": [16, 0, 0, 1]
},
{
"name": "serverIPv4Address",
"type": 45005,
"length": 4,
"enterprise_number": 9,
"data": [24, 0, 0, 1]
},
{
"name": "protocolIdentifier",
"type": 4,
"length": 1,
"data": [17]
},
{
"name": "clientTransportPort",
"type": 45008,
"length": 2,
"enterprise_number": 9,
"data": [128, 232]
},
{
"name": "serverTransportProtocol",
"type": 45009,
"length": 2,
"enterprise_number": 9,
"data": [0, 53]
},
{
"name": "applicationId",
"type": 95,
"length": 4,
"data": [3, 0, 0, 53]
},
{
"name": "nbar2HttpHost",
"type": 45003,
"length": 7,
"enterprise_number": 9,
"data": [115, 115, 115, 46, 101, 100, 117]
},
{
"name": "nbar2HttpHostBlackMagic1",
"type": 45003,
"length": 7,
"enterprise_number": 9,
"data": [3, 0, 0, 53, 52, 4, 0]
},
{
"name": "nbar2HttpHostBlackMagic2",
"type": 45003,
"length": 7,
"enterprise_number": 9,
"data": [3, 0, 0, 53, 52, 5, 133]
},
{
"name": "flowStartSysUpTime",
"type": 22,
"length": 4,
"data": [0, 0, 0, 1]
},
{
"name": "flowEndSysUpTime",
"type": 21,
"length": 4,
"data": [0, 0, 0, 10]
},
{
"name": "flowStartMilliseconds",
"type": 152,
"length": 8,
"data": [0, 0, 0, 0, 0, 0, 0, 0]
},
{
"name": "responderPackets",
"type": 299,
"length": 8,
"data": [0, 0, 0, 0, 0, 0, 0, 1]
},
{
"name": "initiatorPackets",
"type": 298,
"length": 8,
"data": [0, 0, 0, 0, 0, 0, 0, 1]
},
{
"name": "serverBytesL3",
"type": 41105,
"length": 8,
"enterprise_number": 9,
"data": [0, 0, 0, 0, 0, 0, 0, 127]
},
{
"name": "clientBytesL3",
"type": 41106,
"length": 8,
"enterprise_number": 9,
"data": [0, 0, 0, 0, 0, 0, 0, 127]
}
]
def get_init_json_examples(self, mac, ipv4, ipv6, example_number):
example1 = {
"netflow_version": 10,
"dst_mac": mac.V(),
"dst_ipv4": ipv4.V(),
"dst_ipv6": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"dst_port": 6007,
"src_port": 30334,
"domain_id": 1000+example_number,
"generators": [
{
"name": "dns",
"auto_start": True,
"rate_pps": 2,
"data_records_num": 7,
"template_id": 260+example_number,
"fields": self.get_template_261_fields(),
"engines": [
{
"engine_name": "clientIPv4Address",
"engine_type": "uint",
"params":
{
"size": 1,
"offset": 3,
"min": 1,
"max": 255,
"op": "inc",
"step": 1,
}
},
{
"engine_name": "protocolIdentifier",
"engine_type": "histogram_uint",
"params":
{
"size": 1,
"offset": 0,
"entries": [
{
"v": 17,
"prob": 5
},
{
"v": 1,
"prob": 1,
},
{
"v": 6,
"prob": 3
}
]
}
},
{
"engine_name": "applicationId",
"engine_type": "histogram_uint_list",
"params":
{
"size": 1,
"offset": 3,
"entries": [
{
"list": [0, 2, 4, 6, 8],
"prob": 1
},
{
"list": [1, 3, 5, 7, 9],
"prob": 1
}
]
}
},
{
"engine_name": "initiatorPackets",
"engine_type": "histogram_uint64_range",
"params":
{
"size": 8,
"offset": 0,
"entries": [
{
"min": 0,
"max": 4294967295,
"prob": 1
},
{
"min": 4294967296,
"max": 8589934591,
"prob": 1
}
]
}
}
]
}
]
}
example2 = {
"netflow_version": 9,
"dst_ipv4": ipv4.V(),
"dst_ipv6": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"src_port": 30334,
"domain_id": 2000+example_number,
"generators": [
{
"name": "dnsv9",
"auto_start": True,
"rate_pps": 0.5,
"data_records_num": 1,
"template_id": 260+example_number,
"fields": [
{
"name": "protocolIdentifier",
"type": 4,
"length": 1,
"data": [17]
},
{
"name": "applicationId",
"type": 95,
"length": 4,
"data": [3, 0, 0, 53]
},
{
"name": "flowStartSysUpTime",
"type": 22,
"length": 4,
"data": [0, 0, 0, 1]
},
{
"name": "flowEndSysUpTime",
"type": 21,
"length": 4,
"data": [0, 0, 0, 10]
},
{
"name": "flowStartMilliseconds",
"type": 152,
"length": 8,
"data": [0, 0, 0, 0, 0, 0, 0, 0]
},
{
"name": "responderPackets",
"type": 299,
"length": 8,
"data": [0, 0, 0, 0, 0, 0, 0, 1]
},
{
"name": "initiatorPackets",
"type": 298,
"length": 8,
"data": [0, 0, 0, 0, 0, 0, 0, 1]
}
],
"engines": [
{
"engine_name": "protocolIdentifier",
"engine_type": "histogram_uint",
"params":
{
"size": 1,
"offset": 0,
"entries": [
{
"v": 17,
"prob": 5
},
{
"v": 1,
"prob": 1,
},
{
"v": 6,
"prob": 3
}
]
},
},
{
"engine_name": "ipVersion",
"engine_type": "histogram_uint",
"params": {
"size": 1,
"offset": 0,
"entries": [
{
"v": 4,
"prob": 3,
},
{
"v": 6,
"prob": 1
}
]
}
}
]
}
]
}
example3 = {
"netflow_version": 9,
"dst_ipv4": [0, 0, 0, 0],
"dst_mac": mac.V(),
"dst_ipv6": ipv6.V(),
"src_port": 30334,
"domain_id": 3000+example_number,
"generators": [
{
"name": "protocolID",
"auto_start": True,
"rate_pps": 1,
"data_records_num": 0,
"template_id": 260+example_number,
"fields": [
{
"name": "protocolIdentifier",
"type": 4,
"length": 1,
"data": [17]
}
]
},
{
"name": "ipVersion",
"auto_start": True,
"rate_pps": 2,
"data_records_num": 0,
"template_id": 266,
"fields": [
{
"name": "ipVersion",
"type": 60,
"length": 1,
"data": [4]
}
]
}
]
}
example4 = {
"netflow_version": 10,
"dst_ipv4": ipv4.V(),
"dst_mac": mac.V(),
"dst_ipv6": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"domain_id": 4000+example_number,
"generators": [
{
"name": "dns_with_app_id",
"auto_start": True,
"rate_pps": 1,
"data_records_num": 0,
"template_id": 260+example_number,
"is_options_template": True,
"scope_count": 7,
"fields": self.get_template_261_fields(),
"engines": [
{
"engine_name": "applicationId",
"engine_type": "uint",
"params": {
"size": 2,
"offset": 2,
"op": "rand",
"min": 20,
"max": 50000
}
}
]
},
{
"name": "bes",
"auto_start": True,
"rate_pps": 0.2,
"data_records_num": 7,
"template_id": 256,
"fields": [
{
"name": "sumServerRespTime",
"type": 42074,
"length": 4,
"enterprise_number": 9,
"data": [0, 0, 0, 10]
},
{
"name": "serverTransportProtocol",
"type": 45009,
"length": 2,
"enterprise_number": 9,
"data": [0, 53]
}
],
"engines": [
{
"engine_name": "serverTransportProtocol",
"engine_type": "histogram_uint_list",
"params": {
"size": 1,
"offset": 1,
"entries": [
{
"list": [53],
"prob": 5
},
{
"list": [67, 68],
"prob": 4
},
{
"list": [20, 21],
"prob": 5
}
]
}
}
]
}
]
}
examples = [example1, example2, example3, example4]
return examples[example_number%len(examples)]
def create_profile(self, ns_size, clients_size, mac, ipv4, dg, ipv6, dst_ipv4):
ns_list = []
mac = Mac(mac)
ipv4 = Ipv4(ipv4)
ipv6 = Ipv6(ipv6)
dg = Ipv4(dg)
dst_ipv4 = Ipv4(dst_ipv4)
dst_ipv6 = Ipv6('::1234')
dst_mac = Mac('00:25:84:7c:d7:40')
for i in range(ns_size):
# create different namespace each time
ns_key = EMUNamespaceKey(vport = i, tci = 0,tpid = 0)
ns = EMUNamespaceObj(ns_key = ns_key, def_c_plugs = self.def_c_plugs)
for j in range(clients_size):
c = EMUClientObj( mac = mac[j].V(),
ipv4 = ipv4[j].V(),
ipv4_dg = dg.V(),
ipv4_force_dg = True,
ipv4_force_mac = Mac('00:00:00:00:05:01'),
ipv6 = ipv6[j].V(),
plugs = {'ipfix': self.get_init_json_examples(dst_mac, dst_ipv4, dst_ipv6, j)}
)
ns.add_clients(c)
ns_list.append(ns)
return EMUProfile(ns = ns_list, def_ns_plugs = self.def_ns_plugs)
def get_profile(self, tuneables):
# Argparse for tunables
parser = argparse.ArgumentParser(description='Argparser for simple emu profile.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--ns', type = int, default = 1,
help='Number of namespaces to create')
parser.add_argument('--clients', type = int, default = 15,
help='Number of clients to create in each namespace')
# client args
parser.add_argument('--mac', type = str, default = '00:00:00:70:00:01',
help='Mac address of the first client')
parser.add_argument('--4', type = str, default = '1.1.1.3', dest = 'ipv4',
help='IPv4 address of the first client')
parser.add_argument('--dst-4', type = str, default = '10.56.97.19', dest = 'dst_4',
help='Ipv4 address of collector')
parser.add_argument('--dg', type = str, default = '1.1.1.1',
help='Default Gateway address of the clients')
parser.add_argument('--6', type = str, default = '2001:DB8:1::2', dest = 'ipv6',
help='IPv6 address of the first client')
args = parser.parse_args(tuneables)
assert 0 < args.ns < 65535, 'Namespaces size must be positive! in range of ports: 0 < ns < 65535'
assert 0 < args.clients, 'Clients size must be positive!'
return self.create_profile(args.ns, args.clients, args.mac, args.ipv4, args.dg, args.ipv6, args.dst_4)
def register():
return Prof1()
|
#!/usr/bin/env python
import encoder
import preamble
import sys
if len(sys.argv) != 3:
print('Usage: main.py <shellcode file> <pointer to shellcode>')
print("Pointer to shellcode should be an expression that is the address of the start of the shellcode in the victim's address space")
print('Example: main.py shellcode.bin rcx')
print('Example: main.py shellcode.bin [rsp+-8]')
print('Example: main.py shellcode.bin 0x0123456789abcdef')
print('Example: main.py shellcode.bin rbp+5')
sys.exit(1)
with open(sys.argv[1], 'rb') as f:
    payload = f.read()
encoded_payload = encoder.encode(payload)
shellcode_ptr = sys.argv[2]
print()
print('Encoding preamble for rdx <- %s' % (shellcode_ptr))
preamble = preamble.load_rdx(shellcode_ptr)
print(preamble)
print()
print('Original length: %d' % (len(payload),))
print('Encoded length: %d' % (len(encoded_payload),))
print('Preamble length: %d' % (len(preamble)))
print('Total length: %d' % (len(preamble) + len(encoded_payload)))
print()
print(preamble + encoded_payload)
|
from torchdistlog import logging
import numpy as np
import torch.distributed as dist
import torch
try:
import faiss
except ModuleNotFoundError:
logging.warning("Faiss Package Not Found!")
from ...launcher.misc.utils import distributed_gather_objects
"""
(Faiss) to multi gpu devices
"""
def index_cpu_to_gpu_multiple(index, resources=None, co=None, gpu_ids=None):
    if gpu_ids is None and resources is not None:
        # default to one logical device per provided resource
        gpu_ids = list(range(len(resources)))
    assert isinstance(gpu_ids, list)
    if resources is None:
        resources = [faiss.StandardGpuResources() for _ in gpu_ids]
    v_res = faiss.GpuResourcesVector()
    v_dev = faiss.IntVector()
for i, res in zip(gpu_ids, resources):
v_dev.push_back(i)
v_res.push_back(res)
index = faiss.index_cpu_to_gpu_multiple(v_res, v_dev, index, co)
index.referenced_objects = resources
return index
"""
Some random number generator
"""
def multi_center_generator(sample_num_per_class, class_num, dim=2, scale=0.1):
loc = np.random.rand(class_num, dim)
scale = scale * np.ones((class_num, dim))
data = []
for i in range(class_num):
data.append(np.random.normal(loc=loc[i, :], scale=scale[i, :], size=(sample_num_per_class, dim)))
data = np.vstack(data).astype('float32')
label = np.arange(class_num).reshape(-1, 1)
label = np.tile(label, (1, sample_num_per_class)).reshape(-1).astype('float32')
return data, label
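# Example (illustrative): three Gaussian clusters of five 2-D points each;
# `data` has shape (15, 2) and `label` has shape (15,).
#
#   data, label = multi_center_generator(sample_num_per_class=5, class_num=3, dim=2)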
|
#!/usr/bin/env python3
from github import Github
import json
# token needed for authentication at github
token = "the_very_secret_token"
g = Github(token)
repo = g.get_repo('cookiejar/cookietemple')
def fetch_ct_pr_issue_stats(gh_item: str) -> None:
"""
Fetch number of closed and open pull requests to the cookietemple repository per day
:param gh_item Either Issue or PR indicating the sort of data to be collected
"""
stats = repo.get_pulls('all') if gh_item == 'pr' else repo.get_issues(state='all')
open_stats_dict = dict()
closed_stats_dict = dict()
for stat in stats:
if gh_item == 'issue' and stat.pull_request:
continue
stat_is_closed = stat.state == 'closed'
stat_created_date = stat.created_at
created_date = str(stat_created_date).split(' ')[0]
# if issue/pr is already closed, add a closed date
if stat_is_closed:
stat_closed_date = stat.closed_at
closed_date = str(stat_closed_date).split(' ')[0]
try:
closed_stats_dict[closed_date] += 1
except KeyError:
closed_stats_dict[closed_date] = 1
# for each issue/pr, add its creation date, so it counts to open issues/prs
try:
open_stats_dict[created_date] += 1
except KeyError:
open_stats_dict[created_date] = 1
open_stat_per_date = dict()
for stat in stats:
if gh_item == 'issue' and stat.pull_request:
continue
if stat.state == 'closed':
stat_created_date = stat.created_at
created_date = str(stat_created_date).split(' ')[0]
stat_closed_date = stat.closed_at
closed_date = str(stat_closed_date).split(' ')[0]
for date in open_stats_dict.keys():
if created_date <= date < closed_date:
try:
open_stat_per_date[date] += 1
except KeyError:
open_stat_per_date[date] = 1
# sort the open and closed issues/prs by date in ascending order
open_stat_list = sorted(list(open_stat_per_date.items()))
closed_stat_list = sorted(list(closed_stats_dict.items()))
# convert to dict for easier JSON dumping
open_stats_dict = dict(open_stat_list)
closed_stats_dict = dict(closed_stat_list)
# sum up all closed issues/prs made up to (including) a date
sum_until_date(closed_stats_dict)
# dump data to json file to plot at stats subpage
write_to_json(open_stats_dict, f'open_{gh_item}s')
write_to_json(closed_stats_dict, f'closed_{gh_item}s')
def fetch_ct_commits() -> None:
"""
Fetch all commits to cookietemple repository.
"""
commits = repo.get_commits()
commit_dict = dict()
for commit in commits:
if commit.commit is not None:
commit_date = commit.commit.author.date
date = str(commit_date).split(' ')[0]
try:
commit_dict[date] += 1
except KeyError:
commit_dict[date] = 1
# sort the commit dict by date in ascending order
commit_list = sorted(list(commit_dict.items()))
# convert to dict for easier JSON dumping
commit_dict = dict(commit_list)
# sum up all commits made up to (including) a date
sum_until_date(commit_dict)
# dump data to json file to plot at stats subpage
write_to_json(commit_dict, actions='commits')
def sum_until_date(gh_data: dict) -> None:
"""
For each date, calculate total sum of actions up to (and including) this date
:param gh_data: The fetched github data containing dates associated with number of actions (like prs or commits)
"""
key_list = list(gh_data.keys())
for i in range(1, len(key_list)):
gh_data[key_list[i]] += gh_data[key_list[i - 1]]
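# sum_until_date example (illustrative):
#   {'2021-01-01': 2, '2021-01-02': 3, '2021-01-03': 1}
#   becomes {'2021-01-01': 2, '2021-01-02': 5, '2021-01-03': 6}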
def write_to_json(actions_per_day: dict, actions: str) -> None:
"""
Write the actions date and number to a local .json file
"""
with open(f'{actions}_per_day.json', 'w', encoding='utf-8') as f:
json.dump(actions_per_day, f, ensure_ascii=False, indent=4)
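if __name__ == '__main__':
    # Illustrative entry point (assumes `token` above is a valid GitHub token):
    # fetch commit, pull-request and issue statistics and dump them to JSON.
    fetch_ct_commits()
    fetch_ct_pr_issue_stats('pr')
    fetch_ct_pr_issue_stats('issue')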
|
# -*- coding: utf-8 -*-
# pylint: disable=C0111,C0301,R0904
import unittest
import os
import envitro
class TestCore(unittest.TestCase):
# test setter/getter
def test_isset(self):
os.environ['TEST_ISSET_TRUE'] = 'setvar'
self.assertTrue(envitro.isset('TEST_ISSET_TRUE'))
if 'TEST_ISSET_FALSE' in os.environ:
del os.environ['TEST_ISSET_FALSE']
self.assertFalse(envitro.isset('TEST_ISSET_FALSE'))
def test_write(self):
envitro.write('TEST_SET', 'setvar')
self.assertEqual(os.environ['TEST_SET'], 'setvar')
envitro.write('TEST_SET_SPACES', ' spacesvar ')
self.assertEqual(os.environ['TEST_SET_SPACES'], ' spacesvar ')
envitro.write('TEST_SET_INT', 123)
self.assertEqual(os.environ['TEST_SET_INT'], '123')
envitro.write('TEST_SET_BOOL', True)
self.assertEqual(os.environ['TEST_SET_BOOL'], 'True')
def test_write_clear(self):
os.environ['TEST_ALREADY_SET'] = 'myvar'
envitro.write('TEST_ALREADY_SET', None)
self.assertEqual(os.environ.get('TEST_ALREADY_SET'), None)
if 'TEST_ALREADY_SET_MISSING' in os.environ:
del os.environ['TEST_ALREADY_SET_MISSING']
envitro.write('TEST_ALREADY_SET_MISSING', None)
self.assertEqual(os.environ.get('TEST_ALREADY_SET_MISSING'), None)
def test_read_default(self):
if 'TEST_DEFAULT_GET' in os.environ:
del os.environ['TEST_DEFAULT_GET']
self.assertEqual(envitro.read('TEST_DEFAULT_GET', 'defaultval'), 'defaultval')
def test_read_none(self):
if 'TEST_DEFAULT_GET_NONE' in os.environ:
del os.environ['TEST_DEFAULT_GET_NONE']
self.assertEqual(envitro.read('TEST_DEFAULT_GET_NONE', allow_none=True), None)
if 'TEST_DEFAULT_GET_NONE_DEFAULT' in os.environ:
del os.environ['TEST_DEFAULT_GET_NONE_DEFAULT']
self.assertEqual(
envitro.read('TEST_DEFAULT_GET_NONE_DEFAULT', default='defaultval', allow_none=True), 'defaultval')
def test_read_fallback(self):
if 'TEST_PRIMARY' in os.environ:
del os.environ['TEST_PRIMARY']
self.assertEqual(envitro.read('TEST_PRIMARY', allow_none=True), None)
os.environ['TEST_FALLBACK'] = 'fallback'
self.assertEqual(envitro.read('TEST_PRIMARY', fallback='TEST_FALLBACK'), 'fallback')
def test_read_fallback_list(self):
if 'TEST_PRIMARY' in os.environ:
del os.environ['TEST_PRIMARY']
if 'TEST_FALLBACK_1' in os.environ:
del os.environ['TEST_FALLBACK_1']
os.environ['TEST_FALLBACK_2'] = 'fallback2'
self.assertEqual(envitro.read('TEST_PRIMARY', fallback=['TEST_FALLBACK_1', 'TEST_FALLBACK_2']), 'fallback2')
def test_read_fallback_list_default(self):
if 'TEST_PRIMARY' in os.environ:
del os.environ['TEST_PRIMARY']
if 'TEST_FALLBACK_1' in os.environ:
del os.environ['TEST_FALLBACK_1']
if 'TEST_FALLBACK_2' in os.environ:
del os.environ['TEST_FALLBACK_2']
self.assertEqual(envitro.read('TEST_PRIMARY', default='def', fallback=['TEST_FALLBACK_1', 'TEST_FALLBACK_2']), 'def')
def test_invalid_read(self):
if 'TEST_INVALID_GET' in os.environ:
del os.environ['TEST_INVALID_GET']
self.assertRaises(KeyError, lambda: envitro.read('TEST_INVALID_GET'))
def test_read(self):
os.environ['TEST_GET'] = 'getvar'
self.assertEqual(envitro.read('TEST_GET'), 'getvar')
def test_invalid(self):
if 'DOES_NOT_EXIST' in os.environ:
del os.environ['DOES_NOT_EXIST']
with self.assertRaises(KeyError):
envitro.str('DOES_NOT_EXIST')
def test_nested_default(self):
self.assertEqual(envitro.int('TEST_NOPE_INT', envitro.str('TEST_NOPE_STR', '123')), 123)
self.assertEqual(envitro.str('TEST_NOPE_STR', envitro.int('TEST_NOPE_INT', 123)), '123')
self.assertEqual(envitro.bool('TEST_NOPE_BOOL', envitro.int('TEST_NOPE_INT', 123)), True)
self.assertEqual(envitro.bool('TEST_NOPE_BOOL', envitro.int('TEST_NOPE_INT', 0)), False)
self.assertEqual(envitro.bool('TEST_NOPE_BOOL', envitro.int('TEST_NOPE_INT', 123)), True)
self.assertEqual(envitro.bool('TEST_NOPE_BOOL', envitro.str('TEST_NOPE_STR', 'false')), False)
self.assertEqual(envitro.bool('TEST_NOPE_BOOL', envitro.str('TEST_NOPE_STR', '')), False)
class TestCoreStr(unittest.TestCase):
def assert_get_set_str(self, value, expected_value):
os.environ['TEST_STR'] = value
self.assertEqual(envitro.str('TEST_STR'), expected_value)
def test_str(self):
self.assert_get_set_str('Hello World', 'Hello World')
def test_str_strip_whitespace(self):
self.assert_get_set_str(' hello ', 'hello')
def test_none_str(self):
if 'DOES_NOT_EXIST_STR' in os.environ:
del os.environ['DOES_NOT_EXIST_STR']
self.assertEqual(envitro.str('DOES_NOT_EXIST_STR', allow_none=True), None)
def test_fallback(self):
if 'PRIMARY' in os.environ:
del os.environ['PRIMARY']
os.environ['FALLBACK'] = ' fallback'
self.assertEqual(envitro.str('PRIMARY', fallback='FALLBACK'), 'fallback')
class TestCoreBool(unittest.TestCase):
def assert_get_set_bool(self, value, expected_value):
os.environ['TEST_BOOL'] = value
self.assertEqual(envitro.bool('TEST_BOOL'), expected_value)
def test_invalid_bool(self):
envitro.write('INVALID_BOOL', 'nope')
with self.assertRaises(ValueError):
envitro.bool('INVALID_BOOL')
def test_bool(self):
self.assert_get_set_bool('yes', True)
self.assert_get_set_bool('1', True)
self.assert_get_set_bool('YeS', True)
self.assert_get_set_bool('True', True)
self.assert_get_set_bool('true', True)
self.assert_get_set_bool(' 1 ', True)
self.assert_get_set_bool('YES\t', True)
self.assert_get_set_bool('\tYES\t', True)
self.assert_get_set_bool('false', False)
self.assert_get_set_bool('no', False)
self.assert_get_set_bool('0', False)
self.assert_get_set_bool(' NO ', False)
self.assert_get_set_bool('', False)
self.assert_get_set_bool(' ', False)
def test_default_bool(self):
if 'DOES_NOT_EXIST' in os.environ:
del os.environ['DOES_NOT_EXIST']
self.assertTrue(envitro.bool('DOES_NOT_EXIST', True))
self.assertFalse(envitro.bool('DOES_NOT_EXIST', False))
def test_none_bool(self):
if 'DOES_NOT_EXIST_BOOL' in os.environ:
del os.environ['DOES_NOT_EXIST_BOOL']
self.assertEqual(envitro.bool('DOES_NOT_EXIST_BOOL', allow_none=True), None)
def test_fallback(self):
if 'PRIMARY' in os.environ:
del os.environ['PRIMARY']
os.environ['FALLBACK'] = ' true'
self.assertEqual(envitro.bool('PRIMARY', fallback='FALLBACK'), True)
class TestCoreInt(unittest.TestCase):
def assert_get_set_int(self, value, expected_value):
os.environ['TEST_INT'] = value
self.assertEqual(envitro.int('TEST_INT'), expected_value)
def test_int(self):
self.assert_get_set_int('1234567', 1234567)
self.assert_get_set_int(' 1234567 ', 1234567)
def test_none_int(self):
if 'DOES_NOT_EXIST_INT' in os.environ:
del os.environ['DOES_NOT_EXIST_INT']
self.assertEqual(envitro.int('DOES_NOT_EXIST_INT', allow_none=True), None)
def test_fallback(self):
if 'PRIMARY' in os.environ:
del os.environ['PRIMARY']
os.environ['FALLBACK'] = ' 5'
self.assertEqual(envitro.int('PRIMARY', fallback='FALLBACK'), 5)
class TestCoreFloat(unittest.TestCase):
def assert_get_set_float(self, value, expected_value):
os.environ['TEST_FLOAT'] = value
self.assertEqual(envitro.float('TEST_FLOAT'), expected_value)
def test_float(self):
self.assert_get_set_float('123.45670', 123.4567)
self.assert_get_set_float(' 12345.67 ', 12345.67)
self.assert_get_set_float(' 0012345.67 ', 12345.67)
def test_none_float(self):
if 'DOES_NOT_EXIST_FLOAT' in os.environ:
del os.environ['DOES_NOT_EXIST_FLOAT']
self.assertEqual(envitro.float('DOES_NOT_EXIST_FLOAT', allow_none=True), None)
def test_fallback(self):
if 'PRIMARY' in os.environ:
del os.environ['PRIMARY']
os.environ['FALLBACK'] = ' 3.14'
self.assertEqual(envitro.float('PRIMARY', fallback='FALLBACK'), 3.14)
class TestCoreList(unittest.TestCase):
def test_list(self):
os.environ['TEST_LIST'] = 'item1,item2,item3'
self.assertEqual(envitro.list('TEST_LIST'), ['item1', 'item2', 'item3'])
os.environ['TEST_LIST'] = 'item1,item2'
self.assertEqual(envitro.list('TEST_LIST'), ['item1', 'item2'])
os.environ['TEST_LIST'] = 'item1'
self.assertEqual(envitro.list('TEST_LIST'), ['item1'])
os.environ['TEST_LIST'] = 'item1,'
self.assertEqual(envitro.list('TEST_LIST'), ['item1'])
os.environ['TEST_LIST'] = ',item1,'
self.assertEqual(envitro.list('TEST_LIST'), ['item1'])
def test_list_required(self):
os.environ['TEST_LIST_REQUIRED'] = ''
with self.assertRaises(ValueError):
envitro.list('TEST_LIST_REQUIRED')
def test_none_list(self):
if 'DOES_NOT_EXIST_LIST' in os.environ:
del os.environ['DOES_NOT_EXIST_LIST']
self.assertEqual(envitro.list('DOES_NOT_EXIST_LIST', allow_none=True), None)
def test_list_spaces(self):
os.environ['TEST_LIST_SPACES'] = ' item1 , item2 , item3 '
self.assertEqual(envitro.list('TEST_LIST_SPACES'), ['item1', 'item2', 'item3'])
os.environ['TEST_LIST_SPACES'] = ' , item1 , item2 , item3 , , ,, '
self.assertEqual(envitro.list('TEST_LIST_SPACES'), ['item1', 'item2', 'item3'])
def test_default_list(self):
if 'DOES_NOT_EXIST' in os.environ:
del os.environ['DOES_NOT_EXIST']
self.assertEqual(envitro.list('DOES_NOT_EXIST', ['item1']), ['item1'])
self.assertEqual(envitro.list('DOES_NOT_EXIST', ['item1', 'item2']), ['item1', 'item2'])
self.assertEqual(envitro.list('DOES_NOT_EXIST', 'item1,item2'), ['item1', 'item2'])
def test_list_separator(self):
os.environ['TEST_LIST_SEPARATOR'] = 'item1;item2;item3'
self.assertEqual(envitro.list('TEST_LIST_SEPARATOR', separator=';'), ['item1', 'item2', 'item3'])
def test_fallback(self):
if 'PRIMARY' in os.environ:
del os.environ['PRIMARY']
os.environ['FALLBACK'] = ' a,b,c'
self.assertEqual(envitro.list('PRIMARY', fallback='FALLBACK'), ['a', 'b', 'c'])
class TestCoreTuple(unittest.TestCase):
def test_tuple(self):
os.environ['TEST_TUPLE'] = 'item1,item2,item3'
self.assertEqual(envitro.tuple('TEST_TUPLE'), ('item1', 'item2', 'item3'))
os.environ['TEST_TUPLE'] = 'item1,item2'
self.assertEqual(envitro.tuple('TEST_TUPLE'), ('item1', 'item2'))
os.environ['TEST_TUPLE'] = 'item1'
self.assertEqual(envitro.tuple('TEST_TUPLE'), ('item1', ))
os.environ['TEST_TUPLE'] = 'item1,'
self.assertEqual(envitro.tuple('TEST_TUPLE'), ('item1', ))
os.environ['TEST_TUPLE'] = ',item1,'
self.assertEqual(envitro.tuple('TEST_TUPLE'), ('item1', ))
def test_tuple_required(self):
os.environ['TEST_TUPLE_REQUIRED'] = ''
with self.assertRaises(ValueError):
envitro.tuple('TEST_TUPLE_REQUIRED')
def test_none_tuple(self):
if 'DOES_NOT_EXIST_TUPLE' in os.environ:
del os.environ['DOES_NOT_EXIST_TUPLE']
self.assertEqual(envitro.tuple('DOES_NOT_EXIST_TUPLE', allow_none=True), None)
def test_tuple_spaces(self):
os.environ['TEST_TUPLE_SPACES'] = ' item1 , item2 , item3 '
        self.assertEqual(envitro.tuple('TEST_TUPLE_SPACES'), ('item1', 'item2', 'item3'))
os.environ['TEST_TUPLE_SPACES'] = ' , item1 , item2 , item3 , , ,, '
self.assertEqual(envitro.tuple('TEST_TUPLE_SPACES'), ('item1', 'item2', 'item3'))
def test_default_tuple(self):
if 'DOES_NOT_EXIST' in os.environ:
del os.environ['DOES_NOT_EXIST']
self.assertEqual(envitro.tuple('DOES_NOT_EXIST', ('item1', )), ('item1', ))
self.assertEqual(envitro.tuple('DOES_NOT_EXIST', ('item1', 'item2')), ('item1', 'item2'))
self.assertEqual(envitro.tuple('DOES_NOT_EXIST', 'item1,item2'), ('item1', 'item2'))
def test_tuple_separator(self):
os.environ['TEST_TUPLE_SEPARATOR'] = 'item1;item2;item3'
self.assertEqual(envitro.tuple('TEST_TUPLE_SEPARATOR', separator=';'), ('item1', 'item2', 'item3'))
def test_fallback(self):
if 'PRIMARY' in os.environ:
del os.environ['PRIMARY']
os.environ['FALLBACK'] = ' a,b,c'
self.assertEqual(envitro.tuple('PRIMARY', fallback='FALLBACK'), ('a', 'b', 'c'))
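if __name__ == '__main__':
    # Optional entry point so the test module can be run directly.
    unittest.main()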
|
from django import forms
from .models import Employee
class EmployeeModelForm(forms.ModelForm):
class Meta:
model = Employee
fields = ['firstname', 'surname', 'birthdate', 'email']
widgets = {
'firstname': forms.TextInput(attrs={'class': 'form-control'}),
'surname': forms.TextInput(attrs={'class': 'form-control'}),
'birthdate': forms.DateInput(attrs={'class': 'form-control'}),
'email': forms.EmailInput(attrs={'class': 'form-control'}),
}
|
'''
https://leetcode.com/problems/reverse-string/description/
input: "hello"
output: "olleh"
'''
def reverse_string(s):
return s[::-1]
|
import unittest
import numpy as np
import numpy.testing as npt
import pytest
from sklearn.base import clone
from divik.sampler._core import BaseSampler, ParallelSampler
class DummySampler(BaseSampler):
def __init__(self, whatever):
self.whatever = whatever
def get_sample(self, seed):
return np.array([[seed]])
class BaseSamplerTest(unittest.TestCase):
def test_iterates_through_samples(self):
sampler = DummySampler(1)
samples = [v for _, v in zip(range(10000), sampler)]
assert 10000 == len(samples)
npt.assert_array_equal(np.ravel(samples), np.ravel(range(10000)))
def test_fit_returns_self(self):
sampler = DummySampler(1)
assert sampler is sampler.fit(np.array([[1]]))
def test_parallel_sampler_generates_same_values(self):
sampler = DummySampler(1)
expected = sampler.get_sample(34134123)
with sampler.parallel() as sampler_:
actual = sampler_.get_sample(34134123)
npt.assert_array_equal(expected, actual)
def test_context_creates_parallel_sampler(self):
sampler = DummySampler(1)
with sampler.parallel() as sampler_:
assert isinstance(sampler_, ParallelSampler)
def test_parallel_sampler_is_cloneable(self):
sampler = DummySampler(1)
with sampler.parallel() as sampler_:
cloned = sampler_.clone()
assert isinstance(cloned, DummySampler)
assert sampler.whatever == cloned.whatever
def test_parallel_sampler_is_not_iterable(self):
sampler = DummySampler(1)
with sampler.parallel() as sampler_, pytest.raises(TypeError):
iter(sampler_)
def test_parallel_sampler_is_not_fittable(self):
sampler = DummySampler(1)
with sampler.parallel() as sampler_, pytest.raises(AttributeError):
sampler_.fit()
def test_is_cloneable(self):
original = DummySampler(1)
cloned = clone(original)
assert cloned.whatever == 1
|
class Util:
def __init__(self, _):
        def qrng():
            """
            Return a random 0 or 1 by preparing a qubit, applying a Hadamard
            gate and measuring it.
            """
q=_.PREP()
_.H(q)
number = _.MEAS()
print('Outcome of the measure:', number)
return number
self.qrng = qrng
|
"All view functions for contentstore, broken out into submodules"
from .assets import *
from .checklists import *
from .component import *
from .course import * # lint-amnesty, pylint: disable=redefined-builtin
from .entrance_exam import *
from .error import *
from .export_git import *
from .helpers import *
from .import_export import *
from .item import *
from .library import *
from .preview import *
from .public import *
from .tabs import *
from .transcript_settings import *
from .transcripts_ajax import *
from .user import *
from .videos import *
try:
from .dev import *
except ImportError:
pass
|
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
from scipy.sparse.linalg import spsolve
import numpy as np
import numba as nb
import os
import time
from scipy.sparse import lil_matrix, diags
import scipy.sparse as sp
def dSf_dV(Yf, V, F, Cf):
"""
Derivatives of the branch power w.r.t the branch voltage modules and angles
:param Yf: Admittances matrix of the branches with the "from" buses
:param V: Array of voltages
:param F: Array of branch "from" bus indices
:param Cf: Connectivity matrix of the branches with the "from" buses
:return: dSf_dVa, dSf_dVm
"""
Vc = np.conj(V)
diagVc = diags(Vc)
diagE = diags(V / np.abs(V))
diagV = diags(V)
Yfc = np.conj(Yf)
Ifc = Yfc * Vc # conjugate of "from" current
diagIfc = diags(Ifc)
Vf = V[F]
diagVf = diags(Vf)
CVf = Cf * diagV
CVnf = Cf * diagE
dSf_dVa = 1j * (diagIfc * CVf - diagVf * Yfc * diagVc)
dSf_dVm = diagVf * np.conj(Yf * diagE) + diagIfc * CVnf
return dSf_dVa.tocsc(), dSf_dVm.tocsc()
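
# Illustrative toy call (not from the original module): one branch from bus 0 to bus 1
# with series admittance ys and no shunt, so Yf = [[ys, -ys]] and Cf = [[1, 0]]:
#
#   ys = 1.0 / (0.01 + 0.03j)
#   Yf_toy = sp.csc_matrix(np.array([[ys, -ys]]))
#   Cf_toy = sp.csc_matrix(np.array([[1.0, 0.0]]))
#   V_toy = np.array([1.0 + 0.0j, 0.98 * np.exp(-1j * 0.05)])
#   dSf_dVa_toy, dSf_dVm_toy = dSf_dV(Yf_toy, V_toy, np.array([0]), Cf_toy)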
def dSt_dV(Yt, V, T, Ct):
"""
Derivatives of the branch power w.r.t the branch voltage modules and angles
:param Yt: Admittances matrix of the branches with the "to" buses
:param V: Array of voltages
:param T: Array of branch "to" bus indices
:param Ct: Connectivity matrix of the branches with the "to" buses
:return: dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm
"""
Vc = np.conj(V)
diagVc = diags(Vc)
diagE = diags(V / np.abs(V))
diagV = diags(V)
Ytc = np.conj(Yt)
Itc = Ytc * Vc # conjugate of "to" current
diagItc = diags(Itc)
Vt = V[T]
diagVt = diags(Vt)
CVt = Ct * diagV
CVnt = Ct * diagE
dSt_dVa = 1j * (diagItc * CVt - diagVt * Ytc * diagVc)
dSt_dVm = diagVt * np.conj(Yt * diagE) + diagItc * CVnt
return dSt_dVa.tocsc(), dSt_dVm.tocsc()
def dSf_dV_fast_v1(Yf, V, F, Cf):
Vc = np.conj(V)
diagVc = diags(Vc)
E = V / np.abs(V)
diagE = diags(E)
diagV = diags(V)
Yfc = np.conj(Yf).tocsc()
Ifc = Yfc * Vc # conjugate of "from" current
diagIfc = diags(Ifc)
Vf = V[F]
diagVf = diags(Vf)
CVf = Cf * diagV
CVnf = Cf * diagE
op1 = diagIfc * CVf # diagIfc * Cf * diagV
op2 = diagVf * Yfc * diagVc
op3 = diagVf * np.conj(Yf * diagE)
op4 = diagIfc * CVnf
    # perform the operations [diagIfc * Cf * diagV] and [diagIfc * Cf * diagE]
data1 = np.empty(len(Cf.data), dtype=complex)
data4 = np.empty(len(Cf.data), dtype=complex)
    for j in range(Cf.shape[1]):  # for each column j ...
        for k in range(Cf.indptr[j], Cf.indptr[j + 1]):  # for each entry in the column ...
            i = Cf.indices[k]  # get the row index
            data1[k] = Cf.data[k] * Ifc[i] * V[j]
            data4[k] = Cf.data[k] * Ifc[i] * E[j]
op1_b = sp.csc_matrix((data1, Cf.indices, Cf.indptr), shape=Cf.shape)
op4_b = sp.csc_matrix((data4, Cf.indices, Cf.indptr), shape=Cf.shape)
    # perform the operations [diagVf * Yfc * diagVc] and [diagVf * np.conj(Yf * diagE)]
data2 = np.empty(len(Yf.data), dtype=complex)
data3 = np.empty(len(Yf.data), dtype=complex)
    for j in range(Yf.shape[1]):  # for each column j ...
        for k in range(Yf.indptr[j], Yf.indptr[j + 1]):  # for each entry in the column ...
            i = Yf.indices[k]  # get the row index
            data2[k] = np.conj(Yf.data[k]) * Vf[i] * Vc[j]
            data3[k] = Vf[i] * np.conj(Yf.data[k] * E[j])
op2_b = sp.csc_matrix((data2, Yf.indices, Yf.indptr), shape=Yf.shape)
op3_b = sp.csc_matrix((data3, Yf.indices, Yf.indptr), shape=Yf.shape)
    # consistency checks between the operator products and the CSC reconstructions
    # (kept for debugging only; the differences are not used below)
    c1 = op1 - op1_b
    c2 = op2 - op2_b
    c3 = op3 - op3_b
    c4 = op4 - op4_b
dSf_dVa = 1j * (op1 - op2)
dSf_dVm = op3 + op4
return dSf_dVa, dSf_dVm
def dSf_dV_fast_v2(Yf, V, F, Cf):
"""
:param Yf:
:param V:
:param F:
:param Cf:
:return:
"""
Vc = np.conj(V)
E = V / np.abs(V)
Ifc = np.conj(Yf) * Vc # conjugate of "from" current
# Perform the following operations
# op1 = [diagIfc * Cf * diagV]
# op4 = [diagIfc * Cf * diagE]
data1 = np.empty(len(Cf.data), dtype=complex)
data4 = np.empty(len(Cf.data), dtype=complex)
for j in range(Cf.shape[1]): # column j ...
for k in range(Cf.indptr[j], Cf.indptr[j + 1]): # for each column entry k ...
i = Cf.indices[k] # row i
data1[k] = Cf.data[k] * Ifc[i] * V[j]
data4[k] = Cf.data[k] * Ifc[i] * E[j]
op1 = sp.csc_matrix((data1, Cf.indices, Cf.indptr), shape=Cf.shape)
op4 = sp.csc_matrix((data4, Cf.indices, Cf.indptr), shape=Cf.shape)
# Perform the following operations
# op2 = [diagVf * Yfc * diagVc]
# op3 = [diagVf * np.conj(Yf * diagE)]
data2 = np.empty(len(Yf.data), dtype=complex)
data3 = np.empty(len(Yf.data), dtype=complex)
for j in range(Yf.shape[1]): # column j ...
for k in range(Yf.indptr[j], Yf.indptr[j + 1]): # for each column entry k ...
i = Yf.indices[k] # row i
data2[k] = np.conj(Yf.data[k]) * V[F[i]] * Vc[j]
data3[k] = V[F[i]] * np.conj(Yf.data[k] * E[j])
op2 = sp.csc_matrix((data2, Yf.indices, Yf.indptr), shape=Yf.shape)
op3 = sp.csc_matrix((data3, Yf.indices, Yf.indptr), shape=Yf.shape)
dSf_dVa = 1j * (op1 - op2)
dSf_dVm = op3 + op4
return dSf_dVa, dSf_dVm
@nb.njit()
def data_1_4(Cf_data, Cf_indptr, Cf_indices, Ifc, V, E, n_cols):
data1 = np.empty(len(Cf_data), dtype=nb.complex128)
data4 = np.empty(len(Cf_data), dtype=nb.complex128)
for j in range(n_cols): # column j ...
for k in range(Cf_indptr[j], Cf_indptr[j + 1]): # for each column entry k ...
i = Cf_indices[k] # row i
data1[k] = Cf_data[k] * Ifc[i] * V[j]
data4[k] = Cf_data[k] * Ifc[i] * E[j]
return data1, data4
@nb.njit()
def data_2_3(Yf_data, Yf_indptr, Yf_indices, V, F, Vc, E, n_cols):
data2 = np.empty(len(Yf_data), dtype=nb.complex128)
data3 = np.empty(len(Yf_data), dtype=nb.complex128)
for j in range(n_cols): # column j ...
for k in range(Yf_indptr[j], Yf_indptr[j + 1]): # for each column entry k ...
i = Yf_indices[k] # row i
data2[k] = np.conj(Yf_data[k]) * V[F[i]] * Vc[j]
data3[k] = V[F[i]] * np.conj(Yf_data[k] * E[j])
return data2, data3
def dSf_dV_fast(Yf, V, F, Cf):
"""
Derivatives of the branch power w.r.t the branch voltage modules and angles
Works for dSf with Yf, F, Cf and for dSt with Yt, T, Ct
:param Yf: Admittances matrix of the branches with the "from" buses
:param V: Array of voltages
:param F: Array of branch "from" bus indices
:param Cf: Connectivity matrix of the branches with the "from" buses
:return: dSf_dVa, dSf_dVm
"""
Vc = np.conj(V)
E = V / np.abs(V)
Ifc = np.conj(Yf) * Vc # conjugate of "from" current
# Perform the following operations
# op1 = [diagIfc * Cf * diagV]
# op4 = [diagIfc * Cf * diagE]
data1, data4 = data_1_4(Cf.data, Cf.indptr, Cf.indices, Ifc, V, E, Cf.shape[1])
op1 = sp.csc_matrix((data1, Cf.indices, Cf.indptr), shape=Cf.shape)
op4 = sp.csc_matrix((data4, Cf.indices, Cf.indptr), shape=Cf.shape)
# Perform the following operations
# op2 = [diagVf * Yfc * diagVc]
# op3 = [diagVf * np.conj(Yf * diagE)]
data2, data3 = data_2_3(Yf.data, Yf.indptr, Yf.indices, V, F, Vc, E, Yf.shape[1])
op2 = sp.csc_matrix((data2, Yf.indices, Yf.indptr), shape=Yf.shape)
op3 = sp.csc_matrix((data3, Yf.indices, Yf.indptr), shape=Yf.shape)
dSf_dVa = 1j * (op1 - op2)
dSf_dVm = op3 + op4
return dSf_dVa, dSf_dVm
if __name__ == '__main__':
from GridCal.Engine import FileOpen, compile_snapshot_circuit
fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/Lynn 5 Bus pv.gridcal'
main_circuit = FileOpen(fname).open()
nc = compile_snapshot_circuit(main_circuit)
    V = nc.Vbus + np.array([0.01, -0.05, 0.002, -0.007, 0.006])  # perturb so the voltages are not exactly 1.0
dSf_dVa_1, dSf_dVm_1 = dSf_dV(Yf=nc.Yf, V=V, F=nc.F, Cf=nc.Cf)
dSf_dVa_2, dSf_dVm_2 = dSf_dV_fast(Yf=nc.Yf.tocsc(), V=V, F=nc.F, Cf=nc.Cf.tocsc())
da = dSf_dVa_1 - dSf_dVa_2
dm = dSf_dVm_1 - dSf_dVm_2
assert len(da.data) == 0
assert len(dm.data) == 0
dSt_dVa_1, dSt_dVm_1 = dSt_dV(Yt=nc.Yt, V=V, T=nc.T, Ct=nc.Ct)
dSt_dVa_2, dSt_dVm_2 = dSf_dV_fast(Yf=nc.Yt.tocsc(), V=V, F=nc.T, Cf=nc.Ct.tocsc())
da = dSt_dVa_1 - dSt_dVa_2
dm = dSt_dVm_1 - dSt_dVm_2
assert len(da.data) == 0
assert len(dm.data) == 0
print() |
from io import StringIO
from pytest_mock import MockerFixture
from scrapy_feedexporter_azure.storage import BlobStorageFeedStorage
def test_saves_item(mocker: MockerFixture):
blob_service = mocker.MagicMock()
mocker.patch('scrapy_feedexporter_azure.storage.BlobServiceClient').return_value = blob_service
container_client = mocker.MagicMock()
blob_service.get_container_client.return_value = container_client
feed_storage = BlobStorageFeedStorage("azblob://storage_account/container/path", "connection_string")
file = StringIO('some text')
feed_storage._store_in_thread(file)
container_client.upload_blob.assert_called_with(name="path", data=file)
|
import logging
import math
import os
import time
from os.path import basename
from typing import Optional

from PIL import Image
from telethon.tl.types import *

from KilluaRobot.utils.pluginhelper import runcmd

logger = logging.getLogger(__name__)
# NOTE: `sedpath` (the download/working directory) is expected to be defined elsewhere
# in the bot's package; it is not defined in this snippet.
async def convert_to_image(event, borg):
lmao = await event.get_reply_message()
if not (
lmao.gif
or lmao.audio
or lmao.voice
or lmao.video
or lmao.video_note
or lmao.photo
or lmao.sticker
or lmao.media
):
await borg.send_message(event.chat_id, "`Format Not Supported.`")
return
else:
try:
time.time()
downloaded_file_name = await borg.download_media(
lmao.media, sedpath, "`Downloading...`"
)
except Exception as e: # pylint:disable=C0103,W0703
await borg.send_message(event.chat_id, str(e))
else:
lel = await borg.send_message(
event.chat_id,
"Downloaded to `{}` successfully".format(downloaded_file_name),
)
            await lel.delete()
if not os.path.exists(downloaded_file_name):
        lel = await borg.send_message(event.chat_id, "Download Unsuccessful :(")
        await lel.delete()
return
if lmao and lmao.photo:
lmao_final = downloaded_file_name
elif lmao.sticker and lmao.sticker.mime_type == "application/x-tgsticker":
rpath = downloaded_file_name
image_name20 = os.path.join(sedpath, "SED.png")
cmd = f"lottie_convert.py --frame 0 -if lottie -of png {downloaded_file_name} {image_name20}"
stdout, stderr = (await runcmd(cmd))[:2]
os.remove(rpath)
lmao_final = image_name20
elif lmao.sticker and lmao.sticker.mime_type == "image/webp":
pathofsticker2 = downloaded_file_name
image_new_path = sedpath + "image.png"
im = Image.open(pathofsticker2)
im.save(image_new_path, "PNG")
if not os.path.exists(image_new_path):
await event.reply("`Wasn't Able To Fetch Shot.`")
return
lmao_final = image_new_path
elif lmao.audio:
sed_p = downloaded_file_name
hmmyes = sedpath + "stark.mp3"
imgpath = sedpath + "starky.jpg"
os.rename(sed_p, hmmyes)
await runcmd(f"ffmpeg -i {hmmyes} -filter:v scale=500:500 -an {imgpath}")
os.remove(sed_p)
if not os.path.exists(imgpath):
await event.reply("`Wasn't Able To Fetch Shot.`")
return
lmao_final = imgpath
elif lmao.gif or lmao.video or lmao.video_note:
sed_p2 = downloaded_file_name
jpg_file = os.path.join(sedpath, "image.jpg")
await take_screen_shot(sed_p2, 0, jpg_file)
os.remove(sed_p2)
if not os.path.exists(jpg_file):
await event.reply("`Couldn't Fetch. SS`")
return
lmao_final = jpg_file
return lmao_final
async def take_screen_shot(
video_file: str, duration: int, path: str = ""
) -> Optional[str]:
"""take a screenshot"""
logger.info(
"[[[Extracting a frame from %s ||| Video duration => %s]]]",
video_file,
duration,
)
ttl = duration // 2
thumb_image_path = path or os.path.join(sedpath, f"{basename(video_file)}.jpg")
command = f'''ffmpeg -ss {ttl} -i "{video_file}" -vframes 1 "{thumb_image_path}"'''
err = (await runcmd(command))[1]
if err:
logger.error(err)
return thumb_image_path if os.path.exists(thumb_image_path) else None
async def get_all_admin_chats(event):
lul_stark = []
all_chats = [
d.entity
for d in await event.client.get_dialogs()
if (d.is_group or d.is_channel)
]
try:
for i in all_chats:
if i.creator or i.admin_rights:
lul_stark.append(i.id)
except:
pass
return lul_stark
async def is_admin(event, user):
try:
sed = await event.client.get_permissions(event.chat_id, user)
if sed.is_admin:
is_mod = True
else:
is_mod = False
except:
is_mod = False
return is_mod
async def progress(current, total, event, start, type_of_ps, file_name=None):
"""Generic progress_callback for both
upload.py and download.py"""
now = time.time()
diff = now - start
if round(diff % 10.00) == 0 or current == total:
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
progress_str = "[{0}{1}]\nProgress: {2}%\n".format(
"".join(["🟠" for i in range(math.floor(percentage / 5))]),
"".join(["🔘" for i in range(20 - math.floor(percentage / 5))]),
round(percentage, 2),
)
tmp = progress_str + "{0} of {1}\nETA: {2}".format(
humanbytes(current), humanbytes(total), time_formatter(estimated_total_time)
)
if file_name:
await event.edit(
"{}\nFile Name: `{}`\n{}".format(type_of_ps, file_name, tmp)
)
else:
await event.edit("{}\n{}".format(type_of_ps, tmp))
def humanbytes(size):
"""Input size in bytes,
outputs in a human readable format"""
# https://stackoverflow.com/a/49361727/4723940
if not size:
return ""
# 2 ** 10 = 1024
power = 2**10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
raised_to_pow += 1
return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
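
# Worked example of the loop above: 1536 -> 1536 / 1024 = 1.5 after one division,
# so humanbytes(1536) returns "1.5 KiB"; a falsy size (0 or None) returns "".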
def time_formatter(milliseconds: int) -> str:
"""Inputs time in milliseconds, to get beautified time,
as string"""
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + " day(s), ") if days else "")
+ ((str(hours) + " hour(s), ") if hours else "")
+ ((str(minutes) + " minute(s), ") if minutes else "")
+ ((str(seconds) + " second(s), ") if seconds else "")
+ ((str(milliseconds) + " millisecond(s), ") if milliseconds else "")
)
return tmp[:-2]
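
# Worked example: time_formatter(3661000) -> "1 hour(s), 1 minute(s), 1 second(s)"
# (3,661,000 ms = 1 h 1 min 1 s; the trailing ", " is removed by tmp[:-2]).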
|
class ValueObject:
pass
|
import numpy as np
def get_dist_mr_to_stride(idx, stride, st_delta, stdir, maxidx):
d = 0
if stdir == 'stay':
return d
elif stdir == 'left':
d += st_delta
if stride > idx:
acts = maxidx - stride
d += 2 * acts
else:
acts = (stride if stride >= idx else maxidx) - idx
d += 2 * acts
d += st_delta
return d
def get_dist_mr_at_edge(goal, start, rval, stdir, carry, maxclr):
if stdir == 'stay':
if rval == 0:
g = goal
d1 = g - start
d2 = maxclr - g + start + 4
else:
g = goal
d1 = abs(g - start) + 3
d2 = maxclr - g + start + 1
elif stdir == 'left':
if rval == 0:
g = goal if not carry else (maxclr + 1 + goal)
d1 = g - start
d2 = abs(maxclr + 1 - g + start) + 3
else:
g = goal if not carry else (maxclr + 1 + goal)
d1 = abs(g - start) + 3
d2 = abs(maxclr - g + start + 1)
else:
if rval == 0:
g = goal
d1 = abs(g - start) + 2
d2 = maxclr - g + start + 2
else:
g = goal if not carry else -1
d1 = abs(g - start) + 3
d2 = maxclr - g + start + 3
if d1 <= d2:
return d1
else:
return d2
def get_dist_mr_stride_dir(station, stride, st_delta, stdir, carry, maxidx, maxclr, done, best_d):
doneidx, doneval = done
tv = [0]
ds = [0]
for i, s in enumerate(station):
n = len(tv)
if n == 0:
break
for _ in range(n):
v = tv.pop(0)
d = ds.pop(0)
#goup
if v == 1:
if i == doneidx and i < maxidx:
egd = get_dist_mr_at_edge(s, v, doneval, stdir, carry, maxclr)
std = get_dist_mr_to_stride(i + (1 if stdir == 'right' else 0), stride, st_delta, stdir, maxidx)
total_d = d + egd + std
if total_d < best_d:
best_d = total_d
elif s > 0:
if i == maxidx:
std = get_dist_mr_to_stride(i, stride, st_delta, stdir, maxidx)
total_d = d + s - 1 + std
if total_d < best_d:
best_d = total_d
else:
new_d = d + s
if new_d < best_d:
tv.append(1)
ds.append(new_d)
elif n == 1 and (i < doneidx or doneidx == maxidx):
if s > 0:
if i == maxidx:
std = get_dist_mr_to_stride(i, stride, st_delta, stdir, maxidx)
total_d = d + s + std
if total_d < best_d:
best_d = total_d
else:
new_d = d + s + 1
if new_d < best_d:
tv.append(1)
ds.append(new_d)
#godown
if v == 0:
if i == doneidx and i < maxidx:
egd = get_dist_mr_at_edge(s, v, doneval, stdir, carry, maxclr)
std = get_dist_mr_to_stride(i + (1 if stdir == 'right' else 0), stride, st_delta, stdir, maxidx)
total_d = d + egd + std
if total_d < best_d:
best_d = total_d
elif s > 0:
if i == maxidx:
std = get_dist_mr_to_stride(i, stride, st_delta, stdir, maxidx)
total_d = d + maxclr - s + 1 + std
if total_d < best_d:
best_d = total_d
else:
new_d = d + maxclr - s + 2
if new_d < best_d:
tv.append(0)
ds.append(new_d)
else:
if i == maxidx:
std = get_dist_mr_to_stride(i, stride, st_delta, stdir, maxidx)
total_d = d + std
if total_d < best_d:
best_d = total_d
else:
new_d = d + 1
if new_d < best_d:
tv.append(1)
ds.append(new_d)
elif n == 1 and (i < doneidx or doneidx == maxidx):
if s > 1:
if i == maxidx:
std = get_dist_mr_to_stride(i, stride, st_delta, stdir, maxidx)
total_d = d + maxclr - s + 2 + std
if total_d < best_d:
best_d = total_d
else:
new_d = d + maxclr - s + 3
if new_d < best_d:
tv.append(0)
ds.append(new_d)
elif s == 0:
if i == maxidx:
std = get_dist_mr_to_stride(i, stride, st_delta, stdir, maxidx)
total_d = d + 1 + std
if total_d < best_d:
best_d = total_d
else:
new_d = d + 2
if new_d < best_d:
tv.append(1)
ds.append(new_d)
if len(ds) > 1:
if ds[0] != ds[1]:
deli = ds.index(max(ds))
del tv[deli]
del ds[deli]
return best_d
def get_distance_moving_right(station, stride, colors, best_d, stdir='both'):
stnlen = len(station)
maxidx = stnlen - 1
maxclr = colors - 1
if all([s == 0 for s in station]):
d1 = stride
d2 = stnlen - stride
if d1 <= d2:
return d1 * 2
else:
return d2 * 2
elif all([s == maxclr for s in station]):
d1 = stride
d2 = maxidx - stride
if d1 <= d2:
return d1 * 2 + 1
else:
return d2 * 2 + 1
doneval = station[-1]
if doneval in [0, maxclr]:
doneidx = 0
for s in reversed(station):
if s == doneval:
doneidx += 1
else:
break
doneidx = maxidx - doneidx
else:
doneidx = maxidx
if stride == doneidx:
best_d = get_dist_mr_stride_dir(station, stride, 0, 'stay', 0, maxidx, maxclr, (doneidx, doneval), best_d)
else:
#stride_right
if stdir in ['both', 'right']:
if stride < doneidx:
st_delta = stride + 1
adj_station = []
c = 0
carry = 0
for i, s in enumerate(station):
rep = i <= stride
if not rep and c == 0:
adj_station.extend(station[i:])
break
offset = 1 if rep else 0
adj_s = s - offset - c
if adj_s < 0:
adj_s += colors
c = 1
else:
c = 0
if i == doneidx:
carry = c
adj_station.append(adj_s)
elif stride > doneidx:
st_delta = 0
carry = 0
adj_station = station[:]
best_d = get_dist_mr_stride_dir(adj_station, stride, st_delta, 'right', carry, maxidx, maxclr, (doneidx, doneval), best_d)
#stride_left
if stdir in ['both', 'left']:
steq = stride if stride < doneidx else -1
st_delta = doneidx - steq
adj_station = []
c = 0
carry = 0
for i, s in enumerate(station):
rep = i > steq and i <= doneidx
offset = 1 if rep else 0
adj_s = s + offset + c
if adj_s > maxclr:
adj_s -= colors
c = 1
else:
c = 0
if i == doneidx:
carry = c
adj_station.append(adj_s)
if i >= doneidx and c == 0:
adj_station.extend(station[i + 1:])
break
best_d = get_dist_mr_stride_dir(adj_station, stride, st_delta, 'left', carry, maxidx, maxclr, (doneidx, doneval), best_d)
return best_d
def get_dist_ml_to_stride(idx, stride, st_delta, stdir, extra=0):
d = 0
if stdir == 'left':
acts = idx - (stride + 1 if stride < idx else 1)
d += 2 * acts
d += st_delta
else:
d += st_delta
acts = stride if stride > 0 and stride < idx else 0
if extra > 0:
d += extra + 2 * (acts - 1)
else:
d += 2 * acts
return d
def get_dist_ml_stride_dir(station, stride, st_delta, stdir, initvdx, maxidx, maxclr, doneidx, best_d, extra=0):
v0, d0 = initvdx
done = maxidx - doneidx
off = [0]
ds = [d0]
if v0 == 0:
for i, s in enumerate(reversed(station[1:])):
n = len(off)
if n == 0:
break
o1 = o2 = off.pop(0)
d1 = d2 = ds.pop(0)
if n > 1:
o2 = off.pop(0)
d2 = ds.pop(0)
if i == done:
std = get_dist_ml_to_stride(doneidx, stride, st_delta, stdir, extra)
up_d, down_d = d1 + s, d2 + maxclr - s + 1 + o2
total_d = min(up_d, down_d) + std
if total_d < best_d:
best_d = total_d
break
else:
if s == maxclr:
up_d = d2 + 1 + o2
down_d = up_d + 2
else:
up_d = d1 + s + 2
down_d = d2 + maxclr - s + 1 + o2
if min(up_d, down_d) < best_d:
if down_d - up_d > 1:
off.append(1)
ds.append(up_d)
elif up_d >= down_d:
off.append(-1)
ds.append(down_d)
else:
off.append(1)
ds.append(up_d)
off.append(-1)
ds.append(down_d)
else:
for i, s in enumerate(reversed(station[1:])):
n = len(off)
if n == 0:
break
o1 = o2 = off.pop(0)
d1 = d2 = ds.pop(0)
if n > 1:
o2 = off.pop(0)
d2 = ds.pop(0)
if i == done:
std = get_dist_ml_to_stride(doneidx, stride, st_delta, stdir, extra)
up_d, down_d = d1 + s + 1 + o1, d2 + maxclr - s
total_d = min(up_d, down_d) + std
if total_d < best_d:
best_d = total_d
break
else:
if s == maxclr:
up_d = down_d = d2 + 2
else:
up_d = d1 + s + 3 + o1
down_d = d2 + maxclr - s
if min(up_d, down_d) < best_d:
if up_d - down_d > 1:
off.append(1)
ds.append(down_d)
elif down_d >= up_d:
off.append(-1)
ds.append(up_d)
else:
off.append(-1)
ds.append(up_d)
off.append(1)
ds.append(down_d)
return best_d
def get_distance_moving_left(station, stride, colors, best_d, stdir='both', doedge=True):
stnlen = len(station)
maxidx = stnlen - 1
maxclr = colors - 1
if all([s == 0 for s in station]):
d1 = stride
d2 = stnlen - stride
if d1 <= d2:
return d1 * 2
else:
return d2 * 2
elif all([s == maxclr for s in station]):
d1 = stride
d2 = maxidx - stride
if d1 <= d2:
return d1 * 2 + 1
else:
return d2 * 2 + 1
doneidx = 1
s0 = station[0]
s1 = station[1]
if s1 in [0, maxclr]:
for s in station[1:]:
if s == s1:
doneidx += 1
else:
break
if doneidx > maxidx:
best_d = get_distance_moving_right(station, stride, colors, best_d)
else:
if s1 == 0 and doedge:
s0_d1 = s0 + 2
s0_rep1 = s0_d1 - 1
s0_d2 = maxclr - s0 + 4
s0_rep2 = 4 - s0_d2
elif s1 == maxclr and doedge:
s0_d1 = s0 + 5
s0_rep1 = s0_d1 - 4
s0_d2 = maxclr - s0 + 1
s0_rep2 = 1 - s0_d2
else:
s0_d1 = s0 + 2
s0_rep1 = s0_d1 - 1
s0_d2 = maxclr - s0 + 1
s0_rep2 = 1 - s0_d2
rep_off = 0
if stride == doneidx:
if s1 in [0, maxclr] and doedge:
tv = [s1]
if s0_d1 <= s0_d2:
ds = [s0_d1]
else:
ds = [s0_d2]
else:
if abs(s0_d1 - s0_d2) > 0:
if s0_d1 < s0_d2:
tv = [0]
ds = [s0_d1]
else:
tv = [maxclr]
ds = [s0_d2]
else:
tv = [0, maxclr]
ds = [s0_d1, s0_d2]
for v, d in zip(tv, ds):
best_d = get_dist_ml_stride_dir(station, stride, 0, 'right', (v, d), maxidx, maxclr, doneidx, best_d)
else:
#stride_left
if stdir in ['both', 'left']:
stpos = stride > doneidx
steq = stride if stpos else (maxidx + 1)
st_delta = maxidx - steq + 2
adj_station = []
rep_off = int(stpos)
c = 0
for i, s in enumerate(station):
if stpos:
rep = i == doneidx or i == 0 or i > stride
else:
rep = i == doneidx
if i > doneidx and c == 0:
adj_station.extend(station[i:])
break
offset = 1 if rep else 0
adj_s = s + offset + c
if adj_s > maxclr:
adj_s -= colors
c = 1 if i > 0 else 0
else:
c = 0
adj_station.append(adj_s)
adj_rep1 = s0_rep1 + rep_off
abs_rep1 = abs(adj_rep1)
adj_d1 = s0_d1 + abs_rep1 - abs(s0_rep1)
adj_rep2 = s0_rep2 + rep_off
abs_rep2 = abs(adj_rep2)
adj_d2 = s0_d2 + abs_rep2 - abs(s0_rep2)
if s1 in [0, maxclr] and doedge:
tv = [s1]
if adj_d1 <= adj_d2:
ds = [adj_d1]
else:
ds = [adj_d2]
else:
if abs(adj_d1 - adj_d2) > 0:
if adj_d1 < adj_d2:
tv = [0]
ds = [adj_d1]
else:
tv = [maxclr]
ds = [adj_d2]
else:
tv = [0, maxclr]
ds = [adj_d1, adj_d2]
for v, d in zip(tv, ds):
best_d = get_dist_ml_stride_dir(adj_station, stride, st_delta, 'left', (v, d), maxidx, maxclr, doneidx, best_d)
#stride_right
if stdir in ['both', 'right']:
if s1 == 0 and not (stride > 0 and stride < doneidx) and doedge:
s0_d1 = s0 + 2
s0_rep1 = s0_d1 - 1
s0_d2 = maxclr - s0 + 4
s0_rep2 = 4 - s0_d2
elif s1 == maxclr and not (stride > 0 and stride < doneidx) and doedge:
s0_d1 = s0 + 5
s0_rep1 = s0_d1 - 4
s0_d2 = maxclr - s0 + 1
s0_rep2 = 1 - s0_d2
else:
s0_d1 = s0 + 2
s0_rep1 = s0_d1 - 1
s0_d2 = maxclr - s0 + 1
s0_rep2 = 1 - s0_d2
stpos = stride > doneidx
steq = stride if stpos else stnlen
st_delta = steq - doneidx
adj_station = []
c = 0
rep_off = 0
if not stpos:
adj_s0 = s0 - 1
if adj_s0 < 0:
adj_s0 += colors
adj_station.append(adj_s0)
rep_off = -1
else:
adj_station.append(s0)
for i, s in enumerate(station):
if i == 0:
continue
rep = i > doneidx and i <= steq
offset = 1 if rep else 0
adj_s = s - offset - c
if adj_s < 0:
adj_s += colors
c = 1
else:
c = 0
adj_station.append(adj_s)
if i >= steq and c == 0:
adj_station.extend(station[i + 1:])
break
adj_rep1 = s0_rep1 + rep_off
abs_rep1 = abs(adj_rep1)
adj_d1 = s0_d1 + abs_rep1 - abs(s0_rep1)
adj_rep2 = s0_rep2 + rep_off
abs_rep2 = abs(adj_rep2)
adj_d2 = s0_d2 + abs_rep2 - abs(s0_rep2)
extras = []
if s1 in [0, maxclr] and not (stride > 0 and stride < doneidx) and doedge:
extras.append(0)
tv = [s1]
if adj_d1 <= adj_d2:
ds = [adj_d1]
else:
ds = [adj_d2]
else:
if s1 in [0, maxclr] and (stride > 0 and stride < doneidx):
if abs(adj_d1 - adj_d2) > 0:
tv = [s1]
if adj_d1 < adj_d2:
ds = [adj_d1]
if s1 == maxclr:
extras.append(3)
else:
extras.append(0)
else:
ds = [adj_d2]
if s1 == 0:
extras.append(1)
else:
extras.append(0)
else:
tv = [s1, s1]
ds = [adj_d1, adj_d2]
if s1 == 0:
extras.extend([0, 1])
else:
extras.extend([3, 0])
else:
if abs(adj_d1 - adj_d2) > 0:
extras.append(0)
if adj_d1 < adj_d2:
tv = [0]
ds = [adj_d1]
else:
tv = [maxclr]
ds = [adj_d2]
else:
tv = [0, maxclr]
ds = [adj_d1, adj_d2]
extras.extend([0, 0])
for v, d, xt in zip(tv, ds, extras):
best_d = get_dist_ml_stride_dir(adj_station, stride, st_delta, 'right', (v, d), maxidx, maxclr, doneidx, best_d, xt)
return best_d
def get_windows(station, colors):
max_idx = len(station) - 1
max_symbol = colors - 1
w = False
windows = []
winval = 0
window_start = 0
for i, d in enumerate(station[1:]):
if not w and (d == 0 or d == max_symbol):
window_start = i
winval = d
w = True
elif w and d != winval:
windows.append([window_start, i + 1, winval])
if d in [0, max_symbol]:
window_start = i
winval = d
w = True
else:
w = False
if w:
windows.append([window_start, max_idx + 1, winval])
return windows
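
# Worked example of the scan above: get_windows([2, 0, 0, 3, 3], 4) returns
# [[0, 3, 0], [2, 5, 3]], i.e. one window for the run of 0s and one for the run of
# max_symbol (3) values at the end of the station.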
def action_distance(station, stride, colors):
stnlen = len(station)
maxidx = stnlen - 1
maxclr = colors - 1
if all([s == 0 for s in station]):
d1 = stride
d2 = stnlen - stride
if d1 <= d2:
return d1 * 2
else:
return d2 * 2
elif all([s == maxclr for s in station]):
d1 = stride
d2 = maxidx - stride
if d1 <= d2:
return d1 * 2 + 1
else:
return d2 * 2 + 1
else:
#all right or left
best_d = np.inf
best_d = get_distance_moving_right(station, stride, colors, best_d)
best_d = get_distance_moving_left(station, stride, colors, best_d)
windows = get_windows(station, colors)
#print(windows)
for (lowedge, highedge, winval) in windows:
if lowedge == 0 or highedge == stnlen:
continue
#first right then left
#stride in place
if stride == highedge:
adj_station = [s if i <= lowedge else winval for i, s in enumerate(station)]
dp = get_distance_moving_right(adj_station, maxidx, colors, best_d, stdir='left')
if dp < best_d:
best_d = get_dist_ml_stride_dir(station, stride, 0, 'right', (winval, dp), maxidx, maxclr, stride, best_d)
else:
#stride right
if stride > highedge:
adj_station = [s if i <= lowedge else winval for i, s in enumerate(station)]
dp = get_distance_moving_right(adj_station, maxidx, colors, best_d, stdir='left')
if dp < best_d:
st_delta = stride - highedge
adj_station = []
c = 0
for i, s in enumerate(station):
if i <= highedge:
adj_station.append(s)
continue
rep = i <= stride
offset = 1 if rep else 0
adj_s = s - offset - c
if adj_s < 0:
adj_s += colors
c = 1
else:
c = 0
adj_station.append(adj_s)
if not rep and c == 0:
adj_station.extend(station[i + 1:])
break
best_d = get_dist_ml_stride_dir(adj_station, stride, st_delta, 'right', (winval, dp), maxidx, maxclr, highedge, best_d)
else:
if stride < lowedge:
steps_forward = stride + 1
steps_end = 0
adj_station = []
c = 0
for i, s in enumerate(station):
if i <= stride:
adj_station.append(s)
continue
rep = i <= lowedge
offset = 1 if rep else 0
if i <= lowedge:
adj_s = s + offset + c
else:
adj_s = winval + offset + c
if adj_s > maxclr:
adj_s -= colors
c = 1
else:
c = 0
adj_station.append(adj_s)
if not rep and c == 0:
adj_station.extend([winval] * (maxidx - i))
break
else:
steps_forward = lowedge + 1
steps_end = stride - lowedge
adj_station = [s if i <= lowedge else winval for i, s in enumerate(station)]
dp = get_distance_moving_right(adj_station, lowedge, colors, best_d)
if dp < best_d:
steps_back = lowedge + 1
dp += steps_back
st_delta = maxidx - highedge
adj_station = []
c = 0
for i, s in enumerate(station):
if i <= highedge:
adj_station.append(s)
continue
adj_s = s - 1 - c
if adj_s < 0:
adj_s += colors
c = 1
else:
c = 0
adj_station.append(adj_s)
dp = get_dist_ml_stride_dir(adj_station, maxidx, st_delta, 'right', (winval, dp), maxidx, maxclr, highedge, best_d)
if dp < best_d:
dp += steps_forward
dp += steps_end
if dp < best_d:
best_d = dp
#stride left
if stride >= lowedge and stride < highedge:
adj_station = [s if i <= lowedge else winval for i, s in enumerate(station)]
dp = get_distance_moving_right(adj_station, maxidx, colors, best_d, stdir='left')
if dp < best_d:
st_delta = 1
adj_station = []
c = 0
for i, s in enumerate(station):
if i < highedge:
adj_station.append(s)
continue
rep = i == highedge
offset = 1 if rep else 0
adj_s = s + offset + c
if adj_s > maxclr:
adj_s -= colors
c = 1
else:
c = 0
adj_station.append(adj_s)
if not rep and c == 0:
adj_station.extend(station[i + 1:])
break
best_d = get_dist_ml_stride_dir(adj_station, stride, st_delta, 'left', (winval, dp), maxidx, maxclr, highedge, best_d)
else:
steq = stride if stride < lowedge else -1
adj_station = []
c = 0
for i, s in enumerate(station):
if i > lowedge and c == 0:
adj_station.extend([winval] * (maxidx - i + 1))
break
offset = (1 if i <= steq else 2) if i <= lowedge else 0
if i <= lowedge:
adj_s = s + offset + c
else:
adj_s = winval + offset + c
if adj_s > maxclr:
adj_s -= colors
c = 1
else:
c = 0
adj_station.append(adj_s)
dp = get_distance_moving_right(adj_station, lowedge, colors, best_d)
if dp < best_d:
steps_back = lowedge + 1
dp += steps_back
adj_station = []
c = 0
for i, s in enumerate(station):
if i < highedge:
adj_station.append(s)
continue
rep = i == highedge
offset = 1 if rep else 0
adj_s = s + offset + c
if adj_s > maxclr:
adj_s -= colors
c = 1
else:
c = 0
adj_station.append(adj_s)
if not rep and c == 0:
adj_station.extend(station[i + 1:])
break
steps_end = 0
if stride > highedge and stride < maxidx:
steps_end = maxidx - stride
prev_station = adj_station[:]
adj_station = []
c = 0
for i, s in enumerate(prev_station):
if i <= stride:
adj_station.append(s)
continue
adj_s = s + 1 + c
if adj_s > maxclr:
adj_s -= colors
c = 1
else:
c = 0
adj_station.append(adj_s)
dp = get_dist_ml_stride_dir(adj_station, highedge, 0, 'left', (winval, dp), maxidx, maxclr, highedge, best_d)
if dp < best_d:
dp += 1
steps_back = highedge - lowedge - 1
dp += 2 * steps_back
steps_end += (lowedge - stride) if stride < lowedge else (lowedge + 1)
dp += steps_end
if dp < best_d:
best_d = dp
#first left then right
if stride == lowedge:
doneidx = highedge
rep_off = -1
adj_station = []
c = 0
for i, s in enumerate([s if i == 0 or i >= highedge else 0 for i, s in enumerate(station)]):
rep = i > doneidx or i == 0
offset = 1 if rep else 0
adj_s = s - offset - c
if adj_s < 0:
adj_s += colors
c = 1
else:
c = 0
adj_station.append(adj_s)
s0 = adj_station[0]
s0_d1 = s0 + 2
s0_rep1 = s0_d1 - 1
s0_d2 = maxclr - s0 + 1
s0_rep2 = 1 - s0_d2
st_delta = stnlen - doneidx
if abs(s0_d1 - s0_d2) > 0:
if s0_d1 < s0_d2:
tv = [0]
ds = [s0_d1]
else:
tv = [maxclr]
ds = [s0_d2]
else:
tv = [0, maxclr]
ds = [s0_d1, s0_d2]
prev_station = adj_station[:]
for v, d in zip(tv, ds):
carryath = 0
if (v == 0 or (v == maxclr and s0 == maxclr)):
if winval == maxclr:
carryath = 1
else:
if winval == 0:
carryath = -1
adj_station = []
c = 0
for i, s in enumerate(prev_station):
if i < highedge:
adj_station.append(s)
continue
offset = carryath if i == highedge else 0
adj_s = s + offset + c
if adj_s > maxclr:
adj_s -= colors
c = 1
elif adj_s < 0:
adj_s += colors
c = -1
else:
c = 0
adj_station.append(adj_s)
dp = get_dist_ml_stride_dir(adj_station, 0, st_delta, 'right', (v, d), maxidx, maxclr, doneidx, best_d)
if dp < best_d:
carryat1 = int(v == maxclr and not (v == maxclr and s0 == maxclr))
adj_station = []
c = 0
for i, s in enumerate(station):
if i == 0:
adj_station.append(0)
continue
offset = carryat1 if i == 1 else 0
adj_s = (s if i <= lowedge else winval) + offset + c
if adj_s > maxclr:
adj_s -= colors
c = 1
else:
c = 0
adj_station.append(adj_s)
if i >= lowedge and c == 0:
adj_station.extend([winval] * (maxidx - i))
break
dp2 = get_distance_moving_right(adj_station, lowedge, colors, best_d, stdir='left')
dp += dp2
if dp < best_d:
best_d = dp
else:
#stride right
doneidx = highedge
rep_off = -1
adj_station = []
c = 0
for i, s in enumerate([s if i == 0 or i >= highedge else 0 for i, s in enumerate(station)]):
if stride >= lowedge and stride < highedge:
rep = i > doneidx or i == 0
offset = 1 if rep else 0
elif stride >= highedge:
offset = 2 if i > highedge and i <= stride else (1 if i == 0 or i == highedge or i > stride else 0)
else:
offset = 2 if i > highedge or i == 0 else (1 if i == highedge else 0)
adj_s = s - offset - c
if adj_s < 0:
adj_s += colors
c = 1
else:
c = 0
adj_station.append(adj_s)
s0 = adj_station[0]
s0_d1 = s0 + 2
s0_rep1 = s0_d1 - 1
s0_d2 = maxclr - s0 + 1
s0_rep2 = 1 - s0_d2
st_delta = stnlen - doneidx
if abs(s0_d1 - s0_d2) > 0:
if s0_d1 < s0_d2:
tv = [0]
ds = [s0_d1]
else:
tv = [maxclr]
ds = [s0_d2]
else:
tv = [0, maxclr]
ds = [s0_d1, s0_d2]
prev_station = adj_station[:]
for v, d in zip(tv, ds):
carryath = 0
if (v == 0 or (v == maxclr and s0 == maxclr)):
if winval == maxclr:
carryath = 1
else:
if winval == 0:
carryath = -1
adj_station = []
c = 0
for i, s in enumerate(prev_station):
if i < highedge:
adj_station.append(s)
continue
offset = carryath if i == highedge else 0
adj_s = s + offset + c
if adj_s > maxclr:
adj_s -= colors
c = 1
elif adj_s < 0:
adj_s += colors
c = -1
else:
c = 0
adj_station.append(adj_s)
dp = get_dist_ml_stride_dir(adj_station, 0, st_delta, 'right', (v, d), maxidx, maxclr, doneidx, best_d)
if dp < best_d:
carryat1 = int(v == maxclr and not (v == maxclr and s0 == maxclr))
adj_station = []
c = 0
for i, s in enumerate(station):
if i == 0:
adj_station.append(0)
continue
if stride > 0 and stride <= lowedge:
if i == 1:
offset = (1 if i > 0 and i <= stride else 0) - carryat1
else:
offset = 1 if i > 0 and i <= stride else 0
else:
offset = -carryat1 if i == 1 else 0
adj_s = (s if i <= lowedge else winval) - offset - c
if adj_s < 0:
adj_s += colors
c = 1
elif adj_s > maxclr:
adj_s -= colors
c = -1
else:
c = 0
adj_station.append(adj_s)
dp2 = get_distance_moving_right(adj_station, lowedge + 1, colors, best_d)
dp += dp2
if dp < best_d:
steps_forward = (stride if stride >= lowedge + 1 and stride < highedge else highedge) - lowedge - 1
dp += steps_forward * 2
steps_end = (stride - highedge + 1) if stride >= highedge else ((stride + 1) if stride <= lowedge else 0)
dp += steps_end
if dp < best_d:
best_d = dp
#stride left
doneidx = highedge
rep_off = -1
adj_station = []
c = 0
for i, s in enumerate([s if i == 0 or i >= highedge else 0 for i, s in enumerate(station)]):
if stride < lowedge:
rep = i > doneidx or i == 0
offset = 1 if rep else 0
elif stride >= highedge:
offset = 1 if i > highedge and i <= stride else 0
else:
offset = -1 if i == highedge else 0
adj_s = s - offset - c
if adj_s < 0:
adj_s += colors
c = 1
elif adj_s > maxclr:
adj_s -= colors
c = -1
else:
c = 0
adj_station.append(adj_s)
s0 = adj_station[0]
s0_d1 = s0 + 2
s0_rep1 = s0_d1 - 1
s0_d2 = maxclr - s0 + 1
s0_rep2 = 1 - s0_d2
st_delta = stnlen - doneidx
if abs(s0_d1 - s0_d2) > 0:
if s0_d1 < s0_d2:
tv = [0]
ds = [s0_d1]
else:
tv = [maxclr]
ds = [s0_d2]
else:
tv = [0, maxclr]
ds = [s0_d1, s0_d2]
prev_station = adj_station[:]
for v, d in zip(tv, ds):
carryath = 0
if (v == 0 or (v == maxclr and s0 == maxclr and stride < lowedge)):
if winval == maxclr:
carryath = 1
else:
if winval == 0:
carryath = -1
adj_station = []
c = 0
for i, s in enumerate(prev_station):
if i < highedge:
adj_station.append(s)
continue
offset = carryath if i == highedge else 0
adj_s = s + offset + c
if adj_s > maxclr:
adj_s -= colors
c = 1
elif adj_s < 0:
adj_s += colors
c = -1
else:
c = 0
adj_station.append(adj_s)
dp = get_dist_ml_stride_dir(adj_station, 0, st_delta, 'right', (v, d), maxidx, maxclr, doneidx, best_d)
if dp < best_d:
carryat1 = int(v == maxclr and not (v == maxclr and s0 == maxclr and stride < lowedge))
adj_station = []
c = 0
for i, s in enumerate(station):
if i == 0:
adj_station.append(0)
continue
if stride < lowedge:
if i == 1:
offset = (1 if i > stride and i <= lowedge else 0) + carryat1
else:
offset = 1 if i > stride and i <= lowedge else 0
else:
if i == 1:
offset = (1 if i <= lowedge else 0) + carryat1
else:
offset = 1 if i <= lowedge else 0
adj_s = (s if i <= lowedge else winval) + offset + c
if adj_s > maxclr:
adj_s -= colors
c = 1
elif adj_s < 0:
adj_s += colors
c = -1
else:
c = 0
adj_station.append(adj_s)
dp2 = get_distance_moving_right(adj_station, lowedge, colors, best_d)
dp += dp2
if dp < best_d:
steps_back = (lowedge - stride) if stride < lowedge else lowedge
if stride > lowedge:
steps_back += (maxidx - stride + 1) if stride >= highedge - 1 else (maxidx - highedge + 2)
dp += steps_back
steps_end = (highedge - 1 - stride) if stride > lowedge and stride < highedge - 1 else 0
dp += steps_end
if dp < best_d:
best_d = dp
return best_d
|
"""Parse the output of writelog.sh into info about git commits.
"""
import argparse
import json
import sys
SEP="@@@@@"
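# The writelog.sh output is assumed (inferred from parse() below) to be a stream of
# records shaped roughly like:
#   @@@@@@@@@@        <- SEP doubled: starts a new commit record
#   @@@@@time         <- SEP + field name
#   <field value>
#   @@@@@diff
#   <numstat lines, a blank line, then the unified diff>
# Each record becomes a dict keyed by field name; the "diff" field is post-processed
# by parse_diff().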
def parse_diff(st):
"""Parse a commit diff from the given string.
Args:
st: diff string
"""
D_BEGIN = 1
D_FILES = 2
D_HEADER = 3
D_BODY = 4
state = D_BEGIN
def add_diff():
putf = fromfile
if fromfile != tofile:
if fromfile == "dev/null":
putf = tofile
else:
putf = fromfile
#print("WARNING: fromfile!=tofile")
part["files"][putf]["text"] = curtxt
part = {"files": dict()}
fromfile = ""
tofile = ""
curtxt = ""
for l in st.split("\n"):
if state == D_BEGIN:
if len(l) > 0:
state = D_FILES
if state == D_FILES:
if len(l) == 0:
state = D_HEADER
else:
(ins, rem, fname) = l.split()
part["files"][fname] = {
"ins": ins,
"rem": rem
}
elif state == D_HEADER:
#expect
# index md5..md5
# new file
# deleted file
# new mode
# old mode
# Binary files
if l.startswith("---"):
(_, fromfile) = l.split()
#XXX: don't assume a/
fromfile = fromfile[fromfile.find("/")+1:]
elif l.startswith("+++"):
(_, tofile) = l.split()
tofile = tofile[tofile.find("/")+1:]
state = D_BODY
elif state == D_BODY:
if l.startswith("diff --git"):
#print "CURTXT", curtxt, "ff", fromfile, "tf", tofile
add_diff()
curtxt = ""
state = D_HEADER
elif not l.startswith("@@"):
curtxt += l + "\n"
#print "CURTXT", curtxt, "ff", fromfile, "tf", tofile
if len(curtxt) > 0:
add_diff()
#/for
return part
def parse(fin):
"""Parse tuples from the given input
Args:
fin: file pointer
"""
last = None
field = None
for l in fin:
if l.startswith(SEP+SEP):
if last:
last["diff"] = parse_diff(last["diff"])
yield last
last = dict()
elif l.startswith(SEP):
#trim last newline
if field and field in last:
last[field] = last[field][:-1]
field = l.strip()[len(SEP):]
else:
            if field not in last:
last[field] = ""
last[field] += l
def print_times(iput):
for tup in parse(iput):
        print(tup["time"])
def print_files(iput):
for tup in parse(iput):
if "diff" in tup:
for (fname, cont) in tup["diff"]["files"].items():
                print(fname)
def write_json(iput, fname, filter=lambda x: True):
    """Write the parsed commits that pass `filter` to a JSON file."""
    changes = []
    for tup in parse(iput):
        if filter(tup):
            changes.append(tup)
    with open(fname, "w") as fp:
        json.dump(changes, fp, indent=2)
def filter_has_file(name):
"""Filter restricted to only those files with a given name
"""
def fn(d):
if "diff" in d:
if "files" in d["diff"]:
if name in d["diff"]["files"]:
return True
return False
return fn
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("mode")
args = parser.parse_args()
if args.mode == "filtered":
write_json(sys.stdin, "filtered-mysql.json",
filter_has_file("manifests/mysql.pp"))
elif args.mode == "unfiltered":
write_json(sys.stdin, "all-changes.json")
elif args.mode == "print_files":
print_files(sys.stdin)
|
#!/usr/bin/env python
import logging
import sys
import os
import signal
import conf
import core
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol.TBinaryProtocol import TBinaryProtocolAcceleratedFactory
from thrift.server import TServer
from rpc import RndNodeApi
logger = logging.getLogger(__name__)
class RndProcessHandler(object):
def runTask(self, rtc):
logger.debug("starting core.ProcessMgr.runProcess(rtc): %s", rtc.taskId)
core.ProcessMgr.runProcess(rtc)
logger.debug("finished core.ProcessMgr.runProcess(rtc): %s", rtc.taskId)
def killRunningTask(self, procId, reason):
core.ProcessMgr.killRunningTask(procId, reason)
def getRunningTasks(self):
logger.debug("starting core.ProcessMgr.getRunningTasks()")
tasks = core.ProcessMgr.getRunningTasks()
logger.debug("finished core.ProcessMgr.getRunningTasks()")
return tasks
def reboot(self, now=False):
core.ProcessMgr.reboot(now)
def pingPong(self, withTasks=False):
ping = core.Profiler.getPing()
ping.isReboot = core.ProcessMgr.isReboot
if withTasks:
ping.tasks = self.getRunningTasks()
return ping
def get_server(api, handler, port, **kwargs):
processor = api.Processor(handler)
socket = TSocket.TServerSocket(port=port)
tfactory = kwargs.get('transport') or TTransport.TFramedTransportFactory()
pfactory = kwargs.get('protocol') or TBinaryProtocolAcceleratedFactory()
server = TServer.TThreadPoolServer(processor, socket, tfactory, pfactory)
server.setNumThreads(8)
return server
def exit_handler(*args):
logger.info("Caught SIGTERM. Shutting down Process Manager...")
core.ProcessMgr.shutdown()
logger.info("Process Manager finished shutting down")
os._exit(0)
signal.signal(signal.SIGTERM, exit_handler)
def start():
logger.info("Staring Render Node Daemon on TCP port %d" % conf.NETWORK_PORT)
server = get_server(RndNodeApi, RndProcessHandler(), conf.NETWORK_PORT)
try:
server.serve()
except KeyboardInterrupt:
exit_handler()
sys.exit(0)
|
import asyncio
import irc
import time
TWITCH_RATE_LIMITS = {
"ALL": [15, 30],
"AUTH": [10, 10],
"JOIN": [15, 10],
}
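
# The values above act as token buckets: [rate, per] allows `rate` commands per `per`
# seconds. _is_rate_limited() below refills the allowance in proportion to the elapsed
# time (capped at `rate`) and reports the bucket as limited once fewer than one token
# remains; each permitted send consumes one token.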
class Client(irc.Client):
def __init__(self, endpoint: irc.Endpoint) -> None:
super().__init__(endpoint)
self._status = {
"ALL": [TWITCH_RATE_LIMITS["ALL"][0], time.time()],
"AUTH": [TWITCH_RATE_LIMITS["AUTH"][0], time.time()],
"JOIN": [TWITCH_RATE_LIMITS["JOIN"][0], time.time()],
}
def _is_rate_limited(self, rate_limit: str) -> bool:
current_time = time.time()
allowance, last_check = self._status[rate_limit]
rate, per = TWITCH_RATE_LIMITS[rate_limit]
time_passed = current_time - last_check
allowance += time_passed * (rate / per)
if allowance > rate:
allowance = rate
if allowance < 1.0:
rate_limited = True
else:
rate_limited = False
            allowance -= 1.0
self._status |= {rate_limit: [allowance, current_time]}
return rate_limited
def send(self, command: irc.Command) -> None:
can_send = False
if not self._is_rate_limited("ALL"):
can_send = True
if isinstance(command, irc.Join):
if not self._is_rate_limited("JOIN"):
can_send = True
elif isinstance(command, irc.Pass):
if not self._is_rate_limited("AUTH"):
can_send = True
else:
can_send = True
if isinstance(command, irc.Pong):
can_send = True
if can_send:
super().send(command) |
import sublime, sublime_plugin
class AngularjsToggleSettingsCommand(sublime_plugin.ApplicationCommand):
"""Enables/Disables settings"""
def run(self, setting):
s = sublime.load_settings('AngularJS-sublime-package.sublime-settings')
s.set(setting, not s.get(setting, False))
sublime.save_settings('AngularJS-sublime-package.sublime-settings')
def is_checked(self, setting):
s = sublime.load_settings('AngularJS-sublime-package.sublime-settings')
return s.get(setting, False)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# normspace.py
# shelly
#
"""
Normalise spaces from the lines on stdin, stripping outer spaces and
converting all sequences of spaces or tabs into single spaces.
"""
import sys
import optparse
import re
def normspace(istream=sys.stdin, ostream=sys.stdout):
for l in istream:
l = l.strip()
l = re.sub('[ \t]+', ' ', l)
        ostream.write(l + '\n')
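
# Example behaviour: the line "  foo\t\t bar  " becomes "foo bar": outer whitespace is
# stripped and every run of spaces/tabs collapses to a single space.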
def _create_option_parser():
usage = \
"""%prog normspace [options]
Normalises whitespace in lines from stdin. Strips outer spaces and replaces
any sequences of whitespace with a single space."""
parser = optparse.OptionParser(usage)
return parser
def main(argv):
parser = _create_option_parser()
(options, args) = parser.parse_args(argv)
if args:
parser.print_help()
sys.exit(1)
try:
normspace(*args)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main(sys.argv[1:])
|
from focusnfe.core.base import BaseAPIWrapper
class NFe(BaseAPIWrapper):
pass
|
import discord
import command_template
class Disable(command_template.Command):
def __init__(self, handler):
super(Disable, self).__init__(handler)
self.enabled = True # Should never change
self.perm_level = self.permission_levels["owner"]
self.cmd_name = "disable"
self.arguments = "[command]"
self.help_description = "Disables a command from the bot. This command can not be disabled."
async def command(self, message: discord.Message):
if not self.execute_cmd(message):
return
text_result = self.handler.disable_command(self.rm_cmd(message))
await self.send_message_check(message.channel, text_result)
|
"""Dedup related entities
Revision ID: 0361dbb4d96f
Revises: a5d97318b751
Create Date: 2016-12-18 22:00:05.397609
"""
# revision identifiers, used by Alembic.
revision = '0361dbb4d96f'
down_revision = 'a5d97318b751'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('entity',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('type', sa.String(length=256), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_entity', 'entity', ['name', 'type'], unique=True)
op.create_table('session_entity',
sa.Column('session_id', sa.Integer(), nullable=True),
sa.Column('entity_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['entity_id'], ['entity.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['session_id'], ['session.id'], ondelete='CASCADE')
)
op.create_index('ix_session_entity_session_id_entity_id', 'session_entity', ['session_id', 'entity_id'], unique=False)
op.create_table('test_entity',
sa.Column('test_id', sa.Integer(), nullable=True),
sa.Column('entity_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['entity_id'], ['entity.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['test_id'], ['test.id'], ondelete='CASCADE')
)
op.create_index('ix_test_entity_test_id_entity_id', 'test_entity', ['test_id', 'entity_id'], unique=False)
op.execute('INSERT INTO entity (type, name) SELECT DISTINCT type, name FROM related_entity')
op.execute('INSERT INTO session_entity (session_id, entity_id) SELECT session.id, entity.id FROM session JOIN related_entity ON related_entity.session_id = session.id JOIN entity ON related_entity.name = entity.name and related_entity.type = entity.type')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_test_entity_test_id_entity_id', table_name='test_entity')
op.drop_table('test_entity')
op.drop_index('ix_session_entity_session_id_entity_id', table_name='session_entity')
op.drop_table('session_entity')
op.drop_index('ix_entity', table_name='entity')
op.drop_table('entity')
# ### end Alembic commands ###
|
"""
680
valid palindrome 2
easy
Given a non-empty string s, you may delete at most one character.
Judge whether you can make it a palindrome.
"""
class Solution:
def validPalindrome(self, s: str) -> bool:
def check_removed(m, n):
return all([s[k] == s[m+n-k] for k in range(m, n)])
for i in range(0, int(len(s)/2)):
j = len(s)-i-1
if s[i] != s[j]:
return check_removed(i+1, j) or check_removed(i, j-1)
return True
sol = Solution()
s = "vbh"
print(sol.validPalindrome(s))
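# With s = "vbh" no single deletion yields a palindrome, so this prints False.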
|
import pytest
from fixtures import get_keys, get_children, get_title
from craigslist_meta import Country
selector = "country"
key = "germany"
def test_keys(get_keys):
"""Test `keys` method for country returns valid keys for instantiation."""
country_keys = Country.keys
expected_keys = sorted(list(set(get_keys(selector))))
assert country_keys == expected_keys
def test_key():
"""Test `key` attribute of country instance."""
country_key = Country(key).key
expected_key = key
assert country_key == expected_key
def test_children(get_children):
"""Test `children` attribute of country instance."""
country_children = Country(key).children
expected_children = list(get_children(selector, key))
assert country_children == expected_children
def test_title(get_title):
"""Test `title` attribute of country instance."""
country_title = Country(key).title
expected_title = get_title(selector, key)
assert country_title == expected_title
def test_url_raises():
"""`url` attribute should raise an exception for Country."""
with pytest.raises(AttributeError, match="'Country' object has no attribute 'url'"):
Country(key).url
def test_all():
"""Test `all` method yields all country instances."""
country_instances = [country for country in Country.all()]
assert all(isinstance(item, Country) for item in country_instances)
def test_key_raises():
"""Constructing Country with an invalid key should raise an exception."""
with pytest.raises(ValueError):
Country("invalid_key")
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import six
from toscaparser.common import exception
from toscaparser import functions
from toscaparser.tests.base import TestCase
from toscaparser.tosca_template import ToscaTemplate
from toscaparser.utils.gettextutils import _
class IntrinsicFunctionsTest(TestCase):
tosca_tpl = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"data/tosca_single_instance_wordpress.yaml")
params = {'db_name': 'my_wordpress', 'db_user': 'my_db_user',
'db_root_pwd': '12345678'}
tosca = ToscaTemplate(tosca_tpl, parsed_params=params)
def setUp(self):
TestCase.setUp(self)
        exception.ExceptionCollector.stop()  # Added because negative test cases sometimes fail otherwise.
def _get_node(self, node_name, tosca=None):
if tosca is None:
tosca = self.tosca
return [
node for node in tosca.nodetemplates
if node.name == node_name][0]
def _get_operation(self, interfaces, operation):
return [
interface for interface in interfaces
if interface.name == operation][0]
def _get_property(self, node_template, property_name):
return [prop.value for prop in node_template.get_properties_objects()
if prop.name == property_name][0]
def _get_inputs_dict(self):
inputs = {}
for input in self.tosca.inputs:
inputs[input.name] = input.default
return inputs
def _get_input(self, name):
        return self._get_inputs_dict()[name]
def test_get_property(self):
wordpress = self._get_node('wordpress')
operation = self._get_operation(wordpress.interfaces, 'configure')
wp_db_password = operation.inputs['wp_db_password']
self.assertIsInstance(wp_db_password, functions.GetProperty)
result = wp_db_password.result()
self.assertEqual('wp_pass', result)
def test_get_property_with_input_param(self):
wordpress = self._get_node('wordpress')
operation = self._get_operation(wordpress.interfaces, 'configure')
wp_db_user = operation.inputs['wp_db_user']
self.assertIsInstance(wp_db_user, functions.GetProperty)
result = wp_db_user.result()
self.assertEqual('my_db_user', result)
def test_unknown_capability_property(self):
self.assertRaises(exception.ValidationError, self._load_template,
'functions/test_unknown_capability_property.yaml')
exception.ExceptionCollector.assertExceptionMessage(
KeyError,
_('\'Property "unknown" was not found in capability '
'"database_endpoint" of node template "database" referenced '
'from node template "database".\''))
def test_get_input_in_properties(self):
mysql_dbms = self._get_node('mysql_dbms')
expected_inputs = ['db_root_pwd', 'db_port']
props = mysql_dbms.get_properties()
for key in props.keys():
prop = props[key]
self.assertIsInstance(prop.value, functions.GetInput)
expected_inputs.remove(prop.value.input_name)
self.assertListEqual(expected_inputs, [])
def test_get_input_validation(self):
self.assertRaises(
exception.ValidationError, self._load_template,
'functions/test_unknown_input_in_property.yaml')
exception.ExceptionCollector.assertExceptionMessage(
exception.UnknownInputError,
_('Unknown input "objectstore_name".'))
self.assertRaises(
exception.ValidationError, self._load_template,
'functions/test_unknown_input_in_interface.yaml')
exception.ExceptionCollector.assertExceptionMessage(
exception.UnknownInputError,
_('Unknown input "image_id".'))
self.assertRaises(
exception.ValidationError, self._load_template,
'functions/test_invalid_function_signature.yaml')
exception.ExceptionCollector.assertExceptionMessage(
ValueError,
_('Expected one argument for function "get_input" but received '
'"[\'cpus\', \'cpus\']".'))
def test_get_input_default_value_result(self):
mysql_dbms = self._get_node('mysql_dbms')
dbms_port = self._get_property(mysql_dbms, 'port')
self.assertEqual(3306, dbms_port.result())
dbms_root_password = self._get_property(mysql_dbms,
'root_password')
self.assertEqual(dbms_root_password.result(), '12345678')
def test_get_property_with_host(self):
tosca_tpl = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"data/functions/test_get_property_with_host.yaml")
mysql_database = self._get_node('mysql_database',
ToscaTemplate(tosca_tpl,
parsed_params={
'db_root_pwd': '123'
}))
operation = self._get_operation(mysql_database.interfaces, 'configure')
db_port = operation.inputs['db_port']
self.assertIsInstance(db_port, functions.GetProperty)
result = db_port.result()
self.assertEqual(3306, result)
test = operation.inputs['test']
self.assertIsInstance(test, functions.GetProperty)
result = test.result()
self.assertEqual(1, result)
def test_get_property_with_nested_params(self):
tosca_tpl = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"data/functions/tosca_nested_property_names_indexes.yaml")
webserver = self._get_node('wordpress',
ToscaTemplate(tosca_tpl,
parsed_params={
'db_root_pwd': '1234'}))
operation = self._get_operation(webserver.interfaces, 'configure')
wp_endpoint_prot = operation.inputs['wp_endpoint_protocol']
self.assertIsInstance(wp_endpoint_prot, functions.GetProperty)
self.assertEqual('tcp', wp_endpoint_prot.result())
wp_list_prop = operation.inputs['wp_list_prop']
self.assertIsInstance(wp_list_prop, functions.GetProperty)
self.assertEqual(3, wp_list_prop.result())
def test_get_property_with_capabilties_inheritance(self):
tosca_tpl = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"data/functions/test_capabilties_inheritance.yaml")
some_node = self._get_node('some_node',
ToscaTemplate(tosca_tpl,
parsed_params={
'db_root_pwd': '1234'}))
operation = self._get_operation(some_node.interfaces, 'configure')
some_input = operation.inputs['some_input']
self.assertIsInstance(some_input, functions.GetProperty)
self.assertEqual('someval', some_input.result())
def test_get_property_source_target_keywords(self):
tosca_tpl = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"data/functions/test_get_property_source_target_keywords.yaml")
tosca = ToscaTemplate(tosca_tpl,
parsed_params={'db_root_pwd': '1234'})
for node in tosca.nodetemplates:
for relationship, trgt in node.relationships.items():
rel_template = trgt.get_relationship_template()[0]
break
operation = self._get_operation(rel_template.interfaces,
'pre_configure_source')
target_test = operation.inputs['target_test']
self.assertIsInstance(target_test, functions.GetProperty)
self.assertEqual(1, target_test.result())
source_port = operation.inputs['source_port']
self.assertIsInstance(source_port, functions.GetProperty)
self.assertEqual(3306, source_port.result())
def test_get_prop_cap_host(self):
tosca_tpl = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"data/functions/test_get_prop_cap_host.yaml")
some_node = self._get_node('some_node',
ToscaTemplate(tosca_tpl))
some_prop = some_node.get_properties()['some_prop']
self.assertIsInstance(some_prop.value, functions.GetProperty)
self.assertEqual('someval', some_prop.value.result())
def test_get_prop_cap_bool(self):
tosca_tpl = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"data/functions/test_get_prop_cap_bool.yaml")
some_node = self._get_node('software',
ToscaTemplate(tosca_tpl))
some_prop = some_node.get_properties()['some_prop']
self.assertIsInstance(some_prop.value, functions.GetProperty)
self.assertEqual(False, some_prop.value.result())
class GetAttributeTest(TestCase):
def _load_template(self, filename):
return ToscaTemplate(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data',
filename),
parsed_params={'db_root_pwd': '1234'})
def _get_operation(self, interfaces, operation):
return [
interface for interface in interfaces
if interface.name == operation][0]
def test_get_attribute_in_outputs(self):
tpl = self._load_template('tosca_single_instance_wordpress.yaml')
website_url_output = [
x for x in tpl.outputs if x.name == 'website_url'][0]
self.assertIsInstance(website_url_output.value, functions.GetAttribute)
self.assertEqual('server', website_url_output.value.node_template_name)
self.assertEqual('private_address',
website_url_output.value.attribute_name)
def test_get_attribute_invalid_args(self):
expected_msg = _('Illegal arguments for function "get_attribute".'
' Expected arguments: "node-template-name", '
'"req-or-cap"(optional), "property name"')
err = self.assertRaises(ValueError,
functions.get_function, None, None,
{'get_attribute': []})
self.assertIn(expected_msg, six.text_type(err))
err = self.assertRaises(ValueError,
functions.get_function, None, None,
{'get_attribute': ['x']})
self.assertIn(expected_msg, six.text_type(err))
def test_get_attribute_unknown_node_template_name(self):
self.assertRaises(
exception.ValidationError, self._load_template,
'functions/test_get_attribute_unknown_node_template_name.yaml')
exception.ExceptionCollector.assertExceptionMessage(
KeyError,
_('\'Node template "unknown_node_template" was not found.\''))
def test_get_attribute_unknown_attribute(self):
self.assertRaises(
exception.ValidationError, self._load_template,
'functions/test_get_attribute_unknown_attribute_name.yaml')
exception.ExceptionCollector.assertExceptionMessage(
KeyError,
_('\'Attribute "unknown_attribute" was not found in node template '
'"server".\''))
def test_get_attribute_host_keyword(self):
tpl = self._load_template(
'functions/test_get_attribute_host_keyword.yaml')
def assert_get_attribute_host_functionality(node_template_name):
node = [x for x in tpl.nodetemplates
if x.name == node_template_name][0]
configure_op = [
x for x in node.interfaces if x.name == 'configure'][0]
ip_addr_input = configure_op.inputs['ip_address']
self.assertIsInstance(ip_addr_input, functions.GetAttribute)
self.assertEqual('server',
ip_addr_input.get_referenced_node_template().name)
assert_get_attribute_host_functionality('dbms')
assert_get_attribute_host_functionality('database')
def test_get_attribute_host_not_found(self):
self.assertRaises(
exception.ValidationError, self._load_template,
'functions/test_get_attribute_host_not_found.yaml')
exception.ExceptionCollector.assertExceptionMessage(
ValueError,
_('"get_attribute: [ HOST, ... ]" was used in node template '
'"server" but "tosca.relationships.HostedOn" was not found in '
'the relationship chain.'))
def test_get_attribute_illegal_host_in_outputs(self):
self.assertRaises(
exception.ValidationError, self._load_template,
'functions/test_get_attribute_illegal_host_in_outputs.yaml')
exception.ExceptionCollector.assertExceptionMessage(
ValueError,
_('"get_attribute: [ HOST, ... ]" is not allowed in "outputs" '
'section of the TOSCA template.'))
def test_get_attribute_with_index(self):
self._load_template(
'functions/test_get_attribute_with_index.yaml')
def test_get_attribute_with_index_error(self):
self.assertRaises(
exception.ValidationError, self._load_template,
'functions/test_get_attribute_with_index_error.yaml')
exception.ExceptionCollector.assertExceptionMessage(
ValueError,
_('Illegal arguments for function "get_attribute". '
'Unexpected attribute/index value "0"'))
def test_get_attribute_source_target_keywords(self):
tosca_tpl = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"data/functions/test_get_attribute_source_target_keywords.yaml")
tosca = ToscaTemplate(tosca_tpl,
parsed_params={'db_root_pwd': '12345678'})
for node in tosca.nodetemplates:
for relationship, trgt in node.relationships.items():
rel_template = trgt.get_relationship_template()[0]
break
operation = self._get_operation(rel_template.interfaces,
'pre_configure_source')
target_test = operation.inputs['target_test']
self.assertIsInstance(target_test, functions.GetAttribute)
source_port = operation.inputs['source_port']
self.assertIsInstance(source_port, functions.GetAttribute)
def test_get_attribute_with_nested_params(self):
self._load_template(
'functions/test_get_attribute_with_nested_params.yaml')
def test_implicit_attribute(self):
self.assertIsNotNone(self._load_template(
'functions/test_get_implicit_attribute.yaml'))
def test_get_attribute_capability_inheritance(self):
self.assertIsNotNone(self._load_template(
'functions/test_container_cap_child.yaml'))
class ConcatTest(TestCase):
def _load_template(self, filename):
return ToscaTemplate(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
filename))
def test_validate_concat(self):
tosca = self._load_template("data/functions/test_concat.yaml")
server_url_output = [
output for output in tosca.outputs if output.name == 'url'][0]
func = functions.get_function(self, tosca.outputs,
server_url_output.value)
self.assertIsInstance(func, functions.Concat)
self.assertRaises(exception.ValidationError, self._load_template,
'data/functions/test_concat_invalid.yaml')
exception.ExceptionCollector.assertExceptionMessage(
ValueError,
_('Invalid arguments for function "concat". Expected at least '
'one arguments.'))
class TokenTest(TestCase):
def _load_template(self, filename):
return ToscaTemplate(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
filename))
def test_validate_token(self):
tosca = self._load_template("data/functions/test_token.yaml")
server_url_output = [
output for output in tosca.outputs if output.name == 'url'][0]
func = functions.get_function(self, tosca.outputs,
server_url_output.value)
self.assertIsInstance(func, functions.Token)
self.assertRaises(exception.ValidationError, self._load_template,
'data/functions/test_token_invalid.yaml')
exception.ExceptionCollector.assertExceptionMessage(
ValueError,
_('Invalid arguments for function "token". Expected at least '
'three arguments.'))
exception.ExceptionCollector.assertExceptionMessage(
ValueError,
_('Invalid arguments for function "token". Expected '
'integer value as third argument.'))
exception.ExceptionCollector.assertExceptionMessage(
ValueError,
_('Invalid arguments for function "token". Expected '
'single char value as second argument.'))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the masterfile package: https://github.com/uwmadison-chm/masterfile
# Copyright (c) 2020 Board of Regents of the University of Wisconsin System
# Written by Nate Vack <[email protected]> at the Center for Healthy Minds
# at the University of Wisconsin-Madison.
# Released under MIT licence; see LICENSE at the package root.
from __future__ import absolute_import
import logging
from collections import defaultdict
from masterfile import errors
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def validate(mf):
logger.debug('validators.duplicate_column:validate()')
duplicates = _find_duplicate_columns(mf)
errlist = [
errors.DuplicateColumnError(
locations=locations,
message='duplicate column {}'.format(col)
)
for col, locations in duplicates.items()
]
logger.debug('found {} errors'.format(len(errlist)))
return errlist
def _map_column_locations(mf):
"""
Find all the places where a column is used. Algorithm:
* Start a column_locations dict. Keys will be column names, values will
be lists of locations.
* Iterate over all individual masterfiles
* Iterate over all columns as column_name
    * For each column, append its location (filename, column number)
to column_locations[column_name]
* Return column_locations
"""
column_locations = defaultdict(list)
for f, df in zip(mf._candidate_data_files, mf._unprocessed_dataframes):
for col_index, col_name in enumerate(df.columns):
column_locations[col_name].append(errors.Location.smart_create(
filename=f, column_index=col_index))
return column_locations
def _find_duplicate_columns(mf):
"""
Find every column that occurs more than once in the masterfile data,
except for the index column.
"""
column_locations = _map_column_locations(mf)
dupes = {
column_name: locations
for (column_name, locations) in column_locations.items()
if (len(locations) > 1 and (not column_name == mf.index_column))
}
return dupes
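# Worked example (a sketch, not part of the package): if the column "age"
# appears in both a.csv and b.csv while only the index column is shared
# otherwise, validate() returns a single DuplicateColumnError whose locations
# are the two (file, column) positions of "age"; the index column itself is
# never reported as a duplicate.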
|
# Copyright (c) 2014 Catalyst IT Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import tables
class CSVSummary(tables.LinkAction):
name = "csv_summary"
verbose_name = _("Download CSV Summary")
icon = "download"
def get_link_url(self):
return self.table.kwargs['billing'].csv_link()
class BillingTable(tables.DataTable):
resource = tables.Column("resource",
link=("#"),
verbose_name=_("Resource"))
count = tables.Column("count", verbose_name=_("Count"))
cost = tables.Column("cost", verbose_name=_("Cost"))
class Meta:
name = "billing"
verbose_name = _("Breakdown")
columns = ("resource", "count", "cost")
table_actions = (CSVSummary,)
multi_select = False
|
line = input()
# print 'NO' if the line contains any of these letters, 'YES' otherwise
if any(ch in line for ch in 'ABCDEFGJKLMPRQTUVWY'):
    print('NO')
else:
    print('YES') |
from sys import argv
from argparse import ArgumentParser
from csv import reader, writer
from numpy import array
from collections import deque
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from efficiency import migrations
def create_parser():
"""Parse command-line arguments and return parser."""
parser = ArgumentParser(
description='Calculate the efficiency of using snapshots for the '\
'selected algorithm and the number of active snapshots. '\
'Efficiency is the amount of data migrating and '\
'the ratio of occupied logical blocks to physical ones.'
)
parser.add_argument('filename', type=str,
help='name of csv file (without extension) '\
'that represents the data distribution model')
parser.add_argument('nactive', type=int,
help='number of active snapshots')
algorithm = parser.add_mutually_exclusive_group(required=True)
algorithm.add_argument('--cow', action='store_true',
help='choose copy-on-write algorithm')
algorithm.add_argument('--row', action="store_true",
help='choose redirect-on-write algorithm')
algorithm.add_argument('--row_m', action="store_true",
help='choose redirect-on-write algorithm '\
'with forward refs')
return parser
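# Example invocation (a sketch; 'mymodel' is a placeholder model name):
#   python <this script> mymodel 3 --cow
# reads models/mymodel.csv, computes copy-on-write efficiency with three
# active snapshots and appends one result row to models/results.csv.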
def row_efficiency(file_name, nactive, modified=False):
"""Compute and return the number of migrating data for row.
Arguments:
file_name -- name of csv file (with extension)
nactive -- number of active snapshots
"""
migrations_row = migrations.row_m if modified else migrations.row
migrations_count = 0
source = dict()
snapshots = dict()
with open(file_name, newline='') as fin:
block_reader = reader(fin)
for line in block_reader:
isnap, iblock = int(line[0]), int(line[1])
if not iblock:
print(isnap)
block = array(line[2:], dtype=int).astype(bool)
try:
migrations_count += migrations_row(snapshots[iblock])
source[iblock] += block
snapshots[iblock].append(block)
except KeyError:
source[iblock] = block.copy()
snapshots[iblock] = deque([block], maxlen=nactive)
logical = 0
for _, block in source.items():
logical += bool(sum(block))
physical = 0
for _, snapshot in snapshots.items():
for block in snapshot:
physical += bool(sum(block))
return migrations_count, logical / physical
def cow_efficiency(file_name, nactive):
"""Compute and return the number of migrating data for cow.
Arguments:
file_name -- name of csv file (with extension)
nactive -- number of active snapshots
"""
migrations_count = 0
source = dict()
snapshots = dict()
with open(file_name, newline='') as fin:
block_reader = reader(fin)
for line in block_reader:
isnap, iblock = int(line[0]), int(line[1])
if not iblock:
print(isnap)
block = array(line[2:], dtype=int).astype(bool)
try:
migrations_count += migrations.cow(source[iblock], block)
source[iblock] += block
snapshots[iblock].append(block)
except KeyError:
source[iblock] = block.copy()
snapshots[iblock] = deque([block], maxlen=nactive)
logical = 0
for _, block in source.items():
logical += bool(sum(block))
physical = 0
for _, snapshot in snapshots.items():
for block in snapshot:
physical += bool(sum(block))
return migrations_count, logical / physical
if __name__ == '__main__':
parser = create_parser()
args = parser.parse_args(argv[1:])
result = (0, 0)
algorithm = 'None'
file_name = 'models/' + args.filename + '.csv'
if args.cow:
algorithm = 'cow'
result = cow_efficiency(file_name, args.nactive)
elif args.row:
algorithm = 'row'
result = row_efficiency(file_name, args.nactive)
elif args.row_m:
algorithm = 'row_m'
result = row_efficiency(file_name, args.nactive, True)
with open('models/results.csv', 'a', newline='') as fout:
csv_writer = writer(fout)
csv_writer.writerow([args.filename, args.nactive, algorithm, result[0], result[1]])
print(result) |
import requests, json, cv2, glob, os
from playsound import playsound
addr = 'http://localhost:5000'
test_url = addr + '/audify'
content_type = 'image/jpeg'
headers = {'content-type': content_type}
img = cv2.imread('../assets/book1.jpg')
_, img_encoded = cv2.imencode('.jpg', img)
response = requests.post(test_url, data=img_encoded.tobytes(), headers=headers)
list_of_files = glob.glob('./all_recordings/*')
latest_file = max(list_of_files, key=os.path.getctime)
print (latest_file)
playsound(latest_file) |
from django.db import models
# Create your models here.
class Category(models.Model):
title = models.CharField(max_length=50)
slug = models.SlugField()
class Meta:
verbose_name_plural = "Categories"
def __str__(self):
return self.title
class Post(models.Model):
title = models.CharField(max_length=100)
body = models.TextField()
category = models.ForeignKey(Category, on_delete=models.PROTECT)
date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
|
#! /usr/bin/python
'''
This part of the program is used to manipulate the local directory structure
'''
__author__ = "xiaofeng"
__date__ = "2019-12-8"
import pickle
import time
from datetime import datetime
from ServerLog import ServerLogger  # import the server-side logging library
import logging
class OpeDir:
def __init__(self):
self.filename = "dir.data"
        self.logger = ServerLogger()
        # exceptions need to be handled on the server side
try:
with open(self.filename, "rb") as file:
self.dir = pickle.load(file)
print("Reading the local directory data...", end = " ")
self.logger.writingLog(logging.INFO, "Reading the local directory data...")
print("\t done.")
self.logger.writingLog(logging.INFO, "\t done.")
except:
self.logger.writingLog(logging.WARNING, "No local directory records.")
def updateDir(self):
with open(self.filename, "rb") as file:
self.dir = pickle.load(file)
def insertRecord(self, _record):
peer_md5 = self.getAllMd5()
if _record[3] in peer_md5:
print("The record already exists.")
else:
            # build the new record
new_record = {"peer_id": str(_record[1]), "file_name": _record[2], "Checksum": \
_record[3], "Date_added": str(datetime.now())}
            # insert the new record
self.dir.append(new_record)
            # write the directory to persistent storage
self.writeDirToFile()
print("A new record has been registered. ", new_record["peer_id"], " ", new_record["file_name"])
def writeDirToFile(self):
with open(self.filename, "wb") as file:
pickle.dump(self.dir, file)
return
def getAllMd5(self):
        s = set()  # a set is used so that there are no duplicate values
for i in self.dir:
s.add(i["Checksum"])
return s
def searchRecord(self, file_name):
print("Updating local data...")
        self.updateDir()  # refresh the directory data
results = list()
for i in self.dir:
if i["file_name"] == file_name:
results.append(i)
else:
continue
if len(results) > 0:
print("Peer_Id | File_name | Checksum | Date_added :\n")
for item in results:
print(" ", item["peer_id"], " ", item["file_name"], " ", item["Checksum"], " ", item["Date_added"])
else:
print("There is no file has this name or there is no file in server at all\n")
return
def listAll(self):
print("Updating local data...")
        self.updateDir()  # refresh the directory data
print("Peer_Id | file_name | Checksum | Date_added:\n")
for i in self.dir:
print(" ", i["peer_id"], " ", i["file_name"], " ", i["Checksum"], " ", i["Date_added"])
return
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
import notifications
from django_project.urls import router
urlpatterns = patterns('',
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api-token-auth/', 'rest_framework.authtoken.views.obtain_auth_token'),
url(r'^admin/', include(admin.site.urls)),
)
urlpatterns += patterns('',
    url(r'^inbox/notifications/', include(notifications.urls)),
url(r'^toggle/(?P<app>[^\/]+)/(?P<model>[^\/]+)/(?P<id>\d+)/$', 'follow.views.toggle', name='toggle'),
url(r'^toggle/(?P<app>[^\/]+)/(?P<model>[^\/]+)/(?P<id>\d+)/$', 'follow.views.toggle', name='follow'),
url(r'^toggle/(?P<app>[^\/]+)/(?P<model>[^\/]+)/(?P<id>\d+)/$', 'follow.views.toggle', name='unfollow'),
)
|
# Generated from rdsquery.g4 by ANTLR 4.9.2
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\34")
buf.write("\u00df\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\3\2\3")
buf.write("\2\3\2\3\2\3\2\3\2\3\3\3\3\3\3\3\3\3\4\3\4\3\5\3\5\3\5")
buf.write("\3\5\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13")
buf.write("\3\f\3\f\3\r\3\r\3\r\3\16\3\16\3\16\3\17\3\17\3\17\3\20")
buf.write("\3\20\3\21\3\21\6\21u\n\21\r\21\16\21v\3\21\3\21\3\22")
buf.write("\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\24\3\24\3\24\3\25")
buf.write("\3\25\3\25\3\26\3\26\3\26\3\27\3\27\3\27\3\30\3\30\6\30")
buf.write("\u0091\n\30\r\30\16\30\u0092\3\31\3\31\3\31\3\31\3\31")
buf.write("\3\32\3\32\5\32\u009c\n\32\3\33\3\33\3\33\3\33\3\33\3")
buf.write("\33\5\33\u00a4\n\33\3\34\3\34\3\34\3\34\3\34\3\34\3\35")
buf.write("\3\35\3\35\3\36\3\36\3\37\3\37\3\37\5\37\u00b4\n\37\3")
buf.write(" \6 \u00b7\n \r \16 \u00b8\3 \3 \6 \u00bd\n \r \16 \u00be")
buf.write("\3!\6!\u00c2\n!\r!\16!\u00c3\3\"\3\"\3\"\3\"\3\"\3\"\3")
buf.write("\"\3\"\3\"\5\"\u00cf\n\"\3#\6#\u00d2\n#\r#\16#\u00d3\3")
buf.write("#\7#\u00d7\n#\f#\16#\u00da\13#\3$\3$\3$\3$\2\2%\3\3\5")
buf.write("\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33")
buf.write("\17\35\20\37\21!\22#\2%\2\'\2)\2+\2-\2/\2\61\2\63\23\65")
buf.write("\24\67\259\26;\2=\27?\30A\31C\32E\33G\34\3\2\6\3\2\62")
buf.write(";\4\2--//\4\2C\\c|\5\2\13\f\16\17\"\"\2\u00e1\2\3\3\2")
buf.write("\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2")
buf.write("\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2")
buf.write("\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35")
buf.write("\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2\63\3\2\2\2\2\65\3\2")
buf.write("\2\2\2\67\3\2\2\2\29\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A")
buf.write("\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\3I\3\2\2\2\5")
buf.write("O\3\2\2\2\7S\3\2\2\2\tU\3\2\2\2\13Y\3\2\2\2\r[\3\2\2\2")
buf.write("\17]\3\2\2\2\21_\3\2\2\2\23a\3\2\2\2\25c\3\2\2\2\27e\3")
buf.write("\2\2\2\31g\3\2\2\2\33j\3\2\2\2\35m\3\2\2\2\37p\3\2\2\2")
buf.write("!r\3\2\2\2#z\3\2\2\2%\177\3\2\2\2\'\u0082\3\2\2\2)\u0085")
buf.write("\3\2\2\2+\u0088\3\2\2\2-\u008b\3\2\2\2/\u008e\3\2\2\2")
buf.write("\61\u0094\3\2\2\2\63\u009b\3\2\2\2\65\u009d\3\2\2\2\67")
buf.write("\u00a5\3\2\2\29\u00ab\3\2\2\2;\u00ae\3\2\2\2=\u00b3\3")
buf.write("\2\2\2?\u00b6\3\2\2\2A\u00c1\3\2\2\2C\u00ce\3\2\2\2E\u00d1")
buf.write("\3\2\2\2G\u00db\3\2\2\2IJ\7h\2\2JK\7t\2\2KL\7q\2\2LM\7")
buf.write("o\2\2MN\7\"\2\2N\4\3\2\2\2OP\7v\2\2PQ\7q\2\2QR\7\"\2\2")
buf.write("R\6\3\2\2\2ST\7\61\2\2T\b\3\2\2\2UV\7\60\2\2VW\7\60\2")
buf.write("\2WX\7\60\2\2X\n\3\2\2\2YZ\7?\2\2Z\f\3\2\2\2[\\\7/\2\2")
buf.write("\\\16\3\2\2\2]^\7-\2\2^\20\3\2\2\2_`\7%\2\2`\22\3\2\2")
buf.write("\2ab\7\"\2\2b\24\3\2\2\2cd\7@\2\2d\26\3\2\2\2ef\7>\2\2")
buf.write("f\30\3\2\2\2gh\7@\2\2hi\7?\2\2i\32\3\2\2\2jk\7>\2\2kl")
buf.write("\7?\2\2l\34\3\2\2\2mn\7#\2\2no\7?\2\2o\36\3\2\2\2pq\7")
buf.write("\60\2\2q \3\2\2\2rt\7]\2\2su\t\2\2\2ts\3\2\2\2uv\3\2\2")
buf.write("\2vt\3\2\2\2vw\3\2\2\2wx\3\2\2\2xy\7_\2\2y\"\3\2\2\2z")
buf.write("{\5;\36\2{|\5;\36\2|}\5;\36\2}~\5;\36\2~$\3\2\2\2\177")
buf.write("\u0080\5;\36\2\u0080\u0081\5;\36\2\u0081&\3\2\2\2\u0082")
buf.write("\u0083\5;\36\2\u0083\u0084\5;\36\2\u0084(\3\2\2\2\u0085")
buf.write("\u0086\5;\36\2\u0086\u0087\5;\36\2\u0087*\3\2\2\2\u0088")
buf.write("\u0089\5;\36\2\u0089\u008a\5;\36\2\u008a,\3\2\2\2\u008b")
buf.write("\u008c\5;\36\2\u008c\u008d\5;\36\2\u008d.\3\2\2\2\u008e")
buf.write("\u0090\7\60\2\2\u008f\u0091\5;\36\2\u0090\u008f\3\2\2")
buf.write("\2\u0091\u0092\3\2\2\2\u0092\u0090\3\2\2\2\u0092\u0093")
buf.write("\3\2\2\2\u0093\60\3\2\2\2\u0094\u0095\t\3\2\2\u0095\u0096")
buf.write("\5)\25\2\u0096\u0097\7<\2\2\u0097\u0098\5+\26\2\u0098")
buf.write("\62\3\2\2\2\u0099\u009c\7\\\2\2\u009a\u009c\5\61\31\2")
buf.write("\u009b\u0099\3\2\2\2\u009b\u009a\3\2\2\2\u009c\64\3\2")
buf.write("\2\2\u009d\u009e\5)\25\2\u009e\u009f\7<\2\2\u009f\u00a0")
buf.write("\5+\26\2\u00a0\u00a1\7<\2\2\u00a1\u00a3\5-\27\2\u00a2")
buf.write("\u00a4\5/\30\2\u00a3\u00a2\3\2\2\2\u00a3\u00a4\3\2\2\2")
buf.write("\u00a4\66\3\2\2\2\u00a5\u00a6\5#\22\2\u00a6\u00a7\7/\2")
buf.write("\2\u00a7\u00a8\5%\23\2\u00a8\u00a9\7/\2\2\u00a9\u00aa")
buf.write("\5\'\24\2\u00aa8\3\2\2\2\u00ab\u00ac\5\65\33\2\u00ac\u00ad")
buf.write("\5\63\32\2\u00ad:\3\2\2\2\u00ae\u00af\t\2\2\2\u00af<\3")
buf.write("\2\2\2\u00b0\u00b4\5? \2\u00b1\u00b4\5A!\2\u00b2\u00b4")
buf.write("\5C\"\2\u00b3\u00b0\3\2\2\2\u00b3\u00b1\3\2\2\2\u00b3")
buf.write("\u00b2\3\2\2\2\u00b4>\3\2\2\2\u00b5\u00b7\t\2\2\2\u00b6")
buf.write("\u00b5\3\2\2\2\u00b7\u00b8\3\2\2\2\u00b8\u00b6\3\2\2\2")
buf.write("\u00b8\u00b9\3\2\2\2\u00b9\u00ba\3\2\2\2\u00ba\u00bc\7")
buf.write("\60\2\2\u00bb\u00bd\t\2\2\2\u00bc\u00bb\3\2\2\2\u00bd")
buf.write("\u00be\3\2\2\2\u00be\u00bc\3\2\2\2\u00be\u00bf\3\2\2\2")
buf.write("\u00bf@\3\2\2\2\u00c0\u00c2\5;\36\2\u00c1\u00c0\3\2\2")
buf.write("\2\u00c2\u00c3\3\2\2\2\u00c3\u00c1\3\2\2\2\u00c3\u00c4")
buf.write("\3\2\2\2\u00c4B\3\2\2\2\u00c5\u00c6\7v\2\2\u00c6\u00c7")
buf.write("\7t\2\2\u00c7\u00c8\7w\2\2\u00c8\u00cf\7g\2\2\u00c9\u00ca")
buf.write("\7h\2\2\u00ca\u00cb\7c\2\2\u00cb\u00cc\7n\2\2\u00cc\u00cd")
buf.write("\7u\2\2\u00cd\u00cf\7g\2\2\u00ce\u00c5\3\2\2\2\u00ce\u00c9")
buf.write("\3\2\2\2\u00cfD\3\2\2\2\u00d0\u00d2\t\4\2\2\u00d1\u00d0")
buf.write("\3\2\2\2\u00d2\u00d3\3\2\2\2\u00d3\u00d1\3\2\2\2\u00d3")
buf.write("\u00d4\3\2\2\2\u00d4\u00d8\3\2\2\2\u00d5\u00d7\t\2\2\2")
buf.write("\u00d6\u00d5\3\2\2\2\u00d7\u00da\3\2\2\2\u00d8\u00d6\3")
buf.write("\2\2\2\u00d8\u00d9\3\2\2\2\u00d9F\3\2\2\2\u00da\u00d8")
buf.write("\3\2\2\2\u00db\u00dc\t\5\2\2\u00dc\u00dd\3\2\2\2\u00dd")
buf.write("\u00de\b$\2\2\u00deH\3\2\2\2\16\2v\u0092\u009b\u00a3\u00b3")
buf.write("\u00b8\u00be\u00c3\u00ce\u00d3\u00d8\3\b\2\2")
return buf.getvalue()
class rdsqueryLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
T__9 = 10
T__10 = 11
T__11 = 12
T__12 = 13
T__13 = 14
T__14 = 15
GLUE = 16
TIME_OFFSET = 17
PARTIAL_TIME = 18
FULL_DATE = 19
FULL_TIME = 20
LITERAL = 21
REAL = 22
INTEGER = 23
BOOLEAN = 24
ID = 25
WS = 26
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'from '", "'to '", "'/'", "'...'", "'='", "'-'", "'+'", "'#'",
"' '", "'>'", "'<'", "'>='", "'<='", "'!='", "'.'" ]
symbolicNames = [ "<INVALID>",
"GLUE", "TIME_OFFSET", "PARTIAL_TIME", "FULL_DATE", "FULL_TIME",
"LITERAL", "REAL", "INTEGER", "BOOLEAN", "ID", "WS" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
"T__14", "GLUE", "DATE_FULL_YEAR", "DATE_MONTH", "DATE_M_DAY",
"TIME_HOUR", "TIME_MINUTE", "TIME_SECOND", "TIME_SEC_FRAC",
"TIME_NUM_OFFSET", "TIME_OFFSET", "PARTIAL_TIME", "FULL_DATE",
"FULL_TIME", "DIGIT", "LITERAL", "REAL", "INTEGER", "BOOLEAN",
"ID", "WS" ]
grammarFileName = "rdsquery.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9.2")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
|
import random
num = random.randrange(1000, 10000)
n = int(input("Guess the 4 digit number:"))
if (n == num):
print("Great! You guessed the number in just 1 try! You're a Mastermind!")
else:
ctr = 0
while (n != num):
ctr += 1
count = 0
n = str(n)
num = str(num)
correct = ['X']*4
for i in range(0, 4):
if (n[i] == num[i]):
count += 1
correct[i] = n[i]
else:
continue
if (count < 4) and (count != 0):
print("Not quite the number. But you did get ", count, " digit(s) correct!")
print("Also these numbers in your input were correct.")
for k in correct:
print(k, end=' ')
print('\n')
print('\n')
n = int(input("Enter your next choice of numbers: "))
elif (count == 0):
print("None of the numbers in your input match.")
n = int(input("Enter your next choice of numbers: "))
if n == num:
print("You've become a Mastermind!")
print("It took you only", ctr, "tries.")
#######################################################
################################################################
|
# -*- coding: utf-8 -*-
"""
File downloading functions.
"""
# Authors: Tan Tingyi <[email protected]>
import os
import shutil
import time
from urllib import parse, request
from urllib.error import HTTPError, URLError
from tqdm.auto import tqdm
from ._logging import logger
from .misc import sizeof_fmt
def _get_http(url, temp_file_name, initial_size, timeout):
"""Safely (resume a) download to a file from http(s)."""
# Actually do the reading
response = None
extra = ''
if initial_size > 0:
logger.debug(' Resuming at %s' % (initial_size, ))
req = request.Request(
url, headers={'Range': 'bytes=%s-' % (initial_size, )})
try:
response = request.urlopen(req, timeout=timeout)
content_range = response.info().get('Content-Range', None)
if (content_range is None
or not content_range.startswith('bytes %s-' %
(initial_size, ))):
raise IOError('Server does not support resuming')
except (KeyError, HTTPError, URLError, IOError):
initial_size = 0
response = None
else:
extra = ', resuming at %s' % (sizeof_fmt(initial_size), )
if response is None:
response = request.urlopen(request.Request(url), timeout=timeout)
file_size = int(response.headers.get('Content-Length', '0').strip())
file_size += initial_size
url = response.geturl()
logger.info('Downloading %s (%s%s)' % (url, sizeof_fmt(file_size), extra))
mode = 'ab' if initial_size > 0 else 'wb'
chunk_size = 8192 # 2 ** 13
with tqdm(desc='Downloading dataset',
total=file_size,
unit='B',
unit_scale=True,
unit_divisor=1024) as progress:
del file_size
del url
with open(temp_file_name, mode) as local_file:
while True:
t0 = time.time()
chunk = response.read(chunk_size)
dt = time.time() - t0
if dt < 0.01:
chunk_size *= 2
elif dt > 0.1 and chunk_size > 8192:
chunk_size = chunk_size // 2
if not chunk:
break
local_file.write(chunk)
progress.update(len(chunk))
def _fetch_file(url, file_name, resume=True, timeout=30.):
"""Load requested file, downloading it if needed or requested.
Parameters
----------
url: string
The url of file to be downloaded.
file_name: string
Name, along with the path, of where downloaded file will be saved.
resume: bool, optional
If true, try to resume partially downloaded files.
timeout : float
The URL open timeout.
"""
temp_file_name = file_name + ".part"
scheme = parse.urlparse(url).scheme
if scheme not in ('http', 'https'):
raise NotImplementedError('Cannot use scheme %r' % (scheme, ))
try:
# Triage resume
if not os.path.exists(temp_file_name):
resume = False
if resume:
with open(temp_file_name, 'rb', buffering=0) as local_file:
local_file.seek(0, 2)
initial_size = local_file.tell()
del local_file
else:
initial_size = 0
_get_http(url, temp_file_name, initial_size, timeout)
shutil.move(temp_file_name, file_name)
except Exception:
logger.error('Error while fetching file %s.'
' Dataset fetching aborted.' % url)
raise
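# Minimal usage sketch (URL and target path are placeholders, not part of this
# module): _fetch_file('https://example.com/data/sample.zip', '/tmp/sample.zip')
# downloads into '/tmp/sample.zip.part' and renames it to the final name once
# the transfer completes, resuming an existing partial download if possible.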
def _url_to_local_path(url, path):
"""Mirror a url path in a local destination (keeping folder structure)."""
destination = parse.urlparse(url).path
# First char should be '/', and it needs to be discarded
if len(destination) < 2 or destination[0] != '/':
raise ValueError('Invalid URL')
destination = os.path.join(path, request.url2pathname(destination)[1:])
return destination
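# Worked example (POSIX paths; values are placeholders):
# _url_to_local_path('https://host.example/eeg/sub01/run1.gdf', '/data/mirror')
# returns '/data/mirror/eeg/sub01/run1.gdf', mirroring the URL's folder
# structure below the given local root.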
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ._markdown import \
markdown_autogen, \
markdown_code_block, \
markdown_comment, \
markdown_inline
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, WSO2 Inc. (http://wso2.org) All Rights Reserved.
#
# WSO2 Inc. licenses this file to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file except
# in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ----------------------------------------------------------------------------
# Create summary of SAR reports
# ----------------------------------------------------------------------------
import argparse
import pandas as pd
import json
import re
def main():
parser = argparse.ArgumentParser(
description='Create SAR summary from CSV reports')
parser.add_argument('--start-timestamp', required=True,
help='Start timestamp in seconds.', type=int)
parser.add_argument('--end-timestamp', required=True,
help='End timestamp in seconds.', type=int)
parser.add_argument('--sar-csv-reports', required=True,
help='SAR CSV reports.', nargs='+', type=str)
parser.add_argument('--output-file', default="sar-summary.json", required=False,
help='Output JSON file')
args = parser.parse_args()
sar_averages = {}
for sar_report in args.sar_csv_reports:
try:
print('Reading {filename}'.format(filename=sar_report))
df = pd.read_csv(sar_report, sep=';')
except pd.errors.EmptyDataError:
print('WARNING: {filename} was empty. Skipping.'.format(
filename=sar_report))
continue
df = df[(df['timestamp'] >= args.start_timestamp)
& (df['timestamp'] <= args.end_timestamp)]
df = df.drop(columns=['hostname', 'interval', 'timestamp'])
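        # strip '%', '/' and '-' from the SAR column names, e.g. '%user' ->
        # 'user', 'rxkB/s' -> 'rxkBs', 'runq-sz' -> 'runqsz'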
df = df.rename(columns=lambda x: re.sub(r'[%/\-]', '', x))
sar_averages.update(df.mean().round(2).to_dict())
with open(args.output_file, 'w') as outfile:
json.dump(sar_averages, outfile)
if __name__ == "__main__":
main()
|
from django.apps import AppConfig
class LaudosConfig(AppConfig):
name = 'laudos'
|
import numpy as np
import pandas as pd
def weighted_random_choice(df, p, target_units):
"""
Proposal selection using weighted random choice.
Parameters
----------
df : DataFrame
Proposals to select from
p : Series
Weights for each proposal
target_units: int
Number of units to build
Returns
-------
build_idx : ndarray
Index of buildings selected for development
"""
# We don't know how many developments we will need, as they
# differ in net_units. If all developments have net_units of 1
    # then we need target_units of them. So we choose the smaller
# of available developments and target_units.
num_to_sample = int(min(len(df.index), target_units))
choices = np.random.choice(df.index.values,
size=num_to_sample,
replace=False, p=p)
tot_units = df.net_units.loc[choices].values.cumsum()
ind = int(np.searchsorted(tot_units, target_units,
side="left")) + 1
return choices[:ind]
def weighted_random_choice_multiparcel(df, p, target_units):
"""
Proposal selection using weighted random choice in the context of multiple
proposals per parcel.
Parameters
----------
df : DataFrame
Proposals to select from
p : Series
Weights for each proposal
target_units: int
Number of units to build
Returns
-------
build_idx : ndarray
Index of buildings selected for development
"""
choice_idx = weighted_random_choice(df, p, target_units)
choices = df.loc[choice_idx]
while True:
# If multiple proposals sampled for a given parcel, keep only one
choice_counts = choices.parcel_id.value_counts()
chosen_multiple = choice_counts[choice_counts > 1].index.values
single_choices = choices[~choices.parcel_id.isin(chosen_multiple)]
duplicate_choices = choices[choices.parcel_id.isin(chosen_multiple)]
keep_choice = duplicate_choices.parcel_id.drop_duplicates(keep='first')
dup_choices_to_keep = duplicate_choices.loc[keep_choice.index]
choices = pd.concat([single_choices, dup_choices_to_keep])
if choices.net_units.sum() >= target_units:
break
df = df[~df.parcel_id.isin(choices.parcel_id)]
if len(df) == 0:
break
p = p.reindex(df.index)
p = p / p.sum()
new_target = target_units - choices.net_units.sum()
next_choice_idx = weighted_random_choice(df, p, new_target)
next_choices = df.loc[next_choice_idx]
choices = pd.concat([choices, next_choices])
return choices.index.values
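if __name__ == '__main__':
    # Minimal self-contained sketch (synthetic data, not part of the original
    # module): three proposals with fixed weights; the cumulative net_units of
    # the sampled proposals determines how many of them are kept.
    proposals = pd.DataFrame({'parcel_id': [1, 2, 3],
                              'net_units': [2, 3, 1]},
                             index=[10, 11, 12])
    weights = pd.Series([0.5, 0.25, 0.25], index=proposals.index)
    print(weighted_random_choice(proposals, weights, target_units=4))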
|
from abc import ABC
from abc import abstractmethod
from dataclasses import dataclass
from dataclasses import field
from enum import Enum
from itertools import groupby
from itertools import product
from operator import itemgetter
from typing import Any
from typing import Iterable
from typing import List
from typing import Tuple
from mip import BINARY
from mip import Model
from mip import OptimizationStatus
from mip import maximize
from mip import xsum
from nltk import SnowballStemmer
from nltk import word_tokenize
class Language(str, Enum):
german = "german"
english = "english"
@dataclass
class Token:
tid: int
text: str
stages: List[Any] = field(default_factory=list)
class StageBase(ABC):
"""
Base class for matching stages. Matching stages produce representations
for each token that can be used to find matching tokens in the other
sentence.
"""
def __init__(self, weight: float, *args, **kwargs):
self._weight = weight
@property
def weight(self) -> float:
return self._weight
@abstractmethod
def process_tokens(self, tokens: List[Token]):
"""
Append a representation of each token to each token's stages list.
"""
def validate(self, tokens: List[Token]):
if len(set(len(token.stages) for token in tokens)) > 1:
raise AssertionError(
"Unequal number of stage representations in tokens."
)
class IdentityStage(StageBase):
""" Exact matching of tokens """
def process_tokens(self, tokens: List[Token]):
for token in tokens:
token.stages.append(token.text)
class StemmingStage(StageBase):
""" Use stemming to find tokens with the same stem """
def __init__(self, weight: float, language: Language, *args, **kwargs):
super().__init__(weight, *args, **kwargs)
self._stemmer = SnowballStemmer(
language=language.value, ignore_stopwords=True
)
def process_tokens(self, tokens: List[Token]):
for token in tokens:
token.stages.append(self._stemmer.stem(token.text))
def tokenize(text: str, language: Language) -> List[Token]:
return [
Token(tid=i, text=token)
for i, token in enumerate(word_tokenize(text, language=language.value))
]
def preprocess(
stages: List[StageBase], text: str, language: Language
) -> List[Token]:
"""
Tokenize the given text and apply all given matching stages to each token.
"""
tokens = tokenize(text, language)
for stage in stages:
stage.process_tokens(tokens)
stage.validate(tokens)
return tokens
def align(
hypothesis: List[Token], reference: List[Token], stages: List[StageBase]
) -> List[Tuple[int, int]]:
"""
Produce an alignment between matching tokens of each sentence.
If there are multiple possible alignments, the one with the minimum
number of crossings between matches is chosen. Matches are weighted
by their stage weight.
Uses the following binary integer linear program to find the optimal
alignment:
variables:
M(i,j): set of possible matches, defined by the different stages
C = M(i,j) x M(k,l): set of possible crossings of matches,
for each i < k and j > l OR i > k and j < l
W: weights for each stage
constraints:
each token is matched with not more than one other token
m(i,0) + ... + m(i, j) <= 1
m(0,j) + ... + m(i, j) <= 1
there must be as many matches as there possible matches between
connected nodes in the alignment graph
m(0,0) ... m(i,j) == sum(possible matches per clique)
if two matches cross each other, the corresponding crossing var is 1
m(i,j) + m(k,l) - c(i,j,k,l) <= 1
objective function:
maximize match scores, minimize crossings
MAX (SUM w(i,j) * m(i,j) - SUM c(i,j,k,l))
"""
# compute all possible matches with their best weight over all stages
match_weights = [
[float("-inf")] * len(reference) for _ in range(len(hypothesis))
]
for hyptoken, reftoken in product(hypothesis, reference):
weights = [
stage.weight
for i, stage in enumerate(stages)
if hyptoken.stages[i] == reftoken.stages[i]
]
if weights:
match_weights[hyptoken.tid][reftoken.tid] = max(weights)
# create BILP
model = Model("alignment")
model.verbose = 0 # set to n > 0 to see solver output
# create matching variables for each possible match
match_vars = {
(h, r): model.add_var(var_type=BINARY)
for h in range(len(hypothesis))
for r in range(len(reference))
if match_weights[h][r] > float("-inf")
}
# create crossing variables for each possible crossing of any two matches
# add constraint that crossing var will be 1 if both matches are selected
crossing_vars = []
for (i, j), (k, l) in product(match_vars.keys(), repeat=2):
if (i < k and j > l) or (i > k and j < l):
cvar = model.add_var(var_type=BINARY)
model += (
xsum([-1.0 * cvar, match_vars[(i, j)], match_vars[(k, l)]])
<= 1
)
crossing_vars.append(cvar)
# add uniqueness constraints: each word is matched to one other word
# words that can't be matched are already excluded
for h in range(len(hypothesis)):
matches = [
match_vars[(h, r)]
for r in range(len(reference))
if match_weights[h][r] > float("-inf")
]
if matches:
model += xsum(matches) <= 1
for r in range(len(reference)):
matches = [
match_vars[(h, r)]
for h in range(len(hypothesis))
if match_weights[h][r] > float("-inf")
]
if matches:
model += xsum(matches) <= 1
# require all possible matches to be part of the solution
cliques = compute_cliques(match_vars.keys())
required_matches = sum(
[
min(len(set(h for h, _ in clique)), len(set(r for _, r in clique)))
for clique in cliques
]
)
model += xsum(match_vars.values()) == required_matches
# define objective: maximize match scores and minimize crossings
model.objective = maximize(
xsum(
[match_weights[h][r] * match_vars[(h, r)] for h, r in match_vars]
+ [-1.0 * cvar for cvar in crossing_vars]
)
)
status = model.optimize()
assert (
status != OptimizationStatus.INFEASIBLE
), "The alignment problem was infeasible. Please open an issue on github."
return [match for match, var in match_vars.items() if var.x >= 0.99]
def compute_cliques(
matches: Iterable[Tuple[int, int]]
) -> List[List[Tuple[int, int]]]:
"""
Group matches that are connected in the alignment graph into cliques
"""
matches = list(matches)
# Simple union-find: group matches that connect the same node in the
# hypothesis or reference sentence.
sets = {index: index for index in range(len(matches))}
for i, (h_i, r_i) in enumerate(matches):
for j, (h_j, r_j) in enumerate(matches[i:]):
if h_i == h_j or r_i == r_j:
sets[i + j] = sets[i]
cliques = [
[matches[index] for index, _ in group]
for _, group in groupby(
sorted(sets.items(), key=itemgetter(1)), key=itemgetter(1)
)
]
return cliques
def count_chunks(alignment: List[Tuple[int, int]]) -> int:
"""
Find the minimum number of chunks the alignment can be grouped into.
"""
alignment = sorted(alignment)
num_chunks = 0
last_h, last_r = -2, -2
for (h, r) in alignment:
if abs(last_h - h) != 1 or abs(last_r - r) != 1:
num_chunks += 1
last_h, last_r = (h, r)
return num_chunks
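# Worked example for count_chunks (illustrative only): the alignment
# [(0, 0), (1, 1), (3, 2)] yields 2 chunks, because (0, 0) and (1, 1) are
# adjacent in both sentences while (3, 2) starts a new chunk.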
def meteor(
hypothesis: str,
reference: str,
stages: List[StageBase],
language: Language,
) -> float:
"""
Compute meteor score for the given sentence pair
with the given set of matching stages.
"""
hypo_tokens = preprocess(stages, hypothesis, language)
ref_tokens = preprocess(stages, reference, language)
if len(hypo_tokens) == 0 or len(ref_tokens) == 0:
if len(hypo_tokens) != len(ref_tokens):
return 0.0
return 1.0
alignment = align(hypo_tokens, ref_tokens, stages)
num_matches = len(alignment)
if num_matches == 0:
return 0.0
precision = num_matches / float(len(hypo_tokens))
recall = num_matches / float(len(ref_tokens))
fscore = (10 * precision * recall) / (recall + 9 * precision)
num_chunks = count_chunks(alignment)
penalty = 0.5 * (num_chunks / num_matches) ** 3 if num_chunks > 1 else 0
score = fscore * (1 - penalty)
return score
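# Worked example of the scoring above (illustrative numbers): with 3 matches,
# 4 hypothesis tokens and 5 reference tokens, precision = 0.75, recall = 0.6,
# fscore = (10 * 0.75 * 0.6) / (0.6 + 9 * 0.75) ~= 0.612; if the matches form
# 2 chunks, penalty = 0.5 * (2 / 3) ** 3 ~= 0.148 and the final score is
# roughly 0.612 * (1 - 0.148) ~= 0.522.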
def meteor_macro_avg(
hypotheses: List[str],
references: List[str],
stages: List[StageBase],
language: Language,
) -> float:
"""
Apply meteor score to multiple hypothesis-reference pairs
and return the macro average.
"""
scores = [
meteor(hypothesis, reference, stages, language)
for hypothesis, reference in zip(hypotheses, references)
]
return sum(scores) / len(scores)
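if __name__ == "__main__":
    # Hedged demo (assumes the NLTK tokenizer and stopword data are installed
    # and that python-mip can use its bundled CBC solver); the sentence pair
    # is an arbitrary example.
    demo_stages = [IdentityStage(weight=1.0),
                   StemmingStage(weight=0.6, language=Language.english)]
    print(meteor("the cats sat on the mat",
                 "the cat sat on a mat",
                 demo_stages, Language.english))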
|
#!/usr/bin/env python3
''' NowPlaying as run via python -m '''
#import faulthandler
import logging
import multiprocessing
import os
import pathlib
import socket
import sys
from PySide2.QtCore import QCoreApplication, QStandardPaths, Qt # pylint: disable=no-name-in-module
from PySide2.QtGui import QIcon # pylint: disable=no-name-in-module
from PySide2.QtWidgets import QApplication # pylint: disable=no-name-in-module
import nowplaying
import nowplaying.bootstrap
import nowplaying.config
import nowplaying.db
import nowplaying.systemtray
# pragma: no cover
#
# as of now, there isn't really much here to test... basic bootstrap stuff
#
def run_bootstrap(bundledir=None):
''' bootstrap the app '''
logpath = os.path.join(
QStandardPaths.standardLocations(QStandardPaths.DocumentsLocation)[0],
QCoreApplication.applicationName(), 'logs')
pathlib.Path(logpath).mkdir(parents=True, exist_ok=True)
logpath = os.path.join(logpath, "debug.log")
# we are in a hurry to get results. If it takes longer than
# 5 seconds, consider it a failure and move on. At some
# point this should be configurable but this is good enough for now
socket.setdefaulttimeout(5.0)
nowplaying.bootstrap.setuplogging(logpath=logpath)
nowplaying.bootstrap.upgrade(bundledir=bundledir)
# fail early if metadatadb can't be configured
metadb = nowplaying.db.MetadataDB()
metadb.setupsql()
def main():
''' main entrypoint '''
multiprocessing.freeze_support()
#faulthandler.enable()
# set paths for bundled files
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
bundledir = getattr(sys, '_MEIPASS',
os.path.abspath(os.path.dirname(__file__)))
else:
bundledir = os.path.abspath(os.path.dirname(__file__))
QCoreApplication.setAttribute(Qt.AA_ShareOpenGLContexts)
qapp = QApplication(sys.argv)
qapp.setQuitOnLastWindowClosed(False)
nowplaying.bootstrap.set_qt_names()
run_bootstrap(bundledir=bundledir)
config = nowplaying.config.ConfigFile(bundledir=bundledir)
logging.getLogger().setLevel(config.loglevel)
logging.captureWarnings(True)
tray = nowplaying.systemtray.Tray() # pylint: disable=unused-variable
icon = QIcon(config.iconfile)
qapp.setWindowIcon(icon)
exitval = qapp.exec_()
logging.info('shutting down v%s',
nowplaying.version.get_versions()['version'])
sys.exit(exitval)
if __name__ == '__main__':
main()
|
# The main aim of this program is to retrieve all ASINs of a product
import bs4,requests,re
print('enter the url of product')
prod_url = input()
res=requests.get(prod_url)
#print(res.text)
try:
res.raise_for_status()
    regexobj = re.compile(r'data-asin=("\w{10}")')
asinlists=regexobj.findall(res.text)
playfile=open('asin.txt','w')
for asin_code in asinlists:
playfile.write(asin_code)
playfile.close()
except:
    # error 503: the server is too busy, so try again later
    print("Error 503: your request can't be processed, please try again later")
|
# lilypondpngvideogenerator -- processes lilypond file, scans postscript
# file for page boundaries, analyzes tempo
# track and generates MP4 video and
# subtitle file
#
# author: Dr. Thomas Tensi, 2006 - 2017
#====================
# IMPORTS
#====================
import re
import basemodules.simpleassertion
from basemodules.simplelogging import Logging
from basemodules.simpletypes import Boolean, Map, Natural, Real, \
RealList, String
from basemodules.operatingsystem import OperatingSystem
from basemodules.ttbase import iif
from basemodules.utf8file import UTF8File
from basemodules.validitychecker import ValidityChecker
simpleassertion = basemodules.simpleassertion
#====================
_ffmpegCommand = None
_lilypondCommand = None
_infinity = 999999
# ==== configuration settings ====
# show measure number in subtitle only for 95% of the measure duration
_displayTimePercentage = 0.95
# the log level for ffmpeg rendering
_ffmpegLogLevel = "error"
# encoding of Postscript file of lilypond
_postscriptFileEncoding = "latin_1"
# ==== end of configuration settings ====
#============================================================
class Assertion:
"""Provides all services for assertion checking."""
#--------------------
# EXPORTED FEATURES
#--------------------
@classmethod
def check (cls,
condition : Boolean,
message : String):
"""Checks for <condition> and if not satisfied, raises exception
with <message>."""
simpleassertion.Assertion.check(condition, message)
#--------------------
@classmethod
def ensureFileExistence (cls,
fileName : String,
fileKind : String):
"""Checks whether file with <fileName> exists, otherwise gives
error message about file kind mentioning file name."""
Logging.trace(">>: name = %r, kind = %s", fileName, fileKind)
errorTemplate = "%s file does not exist - %r"
errorMessage = errorTemplate % (fileKind, fileName)
cls.check(OperatingSystem.hasFile(fileName), errorMessage)
Logging.trace("<<")
#--------------------
@classmethod
def ensureProgramAvailability (cls,
programName : String,
programPath : String,
option : String):
"""Checks whether program on <programPath> is available and otherwise
gives error message and exits. <option> is the only
command-line option for program."""
Logging.trace(">>: '%s %s'", programName, option)
cls.check(OperatingSystem.programIsAvailable(programPath, option),
("cannot execute %s program - path %r'"
% (programName, programPath)))
Logging.trace("<<")
#============================================================
class DurationManager:
"""Handles all duration related services like e.g. the calculation
of the duration list from tempo map and page to measure map."""
#--------------------
@classmethod
def measureToDurationMap (cls,
measureToTempoMap : Map,
countInMeasures : Natural,
lastMeasureNumber : Natural):
"""Calculates mapping from measure number to duration based on
tempo track in <measureToTempoMap> and the number of
<countInMeasures>."""
Logging.trace(">>: measureToTempoMap = %r, countInMeasures = %d,"
+ " lastMeasureNumber = %d",
measureToTempoMap, countInMeasures, lastMeasureNumber)
firstMeasureNumber = 1
Assertion.check(firstMeasureNumber in measureToTempoMap,
"tempo track must contain setting for first measure")
(tempo, measureLength) = measureToTempoMap[firstMeasureNumber]
duration = cls.measureDuration(tempo, measureLength)
firstMeasureOffset = duration * countInMeasures
result = {}
measureList = range(firstMeasureNumber, lastMeasureNumber + 1)
        for measureNumber in measureList:
            if measureNumber in measureToTempoMap:
                (tempo, measureLength) = measureToTempoMap[measureNumber]
                duration = cls.measureDuration(tempo, measureLength)
            # the count-in offset extends the first measure only; later
            # measures keep the plain measure duration
            isFirstMeasureNumber = (measureNumber == firstMeasureNumber)
            currentMeasureDuration = (duration +
                                      iif(isFirstMeasureNumber,
                                          firstMeasureOffset, 0))
            result[measureNumber] = currentMeasureDuration
Logging.trace("<<: %r", result)
return result
#--------------------
@classmethod
def pageDurationList (cls,
pageToMeasureMap : Map,
measureToDurationMap : Map) -> RealList:
"""Calculates page duration list based on mapping of pages to
measures <pageToMeasureMap> and the mapping of measures to
durations <measureToDurationMap>"""
Logging.trace(">>: pToM = %r, mToD = %r",
pageToMeasureMap, measureToDurationMap)
result = []
previousPageMeasureNumber = min(measureToDurationMap.keys())
pageList = list(pageToMeasureMap.keys())
pageList.sort()
for page in pageList:
if page > 1:
currentPageMeasureNumber = pageToMeasureMap[page]
# calculate duration of previous page from
# <previousMeasure> to <currentMeasure> - 1
pageDuration = 0
for measureNumber in range(previousPageMeasureNumber,
currentPageMeasureNumber):
pageDuration += measureToDurationMap[measureNumber]
result.append(pageDuration)
previousPageMeasureNumber = currentPageMeasureNumber
Logging.trace("<<: %r", result)
return result
#--------------------
@classmethod
def measureDuration (cls,
tempo : Real,
measureLength : Real) -> Real:
"""Returns the duration of some measure with <measureLength>
quarters and <tempo> given in quarters per minute."""
if tempo <= 1:
result = _infinity
else:
result = (60.0 * measureLength) / tempo
return result
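    # Example: at tempo = 120 quarters per minute a measure of 4 quarters
    # (measureLength = 4) lasts (60.0 * 4) / 120 = 2.0 seconds.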
#--------------------
@classmethod
def quantizeDurationList (cls,
durationList : RealList,
frameRate : Real):
"""Adjusts <durationList> such that it conforms to
<frameRate>."""
Logging.trace(">>: durations = %r, frameRate = %f",
durationList, frameRate)
frameDuration = 1.0 / frameRate
unallocatedDuration = 0
for (i, duration) in enumerate(durationList):
duration += unallocatedDuration
frameCount = int(duration / frameDuration)
effectiveDuration = frameCount * frameDuration
unallocatedDuration = duration - effectiveDuration
durationList[i] = effectiveDuration
Logging.trace("<<: %r", durationList)
#============================================================
class PostscriptFile:
"""Represents the source for getting the transitions between the
pages i.e. the mapping from page index to first measure on
page."""
_fileName = None
# relevant constants for analyzing the postscript file
_barNumberColourSettingText = " 0.0010 0.0020 0.0030 setrgbcolor"
_digitRegexp = re.compile(r".*/(zero|one|two|three|four"
+ r"|five|six|seven|eight|nine)")
_endOfFileText = "%EOF"
_fontDefinitionText = "selectfont"
_pageRegexp = re.compile(r"%%Page: *(\w+)")
_printGlyphsText = "print_glyphs"
_digitMap = { "zero" : 0, "one" : 1, "two" : 2, "three" : 3,
"four" : 4, "five" : 5, "six" : 6, "seven" : 7,
"eight" : 8, "nine" : 9 }
#--------------------
@classmethod
def setName (cls,
name : String):
"""Sets name of postscript file."""
Logging.trace(">>: %r", name)
Assertion.ensureFileExistence(name, "postscript")
cls._fileName = name
Logging.trace("<<")
#--------------------
@classmethod
def pageToMeasureMap (cls) -> Map:
"""Scans postscript file for page numbers and measure numbers
by some naive pattern matching and returns mapping from
page to lowest measure number in page. Assumes that pages
and page numbers are strictly ascending."""
Logging.trace(">>")
# read postscript file into line list
postscriptFile = UTF8File(cls._fileName, 'rb')
lineList = [ line.decode(_postscriptFileEncoding).rstrip()
for line in postscriptFile.readlines() ]
postscriptFile.close()
Logging.trace("--: lineListCount = %d", len(lineList))
# do the processing in a finite state machine
ParseState_inLimbo = 1
ParseState_inPage = 2
ParseState_beforeMeasureText = 3
ParseState_inMeasureText = 4
result = {}
parseState = ParseState_inLimbo
maximumPageNumber = 0
maximumMeasureNumber = 0
pageNumber = 0
for line in lineList:
lineIsPageStart = cls._pageRegexp.match(line)
if lineIsPageStart or cls._endOfFileText in line:
if pageNumber > 0:
Logging.trace("--: firstMeasure = %d, measureCount = %d",
pageMeasureNumber, measureCount)
# wait for a page start when not within page
if parseState == ParseState_inLimbo and not lineIsPageStart:
continue
if lineIsPageStart:
parseState = ParseState_inPage
matchList = cls._pageRegexp.match(line)
pageNumber = int(matchList.group(1))
Logging.trace("--: entering page %d", pageNumber)
maximumPageNumber = max(pageNumber, maximumPageNumber)
pageMeasureNumber = _infinity
measureCount = 0
elif parseState == ParseState_inPage:
if cls._barNumberColourSettingText in line:
parseState = ParseState_beforeMeasureText
currentNumber = 0
currentFactor = 1
elif parseState == ParseState_beforeMeasureText:
# skip over lines that are not a "selectfont"
parseState = iif(cls._fontDefinitionText in line,
ParseState_inMeasureText, parseState)
elif parseState == ParseState_inMeasureText:
if cls._digitRegexp.search(line):
matchList = cls._digitRegexp.match(line)
digit = matchList.group(1)
currentNumber += cls._digitMap[digit] * currentFactor
currentFactor *= 10
else:
parseState = ParseState_inPage
if (cls._printGlyphsText in line
and currentNumber > maximumMeasureNumber):
Logging.trace("--: measure number %d",
currentNumber)
pageMeasureNumber = min(currentNumber,
pageMeasureNumber)
result[pageNumber] = pageMeasureNumber
maximumMeasureNumber = currentNumber
# correct the first entry: first page always starts with
# measure 1
result[1] = 1
# add an artificial last page to measure map
maximumPageNumber += 1
lastMeasureNumber = maximumMeasureNumber + 8
result[maximumPageNumber] = lastMeasureNumber
Logging.trace("<<: %r", result)
return result
#============================================================
class MP4Video:
"""Handles the generation of the target MP4 video file."""
#--------------------
# LOCAL FEATURES
#--------------------
_tempFileName = "temp-noaudio.mp4"
# command
_ffmpegCommand = None
# files and paths
_concatSpecificationFileName = None
_intermediateFileNameTemplate = None
_pageFileNameTemplate = None
# video parameters
_ffmpegPresetName = None
_frameRate = None
_scaleFactor = None
_generatorLogLevel = None
_defaultMp4BaselineLevel = "3.0"
_pageCount = None
#--------------------
# EXPORTED FEATURES
#--------------------
fileName = None
#--------------------
@classmethod
def checkParameters (cls):
"""Checks whether data given for this class is plausible for
subsequent processing."""
Logging.trace(">>")
# check the executables
Assertion.ensureProgramAvailability("ffmpeg", cls._ffmpegCommand,
"-version")
# check the numeric parameters
ValidityChecker.isNumberString(cls._scaleFactor, "scale factor",
realIsAllowed=False, rangeKind=">0")
ValidityChecker.isNumberString(cls._frameRate, "frame rate",
realIsAllowed=True, rangeKind=">0"),
cls._scaleFactor = int(cls._scaleFactor)
cls._frameRate = float(cls._frameRate)
Logging.trace("<<: parameters okay")
#--------------------
@classmethod
def cleanUpConditionally (cls,
filesAreKept : Boolean):
"""Deletes all intermediate files when <filesAreKept> is unset"""
Logging.trace(">>: %r", filesAreKept)
for page in range(1, cls._pageCount + 1):
Logging.trace("--: %d", page)
fileName = cls._intermediateFileNameTemplate % page
OperatingSystem.removeFile(fileName, filesAreKept)
fileName = cls._pageFileNameTemplate % page
OperatingSystem.removeFile(fileName, filesAreKept)
OperatingSystem.removeFile(cls._concatSpecificationFileName,
filesAreKept)
if cls.fileName and cls.fileName == cls._tempFileName:
OperatingSystem.removeFile(cls.fileName, filesAreKept)
Logging.trace("<<")
#--------------------
@classmethod
def make (cls,
pageDurationList : RealList):
"""Generate an MP4 video from durations in <pageDurationList>
and generated PNG images."""
Logging.trace(">>: %r", pageDurationList)
# for each page an MP4 fragment file is generated and finally
# concatenated into the target file
concatSpecificationFile = \
UTF8File(cls._concatSpecificationFileName, 'wt')
for (i, pageDuration) in enumerate(pageDurationList):
page = i + 1
requiredNumberOfFrames = int(cls._frameRate * pageDuration) + 1
pageFileName = cls._pageFileNameTemplate % page
intermediateFileName = cls._intermediateFileNameTemplate % page
# write file name to concatenation file
normalizedFileName = \
OperatingSystem.basename(intermediateFileName, True)
st = "file '%s'\n" % normalizedFileName
concatSpecificationFile.write(st)
# make silent video from single lilypond page
command = ((cls._ffmpegCommand,
"-loglevel", cls._generatorLogLevel,
"-framerate", "1/" + str(requiredNumberOfFrames),
"-i", str(pageFileName),
"-vf", "scale=iw/%d:ih/%d" % (cls._scaleFactor,
cls._scaleFactor),
"-r", str(cls._frameRate),
"-t", "%02.2f" % pageDuration)
+ iif(cls._ffmpegPresetName != "",
("-fpre", cls._ffmpegPresetName),
("-pix_fmt", "yuv420p",
"-profile:v", "baseline",
"-level", cls._defaultMp4BaselineLevel))
+ ("-y", intermediateFileName))
OperatingSystem.executeCommand(command, True)
concatSpecificationFile.close()
# concatenate silent video fragments into single file
cls._pageCount = page
command = (cls._ffmpegCommand,
"-safe", "0",
"-y",
"-loglevel", cls._generatorLogLevel,
"-f", "concat",
"-i", cls._concatSpecificationFileName,
"-codec", "copy",
cls.fileName)
OperatingSystem.executeCommand(command, True)
Logging.trace("<<")
#--------------------
@classmethod
def setName (cls,
fileName : String):
"""Sets file name for MP4 generation to <fileName>; if empty, some
temporary name will be used."""
Logging.trace(">>: %r", fileName)
if fileName == "":
fileName = cls._tempFileName
cls.fileName = fileName
Logging.trace("<<")
#============================================================
class SubtitleFile:
"""Encapsulates generation of an SRT subtitle file."""
_tempFileName = "temp-subtitle.srt"
#--------------------
# LOCAL FEATURES
#--------------------
@classmethod
def _formatTime (cls,
timeInSeconds : Real) -> String:
"""Returns <timeInSeconds> in SRT format with HH:MM:SS,000."""
hours = int(timeInSeconds / 3600)
timeInSeconds -= hours * 3600
minutes = int(timeInSeconds / 60)
timeInSeconds -= minutes * 60
seconds = int(timeInSeconds)
milliseconds = 1000 * (timeInSeconds - seconds)
return "%02d:%02d:%02d,%03d" % (hours, minutes, seconds, milliseconds)
#--------------------
# EXPORTED FEATURES
#--------------------
fileName = None
#--------------------
@classmethod
def make (cls,
measureToDurationMap : Map,
countInMeasures : Natural):
"""Generates SRT subtitle file from <measureToDuration> and
<countInMeasures>."""
Logging.trace(">>: mToDMap = %r, countIn = %d",
measureToDurationMap, countInMeasures)
measureNumberList = list(measureToDurationMap.keys())
measureNumberList.sort()
startTime = 0
subtitleFile = UTF8File(cls.fileName, 'wt')
for measureNumber in measureNumberList:
duration = measureToDurationMap[measureNumber]
endTime = startTime + _displayTimePercentage * duration
st = (cls._formatTime(startTime) + " --> "
+ cls._formatTime(endTime))
startTime += duration
            if measureNumber >= 1:
# write 4 lines of SRT data: number, time interval,
# measure number and an empty separation line
Logging.trace("--: measure %d: %s", measureNumber, st)
st = ("%d\n%s\n%d\n\n"
% (measureNumber, st, measureNumber))
subtitleFile.write(st)
subtitleFile.close()
Logging.trace("<<: subtitles done.")
#--------------------
@classmethod
def setName (cls,
name : String):
"""Sets name of subtitle file."""
Logging.trace(">>: %r", name)
if name == "":
name = cls._tempFileName
cls.fileName = name
Logging.trace("<<")
#--------------------
@classmethod
def cleanUpConditionally (cls,
filesAreKept : Boolean):
"""Cleans up subtitle file if <filesAreKept> is unset,
otherwise moves it to directory given by <targetPath>"""
Logging.trace(">>: %r", filesAreKept)
if cls.fileName == cls._tempFileName:
OperatingSystem.removeFile(cls.fileName, filesAreKept)
Logging.trace("<<")
#============================================================
class LilypondPngVideoGenerator:
"""Responsible for the main processing methods."""
#--------------------
# LOCAL FEATURES
#--------------------
def _checkParameters (self):
"""Checks whether data given is plausible for subsequent
processing."""
Logging.trace(">>: %r", self)
# check the executables
Assertion.ensureProgramAvailability("lilypond", self._lilypondCommand,
"-v")
# check the input files
Assertion.ensureFileExistence(self._lilypondFileName, "lilypond")
# check the numeric parameters
ValidityChecker.isNumberString(self._countInMeasures,
"count-in measures",
realIsAllowed=True)
ValidityChecker.isNumberString(self._frameRate, "frame rate",
realIsAllowed=True, rangeKind=">0")
Assertion.check(len(self._measureToTempoMap) > 0,
"at least one tempo must be specified")
self._countInMeasures = float(self._countInMeasures)
self._frameRate = float(self._frameRate)
MP4Video.checkParameters()
Logging.trace("<<: parameters okay")
#--------------------
def _initializeOtherModuleData (self):
"""Initializes other data in different classes from current
object."""
Logging.trace(">>: %r", self)
# set commands
MP4Video._ffmpegCommand = self._ffmpegCommand
# intermediate file names or paths
MP4Video._concatSpecificationFileName = \
self._makePath("temp-concat.txt")
MP4Video._intermediateFileNameTemplate = \
self._makePath("temp%d.mp4")
MP4Video._pageFileNameTemplate = self._pictureFileStem + "-page%d.png"
# technical parameters
MP4Video._frameRate = self._frameRate
MP4Video._scaleFactor = self._scaleFactor
MP4Video._ffmpegPresetName = self._ffmpegPresetName
MP4Video._generatorLogLevel = _ffmpegLogLevel
# file parameters
SubtitleFile.setName(self._targetSubtitleFileName)
MP4Video.setName(self._targetMp4FileName)
Logging.trace("<<")
#--------------------
def _makePath (self,
fileName : String):
"""makes path from <fileName> and _intermediateFilePath"""
return (self._intermediateFileDirectoryPath
+ OperatingSystem.pathSeparator + fileName)
#--------------------
def _processLilypondFile (self):
"""Generates postscript file and picture files from lilypond
file."""
Logging.trace(">>: %r", self._lilypondFileName)
command = (self._lilypondCommand,
"-l", "WARNING",
"-dno-point-and-click",
"--ps",
"--png",
"--output=" + self._pictureFileStem,
self._lilypondFileName)
OperatingSystem.executeCommand(command, True)
Logging.trace("<<")
#--------------------
# EXPORTED FEATURES
#--------------------
@classmethod
def initialize (cls,
ffmpegCommand : String,
lilypondCommand : String):
"""Sets module-specific configuration variables"""
Logging.trace(">>: ffmpeg = %r, lilypond = %r",
ffmpegCommand, lilypondCommand)
globals()['_ffmpegCommand'] = ffmpegCommand
globals()['_lilypondCommand'] = lilypondCommand
Logging.trace("<<")
#--------------------
def __init__ (self,
lilypondFileName : String,
targetMp4FileName : String,
targetSubtitleFileName : String,
measureToTempoMap : Map,
countInMeasures : Natural,
frameRate : Real,
scalingFactor : Real,
ffmpegPresetName : String,
intermediateFileDirectoryPath : String,
intermediateFilesAreKept : Boolean = False):
"""Initializes generator"""
Logging.trace(">>: lilypondFileName = %r, targetMp4FileName = %r,"
+ " targetSubtitleFileName = %r,"
+ " measureToTempoMap = %r, countInMeasures = %r,"
+ " frameRate = %r, scalingFactor = %d,"
+ " ffmpegPresetName = %r,"
+ " intermediateFileDirectoryPath = %r,"
+ " intermediateFilesAreKept = %r",
lilypondFileName, targetMp4FileName,
targetSubtitleFileName, measureToTempoMap,
countInMeasures, frameRate, scalingFactor,
ffmpegPresetName, intermediateFileDirectoryPath,
intermediateFilesAreKept)
self._ffmpegCommand = _ffmpegCommand
self._lilypondCommand = _lilypondCommand
# files
self._intermediateFilesAreKept = intermediateFilesAreKept
self._intermediateFileDirectoryPath = intermediateFileDirectoryPath
self._lilypondFileName = lilypondFileName
self._pictureFileStem = self._makePath("temp_frame")
self._postscriptFileName = self._pictureFileStem + ".ps"
self._targetMp4FileName = targetMp4FileName
self._targetSubtitleFileName = targetSubtitleFileName
self._measureToTempoMap = measureToTempoMap
# video parameters
self._countInMeasures = countInMeasures
self._frameRate = frameRate
self._scaleFactor = scalingFactor
self._ffmpegPresetName = ffmpegPresetName
# -- initialize other modules
self._initializeOtherModuleData()
# -- check consistency of data
self._checkParameters()
Logging.trace("<<: %r", self)
#--------------------
def __repr__ (self) -> String:
"""Returns strings representation of <self>."""
className = self.__class__.__name__
result = (("%s(ffmpegCommand = %r, lilypondCommand = %r,"
+ " lilypondFileName = %r, pictureFileStem = %r,"
+ " postscriptFileName = %r, targetMp4FileName = %r,"
+ " targetSubtitleFileName = %r,"
+ " measureToTempoMap = %r, countInMeasures = %r,"
+ " frameRate = %r, scaleFactor = %r,"
+ " ffmpegPresetName = %r,"
+ " intermediateFileDirectoryPath = %r,"
+ " intermediateFilesAreKept = %r)") %
(className, self._ffmpegCommand, self._lilypondCommand,
self._lilypondFileName, self._pictureFileStem,
self._postscriptFileName, self._targetMp4FileName,
self._targetSubtitleFileName, self._measureToTempoMap,
self._countInMeasures, self._frameRate,
self._scaleFactor, self._ffmpegPresetName,
self._intermediateFileDirectoryPath,
self._intermediateFilesAreKept))
return result
#--------------------
def cleanup (self):
"""Deletes all intermediate files."""
Logging.trace(">>")
filesAreKept = self._intermediateFilesAreKept
OperatingSystem.removeFile(self._postscriptFileName, filesAreKept)
MP4Video.cleanUpConditionally(filesAreKept)
SubtitleFile.cleanUpConditionally(filesAreKept)
Logging.trace("<<")
#--------------------
def process (self):
"""Coordinates the processing of all other modules."""
Logging.trace(">>: %r", self)
try:
self._processLilypondFile()
# parse postscript file for mapping from page to first
# measure
PostscriptFile.setName(self._postscriptFileName)
pageToMeasureMap = PostscriptFile.pageToMeasureMap()
lastMeasureNumber = max(pageToMeasureMap.values())
Logging.trace("--: lastMeasureNumber = %d ", lastMeasureNumber)
# generate ffmpeg command fragment from frame rate, page
# to measure map and measure to tempo map
measureToDurationMap = \
DurationManager.measureToDurationMap(self._measureToTempoMap,
self._countInMeasures,
lastMeasureNumber)
pageDurationList = \
DurationManager.pageDurationList(pageToMeasureMap,
measureToDurationMap)
DurationManager.quantizeDurationList(pageDurationList,
self._frameRate)
MP4Video.make(pageDurationList)
# generate subtitle file (if specified)
if SubtitleFile.fileName:
SubtitleFile.make(measureToDurationMap, self._countInMeasures)
except RuntimeError as exception:
Logging.trace("--: exception %s", exception.args[0])
Logging.trace("<<")
|
from lale.lib.sklearn import *
from lale.lib.xgboost import *
from lale.lib.lightgbm import *
#from lale.lib.lale import KeepNumbers, KeepNonNumbers
from lale.lib.lale import ConcatFeatures as Concat
from lale.lib.lale import NoOp
from lale.pretty_print import to_string
from lale.lib.lale import Hyperopt
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
import numpy as np
import time as time
from abstract_optimizer import APipelineOptimizer
# Translates pipelines to LALE and uses hyperopt to train them and obtain feedback
class PipelineOptimizer(APipelineOptimizer):
REGRESSION = True
EVALS = 20
def __init__(self, data=None, regression=True, evals=20):
self.data = data if data is not None else load_iris(return_X_y=True)
self.REGRESSION = regression
self.EVALS = evals
self.X, self.y = self.data
def evaluate_pipeline(self, pipeline):
if 'lale_pipeline' not in pipeline:
return pipeline
print("Starting to optimize " + pipeline['pipeline'])
start_time = time.time()
opt_scorer = 'r2' if self.REGRESSION else 'accuracy'
opt = Hyperopt(
estimator=pipeline['lale_pipeline'],
max_evals=self.EVALS,
scoring=opt_scorer
)
trained_pipeline = None
best_accuracy = 0
try:
trained_pipeline = opt.fit(self.X, self.y)
print('Fit completed.')
predictions = trained_pipeline.predict(self.X)
print('Predict completed.')
# best_accuracy = -np.min(opt.get_trials().losses())
best_accuracy = accuracy_score(self.y, [round(pred) for pred in predictions])
print('Best accuracy: ' + str(best_accuracy))
except Exception as e:
print("EXCEPTION OCCURRED: " + str(e))
end_time = time.time()
print("Completed optimization for " + pipeline['pipeline'])
tlp = pipeline.copy()
tlp.update({
'trained_pipeline': trained_pipeline,
'best_accuracy': best_accuracy,
'opt_duration': (end_time-start_time)
})
return tlp
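    # Illustrative sketch (not part of the original code): the optimizer
    # expects pipeline dictionaries that already carry a LALE pipeline, e.g.
    #
    #   candidate = {'pipeline': 'pca_lr', 'lale_pipeline': PCA >> LogisticRegression}
    #   result = PipelineOptimizer(regression=False, evals=5).evaluate_pipeline(candidate)
    #
    # The returned copy additionally contains 'trained_pipeline',
    # 'best_accuracy' and 'opt_duration'. The name 'pca_lr' and the chosen
    # operators are made-up placeholders.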
|
#From https://github.com/DerekGloudemans/video-write-utilities/blob/master/combine_frames.py
import cv2
import numpy as np
import time
paths = [
"/media/worklab/data_HDD/cv_data/video/data - test pole 6 cameras july 22/Jul_22_2019_12-05-07/Axis_Camera_10/cam_0_capture_002.avi",
"/media/worklab/data_HDD/cv_data/video/data - test pole 6 cameras july 22/Jul_22_2019_12-05-07/Axis_Camera_12/cam_2_capture_002.avi",
"/media/worklab/data_HDD/cv_data/video/data - test pole 6 cameras july 22/Jul_22_2019_12-05-07/Axis_Camera_13/cam_3_capture_002.avi",
"/media/worklab/data_HDD/cv_data/video/data - test pole 6 cameras july 22/Jul_22_2019_12-05-07/Axis_Camera_14/cam_4_capture_002.avi",
"/media/worklab/data_HDD/cv_data/video/data - test pole 6 cameras july 22/Jul_22_2019_12-05-07/Axis_Camera_15/cam_5_capture_002.avi",
"/media/worklab/data_HDD/cv_data/video/data - test pole 6 cameras july 22/Jul_22_2019_12-05-07/Axis_Camera_16/cam_1_capture_002.avi"
]
paths = [
"/home/worklab/Documents/CV-detection/example_videos/110_foot_example_tracks/detect0.avi",
"/home/worklab/Documents/CV-detection/example_videos/110_foot_example_tracks/detect3.avi",
"/home/worklab/Documents/CV-detection/example_videos/110_foot_example_tracks/detect1.avi",
"/home/worklab/Documents/CV-detection/example_videos/110_foot_example_tracks/track0.avi",
"/home/worklab/Documents/CV-detection/example_videos/110_foot_example_tracks/track3.avi",
"/home/worklab/Documents/CV-detection/example_videos/110_foot_example_tracks/track1.avi"
]
paths = [
"/media/worklab/data_HDD/cv_data/video/110_foot_pole_test/Axis_Camera_10/cam_0_capture_000.avi",
"/media/worklab/data_HDD/cv_data/video/110_foot_pole_test/Axis_Camera_12/cam_2_capture_000.avi",
"/media/worklab/data_HDD/cv_data/video/110_foot_pole_test/Axis_Camera_13/cam_3_capture_000.avi",
"/media/worklab/data_HDD/cv_data/video/110_foot_pole_test/Axis_Camera_14/cam_4_capture_000.avi",
"/media/worklab/data_HDD/cv_data/video/110_foot_pole_test/Axis_Camera_15/cam_5_capture_000.avi",
"/media/worklab/data_HDD/cv_data/video/110_foot_pole_test/Axis_Camera_16/cam_1_capture_000.avi"
]
paths = ['C:/Users/Nikki/Documents/work/inputs-outputs/vid_output/aot1.avi', 'C:/Users/Nikki/Documents/work/inputs-outputs/vid_output/aot2.avi']
file_out = "C:/Users/Nikki/Documents/work/inputs-outputs/vid_output/aot_combo.avi"
# title_frame = "/home/worklab/Desktop/110 Traffic Camera Pole Test.jpg"
title_frame = None
show = False
# open capture devices to read video files
cap_list = []
for file_in in paths:
cap = cv2.VideoCapture(file_in)
assert cap.isOpened(), "Cannot open file \"{}\"".format(file_in)
cap_list.append(cap)
# determine size of combined frame (assumed that all frames are the same size)
cam_num = len(cap_list)
n_wide = 2
n_high = (cam_num-1) // 3 + 1
frame_width = int(cap_list[0].get(cv2.CAP_PROP_FRAME_WIDTH) * n_wide / 2.0)
frame_height = int(cap_list[0].get(cv2.CAP_PROP_FRAME_HEIGHT) * n_high / 2.0)
# opens VideoWriter object for saving video file if necessary
if file_out is not None:
out = cv2.VideoWriter(file_out,cv2.CAP_FFMPEG,7, (frame_width,frame_height))
# add title frame for 2.5 seconds
if title_frame is not None:
title = cv2.imread(title_frame,1)
for i in range(0,75):
out.write(cv2.resize(title,(1920*3,1080*2)))
# read first frame from all captures
frames = []
#i = 0
for cap in cap_list:
ret,frame = cap.read()
#cv2.imwrite("align{}.jpg".format(i),frame)
frame = cv2.resize(frame,(1920,1080))
frames.append(frame)
#i = i + 1
start = time.time()
frame_num = 0
while cap.isOpened():
if ret:
# top_row = np.concatenate((frames[0],frames[1],frames[2]),axis = 1)
# bottom_row = np.concatenate((frames[3],frames[4],frames[5]),axis = 1)
# combined = np.concatenate((top_row,bottom_row),axis = 0)
combined = np.concatenate((frames[0], frames[1]))
# save frame to file if necessary
        if file_out is not None:
out.write(combined)
#summary statistics
frame_num += 1
print("FPS of the video is {:5.2f}".format( frame_num / (time.time() - start)))
# get next frames
frames = []
for cap in cap_list:
ret,frame = cap.read()
if ret == False:
break
frame = cv2.resize(frame,(1920,1080))
frames.append(frame)
# output frame
if show:
combined = cv2.resize(combined, (2880, 1080))
cv2.imshow("frame", combined)
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
continue
else:
break
# close all resources used
for cap in cap_list:
cap.release()
cv2.destroyAllWindows()
try:
out.release()
except:
pass
print("Video combination finished.") |
# Copyright (c) 2020, Oracle and/or its affiliates.
#
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
#
# connecting from outside directly to server
# connecting from outside via router
# connecting from inside directly to server - same namespace
# connecting from inside via router - same namespace
# connecting from inside directly to server - different namespace
# connecting from inside via router - different namespace
|
#! python3
# -*- coding: utf-8 -*-
import numpy as np
lis = np.array([4, 6])
#L0 Norm
print('norm 0')
print(np.linalg.norm(lis, ord=0))
#L1 Norm
#Simply sums the absolute values of all vector elements.
#For X=4, Y=6 this is 4 + 6.
print('norm 1')
print(np.linalg.norm(lis, ord=1))
#L2 Norm
#Computes the straight-line (Euclidean) distance from the origin to the vector's endpoint, i.e. the actual length of the vector.
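#Expected outputs of the three prints in this script (a sketch, worked by hand):
#  L0 norm of [4, 6] -> 2.0       (two non-zero entries)
#  L1 norm of [4, 6] -> 10.0      (4 + 6)
#  L2 norm of [4, 6] -> ~7.2111   (sqrt(4**2 + 6**2) = sqrt(52))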
print('norm 2')
print(np.linalg.norm(lis, ord=2)) |
"""Replace matched group
https://stackoverflow.com/questions/6711567/how-to-use-python-regex-to-replace-using-captured-group/6711631
"""
import re
text = 'end, Wrong punctuation.'
pattern = re.compile(r', ([A-Z])')
text = pattern.sub(r'. \1', text)
print(text)
# end. Wrong punctuation.
text = 'excellence, excellence,'
pattern = re.compile(r',$')
text = pattern.sub(r'.', text)
print(text)
# excellence, excellence. |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutStrings(Koan):
def test_double_quoted_strings_are_strings(self):
string = "Hello, world."
self.assertEqual(True, isinstance(string, str))
def test_single_quoted_strings_are_also_strings(self):
string = 'Goodbye, world.'
self.assertEqual(True, isinstance(string, str))
def test_triple_quote_strings_are_also_strings(self):
string = """Howdy, world!"""
self.assertEqual(True, isinstance(string, str))
def test_triple_single_quotes_work_too(self):
string = '''Bonjour tout le monde!'''
self.assertEqual(True, isinstance(string, str))
def test_raw_strings_are_also_strings(self):
string = r"Konnichi wa, world!"
self.assertEqual(True, isinstance(string, str))
def test_use_single_quotes_to_create_string_with_double_quotes(self):
string = 'He said, "Go Away."'
self.assertEqual(string, string)
def test_use_double_quotes_to_create_strings_with_single_quotes(self):
string = "Don't"
self.assertEqual(string, string)
def test_use_backslash_for_escaping_quotes_in_strings(self):
a = "He said, \"Don't\""
b = 'He said, "Don\'t"'
# In Python strings, the backslash "\" is a special character, also called the "escape" character.
# Prefixing a quote with "\" turns it into an ordinary character (rather than a closing quote).
self.assertEqual(True, (a == b))
def test_use_backslash_at_the_end_of_a_line_to_continue_onto_the_next_line(self):
string = "It was the best of times,\n\
It was the worst of times."
# In Python, a backslash ( \ ) is a continuation character, and if it is placed at the end of a line,
# it is considered that the line is continued, ignoring subsequent newlines.
self.assertEqual(52, len(string))
def test_triple_quoted_strings_can_span_lines(self):
string = """
Howdy,
world!
"""
# String literals inside triple quotes, """ or ''',
# can span multiple lines of text. Python strings are "immutable"
# which means they cannot be changed after they are created
self.assertEqual(15, len(string))
def test_triple_quoted_strings_need_less_escaping(self):
a = "Hello \"world\"."
b = """Hello "world"."""
self.assertEqual(True, (a == b))
def test_escaping_quotes_at_the_end_of_triple_quoted_string(self):
string = """Hello "world\""""
self.assertEqual(string, string)
def test_plus_concatenates_strings(self):
string = "Hello, " + "world"
self.assertEqual(string, string)
def test_adjacent_strings_are_concatenated_automatically(self):
string = "Hello" ", " "world"
self.assertEqual(string, string)
def test_plus_will_not_modify_original_strings(self):
hi = "Hello, "
there = "world"
string = hi + there
# The + operator is used both for adding numbers and strings;
# in programming we say that the operator is overloaded.
self.assertEqual("Hello, ", hi)
self.assertEqual("world", there)
def test_plus_equals_will_append_to_end_of_string(self):
hi = "Hello, "
there = "world"
hi += there
# The easiest way of concatenating strings is to use the + or the += operator.
self.assertEqual("Hello, world", hi)
def test_plus_equals_also_leaves_original_string_unmodified(self):
original = "Hello, "
hi = original
there = "world"
hi += there
        self.assertEqual("Hello, ", original)
def test_most_strings_interpret_escape_characters(self):
string = "\n"
self.assertEqual('\n', string)
self.assertEqual("""\n""", string)
        self.assertEqual(1, len(string))
|
"""
http://3dgep.com/?p=1356
http://3dgep.com/?p=1053
"""
import os
from collections import namedtuple
import numpy
from pyglet.gl import *
from pyrr import vector
from pyrr import quaternion
from pyrr import matrix44
from pygly.shader import Shader, ShaderProgram
from razorback.mesh import Mesh
from razorback.md5.skeleton import BaseFrameSkeleton
"""
correcting x,y,z for opengl
joint position
pos_x, pos_y, pos_z = pos_x, pos_z, -pos_y
joint quaternion
quat_x, quat_y, quat_z, quat_w = quat_x, quat_z, -quat_y, quat_w
weight position
pos_x, pos_y, pos_z = pos_x, pos_z, -pos_y
"""
class Mesh( Mesh ):
shader_source = {
'vert': open(os.path.dirname(__file__) + '/md5.vert','r').read(),
'frag': open(os.path.dirname(__file__) + '/md5.frag','r').read(),
}
def __init__( self, md5mesh ):
super( Mesh, self ).__init__()
self.mesh = MeshData( md5mesh )
self.vbo = (GLuint)()
self.tbo = (GLuint)()
self.shader = None
glGenBuffers( 1, self.vbo )
glGenTextures( 1, self.tbo )
self.shader = ShaderProgram(
Shader( GL_VERTEX_SHADER, Mesh.shader_source['vert'] ),
Shader( GL_FRAGMENT_SHADER, Mesh.shader_source['frag'] ),
link_now = False
)
# set our shader data
# we MUST do this before we link the shader
self.shader.attributes.in_normal = 0
self.shader.attributes.in_texture_coord = 1
self.shader.attributes.in_bone_indices = 2
self.shader.attributes.in_bone_weights_1 = 3
self.shader.attributes.in_bone_weights_2 = 4
self.shader.attributes.in_bone_weights_3 = 5
self.shader.attributes.in_bone_weights_4 = 6
self.shader.frag_location( 'out_frag_colour' )
# link the shader now
self.shader.link()
# bind our uniform indices
self.shader.bind()
self.shader.uniforms.in_diffuse = 0
self.shader.uniforms.in_specular = 1
self.shader.uniforms.in_normal = 2
self.shader.uniforms.in_bone_matrices = 4
self.shader.unbind()
def set_skeleton( self, skeleton ):
# load the matrices into our texture buffer
#matrices = skeleton.matrices
matrices = numpy.zeros( (skeleton.num_joints, 2, 4), dtype = 'float32' )
matrices[ :, 0 ] = skeleton.orientations
matrices[ :, 1, 0:3 ] = skeleton.positions
glBindBuffer( GL_TEXTURE_BUFFER, self.vbo )
glBufferData(
GL_TEXTURE_BUFFER,
matrices.nbytes,
(GLfloat * matrices.size)(*matrices.flat),
GL_STATIC_DRAW
)
# link to our BO
glBindTexture( GL_TEXTURE_BUFFER, self.tbo )
glTexBuffer( GL_TEXTURE_BUFFER, GL_RGBA32F, self.vbo )
glBindTexture( GL_TEXTURE_BUFFER, 0 )
glBindBuffer( GL_TEXTURE_BUFFER, 0 )
def render( self, projection, model_view ):
# bind our shader and pass in our model view
self.shader.bind()
self.shader.uniforms.in_model_view = model_view
self.shader.uniforms.in_projection = projection
# set our animation data
glActiveTexture( GL_TEXTURE0 + 4 )
glBindTexture( GL_TEXTURE_BUFFER, self.tbo )
# render the mesh
self.mesh.render()
# restore state
glActiveTexture( GL_TEXTURE0 + 3 )
glBindTexture( GL_TEXTURE_BUFFER, 0 )
glActiveTexture( GL_TEXTURE0 )
self.shader.unbind()
class MeshData( object ):
mesh_layout = namedtuple(
'MD5_MeshData',
[
'normals',
'tcs',
'bone_indices',
'weights',
'indices'
]
)
def __init__( self, md5mesh ):
super( MeshData, self ).__init__()
self.md5mesh = md5mesh
self.vaos = None
self.vbos = None
self.load()
def load( self ):
mesh = self._generate_mesh()
# load into opengl
self.vbos = self._generate_vbos( mesh )
self.vaos = self._generate_vaos( self.vbos )
def _generate_mesh( self ):
def prepare_submesh( mesh ):
tcs = mesh.tcs
            # store weights as [pos.x, pos.y, pos.z, bias] * 4
weights = numpy.zeros( (mesh.num_verts, 4, 4), dtype = 'float32' )
#bone_indices = numpy.zeros( (mesh.num_verts, 4), dtype = 'uint32' )
bone_indices = numpy.zeros( (mesh.num_verts, 4), dtype = 'float32' )
# iterate through each vertex and generate our
# vertex position, texture coordinate, bone index and
# bone weights
for vert_index, (vertex, vertex_weight, bone_index) in enumerate(
zip( mesh.vertices, weights, bone_indices )
):
for weight_index in range( vertex.weight_count ):
# we only support 4 bones per vertex
# this is so we can fit it into a vec4
if weight_index >= 4:
print 'Too many weights for vertex! %i' % vertex.weight_count
break
weight = mesh.weight( vertex.start_weight + weight_index )
vertex_weight[ weight_index ][ 0:3 ] = weight.position
vertex_weight[ weight_index ][ 3 ] = weight.bias
bone_index[ weight_index ] = weight.joint
return ( tcs, weights, bone_indices )
"""
def prepare_normals( mesh, positions ):
def generate_normals( positions, triangles ):
normals = numpy.zeros( positions.shape, dtype = 'float32' )
# generate a normal for each triangle
for triangle in triangles:
v1, v2, v3 = positions[ triangle[ 0 ] ]
v2 = positions[ triangle[ 1 ] ]
v3 = positions[ triangle[ 2 ] ]
normal = vector.generate_normals(
v1,
v2,
v3,
normalise_result = False
)
normals[ triangle[ 0 ] ] += normal
normals[ triangle[ 1 ] ] += normal
normals[ triangle[ 2 ] ] += normal
return normals
def generate_bind_pose_normals( mesh, normals ):
# convert the normals to bind-pose position
for vert_index, vertex in enumerate( mesh.vertices ):
# retrieve our calculated normal
# normalise the normal
normal = vector.normalise( normals[ vert_index ] )
# clear our stored normal
# we want to store a bind pose normal
normals[ vert_index ] = [ 0.0, 0.0, 0.0 ]
# convert to bind-pose
# this is very similar to prepare_mesh
for weight_index in range( vertex.weight_count ):
weight = mesh.weight( vertex.start_weight + weight_index )
joint = self.md5mesh.joint( weight.joint )
# rotate the normal by the joint
rotated_position = quaternion.apply_to_vector(
joint.orientation,
normal
)
normals[ vert_index ] += rotated_position * weight.bias
return normals
normals = generate_normals( positions, mesh.tris )
normals = generate_bind_pose_normals( mesh, normals )
return normals
"""
# prepare our mesh vertex data
mesh_data = MeshData.mesh_layout(
# normals
numpy.empty( (self.md5mesh.num_verts, 3), dtype = 'float32' ),
# tcs
numpy.empty( (self.md5mesh.num_verts, 2), dtype = 'float32' ),
# bone_indices
#numpy.empty( (self.md5mesh.num_verts, 4), dtype = 'uint32' ),
numpy.empty( (self.md5mesh.num_verts, 4), dtype = 'float32' ),
# weights
numpy.empty( (self.md5mesh.num_verts, 4, 4), dtype = 'float32' ),
# indices
numpy.empty( (self.md5mesh.num_tris, 3), dtype = 'uint32' )
)
current_vert_offset = 0
current_tri_offset = 0
for mesh in self.md5mesh.meshes:
# generate the bind pose
# and after that, use the bind pose to generate our normals
tcs, weights, bone_indices = prepare_submesh( mesh )
#normals = prepare_normals( mesh, positions )
# write to our arrays
start, end = current_vert_offset, current_vert_offset + mesh.num_verts
#mesh_data.normals[ start : end ] = normals
mesh_data.tcs[ start : end ] = tcs
mesh_data.weights[ start : end ] = weights
mesh_data.bone_indices[ start : end ] = bone_indices
# increment our current offset by the number of vertices
current_vert_offset += mesh.num_verts
# store our indices
start, end = current_tri_offset, current_tri_offset + mesh.num_tris
mesh_data.indices[ start : end ] = mesh.tris
# increment our current offset by the number of vertices
current_tri_offset += mesh.num_tris
return mesh_data
def _generate_vbos( self, bindpose ):
def fill_array_buffer( vbo, data, gltype ):
glBindBuffer( GL_ARRAY_BUFFER, vbo )
glBufferData(
GL_ARRAY_BUFFER,
data.nbytes,
(gltype * data.size)(*data.flat),
GL_STATIC_DRAW
)
def fill_index_buffer( bo, data, gltype ):
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, bo )
glBufferData(
GL_ELEMENT_ARRAY_BUFFER,
data.nbytes,
(gltype * data.size)(*data.flat),
GL_STATIC_DRAW
)
# load our vertex buffers
# these are per-vertex values
vbos = (GLuint * 5)()
glGenBuffers( len(vbos), vbos )
#fill_array_buffer( vbos[ 0 ], bindpose.normals, GLfloat )
fill_array_buffer( vbos[ 1 ], bindpose.tcs, GLfloat )
#fill_array_buffer( vbos[ 2 ], bindpose.bone_indices, GLuint )
fill_array_buffer( vbos[ 2 ], bindpose.bone_indices, GLfloat )
fill_array_buffer( vbos[ 3 ], bindpose.weights, GLfloat )
# triangle indices
fill_index_buffer( vbos[ 4 ], bindpose.indices, GLuint )
# unbind
glBindBuffer( GL_ARRAY_BUFFER, 0 )
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 )
return MeshData.mesh_layout(
vbos[ 0 ],
vbos[ 1 ],
vbos[ 2 ],
vbos[ 3 ],
vbos[ 4 ]
)
def _generate_vaos( self, vbos ):
def calculate_offset( offset, elements, bytes ):
return offset * elements * bytes
# create our VAOs
vaos = (GLuint * self.md5mesh.num_meshes)()
glGenVertexArrays( self.md5mesh.num_meshes, vaos )
# bind the arrays to our VAOs
current_offset = 0
for vao, mesh in zip( vaos, self.md5mesh.meshes ):
glBindVertexArray( vao )
"""
# normals
offset = calculate_offset( current_offset, 3, 4 )
glBindBuffer( GL_ARRAY_BUFFER, vbos.normals )
glEnableVertexAttribArray( 0 )
glVertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, 0, offset )
"""
# tcs
offset = calculate_offset( current_offset, 2, 4 )
glBindBuffer( GL_ARRAY_BUFFER, vbos.tcs )
glEnableVertexAttribArray( 1 )
glVertexAttribPointer( 1, 2, GL_FLOAT, GL_FALSE, 0, offset)
# bone_indices
offset = calculate_offset( current_offset, 4, 4 )
glBindBuffer( GL_ARRAY_BUFFER, vbos.bone_indices )
glEnableVertexAttribArray( 2 )
#glVertexAttribIPointer( 2, 4, GL_UNSIGNED_INT, GL_FALSE, 0, offset )
glVertexAttribPointer( 2, 4, GL_FLOAT, GL_FALSE, 0, offset )
# weights
offset = calculate_offset( current_offset, 16, 4 )
stride = 16 * 4
glBindBuffer( GL_ARRAY_BUFFER, vbos.weights )
glEnableVertexAttribArray( 3 )
glVertexAttribPointer( 3, 4, GL_FLOAT, GL_FALSE, stride, offset + (4 * 0) )
glEnableVertexAttribArray( 4 )
glVertexAttribPointer( 4, 4, GL_FLOAT, GL_FALSE, stride, offset + (4 * 4) )
glEnableVertexAttribArray( 5 )
glVertexAttribPointer( 5, 4, GL_FLOAT, GL_FALSE, stride, offset + (4 * 8) )
glEnableVertexAttribArray( 6 )
glVertexAttribPointer( 6, 4, GL_FLOAT, GL_FALSE, stride, offset + (4 * 12) )
# increment our buffer offset to the next mesh
current_offset += mesh.num_verts
#break
# unbind
glBindVertexArray( 0 )
glBindBuffer( GL_ARRAY_BUFFER, 0 )
return vaos
def render( self ):
# bind our vertex attributes
current_offset = 0
for index, (vao, mesh) in enumerate( zip( self.vaos, self.md5mesh.meshes ) ):
# num indices = num tris * 3 indices per tri
# offset = offset * 3 indices per tri * 4 bytes per element
# bind our indices
glBindVertexArray( vao )
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, self.vbos.indices )
glDrawElements(
GL_TRIANGLES,
mesh.num_tris * 3,
GL_UNSIGNED_INT,
current_offset * 3 * 4
)
current_offset += mesh.num_tris
#break
# reset our state
glBindVertexArray( 0 )
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 )
|
# -*- coding: utf-8 -*-
"""
Exceptions module.
"""
from __future__ import unicode_literals
class EventError(Exception):
"""Using to notify subscribed clients about event failure."""
pass |
"""
AIS playbox TV addon for kodi.
This Kodi addon allows you to play the free TV channels provided by the AIS
playbox device.
"""
import json
import random
import re
import requests
import sys
import xbmcaddon
import xbmcgui
import xbmcplugin
import zlib
from base64 import b64encode
from time import strftime
from urlparse import parse_qsl
AISWEB = "https://playbox.ais.co.th/AISWeb/"
# Without SSL GET_DEVICE_OWNER = "http://stbbe.ais.co.th:8080/getDeviceOwner"
GET_DEVICE_OWNER = "https://stbbe.ais.co.th:8443/getDeviceOwner"
TV_CHANNELS = "https://sifsecretstore.s3.amazonaws.com/AIS/json/Page/NewTV.json.gz"
GET_USER_ID = AISWEB + "ServiceGetUserIdFromPrivateId.aspx"
GET_PACKAGES = AISWEB + "PageGetPackagesByUserID.aspx"
CHECK_ENTITLEMENT = AISWEB + "ServiceCheckAssetEntitlementByUserId.aspx"
PLAYBOX_APP_KEY = "UHgZAVpacCXP/spFoX+S7Pwt/sM="
HEADERS = {
'User-Agent': 'Dalvik/1.6.0 (Linux; U; Android 4.4.2; n200 Build/KOT49H)'}
plugin_url = sys.argv[0]
plugin_handle = int(sys.argv[1])
def get_tv_channels():
"""Retrieve the current list of TV Channels"""
response = requests.get(TV_CHANNELS, headers=HEADERS)
data = json.loads(zlib.decompress(response.content, 16+zlib.MAX_WBITS))
flatten = [item for x in data['SubPage'] for item in x['Items']]
unique = dict((i['ItemID'], i) for i in flatten).values()
return sorted(unique, key=lambda item: item['ItemName'])
def get_subscriber_packages(user_id):
"""Asks for the packages the current subscriber has"""
parameters = {
'appId': 'AND',
'userId': user_id,
# Not needed but just in case
'appKey': PLAYBOX_APP_KEY}
data = {'JSONtext': json.dumps(parameters)}
res = requests.post(GET_PACKAGES, headers=HEADERS, data=data,
verify=False)
return [p["ServiceID"] for p in res.json()["PackageInfo"]]
def filter_channels(channels):
user_id = xbmcplugin.getSetting(plugin_handle, 'userId')
packages = set(get_subscriber_packages(user_id))
included = lambda cp: not packages.isdisjoint(cp.split('|'))
return [c for c in channels if included(c['Packages'])]
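# Illustrative sketch (not part of the original add-on): a channel whose
# 'Packages' field is the pipe-separated string "PKG_A|PKG_B" is kept by
# filter_channels() whenever the subscriber owns at least one of those
# package ids, e.g.
#
#   included = lambda cp: not {"PKG_B"}.isdisjoint(cp.split('|'))
#   included("PKG_A|PKG_B")   # -> True
#   included("PKG_C")         # -> False
#
# (the package ids shown here are made-up placeholders).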
def map_channels(channels):
"""Creates a xbmc list of playable TV channels"""
final_list = []
for channel in channels:
list_item = xbmcgui.ListItem(label=channel['ItemName'])
list_item.setArt({'thumb': channel['ItemIcon'],
'icon': channel['ItemIcon']})
list_item.setInfo('video', {'title': channel['ItemName']})
list_item.setProperty('IsPlayable', 'true')
url = '{0}?action=play&channel={1}'.format(
plugin_url, channel['ItemID'])
final_list.append((url, list_item, False))
return final_list
def get_channel_url(assetId):
"""Request the final playable URL
This url contains a playlist with SQ and HQ streams.
"""
parameters = {
'appId': 'AND',
'assetId': assetId,
'assetType': 'CHANNEL',
'deviceType': 'STB',
'userId': xbmcplugin.getSetting(plugin_handle, 'userId'),
'lang': 'en',
'appKey': PLAYBOX_APP_KEY}
data = {'JSONtext': json.dumps(parameters)}
# Verify false due to problems in kodi v16 in macos with old python
res = requests.post(CHECK_ENTITLEMENT, headers=HEADERS, data=data,
verify=False)
return res.json()["StreamingInfo"][0]["URLInfo"]
def play_channel(channel):
"""Make kodi play a TV channel"""
url = get_channel_url(channel)
play_item = xbmcgui.ListItem("Channel")
play_item.setPath(url)
play_item.setInfo(type='Video', infoLabels={'Title': 'Channel'})
play_item.setProperty("IsPlayable", "true")
xbmcplugin.setResolvedUrl(plugin_handle, True, listitem=play_item)
def generate_command_id(serial):
"""AIS command ids"""
timestamp = strftime('%m%d%Y%H%M%S')
options = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
rand_ending = "".join([random.choice(options) for _ in range(4)])
return "{0}-{1}{2}".format(serial, timestamp, rand_ending)
def get_device_owner(mac, serial):
"""Gets the internal/private/email id of the device owner"""
device_id = b64encode('n200|null|{0}|{1}'.format(mac, serial))
command_id = generate_command_id(serial)
parameters = {
'commandId': command_id,
'deviceId': device_id}
res = requests.get(GET_DEVICE_OWNER, params=parameters, headers=HEADERS,
verify=False)
return res.json()["ownerId"]
def get_user_id_from_email(email):
"""Converts the email/private id to the user id used in channels"""
parameters = {
'PrivateId': email,
# Not needed but just in case
'appKey': PLAYBOX_APP_KEY}
data = {'JSONtext': json.dumps(parameters)}
res = requests.post(GET_USER_ID, headers=HEADERS, data=data,
verify=False)
return res.json()["UserId"]
def get_user_id():
"""Get and save AIS user id and email/private id."""
mac = xbmcplugin.getSetting(plugin_handle, 'playboxMAC').strip().upper()
if re.match('^([0-9A-F]{2}[:]){5}([0-9A-F]{2})$', mac) is None:
xbmcgui.Dialog().ok('AIS', 'Wrong MAC address')
return
serial = xbmcplugin.getSetting(plugin_handle, 'playboxSerial').strip()
email = get_device_owner(mac, serial)
user_id = get_user_id_from_email(email)
myself = xbmcaddon.Addon()
myself.setSetting('privateId', email)
myself.setSetting('userId', user_id)
def check_settings():
"""Checks if there is a user id needed to play TV"""
user_id = xbmcplugin.getSetting(plugin_handle, 'userId')
if user_id:
return
get_user_id()
def refresh_packages():
user_id = xbmcplugin.getSetting(plugin_handle, 'userId')
packages = cache.cacheFunction(get_subscriber_packages, user_id)
def router(paramstring):
"""Decides what to do based on script parameters"""
check_settings()
params = dict(parse_qsl(paramstring))
# Nothing to do yet with those
if not params:
# Demo channel list
channels = map_channels(filter_channels(get_tv_channels()))
xbmcplugin.addDirectoryItems(plugin_handle, channels, len(channels))
xbmcplugin.addSortMethod(
plugin_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
xbmcplugin.endOfDirectory(plugin_handle)
elif params['action'] == 'play':
play_channel(params['channel'])
elif params['action'] == 'get_user_id':
get_user_id()
if __name__ == '__main__':
router(sys.argv[2][1:])
|
# Workshop: Integrate the AWS Cloud with Responsive Xilinx Machine Learning at the Edge
# Copyright (C) 2018 Amazon.com, Inc. and Xilinx Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import greengrasssdk
import platform
from threading import Timer
import signal
import logging
import os
client = greengrasssdk.client('iot-data')
my_platform = platform.platform()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
def greengrass_hello_world_run():
if not my_platform:
payload = {'message': 'Hello world! Sent from Greengrass Core'}
client.publish(topic='hello/world', payload=json.dumps(payload))
else:
payload = {'message': 'Hello world! Sent from Greengrass Core '
'running on platform: {}'.format(my_platform)}
client.publish(topic='hello/world', payload=json.dumps(payload))
Timer(5, greengrass_hello_world_run).start()
SYSFS_LEDS = [
'/sys/class/leds/ds4',
'/sys/class/leds/ds5',
'/sys/class/leds/ds6'
]
def trap_my_signal(signal, frame):
logger.info('In trap_signal')
logger.info('In trap with signal {0}'.format(str(signal)))
for f in SYSFS_LEDS:
with open(os.path.join(f, 'brightness'), 'w') as fh:
fh.write('{0}'.format('0'))
#signal.signal(signal.SIGINT, trap_my_signal)
#signal.signal(signal.SIGHUP, trap_my_signal)
signal.signal(signal.SIGTERM, trap_my_signal)
#signal.signal(signal.SIGKILL, trap_my_signal)
greengrass_hello_world_run()
def lambda_handler(event, context):
return
|
import cv2
src = cv2.imread("../res/3.jpg", cv2.IMREAD_COLOR)
dst = cv2.bitwise_not(src)
cv2.imshow("src", src)
cv2.imshow("dst", dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
from __future__ import print_function
import os.path, pickle, time, yaml
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = ["https://www.googleapis.com/auth/directory.readonly"]
def main():
"""Shows basic usage of the People API.
Prints the name of the first 10 connections.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('people', 'v1', credentials=creds)
# Call the People API
## return service
request = service.people().listDirectoryPeople(
mergeSources="DIRECTORY_MERGE_SOURCE_TYPE_CONTACT",
pageSize=1000,
readMask="names,emailAddresses",
sources="DIRECTORY_SOURCE_TYPE_DOMAIN_CONTACT")
c = 0
with open("bot_stats.yaml") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
    while request is not None:
if c != 0:
request = service.people().listDirectoryPeople(
mergeSources="DIRECTORY_MERGE_SOURCE_TYPE_CONTACT",
pageSize=1000,
pageToken=req.get("nextPageToken"),
readMask="names,emailAddresses",
sources="DIRECTORY_SOURCE_TYPE_DOMAIN_CONTACT")
req = request.execute()
contacts = req.get("people", [])
with open("loot.txt", 'a') as f:
for person in contacts:
names = person.get("names", [])
emails = person.get("emailAddresses", [])
if names and emails:
name = names[0].get("displayName")
email = emails[0].get("value")
f.write(f"{name}\t{email}\n")
c += 1
print(name, email)
print(req.get("nextPageToken"))
with open("bot_stats.yaml", 'w') as f:
data["Database Status"] = f":yellow_circle: (Updating as of {time.ctime()})"
data["Total Records Retrieved"] = c
yaml.dump(data, f)
if c > 50000:
return 0
time.sleep(60)
print("Escaped with", c, "records!")
if __name__ == "__main__":
t0 = time.process_time()
service = main()
t1 = time.process_time()
total = t1 - t0
print(f"\nTimestamp 1: {t0} secs\nTimestamp 2: {t1} secs")
print("Module Time Elapsed:", total, "seconds")
|
from django.shortcuts import render
from .serializers import *
from rest_framework.renderers import JSONRenderer
from management.commands.updatecomponents import send_GET_request
from django.http import JsonResponse, HttpResponseServerError
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse
import urllib, json
from django.core.management import call_command
from django.core.serializers import serialize
from django.views.decorators.clickjacking import xframe_options_exempt
from django.core.paginator import Paginator, EmptyPage
import numpy as np
def index(request):
top_downloaded_components = Component.objects.all().only('name', 'id', 'downloads', 'url_name').order_by('-downloads')[:3]
top_starred_components = Component.objects.all().only('name', 'id', 'stars', 'url_name').order_by('-stars')[:3]
recent_components = Component.objects.all().only('name', 'id', 'modified_time', 'url_name').order_by('-modified_time')[:3]
downloaded = DownloadSerializer(top_downloaded_components, many=True) # serialized data containing number of downloads
starred = StarSerializer(top_starred_components, many=True) # serialized data containing number of stars
recent = RecentComponentSerializer(recent_components, many=True)
return JsonResponse({
'top_dl_components':downloaded.data,
'top_starred_components':starred.data,
'most_recent_components':recent.data,
})
def all_components(request): # requested on_load() for querying
all_components = BaseComponentSerializer(Component.objects.all().only('name', 'id', 'url_name'), many=True)
return JsonResponse({
'all_components':all_components.data,
})
def top_components(request):
# Download data is from Github and hence stars are used
top_components = TopComponentSerializer(Component.objects.all().order_by('-stars')[:10], many=True)
return JsonResponse({
'top_components':top_components.data,
})
def components(request):
size = request.GET.get('size', 20)
page = request.GET.get('page', 1)
components_list = Component.objects.all().order_by('-modified_time')
paginator = Paginator(components_list, size)
try:
currentPage = paginator.page(page)
serialised = TopComponentSerializer(currentPage, many=True)
components = serialised.data
except EmptyPage:
components = []
return JsonResponse({ 'components': components })
def component_details(request, url_name):
component = Component.objects.get(url_name=url_name)
details = DetailComponentSerializer(component, context={'request':request})
contributions = ContributionSerializer(component.contributions, many=True)
js_dependencies = JSDependencySerializer(component.jsdependency_set.all(), many=True)
css_dependencies = CSSDependencySerializer(component.cssdependency_set.all(), many=True)
try:
sniper_data = component.sniperdata
sniper = SniperDataSerializer(sniper_data)
snippets = SnippetSerializer(sniper_data.snippet_set.all(), many=True)
return JsonResponse({
'details' : details.data,
'contributors' : contributions.data,
'js_dependencies' : js_dependencies.data,
'css_dependencies' : css_dependencies.data,
'sniper_data' : sniper.data,
'snippets' : snippets.data,
})
    except Exception:
return JsonResponse({
'details' : details.data,
'contributors' : contributions.data,
'js_dependencies' : js_dependencies.data,
'css_dependencies' : css_dependencies.data,
})
@xframe_options_exempt
def render_visualization(request, url_name, visualization_name):
try:
component = Component.objects.get(url_name=url_name)
js_dependencies = component.jsdependency_set.all()
css_dependencies = component.cssdependency_set.all()
sniper_data = component.sniperdata
snippet = Snippet.objects.get(sniperData=sniper_data, name=visualization_name)
response = send_GET_request(snippet.url)
script = response.read()
serializer = DetailComponentSerializer(component).data
component_data = JSONRenderer().render(serializer)
js_deps_json = serialize('json', js_dependencies)
css_deps_json = serialize('json', css_dependencies)
context = {
'component' : component_data,
'js_dependencies' : js_deps_json,
'css_dependencies' : css_deps_json,
'snippet' : snippet,
'snippet_script' : script,
'sniper_data' : sniper_data,
'no_browserify': sniper_data.no_browserify
}
# return HttpResponse()
return render(request, 'main/visualizations.html', context)
except Exception as e:
print('Error in visualisation!', e)
return HttpResponseServerError(e)
@staff_member_required
def update_data(request):
call_command('updatecomponents')
return HttpResponse("Database Successfully Updated.")
def generate_random_snippets(request):
try:
count = request.GET.get('q')
if int(count) > Component.objects.filter(sniperdata__isnull=False).count():
return JsonResponse({'error':'Input number q must not exceed %s.'%str(Component.objects.filter(sniperdata__isnull=False).count())})
components = Component.objects.filter(sniperdata__isnull=False)
required_components = np.random.choice(components, int(count), replace=False)
return JsonResponse({'components':BaseComponentSerializer(required_components, many=True).data})
    except Exception:
return JsonResponse({'error':'Input number as query q in the URL.'}) |
import urllib
from behave import *
from hamcrest import *
from eats.pyhamcrest import array_equal_to_by_key, array_equal_to
from eats.utils.sitemap import sitemap_parser, replace_env_url_to_prod, SiteMapGen, url_encode
from eats.utils.robots import RobotFileEats
from eats.utils.google_site_verification import GoogleSiteVerification
from eats.utils.bing import bing_parser
from eats.utils.mapping import table_mapping
@then(u'{user_name:Username} should have the following google tracking keys information')
def step_impl(context, user_name):
user = context.users.get(user_name)
application = user.current_application
keys = context.table.headings
ga = application.driver.get_google_tracking_keys()
assert_that(table_mapping(ga, keys=keys), array_equal_to(table_mapping(context.table)))
@then(u'{user_name:Username} should have the following information on sitemap.xml')
def step_impl(context, user_name):
user = context.users.get(user_name)
application = user.current_application
application.go_to_url('/sitemap.xml')
contents = application.get_page_source()
sitemap = sitemap_parser(contents)
prod_netloc = context.application.prod_netloc
keys = ["loc", "lastmod", "priority", "changefreq"]
trans = lambda page: url_encode(replace_env_url_to_prod(application.get_page(page).url_without_fragment, prod_netloc))
expected = table_mapping(context.table, maps={"page": "loc"}, keys=keys, transform={"page": trans})
gen_sitemap = SiteMapGen(expected)
gen_sitemap.write_xml(context.workspace.cwd() + "/sitemap.xml")
print(expected)
assert_that(sitemap, array_equal_to_by_key(expected, "loc"))
@then(u'{user_name:Username} should have the following information on BingSiteAuth.xml')
def step_impl(context, user_name):
user = context.users.get(user_name)
application = user.current_application
application.go_to_url('/BingSiteAuth.xml')
contents = application.get_page_source()
bing = bing_parser(contents)
assert_that(bing, array_equal_to_by_key(table_mapping(context.table), "user"))
@then(u'{user_name:Username} should have the following information on robots.txt')
def step_impl(context, user_name):
user = context.users.get(user_name)
application = user.current_application
application.go_to_url('/robots.txt')
f = urllib.urlopen(application.current_url())
contents = f.read()
robots = RobotFileEats(contents)
expected = [{"entry": x["ENTRY"], "value": x["VALUE"]} for x in context.table]
assert_that(robots.entries, array_equal_to(expected))
@then(u'{user_name:Username} should have the following information on "{relative_path}" google site verification')
def step_impl(context, user_name, relative_path):
user = context.users.get(user_name)
application = user.current_application
application.go_to_url(relative_path)
f = urllib.urlopen(application.current_url())
contents = f.read()
gsv = GoogleSiteVerification(contents)
expected = [{"entry": x["ENTRY"], "value": x["VALUE"]} for x in context.table]
assert_that(gsv.entries, array_equal_to(expected))
|
"""Example/test script for (stress) testing multiple requests to the bridge."""
import argparse
import asyncio
import logging
from os.path import abspath, dirname
from sys import path
import time
path.insert(1, dirname(dirname(abspath(__file__))))
from aiohue import HueBridgeV2
parser = argparse.ArgumentParser(description="AIOHue Example")
parser.add_argument("host", help="hostname of Hue bridge")
parser.add_argument("appkey", help="appkey for Hue bridge")
parser.add_argument("--debug", help="enable debug logging", action="store_true")
args = parser.parse_args()
async def main():
"""Run Main execution."""
if args.debug:
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)-15s %(levelname)-5s %(name)s -- %(message)s",
)
asyncio.get_running_loop().set_debug(True)
async with HueBridgeV2(args.host, args.appkey) as bridge:
print("Connected to bridge: ", bridge.bridge_id)
print(bridge.config.bridge_device)
# pick a random light
light = bridge.lights.items[0]
print(f"Sending 100 requests to bridge for {light.name}...")
async def toggle_light():
await bridge.lights.turn_on(light.id)
await bridge.lights.turn_off(light.id)
before = time.time()
await asyncio.gather(*[toggle_light() for i in range(0, 50)])
after = time.time()
print(f"Completed in {after-before} seconds...")
try:
asyncio.run(main())
except KeyboardInterrupt:
pass
|
# Generated by Django 4.0.1 on 2022-01-07 23:11
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Livros',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome_do_livro', models.CharField(max_length=200)),
('editora_do_livro', models.CharField(max_length=200)),
('autor', models.CharField(max_length=200)),
('genero', models.CharField(max_length=200)),
('estrelas', models.IntegerField()),
('situacao', models.CharField(blank=True, max_length=100)),
('opiniao', models.TextField(blank=True, max_length=500)),
],
),
]
|
def string_to_int(list_a):
new_list = []
for item in list_a:
num = int(item)
new_list.append(num)
return new_list
num_list = input().split()
num_list = string_to_int(num_list)
maximum = max(num_list)
num_set = set(num_list)
first_n_num_set = set(range(1, maximum+1))
missing_num_set = first_n_num_set.difference(num_set)
missing_num_list = list(missing_num_set)
missing_num_list.sort()
print(missing_num_list)
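# Worked example (a sketch, not part of the original script): for the input
# line "3 1 5" the list becomes [3, 1, 5], the maximum is 5, the set
# difference {1, 2, 3, 4, 5} - {3, 1, 5} is {2, 4}, and the script prints
# [2, 4].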
|
from workerpool import WorkerPool
"""
WARNING: This sample class is obsolete since version 0.9.2. It will be removed
or replaced soon.
"""
class BlockingWorkerPool(WorkerPool):
"""
Similar to WorkerPool but a result queue is passed in along with each job
and the method will block until the queue is filled with one entry per job.
Bulk job lists can be performed using the `contract` method.
"""
def put(self, job, result):
"Perform a job by a member in the pool and return the result."
self.job.put(job)
r = result.get()
return r
def contract(self, jobs, result):
"""
Perform a contract on a number of jobs and block until a result is
retrieved for each job.
"""
for j in jobs:
WorkerPool.put(self, j)
r = []
for i in xrange(len(jobs)):
r.append(result.get())
return r
|
import pytest
from jsondaora.exceptions import DeserializationError
from jsondaora.schema import IntegerField, StringField
def test_should_validate_minimum_integer():
class Integer(IntegerField, minimum=10):
...
with pytest.raises(DeserializationError) as exc_info:
Integer(9)
assert exc_info.value.args == ('Invalid minimum integer value: 10 < 9',)
def test_should_validate_maximum_integer():
class Integer(IntegerField, maximum=9):
...
with pytest.raises(DeserializationError) as exc_info:
Integer(10)
assert exc_info.value.args == ('Invalid maximum integer value: 10 < 9',)
def test_should_validate_min_length_string():
class String(StringField, min_length=2):
...
with pytest.raises(DeserializationError) as exc_info:
String('1')
assert exc_info.value.args == ('Invalid min_length string value: 2 < 1',)
def test_should_validate_max_length_string():
class String(StringField, max_length=2):
...
with pytest.raises(DeserializationError) as exc_info:
String('333')
assert exc_info.value.args == ('Invalid max_length string value: 3 < 2',)
|
from django.apps import AppConfig
from django.db.models.signals import post_save
from django.utils.translation import ugettext_lazy as _
from sui_hei.signals import (add_twitter_on_puzzle_created,
add_twitter_on_schedule_created)
class SuiHeiConfig(AppConfig):
name = 'sui_hei'
verbose_name = _('Lateral Thinking')
def ready(self):
from sui_hei.models import Puzzle, Schedule
post_save.connect(add_twitter_on_puzzle_created, sender=Puzzle)
post_save.connect(add_twitter_on_schedule_created, sender=Schedule)
|
from typing import Dict, List, Union
import numpy as np
def boolean_mask_by_value(mask: np.ndarray, value: int) -> np.ndarray:
return boolean_mask_by_values(mask=mask, values=[value])
def boolean_mask_by_values(mask: np.ndarray, values: List[int]) -> np.ndarray:
return np.isin(mask, values)
def replace_value(mask: np.ndarray, old_value: int, new_value: int) -> np.ndarray:
return replace_values(mask=mask, value_map={old_value: new_value})
def replace_values(
mask: np.ndarray, value_map: Dict[int, int], value_min: Union[int, None] = None, value_max: Union[int, None] = None
) -> np.ndarray:
index_substitutes = np.array(
[
value_map.get(item, item)
for item in range(
value_min if value_min is not None else np.iinfo(mask.dtype).min,
(value_max if value_max is not None else np.iinfo(mask.dtype).max) + 1,
)
]
)
return index_substitutes[mask]
def encode_int32_as_rgb8(mask: np.ndarray) -> np.ndarray:
return np.concatenate([mask & 0xFF, mask >> 8 & 0xFF, mask >> 16 & 0xFF], axis=-1).astype(np.uint8)
def encode_rgb8_as_int32(mask: np.ndarray) -> np.ndarray:
return (mask[..., 2:3] << 16) + (mask[..., 1:2] << 8) + mask[..., 0:1]
def encode_2int16_as_rgba8(mask: np.ndarray) -> np.ndarray:
return np.concatenate(
[mask[..., [0]] >> 8, mask[..., [0]] & 0xFF, mask[..., [1]] >> 8, mask[..., [1]] & 0xFF], axis=-1
).astype(np.uint8)
def lookup_values(
mask: np.ndarray, x: Union[np.ndarray, List], y: Union[np.ndarray, List], interpolate: bool = False
) -> np.ndarray:
"""Executes bilinear interpolation on a 2D plane.
Args:
mask: Array of shape (M x N [x L]). Note: If 3 dimensions are provided,
bilinear interpolation is performed on each 2D plane in the first two dimensions.
x: List of indices to interpolate on along the x-axis (columns).
Indices < 0 and > (M-1, N-1) will be clipped to 0 or (M-1, N-1), respectively.
y: List of indices to interpolate on along the y-axis (rows).
Indices < 0 and > (M-1, N-1) will be clipped to 0 or (M-1, N-1), respectively.
Returns:
Returns interpolated values for input (x,y) as array with shape (len(x) [x L]).
"""
x = np.asarray(x)
y = np.asarray(y)
if x.ndim > 1 and x.shape[1] > 1:
raise ValueError(f"Expecting shapes (N) or (N x 1) for `x`, received {x.shape}.")
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError(f"Expecting shapes (N) or (N x 1) for `y`, received {y.shape}.")
if x.shape != y.shape:
raise ValueError(f"Both `x` and `y` must have same shapes, received x: {x.shape} and y: {y.shape}.")
x = x.reshape(-1)
y = y.reshape(-1)
if interpolate:
x0 = np.floor(x).astype(int)
x1 = x0 + 1
y0 = np.floor(y).astype(int)
y1 = y0 + 1
x0 = np.clip(x0, 0, mask.shape[1] - 1)
x1 = np.clip(x1, 0, mask.shape[1] - 1)
y0 = np.clip(y0, 0, mask.shape[0] - 1)
y1 = np.clip(y1, 0, mask.shape[0] - 1)
Ia = mask[y0, x0]
Ib = mask[y1, x0]
Ic = mask[y0, x1]
Id = mask[y1, x1]
wa = (x1 - x) * (y1 - y)
wb = (x1 - x) * (y - y0)
wc = (x - x0) * (y1 - y)
wd = (x - x0) * (y - y0)
interpolated_result = (Ia.T * wa).T + (Ib.T * wb).T + (Ic.T * wc).T + (Id.T * wd).T
border_cases = np.logical_or(x0 == x1, y0 == y1)
interpolated_result[border_cases] = mask[y0[border_cases], x0[border_cases]]
return interpolated_result
else:
return mask[np.clip(y, 0, mask.shape[0] - 1).astype(int), np.clip(x, 0, mask.shape[1] - 1).astype(int)]
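if __name__ == "__main__":
    # Minimal, illustrative demo of the helpers above (values chosen for readability;
    # not part of the original module).
    demo_mask = np.array([[0, 1], [2, 3]], dtype=np.uint8)
    # Select all pixels whose value is 1 or 3 -> [[False, True], [False, True]]
    print(boolean_mask_by_values(demo_mask, values=[1, 3]))
    # Remap label 1 -> 9, leaving all other labels unchanged -> [[0, 9], [2, 3]]
    print(replace_values(demo_mask, {1: 9}))
    # Bilinear lookup halfway between the four corner pixels -> [1.5]
    print(lookup_values(demo_mask, x=[0.5], y=[0.5], interpolate=True))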
|
import unittest
import numpy as np
from random import choice
from unittest import mock
from aix360.algorithms.rbm import (
BooleanRuleCG, BRCGExplainer, GLRMExplainer, LinearRuleRegression,
LogisticRuleRegression
)
from pandas import DataFrame
from depiction.core import DataType, Task
from depiction.interpreters.aix360.rule_based_model import RuleAIX360
from depiction.models.base.base_model import BaseModel
class DummyModel(BaseModel):
def predict(self, sample):
return np.array([choice([0, 1]) for _ in range(sample.shape[0])])
class RuleAIX360TestCase(unittest.TestCase):
def setUp(self):
self.X = np.random.randn(100, 10)
self.y = (np.random.randn(100) > 0.).astype(int)
def _build_posthoc_interpreter(self):
model = DummyModel(
choice(list(RuleAIX360.SUPPORTED_TASK)),
choice(list(RuleAIX360.SUPPORTED_DATATYPE))
)
interpreter = RuleAIX360(
choice(list(RuleAIX360.AVAILABLE_INTERPRETERS)),
X=self.X,
model=model
)
return interpreter
def _build_antehoc_interpreter(self):
interpreter = RuleAIX360(
choice(list(RuleAIX360.AVAILABLE_INTERPRETERS)), self.X, y=self.y
)
return interpreter
def testConstructor(self):
# test error for wrong model
NOT_SUPPORTED_TASKS = [
t for t in set(Task) for T in RuleAIX360.SUPPORTED_TASK
if not (t <= T)
]
NOT_SUPPORTED_TYPES = list(
set(DataType).difference(RuleAIX360.SUPPORTED_DATATYPE)
)
wrong_model = DummyModel(
choice(NOT_SUPPORTED_TASKS), choice(NOT_SUPPORTED_TYPES)
)
with self.assertRaises(ValueError):
RuleAIX360(
choice(list(RuleAIX360.AVAILABLE_INTERPRETERS)),
X=self.X,
model=wrong_model
)
# test error for not supported interpreter
with self.assertRaises(ValueError):
RuleAIX360('', X=self.X, y=self.y)
# test error for not supported GLRM regressor
with self.assertRaises(ValueError):
RuleAIX360('glrm_bubu', X=self.X, y=self.y)
# test correctly chosen glrm and regressor
valid_glrm = [
i for i in RuleAIX360.AVAILABLE_INTERPRETERS if 'glrm' in i
]
interpreter = RuleAIX360(choice(valid_glrm), X=self.X, y=self.y)
self.assertTrue(isinstance(interpreter.explainer, GLRMExplainer))
self.assertTrue(
isinstance(interpreter.regressor, LogisticRuleRegression)
or isinstance(interpreter.regressor, LinearRuleRegression)
)
self.assertFalse(interpreter._fitted)
# -- test correctness of ante-hoc model
self.assertEqual(interpreter.usage_mode, RuleAIX360.UsageMode.ANTE_HOC)
self.assertTrue(
Task.check_support(interpreter.task, RuleAIX360.SUPPORTED_TASK)
)
self.assertTrue(interpreter.data_type in RuleAIX360.SUPPORTED_DATATYPE)
# test brcg model
interpreter = RuleAIX360('brcg', X=self.X, y=self.y)
self.assertTrue(isinstance(interpreter.explainer, BRCGExplainer))
self.assertTrue(isinstance(interpreter.regressor, BooleanRuleCG))
self.assertFalse(interpreter._fitted)
# test with right model
interpreter = self._build_posthoc_interpreter()
self.assertEqual(interpreter.usage_mode, RuleAIX360.UsageMode.POST_HOC)
self.assertFalse(interpreter._fitted)
def testFit(self):
# test fit antehoc called correctly
interpreter = self._build_antehoc_interpreter()
with mock.patch.object(
interpreter, '_fit_antehoc'
) as mock_fit_antehoc:
interpreter.fit(0, 0)
mock_fit_antehoc.assert_called_once()
# test fit posthoc called correctly
interpreter = self._build_posthoc_interpreter()
with mock.patch.object(
interpreter, '_fit_posthoc'
) as mock_fit_posthoc:
interpreter.fit(0, 0)
mock_fit_posthoc.assert_called_once()
def testFitAntehoc(self):
interpreter = self._build_antehoc_interpreter()
with mock.patch.object(
interpreter.explainer, 'fit'
) as mock_explainer_fit:
interpreter.fit(0, 0)
mock_explainer_fit.assert_called_once()
def testFitPosthoc(self):
interpreter = self._build_posthoc_interpreter()
with mock.patch.object(
interpreter._to_interpret, 'predict'
) as mock_predict:
with mock.patch.object(
interpreter, '_fit_antehoc'
) as mock_fit_antehoc:
interpreter.fit(0)
mock_predict.assert_called_once()
mock_fit_antehoc.assert_called_once()
with mock.patch.object(
interpreter._to_interpret, 'predict'
) as mock_predict:
with mock.patch.object(
interpreter, '_fit_antehoc'
) as mock_fit_antehoc:
preprocess = mock.MagicMock()
interpreter.fit(0, preprocess)
preprocess.assert_called_once()
preprocess.assert_called_with(0)
with mock.patch.object(
interpreter._to_interpret, 'predict', return_value=2
) as mock_predict:
with mock.patch.object(
interpreter, '_fit_antehoc'
) as mock_fit_antehoc:
postprocess = mock.MagicMock()
interpreter.fit(0, postprocess_y=postprocess)
postprocess.assert_called_once()
postprocess.assert_called_with(2)
def testInterpret(self):
builder = choice(
[self._build_posthoc_interpreter, self._build_antehoc_interpreter]
)
interpreter = builder()
with mock.patch.object(
interpreter.explainer, 'explain'
) as mock_explain:
with mock.patch.object(
interpreter, '_visualize_explanation'
) as mock_visualize:
e = interpreter.interpret()
mock_explain.assert_called_once()
mock_visualize.assert_called_once()
self.assertTrue(e, interpreter.explanation)
with mock.patch.object(
interpreter.explainer, 'explain'
) as mock_explain:
with mock.patch.object(
interpreter, '_save_explanation'
) as mock_save:
e = interpreter.interpret(path='')
mock_explain.assert_called_once()
mock_save.assert_called_once()
self.assertTrue(e, interpreter.explanation)
def testVisualize(self):
"""
TODO(phineasng): think if it's possible or make sense to test this
"""
pass
def testSave(self):
builder = choice(
[self._build_posthoc_interpreter, self._build_antehoc_interpreter]
)
interpreter = builder()
# test DataFrame
df = DataFrame()
with mock.patch.object(df, 'to_pickle') as mock_to_pickle:
interpreter._save_explanation(df, path='')
mock_to_pickle.assert_called_with('')
exp = object()
module_name = 'depiction.interpreters.aix360.rule_based_model'
with mock.patch('{}.open'.format(module_name)) as mock_open:
with mock.patch('{}.pickle.dump'.format(module_name)) as mock_dump:
interpreter._save_explanation(exp, path='')
mock_open.assert_called_once()
mock_open.assert_called_with('', 'wb')
mock_dump.assert_called_once()
def testPredict(self):
builder = choice(
[self._build_posthoc_interpreter, self._build_antehoc_interpreter]
)
interpreter = builder()
with mock.patch.object(
interpreter.explainer, 'predict'
) as mock_predict:
interpreter.predict(0)
mock_predict.assert_called_once()
mock_predict.assert_called_with(0)
if __name__ == "__main__":
unittest.main()
|
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
#url(r'^e/(?P<path>.*)$', 'django.views.static.serve',
# { 'document_root': settings.STATIC_DOC_ROOT }),
url(r'^', include('endpoint.urls')),
)
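# For reference only: `patterns()` and this string-based include style were removed in
# Django 1.10. A rough modern equivalent of the routes above (not part of this project):
#
# from django.urls import include, path
#
# urlpatterns = [
#     path('admin/', admin.site.urls),
#     path('', include('endpoint.urls')),
# ]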
|
### TILE SHOWING THE RESULTS
from sepal_ui import sepalwidgets as sw
from sepal_ui import mapping as sm
import ipyvuetify as v
from component.message import ms
from component.scripts import *
from component import parameter as pm
# create an empty result tile that will be filled with displayable plot, map, links, text
class SelectionTile(sw.Tile):
def __init__(self, aoi_io, io, viz_tile, export_tile, **kwargs):
# gather the io
self.aoi_io = aoi_io
self.io = io
self.viz_tile = viz_tile
self.export_tile = export_tile
# create an output alert
self.output = sw.Alert()
#
        self.start = sw.Markdown(pm.start)
self.start_picker = sw.DatePicker(label='Start date')
self.end = sw.Markdown(pm.end)
self.end_picker = sw.DatePicker(label='End date')
self.select = sw.Markdown(pm.select)
self.l8 = v.Switch(
class_ = "ml-5",
label = ms.selection.l8,
v_model = False
)
self.l7 = v.Switch(
class_ = "ml-5",
label = ms.selection.l7,
v_model = False
)
self.l5 = v.Switch(
class_ = "ml-5",
label = ms.selection.l5,
v_model = False
)
self.l4 = v.Switch(
class_ = "ml-5",
label = ms.selection.l4,
v_model = False
)
self.t2 = v.Switch(
class_ = "ml-5",
label = ms.selection.t2,
v_model = False
)
self.s2 = v.Switch(
class_ = "ml-5",
label = ms.selection.s2,
v_model = False
)
self.sr_mess = sw.Markdown(pm.sr)
self.sr = v.Switch(
class_ = "ml-5",
label = ms.selection.sr,
v_model = False
)
self.stats = sw.Markdown(pm.stats)
self.measure = v.Select(
label = ms.selection.measure,
v_model = None,
items = pm.measures
)
self.annual = v.Switch(
class_ = "ml-5",
label = ms.selection.annual,
v_model = False
)
        # create the output alert
        # this component will be used to display information to the end user when the process is launched
        # it's hidden by default
        # it also has the embedded `bind` method that links a mutable variable to a component's v_model
        # bind returns self, so it can be chained to bind everything in one statement
        # args are (widget, io, io_attribute_name)
self.output = sw.Alert() \
.bind(self.start_picker, self.io, 'start') \
.bind(self.end_picker, self.io, 'end') \
.bind(self.l8, self.io, 'l8') \
.bind(self.l7, self.io, 'l7') \
.bind(self.l5, self.io, 'l5') \
.bind(self.l4, self.io, 'l4') \
.bind(self.t2, self.io, 't2') \
.bind(self.s2, self.io, 's2') \
.bind(self.sr, self.io, 'sr') \
.bind(self.measure, self.io, 'measure') \
.bind(self.annual, self.io, 'annual')
# to launch the process you'll need a btn
        # here it is provided as a special sw widget (the message and the icon can also be customized; see the sepal_ui widget doc)
self.btn = sw.Btn()
# construct the Tile with the widget we have initialized
super().__init__(
id_ = "selection_widget", # the id will be used to make the Tile appear and disapear
title = ms.selection.title, # the Title will be displayed on the top of the tile
inputs = [self.start, self.start_picker, self.end, self.end_picker,
self.select, self.l8, self.l7, self.l5, self.l4, self.t2, self.s2,
self.sr_mess, self.sr, self.stats, self.measure, self.annual],
btn = self.btn,
output = self.output
)
# now that the Tile is created we can link it to a specific function
self.btn.on_event("click", self._on_run)
# PROCESS AFTER ACTIVATING BUTTON
def _on_run(self, widget, data, event):
# toggle the loading button (ensure that the user doesn't launch the process multiple times)
widget.toggle_loading()
        # check that the inputs you're going to use are set (not mandatory)
if not self.output.check_input(self.aoi_io.get_aoi_name(), ms.process.no_aoi): return widget.toggle_loading()
# if not self.output.check_input(self.io.year, ms.process.no_slider): return widget.toggle_loading()
# Wrap the process in a try/catch statement
try:
dataset = analysis(
self.aoi_io.get_aoi_ee(),
self.io.start,
self.io.end,
self.io.l8,
self.io.l7,
self.io.l5,
self.io.l4,
self.io.t2,
self.io.s2,
self.io.sr,
#self.output
)
            # change the io values, as io is a mutable object
# useful if the io is used as an input in another tile
self.io.dataset = dataset
# release the export btn
self.export_tile.asset_btn.disabled = False
self.export_tile.sepal_btn.disabled = False
            # launch the visualization
self.viz_tile._on_change(None)
# conclude the computation with a message
self.output.add_live_msg(ms.process.end_computation, 'success')
except Exception as e:
self.output.add_live_msg(str(e), 'error')
# release the btn
widget.toggle_loading()
        return
|
import torch
X = torch.tensor(
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], requires_grad=True
)
K = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
def corr2d(X, K):
"""Compute 2D cross-correlation."""
h, w = K.shape
Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
Y[i, j] = (X[i : i + h, j : j + w] * K).sum()
return Y
if __name__ == "__main__":
try:
result = corr2d(X, K)
result.backward(
torch.ones(result.shape[0], result.shape[1], dtype=torch.float)
)
print(X.grad)
except Exception as e:
print("Error Message:", e)
print("我不知道Error是什麼QAQ")
|
from collections import deque
from typing import List
class Solution:
# # Deque (Accepted), O(n) time and space
# def diStringMatch(self, s: str) -> List[int]:
# queue = deque(range(len(s)+1))
# res = []
# for c in s:
# if c == 'I':
# res.append(queue.popleft())
# else:
# res.append(queue.pop())
# if queue:
# res.append(queue.pop())
# return res
# Ad-Hoc (Solution), O(n) time and space
def diStringMatch(self, S: str) -> List[int]:
lo, hi = 0, len(S)
ans = []
for x in S:
if x == 'I':
ans.append(lo)
lo += 1
else:
ans.append(hi)
hi -= 1
return ans + [lo]
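# Quick check (illustrative; outside the LeetCode harness you would call it directly):
#   Solution().diStringMatch("IDID") -> [0, 4, 1, 3, 2]
#   Solution().diStringMatch("III")  -> [0, 1, 2, 3]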
|
from typing import Optional, Any, Dict
from fastapi import status
from starlette.exceptions import HTTPException
from conf.const import StatusCode
from .schema import SchemaMixin
class BaseHTTPException(HTTPException):
MESSAGE = None
STATUS_CODE = status.HTTP_400_BAD_REQUEST
CODE = 40000
def __init__(
self,
message: Any = None,
code: int = None,
headers: Optional[Dict[str, Any]] = None
) -> None:
self.message = message or self.MESSAGE
self.status_code = self.STATUS_CODE
self.code = code or self.CODE
self.detail = self.message
self.headers = headers
def __repr__(self) -> str:
class_name = self.__class__.__name__
return f"{class_name}(status_code={self.status_code!r}, code={self.code}, msg={self.message!r})"
def response(self):
return SchemaMixin(code=self.code, message=self.message, data=None).dict()
class BadRequest(BaseHTTPException):
STATUS_CODE = status.HTTP_400_BAD_REQUEST
CODE = StatusCode.bad_request
class Unauthorized(BaseHTTPException):
STATUS_CODE = status.HTTP_401_UNAUTHORIZED
CODE = StatusCode.unauthorized
class Forbidden(BaseHTTPException):
STATUS_CODE = status.HTTP_403_FORBIDDEN
CODE = StatusCode.forbidden
class NotFound(BaseHTTPException):
STATUS_CODE = status.HTTP_404_NOT_FOUND
CODE = StatusCode.not_found
class MethodNotAllowed(BaseHTTPException):
STATUS_CODE = status.HTTP_405_METHOD_NOT_ALLOWED
CODE = StatusCode.method_not_allowed
class Locked(BaseHTTPException):
STATUS_CODE = status.HTTP_423_LOCKED
CODE = StatusCode.locked
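# A rough usage sketch (the route and item lookup are hypothetical, not part of this module):
# raising one of the subclasses above lets FastAPI return the matching HTTP status, while
# `response()` produces the unified {code, message, data} body defined by SchemaMixin.
#
# from fastapi import FastAPI
#
# app = FastAPI()
#
# @app.get("/items/{item_id}")
# async def read_item(item_id: int):
#     if item_id != 1:
#         raise NotFound(message=f"item {item_id} does not exist")
#     return {"item_id": item_id}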
|
# -*- coding: utf-8 -*-
__author__ = 'yijingping'
import time
import urllib2
ip_check_url = 'http://api.ipify.org'
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0'
socket_timeout = 3
# Get real public IP address
def get_real_pip():
req = urllib2.Request(ip_check_url)
req.add_header('User-agent', user_agent)
conn = urllib2.urlopen(req)
page = conn.read()
conn.close()
return page
# Set global variable containing "real" public IP address
real_pip = get_real_pip()
def check_proxy(host, port):
try:
# Build opener
proxy_handler = urllib2.ProxyHandler({'http': '%s:%s' % (host, port)})
opener = urllib2.build_opener(proxy_handler)
opener.addheaders = [('User-agent', user_agent)]
urllib2.install_opener(opener)
# Build, time, and execute request
req = urllib2.Request(ip_check_url)
time_start = time.time()
conn = urllib2.urlopen(req, timeout=socket_timeout)
time_end = time.time()
detected_pip = conn.read()
conn.close()
# Calculate request time
time_diff = time_end - time_start
# Check if proxy is detected
if detected_pip == real_pip:
proxy_detected = False
else:
proxy_detected = True
# Catch exceptions
except urllib2.HTTPError, e:
print "ERROR: Code ", e.code
return (True, False, 999)
except Exception, detail:
print "ERROR: ", detail
return (True, False, 999)
# Return False if no exceptions, proxy_detected=True if proxy detected
    return (False, proxy_detected, time_diff)
|