filename | text
---|---
the-stack_106_14174
|
""""
Layers / convolution layers under tensorflow environment.
Supports NCCL multi-gpu environment.
To activate the environment, use code below in your
main.py.
>> os.environ['nccl_multigpu_env'] = 'true'
"""
__version__ = "1.0.0"
import os
import tensorflow as tf
from ..normalizations import spectral_norm
NCCL_FLAG = os.environ.get('nccl_multigpu_env')
def conv2d(input_, output_dim, kernel=(5, 5), strides=(2, 2), sn=False, name="conv2d", tower_config=None):
with tf.variable_scope(name):
w = tf.get_variable(name="w",
shape=[kernel[0], kernel[1], input_.get_shape()[-1], output_dim],
initializer=tf.contrib.layers.xavier_initializer(uniform=False))
b = tf.get_variable(name='b',
shape=[output_dim],
initializer=tf.constant_initializer(0.0))
if sn:
conv = tf.nn.conv2d(input_, spectral_norm(w, tower_config=tower_config),
strides=[1, strides[0], strides[1], 1],
padding='SAME')
else:
conv = tf.nn.conv2d(input_, w,
strides=[1, strides[0], strides[1], 1],
padding='SAME')
return tf.nn.bias_add(conv, b)
def conv2d_transpose(input_,
output_shape,
kernel=(4, 4),
strides=(2, 2),
sn=False,
name="conv2d_transpose",
with_w=False,
tower_config=None):
with tf.variable_scope(name):
# filter : (height, width, output_channels, in_channels)
w = tf.get_variable(name="w",
shape=[kernel[0], kernel[1], output_shape[-1], input_.get_shape()[-1]],
initializer=tf.contrib.layers.xavier_initializer(uniform=False))
b = tf.get_variable(name="b",
shape=[output_shape[-1]],
initializer=tf.constant_initializer(0.0))
if sn:
conv_tp = tf.nn.conv2d_transpose(input_, spectral_norm(w, tower_config=tower_config),
output_shape=output_shape,
strides=[1, strides[0], strides[1], 1])
else:
conv_tp = tf.nn.conv2d_transpose(input_, w,
output_shape=output_shape,
strides=[1, strides[0], strides[1], 1])
conv_tp = tf.reshape(tf.nn.bias_add(conv_tp, b), conv_tp.get_shape())
if with_w:
return conv_tp, w, b
else:
return conv_tp
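# --- Minimal usage sketch (illustrative; assumes a TF1-style graph and that this
# module is imported as part of its package). The tensor names below are hypothetical.
if __name__ == "__main__":
    x = tf.placeholder(tf.float32, shape=[8, 64, 64, 3], name="x")
    # 5x5, stride-2 convolution producing 32 feature maps (spatial size 32x32).
    h = conv2d(x, output_dim=32, name="conv_example")
    # Transposed convolution mapping back to an 8x64x64x3 tensor.
    y = conv2d_transpose(h, output_shape=[8, 64, 64, 3], kernel=(4, 4), name="deconv_example")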
|
the-stack_106_14178
|
import logging
import subprocess
import re
from git import GitCommandError
import gifi.epic
import gifi.git_hub
from gifi.command import AggregatedCommand, Command, CommandException
from gifi.git_hub import PULL_REQUEST_COMMIT_TAG
from gifi.utils import git_utils
from gifi.utils.configuration import Configuration, configuration_command
from gifi.utils.git_utils import get_repo, check_repo_is_clean, get_from_last_commit_message
class Feature:
@staticmethod
def parse(branch):
parts = branch.split('/')
target_remote = parts[0]
target_branch = '/'.join(parts[1:-1])
feature = parts[-1]
return Feature(target_remote, target_branch, feature)
def __init__(self, target_remote, target_branch, name):
self.target_remote = target_remote
self.target_branch = target_branch
self.name = name
def to_branch_name(self):
return "%s/%s/%s" % (self.target_remote, self.target_branch, self.name)
def _start(feature=None, e=None):
repo = get_repo()
if e is None:
e = gifi.epic.select()
else:
e = gifi.epic.Epic.parse(e)
numbered_epic_features = list(map(
lambda head: head.name.replace(e.to_string() + '/', ''),
[head for head in repo.heads if re.match(r'%s/[0-9].*' % e.to_string(), head.name)]))
feature_id = 1
if len(numbered_epic_features) > 0:
feature_id = 1 + max(map(
lambda epic_feature: int('0' + re.sub('_.*', '', epic_feature)),
numbered_epic_features))
feature_branch = '%s/%03d' % (e.to_string(), feature_id)
if feature:
feature_branch = '%s_%s' % (feature_branch, feature)
git_utils.check_repo_is_clean(repo)
print('Starting ', feature_branch)
_fetch(repo, e.remote)
repo.create_head(feature_branch, e.to_string())
repo.heads[feature_branch].set_tracking_branch(repo.remotes[e.remote].refs[e.branch])
repo.heads[feature_branch].checkout()
def _fetch(repo, remote):
try:
repo.git.fetch(remote)
except GitCommandError as e:
logging.warn('Unable to fetch: %s' % e)
print('WARNING: Unable to fetch changes.')
def _publish(message=None):
repo = get_repo()
check_repo_is_clean(repo)
config = configuration(repo)
_push_working_branch(config, repo)
if config.publish_with_pull_request:
gifi.git_hub.request(repo, message)
def _push_working_branch(config, repo):
current_branch = _current_feature_branch(repo)
push_params = [config.working_remote, 'HEAD:%s' % current_branch]
try:
repo.git.push('-u', *push_params)
except GitCommandError as e:
logging.warn('Unable to push (publish) feature branch without force: %s' % e)
message = 'Unable to push your changes ("git push -u %s %s"). Would you like to use force?'
question = message % tuple(push_params)
if ask(question):
repo.git.push('-f', '-u', *push_params)
else:
raise CommandException('Manual pull and rebase is required')
def ask(question):
while True:
answer = input('%s [yes|no]: ' % question).strip().lower()
if answer == 'yes':
return True
elif answer == 'no':
return False
def _finish():
repo = get_repo()
check_repo_is_clean(repo)
config = configuration(repo)
_current_feature_branch(repo)
_rebase(repo, config)
feature = current(repo)
_push_working_branch(config, repo)
repo.git.push(feature.target_remote, 'HEAD:%s' % feature.target_branch)
_discard()
def _rebase(repo=None, config=None):
repo = get_repo(repo)
if config is None:
config = configuration(repo)
feature = current(repo)
_fetch(repo, feature.target_remote)
interactive = '-i' if config.finish_with_rebase_interactive else ''
rebase_cmd = 'git rebase %s/%s %s' % (feature.target_remote, feature.target_branch, interactive)
rebase_status = subprocess.call(rebase_cmd, shell=True)
if rebase_status != 0:
message = 'Rebase finished with an error, please fix it manually and then use "git rebase --continue"'
raise CommandException(message)
def _discard():
repo = get_repo()
config = configuration(repo)
feature = current(repo)
if repo.is_dirty():
if ask("There are uncommitted changes, would you like to remove them"):
repo.git.reset('--hard', 'HEAD')
else:
return
repo.git.checkout(feature.target_branch)
try:
repo.git.push(config.working_remote, ':%s' % feature.to_branch_name())
except GitCommandError as e:
logging.warn('Unable to drop remote feature branch: %s' % e)
print('WARNING: Unable to remove remote feature branch. Maybe it was not yet created?')
repo.git.branch('-D', feature.to_branch_name())
repo.git.rebase('%s/%s' % (feature.target_remote, feature.target_branch))
repo.git.fetch('%s' % config.working_remote, '--prune')
def configuration(repo=None):
repo = get_repo(repo)
return Configuration(repo, 'feature', {
'finish-with-rebase-interactive': (False, 'Should do a rebase interactive during feature finishing'),
'publish-with-pull-request': (False, 'Should create a pull request during feature publishing'),
'working-remote': ('origin', 'The remote you are working on')
})
def is_on_feature_branch(repo):
current_branch = git_utils.get_current_branch(repo)
return current_branch.count('/') > 1
def _current_feature_branch(repo):
current_branch = git_utils.get_current_branch(repo)
if not current_branch.count('/') > 1:
raise CommandException('Please checkout to a feature branch.')
return current_branch
def current(repo):
return Feature.parse(_current_feature_branch(repo))
command = AggregatedCommand('feature', 'Manages feature branches.', [
Command('start', 'Creates a new feature branch.', _start, '<feature name>'),
Command('publish', 'Publishes a feature branch to review.', _publish),
Command('finish', 'Closes and pushes a feature to a feature epic branch.', _finish),
Command('discard', 'Closes a feature branch without a push.', _discard),
Command('rebase', 'Rebases current feature on recent epic.', _rebase),
configuration_command(configuration, 'Configure feature behaviour.')
])
|
the-stack_106_14179
|
# encoding: utf-8
from sdsstools import get_config, get_logger, get_package_version
# pip package name
NAME = 'sdss-valis'
# Loads config. config name is the package name.
config = get_config('valis')
# Inits the logging system as NAME. Only shell logging, and exception and warning catching.
# File logging can be started by calling log.start_file_logger(path). Filename can be different
# than NAME.
log = get_logger(NAME)
# package name should be pip package name
__version__ = get_package_version(path=__file__, package_name=NAME)
|
the-stack_106_14183
|
import numpy as np
import pygame
from load_utils import load_png
from settings import PADDLE_IMG, MAX_FPS
class Paddle(pygame.sprite.Sprite):
"""
Movable paddle
Returns: Paddle object
Functions: reinit, update, move_left, move_right
"""
MAX_RESIZE_TIMES = 2
MAX_SPEED_CHANGE = 1
LASER_COUNT = 3
def __init__(self, area):
super().__init__()
self.image = self.rect = None
self.area = area
self.speed = 300 / MAX_FPS
self.movepos = [0, 0]
self.bounce_angle_range = (3.4, 6.)
self.bounce_angle_array = None
self.resize_state = self.speed_state = self.laser = 0
self.reinit()
def reinit(self):
self.image, self.rect = load_png(PADDLE_IMG)
self.movepos = [0, 0]
self.rect.midbottom = self.area.midbottom
self.resize_state = self.speed_state = 0
self.bounce_angle_array = np.linspace(*self.bounce_angle_range, self.rect.width)
def update(self):
newpos = self.rect.move(self.movepos)
if self.area.contains(newpos):
self.rect = newpos
pygame.event.pump()
def move_left(self):
self.movepos[0] = self.movepos[0] - self.speed
def move_right(self):
self.movepos[0] = self.movepos[0] + self.speed
def stop(self):
self.movepos = [0, 0]
def get_bounce_angle(self, ball_x):
idx = ball_x - self.rect.left
if idx < 0:
idx = 0
if idx >= self.rect.width:
idx = self.rect.width - 1
return self.bounce_angle_array[idx]
def shrink(self):
if self.resize_state != -self.MAX_RESIZE_TIMES:
self._resize_width_by(0.8)
self.resize_state -= 1
def expand(self):
if self.resize_state != self.MAX_RESIZE_TIMES:
self._resize_width_by(1.2)
self.resize_state += 1
def _resize_width_by(self, by):
self.image = pygame.transform.scale(self.image, (round(self.image.get_width() * by), self.image.get_height()))
old_pos = self.rect.topleft
self.rect = self.image.get_rect()
self.rect.topleft = old_pos
self.bounce_angle_array = np.linspace(*self.bounce_angle_range, self.rect.width)
if not self.area.contains(self.rect):
self.rect.midbottom = self.area.midbottom
def slow_down(self):
if self.speed_state != -self.MAX_SPEED_CHANGE:
self.speed *= 0.8
self.speed_state -= 1
def speed_up(self):
if self.speed_state != self.MAX_SPEED_CHANGE:
self.speed *= 1.2
self.speed_state += 1
def init_laser(self):
self.laser += Paddle.LASER_COUNT
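# --- Illustrative sketch of the bounce-angle mapping (standalone, hypothetical values) ---
# The paddle maps the ball's x hit position to an angle via np.linspace over
# bounce_angle_range; the snippet below reproduces that mapping without pygame.
if __name__ == "__main__":
    paddle_left, paddle_width = 100, 80            # hypothetical paddle geometry
    angles = np.linspace(3.4, 6.0, paddle_width)   # same range as bounce_angle_range
    ball_x = 140                                   # ball hits 40 px from the left edge
    idx = min(max(ball_x - paddle_left, 0), paddle_width - 1)
    print("bounce angle (radians):", angles[idx])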
|
the-stack_106_14186
|
from fractions import Fraction
from ptulsconv.broadcast_timecode import TimecodeFormat
from typing import Tuple, List, Iterator
class SessionDescriptor:
header: "HeaderDescriptor"
files: List["FileDescriptor"]
clips: List["ClipDescriptor"]
plugins: List["PluginDescriptor"]
tracks: List["TrackDescriptor"]
markers: List["MarkerDescriptor"]
def __init__(self, **kwargs):
self.header = kwargs['header']
self.files = kwargs['files']
self.clips = kwargs['clips']
self.plugins = kwargs['plugins']
self.tracks = kwargs['tracks']
self.markers = kwargs['markers']
def markers_timed(self) -> Iterator[Tuple['MarkerDescriptor', Fraction]]:
for marker in self.markers:
marker_time = Fraction(marker.time_reference, int(self.header.sample_rate))
#marker_time = self.header.convert_timecode(marker.location)
yield marker, marker_time
def tracks_clips(self) -> Iterator[Tuple['TrackDescriptor', 'TrackClipDescriptor']]:
for track in self.tracks:
for clip in track.clips:
yield track, clip
def track_clips_timed(self) -> Iterator[Tuple["TrackDescriptor", "TrackClipDescriptor",
Fraction, Fraction, Fraction]]:
"""
:return: A Generator that yields track, clip, start time, finish time, and timestamp
"""
for track, clip in self.tracks_clips():
start_time = self.header.convert_timecode(clip.start_timecode)
finish_time = self.header.convert_timecode(clip.finish_timecode)
timestamp_time = self.header.convert_timecode(clip.timestamp) \
if clip.timestamp is not None else None
yield track, clip, start_time, finish_time, timestamp_time
class HeaderDescriptor:
session_name: str
sample_rate: float
bit_depth: int
start_timecode: str
timecode_fps: str
timecode_drop_frame: bool
count_audio_tracks: int
count_clips: int
count_files: int
def __init__(self, **kwargs):
self.session_name = kwargs['session_name']
self.sample_rate = kwargs['sample_rate']
self.bit_depth = kwargs['bit_depth']
self.start_timecode = kwargs['start_timecode']
self.timecode_fps = kwargs['timecode_format']
self.timecode_drop_frame = kwargs['timecode_drop_frame']
self.count_audio_tracks = kwargs['count_audio_tracks']
self.count_clips = kwargs['count_clips']
self.count_files = kwargs['count_files']
@property
def timecode_format(self):
return TimecodeFormat(frame_duration=self.frame_duration,
logical_fps=self.logical_fps,
drop_frame=self.timecode_drop_frame)
def convert_timecode(self, tc_string: str) -> Fraction:
return self.timecode_format.smpte_to_seconds(tc_string)
@property
def start_time(self) -> Fraction:
"""
The start time of this session.
:return: Start time in seconds
"""
return self.convert_timecode(self.start_timecode)
@property
def logical_fps(self) -> int:
return self._get_tc_format_params[0]
@property
def frame_duration(self) -> Fraction:
return self._get_tc_format_params[1]
@property
def _get_tc_format_params(self) -> Tuple[int, Fraction]:
frame_rates = {"23.976": (24, Fraction(1001, 24_000)),
"24": (24, Fraction(1, 24)),
"25": (25, Fraction(1, 25)),
"29.97": (30, Fraction(1001, 30_000)),
"30": (30, Fraction(1, 30)),
"59.94": (60, Fraction(1001, 60_000)),
"60": (60, Fraction(1, 60))
}
if self.timecode_fps in frame_rates.keys():
return frame_rates[self.timecode_fps]
else:
raise ValueError("Unrecognized TC rate (%s)" % self.timecode_format)
class TrackDescriptor:
name: str
comments: str
user_delay_samples: int
state: List[str]
plugins: List[str]
clips: List["TrackClipDescriptor"]
def __init__(self, **kwargs):
self.name = kwargs['name']
self.comments = kwargs['comments']
self.user_delay_samples = kwargs['user_delay_samples']
self.state = kwargs['state']
self.plugins = kwargs['plugins']
self.clips = kwargs['clips']
class FileDescriptor(dict):
pass
class TrackClipDescriptor:
channel: int
event: int
clip_name: str
start_timecode: str
finish_timecode: str
duration: str
timestamp: str
state: str
def __init__(self, **kwargs):
self.channel = kwargs['channel']
self.event = kwargs['event']
self.clip_name = kwargs['clip_name']
self.start_timecode = kwargs['start_time']
self.finish_timecode = kwargs['finish_time']
self.duration = kwargs['duration']
self.timestamp = kwargs['timestamp']
self.state = kwargs['state']
class ClipDescriptor(dict):
pass
class PluginDescriptor(dict):
pass
class MarkerDescriptor:
number: int
location: str
time_reference: int
units: str
name: str
comments: str
def __init__(self, **kwargs):
self.number = kwargs['number']
self.location = kwargs['location']
self.time_reference = kwargs['time_reference']
self.units = kwargs['units']
self.name = kwargs['name']
self.comments = kwargs['comments']
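# --- Illustrative sketch (hypothetical header values) showing how the frame-rate
# table above drives timecode conversion via convert_timecode(). ---
if __name__ == "__main__":
    header = HeaderDescriptor(
        session_name="Example Session",
        sample_rate=48000.0,
        bit_depth=24,
        start_timecode="01:00:00:00",
        timecode_format="24",
        timecode_drop_frame=False,
        count_audio_tracks=0,
        count_clips=0,
        count_files=0,
    )
    # At "24" fps the table yields logical_fps == 24 and frame_duration == 1/24 s;
    # convert_timecode() returns the timecode expressed in seconds as a Fraction.
    print(header.logical_fps, header.frame_duration, header.convert_timecode("01:00:00:01"))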
|
the-stack_106_14188
|
import datetime
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from django.contrib.auth.hashers import make_password
from rest_framework import status
from base import serializer
from base.models import OrderItem, Product, Order, ShippingAddress
from base.serializer import UserSerializer, MyTokenObtainPairSerializer, OrderSerializer
from rest_framework_simplejwt.views import TokenObtainPairView
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def addOrderItem(request):
user = request.user
data = request.data
orderItems = data['orderItems']
if orderItems is None or len(orderItems) == 0:
return Response({'detail': 'No Order Items'}, status=status.HTTP_400_BAD_REQUEST)
else:
order = Order.objects.create(
user=user,
paymentMethod=data['paymentMethod'],
taxPrice=data['taxPrice'],
shippingPrice=data['shippingPrice'],
totalPrice=data['totalPrice']
)
shipping = ShippingAddress.objects.create(
order=order,
address=data['shippingAddress']['address'],
city=data['shippingAddress']['city'],
postalCode=data['shippingAddress']['postCode'],
country=data['shippingAddress']['country'],
)
order.taxPrice = 0
order.shippingPrice = 0
order.totalPrice = 0
for i in orderItems:
product = Product.objects.get(id=i['product'])
qty = 0
if product.countInStock == 0:
order.delete()
shipping.delete()
return Response({'detail': 'Some items in this order are out of stock. Go back to the cart and check item availability.'}, status=status.HTTP_400_BAD_REQUEST)
if i['qty'] > product.countInStock:
qty = product.countInStock
else:
qty = i['qty']
order.taxPrice += float((int(qty) * float(i['price'])) * 0.082)
order.totalPrice += float(int(qty) * float(i['price']))
item = OrderItem.objects.create(
product=product,
order=order,
name=product.name,
qty=qty,
price=i['price'],
image=product.image.url,
)
product.countInStock -= item.qty
product.save()
if order.totalPrice < 100:
order.shippingPrice += 10
order.totalPrice += order.shippingPrice
order.save()
serializer = OrderSerializer(order, many=False)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getMyOrders(request):
user = request.user
orders = user.order_set.all()
serializer = OrderSerializer(orders, many=True)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes([IsAdminUser])
def getOrders(request):
orders = Order.objects.all()
serializer = OrderSerializer(orders, many=True)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getOrderById(request, pk):
user = request.user
try:
order = Order.objects.get(id=pk)
if user.is_staff or order.user == user:
serializer = OrderSerializer(order, many=False)
return Response(serializer.data)
else:
return Response({'detail': 'Not authorized to view this order'},
status=status.HTTP_400_BAD_REQUEST)
except:
return Response({'detail': 'Order does not exist'},
status=status.HTTP_400_BAD_REQUEST)
@api_view(['PUT'])
@permission_classes([IsAuthenticated])
def updateOrderToPaid(request, pk):
order = Order.objects.get(id=pk)
order.isPaid = True
order.paidAt = datetime.datetime.now()
order.save()
return Response('Order was paid')
@api_view(['PUT'])
@permission_classes([IsAuthenticated])
def updateOrderToDelivered(request, pk):
order = Order.objects.get(id=pk)
order.isDelivered = True
order.deliveredAt = datetime.datetime.now()
order.save()
return Response('Order was delivered')
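# --- Illustrative request payload for addOrderItem (hypothetical values) ---
# The POST body expected by the view above; tax is recomputed server-side at 8.2%
# and a flat 10 shipping charge is added when the item total is below 100.
# {
#   "orderItems": [{"product": 1, "qty": 2, "price": "19.99"}],
#   "shippingAddress": {"address": "1 Main St", "city": "Springfield",
#                       "postCode": "12345", "country": "US"},
#   "paymentMethod": "PayPal",
#   "taxPrice": 0, "shippingPrice": 0, "totalPrice": 0
# }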
|
the-stack_106_14189
|
"""src/talus_utils/elib.py module."""
import sqlite3
import tempfile
from pathlib import Path
from sqlite3.dbapi2 import Cursor
from typing import Dict, Optional, Union
import pandas as pd
from talus_utils.s3 import _read_object
class Elib:
"""Handle easy interactions with .elib files."""
def __init__(self, key_or_filename: Union[Path, str], bucket: Optional[str] = None):
"""Initialize a new SQLite connection to a file by downloading it as a tmp file.
Parameters
----------
key_or_filename : str
Either a key to an object in S3 (when bucket is given) or a file name to connect to.
bucket : Optional[str], optional
The name of the S3 bucket to load the file from, by default None
"""
self._tmp = None
if not bucket:
self._file_name = key_or_filename
else:
elib = _read_object(bucket=bucket, key=key_or_filename)
elib_content = elib.read()
self._tmp = tempfile.NamedTemporaryFile()
self._tmp.write(elib_content)
self._file_name = self._tmp.name
# connect to tmp file
self._connection = sqlite3.connect(self._file_name)
self._cursor = self._connection.cursor()
def execute_sql(
self, sql: str, use_pandas: Optional[bool] = False
) -> Union[pd.DataFrame, Cursor]:
"""Execute a given SQL command and returns the result as a cursor or a pandas DataFrame.
Parameters
----------
sql : str
SQL String to excute.
use_pandas : bool
If True, return the query result as a pandas DataFrame. (Default value = False).
Returns
-------
Union[pd.DataFrame, Cursor]
Returns either a cursor or a pandas DataFrame with the result
of the executed SQL query.
"""
if use_pandas:
return pd.read_sql_query(sql=sql, con=self._connection)
else:
return self._cursor.execute(sql)
def close(self) -> None:
"""Close and remove the tmp file and the connection."""
if self._tmp:
self._tmp.close()
def get_unique_peptide_proteins(
elib_filename: Union[Path, str], bucket: Optional[str] = None
) -> Dict[str, Union[int, str]]:
"""Get the number of unique peptides and proteins in the given elib file.
Parameters
----------
elib_filename : Union[Path, str]
The path to the elib file.
bucket : Optional[str], optional
The name of the bucket to use. (Default value = None)
Returns
-------
Dict[str, Union[int, str]]
A dictionary containing the sample name, number of unique peptides and proteins.
"""
elib_conn = Elib(key_or_filename=elib_filename, bucket=bucket)
peptide_to_protein = elib_conn.execute_sql(
sql="SELECT PeptideSeq, ProteinAccession FROM peptidetoprotein WHERE isDecoy == 0;",
use_pandas=True,
)
sample_name = Path(elib_filename).with_suffix("").stem
unique_proteins = peptide_to_protein["ProteinAccession"].nunique()
unique_peptides = peptide_to_protein["PeptideSeq"].nunique()
elib_conn.close()
return {
"Sample Name": sample_name,
"Unique Proteins": unique_proteins,
"Unique Peptides": unique_peptides,
}
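# --- Minimal usage sketch (assumes a local .elib file; the path is hypothetical) ---
if __name__ == "__main__":
    # Query a local chromatogram library directly, without going through S3.
    stats = get_unique_peptide_proteins(elib_filename="results/sample01.elib")
    print(stats)  # {'Sample Name': 'sample01', 'Unique Proteins': ..., 'Unique Peptides': ...}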
|
the-stack_106_14191
|
#!/usr/bin/env python
import sys
from Bio import SeqIO
if sys.version_info[0] < 3:
raise Exception("Must be using Python 3")
name_ids = sys.argv[1]
file_name = sys.argv[2]
ids_file = open(name_ids, "r")
#seq_file = open(file_name, "r")
ids = {}
for line in ids_file:
line = line.strip()
ids[line.split("\t")[0]] = line.split("\t")[1]
for seq in SeqIO.parse(file_name, "fasta"):
for key in ids.keys():
if seq.id.startswith(key):
seq.id = ids[key]
print(">"+seq.id)
print(str(seq.seq))
break
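# Expected layout of the id-mapping file (tab separated, one mapping per line),
# shown here with hypothetical values; the script name is also hypothetical:
#   old_id_prefix<TAB>new_id
#   contig_1    sampleA_chr1
#   contig_2    sampleA_chr2
# Usage: python rename_fasta.py ids.tsv sequences.fasta > renamed.fasta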
|
the-stack_106_14192
|
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# pylint: disable=protected-access
# pylint: disable=no-self-use
# pylint: disable=too-few-public-methods
# pylint: disable=consider-using-from-import
#############################################################
# Copyright (c) 2020-2020 Maurice Karrenbrock #
# #
# This software is open-source and is distributed under the #
# BSD 3-Clause "New" or "Revised" License #
#############################################################
from pathlib import Path
import PythonAuxiliaryFunctions.files_IO.read_file as read_file
import PythonPDBStructures.trajectories.extract_frames as extract_frames
class Testextract_frames():
def test_works(self, tmp_path):
input_dir = Path('tests/integration_tests/input_files')
trajectory = input_dir / 'ch2cl2.trr'
topology = input_dir / 'ch2cl2.tpr'
output_path = tmp_path / 'ch2cl2_extract_frames'
output_path.mkdir()
output_file = output_path / 'output'
output = extract_frames.extract_frames(10, trajectory, topology,
output_file, 'pdb', 3, 89)
assert output == 8
assert set(output_path.iterdir()) == set([
output_path / 'output0.pdb', output_path / 'output1.pdb',
output_path / 'output2.pdb', output_path / 'output3.pdb',
output_path / 'output4.pdb', output_path / 'output5.pdb',
output_path / 'output6.pdb', output_path / 'output7.pdb'
])
assert read_file.read_file(output_path / 'output0.pdb')[1:] != \
read_file.read_file(output_path / 'output1.pdb')[1:]
class Testextract_all_frames():
def test_works(self, tmp_path):
input_dir = Path('tests/integration_tests/input_files')
trajectory = input_dir / 'ch2cl2.trr'
topology = input_dir / 'ch2cl2.tpr'
output_path = tmp_path / 'ch2cl2_extract_frames'
output_path.mkdir()
output_file = output_path / 'output'
output = extract_frames.extract_all_frames(trajectory, topology,
output_file, 'pdb')
assert output == 4001
assert len(list(output_path.iterdir())) == 4001
assert read_file.read_file(output_path / 'output0.pdb')[1:] != \
read_file.read_file(output_path / 'output1.pdb')[1:]
|
the-stack_106_14193
|
import os
import sys
import subprocess
GULP_CMD = os.environ["GULP"]
def run_gulp(inp, out, stdout):
try:
output = subprocess.check_output(
" ".join([GULP_CMD, f"< {inp}", f"> {out}"]),
stderr=subprocess.STDOUT,
shell=True,
).decode()
except subprocess.CalledProcessError as e:
print(f"ERROR: return code {e.returncode}")
print(f"ERROR: {e.output}")
sys.exit()
with open(stdout, "w+") as f:
f.write(output)
return output
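# --- Illustrative usage (assumes the GULP environment variable points to a GULP
# executable; file names are hypothetical placeholders) ---
if __name__ == "__main__":
    run_gulp(inp="structure.gin", out="structure.got", stdout="gulp_stdout.log")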
|
the-stack_106_14194
|
#!/usr/bin/env python
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Shih-Hao Tseng ([email protected])
#
import os,json,csv,shutil
def ip_list_generator(network_name):
server_public_ips = {}
server_private_ips = None
server_ips = {}
try:
with open('settings/%s/public_ips.csv' % network_name,'r') as fpub:
csv_reader = csv.reader(fpub, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
line_count += 1
continue
server_public_ips[row[0]] = []
for ip in row[1:]:
if ip != '':
server_public_ips[row[0]].append(ip)
line_count += 1
except:
print('error: settings/%s/public_ips.csv does not exist' % network_name)
return
try:
server_private_ips = {}
with open('settings/%s/private_ips.csv' % network_name,'r') as fpriv:
csv_reader = csv.reader(fpriv, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
line_count += 1
continue
server_private_ips[row[0]] = []
for ip in row[1:]:
if ip != '':
server_private_ips[row[0]].append(ip)
line_count += 1
# check if private ips match the public ips structure
if len(server_public_ips) != len(server_private_ips):
print('error: public_ips does not match private_ips')
return
for key in server_public_ips.keys():
if len(server_public_ips[key]) != len(server_private_ips[key]):
print('error: public_ips[%s] does not match private_ips[%s]' % (key,key))
return
except:
server_private_ips = None
# generate the topology.cc_part
with open('tmp/%s.cc_part' % network_name,'w') as fcc:
fcc.write('/* Author: Shih-Hao Tseng ([email protected]) */\n')
fcc.write('#ifdef __TOPOLOGY_MACROS__\n\n')
total_nodes = len(server_public_ips.keys())
fcc.write('#define TOTAL_SWITCHES %d\n' % total_nodes)
fcc.write('#define TOTAL_HOSTS %d\n\n' % total_nodes)
try:
# include memo if there is any
with open('settings/%s/memo' % network_name,'r') as fmemo:
for line in fmemo:
fcc.write('// %s' % line)
fcc.write('\n')
except:
pass
fcc.write('#define GENERATE_TOPOLOGY()\\\n')
fcc.write('graph->addNodes(TOTAL_SWITCHES);\\\n')
fcc.write('\\\n')
for key in server_public_ips.keys():
fcc.write('LIST_OF_PUBLIC_ADDRESSES(%s,' % key)
first = True
for ip in server_public_ips[key]:
if first:
first = False
else:
fcc.write(' COMMA')
fcc.write(' \"%s\"' % ip)
fcc.write(' )\\\n')
fcc.write('\\\n')
if server_private_ips is None:
for key in server_public_ips.keys():
fcc.write('PRIVATE_ADDRESSES_IS_THE_SAME_AS_PUBLIC(%s)\\\n' % key)
server_private_ips = server_public_ips
else:
for key in server_private_ips.keys():
fcc.write('LIST_OF_PRIVATE_ADDRESSES(%s,' % key)
first = True
for ip in server_private_ips[key]:
if first:
first = False
else:
fcc.write(' COMMA')
fcc.write(' \"%s\"' % ip)
fcc.write(' )\\\n')
fcc.write('\\\n')
for key in server_public_ips.keys():
fcc.write('AUTO_REG_SW_ADDR(%s)\\\n' % key)
public_ip_counter = {}
# each node -> remote public ips
remote_ips = {}
for key in server_public_ips.keys():
public_ip_counter[key] = 0
remote_ips[key] = {}
try:
# links
fcc.write('\\\n')
fcc.write('/* link the switches */\\\n')
fcc.write('/* private to public */\\\n')
with open('settings/%s/links.csv' % network_name,'r') as flink:
csv_reader = csv.reader(flink, delimiter=',')
line_count = 0
head = []
for row in csv_reader:
if line_count == 0:
# first line
head = row[1:]
else:
line = row[1:]
len_line = len(line)
src_node = row[0]
for dst_node_index in range(len_line):
rate = line[dst_node_index]
if rate != '':
dst_node = head[dst_node_index]
fcc.write('AUTO_SW_TO_SW_IP(%s,%s,%s)\\\n' % (src_node,dst_node,rate))
remote_ips[src_node][dst_node] = server_public_ips[dst_node][public_ip_counter[dst_node]]
public_ip_counter[dst_node] += 1
remote_ips[dst_node][src_node] = server_public_ips[src_node][public_ip_counter[src_node]]
public_ip_counter[src_node] += 1
line_count += 1
except:
print('warning: no link data')
pass
fcc.write('\n#endif // __TOPOLOGY_MACROS__')
# generate the server_ips
for key in server_public_ips.keys():
pub_ip = server_public_ips[key][0]
server_ips[pub_ip] = server_private_ips[key]
with open('tmp/server_ips.json','w') as fips:
json.dump(server_ips,fips)
# save the remote_ips
with open('tmp/%s_remote_ips.json' % network_name,'w') as fips:
json.dump(remote_ips,fips)
return len(server_ips)
def generate_exp(network_name):
network_name = network_name.lower()
total_nodes = 0
load = '0.1'
print('Generate codes for experiment \'%s\'' % network_name)
# generate topology files
total_nodes = ip_list_generator(network_name)
# generate codes
with open('tmp/%s_auto_gen.sh' % network_name,'w') as fout:
fout.write('#!/bin/bash\n./auto_gen.sh %s %d' % (network_name,total_nodes))
with open('tmp/traffic-gen-%s.cc' % network_name,'w') as fout:
fout.write("""/***********
* Generate %s
***********/
#define TOPOLOGY_NAME %s
#define WORKLOAD_PREFIX %s
#include "traffic-gen-mixed.cc_part" """ % (network_name, network_name, network_name))
print('Generating WAN controller for experiment \'%s\' under WAN_exp_controller/exp_%s' % (network_name, network_name))
# create root folder
# delete the existing one
shutil.rmtree('WAN_exp_controller/exp_%s' % network_name, ignore_errors=True)
os.mkdir('WAN_exp_controller/exp_%s' % network_name)
os.chdir('WAN_exp_controller/exp_%s' % network_name)
# create subfolders
os.mkdir('error_logs')
os.mkdir('keys')
os.mkdir('results')
os.mkdir('codes')
os.mkdir('codes/main/')
os.makedirs('codes/settings/topology/', exist_ok=True)
shutil.move('../../tmp/%s_auto_gen.sh' % network_name, 'codes/%s_auto_gen.sh' % network_name)
shutil.move('../../tmp/traffic-gen-%s.cc' % network_name, 'codes/main/traffic-gen-%s.cc' % network_name)
shutil.move('../../tmp/%s.cc_part' % network_name, 'codes/settings/topology/%s.cc_part' % network_name)
os.symlink('../../../settings/%s/workloads' % network_name,'codes/workloads')
# create symbolic links
os.symlink('../core/common_settings.py','common_settings.py')
os.symlink('../core/deployment.py','deployment.py')
os.symlink('../core/load_exp_settings.py','load_exp_settings.py')
os.symlink('../core/local_controller.py','local_controller.py')
os.symlink('../core/server_information.py','server_information.py')
os.symlink('../core/tools','tools')
# create files
with open('exp_settings.json','w') as fout:
fout.write('{ "network_name": "%s", "total_nodes": %d, "start_from_exp": "%s-1-3-interactive-%s-0" }'
% (network_name, total_nodes, network_name, load))
# move settings
shutil.move('../../tmp/%s_remote_ips.json' % network_name, '%s_remote_ips.json' % network_name)
shutil.move('../../tmp/server_ips.json', 'server_ips.json')
if __name__ == '__main__':
network_name = input('Enter the network name of the experiment: ')
if network_name == '':
network_name = 'dummy'
generate_exp(network_name)
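# --- Expected CSV layouts read by ip_list_generator (hypothetical values) ---
# settings/<network>/public_ips.csv : first row is a header, then one row per
# node with its name followed by one public IP per attached link, e.g.
#   node,ip1,ip2
#   A,203.0.113.10,203.0.113.11
#   B,203.0.113.20,
# settings/<network>/links.csv : adjacency matrix of link rates; the first row
# and first column hold node names, an empty cell means no link, e.g.
#   ,A,B
#   A,,10Gbps
#   B,10Gbps,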
|
the-stack_106_14198
|
from __future__ import print_function
import numpy as np, scipy as sp, sys, os, gc
from copy import deepcopy
from warnings import warn
from time import time
from Florence.QuadratureRules import GaussLobattoQuadrature
from Florence.QuadratureRules.FeketePointsTri import FeketePointsTri
from Florence.QuadratureRules.EquallySpacedPoints import EquallySpacedPoints
from Florence.QuadratureRules import GaussLobattoPointsQuad
class BoundaryCondition(object):
"""Base class for applying all types of boundary conditions"""
def __init__(self,
surface_identification_algorithm='minimisation',
modify_linear_mesh_on_projection=False,
project_on_curves=True,
activate_bounding_box=False,
bounding_box_padding=1e-3,
has_planar_surfaces=True,
solve_for_planar_faces=True,
save_dirichlet_data=False,
save_nurbs_data=False,
filename=None,
read_dirichlet_from_file=False,
make_loading="ramp",
compound_dirichlet_bcs=False
):
# TYPE OF BOUNDARY: straight or nurbs
self.boundary_type = 'straight'
self.dirichlet_data_applied_at = 'node' # or 'faces'
self.neumann_data_applied_at = 'node' # or 'faces'
self.requires_cad = False
self.cad_file = None
# PROJECTION TYPE FOR CAD EITHER orthogonal OR arc_length
self.projection_type = 'orthogonal'
# WHAT TYPE OF ARC LENGTH BASED PROJECTION, EITHER 'equal' OR 'fekete'
self.nodal_spacing_for_cad = 'equal'
self.project_on_curves = project_on_curves
self.scale_mesh_on_projection = False
self.scale_value_on_projection = 1.0
self.condition_for_projection = 1.0e20
self.has_planar_surfaces = False
self.solve_for_planar_faces = solve_for_planar_faces
self.projection_flags = None
# FIX DEGREES OF FREEDOM EVERY WHERE CAD PROJECTION IS NOT APPLIED
self.fix_dof_elsewhere = True
# FOR 3D ARC-LENGTH PROJECTION
self.orthogonal_fallback_tolerance = 1.0
# WHICH ALGORITHM TO USE FOR SURFACE IDENTIFICATION, EITHER 'minimisation' or 'pure_projection'
self.surface_identification_algorithm = surface_identification_algorithm
# MODIFY LINEAR MESH ON PROJECTION
self.modify_linear_mesh_on_projection = modify_linear_mesh_on_projection
# COMPUTE A BOUNDING BOX FOR EACH CAD SURFACE
self.activate_bounding_box = activate_bounding_box
self.bounding_box_padding = float(bounding_box_padding)
# FOR IGAKit WRAPPER
self.nurbs_info = None
self.nurbs_condition = None
self.analysis_type = 'static'
self.analysis_nature = 'linear'
self.is_dirichlet_computed = False
self.columns_out = None
self.columns_in = None
self.applied_dirichlet = None
self.save_dirichlet_data = save_dirichlet_data
self.save_nurbs_data = save_nurbs_data
self.filename = filename
self.read_dirichlet_from_file = read_dirichlet_from_file
self.dirichlet_flags = None
self.neumann_flags = None
self.applied_neumann = None
self.is_applied_neumann_shape_functions_computed = False
self.is_body_force_shape_functions_computed = False
self.make_loading = make_loading # "ramp" or "constant"
self.has_step_wise_dirichlet_loading = False
self.step_wise_dirichlet_data = None
self.has_step_wise_neumann_loading = False
self.step_wise_neumann_data = None
self.compound_dirichlet_bcs = compound_dirichlet_bcs
# NODAL FORCES GENERATED BASED ON DIRICHLET OR NEUMANN ARE NOT
# IMPLEMENTED AS PART OF BOUNDARY CONDITION YET. THIS ESSENTIALLY
# MEANS SAVING MULTIPLE RHS VALUES
# self.dirichlet_forces = None
# self.neumann_forces = None
# # THE FOLLOWING MEMBERS ARE NOT UPDATED, TO REDUCE MEMORY FOOTPRINT
# self.external_nodal_forces = None
# self.internal_traction_forces = None
# self.residual = None
# STORE A COPY OF SELF AT THE START TO RESET TO AT THE END
self.__save_state__()
# FOR INTERNAL PURPOSES WHEN WE DO NOT WANT TO REST
self.do_not_reset = True
def __save_state__(self):
self.__initialdict__ = deepcopy(self.__dict__)
def __reset_state__(self):
self.__dict__.update(self.__initialdict__)
def SetAnalysisParameters(self,analysis_type='static',analysis_nature='linear',
columns_in=None,columns_out=None,applied_dirichlet=None):
"""Set analysis parameters such as analysis type, analysis nature and even
Dirichlet boundary conditions if known a priori
"""
self.analysis_type = analysis_type
self.analysis_nature = analysis_nature
self.columns_out = columns_out
self.columns_in = columns_in
self.applied_dirichlet = applied_dirichlet
def SetCADProjectionParameters(self, cad_file=None, requires_cad=True, projection_type='orthogonal',
nodal_spacing='equal', project_on_curves=True, has_planar_surfaces=True, solve_for_planar_faces=True,
scale=1.0,condition=1.0e20, projection_flags=None, fix_dof_elsewhere=True,
orthogonal_fallback_tolerance=1.0, surface_identification_algorithm='minimisation',
modify_linear_mesh_on_projection=False, activate_bounding_box=False, bounding_box_padding=1e-3):
"""Set parameters for CAD projection in order to obtain dirichlet boundary
conditions
"""
self.boundary_type = 'nurbs'
self.requires_cad = requires_cad
self.cad_file = cad_file
self.projection_type = projection_type
self.scale_mesh_on_projection = True
self.scale_value_on_projection = 1.0*scale
self.condition_for_projection = 1.0*condition
self.project_on_curves = int(project_on_curves)
self.has_planar_surfaces = has_planar_surfaces
self.solve_for_planar_faces = solve_for_planar_faces
self.projection_flags = projection_flags
self.fix_dof_elsewhere = fix_dof_elsewhere
self.orthogonal_fallback_tolerance = orthogonal_fallback_tolerance
self.surface_identification_algorithm = surface_identification_algorithm
self.modify_linear_mesh_on_projection = int(modify_linear_mesh_on_projection)
self.nodal_spacing_for_cad = nodal_spacing
self.activate_bounding_box = activate_bounding_box
self.bounding_box_padding = float(bounding_box_padding)
def SetProjectionCriteria(self, proj_func, mesh, *args, **kwargs):
"""Factory function for setting projection criteria specific
to a problem
input:
func [function] function that computes projection criteria
mesh [Mesh] an instance of mesh class
"""
self.projection_flags = proj_func(mesh, *args, **kwargs)
if isinstance(self.projection_flags,np.ndarray):
if self.projection_flags.ndim==1:
self.projection_flags = self.projection_flags.reshape(-1,1)
ndim = mesh.InferSpatialDimension()
if self.projection_flags.shape[0] != mesh.edges.shape[0] and ndim == 2:
raise ValueError("Projection flags are incorrect. "
"Ensure that your projection function returns an ndarray of shape (mesh.edges.shape[0],1)")
elif self.projection_flags.shape[0] != mesh.faces.shape[0] and ndim == 3:
raise ValueError("Projection flags are incorrect. "
"Ensure that your projection function returns an ndarray of shape (mesh.faces.shape[0],1)")
else:
raise ValueError("Projection flags for CAD not set. "
"Ensure that your projection function returns an ndarray")
def GetProjectionCriteria(self,mesh):
"""Convenience method for computing projection flags, as many problems
require this type of projection
"""
ndim = mesh.InferSpatialDimension()
if ndim==3:
boundaries = mesh.faces
elif ndim==2:
boundaries = mesh.edges
projection_flags = np.zeros((boundaries.shape[0],1),dtype=np.uint64)
num = boundaries.shape[1]
xyz = (self.scale_value_on_projection/num)*np.sum(mesh.points[boundaries,:],axis=1)
projection_flags[:,0] = np.linalg.norm(xyz,axis=1) < self.condition_for_projection
self.projection_flags = projection_flags
def GetGeometryMeshScale(self,gpoints,mesh):
"""Compares CAD geometry and mesh, to check if the mesh coordinates
require scaling
raises an error if the geometry-to-mesh scale appears incorrect
"""
gminx, gmaxx = np.min(gpoints[:,0]), np.max(gpoints[:,0])
gminy, gmaxy = np.min(gpoints[:,1]), np.max(gpoints[:,1])
gmax = np.max(gpoints)
mmax = np.max(mesh.points)
# NOTE THAT THE BOUNDS OF NURBS BOUNDARY ISN'T
# NECESSARILY EXACTLY THE SAME AS THE MESH, EVEN IF
# SCALED APPROPRIATELY
gbounds = np.array([[gminx,gminy],[gmaxx,gmaxy]])
units_scalar = [1000.,25.4]
for scale in units_scalar:
if np.isclose(gmax/mmax,scale):
if self.scale_value_on_projection != scale:
self.scale_value_on_projection = scale
raise ValueError('Geometry to mesh scale seems incorrect. Change it to %9.3f' % scale)
# A SIMPLE WARNING IS NOT POSSIBLE AT THE MOMENT
# warn('Geometry to mesh scale seems incorrect. Change it to %9.3f' % scale)
# break
return gbounds
def SetDirichletCriteria(self, func, *args, **kwargs):
"""Applies user defined Dirichlet data to self
"""
if "apply" in kwargs.keys():
del kwargs["apply"]
self.has_step_wise_dirichlet_loading = True
self.step_wise_dirichlet_data = {'func':func, 'args': args, 'kwargs': kwargs}
self.dirichlet_flags = func(0, *args, **kwargs)
return self.dirichlet_flags
self.dirichlet_flags = func(*args, **kwargs)
return self.dirichlet_flags
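# Illustrative sketch of a user-defined Dirichlet function (hypothetical): it must
# return an array of shape (mesh.points.shape[0], nvar) with np.nan marking free
# DOFs and prescribed values elsewhere, e.g.
#
#   def fix_left_edge(mesh):
#       flags = np.full((mesh.points.shape[0], 2), np.nan)
#       flags[np.isclose(mesh.points[:, 0], 0.0), :] = 0.0   # clamp nodes on x == 0
#       return flags
#
#   boundary_condition.SetDirichletCriteria(fix_left_edge, mesh)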
def SetNeumannCriteria(self, func, *args, **kwargs):
"""Applies user defined Neumann data to self
"""
if "apply" in kwargs.keys():
del kwargs["apply"]
self.has_step_wise_neumann_loading = True
self.step_wise_neumann_data = {'func':func, 'args': args, 'kwargs': kwargs}
tups = func(0, *args, **kwargs)
else:
tups = func(*args, **kwargs)
if not isinstance(tups,tuple) and self.neumann_data_applied_at == "node":
self.neumann_flags = tups
return self.neumann_flags
else:
self.neumann_data_applied_at = "face"
if len(tups) !=2:
raise ValueError("User-defined Neumann criterion function {} "
"should return one flag and one data array".format(func.__name__))
self.neumann_flags = tups[0]
self.applied_neumann = tups[1]
return tups
def ApplyStepWiseDirichletFunc(self, formulation, mesh, increment=0):
self.dirichlet_flags = self.step_wise_dirichlet_data['func'](increment,
*self.step_wise_dirichlet_data['args'], **self.step_wise_dirichlet_data['kwargs'])
self.analysis_type = "static"
self.GetDirichletBoundaryConditions(formulation, mesh)
self.analysis_type = "dynamic"
def ApplyStepWiseNeumannFunc(self, formulation, mesh, material, increment=0):
tups = self.step_wise_neumann_data['func'](increment,
*self.step_wise_neumann_data['args'], **self.step_wise_neumann_data['kwargs'])
if not isinstance(tups,tuple) and self.neumann_data_applied_at == "node":
self.neumann_flags = tups
else:
self.neumann_data_applied_at = "face"
if len(tups) !=2:
raise ValueError("User-defined Neumann criterion function {} "
"should return one flag and one data array".format(func.__name__))
self.neumann_flags = tups[0]
self.applied_neumann = tups[1]
self.analysis_type = "static"
F = self.ComputeNeumannForces(mesh, material, formulation.function_spaces)
self.analysis_type = "dynamic"
return F
def GetDirichletBoundaryConditions(self, formulation, mesh, material=None, solver=None, fem_solver=None):
nvar = formulation.nvar
ndim = formulation.ndim
self.columns_in, self.applied_dirichlet = [], []
#----------------------------------------------------------------------------------------------------#
#-------------------------------------- NURBS BASED SOLUTION ----------------------------------------#
#----------------------------------------------------------------------------------------------------#
if self.boundary_type == 'nurbs':
tCAD = time()
if self.read_dirichlet_from_file is False:
if not self.is_dirichlet_computed:
# GET DIRICHLET BOUNDARY CONDITIONS BASED ON THE EXACT GEOMETRY FROM CAD
if self.requires_cad:
# CALL POSTMESH WRAPPER
nodesDBC, Dirichlet = self.PostMeshWrapper(formulation, mesh, material, solver, fem_solver)
else:
nodesDBC, Dirichlet = self.nodesDBC, self.Dirichlet
# GET DIRICHLET DoFs
self.columns_out = (np.repeat(nodesDBC,nvar,axis=1)*nvar +\
np.tile(np.arange(nvar)[None,:],nodesDBC.shape[0]).reshape(nodesDBC.shape[0],formulation.ndim)).ravel()
self.applied_dirichlet = Dirichlet.ravel()
# FIX THE DOF IN THE REST OF THE BOUNDARY
if self.fix_dof_elsewhere:
if ndim==2:
rest_dofs = np.setdiff1d(np.unique(mesh.edges),nodesDBC)
elif ndim==3:
rest_dofs = np.setdiff1d(np.unique(mesh.faces),nodesDBC)
rest_out = np.repeat(rest_dofs,nvar)*nvar + np.tile(np.arange(nvar),rest_dofs.shape[0])
rest_app = np.zeros(rest_dofs.shape[0]*nvar)
self.columns_out = np.concatenate((self.columns_out,rest_out)).astype(np.int64)
self.applied_dirichlet = np.concatenate((self.applied_dirichlet,rest_app))
print('Finished identifying Dirichlet boundary conditions from CAD geometry.',
' Time taken', time()-tCAD, 'seconds')
else:
end = -3
self.applied_dirichlet = np.loadtxt(mesh.filename.split(".")[0][:end]+"_dirichlet.dat", dtype=np.float64)
self.columns_out = np.loadtxt(mesh.filename.split(".")[0][:end]+"_columns_out.dat")
print('Finished identifying Dirichlet boundary conditions from CAD geometry.',
' Time taken', time()-tCAD, 'seconds')
#----------------------------------------------------------------------------------------------------#
#------------------------------------- NON-NURBS BASED SOLUTION -------------------------------------#
#----------------------------------------------------------------------------------------------------#
elif self.boundary_type == 'straight' or self.boundary_type == 'mixed':
# IF DIRICHLET BOUNDARY CONDITIONS ARE APPLIED DIRECTLY AT NODES
if self.dirichlet_flags is None:
raise RuntimeError("Dirichlet boundary conditions are not set for the analysis")
if self.dirichlet_data_applied_at == 'node':
if self.analysis_type == "dynamic":
# FOR DYNAMIC ANALYSIS IT IS ASSUMED THAT
# self.columns_in and self.columns_out DO NOT CHANGE
# DURING THE ANALYSIS
if self.dirichlet_flags.ndim == 3:
flat_dirich = self.dirichlet_flags[:,:,0].ravel()
self.columns_out = np.arange(self.dirichlet_flags[:,:,0].size)[~np.isnan(flat_dirich)]
self.applied_dirichlet = np.zeros((self.columns_out.shape[0],self.dirichlet_flags.shape[2]))
for step in range(self.dirichlet_flags.shape[2]):
flat_dirich = self.dirichlet_flags[:,:,step].ravel()
self.applied_dirichlet[:,step] = flat_dirich[~np.isnan(flat_dirich)]
elif self.dirichlet_flags.ndim == 2:
flat_dirich = self.dirichlet_flags.ravel()
self.columns_out = np.arange(self.dirichlet_flags.size)[~np.isnan(flat_dirich)]
self.applied_dirichlet = flat_dirich[~np.isnan(flat_dirich)]
else:
raise ValueError("Incorrect Dirichlet flags for dynamic analysis")
else:
flat_dirich = self.dirichlet_flags.ravel()
self.columns_out = np.arange(self.dirichlet_flags.size)[~np.isnan(flat_dirich)]
self.applied_dirichlet = flat_dirich[~np.isnan(flat_dirich)]
# GENERAL PROCEDURE - GET REDUCED MATRICES FOR FINAL SOLUTION
self.columns_out = self.columns_out.astype(np.int64)
self.columns_in = np.delete(np.arange(0,nvar*mesh.points.shape[0]),self.columns_out)
if self.columns_in.shape[0] == 0:
warn("Dirichlet boundary conditions have been applied on the entire mesh")
if self.columns_out.shape[0] == 0:
warn("No Dirichlet boundary conditions have been applied. The system is unconstrained")
if self.save_dirichlet_data:
from scipy.io import savemat
diri_dict = {'columns_in':self.columns_in,
'columns_out':self.columns_out,
'applied_dirichlet':self.applied_dirichlet}
savemat(self.filename,diri_dict, do_compression=True)
def ConvertStaticsToDynamics(self, mesh, nincr):
"""Convert static boundary condition data to dynamic
"""
if self.analysis_type == "dynamic":
# AVOID ZERO DIVISION FOR RAMP (LINSPACE TYPE) LOADING
nincr_last = float(nincr-1) if nincr !=1 else 1
if self.applied_dirichlet is not None:
if self.applied_dirichlet.ndim == 1:
dum = np.zeros((self.applied_dirichlet.shape[0],nincr))
for incr in range(nincr):
if self.make_loading == "constant":
dum[:,incr] = self.applied_dirichlet/float(nincr)
else:
dum[:,incr] = incr*self.applied_dirichlet/nincr_last
self.applied_dirichlet = np.copy(dum)
else:
return
# if self.boundary_type != "nurbs":
# if self.dirichlet_flags is not None:
# if self.dirichlet_flags.ndim == 2:
# dum = np.zeros((self.dirichlet_flags.shape[0],self.dirichlet_flags.shape[1],nincr))
# for incr in range(nincr):
# if self.make_loading == "constant":
# dum[:,:,incr] = self.dirichlet_flags/float(nincr)
# else:
# dum[:,:,incr] = incr*self.dirichlet_flags/nincr_last
# self.dirichlet_flags = np.copy(dum)
# else:
# return
# else:
# if self.applied_dirichlet is not None:
# if self.applied_dirichlet.ndim == 1:
# dum = np.zeros((self.applied_dirichlet.shape[0],nincr))
# for incr in range(nincr):
# if self.make_loading == "constant":
# dum[:,incr] = self.applied_dirichlet/float(nincr)
# else:
# dum[:,incr] = incr*self.applied_dirichlet/nincr_last
# self.applied_dirichlet = np.copy(dum)
# else:
# return
if self.neumann_flags is not None:
ndim = mesh.InferSpatialDimension()
if self.neumann_flags.shape[0] == mesh.points.shape[0]:
self.neumann_data_applied_at = "node"
else:
if ndim==3:
if self.neumann_flags.shape[0] == mesh.faces.shape[0]:
self.neumann_data_applied_at = "face"
elif ndim==2:
if self.neumann_flags.shape[0] == mesh.edges.shape[0]:
self.neumann_data_applied_at = "face"
if self.neumann_data_applied_at == "node":
if self.neumann_flags.ndim == 2:
dum = np.zeros((self.neumann_flags.shape[0],self.neumann_flags.shape[1],nincr))
for incr in range(nincr):
if self.make_loading == "constant":
dum[:,:,incr] = self.neumann_flags/float(nincr)
else:
dum[:,:,incr] = incr*self.neumann_flags/nincr_last
self.neumann_flags = np.copy(dum)
else:
return
elif self.neumann_data_applied_at == "face":
if self.applied_neumann is None:
raise ValueError("Incorrect Neumann data supplied")
if self.neumann_flags.ndim == 1:
tmp_flags = np.zeros((self.neumann_flags.shape[0],nincr))
tmp_data = np.zeros((self.applied_neumann.shape[0],self.applied_neumann.shape[1],nincr))
for incr in range(nincr):
if self.make_loading == "constant":
tmp_data[:,:,incr] = self.applied_neumann/float(nincr)
else:
tmp_data[:,:,incr] = incr*self.applied_neumann/nincr_last
tmp_flags[:,incr] = self.neumann_flags
self.neumann_flags = np.copy(tmp_flags)
self.applied_neumann = np.copy(tmp_data)
else:
return
def PostMeshWrapper(self, formulation, mesh, material, solver, fem_solver):
"""Calls PostMesh wrapper to get exact Dirichlet boundary conditions"""
try:
# from .PostMeshPy import (PostMeshCurvePy as PostMeshCurve, PostMeshSurfacePy as PostMeshSurface)
from PostMeshPy import (PostMeshCurvePy as PostMeshCurve, PostMeshSurfacePy as PostMeshSurface)
except ImportError:
raise ImportError("PostMesh is not installed. Please install using 'pip install PostMeshPy'")
from Florence.FunctionSpace import Tri
C = mesh.InferPolynomialDegree() - 1
mesh.ChangeType()
if formulation.ndim == 2:
# CHOOSE TYPE OF BOUNDARY SPACING
boundary_fekete = np.array([[]])
if self.nodal_spacing_for_cad == 'fekete':
boundary_fekete = GaussLobattoQuadrature(C+2)[0]
else:
boundary_fekete = EquallySpacedPoints(formulation.ndim,C)
# IT IS IMPORTANT TO ENSURE THAT THE DATA IS C-CONTIGUOUS
boundary_fekete = boundary_fekete.copy(order="c")
curvilinear_mesh = PostMeshCurve(mesh.element_type,dimension=formulation.ndim)
curvilinear_mesh.SetMeshElements(mesh.elements)
curvilinear_mesh.SetMeshPoints(mesh.points)
curvilinear_mesh.SetMeshEdges(mesh.edges)
curvilinear_mesh.SetMeshFaces(np.zeros((1,4),dtype=np.uint64))
curvilinear_mesh.SetScale(self.scale_value_on_projection)
curvilinear_mesh.SetCondition(self.condition_for_projection)
curvilinear_mesh.SetProjectionPrecision(1.0e-04)
curvilinear_mesh.SetProjectionCriteria(self.projection_flags)
curvilinear_mesh.ScaleMesh()
# curvilinear_mesh.InferInterpolationPolynomialDegree()
curvilinear_mesh.SetNodalSpacing(boundary_fekete)
curvilinear_mesh.GetBoundaryPointsOrder()
# READ THE GEOMETRY FROM THE IGES FILE
curvilinear_mesh.ReadCAD(self.cad_file)
# EXTRACT GEOMETRY INFORMATION FROM THE IGES FILE
geometry_points = curvilinear_mesh.GetGeomVertices()
self.GetGeometryMeshScale(geometry_points,mesh)
# print([np.min(geometry_points[:,0]),np.max(geometry_points[:,0])], mesh.Bounds)
# exit()
curvilinear_mesh.GetGeomEdges()
curvilinear_mesh.GetGeomFaces()
curvilinear_mesh.GetGeomPointsOnCorrespondingEdges()
# FIRST IDENTIFY WHICH CURVES CONTAIN WHICH EDGES
curvilinear_mesh.IdentifyCurvesContainingEdges()
# PROJECT ALL BOUNDARY POINTS FROM THE MESH TO THE CURVE
curvilinear_mesh.ProjectMeshOnCurve()
# FIX IMAGES AND ANTI IMAGES IN PERIODIC CURVES/SURFACES
curvilinear_mesh.RepairDualProjectedParameters()
# PERFORM POINT INVERSION FOR THE INTERIOR POINTS
if self.projection_type == 'orthogonal':
curvilinear_mesh.MeshPointInversionCurve()
elif self.projection_type == 'arc_length':
curvilinear_mesh.MeshPointInversionCurveArcLength()
else:
# warn("projection type not understood. Arc length based projection is going to be used")
curvilinear_mesh.MeshPointInversionCurveArcLength()
# OBTAIN MODIFIED MESH POINTS - THIS IS NECESSARY TO ENSURE LINEAR MESH IS ALSO CORRECT
curvilinear_mesh.ReturnModifiedMeshPoints(mesh.points)
# GET DIRICHLET DATA
nodesDBC, Dirichlet = curvilinear_mesh.GetDirichletData()
# GET ACTUAL CURVE POINTS - THIS FUNCTION IS EXPENSIVE
# self.ActualCurve = curvilinear_mesh.DiscretiseCurves(100)
if self.save_nurbs_data:
from scipy.io import savemat
nurbs_dict = {'nodesDBC':nodesDBC, 'Dirichlet':Dirichlet}
savemat(self.filename, nurbs_dict, do_compression=True)
elif formulation.ndim == 3:
t_all_proj = time()
boundary_points = FeketePointsTri(C)
if mesh.element_type == "hex":
boundary_points = GaussLobattoPointsQuad(C)
curvilinear_mesh = PostMeshSurface(mesh.element_type,dimension=formulation.ndim)
curvilinear_mesh.SetMeshElements(mesh.elements)
curvilinear_mesh.SetMeshPoints(mesh.points)
if mesh.edges is not None:
if mesh.edges.ndim == 2 and mesh.edges.shape[1]==0:
mesh.edges = np.zeros((1,4),dtype=np.uint64)
else:
curvilinear_mesh.SetMeshEdges(mesh.edges)
curvilinear_mesh.SetMeshFaces(mesh.faces)
curvilinear_mesh.SetScale(self.scale_value_on_projection)
curvilinear_mesh.SetCondition(self.condition_for_projection)
curvilinear_mesh.SetProjectionPrecision(1.0e-04)
curvilinear_mesh.SetProjectionCriteria(self.projection_flags)
curvilinear_mesh.ScaleMesh()
curvilinear_mesh.SetNodalSpacing(boundary_points)
# curvilinear_mesh.GetBoundaryPointsOrder()
# READ THE GEOMETRY FROM THE IGES FILE
curvilinear_mesh.ReadCAD(self.cad_file)
# EXTRACT GEOMETRY INFORMATION FROM THE IGES FILE
geometry_points = curvilinear_mesh.GetGeomVertices()
self.GetGeometryMeshScale(geometry_points,mesh)
# print([np.min(geometry_points[:,2]),np.max(geometry_points[:,2])], mesh.Bounds)
# exit()
curvilinear_mesh.GetGeomEdges()
curvilinear_mesh.GetGeomFaces()
print("CAD geometry has", curvilinear_mesh.NbPoints, "points,", \
curvilinear_mesh.NbCurves, "curves and", curvilinear_mesh.NbSurfaces, "surfaces")
curvilinear_mesh.GetGeomPointsOnCorrespondingFaces()
# FIRST IDENTIFY WHICH SURFACES CONTAIN WHICH FACES
if getattr(mesh,"face_to_surface",None) is not None:
if mesh.faces.shape[0] == mesh.face_to_surface.size:
if mesh.face_to_surface.size != mesh.face_to_surface.shape[0]:
mesh.face_to_surface = np.ascontiguousarray(mesh.face_to_surface.flatten(),dtype=np.int64)
curvilinear_mesh.SupplySurfacesContainingFaces(mesh.face_to_surface,already_mapped=1)
else:
raise AssertionError("face-to-surface mapping does not seem correct. "
"Point projection is going to stop")
else:
if self.surface_identification_algorithm == 'minimisation':
curvilinear_mesh.IdentifySurfacesContainingFaces(int(self.activate_bounding_box),
self.bounding_box_padding)
elif self.surface_identification_algorithm == 'pure_projection':
curvilinear_mesh.IdentifySurfacesContainingFacesByPureProjection(int(self.activate_bounding_box),
self.bounding_box_padding)
else:
# warn("surface identification algorithm not understood. minimisation algorithm is going to be used")
curvilinear_mesh.IdentifySurfacesContainingFaces(int(self.activate_bounding_box))
if self.project_on_curves:
t_proj = time()
# IDENTIFY WHICH EDGES ARE SHARED BETWEEN SURFACES
curvilinear_mesh.IdentifySurfacesIntersections()
print("Curve intersection recognition took {} seconds".format(time()-t_proj))
# PERFORM POINT INVERSION FOR THE INTERIOR POINTS
if self.projection_type == "arc_length":
assert mesh.element_type == "tet"
Neval = np.zeros((3,boundary_points.shape[0]),dtype=np.float64)
hpBases = Tri.hpNodal.hpBases
for i in range(3,boundary_points.shape[0]):
Neval[:,i] = hpBases(0,boundary_points[i,0],boundary_points[i,1],1)[0]
if self.projection_type == 'orthogonal':
curvilinear_mesh.MeshPointInversionSurface(self.project_on_curves, self.modify_linear_mesh_on_projection)
elif self.projection_type == 'arc_length':
# PROJECT ALL BOUNDARY POINTS FROM THE MESH TO THE SURFACE
curvilinear_mesh.ProjectMeshOnSurface()
# curvilinear_mesh.RepairDualProjectedParameters()
curvilinear_mesh.MeshPointInversionSurfaceArcLength(self.project_on_curves,
self.orthogonal_fallback_tolerance,Neval)
else:
warn("projection type not understood. Orthogonal projection is going to be used")
curvilinear_mesh.MeshPointInversionSurface(self.project_on_curves)
# OBTAIN MODIFIED MESH POINTS - THIS IS NECESSARY TO ENSURE LINEAR MESH IS ALSO CORRECT
if self.modify_linear_mesh_on_projection:
curvilinear_mesh.ReturnModifiedMeshPoints(mesh.points)
# GET DIRICHLET DATA
nodesDBC, Dirichlet = curvilinear_mesh.GetDirichletData()
# GET DIRICHLET FACES (IF REQUIRED)
can_get_dirichlet_faces = True
try:
dirichlet_faces = curvilinear_mesh.GetDirichletFaces()
except ValueError:
can_get_dirichlet_faces = False
# FOR GEOMETRIES CONTAINING PLANAR SURFACES
planar_mesh_faces = curvilinear_mesh.GetMeshFacesOnPlanarSurfaces()
# self.planar_mesh_faces = planar_mesh_faces
if self.save_nurbs_data:
from scipy.io import savemat
if can_get_dirichlet_faces:
nurbs_dict = {'nodesDBC':nodesDBC,
'Dirichlet':Dirichlet,
'dirichlet_faces':dirichlet_faces}
else:
warn("dirichlet_faces can be computed properly")
nurbs_dict = {'nodesDBC':nodesDBC,
'Dirichlet':Dirichlet,
'dirichlet_faces':np.array([])}
savemat(self.filename, nurbs_dict, do_compression=True)
print("3D multi-level projection (excluding mesh deformation) took {} seconds".format(time()-t_all_proj))
if self.solve_for_planar_faces:
if planar_mesh_faces.shape[0] != 0:
# SOLVE A 2D PROBLEM FOR PLANAR SURFACES
switcher = fem_solver.parallel
if fem_solver.parallel is True:
fem_solver.parallel = False
self.GetDirichletDataForPlanarFaces(formulation, material,
mesh, solver, fem_solver, planar_mesh_faces, nodesDBC, Dirichlet, plot=False)
                fem_solver.parallel = switcher
return nodesDBC, Dirichlet
@staticmethod
def GetDirichletDataForPlanarFaces(formulation, material,
mesh, solver, fem_solver, planar_mesh_faces, nodesDBC, Dirichlet, plot=False):
"""Solve a 2D problem for planar faces. Modifies Dirichlet"""
from copy import deepcopy
from Florence import Mesh, FunctionSpace, QuadratureRule
from Florence.PostProcessing import PostProcess
from Florence.Tensor import itemfreq, makezero, in2d_unsorted
surface_flags = itemfreq(planar_mesh_faces[:,1])
number_of_planar_surfaces = surface_flags.shape[0]
C = mesh.InferPolynomialDegree() - 1
E1 = [1.,0.,0.]
E2 = [0.,1.,0.]
E3 = [0.,0.,1.]
# MAKE A SINGLE INSTANCE OF MATERIAL AND UPDATE IF NECESSARY
import Florence.MaterialLibrary
pmaterial_func = getattr(Florence.MaterialLibrary,material.mtype,None)
pmaterial_dict = deepcopy(material.__dict__)
del pmaterial_dict['ndim'], pmaterial_dict['mtype']
pmaterial = pmaterial_func(2,**pmaterial_dict)
print("The problem requires 2D analyses. Solving", number_of_planar_surfaces, "2D problems")
for niter in range(number_of_planar_surfaces):
pmesh = Mesh()
if mesh.element_type == "tet":
pmesh.element_type = "tri"
no_face_vertices = 3
elif mesh.element_type == "hex":
pmesh.element_type = "quad"
no_face_vertices = 4
else:
raise ValueError("Curvilinear mesher for element type {} not yet implemented".format(mesh.element_type))
pmesh.elements = mesh.faces[planar_mesh_faces[planar_mesh_faces[:,1]==surface_flags[niter,0],0],:]
pmesh.nelem = np.int64(surface_flags[niter,1])
pmesh.GetBoundaryEdges()
unique_edges = np.unique(pmesh.edges).astype(nodesDBC.dtype)
unique_elements, inv = np.unique(pmesh.elements, return_inverse=True)
unique_elements = unique_elements.astype(nodesDBC.dtype)
aranger = np.arange(unique_elements.shape[0],dtype=np.uint64)
pmesh.elements = aranger[inv].reshape(pmesh.elements.shape)
dirichlet_edges = in2d_unsorted(nodesDBC,unique_edges[:,None]).flatten()
nodesDBC2D = in2d_unsorted(unique_elements.astype(nodesDBC.dtype)[:,None],nodesDBC[dirichlet_edges]).flatten()
Dirichlet2D = Dirichlet[dirichlet_edges,:]
pmesh.points = mesh.points[unique_elements,:]
one_element_coord = pmesh.points[pmesh.elements[0,:no_face_vertices],:]
# FOR COORDINATE TRANSFORMATION
AB = one_element_coord[0,:] - one_element_coord[1,:]
AC = one_element_coord[0,:] - one_element_coord[2,:]
normal = np.cross(AB,AC)
unit_normal = normal/np.linalg.norm(normal)
e1 = AB/np.linalg.norm(AB)
e2 = np.cross(normal,AB)/np.linalg.norm(np.cross(normal,AB))
e3 = unit_normal
# TRANSFORMATION MATRIX
Q = np.array([
[np.einsum('i,i',e1,E1), np.einsum('i,i',e1,E2), np.einsum('i,i',e1,E3)],
[np.einsum('i,i',e2,E1), np.einsum('i,i',e2,E2), np.einsum('i,i',e2,E3)],
[np.einsum('i,i',e3,E1), np.einsum('i,i',e3,E2), np.einsum('i,i',e3,E3)]
])
pmesh.points = np.dot(pmesh.points,Q.T)
# assert np.allclose(pmesh.points[:,2],pmesh.points[0,2])
# z_plane = pmesh.points[0,2]
pmesh.points = pmesh.points[:,:2]
Dirichlet2D = np.dot(Dirichlet2D,Q.T)
Dirichlet2D = Dirichlet2D[:,:2]
pmesh.edges = None
pmesh.GetBoundaryEdges()
# GET BOUNDARY CONDITION FOR 2D PROBLEM
pboundary_condition = BoundaryCondition()
pboundary_condition.SetCADProjectionParameters()
pboundary_condition.is_dirichlet_computed = True
pboundary_condition.nodesDBC = nodesDBC2D[:,None]
pboundary_condition.Dirichlet = Dirichlet2D
# GET VARIATIONAL FORMULATION FOR 2D PROBLEM
# from Florence import DisplacementFormulation
# pformulation = DisplacementFormulation(pmesh)
pformulation_func = formulation.__class__
pformulation = pformulation_func(pmesh)
pfem_solver = deepcopy(fem_solver)
pfem_solver.do_not_reset = True
pfem_solver.is_partitioned = False
pfem_solver.is_sparsity_pattern_computed = False
print('Solving planar problem {}. Number of DoF is {}'.format(niter,pmesh.points.shape[0]*pformulation.nvar))
if pmesh.points.shape[0] != Dirichlet2D.shape[0]:
# CALL THE FEM SOLVER FOR SOLVING THE 2D PROBLEM
solution = pfem_solver.Solve(formulation=pformulation,
mesh=pmesh, material=pmaterial,
boundary_condition=pboundary_condition)
TotalDisp = solution.sol
else:
# IF THERE IS NO DEGREE OF FREEDOM TO SOLVE FOR (ONE ELEMENT CASE)
TotalDisp = Dirichlet2D[:,:,None]
Disp = np.zeros((TotalDisp.shape[0],3))
Disp[:,:2] = TotalDisp[:,:,-1]
temp_dict = in2d_unsorted(nodesDBC,unique_elements[:,None]).flatten()
Dirichlet[temp_dict,:] = np.dot(Disp,Q)
if plot:
post_process = PostProcess(2,2)
post_process.CurvilinearPlot(pmesh, TotalDisp,
QuantityToPlot=solution.ScaledJacobian, interpolation_degree=40)
import matplotlib.pyplot as plt
plt.show()
del pmesh, pboundary_condition
gc.collect()
def GetReducedMatrices(self, stiffness, F, mass=None, only_residual=False):
# GET REDUCED FORCE VECTOR
F_b = F[self.columns_in,0]
if only_residual:
return F_b
# GET REDUCED STIFFNESS MATRIX
stiffness_b = stiffness[self.columns_in,:][:,self.columns_in]
# GET REDUCED MASS MATRIX
mass_b = np.array([])
# if self.analysis_type != 'static':
# mass_b = mass[self.columns_in,:][:,self.columns_in]
return stiffness_b, F_b, mass_b
def ApplyDirichletGetReducedMatrices(self, stiffness, F, AppliedDirichlet, LoadFactor=1., mass=None, only_residual=False):
"""AppliedDirichlet is a non-member because it can be external incremental Dirichlet,
which is currently not implemented as member of BoundaryCondition. F also does not
correspond to Dirichlet forces, as it can be residual in incrementally linearised
framework.
"""
# # APPLY DIRICHLET BOUNDARY CONDITIONS
# for i in range(self.columns_out.shape[0]):
# F = F - LoadFactor*AppliedDirichlet[i]*stiffness.getcol(self.columns_out[i])
# MUCH FASTER APPROACH
# F = F - (stiffness[:,self.columns_out]*AppliedDirichlet*LoadFactor)[:,None]
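        # ONLY COLUMNS WITH NONZERO PRESCRIBED VALUES CONTRIBUTE TO THE
        # RIGHT HAND SIDE, SO RESTRICT THE PRODUCT TO THOSE COLUMNS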
nnz_cols = ~np.isclose(AppliedDirichlet,0.0)
F[self.columns_in] = F[self.columns_in] - (stiffness[self.columns_in,:][:,
self.columns_out[nnz_cols]]*AppliedDirichlet[nnz_cols]*LoadFactor)[:,None]
if only_residual:
return F
# GET REDUCED FORCE VECTOR
F_b = F[self.columns_in,0]
# GET REDUCED STIFFNESS
stiffness_b = stiffness[self.columns_in,:][:,self.columns_in]
# GET REDUCED MASS MATRIX
if self.analysis_type != 'static':
mass_b = mass[self.columns_in,:][:,self.columns_in]
return stiffness_b, F_b, F, mass_b
return stiffness_b, F_b, F
def GetReducedVectors(self, F, mass=None, only_residual=False):
# GET REDUCED FORCE VECTOR
F_b = F[self.columns_in,0]
# GET REDUCED MASS MATRIX
mass_b = []
if self.analysis_type != 'static' and not only_residual:
mass_b = mass[self.columns_in,0]
return F_b, mass_b
def UpdateFixDoFs(self, AppliedDirichletInc, fsize, nvar):
"""Updates the geometry (DoFs) with incremental Dirichlet boundary conditions
        for fixed/constrained degrees of freedom only. Needs to be applied per time step"""
# GET TOTAL SOLUTION
TotalSol = np.zeros((fsize,1))
TotalSol[self.columns_out,0] = AppliedDirichletInc
# RE-ORDER SOLUTION COMPONENTS
dU = TotalSol.reshape(int(TotalSol.shape[0]/nvar),nvar)
return dU
def UpdateFreeDoFs(self, sol, fsize, nvar):
"""Updates the geometry with iterative solutions of Newton-Raphson
        for free degrees of freedom only. Needs to be applied per NR iteration"""
# GET TOTAL SOLUTION
TotalSol = np.zeros((fsize,1))
TotalSol[self.columns_in,0] = sol
# RE-ORDER SOLUTION COMPONENTS
dU = TotalSol.reshape(int(TotalSol.shape[0]/nvar),nvar)
return dU
def SetNURBSParameterisation(self,nurbs_func,*args):
self.nurbs_info = nurbs_func(*args)
def SetNURBSCondition(self,nurbs_func,*args):
self.nurbs_condition = nurbs_func(*args)
def ComputeNeumannForces(self, mesh, material, function_spaces, compute_traction_forces=True, compute_body_forces=False):
"""Compute/assemble traction and body forces"""
if self.neumann_flags is None:
return np.zeros((mesh.points.shape[0]*material.nvar,1),dtype=np.float64)
nvar = material.nvar
ndim = mesh.InferSpatialDimension()
if self.neumann_flags.shape[0] == mesh.points.shape[0]:
self.neumann_data_applied_at = "node"
else:
if ndim==3:
if self.neumann_flags.shape[0] == mesh.faces.shape[0]:
self.neumann_data_applied_at = "face"
elif ndim==2:
if self.neumann_flags.shape[0] == mesh.edges.shape[0]:
self.neumann_data_applied_at = "face"
if self.neumann_data_applied_at == 'face':
from Florence.FiniteElements.Assembly import AssembleForces
if not isinstance(function_spaces,tuple):
raise ValueError("Boundary functional spaces not available for computing Neumman and body forces")
else:
                # CHECK IF A FUNCTION SPACE FOR BOUNDARY EXISTS - SAFEGUARDS AGAINST FORMULATIONS THAT DO NOT PROVIDE ONE
has_boundary_spaces = False
for fs in function_spaces:
if ndim == 3 and fs.ndim == 2:
has_boundary_spaces = True
break
elif ndim == 2 and fs.ndim == 1:
has_boundary_spaces = True
break
if not has_boundary_spaces:
from Florence import QuadratureRule, FunctionSpace
# COMPUTE BOUNDARY FUNCTIONAL SPACES
p = mesh.InferPolynomialDegree()
bquadrature = QuadratureRule(optimal=3, norder=2*p+1,
mesh_type=mesh.boundary_element_type, is_flattened=False)
bfunction_space = FunctionSpace(mesh.CreateDummyLowerDimensionalMesh(),
bquadrature, p=p, equally_spaced=mesh.IsEquallySpaced, use_optimal_quadrature=False)
function_spaces = (function_spaces[0],bfunction_space)
                    # raise ValueError("Boundary functional spaces not available for computing Neumann and body forces")
t_tassembly = time()
if self.analysis_type == "static":
F = AssembleForces(self, mesh, material, function_spaces,
compute_traction_forces=compute_traction_forces, compute_body_forces=compute_body_forces)
elif self.analysis_type == "dynamic":
if self.neumann_flags.ndim==2:
# THE POSITION OF NEUMANN DATA APPLIED AT FACES CAN CHANGE DYNAMICALLY
tmp_flags = np.copy(self.neumann_flags)
tmp_data = np.copy(self.applied_neumann)
F = np.zeros((mesh.points.shape[0]*nvar,self.neumann_flags.shape[1]))
for step in range(self.neumann_flags.shape[1]):
self.neumann_flags = tmp_flags[:,step]
self.applied_neumann = tmp_data[:,:,step]
F[:,step] = AssembleForces(self, mesh, material, function_spaces,
compute_traction_forces=compute_traction_forces, compute_body_forces=compute_body_forces).flatten()
self.neumann_flags = tmp_flags
self.applied_neumann = tmp_data
else:
                # NEUMANN DATA IS CONSTANT IN TIME, SO IT IS ASSEMBLED ONLY ONCE
F = AssembleForces(self, mesh, material, function_spaces,
compute_traction_forces=compute_traction_forces, compute_body_forces=compute_body_forces).flatten()
print("Assembled external traction forces. Time elapsed is {} seconds".format(time()-t_tassembly))
elif self.neumann_data_applied_at == 'node':
            # A DIRICHLET TYPE METHODOLOGY FOR APPLYING NEUMANN BOUNDARY CONDITIONS (i.e. AT NODES)
if self.analysis_type == "dynamic":
if self.neumann_flags.ndim ==3:
                    # FOR DYNAMIC ANALYSIS IT IS ASSUMED THAT
                    # to_apply DOES NOT CHANGE DURING THE ANALYSIS
flat_neu = self.neumann_flags[:,:,0].ravel()
to_apply = np.arange(self.neumann_flags[:,:,0].size)[~np.isnan(flat_neu)]
F = np.zeros((mesh.points.shape[0]*nvar,self.neumann_flags.shape[2]))
for step in range(self.neumann_flags.shape[2]):
flat_neu = self.neumann_flags[:,:,step].ravel()
to_apply = np.arange(self.neumann_flags[:,:,step].size)[~np.isnan(flat_neu)]
F[to_apply,step] = flat_neu[~np.isnan(flat_neu)]
else:
F = np.zeros((mesh.points.shape[0]*nvar,1))
flat_neu = self.neumann_flags.ravel()
to_apply = np.arange(self.neumann_flags.size)[~np.isnan(flat_neu)]
applied_neumann = flat_neu[~np.isnan(flat_neu)]
F[to_apply,0] = applied_neumann
else:
F = np.zeros((mesh.points.shape[0]*nvar,1))
flat_neu = self.neumann_flags.ravel()
to_apply = np.arange(self.neumann_flags.size)[~np.isnan(flat_neu)]
applied_neumann = flat_neu[~np.isnan(flat_neu)]
F[to_apply,0] = applied_neumann
return F
def __dirichlet_helper__(self,stiffness, AppliedDirichlet, columns_out):
from scipy.sparse import csc_matrix
M = csc_matrix((AppliedDirichlet,
(columns_out,np.zeros_like(columns_out))),
shape=(stiffness.shape[1],1))
return (stiffness*M).A
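
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Florence): the elimination performed in
# ApplyDirichletGetReducedMatrices above moves the known boundary values to
# the right hand side, F_in -= K[in, out] * u_out, and then solves only for
# the free degrees of freedom. The hypothetical helper below reproduces that
# pattern with scipy.sparse on a made-up 4-DoF system.
def _dirichlet_elimination_sketch():
    import numpy as np
    from scipy.sparse import csr_matrix
    from scipy.sparse.linalg import spsolve

    # Small symmetric "stiffness" matrix and zero load vector (made-up numbers)
    K = csr_matrix(np.array([[ 4., -1.,  0.,  0.],
                             [-1.,  4., -1.,  0.],
                             [ 0., -1.,  4., -1.],
                             [ 0.,  0., -1.,  4.]]))
    F = np.zeros((4, 1))

    columns_out = np.array([0, 3])              # constrained DoFs
    columns_in = np.array([1, 2])               # free DoFs
    applied_dirichlet = np.array([1.0, 0.0])    # prescribed values

    # Move the contribution of nonzero prescribed DoFs to the right hand side
    nnz = ~np.isclose(applied_dirichlet, 0.0)
    F[columns_in] = F[columns_in] - (K[columns_in, :][:, columns_out[nnz]]
                                     * applied_dirichlet[nnz])[:, None]

    # Reduced system for the free DoFs only
    K_b = K[columns_in, :][:, columns_in].tocsc()
    u_in = spsolve(K_b, F[columns_in, 0])

    # Reassemble the full solution vector
    u = np.zeros(4)
    u[columns_out] = applied_dirichlet
    u[columns_in] = u_in
    return u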
|
the-stack_106_14200
|
import matplotlib.pyplot as plt
from data_keys import renda_colors
def pie(df, labels, title, legend, bbox_to_anchor):
ax = df \
.value_counts() \
.sort_index() \
.rename(labels) \
.plot.pie(figsize=(12.8, 8),
colors=renda_colors,
labels=[''] * len(labels),
ylabel='',
autopct='%.2f%%',
textprops={'backgroundcolor': (1, 1, 1, 0.5), 'color': '#303030'})
mid = (ax.figure.subplotpars.right + ax.figure.subplotpars.left) / 2
plt.title(title,
fontsize=20,
ha='center',
va='baseline',
x=mid)
plt.suptitle(f'Quantidade total de alunos: {df.count()}',
fontsize=14,
ha='center',
va='baseline',
x=mid,
y=.85)
ax.legend(title=legend,
labels=labels.values(),
labelcolor='#303030',
loc="center left",
bbox_to_anchor=bbox_to_anchor)
plt.tight_layout()
plt.show()
plt.close()
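
# Illustrative usage sketch with hypothetical data (assumes data_keys.renda_colors
# provides at least three colours). `pie` expects a pandas Series of category
# codes, a dict mapping each code to a display label, a title, a legend title
# and a legend anchor position.
if __name__ == '__main__':
    import pandas as pd

    income = pd.Series([1, 1, 2, 3, 2, 1, 3, 3, 2, 1])
    income_labels = {1: 'Low income', 2: 'Middle income', 3: 'High income'}
    pie(income,
        income_labels,
        title='Income distribution of students',
        legend='Income bracket',
        bbox_to_anchor=(1.0, 0.5))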
|
the-stack_106_14201
|
"""
:class:`.DataBC` geocoder.
"""
from geopy.compat import urlencode
from geopy.geocoders.base import Geocoder, DEFAULT_SCHEME, DEFAULT_TIMEOUT
from geopy.exc import GeocoderQueryError
from geopy.location import Location
from geopy.util import logger
__all__ = ("DataBC", )
class DataBC(Geocoder):
"""
Geocoder using the Physical Address Geocoder from DataBC. Documentation at:
http://www.data.gov.bc.ca/dbc/geographic/locate/geocoding.page
"""
def __init__(self, scheme=DEFAULT_SCHEME, timeout=DEFAULT_TIMEOUT,
proxies=None, user_agent=None):
"""
Create a DataBC-based geocoder.
:param str scheme: Desired scheme.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception.
:param dict proxies: If specified, routes this geocoder's requests
through the specified proxy. E.g., {"https": "192.0.2.0"}. For
more information, see documentation on
:class:`urllib2.ProxyHandler`.
:param str user_agent: Use a custom User-Agent header.
.. versionadded:: 1.12.0
"""
super(DataBC, self).__init__(
scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent
)
self.api = '%s://apps.gov.bc.ca/pub/geocoder/addresses.geojson' % self.scheme
def geocode(
self,
query,
max_results=25,
set_back=0,
location_descriptor='any',
exactly_one=True,
timeout=None,
):
"""
Geocode a location query.
:param str query: The address or query you wish to geocode.
        :param int max_results: The maximum number of results to request.
:param float set_back: The distance to move the accessPoint away
from the curb (in meters) and towards the interior of the parcel.
location_descriptor must be set to accessPoint for set_back to
take effect.
:param str location_descriptor: The type of point requested. It
can be any, accessPoint, frontDoorPoint, parcelPoint,
rooftopPoint and routingPoint.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
"""
params = {'addressString': query}
if set_back != 0:
params['setBack'] = set_back
if location_descriptor not in ['any',
'accessPoint',
'frontDoorPoint',
'parcelPoint',
'rooftopPoint',
'routingPoint']:
raise GeocoderQueryError(
"You did not provided a location_descriptor "
"the webservice can consume. It should be any, accessPoint, "
"frontDoorPoint, parcelPoint, rooftopPoint or routingPoint."
)
params['locationDescriptor'] = location_descriptor
if exactly_one:
max_results = 1
params['maxResults'] = max_results
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
response = self._call_geocoder(url, timeout=timeout)
# Success; convert from GeoJSON
if not len(response['features']):
return None
geocoded = []
for feature in response['features']:
geocoded.append(self._parse_feature(feature))
if exactly_one:
return geocoded[0]
return geocoded
@staticmethod
def _parse_feature(feature):
properties = feature['properties']
coordinates = feature['geometry']['coordinates']
return Location(
properties['fullAddress'], (coordinates[1], coordinates[0]),
properties
)
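
# Illustrative usage sketch (hypothetical address): geocode a British Columbia
# location. This performs a real HTTP request to the DataBC service, so it is
# kept behind a __main__ guard.
if __name__ == '__main__':
    geocoder = DataBC()
    location = geocoder.geocode("1175 Douglas St, Victoria, BC")
    if location is not None:
        print(location.address, location.latitude, location.longitude)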
|
the-stack_106_14203
|
# Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import annotations
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from braket.circuits.ascii_circuit_diagram import AsciiCircuitDiagram
from braket.circuits.gate import Gate
from braket.circuits.instruction import Instruction
from braket.circuits.moments import Moments
from braket.circuits.noise import Noise
from braket.circuits.noise_helpers import (
apply_noise_to_gates,
apply_noise_to_moments,
check_noise_target_gates,
check_noise_target_qubits,
check_noise_target_unitary,
wrap_with_list,
)
from braket.circuits.observable import Observable
from braket.circuits.observables import TensorProduct
from braket.circuits.qubit import QubitInput
from braket.circuits.qubit_set import QubitSet, QubitSetInput
from braket.circuits.result_type import ObservableResultType, ResultType
from braket.ir.jaqcd import Program
SubroutineReturn = TypeVar(
"SubroutineReturn", Iterable[Instruction], Instruction, ResultType, Iterable[ResultType]
)
SubroutineCallable = TypeVar("SubroutineCallable", bound=Callable[..., SubroutineReturn])
AddableTypes = TypeVar("AddableTypes", SubroutineReturn, SubroutineCallable)
class Circuit:
"""
A representation of a quantum circuit that contains the instructions to be performed on a
quantum device and the requested result types.
See :mod:`braket.circuits.gates` module for all of the supported instructions.
See :mod:`braket.circuits.result_types` module for all of the supported result types.
`AddableTypes` are `Instruction`, iterable of `Instruction`, `ResultType`,
iterable of `ResultType`, or `SubroutineCallable`
"""
_ALL_QUBITS = "ALL" # Flag to indicate all qubits in _qubit_observable_mapping
@classmethod
def register_subroutine(cls, func: SubroutineCallable) -> None:
"""
Register the subroutine `func` as an attribute of the `Circuit` class. The attribute name
is the name of `func`.
Args:
func (Callable[..., Union[Instruction, Iterable[Instruction], ResultType,
Iterable[ResultType]]): The function of the subroutine to add to the class.
Examples:
>>> def h_on_all(target):
... circ = Circuit()
... for qubit in target:
... circ += Instruction(Gate.H(), qubit)
... return circ
...
>>> Circuit.register_subroutine(h_on_all)
>>> circ = Circuit().h_on_all(range(2))
>>> for instr in circ.instructions:
... print(instr)
...
Instruction('operator': 'H', 'target': QubitSet(Qubit(0),))
Instruction('operator': 'H', 'target': QubitSet(Qubit(1),))
"""
def method_from_subroutine(self, *args, **kwargs) -> SubroutineReturn:
return self.add(func, *args, **kwargs)
function_name = func.__name__
setattr(cls, function_name, method_from_subroutine)
function_attr = getattr(cls, function_name)
setattr(function_attr, "__doc__", func.__doc__)
def __init__(self, addable: AddableTypes = None, *args, **kwargs):
"""
Args:
addable (AddableTypes): The item(s) to add to self.
Default = None.
*args: Variable length argument list. Supports any arguments that `add()` offers.
**kwargs: Arbitrary keyword arguments. Supports any keyword arguments that `add()`
offers.
Raises:
TypeError: If `addable` is an unsupported type.
Examples:
>>> circ = Circuit([Instruction(Gate.H(), 4), Instruction(Gate.CNot(), [4, 5])])
>>> circ = Circuit().h(0).cnot(0, 1)
>>> circ = Circuit().h(0).cnot(0, 1).probability([0, 1])
>>> @circuit.subroutine(register=True)
>>> def bell_pair(target):
... return Circ().h(target[0]).cnot(target[0:2])
...
>>> circ = Circuit(bell_pair, [4,5])
>>> circ = Circuit().bell_pair([4,5])
"""
self._moments: Moments = Moments()
self._result_types: Dict[ResultType] = {}
self._qubit_observable_mapping: Dict[Union[int, Circuit._ALL_QUBITS], Observable] = {}
self._qubit_target_mapping: Dict[int, Tuple[int]] = {}
self._qubit_observable_set = set()
if addable is not None:
self.add(addable, *args, **kwargs)
@property
def depth(self) -> int:
"""int: Get the circuit depth."""
return self._moments.depth
@property
def instructions(self) -> Iterable[Instruction]:
"""Iterable[Instruction]: Get an `iterable` of instructions in the circuit."""
return self._moments.values()
@property
def result_types(self) -> List[ResultType]:
"""List[ResultType]: Get a list of requested result types in the circuit."""
return list(self._result_types.keys())
@property
def basis_rotation_instructions(self) -> List[Instruction]:
"""List[Instruction]: Get a list of basis rotation instructions in the circuit.
These basis rotation instructions are added if result types are requested for
an observable other than Pauli-Z.
"""
# Note that basis_rotation_instructions can change each time a new instruction
# is added to the circuit because `self._moments.qubits` would change
basis_rotation_instructions = []
all_qubit_observable = self._qubit_observable_mapping.get(Circuit._ALL_QUBITS)
if all_qubit_observable:
for target in self.qubits:
basis_rotation_instructions += Circuit._observable_to_instruction(
all_qubit_observable, target
)
return basis_rotation_instructions
target_lists = sorted(list(set(self._qubit_target_mapping.values())))
for target_list in target_lists:
observable = self._qubit_observable_mapping[target_list[0]]
basis_rotation_instructions += Circuit._observable_to_instruction(
observable, target_list
)
return basis_rotation_instructions
@staticmethod
def _observable_to_instruction(observable: Observable, target_list: List[int]):
return [Instruction(gate, target_list) for gate in observable.basis_rotation_gates]
@property
def moments(self) -> Moments:
"""Moments: Get the `moments` for this circuit. Note that this includes observables."""
return self._moments
@property
def qubit_count(self) -> int:
"""Get the qubit count for this circuit. Note that this includes observables."""
all_qubits = self._moments.qubits.union(self._qubit_observable_set)
return len(all_qubits)
@property
def qubits(self) -> QubitSet:
"""QubitSet: Get a copy of the qubits for this circuit."""
return QubitSet(self._moments.qubits.union(self._qubit_observable_set))
def add_result_type(
self,
result_type: ResultType,
target: QubitSetInput = None,
target_mapping: Dict[QubitInput, QubitInput] = {},
) -> Circuit:
"""
Add a requested result type to `self`, returns `self` for chaining ability.
Args:
result_type (ResultType): `ResultType` to add into `self`.
target (int, Qubit, or iterable of int / Qubit, optional): Target qubits for the
`result_type`.
Default = `None`.
target_mapping (dictionary[int or Qubit, int or Qubit], optional): A dictionary of
qubit mappings to apply to the `result_type.target`. Key is the qubit in
`result_type.target` and the value is what the key will be changed to.
Default = `{}`.
Note: target and target_mapping will only be applied to those requested result types with
the attribute `target`. The result_type will be appended to the end of the dict keys of
`circuit.result_types` only if it does not already exist in `circuit.result_types`
Returns:
Circuit: self
Raises:
TypeError: If both `target_mapping` and `target` are supplied.
ValueError: If the observable specified for a qubit is different from what is
specified by the result types already added to the circuit. Only one observable
is allowed for a qubit.
Examples:
>>> result_type = ResultType.Probability(target=[0, 1])
>>> circ = Circuit().add_result_type(result_type)
>>> print(circ.result_types[0])
Probability(target=QubitSet([Qubit(0), Qubit(1)]))
>>> result_type = ResultType.Probability(target=[0, 1])
>>> circ = Circuit().add_result_type(result_type, target_mapping={0: 10, 1: 11})
>>> print(circ.result_types[0])
Probability(target=QubitSet([Qubit(10), Qubit(11)]))
>>> result_type = ResultType.Probability(target=[0, 1])
>>> circ = Circuit().add_result_type(result_type, target=[10, 11])
>>> print(circ.result_types[0])
Probability(target=QubitSet([Qubit(10), Qubit(11)]))
>>> result_type = ResultType.StateVector()
>>> circ = Circuit().add_result_type(result_type)
>>> print(circ.result_types[0])
StateVector()
"""
if target_mapping and target is not None:
raise TypeError("Only one of 'target_mapping' or 'target' can be supplied.")
if not target_mapping and not target:
# Nothing has been supplied, add result_type
result_type_to_add = result_type
elif target_mapping:
# Target mapping has been supplied, copy result_type
result_type_to_add = result_type.copy(target_mapping=target_mapping)
else:
# ResultType with target
result_type_to_add = result_type.copy(target=target)
if result_type_to_add not in self._result_types:
self._add_to_qubit_observable_mapping(result_type_to_add)
self._add_to_qubit_observable_set(result_type_to_add)
# using dict as an ordered set, value is arbitrary
self._result_types[result_type_to_add] = None
return self
def _add_to_qubit_observable_mapping(self, result_type: ResultType) -> None:
if isinstance(result_type, ResultType.Probability):
observable = Observable.Z() # computational basis
elif isinstance(result_type, ObservableResultType):
observable = result_type.observable
else:
return
targets = result_type.target or list(self._qubit_observable_set)
all_qubits_observable = self._qubit_observable_mapping.get(Circuit._ALL_QUBITS)
for i in range(len(targets)):
target = targets[i]
tensor_product_dict = (
Circuit._tensor_product_index_dict(observable)
if isinstance(observable, TensorProduct)
else None
)
new_observable = tensor_product_dict[i][0] if tensor_product_dict else observable
current_observable = all_qubits_observable or self._qubit_observable_mapping.get(target)
add_observable = Circuit._validate_observable_to_add_for_qubit(
current_observable, new_observable, target
)
if result_type.target:
new_targets = (
tuple(
result_type.target[
tensor_product_dict[i][1][0] : tensor_product_dict[i][1][1]
]
)
if tensor_product_dict
else tuple(result_type.target)
)
if add_observable:
self._qubit_target_mapping[target] = new_targets
self._qubit_observable_mapping[target] = new_observable
elif new_observable.qubit_count > 1:
current_target = self._qubit_target_mapping.get(target)
if current_target and current_target != new_targets:
raise ValueError(
f"Target order {current_target} of existing result type with"
f" observable {current_observable} conflicts with order {targets}"
" of new result type"
)
if not result_type.target:
if all_qubits_observable and all_qubits_observable != observable:
raise ValueError(
f"Existing result type for observable {all_qubits_observable} for all qubits"
f" conflicts with observable {observable} for new result type"
)
self._qubit_observable_mapping[Circuit._ALL_QUBITS] = observable
@staticmethod
def _validate_observable_to_add_for_qubit(current_observable, new_observable, target):
identity = Observable.I()
add_observable = False
if not current_observable or (
current_observable == identity and new_observable != identity
):
add_observable = True
elif (
current_observable != identity
and new_observable != identity
and current_observable != new_observable
):
raise ValueError(
f"Observable {new_observable} specified for target {target} conflicts with"
+ f" existing observable {current_observable} on this target."
)
return add_observable
@staticmethod
def _tensor_product_index_dict(observable: TensorProduct) -> Dict[int, Observable]:
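        """
        Map each qubit index within a tensor-product observable to a tuple of
        the factor acting on that index and the (start, end) slice of the
        result type's target that the factor covers.
        """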
obj_dict = {}
i = 0
factors = list(observable.factors)
total = factors[0].qubit_count
while factors:
if i >= total:
factors.pop(0)
if factors:
total += factors[0].qubit_count
if factors:
obj_dict[i] = (factors[0], (total - factors[0].qubit_count, total))
i += 1
return obj_dict
def _add_to_qubit_observable_set(self, result_type: ResultType) -> None:
if isinstance(result_type, ObservableResultType) and result_type.target:
self._qubit_observable_set.update(result_type.target)
def add_instruction(
self,
instruction: Instruction,
target: QubitSetInput = None,
target_mapping: Dict[QubitInput, QubitInput] = {},
) -> Circuit:
"""
Add an instruction to `self`, returns `self` for chaining ability.
Args:
instruction (Instruction): `Instruction` to add into `self`.
target (int, Qubit, or iterable of int / Qubit, optional): Target qubits for the
`instruction`. If a single qubit gate, an instruction is created for every index
in `target`.
Default = `None`.
target_mapping (dictionary[int or Qubit, int or Qubit], optional): A dictionary of
qubit mappings to apply to the `instruction.target`. Key is the qubit in
`instruction.target` and the value is what the key will be changed to.
Default = `{}`.
Returns:
Circuit: self
Raises:
TypeError: If both `target_mapping` and `target` are supplied.
Examples:
>>> instr = Instruction(Gate.CNot(), [0, 1])
>>> circ = Circuit().add_instruction(instr)
>>> print(circ.instructions[0])
Instruction('operator': 'CNOT', 'target': QubitSet(Qubit(0), Qubit(1)))
>>> instr = Instruction(Gate.CNot(), [0, 1])
>>> circ = Circuit().add_instruction(instr, target_mapping={0: 10, 1: 11})
>>> print(circ.instructions[0])
Instruction('operator': 'CNOT', 'target': QubitSet(Qubit(10), Qubit(11)))
>>> instr = Instruction(Gate.CNot(), [0, 1])
>>> circ = Circuit().add_instruction(instr, target=[10, 11])
>>> print(circ.instructions[0])
Instruction('operator': 'CNOT', 'target': QubitSet(Qubit(10), Qubit(11)))
>>> instr = Instruction(Gate.H(), 0)
>>> circ = Circuit().add_instruction(instr, target=[10, 11])
>>> print(circ.instructions[0])
Instruction('operator': 'H', 'target': QubitSet(Qubit(10),))
>>> print(circ.instructions[1])
Instruction('operator': 'H', 'target': QubitSet(Qubit(11),))
"""
if target_mapping and target is not None:
raise TypeError("Only one of 'target_mapping' or 'target' can be supplied.")
if not target_mapping and not target:
# Nothing has been supplied, add instruction
instructions_to_add = [instruction]
elif target_mapping:
# Target mapping has been supplied, copy instruction
instructions_to_add = [instruction.copy(target_mapping=target_mapping)]
elif hasattr(instruction.operator, "qubit_count") and instruction.operator.qubit_count == 1:
# single qubit operator with target, add an instruction for each target
instructions_to_add = [instruction.copy(target=qubit) for qubit in target]
else:
# non single qubit operator with target, add instruction with target
instructions_to_add = [instruction.copy(target=target)]
self._moments.add(instructions_to_add)
return self
def add_circuit(
self,
circuit: Circuit,
target: QubitSetInput = None,
target_mapping: Dict[QubitInput, QubitInput] = {},
) -> Circuit:
"""
Add a `circuit` to self, returns self for chaining ability.
Args:
circuit (Circuit): Circuit to add into self.
target (int, Qubit, or iterable of int / Qubit, optional): Target qubits for the
supplied circuit. This is a macro over `target_mapping`; `target` is converted to
a `target_mapping` by zipping together a sorted `circuit.qubits` and `target`.
Default = `None`.
target_mapping (dictionary[int or Qubit, int or Qubit], optional): A dictionary of
qubit mappings to apply to the qubits of `circuit.instructions`. Key is the qubit
to map, and the value is what to change it to. Default = `{}`.
Returns:
Circuit: self
Raises:
TypeError: If both `target_mapping` and `target` are supplied.
Note:
Supplying `target` sorts `circuit.qubits` to have deterministic behavior since
`circuit.qubits` ordering is based on how instructions are inserted.
            Use caution when using this with circuits that have a lot of qubits, as the sort
can be resource-intensive. Use `target_mapping` to use a linear runtime to remap
the qubits.
Requested result types of the circuit that will be added will be appended to the end
of the list for the existing requested result types. A result type to be added that is
equivalent to an existing requested result type will not be added.
Examples:
>>> widget = Circuit().h(0).cnot([0, 1])
>>> circ = Circuit().add_circuit(widget)
>>> print(circ.instructions[0])
Instruction('operator': 'H', 'target': QubitSet(Qubit(0),))
>>> print(circ.instructions[1])
Instruction('operator': 'CNOT', 'target': QubitSet(Qubit(0), Qubit(1)))
>>> widget = Circuit().h(0).cnot([0, 1])
>>> circ = Circuit().add_circuit(widget, target_mapping={0: 10, 1: 11})
>>> print(circ.instructions[0])
Instruction('operator': 'H', 'target': QubitSet(Qubit(10),))
>>> print(circ.instructions[1])
Instruction('operator': 'CNOT', 'target': QubitSet(Qubit(10), Qubit(11)))
>>> widget = Circuit().h(0).cnot([0, 1])
>>> circ = Circuit().add_circuit(widget, target=[10, 11])
>>> print(circ.instructions[0])
Instruction('operator': 'H', 'target': QubitSet(Qubit(10),))
>>> print(circ.instructions[1])
Instruction('operator': 'CNOT', 'target': QubitSet(Qubit(10), Qubit(11)))
"""
if target_mapping and target is not None:
raise TypeError("Only one of 'target_mapping' or 'target' can be supplied.")
elif target is not None:
keys = sorted(circuit.qubits)
values = target
target_mapping = dict(zip(keys, values))
for instruction in circuit.instructions:
self.add_instruction(instruction, target_mapping=target_mapping)
for result_type in circuit.result_types:
self.add_result_type(result_type, target_mapping=target_mapping)
return self
def apply_gate_noise(
self,
noise: Union[Type[Noise], Iterable[Type[Noise]]],
target_gates: Optional[Union[Type[Gate], Iterable[Type[Gate]]]] = None,
target_unitary: np.ndarray = None,
target_qubits: Optional[QubitSetInput] = None,
) -> Circuit:
"""Apply `noise` to the circuit according to `target_gates`, `target_unitary` and
`target_qubits`.
For any parameter that is None, that specification is ignored (e.g. if `target_gates`
is None then the noise is applied after every gate in `target_qubits`).
If `target_gates` and `target_qubits` are both None, then `noise` is
applied to every qubit after every gate.
Noise is either applied to `target_gates` or `target_unitary`, so they cannot be
provided at the same time.
        When `noise.qubit_count` == 1, i.e. `noise` is single-qubit, `noise` is added to all
qubits in `target_gates` or `target_unitary` (or to all qubits in `target_qubits`
if `target_gates` is None).
When `noise.qubit_count` > 1 and `target_gates` is not None, the number of qubits of
any gate in `target_gates` must be the same as `noise.qubit_count`.
        When `noise.qubit_count` > 1 and both `target_gates` and `target_unitary` are None, noise is
        only applied to gates with the same qubit_count in target_qubits.
Args:
noise (Union[Type[Noise], Iterable[Type[Noise]]]): Noise channel(s) to be applied
to the circuit.
target_gates (Union[Type[Gate], Iterable[Type[Gate]], optional]): Gate class or
List of Gate classes which `noise` is applied to. Default=None.
target_unitary (np.ndarray): matrix of the target unitary gates. Default=None.
target_qubits (Union[QubitSetInput, optional]): Index or indices of qubit(s).
Default=None.
Returns:
Circuit: self
Raises:
TypeError:
If `noise` is not Noise type.
If `target_gates` is not a Gate type, Iterable[Gate].
If `target_unitary` is not a np.ndarray type.
If `target_qubits` has non-integers or negative integers.
IndexError:
If applying noise to an empty circuit.
If `target_qubits` is out of range of circuit.qubits.
ValueError:
If both `target_gates` and `target_unitary` are provided.
If `target_unitary` is not a unitary.
If `noise` is multi-qubit noise and `target_gates` contain gates
with the number of qubits not the same as `noise.qubit_count`.
Warning:
If `noise` is multi-qubit noise while there is no gate with the same
number of qubits in `target_qubits` or in the whole circuit when
`target_qubits` is not given.
If no `target_gates` or `target_unitary` exist in `target_qubits` or
in the whole circuit when they are not given.
Examples:
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ)
T : |0|1|2|
q0 : -X-Z-C-
|
q1 : -Y-X-X-
T : |0|1|2|
>>> noise = Noise.Depolarizing(probability=0.1)
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ.apply_gate_noise(noise, target_gates = Gate.X))
T : | 0 | 1 |2|
q0 : -X-DEPO(0.1)-Z-----------C-
|
q1 : -Y-----------X-DEPO(0.1)-X-
T : | 0 | 1 |2|
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ.apply_gate_noise(noise, target_qubits = 1))
T : | 0 | 1 | 2 |
q0 : -X-----------Z-----------C-----------
|
q1 : -Y-DEPO(0.1)-X-DEPO(0.1)-X-DEPO(0.1)-
T : | 0 | 1 | 2 |
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ.apply_gate_noise(noise,
... target_gates = [Gate.X,Gate.Y],
... target_qubits = [0,1])
... )
T : | 0 | 1 |2|
q0 : -X-DEPO(0.1)-Z-----------C-
|
q1 : -Y-DEPO(0.1)-X-DEPO(0.1)-X-
T : | 0 | 1 |2|
"""
# check whether gate noise is applied to an empty circuit
if not self.qubits:
raise IndexError("Gate noise cannot be applied to an empty circuit.")
# check if target_gates and target_unitary are both given
if (target_unitary is not None) and (target_gates is not None):
raise ValueError("target_unitary and target_gates cannot be input at the same time.")
# check target_qubits
target_qubits = check_noise_target_qubits(self, target_qubits)
if not all(qubit in self.qubits for qubit in target_qubits):
raise IndexError("target_qubits must be within the range of the current circuit.")
# make noise a list
noise = wrap_with_list(noise)
# make target_gates a list
if target_gates is not None:
target_gates = wrap_with_list(target_gates)
# remove duplicate items
target_gates = list(dict.fromkeys(target_gates))
for noise_channel in noise:
if not isinstance(noise_channel, Noise):
raise TypeError("Noise must be an instance of the Noise class")
# check whether target_gates is valid
if target_gates is not None:
check_noise_target_gates(noise_channel, target_gates)
if target_unitary is not None:
check_noise_target_unitary(noise_channel, target_unitary)
if target_unitary is not None:
return apply_noise_to_gates(self, noise, target_unitary, target_qubits)
else:
return apply_noise_to_gates(self, noise, target_gates, target_qubits)
def apply_initialization_noise(
self,
noise: Union[Type[Noise], Iterable[Type[Noise]]],
target_qubits: Optional[QubitSetInput] = None,
) -> Circuit:
"""Apply `noise` at the beginning of the circuit for every qubit (default) or target_qubits`.
Only when `target_qubits` is given can the noise be applied to an empty circuit.
When `noise.qubit_count` > 1, the number of qubits in target_qubits must be equal
to `noise.qubit_count`.
Args:
noise (Union[Type[Noise], Iterable[Type[Noise]]]): Noise channel(s) to be applied
to the circuit.
target_qubits (Union[QubitSetInput, optional]): Index or indices of qubit(s).
Default=None.
Returns:
Circuit: self
Raises:
TypeError:
If `noise` is not Noise type.
If `target_qubits` has non-integers or negative integers.
IndexError:
If applying noise to an empty circuit when `target_qubits` is not given.
ValueError:
If `noise.qubit_count` > 1 and the number of qubits in target_qubits is
not the same as `noise.qubit_count`.
Examples:
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ)
>>> noise = Noise.Depolarizing(probability=0.1)
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ.apply_initialization_noise(noise))
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ.apply_initialization_noise(noise, target_qubits = 1))
>>> circ = Circuit()
>>> print(circ.apply_initialization_noise(noise, target_qubits = [0, 1]))
"""
if (len(self.qubits) == 0) and (target_qubits is None):
raise IndexError(
"target_qubits must be provided in order to apply the initialization noise \
to an empty circuit."
)
target_qubits = check_noise_target_qubits(self, target_qubits)
# make noise a list
noise = wrap_with_list(noise)
for noise_channel in noise:
if not isinstance(noise_channel, Noise):
raise TypeError("Noise must be an instance of the Noise class")
if noise_channel.qubit_count > 1 and noise_channel.qubit_count != len(target_qubits):
raise ValueError(
"target_qubits needs to be provided for this multi-qubit noise channel, and \
the number of qubits in target_qubits must be the same as defined by the multi-qubit noise channel."
)
return apply_noise_to_moments(self, noise, target_qubits, "initialization")
def apply_readout_noise(
self,
noise: Union[Type[Noise], Iterable[Type[Noise]]],
target_qubits: Optional[QubitSetInput] = None,
) -> Circuit:
"""Apply `noise` right before measurement in every qubit (default) or target_qubits`.
Only when `target_qubits` is given can the noise be applied to an empty circuit.
When `noise.qubit_count` > 1, the number of qubits in target_qubits must be equal
to `noise.qubit_count`.
Args:
noise (Union[Type[Noise], Iterable[Type[Noise]]]): Noise channel(s) to be applied
to the circuit.
target_qubits (Union[QubitSetInput, optional]): Index or indices of qubit(s).
Default=None.
Returns:
Circuit: self
Raises:
TypeError:
If `noise` is not Noise type.
If `target_qubits` has non-integers.
IndexError:
If applying noise to an empty circuit.
ValueError:
If `target_qubits` has negative integers.
If `noise.qubit_count` > 1 and the number of qubits in target_qubits is
not the same as `noise.qubit_count`.
Examples:
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ)
>>> noise = Noise.Depolarizing(probability=0.1)
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
            >>> print(circ.apply_readout_noise(noise))
            >>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
            >>> print(circ.apply_readout_noise(noise, target_qubits = 1))
            >>> circ = Circuit()
            >>> print(circ.apply_readout_noise(noise, target_qubits = [0, 1]))
"""
if (len(self.qubits) == 0) and (target_qubits is None):
raise IndexError(
"target_qubits must be provided in order to apply the readout noise \
to an empty circuit."
)
if target_qubits is None:
target_qubits = self.qubits
else:
if not isinstance(target_qubits, list):
target_qubits = [target_qubits]
if not all(isinstance(q, int) for q in target_qubits):
raise TypeError("target_qubits must be integer(s)")
if not all(q >= 0 for q in target_qubits):
raise ValueError("target_qubits must contain only non-negative integers.")
target_qubits = QubitSet(target_qubits)
# make noise a list
noise = wrap_with_list(noise)
for noise_channel in noise:
if not isinstance(noise_channel, Noise):
raise TypeError("Noise must be an instance of the Noise class")
if noise_channel.qubit_count > 1 and noise_channel.qubit_count != len(target_qubits):
raise ValueError(
"target_qubits needs to be provided for this multi-qubit noise channel, and \
the number of qubits in target_qubits must be the same as defined by the multi-qubit noise channel."
)
return apply_noise_to_moments(self, noise, target_qubits, "readout")
def add(self, addable: AddableTypes, *args, **kwargs) -> Circuit:
"""
Generic add method for adding item(s) to self. Any arguments that
`add_circuit()` and / or `add_instruction()` and / or `add_result_type`
supports are supported by this method. If adding a
subroutine, check with that subroutines documentation to determine what
input it allows.
Args:
addable (AddableTypes): The item(s) to add to self. Default = `None`.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
Circuit: self
Raises:
TypeError: If `addable` is an unsupported type
See Also:
`add_circuit()`
`add_instruction()`
`add_result_type()`
Examples:
>>> circ = Circuit().add([Instruction(Gate.H(), 4), Instruction(Gate.CNot(), [4, 5])])
>>> circ = Circuit().add([ResultType.StateVector()])
>>> circ = Circuit().h(4).cnot([4, 5])
>>> @circuit.subroutine()
>>> def bell_pair(target):
... return Circuit().h(target[0]).cnot(target[0: 2])
...
>>> circ = Circuit().add(bell_pair, [4,5])
"""
def _flatten(addable):
if isinstance(addable, Iterable):
for item in addable:
yield from _flatten(item)
else:
yield addable
for item in _flatten(addable):
if isinstance(item, Instruction):
self.add_instruction(item, *args, **kwargs)
elif isinstance(item, ResultType):
self.add_result_type(item, *args, **kwargs)
elif isinstance(item, Circuit):
self.add_circuit(item, *args, **kwargs)
elif callable(item):
self.add(item(*args, **kwargs))
else:
raise TypeError(f"Cannot add a '{type(item)}' to a Circuit")
return self
def diagram(self, circuit_diagram_class=AsciiCircuitDiagram) -> str:
"""
Get a diagram for the current circuit.
Args:
circuit_diagram_class (Class, optional): A `CircuitDiagram` class that builds the
diagram for this circuit. Default = `AsciiCircuitDiagram`.
Returns:
str: An ASCII string circuit diagram.
"""
return circuit_diagram_class.build_diagram(self)
def to_ir(self) -> Program:
"""
Converts the circuit into the canonical intermediate representation.
If the circuit is sent over the wire, this method is called before it is sent.
Returns:
(Program): An AWS quantum circuit description program in JSON format.
"""
ir_instructions = [instr.to_ir() for instr in self.instructions]
ir_results = [result_type.to_ir() for result_type in self.result_types]
ir_basis_rotation_instructions = [
instr.to_ir() for instr in self.basis_rotation_instructions
]
return Program.construct(
instructions=ir_instructions,
results=ir_results,
basis_rotation_instructions=ir_basis_rotation_instructions,
)
def _copy(self) -> Circuit:
copy = Circuit().add(self.instructions)
copy.add(self.result_types)
return copy
def copy(self) -> Circuit:
"""
Return a shallow copy of the circuit.
Returns:
Circuit: A shallow copy of the circuit.
"""
return self._copy()
def __iadd__(self, addable: AddableTypes) -> Circuit:
return self.add(addable)
def __add__(self, addable: AddableTypes) -> Circuit:
new = self._copy()
new.add(addable)
return new
def __repr__(self) -> str:
if not self.result_types:
return f"Circuit('instructions': {list(self.instructions)})"
else:
return (
f"Circuit('instructions': {list(self.instructions)}"
+ f"result_types': {self.result_types})"
)
def __str__(self):
return self.diagram(AsciiCircuitDiagram)
def __eq__(self, other):
if isinstance(other, Circuit):
return (
list(self.instructions) == list(other.instructions)
and self.result_types == other.result_types
)
return NotImplemented
def subroutine(register=False):
"""
Subroutine is a function that returns instructions, result types, or circuits.
Args:
register (bool, optional): If `True`, adds this subroutine into the `Circuit` class.
Default = `False`.
Examples:
>>> @circuit.subroutine(register=True)
>>> def bell_circuit():
... return Circuit().h(0).cnot(0, 1)
...
>>> circ = Circuit().bell_circuit()
>>> for instr in circ.instructions:
... print(instr)
...
Instruction('operator': 'H', 'target': QubitSet(Qubit(0),))
Instruction('operator': 'H', 'target': QubitSet(Qubit(1),))
"""
def subroutine_function_wrapper(func: Callable[..., SubroutineReturn]) -> SubroutineReturn:
if register:
Circuit.register_subroutine(func)
return func
return subroutine_function_wrapper
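
# Illustrative usage sketch (not part of the original module): build a small
# Bell-pair circuit with a probability result type and inspect its diagram and
# IR. Importing the package, rather than this module, ensures gate and result
# type subroutines such as `h`, `cnot` and `probability` are registered.
if __name__ == "__main__":
    from braket.circuits import Circuit as _Circuit

    bell = _Circuit().h(0).cnot(0, 1).probability([0, 1])
    print(bell)                 # ASCII diagram
    print(bell.qubit_count)     # 2
    print(bell.to_ir().json())  # JSON intermediate representation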
|
the-stack_106_14204
|
import logging
import os
import re
import shutil
import sys
import tempfile
from textwrap import dedent
from unittest import mock
import pytest
import dask
from distributed import Client, Nanny, Scheduler, Worker
from distributed.utils_test import captured_logger, cluster, gen_cluster, gen_test
PRELOAD_TEXT = """
_worker_info = {}
def dask_setup(worker):
_worker_info['address'] = worker.address
def get_worker_address():
return _worker_info['address']
"""
def test_worker_preload_file(loop):
def check_worker():
import worker_info
return worker_info.get_worker_address()
tmpdir = tempfile.mkdtemp()
try:
path = os.path.join(tmpdir, "worker_info.py")
with open(path, "w") as f:
f.write(PRELOAD_TEXT)
with cluster(worker_kwargs={"preload": [path]}) as (s, workers), Client(
s["address"], loop=loop
) as c:
assert c.run(check_worker) == {
worker["address"]: worker["address"] for worker in workers
}
finally:
shutil.rmtree(tmpdir)
@gen_test()
async def test_worker_preload_text():
text = """
def dask_setup(worker):
worker.foo = 'setup'
"""
async with Scheduler(dashboard_address=":0", preload=text) as s:
assert s.foo == "setup"
async with Worker(s.address, preload=[text]) as w:
assert w.foo == "setup"
@gen_cluster(nthreads=[])
async def test_worker_preload_config(s):
text = """
def dask_setup(worker):
worker.foo = 'setup'
def dask_teardown(worker):
worker.foo = 'teardown'
"""
with dask.config.set(
{"distributed.worker.preload": [text], "distributed.nanny.preload": [text]}
):
async with Nanny(s.address) as w:
assert w.foo == "setup"
async with Client(s.address, asynchronous=True) as c:
d = await c.run(lambda dask_worker: dask_worker.foo)
assert d == {w.worker_address: "setup"}
assert w.foo == "teardown"
def test_worker_preload_module(loop):
def check_worker():
import worker_info
return worker_info.get_worker_address()
tmpdir = tempfile.mkdtemp()
sys.path.insert(0, tmpdir)
try:
path = os.path.join(tmpdir, "worker_info.py")
with open(path, "w") as f:
f.write(PRELOAD_TEXT)
with cluster(worker_kwargs={"preload": ["worker_info"]}) as (
s,
workers,
), Client(s["address"], loop=loop) as c:
assert c.run(check_worker) == {
worker["address"]: worker["address"] for worker in workers
}
finally:
sys.path.remove(tmpdir)
shutil.rmtree(tmpdir)
@gen_cluster(nthreads=[])
async def test_worker_preload_click(s):
text = """
import click
@click.command()
def dask_setup(worker):
worker.foo = 'setup'
"""
async with Worker(s.address, preload=text) as w:
assert w.foo == "setup"
@gen_cluster(nthreads=[])
async def test_worker_preload_click_async(s, tmpdir):
# Ensure we allow for click commands wrapping coroutines
# https://github.com/dask/distributed/issues/4169
text = """
import click
@click.command()
async def dask_setup(worker):
worker.foo = 'setup'
"""
async with Worker(s.address, preload=text) as w:
assert w.foo == "setup"
@gen_test()
async def test_preload_import_time():
text = """
from distributed.comm.registry import backends
from distributed.comm.tcp import TCPBackend
backends["foo"] = TCPBackend()
""".strip()
try:
async with Scheduler(dashboard_address=":0", preload=text, protocol="foo") as s:
async with Nanny(s.address, preload=text, protocol="foo") as n:
async with Client(s.address, asynchronous=True) as c:
await c.wait_for_workers(1)
finally:
from distributed.comm.registry import backends
del backends["foo"]
@gen_test()
async def test_web_preload():
with mock.patch(
"urllib3.PoolManager.request",
**{
"return_value.data": b"def dask_setup(dask_server):"
b"\n dask_server.foo = 1"
b"\n"
},
) as request, captured_logger("distributed.preloading") as log:
async with Scheduler(
host="localhost", preload=["http://example.com/preload"]
) as s:
assert s.foo == 1
assert (
re.match(
r"(?s).*Downloading preload at http://example.com/preload\n"
r".*Run preload setup function: http://example.com/preload\n"
r".*",
log.getvalue(),
)
is not None
)
assert request.mock_calls == [
mock.call(method="GET", url="http://example.com/preload", retries=mock.ANY)
]
@gen_cluster(nthreads=[])
async def test_scheduler_startup(s):
text = f"""
import dask
dask.config.set(scheduler_address="{s.address}")
"""
async with Worker(preload=text) as w:
assert w.scheduler.address == s.address
@gen_cluster(nthreads=[])
async def test_scheduler_startup_nanny(s):
text = f"""
import dask
dask.config.set(scheduler_address="{s.address}")
"""
async with Nanny(preload_nanny=text) as w:
assert w.scheduler.address == s.address
@gen_test()
async def test_web_preload_worker():
with mock.patch(
"urllib3.PoolManager.request",
**{
"return_value.data": b"import dask"
b'\ndask.config.set(scheduler_address="tcp://127.0.0.1:8786")'
b"\n"
},
) as request:
async with Scheduler(port=8786, host="localhost") as s:
async with Nanny(preload_nanny=["http://example.com/preload"]) as nanny:
assert nanny.scheduler_addr == s.address
assert request.mock_calls == [
mock.call(method="GET", url="http://example.com/preload", retries=mock.ANY)
]
# This test is blocked on https://github.com/dask/distributed/issues/5819
@pytest.mark.xfail(
reason="The preload argument to the client isn't supported yet", strict=True
)
@gen_cluster(nthreads=[])
async def test_client_preload_text(s: Scheduler):
text = dedent(
"""\
def dask_setup(client):
client.foo = "setup"
def dask_teardown(client):
client.foo = "teardown"
"""
)
async with Client(address=s.address, asynchronous=True, preload=text) as c:
assert c.foo == "setup"
assert c.foo == "teardown"
@gen_cluster(nthreads=[])
async def test_client_preload_config(s):
text = dedent(
"""\
def dask_setup(client):
client.foo = "setup"
def dask_teardown(client):
client.foo = "teardown"
"""
)
with dask.config.set({"distributed.client.preload": [text]}):
async with Client(address=s.address, asynchronous=True) as c:
assert c.foo == "setup"
assert c.foo == "teardown"
# This test is blocked on https://github.com/dask/distributed/issues/5819
@pytest.mark.xfail(
reason="The preload argument to the client isn't supported yet", strict=True
)
@gen_cluster(nthreads=[])
async def test_client_preload_click(s):
text = dedent(
"""\
import click
@click.command()
@click.argument("value")
def dask_setup(client, value):
client.foo = value
"""
)
value = "setup"
async with Client(
address=s.address, asynchronous=True, preload=text, preload_argv=[[value]]
) as c:
assert c.foo == value
@gen_test()
async def test_teardown_failure_doesnt_crash_scheduler():
text = """
def dask_teardown(worker):
raise Exception(123)
"""
with captured_logger(logging.getLogger("distributed.scheduler")) as s_logger:
with captured_logger(logging.getLogger("distributed.worker")) as w_logger:
async with Scheduler(dashboard_address=":0", preload=text) as s:
async with Worker(s.address, preload=[text]) as w:
pass
assert "123" in s_logger.getvalue()
assert "123" in w_logger.getvalue()
@gen_cluster(nthreads=[])
async def test_client_preload_config_click(s):
text = dedent(
"""\
import click
@click.command()
@click.argument("value")
def dask_setup(client, value):
client.foo = value
"""
)
value = "setup"
with dask.config.set(
{
"distributed.client.preload": [text],
"distributed.client.preload-argv": [[value]],
}
):
async with Client(address=s.address, asynchronous=True) as c:
assert c.foo == value
|
the-stack_106_14206
|
from typing import Union
def caesar(msg: str, k: int = 3) -> Union[str, None]:
out = ''
if abs(k) >= 26:
return None
A, Z, a, z = ord('A'), ord('Z'), ord('a'), ord('z')
for c in msg:
oc = ord(c)
if a <= oc <= z:
shift = (oc - a + k) % 26
out += chr(a + shift)
elif A <= oc <= Z:
shift = (oc - A + k) % 26
out += chr(A + shift)
else:
out += c
return out
if __name__ == '__main__':
test_msg = 'this is a test xyz'
encoded_msg = caesar(test_msg)
# print encoded and again decoded msg
print(encoded_msg)
print(caesar(encoded_msg, -3))
|
the-stack_106_14209
|
import unittest
from list import List
from context import Context
from symbol import Symbol
class ListEvaluateTests(unittest.TestCase):
def test_evaluate_define(self):
context = Context()
context.set('define', DefinePrimitive())
list = List(Symbol('define'), List('one', List(1, None)))
self.assertEqual(1, list.evaluate(context))
self.assertEqual(1, context.get('one'))
class DefinePrimitive:
def apply(self, context, args):
name = args.head()
value = args.tail().head()
context.set(name, value)
return value
if __name__ == '__main__':
unittest.main()
|
the-stack_106_14210
|
import unittest
from app.ashare.ashare_strategy1 import AshareStrategy1
class TAshareStrategy1(unittest.TestCase):
def test_calculate_buy_money(self):
cash_amount = 1100000
percent = 0.1
price = 11.3
result = 97
buy_shares = AshareStrategy1.calculate_buy_money(cash_amount, percent, price)
        print('v1 number of shares to buy: {0}; expected: {1}'.format(buy_shares, result))
self.assertEqual(buy_shares, result)
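
# Illustrative sketch (an assumption, not the actual AshareStrategy1 code): a
# calculation consistent with the expected value above would allocate
# cash_amount * percent to the position and buy whole lots of 100 shares,
# since int(1100000 * 0.1 / 11.3 / 100) == 97.
def _calculate_buy_lots_sketch(cash_amount, percent, price, lot_size=100):
    budget = cash_amount * percent
    return int(budget / price / lot_size)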
|
the-stack_106_14212
|
# x_1_9
#
# Turn the duplicated damage calculation below into a shared function
# (a sketch of one possible refactoring is appended at the end of this file)
import random
momotaro = {
'名前': '桃太郎',
'ヒットポイント': 1800,
'攻撃力': 230,
'守備力': 200,
}
aka_oni = {
'名前': '赤鬼',
'ヒットポイント': 2500,
'攻撃力': 250,
'守備力': 250,
}
momotaro_attack = ((momotaro['攻撃力'] / 2) - (aka_oni['守備力'] / 4)) * \
(random.randint(7, 9) / 8)
print(aka_oni['名前'] + ' took ' + str(momotaro_attack) + ' damage')
aka_oni_attack = ((aka_oni['攻撃力'] / 2) - (momotaro['守備力'] / 4)) * \
(random.randint(7, 9) / 8)
print(momotaro['名前'] + ' took ' + str(aka_oni_attack) + ' damage')
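
# Illustrative sketch of the refactoring requested in the comment at the top
# of this file: both damage calculations share the same formula, so they can
# be expressed as one function taking an attacker and a defender (the dict
# keys above are Japanese: '攻撃力' = attack, '守備力' = defense, '名前' = name).
def calc_damage(attacker, defender):
    """Damage dealt by attacker to defender, same formula as above."""
    return ((attacker['攻撃力'] / 2) - (defender['守備力'] / 4)) * \
        (random.randint(7, 9) / 8)

# Example usage:
# print(aka_oni['名前'] + ' took ' + str(calc_damage(momotaro, aka_oni)) + ' damage')
# print(momotaro['名前'] + ' took ' + str(calc_damage(aka_oni, momotaro)) + ' damage')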
|
the-stack_106_14214
|
#Original_Author -- Koki Shirota
import cv2
import numpy as np
def pick_up_blue_ball():
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
while True:
ret,img = cap.read()
size = (640,480)
cimg1 = img
# Convert to hsv
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_blue = np.array([100, 50, 50])
upper_blue = np.array([150, 255, 255])
img_mask_blue = cv2.inRange(hsv, lower_blue, upper_blue)
img_color_blue = cv2.bitwise_and(img, img, mask=img_mask_blue)
# Hough transform (circle detection)
img = img[:,::-1]
img_color_blue = cv2.resize(img_color_blue, size)
img_color_blue = cv2.GaussianBlur(img_color_blue, (33,33), 1)
cimg2 = img_color_blue
img_color_blue = cv2.cvtColor(img_color_blue, cv2.COLOR_RGB2GRAY)
circles = cv2.HoughCircles(img_color_blue,cv2.HOUGH_GRADIENT,1,20,param1=50,param2=30,minRadius=10,maxRadius=120)
if circles is not None:
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
#draw the outer circle
cv2.circle(cimg1,(i[0],i[1]),i[2],(0,255,0),2)
#draw the center of the circle
cv2.circle(cimg1,(i[0],i[1]),2,(0,0,255),3)
else:
print("nothing")
cv2.imshow('original', cimg1)
cv2.imshow('blue',cimg2)
k = cv2.waitKey(10)
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
pick_up_blue_ball()
|
the-stack_106_14215
|
#coding: UTF8
"""
Translation History COM Server
"""
from win32com.server import util
from win32com.client import Dispatch
import win32com.server.register
import pythoncom
from cPickle import load, dump
import sys
import os
import logging
import utils
from utils import debug, warn, error, log_err
from utils import TransUnit
from utils import serialized_source
from utils import FelixObject
__version__ = "0.2"
__progname__ = "Felix Word translation history"
# Module-level logger used by the exception handlers below (assumed standard logging setup).
LOGGER = logging.getLogger(__progname__)
WORD_STORY_SHAPE = 5
def unserialize_seg(selection, segdata):
segment = SegmentObject(selection, segdata["unit"])
start, end = segdata["pos"]
segment.segment.SetRange(start, end)
segment.guard.SetRange(end, end+1)
return segment
def unserialize_segments(selection, segdata):
return [unserialize_seg(selection, seg) for seg in segdata]
def unserialize_shape(document, shapedata):
"""
Unserialize the shape with the given name.
If the unserialize operation fails, return
None.
"""
try:
name = u"Undefined"
name = shapedata["name"]
shape = document.Shapes(name)
shape.TextFrame.TextRange.Select()
shape_obj = ShapeObject(shape)
shape_obj.segments = unserialize_segments(shape.Application.Selection,
shapedata["segments"])
return shape_obj
except:
LOGGER.exception(u"Failed to unserialize shape [%s]" % name)
return None
def unserialize_shapes(document, shapedata):
"""
Unserialize the shapes from the pickled file.
If the unserialize_shape() function returns None,
don't add that shape to our dict.
"""
shapes = {}
for key, val in shapedata.iteritems():
shape = unserialize_shape(document, val)
if shape:
shapes[key] = shape
return shapes
def unserialize_story(document, story):
comstory = document.StoryRanges(story["story_type"])
comstory.Select()
story_obj = StoryObject(comstory)
story_obj.segments = unserialize_segments(comstory.Application.Selection,
story["segments"])
return story_obj
def load_data(document, datasource):
"""
Load the data from the data source.
Synchronize the stories and shapes in the data with the active document.
"""
try:
original_range = document.Application.Selection.Range
data = load(datasource)
stories = data["stories"]
shapes = data["shapes"]
doc_obj = DocumentObject(document)
doc_obj.shapes = unserialize_shapes(document, shapes)
for story in data["stories"]:
story_type = story["story_type"]
doc_obj.stories[story_type] = unserialize_story(document, story)
original_range.Select()
return doc_obj
except:
error("Failed to load serialized data")
raise
def serialize_segment(segment):
segment.expand()
segrange = segment.segment
return dict(pos=(segrange.Start, segrange.End), unit=segment.unit)
def valid_segment(segment):
return segment.segment.Start < segment.segment.End
def serialize_segments(segments):
return [serialize_segment(seg)
for seg in segments
if valid_segment(seg)]
def serialize_shape(shape):
"""Create a dictionary from the shape for serialization"""
name = shape.Name
return dict(name=name,
segments=serialize_segments(shape.segments))
def serialize_story(story):
"""Create a dictionary from the story for serialization"""
story_type = story.StoryType
debug(u"Serializing story: %s" % story_type)
return dict(story_type=story_type,
segments=serialize_segments(story.segments))
def make_serialization_object(docobj):
"""Get the filename and data for serialization"""
try:
filename = serialized_source(docobj)
stories = [serialize_story(story) for key, story in docobj.stories.items()]
shapes = dict([(shape.Name, serialize_shape(shape))
for key, shape in docobj.shapes.items()])
return filename, dict(stories=stories, shapes=shapes)
except:
error("Failed to serialize document data")
raise
class SegmentObject(object):
def __init__(self, selection, unit):
self.segment = None
self.guard = None
self.set_segrange(selection)
self.unit = unit
def set_segrange(self, selection):
self.segment = selection.Range
self.guard = selection.Range
start = self.guard.End
self.guard.SetRange(start, start+1)
def expand(self):
end = self.guard.End-1
segend = self.segment.End
if segend != end:
debug(u"Expanding segment end from %s to %s" % (segend, end))
self.segment.End = end
self.guard.Start = end
def __repr__(self):
return u"<SegmentObject (%s, %s): %s>" % (self.segment.Start,
self.segment.End,
self.unit)
def make_segment(felix, selection, text):
record = felix.get_record()
unit = TransUnit(record.Id, record.Source, text)
return SegmentObject(selection, unit)
class ShapeObject(object):
def __init__(self, shape):
self.shape = shape
self.segments = []
def _get_name(self):
return self.shape.Name
Name = property(_get_name)
def __repr__(self):
return u"<ShapeObject (%s): %s>" % (self.Name, self.segments)
class StoryObject(object):
"""Wraps a Word story object"""
def __init__(self, story):
app = story.Application
doc = app.ActiveDocument
story_type = story.StoryType
the_story = doc.StoryRanges(story_type)
self.story = the_story
self.segments = []
def _get_storytype(self):
return self.story.StoryType
StoryType = property(_get_storytype)
def __repr__(self):
return u"<StoryObject (%s): %s segments>" % (self.StoryType,
len(self.segments))
class DocumentObject(object):
"""Wraps a document object from Word"""
def __init__(self, document):
self.document = document
self.stories = {}
self.shapes = {}
def _get_fullname(self):
return self.document.FullName
FullName = property(_get_fullname)
def segments_equal(segment_range, selection_range):
if segment_range.Start != selection_range.Start:
return False
return segment_range.End >= selection_range.End
def fit_segment(segments, segment):
start = segment.segment.Start
end = segment.segment.End
for seg in segments:
segrange = seg.segment
segstart, segend = segrange.Start, segrange.End
if segstart < end <= segend:
if not (start, end) == (segstart, segend):
segrange.Start = end
elif segstart < start <= segend:
segrange.End = start
seg.guard.SetRange(start, start+1)
segments.append(segment)
class TransHistoryWord(object):
"""Translation history server for Word files"""
_public_methods_ = ["RecordTrans",
"ShutDown",
"LookupTrans",
"ReflectChanges",
"CorrectTrans",
"CorrectTransFixed",
"OpenDoc"]
_public_attrs_ = ['App', 'Felix', 'Parser']
_reg_progid_ = "FelixTransHistory.Word"
# import pythoncom
# print pythoncom.CreateGuid()
_reg_clsid_ = "{D967E6DC-3F23-4331-B481-AF803988E249}"
def __init__(self):
self._app = None
self._felix = None
self._parser = None
self._history = []
debug("FelixTransHistory.Word")
def get_felix(self):
"""
Get the Felix COM server.
Create it if it doesn't exist.
"""
if not self._felix:
self._felix = FelixObject()
return self._felix
def get_document(self, active_doc):
"""
Get the DocumentObject for the
active document. Add it if it doesn't
already exist.
"""
name = active_doc.FullName
for _doc in self._history:
if _doc.FullName == name:
return _doc
if os.path.exists(serialized_source(active_doc)):
datasource = open(serialized_source(active_doc))
self._history.append(load_data(active_doc, datasource))
else:
self._history.append(DocumentObject(active_doc))
return self._history[-1]
def Parser(self):
"""Get the parser COM server."""
return self._parser
def SetParser(self, parser):
"""Set the parser COM server."""
try:
self._parser = Dispatch(parser)
except:
LOGGER.exception("Error setting parser")
raise
def App(self):
"""Get the Word application"""
return self._app
def SetApp(self, app):
"""Set the Word application"""
debug("Set Word application")
try:
self._app = Dispatch(app)
except:
LOGGER.exception("Error setting Word application")
raise
def Felix(self):
"""Get the Felix application"""
self._felix.ensure_felix()
return self._felix._felix
def SetFelix(self, felix):
"""
Set the Felix application.
Create if it hasn't been set.
"""
try:
if not felix:
self._felix = FelixObject()
else:
self._felix = FelixObject(Dispatch(felix))
except:
LOGGER.exception("Error setting Felix application")
raise
def get_shape(self, doc, selection):
"""
Get the shape of the current selection.
Assumes that ShapeRange is non-empty.
"""
shape = selection.ShapeRange[0]
try:
return doc.shapes[shape.Name]
except KeyError:
shapeobj = ShapeObject(shape)
doc.shapes[shape.Name] = shapeobj
return shapeobj
def add_segment(self, holder, segment):
"""
Add the segment to the holder (either story or shape).
If the segment already exists, then update the translation unit.
"""
for seg in holder.segments:
if segments_equal(segment.segment, seg.segment):
old_sel = self._app.Selection
seg.set_segrange(self._app.Selection)
seg.unit = segment.unit
old_sel.Select()
return
fit_segment(holder.segments, segment)
def get_story(self, doc, selection):
"""
Get the story from the active document.
Add it if it doesn't exist.
"""
story_type = selection.StoryType
if story_type not in doc.stories:
doc.stories[story_type] = StoryObject(doc.document.StoryRanges(story_type))
return doc.stories[story_type]
def record_shape(self, doc, selection):
"""
Record the translation for a segment in a shape.
"""
shape = self.get_shape(doc, selection)
self.add_segment(shape, make_segment(self.get_felix(),
selection,
self.parse_text(selection)))
def record_range(self, doc, selection):
story = self.get_story(doc, selection)
self.add_segment(story, make_segment(self.get_felix(),
selection,
self.parse_text(selection)))
def RecordTrans(self):
"""
Record a translation in the history
"""
try:
doc = self.get_document(self._app.ActiveDocument)
selection = Dispatch(self._app.Selection)
if selection.StoryType == WORD_STORY_SHAPE:
self.record_shape(doc, selection)
else:
self.record_range(doc, selection)
except:
LOGGER.exception("Error recording translation")
raise
def get_segments(self, doc, selection):
if selection.StoryType == WORD_STORY_SHAPE:
shape = self.get_shape(doc, selection)
return shape.segments
else:
story = self.get_story(doc, selection)
return story.segments
def LookupTrans(self):
try:
doc = self.get_document(self._app.ActiveDocument)
selection = Dispatch(self._app.Selection)
felix = self.get_felix()
seg = self.find_segment(doc, selection)
if seg:
reflect = felix.ReviewTranslation
self.reflect_segment(Dispatch(self._app.Selection),
seg,
reflect)
return
felix.LookupTrans(self.parse_text(selection))
except:
LOGGER.exception("Error looking up translation")
raise
def find_segment(self, doc, selection):
"""
See if we have a segment in our history matching the
segment being looked up.
Allow for the possibility that our segment is actually longer
than the selection (like maybe we had two translation sentences
for one source segment)
"""
selrange = selection.Range
for seg in self.get_segments(doc, selection):
if segments_equal(seg.segment, selrange):
seg.expand()
if seg.segment.End > selrange.End:
seg.segment.Select()
else:
seg.set_segrange(selection)
return seg
return None
def CorrectTrans(self):
try:
doc = self.get_document(self._app.ActiveDocument)
selection = Dispatch(self._app.Selection)
felix = self.get_felix()
seg = self.find_segment(doc, selection)
if seg:
reflect = felix.ReflectChanges
self.reflect_segment(selection, seg, reflect)
return
felix.CorrectTrans(self.parse_text(selection))
self.RecordTrans()
except:
LOGGER.exception("Error correcting translation")
raise
def parse_text(self, selection):
try:
return self._parser.RangeToHtml(selection)
except:
LOGGER.exception(u"Failed to parse Word range to HTML")
return selection.Text
def CorrectTransFixed(self):
try:
doc = self.get_document(self._app.ActiveDocument)
selection = Dispatch(self._app.Selection)
felix = self.get_felix()
selrange = selection.Range
selpos = selrange.Start, selrange.End
for seg in self.get_segments(doc, selection):
segment = seg.segment
if (segment.Start, segment.End) == selpos:
reflect = felix.ReflectChanges
self.reflect_segment(selection, seg, reflect)
return
felix.CorrectTrans(self.parse_text(selection))
self.RecordTrans()
except:
LOGGER.exception("Error correcting translation")
raise
def ReflectChanges(self):
"""
Reflect changes to the document in the translation memory
"""
try:
docname = self._app.ActiveDocument.FullName
debug(u"Reflecting changes in Word document %s" % docname)
doc = self.get_document(self._app.ActiveDocument)
selection = Dispatch(self._app.Selection)
start, end = selection.Start, selection.End
felix = self.get_felix()
reflect = felix.ReflectChanges
debug("Reflecting shape translations")
for key, shape in doc.shapes.items():
self.reflect_segments(selection, shape.segments, reflect)
for key, story in doc.stories.items():
debug(u"Reflecting translations for story %s" % story.StoryType)
self.reflect_segments(selection, story.segments, reflect)
except:
LOGGER.exception("Error correcting translation")
raise
def reflect_segments(self, selection, segments, reflect):
"""
Expand to the guard. This ensures that if we added text to the
end, that we capture it.
Next, we make sure that we didn't step on any other toes.
"""
end = 0
segments = sorted([(seg.segment.Start, seg) for seg in segments])
for start, segment in segments:
segment.expand()
segrange = segment.segment
if start < end:
segrange.Start = end
segrange.Select()
this_end = segrange.End
if end < this_end:
self.reflect_segment(selection, segment, reflect)
end = max(end, this_end)
def reflect_segment(self, selection, segment, reflect):
seg = segment.segment
seg.Select()
unit = segment.unit
reflect(unit.recid,
unit.source,
self.parse_text(selection))
def OpenDoc(self, docdisp):
try:
debug("Document opening")
try:
comdoc = Dispatch(docdisp)
name = comdoc.FullName
except:
# no harm, no foul
LOGGER.exception("Failed to retrieve document object")
return
# if it's already loaded, don't do anything
for _doc in self._history:
if _doc.FullName == name:
return
debug(" ... Document history not loaded. Checking for file.")
# it's not loaded, and there's a translation history
if os.path.exists(serialized_source(comdoc)):
debug(" ... Loading document history")
datasource = open(serialized_source(comdoc))
self._history.append(load_data(comdoc, datasource))
except:
LOGGER.exception("Error handling document open event")
raise
def ShutDown(self, docdisp):
try:
debug("Document closing")
try:
comdoc = Dispatch(docdisp)
name = comdoc.FullName
except:
# no harm, no foul
LOGGER.exception("Failed to retrieve document object")
return
for doc in self._history:
if name == doc.FullName:
debug(u"Serializing history for %s" % doc.FullName)
filename, data = make_serialization_object(doc)
dump(data, file(filename, "w"))
# We've got to remove the document
self.remove_doc(name)
return
# We've got to remove the document
self.remove_doc(name)
debug("No history for document")
except:
LOGGER.exception("Error shutting down")
raise
def remove_doc(self, name):
self._history = [doc for doc in self._history
if name != doc.FullName]
def reg():
"""Register COM servers"""
debug("Registering COM servers")
win32com.server.register.UseCommandLine(TransHistoryWord)
utils.determine_redirect("history_word.log")
# Add code so that when this script is run by
# Python.exe, it self-registers.
if __name__ == '__main__':
reg()
|
the-stack_106_14217
|
#!/usr/bin/env python
import numpy as np
import os
# on Windows, we need the original PATH without Anaconda's compiler in it:
PATH = os.environ.get('PATH') + ';C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\bin'
from distutils.spawn import spawn, find_executable
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
import sys
# CUDA specific config
# nvcc is assumed to be in user's PATH
nvcc_compile_args = ['-O', '--ptxas-options=-v', '-arch=sm_35', '-c', '--compiler-options=-fPIC']
nvcc_compile_args = os.environ.get('NVCCFLAGS', '').split() + nvcc_compile_args
cuda_libs = ['cublas']
nvcc_bin = 'nvcc.exe'
lib_dir = 'lib/x64'
import distutils.msvc9compiler
distutils.msvc9compiler.VERSION = 14.0
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
cudamat_ext = Extension('gpu_mv', ['gpu_mv.cu'],
language='c++',
libraries=cuda_libs,
extra_compile_args=nvcc_compile_args,
include_dirs=[numpy_include, 'C:\\Programming\\CUDA\\v8.0\\include'])
class CUDA_build_ext(build_ext):
"""
Custom build_ext command that compiles CUDA files.
Note that all extension source files will be processed with this compiler.
"""
def build_extensions(self):
self.compiler.src_extensions.append('.cu')
self.compiler.set_executable('compiler_so', 'nvcc')
self.compiler.set_executable('linker_so', 'nvcc --shared')
if hasattr(self.compiler, '_c_extensions'):
self.compiler._c_extensions.append('.cu') # needed for Windows
self.compiler.spawn = self.spawn
build_ext.build_extensions(self)
def spawn(self, cmd, search_path=1, verbose=0, dry_run=0):
"""
Perform any CUDA specific customizations before actually launching
compile/link etc. commands.
"""
if (sys.platform == 'darwin' and len(cmd) >= 2 and cmd[0] == 'nvcc' and
cmd[1] == '--shared' and cmd.count('-arch') > 0):
# Versions of distutils on OSX earlier than 2.7.9 inject
# '-arch x86_64' which we need to strip while using nvcc for
# linking
while True:
try:
index = cmd.index('-arch')
del cmd[index:index+2]
except ValueError:
break
elif self.compiler.compiler_type == 'msvc':
# There are several things we need to do to change the commands
# issued by MSVCCompiler into one that works with nvcc. In the end,
# it might have been easier to write our own CCompiler class for
# nvcc, as we're only interested in creating a shared library to
# load with ctypes, not in creating an importable Python extension.
# - First, we replace the cl.exe or link.exe call with an nvcc
# call. In case we're running Anaconda, we search cl.exe in the
# original search path we captured further above -- Anaconda
# inserts a MSVC version into PATH that is too old for nvcc.
cmd[:1] = ['nvcc', '--compiler-bindir',
os.path.dirname(find_executable("cl.exe", PATH))
or cmd[0]]
# - Secondly, we fix a bunch of command line arguments.
for idx, c in enumerate(cmd):
# create .dll instead of .pyd files
#if '.pyd' in c: cmd[idx] = c = c.replace('.pyd', '.dll') #20160601, by MrX
# replace /c by -c
if c == '/c': cmd[idx] = '-c'
# replace /DLL by --shared
elif c == '/DLL': cmd[idx] = '--shared'
# remove --compiler-options=-fPIC
elif '-fPIC' in c: del cmd[idx]
# replace /Tc... by ...
elif c.startswith('/Tc'): cmd[idx] = c[3:]
# replace /Fo... by -o ...
elif c.startswith('/Fo'): cmd[idx:idx+1] = ['-o', c[3:]]
# replace /LIBPATH:... by -L...
elif c.startswith('/LIBPATH:'): cmd[idx] = '-L' + c[9:]
# replace /OUT:... by -o ...
elif c.startswith('/OUT:'): cmd[idx:idx+1] = ['-o', c[5:]]
# remove /EXPORT:initlibcudamat or /EXPORT:initlibcudalearn
elif c.startswith('/EXPORT:'): del cmd[idx]
# replace cublas.lib by -lcublas
elif c == 'cublas.lib': cmd[idx] = '-lcublas'
# - Finally, we pass on all arguments starting with a '/' to the
# compiler or linker, and have nvcc handle all other arguments
if '--shared' in cmd:
pass_on = '--linker-options='
# we only need MSVCRT for a .dll, remove CMT if it sneaks in:
cmd.append('/NODEFAULTLIB:libcmt.lib')
else:
pass_on = '--compiler-options='
cmd = ([c for c in cmd if c[0] != '/'] +
[pass_on + ','.join(c for c in cmd if c[0] == '/')])
# For the future: Apart from the wrongly set PATH by Anaconda, it
# would suffice to run the following for compilation on Windows:
# nvcc -c -O -o <file>.obj <file>.cu
# And the following for linking:
# nvcc --shared -o <file>.dll <file1>.obj <file2>.obj -lcublas
# This could be done by a NVCCCompiler class for all platforms.
spawn(cmd, search_path, verbose, dry_run)
setup(name="mask_voting_gpu",
description="Performs linear algebra computation on the GPU via CUDA",
ext_modules=[cudamat_ext],
cmdclass={'build_ext': CUDA_build_ext},
)
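# Typical invocation to build the CUDA extension in place (illustrative;
# assumes nvcc.exe and cl.exe are reachable on PATH as configured above):
#   python setup.py build_ext --inplace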
|
the-stack_106_14221
|
# jaccardIndex.py
# ===============
#
# Compute the Jaccard index of two binary
# images on the GPU.
#
# Author: Robert Haase, [email protected]
# December 2019
#########################################
from ij import IJ;
from net.haesleinhuepf.clijx import CLIJx;
IJ.run("Close All");
# init GPU
clijx = CLIJx.getInstance();
# init two binary images
image1 = clijx.create([100, 100, 10], clijx.UnsignedByte);
image2 = clijx.create([100, 100, 10], clijx.UnsignedByte);
temp = clijx.create([100, 100, 10], clijx.UnsignedByte);
clijx.set(image1, 0);
clijx.set(image2, 0);
# set two spheres
clijx.drawSphere(image1, 50, 50, 5, 20, 20, 5);
clijx.drawSphere(image2, 40, 40, 5, 20, 20, 5);
# visualise overlap
clijx.showRGB(image2, image1, image2, "Overlap (single plane)");
# compute and output overlap
jaccardIndex = clijx.jaccardIndex(image1, image2);
diceIndex = clijx.sorensenDiceCoefficient(image1, image2);
print("Jaccard index:" + str(jaccardIndex));
print("Dice index:" + str(diceIndex));
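# For reference (standard definitions, with A and B the two binary images):
#   Jaccard index    J = |A ∩ B| / |A ∪ B|
#   Dice coefficient D = 2|A ∩ B| / (|A| + |B|) = 2J / (1 + J)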
# clean up at the end
clijx.clear();
|
the-stack_106_14222
|
# -*- coding: utf-8 -*-
"""
Free algebra elements
AUTHORS:
- David Kohel (2005-09)
TESTS::
sage: R.<x,y> = FreeAlgebra(QQ,2)
sage: x == loads(dumps(x))
True
sage: x*y
x*y
sage: (x*y)^0
1
sage: (x*y)^3
x*y*x*y*x*y
"""
#*****************************************************************************
# Copyright (C) 2005 David Kohel <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details; the full text
# is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.repr import repr_lincomb
from sage.monoids.free_monoid_element import FreeMonoidElement
from sage.modules.with_basis.indexed_element import IndexedFreeModuleElement
from sage.structure.element import AlgebraElement
class FreeAlgebraElement(IndexedFreeModuleElement, AlgebraElement):
"""
A free algebra element.
TESTS:
The ordering is inherited from ``IndexedFreeModuleElement``::
sage: R.<x,y> = FreeAlgebra(QQ,2)
sage: x < y
True
sage: x * y < y * x
True
sage: y * x < x * y
False
"""
def __init__(self, A, x):
"""
Create the element ``x`` of the FreeAlgebra ``A``.
TESTS::
sage: F.<x,y,z> = FreeAlgebra(QQ, 3)
sage: elt = x^3 * y - z^2*x
sage: TestSuite(elt).run()
"""
if isinstance(x, FreeAlgebraElement):
# We should have an input for when we know we don't need to
# convert the keys/values
x = x._monomial_coefficients
R = A.base_ring()
if isinstance(x, AlgebraElement): # and x.parent() == A.base_ring():
x = {A.monoid()(1): R(x)}
elif isinstance(x, FreeMonoidElement):
x = {x: R(1)}
elif True:
x = {A.monoid()(e1): R(e2) for e1, e2 in x.items()}
else:
raise TypeError("argument x (= {}) is of the wrong type".format(x))
IndexedFreeModuleElement.__init__(self, A, x)
def _repr_(self):
"""
Return string representation of self.
EXAMPLES::
sage: A.<x,y,z> = FreeAlgebra(ZZ,3)
sage: repr(-x+3*y*z) # indirect doctest
'-x + 3*y*z'
Trac ticket :trac:`11068` enables the use of local variable names::
sage: from sage.structure.parent_gens import localvars
sage: with localvars(A, ['a','b','c']):
....: print(-x+3*y*z)
-a + 3*b*c
"""
v = sorted(self._monomial_coefficients.items())
P = self.parent()
M = P.monoid()
from sage.structure.parent_gens import localvars
with localvars(M, P.variable_names(), normalize=False):
x = repr_lincomb(v, strip_one=True)
return x
def _latex_(self):
r"""
Return latex representation of self.
EXAMPLES::
sage: A.<x,y,z>=FreeAlgebra(ZZ,3)
sage: latex(-x+3*y^20*z) # indirect doctest
-x + 3 y^{20}z
sage: alpha,beta,gamma=FreeAlgebra(ZZ,3,'alpha,beta,gamma').gens()
sage: latex(alpha-beta)
\alpha - \beta
"""
v = sorted(self._monomial_coefficients.items())
return repr_lincomb(v, strip_one=True, is_latex=True)
def __call__(self, *x, **kwds):
"""
EXAMPLES::
sage: A.<x,y,z>=FreeAlgebra(ZZ,3)
sage: (x+3*y).subs(x=1,y=2,z=14)
7
sage: (2*x+y).subs({x:1,y:z})
2 + z
sage: f=x+3*y+z
sage: f(1,2,1/2)
15/2
sage: f(1,2)
Traceback (most recent call last):
...
ValueError: must specify as many values as generators in parent
AUTHORS:
- Joel B. Mohler (2007-10-27)
"""
if kwds and x:
raise ValueError("must not specify both a keyword and positional argument")
if kwds:
p = self.parent()
def extract_from(kwds,g):
for x in g:
try:
return kwds[x]
except KeyError:
pass
return None
x = [extract_from(kwds,(p.gen(i),p.variable_name(i))) for i in range(p.ngens())]
elif isinstance(x[0], tuple):
x = x[0]
if len(x) != self.parent().ngens():
raise ValueError("must specify as many values as generators in parent")
# I don't start with 0, because I don't want to preclude evaluation with
# arbitrary objects (e.g. matrices) because of funny coercion.
result = None
for m, c in self._monomial_coefficients.items():
if result is None:
result = c*m(x)
else:
result += c*m(x)
if result is None:
return self.parent().zero()
return result
def _mul_(self, y):
"""
Return the product of ``self`` and ``y`` (another free algebra
element with the same parent).
EXAMPLES::
sage: A.<x,y,z> = FreeAlgebra(ZZ,3)
sage: (x+y+x*y)*(x+y+1)
x + y + x^2 + 2*x*y + y*x + y^2 + x*y*x + x*y^2
"""
A = self.parent()
z_elt = {}
for mx, cx in self:
for my, cy in y:
key = mx*my
if key in z_elt:
z_elt[key] += cx*cy
else:
z_elt[key] = cx*cy
if not z_elt[key]:
del z_elt[key]
return A._from_dict(z_elt)
def _acted_upon_(self, scalar, self_on_left=False):
"""
Return the action of a scalar on ``self``.
EXAMPLES::
sage: R.<x,y> = FreeAlgebra(QQ,2)
sage: f = Factorization([(x,2),(y,3)]); f
x^2 * y^3
sage: x * f
x^3 * y^3
sage: f * x
x^2 * y^3 * x
"""
from sage.structure.factorization import Factorization
# FIXME: Make factorization work properly in the coercion framework
# Keep factorization since we want to "coerce" into a factorization
if isinstance(scalar, Factorization):
if self_on_left:
return Factorization([(self, 1)]) * scalar
return scalar * Factorization([(self, 1)])
return super(FreeAlgebraElement, self)._acted_upon_(scalar, self_on_left)
# For backward compatibility
#_lmul_ = _acted_upon_
#_rmul_ = _acted_upon_
def variables(self):
"""
Return the variables used in ``self``.
EXAMPLES::
sage: A.<x,y,z> = FreeAlgebra(ZZ,3)
sage: elt = x + x*y + x^3*y
sage: elt.variables()
[x, y]
sage: elt = x + x^2 - x^4
sage: elt.variables()
[x]
sage: elt = x + z*y + z*x
sage: elt.variables()
[x, y, z]
"""
v = set([])
for s in self._monomial_coefficients: # Only gets the keys
for var,exp in s:
v.add(var)
A = self.parent()
return sorted(map(A, v))
def to_pbw_basis(self):
"""
Return ``self`` in the Poincaré-Birkhoff-Witt (PBW) basis.
EXAMPLES::
sage: F.<x,y,z> = FreeAlgebra(ZZ, 3)
sage: p = x^2*y + 3*y*x + 2
sage: p.to_pbw_basis()
2*PBW[1] + 3*PBW[y]*PBW[x] + PBW[x^2*y]
+ 2*PBW[x*y]*PBW[x] + PBW[y]*PBW[x]^2
"""
return self.parent().pbw_element(self)
|
the-stack_106_14223
|
#!/usr/bin/env python3
'''
GET OLD TWEETS
Command line wrapper
Simon Lindgren 200219
'''
import got3 as got
import sqlite3
import re
import sys
import datetime
def main():
print("GET OLD TWEETS")
print("==============")
project_setup()
create_database()
run_search()
remove_duplicates()
def project_setup():
projname = input("Project name? ")
global dbname
dbname = (projname + ".db")
print("Searches can be done by search term(s), by username(s), or by both in combination")
# QUERYSEARCH
print("")
print("Search terms, one or several separated by comma")
print("Leave empty to only search by username")
global keywords
keywords = ""
keywords = input('e.g. monkey,"time for bananas",#ape2020,"donkey kong": ')
# USERNAMES
print("")
print("Usernames, one or several separated by space")
print("Leave empty to only search by terms")
global usernames
usernames = ""
usernames = input('e.g. @nintendo @jupyter (with or without the "@"): ')
usernames = [un for un in usernames.split()]
# DATES
print("")
print("Enter date range for search in YYYY-MM-DD format")
global since
since = (input("start date UTC (included in search): "))
validate(since)
global until
until = (input("end date UTC (excluded from search): "))
validate(until)
# TOPTWEETS
print("")
print("Do you want to get only the Top Tweets?")
global toptweets
top_t = input("y/n? ")
if top_t == "y":
toptweets = True
else:
toptweets = False
#MAXTWEETS
print("")
print("\nEnter maximum number of tweets to get per search term, or set 0 to get all possible tweets")
global maxtweets
maxtweets = (input("max tweets "))
if maxtweets.isnumeric():
maxtweets = int(maxtweets)
pass
else:
print("You did not enter a numeric value")
sys.exit()
def create_database():
try:
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute("""CREATE TABLE tweets (
tweet_id TEXT,
author TEXT,
in_reply_to TEXT,
tweet TEXT,
date TEXT,
retweets INT,
favourites INT,
mentions TEXT,
hashtags TEXT,
geo TEXT)
""")
conn.close()
except:
print("A database with this name already exists")
sys.exit()
def run_search():
for kw in keywords.split(","):
print("Getting tweets for " + kw)
conn = sqlite3.connect(dbname)
tweetCriteria = got.manager.TweetCriteria()
# Set the search parameters that we always set
tweetCriteria.setMaxTweets(maxtweets)
tweetCriteria.setSince(since)
tweetCriteria.setUntil(until)
tweetCriteria.setTopTweets(toptweets)
tweetCriteria.setEmoji("unicode")
if len(keywords) != 0:
tweetCriteria.setQuerySearch(kw)
if len(usernames) != 0:
tweetCriteria.setUsername(usernames)
tweets=got.manager.TweetManager.getTweets(tweetCriteria)
for t in tweets:
tweet_id = t.id
author = t.username
in_reply_to = t.to
tweet = t.text
date = t.date
retweets = t.retweets
favourites = t.favorites
mentions = t.mentions
hashtags = t.hashtags
geo = t.geo
conn.execute('INSERT INTO tweets (tweet_id, author, in_reply_to, tweet, date, retweets, favourites, mentions, hashtags,geo) VALUES (?,?,?,?,?,?,?,?,?,?)',\
(tweet_id, author, in_reply_to, tweet, date, retweets, favourites, mentions, hashtags, geo))
conn.commit()
def remove_duplicates():
conn = sqlite3.connect(dbname)
cur = conn.cursor()
cur.execute("CREATE TABLE temp_table as SELECT DISTINCT * FROM tweets")
cur.execute("DELETE from tweets")
conn.commit()
cur.execute("INSERT INTO tweets SELECT * FROM temp_table")
cur.execute("DELETE from temp_table")
conn.commit()
cur.execute("SELECT max(rowid) from tweets")
n = cur.fetchone()[0]
print("\n" + str(n) + " tweets written to database\n")
def validate(date_text):
try:
datetime.datetime.strptime(date_text, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect data format, should be YYYY-MM-DD")
if __name__ == '__main__':
main()
|
the-stack_106_14224
|
# import required module
import os,calendar;
from time import strptime
import yaml
# assign directory
directory = '_data'
# iterate over files in
# that directory
for filename in os.listdir(directory):
if 'covid' in filename:
#covid-day-status-2020.yaml extract year
year=filename[17:21]
#print(year)
stats_path="_posts/stats/" + year
#print(stats_path)
if not os.path.isdir(stats_path):
print("")
try:
os.mkdir(stats_path)
#[os.mkdir(stats_path+"/"+m) for m in calendar.month_name if m]
except OSError:
print ("Creation of the directory %s failed" % stats_path)
else:
print ("Successfully created the directory %s " % stats_path)
for m in calendar.month_name:
month_path=stats_path+"/"+m
if not os.path.isdir(month_path):
try:
os.mkdir(month_path)
except OSError:
print ("Creation of the directory %s failed" % month_path)
else:
print ("Successfully created the directory %s " % month_path)
with open(os.path.join(directory,filename), 'r') as stream:
try:
#print(yaml.safe_load(stream))
data = yaml.safe_load(stream)
for result in data:
#print(result)
#2020-07-24-case-update-july-24
date = result['day'].split("-")
month_int = strptime(date[1],'%b').tm_mon
month_str=str(month_int)
date_str = month_str+ '-' + date[0]
case_file=year+ '-' + date_str +'-case-update-'+ date[1] + '-' + date[0] +'.md'
case_file_path = stats_path + "/" + calendar.month_name[month_int] + "/" + case_file
#print(case_file_path )
#print(case_file)
if not os.path.isfile(case_file_path):
f = open(case_file_path, "w")
f.write('--- \n'\
'layout: post \n'\
'title: "Covid Updates for ' + date[0] + '-'+ calendar.month_name[month_int]+ '-'+ year + '" \n'\
'date: '+ year+ '-' + date_str + ' 13:20:40 +0530 \n'\
'categories: stats \n'\
'--- \n\n'\
'Hubballi - Dharwad District \n\n'\
'No. of Cases > '+result['total']+ ' \n\n'\
'No. of Active > '+result['active']+ ' \n\n'\
'No. of New Case > '+result['new_case']+ ' \n\n'\
'No. of Recovered > '+ result['recovered'] + ' \n\n'\
'No. of Deceased > '+ result['deceased'] + ' \n\n'\
)
if 'icu' in result:
f.write('No. of ICU Cases > '+ result['icu'] + '\n\n')
if 'vaccinated' in result:
f.write('No. of Vaccination Done > '+ result['vaccinated']+ '\n\n')
f.write('View All Stats at [https://slabs.tech/covid/stats/](https://slabs.tech/covid/stats/)')
f.close()
except yaml.YAMLError as exc:
print(exc)
# f = os.path.join(directory, filename)
# checking if it is a file
# if os.path.isfile(f):
# print(f)
|
the-stack_106_14225
|
from numpy import arange, int64
from pytest import raises
from hypothesis import given
from hypothesis.strategies import integers, one_of
from ..integer import Integer
from ..ndindex import ndindex
from ..tuple import Tuple
from .helpers import check_same, ints, prod, shapes
def test_integer_args():
zero = Integer(0)
assert zero.raw == 0
idx = Integer(int64(0))
assert idx == zero
assert idx.raw == 0
assert isinstance(idx.raw, int)
assert Integer(zero) == zero
def test_integer_exhaustive():
a = arange(10)
for i in range(-12, 12):
check_same(a, i)
@given(ints(), integers(5, 100))
def test_integer_hypothesis(i, size):
a = arange(size)
check_same(a, i)
def test_integer_len_exhaustive():
for i in range(-12, 12):
idx = Integer(i)
assert len(idx) == 1
@given(ints())
def test_integer_len_hypothesis(i):
idx = Integer(i)
assert len(idx) == 1
def test_integer_reduce_exhaustive():
a = arange(10)
for i in range(-12, 12):
check_same(a, i, func=lambda x: x.reduce((10,)))
try:
reduced = Integer(i).reduce(10)
except IndexError:
pass
else:
assert reduced.raw >= 0
@given(ints(), shapes)
def test_integer_reduce_hypothesis(i, shape):
a = arange(prod(shape)).reshape(shape)
# The axis argument is tested implicitly in the Tuple.reduce test. It is
# difficult to test here because we would have to pass in a Tuple to
# check_same.
check_same(a, i, func=lambda x: x.reduce(shape))
try:
reduced = Integer(i).reduce(shape)
except IndexError:
pass
else:
assert reduced.raw >= 0
def test_integer_reduce_no_shape_exhaustive():
a = arange(10)
for i in range(-12, 12):
check_same(a, i, func=lambda x: x.reduce())
@given(ints(), shapes)
def test_integer_reduce_no_shape_hypothesis(i, shape):
a = arange(prod(shape)).reshape(shape)
check_same(a, i, func=lambda x: x.reduce())
def test_integer_newshape_exhaustive():
shape = 5
a = arange(shape)
for i in range(-10, 10):
def assert_equal(x, y):
newshape = ndindex(i).newshape(shape)
assert x.shape == y.shape == newshape
# Call newshape so we can see if any exceptions match
def func(i):
i.newshape(shape)
return i
check_same(a, i, func=func, assert_equal=assert_equal)
@given(ints(), one_of(shapes, integers(0, 10)))
def test_integer_newshape_hypothesis(i, shape):
if isinstance(shape, int):
a = arange(shape)
else:
a = arange(prod(shape)).reshape(shape)
def assert_equal(x, y):
newshape = ndindex(i).newshape(shape)
assert x.shape == y.shape == newshape
# Call newshape so we can see if any exceptions match
def func(i):
i.newshape(shape)
return i
check_same(a, i, func=func, assert_equal=assert_equal)
def test_integer_newshape_ndindex_input():
raises(TypeError, lambda: Integer(1).newshape(Tuple(2, 1)))
raises(TypeError, lambda: Integer(1).newshape(Integer(2)))
def test_integer_newshape_small_shape():
raises(IndexError, lambda: Integer(6).newshape(2))
raises(IndexError, lambda: Integer(6).newshape((4, 4)))
|
the-stack_106_14228
|
# -*- coding: utf-8 -*-
# flake8: noqa
# pylint: skip-file
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import os
import sys
from recommonmark.parser import CommonMarkParser
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# For sphinx-apidoc
sys.path.insert(0, os.path.abspath('../src/'))
# Get version info dynamically
with open('../VERSION') as v:
version_ = v.read().strip()
add_module_names = False
# Auto-generate API documentation for readthedocs.org
# See https://github.com/rtfd/readthedocs.org/issues/1139#issuecomment-398083449 # noqa: E501
def run_apidoc(_):
ignore_paths = []
argv = [
'-f',
# '-T',
'-e',
'-M',
'-o', './_apidoc',
'../src/'
] + ignore_paths # yapf: disable
try:
# Sphinx 1.7+
from sphinx.ext import apidoc
apidoc.main(argv)
except ImportError:
# Sphinx 1.6 (and earlier)
from sphinx import apidoc
argv.insert(0, apidoc.__file__)
apidoc.main(argv)
def setup(app):
app.connect('builder-inited', run_apidoc)
# -- Project information -----------------------------------------------------
project = 'garage'
copyright = '2019, garage contributors'
author = 'garage contributors'
# The short X.Y version.
version = version_
# The full version, including alpha/beta/rc tags.
release = version_
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Markdown parsing
source_parsers = {
'.md': CommonMarkParser,
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'garagedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'garage.tex', 'garage Documentation', 'garage contributors',
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'garage', 'garage Documentation', [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'garage', 'garage Documentation', author, 'garage',
'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
autodoc_mock_imports = [
'dm_control', 'torch', 'torchvision', 'mujoco_py', 'glfw'
]
|
the-stack_106_14232
|
from systems.plugins.index import ProviderMixin
from utility.data import ensure_list
import os
import boto3
import random
class AWSServiceMixin(ProviderMixin('aws_service')):
@classmethod
def generate(cls, plugin, generator):
super().generate(plugin, generator)
def add_credentials(self, config):
self.aws_credentials(config)
def remove_credentials(self, config):
self.clean_aws_credentials(config)
plugin.add_credentials = add_credentials
plugin.remove_credentials = remove_credentials
def aws_credentials(self, config):
try:
config['access_key'] = self.command.get_config('aws_access_key', required = True).strip()
os.environ['AWS_ACCESS_KEY_ID'] = config['access_key']
config['secret_key'] = self.command.get_config('aws_secret_key', required = True).strip()
os.environ['AWS_SECRET_ACCESS_KEY'] = config['secret_key']
except Exception:
self.command.error("To use AWS provider you must have 'aws_access_key' and 'aws_secret_key' environment configurations; see: config save")
return config
def clean_aws_credentials(self, config):
config.pop('access_key', None)
os.environ.pop('AWS_ACCESS_KEY_ID', None)
config.pop('secret_key', None)
os.environ.pop('AWS_SECRET_ACCESS_KEY', None)
def _init_aws_session(self):
if not getattr(self, 'session', None):
config = self.aws_credentials({})
self.session = boto3.Session(
aws_access_key_id = config['access_key'],
aws_secret_access_key = config['secret_key']
)
def ec2(self, network):
self._init_aws_session()
return self.session.client('ec2',
region_name = network.config['region']
)
def efs(self, network):
self._init_aws_session()
return self.session.client('efs',
region_name = network.config['region']
)
def get_aws_ec2_keynames(self, network, ec2 = None):
if not ec2:
ec2 = self.ec2(network)
key_names = []
keypairs = ec2.describe_key_pairs()
for keypair in keypairs['KeyPairs']:
key_names.append(keypair['KeyName'])
return key_names
def create_aws_ec2_keypair(self, network, ec2 = None):
if not ec2:
ec2 = self.ec2(network)
key_names = self.get_aws_ec2_keynames(network, ec2)
while True:
key_name = "zimagi_{}".format(random.randint(1, 1000001))
if key_name not in key_names:
break
keypair = ec2.create_key_pair(KeyName = key_name)
return (key_name, keypair['KeyMaterial'])
def delete_aws_ec2_keypair(self, network, key_name, ec2 = None):
if not ec2:
ec2 = self.ec2(network)
return ec2.delete_key_pair(KeyName = key_name)
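# Hypothetical usage from a provider command that has a `network` whose
# config carries a 'region' entry, as the helpers above expect:
#
#   ec2 = self.ec2(network)
#   key_name, private_key = self.create_aws_ec2_keypair(network, ec2)
#   ... provision instances with key_name ...
#   self.delete_aws_ec2_keypair(network, key_name, ec2)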
|
the-stack_106_14233
|
#!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
def setup():
global args, workdir
programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget']
if args.kvm:
programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker:
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
break
if return_code != 0:
print('Cannot find any way to install docker', file=sys.stderr)
exit(1)
else:
programs += ['lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/loki-project/gitian.sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('loki'):
subprocess.check_call(['git', 'clone', 'https://github.com/loki-project/loki.git'])
os.chdir('gitian-builder')
subprocess.check_call(['git', 'checkout', '963322de8420c50502c4cc33d4d7c0d84437b576'])
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
subprocess.check_call(make_image_prog)
os.chdir(workdir)
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
exit(0)
def build():
global args, workdir
os.makedirs('loki-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_output(["echo 'a8c4e9cafba922f89de0df1f2152e7be286aba73f78505169bc351a7938dd911 inputs/osslsigncode-Backports-to-1.7.1.patch' | sha256sum -c"], shell=True)
subprocess.check_output(["echo 'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9 inputs/osslsigncode-1.7.1.tar.gz' | sha256sum -c"], shell=True)
subprocess.check_call(['make', '-C', '../loki/contrib/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'loki='+args.commit, '--url', 'loki='+args.url, '../loki/contrib/gitian/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../loki/contrib/gitian/gitian-linux.yml'])
subprocess.check_call('mv build/out/loki-*.tar.gz ../loki-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'loki='+args.commit, '--url', 'loki='+args.url, '../loki/contrib/gitian/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win', '--destination', '../gitian.sigs/', '../loki/contrib/gitian/gitian-win.yml'])
subprocess.check_call('mv build/out/loki*.zip ../loki-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'loki='+args.commit, '--url', 'loki='+args.url, '../loki/contrib/gitian/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx', '--destination', '../gitian.sigs/', '../loki/contrib/gitian/gitian-osx.yml'])
subprocess.check_call('mv build/out/loki*.tar.gz ../loki-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def verify():
global args, workdir
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../loki/contrib/gitian/gitian-linux.yml'])
print('\nVerifying v'+args.version+' Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win', '../loki/contrib/gitian/gitian-win.yml'])
print('\nVerifying v'+args.version+' MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx', '../loki/contrib/gitian/gitian-osx.yml'])
os.chdir(workdir)
def main():
global args, workdir
parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/loki-project/loki', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', help='GPG signer to sign each build assert file')
parser.add_argument('version', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if args.buildsign:
args.build=True
args.sign=True
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
# Set environment variable USE_LXC or USE_DOCKER so gitian-builder knows whether to use LXC or Docker
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if not 'GITIAN_HOST_IP' in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if not 'LXC_GUEST_IP' in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
script_name = os.path.basename(sys.argv[0])
# Signer and version shouldn't be empty
if args.signer == '':
print(script_name+': Missing signer.')
print('Try '+script_name+' --help for more information')
exit(1)
if args.version == '':
print(script_name+': Missing version.')
print('Try '+script_name+' --help for more information')
exit(1)
# Determine the commit to check out (defaults to the version argument)
if args.commit and args.pull:
raise Exception('Cannot have both commit and pull')
args.commit = args.commit if args.commit else args.version
if args.setup:
setup()
os.chdir('loki')
if args.pull:
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
os.chdir('../gitian-builder/inputs/loki')
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True).strip()
args.version = 'pull-' + args.version
print(args.commit)
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
if args.build:
build()
if args.verify:
verify()
if __name__ == '__main__':
main()
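# Example invocation (hypothetical signer name and version, shown for illustration only):
#   ./gitian-build.py -b --detach-sign --no-commit my-gpg-key 7.1.3
# builds unsigned binaries for the OSes selected via --os and writes the assert files
# without committing anything to gitian.sigs.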
|
the-stack_106_14234
|
from django.conf.urls import url, include
from rest_framework import routers
# from .models import Project
# from .views import DashboardView, DevelopmentProjectListView, DevelopmentProjectDetailView, DevelopmentProjectCreateView, \
# DevelopmentProjectUpdateView, DevelopmentProjectGISLayerCreateView, DevelopmentProjectGISLayerUpdateView, \
# DevelopmentProjectGISLayerDeleteView, DevelopmentProjectGISLayerDetailView
from . import views
import development
# api:
from .views import DevelopmentProjectViewSet, DevelopmentGISLayerFeatureViewSet, DevelopmentAssetViewSet, DevelopmentGISLayerViewSet
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'project', DevelopmentProjectViewSet, 'project')
router.register(r'feature', DevelopmentGISLayerFeatureViewSet, 'feature')
router.register(r'assets', DevelopmentAssetViewSet, 'secure-assets')
router.register(r'gislayers', DevelopmentGISLayerViewSet, 'gislayers')
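# Note: with DRF's DefaultRouter each registration above expands into list/detail routes,
# e.g. the 'project' basename yields URL names 'project-list' and 'project-detail'
# (standard DefaultRouter behaviour, not something defined explicitly in this file).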
urlpatterns = [
url(r'^$', views.DashboardView.as_view(), name='dashboard'),
url(r'^settings/$', development.views.DevelopmentSettingsView.as_view(), name='settings'),
# url(r'^fileno/(list/)?$', development.views.DevelopmentProjectListView.as_view(), name='fileno-list'),
url(r'^fileno/new/$', development.views.FileNoCreateView.as_view(), name='fileno-create'),
url(r'^fileno/(?P<pk>\d+)/edit$', development.views.FileNoUpdateView.as_view(), name='fileno-update'),
url(r'^fileno/(?P<pk>\d+)/delete/$', development.views.FileNoDeleteView.as_view(), name='fileno-delete'),
url(r'^project/(list/)?$', development.views.DevelopmentProjectListView.as_view(), name='project-list'),
url(r'^project/(list/)?report/$', development.views.DevelopmentProjectListPrintView.as_view(), name='project-list-print'),
url(r'^project/(?P<pk>\d+)/$', development.views.DevelopmentProjectDetailView.as_view(), name='project-detail'),
url(r'^project/print/(?P<pk>\d+)/$', development.views.DevelopmentProjectDetailPrintView.as_view(), name='project-detail-print'),
url(r'^project/new/$', development.views.DevelopmentProjectCreateView.as_view(), name='project-create'),
url(r'^project/new/xml/$', development.views.DevelopmentProjectCreateFromSERView.as_view(), name='project-create-xml'),
url(r'^project/(?P<pk>\d+)/edit/$', development.views.DevelopmentProjectUpdateView.as_view(), name='project-update'),
url(r'^project/(?P<pk>\d+)/delete/$', development.views.DevelopmentProjectDeleteView.as_view(), name='project-delete'),
url(r'^project/(?P<project_pk>\d+)/location/new/$', development.views.DevelopmentProjectGISLayerCreateView.as_view(), name='project-location-create'),
url(r'^project/(?P<project_pk>\d+)/location/(?P<pk>\d+)/edit/$', development.views.DevelopmentProjectGISLayerUpdateView.as_view(),
name='project-location-edit'),
url(r'^project/(?P<project_pk>\d+)/location/(?P<pk>\d+)/delete/$', development.views.DevelopmentProjectGISLayerDeleteView.as_view(),
name='project-location-delete'),
url(r'^gislayer/$', development.views.DevelopmentGISLayerListView.as_view(), name='gislayer-list'),
url(r'^gislayer/(?P<pk>\d+)/$', development.views.DevelopmentProjectGISLayerDetailView.as_view(), name='gislayer-detail'),
url(r'^gislayer/new/$', development.views.DevelopmentProjectGISLayerCreateView.as_view(), name='gislayer-create'),
url(r'^gislayer/(?P<pk>\d+)/edit/$', development.views.DevelopmentProjectGISLayerUpdateView.as_view(), name='gislayer-update'),
url(r'^gislayer/(?P<pk>\d+)/delete/$', development.views.DevelopmentProjectGISLayerDeleteView.as_view(), name='gislayer-delete'),
url(r'^project/(?P<project_pk>\d+)/asset/(?P<pk>\d+)/$', development.views.DevelopmentProjectAssetDetailView.as_view(), name='project-secureasset-detail'),
url(r'^project/(?P<project_pk>\d+)/asset/new/$', development.views.DevelopmentProjectAssetCreateView.as_view(), name='project-secureasset-create'),
url(r'^project/(?P<project_pk>\d+)/asset/(?P<pk>\d+)/edit/$', development.views.DevelopmentProjectAssetUpdateView.as_view(), name='project-secureasset-update'),
url(r'^project/(?P<project_pk>\d+)/asset/(?P<pk>\d+)/delete/$', development.views.DevelopmentProjectAssetDeleteView.as_view(), name='project-secureasset-delete'),
url(r'^project/(?P<project_pk>\d+)/spatialreport/new/$', development.views.DevelopmentSpatialReportFormView.as_view(), name='project-spatialreport-form'),
url(r'^file/dashboard/$', development.views.SecureAssetsDashboardView.as_view(), name='secureasset-dashboard'),
url(r'^file/list', development.views.SecureAssetListView.as_view(), name='secureasset-list'),
url(r'^file/search/$', development.views.SecureAssetSearchView.as_view(), name='secureasset-search'),
url(r'^file/search/csv/$', development.views.SecureAssetSearchViewCSV.as_view(), name='secureasset-search-csv'),
url(r'^assets/(?P<pk>\d+)/$', development.views.DevelopmentAssetDetailView.as_view(), name='secureasset-detail'),
url(r'^assets/new/$', development.views.DevelopmentAssetCreateView.as_view(), name='secureasset-create'),
url(r'^assets/(?P<pk>\d+)/edit/$', development.views.DevelopmentAssetUpdateView.as_view(), name='secureasset-update'),
url(r'^assets/(?P<pk>\d+)/delete/$', development.views.DevelopmentAssetDeleteView.as_view(), name='secureasset-delete'),
url(r'^ser/new/$', development.views.SERFormView.as_view(), name='ser-create'),
# blow up the reversing. gah! it should work.
# url('', include('library.urls', namespace='development-library', app_name='library'),
# Wire up our API using automatic URL routing.
url(r'^api/', include(router.urls, namespace='api')),
url(r'^api/layer-master/$', views.DevelopmentGISLayerMasterListAPIView.as_view(), name='layer-master-list-api'),
]
|
the-stack_106_14235
|
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
UTILS
Utility functions.
"""
__all__ = [
# Methods
'assert_alignment',
'assert_color',
'assert_cursor',
'assert_list_vector',
'assert_orientation',
'assert_position',
'assert_position_vector',
'assert_vector',
'check_key_pressed_valid',
'fill_gradient',
'format_color',
'get_cursor',
'get_finger_pos',
'load_pygame_image_file',
'make_surface',
'mouse_motion_current_mouse_position',
'parse_padding',
'print_menu_widget_structure',
'set_pygame_cursor',
'uuid4',
'warn',
'widget_terminal_title',
# Constants
'PYGAME_V2',
# Classes
'ShadowGenerator',
'TerminalColors'
]
import sys
import traceback
import uuid
import warnings
import pygame
import pygame_menu
from pygame_menu.locals import ALIGN_CENTER, ALIGN_LEFT, ALIGN_RIGHT, POSITION_CENTER, \
POSITION_NORTH, POSITION_SOUTH, POSITION_SOUTHEAST, POSITION_NORTHWEST, \
POSITION_WEST, POSITION_EAST, POSITION_NORTHEAST, POSITION_SOUTHWEST, \
ORIENTATION_HORIZONTAL, ORIENTATION_VERTICAL, FINGERDOWN, FINGERUP, FINGERMOTION
from pygame_menu._types import ColorType, ColorInputType, Union, List, Vector2NumberType, \
NumberType, Any, Optional, Tuple, NumberInstance, VectorInstance, PaddingInstance, \
PaddingType, Tuple4IntType, ColorInputInstance, VectorType, EventType, \
CursorInputInstance, CursorInputType, Tuple2IntType, Dict, Tuple3IntType
PYGAME_V2 = pygame.version.vernum[0] >= 2
WARNINGS_LAST_MESSAGES: Dict[int, bool] = {}
def assert_alignment(align: str) -> None:
"""
Assert that a certain alignment is valid.
:param align: Align value
"""
assert isinstance(align, str), f'alignment "{align}" must be a string'
assert align in (ALIGN_LEFT, ALIGN_CENTER, ALIGN_RIGHT), \
f'incorrect alignment value "{align}"'
def assert_color(
color: Union[ColorInputType, List[int]],
warn_if_invalid: bool = True
) -> ColorType:
"""
Assert that a certain color is valid.
:param color: Object color
:param warn_if_invalid: If ``True`` warns if the color is invalid
:return: Formatted color if valid, else, throws an ``AssertionError`` exception
"""
color = format_color(color, warn_if_invalid=warn_if_invalid)
assert isinstance(color, VectorInstance), \
f'color must be a tuple or list, not type "{type(color)}"'
assert 4 >= len(color) >= 3, \
'color must be a tuple or list of 3 or 4 numbers'
for i in range(3):
assert isinstance(color[i], int), \
f'"{color[i]}" in element color {color} must be an integer, not type "{type(color)}"'
assert 0 <= color[i] <= 255, \
f'"{color[i]}" in element color {color} must be an integer between 0 and 255'
if len(color) == 4:
assert isinstance(color[3], int), \
f'alpha channel must be an integer between 0 and 255, not type "{type(color)}"'
assert 0 <= color[3] <= 255, \
f'opacity of color {color} must be an integer between 0 and 255; ' \
f'where 0 is fully-transparent and 255 is fully-opaque'
return color
def assert_cursor(cursor: CursorInputType) -> None:
"""
Assert a given cursor is valid.
:param cursor: Cursor object
"""
assert isinstance(cursor, CursorInputInstance), \
'cursor instance invalid, it can be None, an integer, ' \
'or pygame.cursors.Cursor'
def assert_list_vector(list_vector: Union[List[Vector2NumberType], Tuple[Vector2NumberType, ...]],
length: int) -> None:
"""
Assert that a list fixed length vector is numeric.
:param list_vector: Numeric list vector
:param length: Length of the required vector. If ``0`` don't check the length
"""
assert isinstance(list_vector, VectorInstance), \
f'list_vector "{list_vector}" must be a tuple or list'
for v in list_vector:
assert_vector(v, length)
def assert_orientation(orientation: str) -> None:
"""
Assert that a certain widget orientation is valid.
:param orientation: Object orientation
"""
assert isinstance(orientation, str), \
f'orientation "{orientation}" must be a string'
assert orientation in (ORIENTATION_HORIZONTAL, ORIENTATION_VERTICAL), \
f'invalid orientation value "{orientation}"'
def assert_position(position: str) -> None:
"""
Assert that a certain position is valid.
:param position: Object position
"""
assert isinstance(position, str), \
f'position "{position}" must be a string'
assert position in (POSITION_WEST, POSITION_SOUTHWEST, POSITION_SOUTH,
POSITION_SOUTHEAST, POSITION_EAST, POSITION_NORTH,
POSITION_NORTHWEST, POSITION_NORTHEAST, POSITION_CENTER), \
f'invalid position value "{position}"'
def assert_position_vector(position: Union[str, List[str], Tuple[str, ...]]) -> None:
"""
Assert that a position vector is valid.
:param position: Object position
"""
if isinstance(position, str):
assert_position(position)
else:
assert isinstance(position, VectorInstance)
unique = []
for pos in position:
assert_position(pos)
if pos not in unique:
unique.append(pos)
assert len(unique) == len(position), 'there cannot be repeated positions'
def assert_vector(
num_vector: VectorType,
length: int,
instance: type = NumberInstance
) -> None:
"""
Assert that a fixed length vector is numeric.
:param num_vector: Numeric vector
:param length: Length of the required vector. If ``0`` don't check the length
:param instance: Instance of each item of the vector
"""
assert isinstance(num_vector, VectorInstance), \
f'vector "{num_vector}" must be a list or tuple of {length} items if type {instance}'
if length != 0:
assert len(num_vector) == length, \
f'vector "{num_vector}" must contain {length} numbers only, ' \
f'but {num_vector} were given'
for i in range(len(num_vector)):
num = num_vector[i]
if instance == int and isinstance(num, float) and int(num) == num:
num = int(num)
assert isinstance(num, instance), \
f'item {num} of vector must be {instance}, not type "{type(num)}"'
def check_key_pressed_valid(event: EventType) -> bool:
"""
Checks if the pressed key is valid.
:param event: Key press event
:return: ``True`` if a key is pressed
"""
# If the system reports a key event but no key is actually
# pressed, this method posts a KEYUP event to clear the
# stale state
bad_event = not (True in pygame.key.get_pressed())
if bad_event:
if 'test' in event.dict and event.dict['test']:
return True
ev = pygame.event.Event(pygame.KEYUP, {'key': event.key})
pygame.event.post(ev)
return not bad_event
def fill_gradient(
surface: 'pygame.Surface',
color: ColorInputType,
gradient: ColorInputType,
rect: Optional['pygame.Rect'] = None,
vertical: bool = True,
forward: bool = True
) -> None:
"""
Fill a surface with a gradient pattern.
:param surface: Surface to fill
:param color: Starting color
:param gradient: Final color
:param rect: Area to fill; default is surface's rect
:param vertical: True=vertical; False=horizontal
:param forward: True=forward; False=reverse
"""
if rect is None:
rect = surface.get_rect()
x1, x2 = rect.left, rect.right
y1, y2 = rect.top, rect.bottom
color = assert_color(color)
gradient = assert_color(gradient)
if vertical:
h = y2 - y1
else:
h = x2 - x1
if forward:
a, b = color, gradient
else:
b, a = color, gradient
rate = (
float(b[0] - a[0]) / h,
float(b[1] - a[1]) / h,
float(b[2] - a[2]) / h
)
fn_line = pygame.draw.line
if vertical:
for line in range(y1, y2):
color = (
min(max(a[0] + (rate[0] * (line - y1)), 0), 255),
min(max(a[1] + (rate[1] * (line - y1)), 0), 255),
min(max(a[2] + (rate[2] * (line - y1)), 0), 255)
)
fn_line(surface, color, (x1, line), (x2, line))
else:
for col in range(x1, x2):
color = (
min(max(a[0] + (rate[0] * (col - x1)), 0), 255),
min(max(a[1] + (rate[1] * (col - x1)), 0), 255),
min(max(a[2] + (rate[2] * (col - x1)), 0), 255)
)
fn_line(surface, color, (col, y1), (col, y2))
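# Minimal fill_gradient usage sketch (illustrative only; assumes an initialized pygame display):
#   bg = make_surface(300, 200)
#   fill_gradient(bg, '#202020', (90, 90, 90), vertical=False)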
def format_color(
color: Union[ColorInputType, Any],
warn_if_invalid: bool = True
) -> Union[ColorType, Any]:
"""
Format color from string, int, or tuple to tuple type.
Available formats:
- Color name str: name of the color to use, e.g. ``"red"`` (all the supported name strings can be found in the colordict module, see https://github.com/pygame/pygame/blob/main/src_py/colordict.py)
- HTML color format str: ``"#rrggbbaa"`` or ``"#rrggbb"``, where rr, gg, bb, and aa are 2-digit hex numbers in the range of ``0`` to ``0xFF`` inclusive, the aa (alpha) value defaults to ``0xFF`` if not provided
- Hex number str: ``"0xrrggbbaa"`` or ``"0xrrggbb"``, where rr, gg, bb, and aa are 2-digit hex numbers in the range of ``0x00`` to ``0xFF`` inclusive, the aa (alpha) value defaults to ``0xFF`` if not provided
- int: int value of the color to use, using hex numbers can make this parameter more readable, e.g. ``0xrrggbbaa``, where rr, gg, bb, and aa are 2-digit hex numbers in the range of ``0x00`` to ``0xFF`` inclusive, note that the aa (alpha) value is not optional for the int format and must be provided
- tuple/list of int color values: ``(R, G, B, A)`` or ``(R, G, B)``, where R, G, B, and A are int values in the range of ``0`` to ``255`` inclusive, the A (alpha) value defaults to ``255`` (opaque) if not provided
:param color: Color to format. If format is valid returns the same input value
:param warn_if_invalid: If ``True`` warns if the color is invalid
:return: Color in (r, g, b, a) format
"""
if not isinstance(color, ColorInputInstance):
return color
if not isinstance(color, pygame.Color):
if isinstance(color, str):
if len(color) == 4 and color[0] == '#':
r, g, b = color[1], color[2], color[3]
color = f'#{r * 2}{g * 2}{b * 2}'
try:
if isinstance(color, VectorInstance) and 3 <= len(color) <= 4:
if PYGAME_V2:
for j in color:
if not isinstance(j, int):
raise ValueError('color cannot contain floating point values')
c = pygame.Color(*color)
else:
c = pygame.Color(color)
except ValueError:
if warn_if_invalid:
warn(f'invalid color value "{color}"')
else:
raise
return color
else:
c = color
return c.r, c.g, c.b, c.a
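# A few illustrative format_color conversions (following the rules documented above):
#   format_color('red')         -> (255, 0, 0, 255)
#   format_color('#ff0000')     -> (255, 0, 0, 255)
#   format_color((10, 20, 30))  -> (10, 20, 30, 255)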
def get_cursor() -> CursorInputType:
"""
Return the pygame cursor object.
:return: Cursor object
"""
try:
return pygame.mouse.get_cursor()
except TypeError as e:
warn(str(e))
return None
def get_finger_pos(menu: 'pygame_menu.Menu', event: EventType) -> Tuple2IntType:
"""
Return the position from finger (or mouse) event on x-axis and y-axis (x, y).
:param menu: Menu object for relative positioning in finger events
:param event: Pygame event object
:return: Position on x-axis and y-axis (x, y) in px
"""
if event.type in (FINGERDOWN, FINGERMOTION, FINGERUP):
assert menu is not None, \
'menu reference cannot be none while using finger position'
display_size = menu.get_window_size()
finger_pos = (int(event.x * display_size[0]), int(event.y * display_size[1]))
return finger_pos
return event.pos
def is_callable(func: Any) -> bool:
"""
Return ``True`` if ``func`` is callable.
:param func: Function object
:return: ``True`` if function
"""
e = 'is_callable(func) method will be removed in v5, consider using built-in' \
' callable(func) method instead'
warnings.warn(e, DeprecationWarning)
return callable(func)
def load_pygame_image_file(image_path: str, **kwargs) -> 'pygame.Surface':
"""
Loads an image and returns a surface.
:param image_path: Image file
:param kwargs: Optional keyword arguments
:return: Surface
"""
# Try to load the image
try:
if 'test' in kwargs.keys():
raise pygame.error('File is not a Windows BMP file')
surface = pygame.image.load(image_path)
except pygame.error as exc:
# Check if file is not a Windows file
if str(exc) == 'File is not a Windows BMP file':
pil_invalid_exception = Exception
# Check if Pillow exists
try:
# noinspection PyPackageRequirements
from PIL import Image, UnidentifiedImageError
pil_invalid_exception = UnidentifiedImageError
img_pil = Image.open(image_path)
# noinspection PyTypeChecker
surface = pygame.image.fromstring(
img_pil.tobytes(), img_pil.size, img_pil.mode).convert()
except (ModuleNotFoundError, ImportError):
warn(f'Image file "{image_path}" could not be loaded, as pygame.error '
f'is raised. To avoid this issue install the Pillow library')
raise
except pil_invalid_exception:
warn(f'The image "{image_path}" could not be loaded using Pillow')
raise
else:
raise
return surface
def make_surface(
width: NumberType,
height: NumberType,
alpha: bool = False,
fill_color: Optional[ColorInputType] = None
) -> 'pygame.Surface':
"""
Creates a pygame surface object.
:param width: Surface width
:param height: Surface height
:param alpha: Enable alpha channel on surface
:param fill_color: Fill surface with a certain color
:return: Pygame surface
"""
assert isinstance(width, NumberInstance)
assert isinstance(height, NumberInstance)
assert isinstance(alpha, bool)
assert width >= 0 and height >= 0, \
'surface width and height must be equal or greater than zero'
surface = pygame.Surface((int(width), int(height)), pygame.SRCALPHA, 32) # lgtm [py/call/wrong-arguments]
if alpha:
# noinspection PyArgumentList
surface = pygame.Surface.convert_alpha(surface)
if fill_color is not None:
fill_color = assert_color(fill_color)
surface.fill(fill_color)
return surface
def mouse_motion_current_mouse_position() -> EventType:
"""
Return a pygame event type MOUSEMOTION in the current mouse position.
:return: Event
"""
x, y = pygame.mouse.get_pos()
return pygame.event.Event(pygame.MOUSEMOTION, {'pos': (int(x), int(y))})
def parse_padding(padding: PaddingType) -> Tuple4IntType:
"""
Get the padding value from tuple.
- If an integer or float is provided: top, right, bottom and left values will be the same
- If 2-item tuple is provided: top and bottom takes the first value, left and right the second
- If 3-item tuple is provided: top will take the first value, left and right the second, and bottom the third
- If 4-item tuple is provided: padding will be (top, right, bottom, left)
.. note::
See `CSS W3Schools <https://www.w3schools.com/css/css_padding.asp>`_ for more info about padding.
:param padding: Can be a single number, or a tuple of 2, 3 or 4 elements following CSS style
:return: Padding value, (top, right, bottom, left), in px
"""
if padding is False or padding is None:
padding = 0
assert isinstance(padding, PaddingInstance)
if isinstance(padding, NumberInstance):
assert padding >= 0, 'padding cannot be a negative number'
return int(padding), int(padding), int(padding), int(padding)
else:
assert 1 <= len(padding) <= 4, 'padding must be a tuple of 2, 3 or 4 elements'
for i in range(len(padding)):
assert isinstance(padding[i], NumberInstance), \
'all padding elements must be integers or floats'
assert padding[i] >= 0, \
'all padding elements must be equal or greater than zero'
if len(padding) == 1:
return int(padding[0]), int(padding[0]), int(padding[0]), int(padding[0])
elif len(padding) == 2:
return int(padding[0]), int(padding[1]), int(padding[0]), int(padding[1])
elif len(padding) == 3:
return int(padding[0]), int(padding[1]), int(padding[2]), int(padding[1])
else:
return int(padding[0]), int(padding[1]), int(padding[2]), int(padding[3])
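# Illustrative results of the CSS-style rules implemented by parse_padding:
#   parse_padding(5)            -> (5, 5, 5, 5)
#   parse_padding((1, 2))       -> (1, 2, 1, 2)
#   parse_padding((1, 2, 3))    -> (1, 2, 3, 2)
#   parse_padding((1, 2, 3, 4)) -> (1, 2, 3, 4)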
def print_menu_widget_structure(
widgets: List['pygame_menu.widgets.Widget'],
index: int
) -> None:
"""
Test printing widgets order.
.. note::
- Φ Floating status
- ⇇ Selected
- !▲ Widget is not appended to current menu
- ╳ Widget is hidden
- ∑ Scrollable frame sizing
- β Widget is not selectable
- {x,y} Widget *column, row* position
- <x,y> Frame indices (min, max)
:param widgets: Menu widgets list
:param index: Menu index
"""
indx = 0
current_depth = 0
depth_widths = {}
c = TerminalColors
def close_frames(depth: int) -> None:
"""
Close frames up to current depth.
:param depth: Depth to close
"""
d = current_depth - depth
for i in range(d):
j = depth + d - (i + 1) # Current depth
line = f'· {"│ " * j}└{"┄" * 3}' # * depth_widths[j]
print(c.BRIGHT_WHITE + line.ljust(0, '━') + c.ENDC) # 80 also work
non_menu_frame_widgets: Dict[int, List['pygame_menu.widgets.Widget']] = {}
def process_non_menu_frame(w_indx: int) -> None:
"""
Print non-menu frames list.
:param w_indx: Current iteration index to print widgets
"""
for nmi in list(non_menu_frame_widgets.keys()):
if nmi == w_indx:
v = non_menu_frame_widgets[nmi]
for v_wid in v:
print(c.BRIGHT_WHITE + '· ' + '│ ' * v_wid.get_frame_depth()
+ c.ENDC + widget_terminal_title(v_wid))
del non_menu_frame_widgets[nmi]
for w in widgets:
w_depth = w.get_frame_depth()
close_frames(w.get_frame_depth())
title = widget_terminal_title(w, indx, index)
print('{0}{1}{2}'.format(
str(indx).ljust(3),
' ' + c.BRIGHT_WHITE + '│ ' * w_depth + c.ENDC,
title
))
if w_depth not in depth_widths.keys():
depth_widths[w_depth] = 0
# depth_widths[w_depth] = max(int(len(title) * 1.2) + 3, depth_widths[w_depth])
depth_widths[w_depth] = len(title) - 2
current_depth = w.get_frame_depth()
process_non_menu_frame(indx)
jw = widgets[0]
try:
if isinstance(w, pygame_menu.widgets.Frame): # Print ordered non-menu widgets
current_depth += 1
prev_indx = indx
for jw in w.get_widgets(unpack_subframes=False):
if jw.get_menu() is None or jw not in widgets:
if prev_indx not in non_menu_frame_widgets.keys():
non_menu_frame_widgets[prev_indx] = []
non_menu_frame_widgets[prev_indx].append(jw)
else:
prev_indx = widgets.index(jw)
except ValueError as e:
print(f'[ERROR] while requesting widget {jw.get_class_id()}')
warn(str(e))
indx += 1
process_non_menu_frame(indx)
close_frames(0)
def set_pygame_cursor(cursor: CursorInputType) -> None:
"""
Set pygame cursor.
:param cursor: Cursor object
"""
try:
if cursor is not None:
# noinspection PyArgumentList
pygame.mouse.set_cursor(cursor)
except (pygame.error, TypeError):
if PYGAME_V2:
warn(f'could not establish widget cursor, invalid value {cursor}')
def uuid4(short: bool = False) -> str:
"""
Create custom version of uuid4.
:param short: If ``True`` only returns the first 8 chars of the uuid, else, 18
:return: UUID of 18 chars
"""
return str(uuid.uuid4())[:18 if not short else 8]
def warn(message: str, print_stack: bool = True) -> None:
"""
Warnings warn method.
:param message: Message to warn about
:param print_stack: Print stack trace of the call
"""
assert isinstance(message, str)
# noinspection PyUnresolvedReferences,PyProtectedMember
frame = sys._getframe().f_back
# frame_info = inspect.getframeinfo(frame) # Traceback(filename, lineno, function, code_context, index)
# Check if message in dict
msg_hash = hash(message)
msg_in_hash = False
try:
msg_in_hash = WARNINGS_LAST_MESSAGES[msg_hash]
except KeyError:
pass
if not msg_in_hash and print_stack:
traceback.print_stack(frame, limit=5)
WARNINGS_LAST_MESSAGES[msg_hash] = True
# warnings.showwarning(message, UserWarning, frame_info[0], frame_info[1])
warnings.warn(message, stacklevel=2)
def widget_terminal_title(
widget: 'pygame_menu.widgets.Widget',
widget_index: int = -1,
current_index: int = -1
) -> str:
"""
Return widget title to be printed on terminals.
:param widget: Widget to get title from
:param widget_index: Widget index
:param current_index: Menu index
:return: Widget title
"""
w_class_id = TerminalColors.BOLD + widget.get_class_id() + TerminalColors.ENDC
if isinstance(widget, pygame_menu.widgets.Frame):
w_title = TerminalColors.BRIGHT_WHITE + '┌━' + TerminalColors.ENDC
w_title += '{0} - {3}[{1},{2},'.format(w_class_id, *widget.get_indices(), TerminalColors.LGREEN)
if widget.horizontal:
w_title += 'H] '
else:
w_title += 'V] '
if widget.is_scrollable:
wsz = widget.get_inner_size()
wsm = widget.get_max_size()
wsh = wsm[0] if wsm[0] == wsz[0] else f'{wsm[0]}→{wsz[0]}'
wsv = wsm[1] if wsm[1] == wsz[1] else f'{wsm[1]}→{wsz[1]}'
w_title += f'∑ [{wsh},{wsv}] '
w_title += TerminalColors.ENDC
else:
if widget.get_title() != '':
title_f = TerminalColors.UNDERLINE + widget.get_title() + TerminalColors.ENDC
w_title = f'{w_class_id} - {title_f} - '
else:
w_title = w_class_id + ' - '
# Column/Row position
w_title += TerminalColors.INDIGO
cr = widget.get_col_row_index()
w_title += '{' + str(cr[0]) + ',' + str(cr[1]) + '}'
w_title += TerminalColors.ENDC
# Add position
w_title += TerminalColors.MAGENTA
w_title += ' ({0},{1})'.format(*widget.get_position())
w_title += TerminalColors.ENDC
# Add size
w_title += TerminalColors.BLUE
w_title += ' ({0},{1})'.format(*widget.get_size())
w_title += TerminalColors.ENDC
# Add mods
w_title += TerminalColors.CYAN
if widget.is_floating():
w_title += ' Φ'
if not widget.is_visible():
w_title += ' ╳'
if not widget.is_selectable:
w_title += ' β'
if widget.is_selected():
w_title += TerminalColors.BOLD + ' ⟵'
if current_index != -1 and current_index != widget_index:
w_title += f'! [{widget_index}->{current_index}]'
if widget.get_menu() is None:
w_title += ' !▲'
w_title += TerminalColors.ENDC
return w_title
class TerminalColors(object):
"""
Terminal colors.
See https://www.lihaoyi.com/post/BuildyourownCommandLinewithANSIescapecodes.html.
"""
BLUE = '\u001b[38;5;27m'
BOLD = '\033[1m'
BRIGHT_MAGENTA = '\u001b[35;1m'
BRIGHT_WHITE = '\u001b[37;1m'
CYAN = '\u001b[36m'
ENDC = '\u001b[0m'
GRAY = '\u001b[30;1m'
INDIGO = '\u001b[38;5;129m'
LGREEN = '\u001b[38;5;150m'
MAGENTA = '\u001b[35m'
RED = '\u001b[31m'
UNDERLINE = '\033[4m'
class ShadowGenerator(object):
"""
A class to generate surfaces that work as a 'shadow' for rectangular UI elements. Base shadow
surfaces are generated with an algorithm, then when one is requested at a specific size the
closest pre-generated shadow surface is picked and then scaled to the exact size requested.
By default, it creates four base shadows in a small range of sizes. If you find the shadow
appearance unsatisfactory then it is possible to create base shadows closer to the size of the
elements you are having trouble with.
Source: https://github.com/MyreMylar/pygame_gui with many edits.
"""
_created_ellipse_shadows: Dict[str, 'pygame.Surface']
_preloaded_shadow_corners: Dict[str, Dict[str, 'pygame.Surface']]
_short_term_rect_cache: Dict[str, 'pygame.Surface']
def __init__(self) -> None:
self._created_ellipse_shadows = {}
self._preloaded_shadow_corners = {}
self._short_term_rect_cache = {}
def clear_short_term_caches(self, force: bool = False) -> None:
"""
Empties short term caches, so we aren't hanging on to so many surfaces.
:param force: Force clear
"""
t = len(self._created_ellipse_shadows) + len(self._preloaded_shadow_corners) + \
len(self._short_term_rect_cache)
if t >= 100 or force:
self._created_ellipse_shadows.clear()
self._preloaded_shadow_corners.clear()
self._short_term_rect_cache.clear()
def _create_shadow_corners(
self,
shadow_width_param: int,
corner_radius_param: int,
color: Tuple3IntType,
aa_amount: int = 4
) -> Dict[str, 'pygame.Surface']:
"""
Create corners for our rectangular shadows. These can be used across many
sizes of shadow with the same shadow width and corner radius.
:param shadow_width_param: Width of the shadow
:param corner_radius_param: Corner radius of the shadow
:param color: Shadow color
:param aa_amount: Anti-aliasing amount. Defaults to 4x
:return: Dict that contain the shadows of each border
"""
shadow_width_param = max(1, shadow_width_param)
corner_rect = pygame.Rect(
0, 0,
corner_radius_param * aa_amount,
corner_radius_param * aa_amount
)
corner_surface, edge_surface = self._create_single_corner_and_edge(
aa_amount=aa_amount,
corner_radius_param=corner_radius_param,
corner_rect=corner_rect,
shadow_width_param=shadow_width_param,
color=color
)
sub_radius = ((corner_radius_param - shadow_width_param) * aa_amount)
top_edge = pygame.transform.smoothscale(edge_surface,
(shadow_width_param, shadow_width_param))
left_edge = pygame.transform.rotate(top_edge, 90)
tl_corner = pygame.transform.smoothscale(corner_surface,
(corner_radius_param,
corner_radius_param))
if sub_radius > 0:
corner_sub_surface = pygame.surface.Surface(corner_rect.size,
flags=pygame.SRCALPHA,
depth=32)
corner_sub_surface.fill(pygame.Color('#00000000'))
pygame.draw.circle(corner_sub_surface,
pygame.Color('#FFFFFFFF'),
corner_rect.size,
sub_radius)
corner_small_sub_surface = pygame.transform.smoothscale(corner_sub_surface,
(corner_radius_param,
corner_radius_param))
tl_corner.blit(corner_small_sub_surface,
(0, 0),
special_flags=pygame.BLEND_RGBA_SUB)
corners_and_edges = {
'bottom': pygame.transform.flip(top_edge, False, True),
'bottom_left': pygame.transform.flip(tl_corner, False, True),
'bottom_right': pygame.transform.flip(tl_corner, True, True),
'left': left_edge,
'right': pygame.transform.flip(left_edge, True, False),
'top': top_edge,
'top_left': tl_corner,
'top_right': pygame.transform.flip(tl_corner, True, False)
}
self._preloaded_shadow_corners[(str(shadow_width_param) +
'x' +
str(corner_radius_param))] = corners_and_edges
return corners_and_edges
@staticmethod
def _create_single_corner_and_edge(
aa_amount: int,
corner_radius_param: int,
corner_rect: 'pygame.Rect',
shadow_width_param: int,
color: Tuple3IntType
) -> Tuple['pygame.Surface', 'pygame.Surface']:
"""
Creates a single corner surface and a single edge surface for a shadow.
:param aa_amount: Amount of anti-aliasing
:param corner_radius_param: Radius of a corner this shadow will go around
:param corner_rect: Rectangular size of corner
:param shadow_width_param: Width of shadow
:param color: Shadow color
:return: A tuple of the corner surface and the edge surface
"""
aa_amount = max(1, aa_amount)
final_corner_surface = pygame.surface.Surface((corner_radius_param * aa_amount,
corner_radius_param * aa_amount),
flags=pygame.SRCALPHA, depth=32)
final_corner_surface.fill(pygame.Color('#00000000'))
final_edge_surface = pygame.surface.Surface((shadow_width_param * aa_amount,
shadow_width_param * aa_amount),
flags=pygame.SRCALPHA, depth=32)
final_edge_surface.fill(pygame.Color('#00000000'))
corner_radius = corner_radius_param * aa_amount
corner_centre = (corner_radius, corner_radius)
edge_rect = pygame.Rect(0, 0,
shadow_width_param * aa_amount,
shadow_width_param * aa_amount)
edge_shadow_fade_height = edge_rect.width
alpha_increment = 20.0 / (shadow_width_param ** 1.5)
shadow_alpha = alpha_increment
r, g, b = color
for _ in range(shadow_width_param):
if corner_rect.width > 0 and corner_rect.height > 0 and corner_radius > 0:
# Edge
edge_shadow_surface = pygame.surface.Surface(
edge_rect.size,
flags=pygame.SRCALPHA,
depth=32)
edge_shadow_surface.fill(pygame.Color('#00000000'))
edge_shadow_surface.fill(pygame.Color(r, g, b, int(shadow_alpha)),
pygame.Rect(0,
edge_rect.height - edge_shadow_fade_height,
edge_rect.width,
edge_shadow_fade_height))
final_edge_surface.blit(edge_shadow_surface,
(0, 0),
special_flags=pygame.BLEND_RGBA_ADD)
# Corner
corner_shadow_surface = pygame.surface.Surface(corner_rect.size,
flags=pygame.SRCALPHA,
depth=32)
corner_shadow_surface.fill(pygame.Color('#00000000'))
pygame.draw.circle(corner_shadow_surface,
pygame.Color(r, g, b, int(shadow_alpha)),
corner_centre,
corner_radius)
final_corner_surface.blit(corner_shadow_surface,
(0, 0),
special_flags=pygame.BLEND_RGBA_ADD)
# increments/decrements
shadow_alpha += alpha_increment
corner_radius -= aa_amount
edge_shadow_fade_height -= aa_amount
return final_corner_surface, final_edge_surface
def create_new_rectangle_shadow(
self,
width: int,
height: int,
shadow_width_param: int,
corner_radius_param: int,
aa_amount: int = 4,
color: Tuple3IntType = (0, 0, 0)
) -> Optional['pygame.Surface']:
"""
Creates a rectangular shadow surface at the specified size and stores it for later use.
:param width: The width of the base shadow to create
:param height: The height of the base shadow to create
:param shadow_width_param: The width of the shadowed edge
:param corner_radius_param: The radius of the rectangular shadow's corners
:param aa_amount: Antialiasing
:param color: Shadow color (r, g, b)
:return: Shadow
"""
assert isinstance(width, int)
assert isinstance(height, int)
assert_vector(color, 3, int)
shadow_width_param, corner_radius_param, aa_amount = int(shadow_width_param), \
int(corner_radius_param), int(aa_amount)
if width < corner_radius_param or height < corner_radius_param or shadow_width_param == 0:
return None
r, g, b = color
params = [width, height, shadow_width_param, corner_radius_param, aa_amount, r, g, b]
shadow_id = '_'.join(str(param) for param in params)
if shadow_id in self._short_term_rect_cache:
return self._short_term_rect_cache[shadow_id]
final_surface = pygame.surface.Surface((width, height), flags=pygame.SRCALPHA, depth=32)
final_surface.fill(pygame.Color('#00000000'))
corner_index_id = str(shadow_width_param) + 'x' + str(corner_radius_param)
if corner_index_id in self._preloaded_shadow_corners:
edges_and_corners = self._preloaded_shadow_corners[corner_index_id]
else:
edges_and_corners = self._create_shadow_corners(
shadow_width_param=shadow_width_param,
corner_radius_param=corner_radius_param,
color=color,
aa_amount=aa_amount
)
final_surface.blit(edges_and_corners['top_left'], (0, 0))
final_surface.blit(edges_and_corners['top_right'], (width - corner_radius_param, 0))
final_surface.blit(edges_and_corners['bottom_left'],
(0, height - corner_radius_param))
final_surface.blit(edges_and_corners['bottom_right'],
(width - corner_radius_param, height - corner_radius_param))
if width - (2 * corner_radius_param) > 0:
top_edge = pygame.transform.scale(edges_and_corners['top'],
(width - (2 * corner_radius_param),
shadow_width_param))
bottom_edge = pygame.transform.scale(edges_and_corners['bottom'],
(width - (2 * corner_radius_param),
shadow_width_param))
final_surface.blit(top_edge, (corner_radius_param, 0))
final_surface.blit(bottom_edge, (corner_radius_param, height - shadow_width_param))
if height - (2 * corner_radius_param) > 0:
left_edge = pygame.transform.scale(edges_and_corners['left'],
(shadow_width_param,
height - (2 * corner_radius_param)))
right_edge = pygame.transform.scale(edges_and_corners['right'],
(shadow_width_param,
height - (2 * corner_radius_param)))
final_surface.blit(left_edge, (0, corner_radius_param))
final_surface.blit(right_edge, (width - shadow_width_param,
corner_radius_param))
self._short_term_rect_cache[shadow_id] = final_surface
return final_surface
def create_new_ellipse_shadow(
self,
width: int,
height: int,
shadow_width_param: int,
aa_amount: int = 4,
color: Tuple3IntType = (0, 0, 0)
) -> Optional['pygame.Surface']:
"""
Creates an ellipse shaped shadow surface at the specified size and stores it for later use.
:param width: The width of the shadow to create
:param height: The height of the shadow to create
:param shadow_width_param: The width of the shadowed edge
:param aa_amount: The amount of anti-aliasing to use, defaults to 4
:param color: Shadow color (r, g, b)
:return: Surface with shadow
"""
assert isinstance(width, int)
assert isinstance(height, int)
assert_vector(color, 3, int)
shadow_width_param, aa_amount = int(shadow_width_param), int(aa_amount)
if shadow_width_param == 0:
return None
shadow_surface = pygame.surface.Surface((width * aa_amount, height * aa_amount),
flags=pygame.SRCALPHA, depth=32)
shadow_surface.fill(pygame.Color('#00000000'))
r, g, b = color
ellipse_id = str(width) + 'x' + str(height) + 'x' + str(shadow_width_param)
if ellipse_id in self._created_ellipse_shadows:
return self._created_ellipse_shadows[ellipse_id]
alpha_increment = max(1, int(20 / shadow_width_param))
shadow_alpha = alpha_increment
shadow_width = width * aa_amount
shadow_height = height * aa_amount
for i in range(shadow_width_param):
if shadow_width > 0 and shadow_height > 0:
shadow_rect = pygame.Rect(i * aa_amount,
i * aa_amount,
shadow_width,
shadow_height)
pygame.draw.ellipse(shadow_surface,
pygame.Color(r, g, b, shadow_alpha), shadow_rect)
shadow_width -= (2 * aa_amount)
shadow_height -= (2 * aa_amount)
shadow_alpha += alpha_increment
final_surface = pygame.transform.smoothscale(shadow_surface, (width, height))
self._created_ellipse_shadows[ellipse_id] = final_surface
return final_surface
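# Minimal ShadowGenerator usage sketch (illustrative sizes; assumes pygame.display is
# initialized so the smoothscale/convert calls above are valid):
#   gen = ShadowGenerator()
#   rect_shadow = gen.create_new_rectangle_shadow(200, 80, shadow_width_param=10, corner_radius_param=15)
#   ellipse_shadow = gen.create_new_ellipse_shadow(120, 60, shadow_width_param=8)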
|
the-stack_106_14236
|
name = "Tom"
age = 23
point = 170.5
s = "{} is {} old and he got {}."
text = s.format(name, age, point)
print(text)
text2 = f"{name} is {age} old and he got {point}."
print(text2)
tokyo = 123456789
kyoto = 987654321
print(f"Tokyo: {tokyo:,}, Kyoto: {kyoto:,}")
length = 120
weight = 60.22
g = "長さ:{:.1f},厚み:{:.0f}"
print(g.format(length, weight))
num1 = 123.44
num2 = 234.4422
num3 = 12.333
print(f"{num1:>10.1f}")
print(f"{num2:>10.1f}")
print(f"{num3:>10.1f}")
|
the-stack_106_14237
|
import httpx
url_base = 'http://localhost:8000/api/projects/'
headers = {'Content-Type': 'application/json'}
payload_name = 'django-titan'
payload = {
'name': payload_name,
'packages': [
{"name": "Django"},
{"name": "graphene", "version": "2.0"}
]
}
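# The API under test is assumed to expose CRUD endpoints under /api/projects/<name>;
# the helpers below delete/recreate the 'django-titan' project so each test run starts clean.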
def initial_delete_for_test(package_name):
if httpx.get(url_base+package_name).status_code == 200:
httpx.delete(url_base+package_name)
def initial_insert_for_test():
httpx.post(url_base, json=payload, headers=headers)
def test_get_list_projects():
response = httpx.get(url_base)
assert response.status_code == 200, "Error when fetching projects"
def test_get_by_name_project():
initial_delete_for_test(payload_name)
initial_insert_for_test()
response = httpx.get(url_base+payload_name)
assert response.status_code == 200, 'Error when fetching project'
initial_delete_for_test(payload_name)
def test_create_new_project():
initial_delete_for_test(payload_name)
response = httpx.post(url_base, json=payload, headers=headers)
assert response.status_code == 201, 'Error creating project'
initial_delete_for_test(payload_name)
def test_create_new_projeto_unnamed():
payload_unnamed = {
'name': '',
'packages': [
{"name": "Django"},
{"name": "graphene", "version": "2.0"}
]
}
response = httpx.post(url_base, json=payload_unnamed, headers=headers)
assert response.status_code == 400, 'Project created without name'
def test_create_same_project():
initial_delete_for_test(payload_name)
initial_insert_for_test()
response = httpx.post(url_base, json=payload, headers=headers)
assert response.status_code == 400, 'Duplicate project created'
initial_delete_for_test(payload_name)
def test_create_project_without_packages():
payload_without_packages = {
"name": "Projeto sem pacotes"
}
response = httpx.post(
url_base, json=payload_without_packages, headers=headers)
assert response.status_code == 400, 'Project created without "packages" key'
payload_packages_without_list = {
"name": "Projeto sem pacotes",
"packages": ""
}
response = httpx.post(
url_base, json=payload_packages_without_list, headers=headers)
assert response.status_code == 400, '"packages" with invalid format'
def test_create_package_without_version():
payload_test_version = {
'name': 'version',
'packages': [
{"name": "Django"}
]
}
response = httpx.post(url_base, json=payload_test_version, headers=headers)
django = response.json()['packages'][0]
assert 'version' in django, 'Version not included in the package'
initial_delete_for_test('version')
def test_create_invalid_package():
payload_test = {
'name': 'version',
'packages': [
{"name": "Xblau"}
]
}
response = httpx.post(url_base, json=payload_test, headers=headers)
assert response.status_code == 400, 'Invalid package name'
def test_create_package_invalid_version():
payload_test = {
'name': 'version',
'packages': [
{"name": "django", "version": "AAA"}
]
}
response = httpx.post(url_base, json=payload_test, headers=headers)
assert response.status_code == 400, 'Package with invalid version'
def test_delete_project():
initial_delete_for_test(payload_name)
initial_insert_for_test()
response = httpx.delete(url_base+payload_name)
assert response.status_code == 204, 'Error deleting project'
def test_delete_project_nonexistent():
response = httpx.delete(url_base+"xablau")
assert response.status_code == 404, 'Error deleting non-existent project'
|
the-stack_106_14240
|
''' Process diffusion imaging parameters
* ``q`` is a vector in Q space
* ``b`` is a b value
* ``g`` is the unit vector along the direction of q (the gradient
direction)
Thus:
b = norm(q)
g = q / norm(q)
(``norm(q)`` is the Euclidean norm of ``q``)
The B matrix ``B`` is a symmetric positive semi-definite matrix. If
``q_est`` is the closest q vector equivalent to the B matrix, then:
B ~ (q_est . q_est.T) / norm(q_est)
'''
import numpy as np
import numpy.linalg as npl
def B2q(B, tol=None):
''' Estimate q vector from input B matrix `B`
We require that the input `B` is symmetric positive semi-definite.
Because the solution is a square root, the sign of the returned
vector is arbitrary. We set the vector to have a positive x
component by convention.
Parameters
----------
B : (3,3) array-like
B matrix - symmetric. We do not check the symmetry.
tol : None or float
absolute tolerance below which to consider eigenvalues of the B
matrix to be small enough not to worry about them being negative,
in check for positive semi-definite-ness. None (default) results
in a fairly tight numerical threshold proportional to the maximum
eigenvalue
Returns
-------
q : (3,) vector
Estimated q vector from B matrix `B`
'''
B = np.asarray(B)
if not np.allclose(B - B.T, 0):
raise ValueError('B matrix is not symmetric enough')
w, v = npl.eigh(B)
if tol is None:
tol = np.abs(w.max()) * B.shape[0] * np.finfo(w.dtype).eps
non_trivial = np.abs(w) > tol
if np.any(w[non_trivial] < 0):
raise ValueError('B not positive semi-definite')
inds = np.argsort(w)[::-1]
max_ind = inds[0]
vector = v[:,max_ind]
# because the factor is a sqrt, the sign of the vector is arbitrary.
# We arbitrarily set it to have a positive x value.
if vector[0] < 0:
vector *= -1
return vector * w[max_ind]
def nearest_pos_semi_def(B):
''' Least squares positive semi-definite tensor estimation
Reference: Niethammer M, San Jose Estepar R, Bouix S, Shenton M,
Westin CF. On diffusion tensor estimation. Conf Proc IEEE Eng Med
Biol Soc. 2006;1:2622-5. PubMed PMID: 17946125; PubMed Central
PMCID: PMC2791793.
Parameters
----------
B : (3,3) array-like
B matrix - symmetric. We do not check the symmetry.
Returns
-------
npds : (3,3) array
Estimated nearest positive semi-definite array to matrix `B`.
Examples
--------
>>> B = np.diag([1, 1, -1])
>>> nearest_pos_semi_def(B)
array([[ 0.75, 0. , 0. ],
[ 0. , 0.75, 0. ],
[ 0. , 0. , 0. ]])
'''
B = np.asarray(B)
vals, vecs = npl.eigh(B)
# indices of eigenvalues in descending order
inds = np.argsort(vals)[::-1]
vals = vals[inds]
cardneg = np.sum(vals < 0)
if cardneg == 0:
return B
if cardneg == 3:
return np.zeros((3,3))
lam1a, lam2a, lam3a = vals
scalers = np.zeros((3,))
if cardneg == 2:
b112 = np.max([0,lam1a+(lam2a+lam3a)/3.])
scalers[0] = b112
elif cardneg == 1:
lam1b=lam1a+0.25*lam3a
lam2b=lam2a+0.25*lam3a
if lam1b >= 0 and lam2b >= 0:
scalers[:2] = lam1b, lam2b
else: # one of the lam1b, lam2b is < 0
if lam2b < 0:
b111=np.max([0,lam1a+(lam2a+lam3a)/3.])
scalers[0] = b111
if lam1b < 0:
b221=np.max([0,lam2a+(lam1a+lam3a)/3.])
scalers[1] = b221
# resort the scalers to match the original vecs
scalers = scalers[np.argsort(inds)]
return np.dot(vecs, np.dot(np.diag(scalers), vecs.T))
def q2bg(q_vector, tol=1e-5):
""" Return b value and q unit vector from q vector `q_vector`
Parameters
----------
q_vector : (3,) array-like
q vector
tol : float, optional
q vector L2 norm below which `q_vector` is considered to have a `b_value` of
zero, and therefore `g_vector` is also set to zero.
Returns
-------
b_value : float
L2 Norm of `q_vector` or 0 if L2 norm < `tol`
g_vector : shape (3,) ndarray
`q_vector` / `b_value` or 0 if L2 norm < `tol`
Examples
--------
>>> q2bg([1, 0, 0])
(1.0, array([ 1., 0., 0.]))
>>> q2bg([0, 10, 0])
(10.0, array([ 0., 1., 0.]))
>>> q2bg([0, 0, 0])
(0.0, array([ 0., 0., 0.]))
"""
q_vec = np.asarray(q_vector)
norm = np.sqrt(np.sum(q_vec * q_vec))
if norm < tol:
return (0., np.zeros((3,)))
return norm, q_vec / norm
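if __name__ == '__main__':
    # Quick round-trip sketch (added for illustration; not part of the original module):
    # build a B matrix from a known q vector, then recover q, b and g from it.
    q = np.array([1., 2., 2.])                    # norm(q) == 3
    B = np.outer(q, q) / np.sqrt(np.sum(q * q))   # B ~ (q . q.T) / norm(q)
    print(B2q(B))                                 # ~ [1. 2. 2.]
    print(q2bg(B2q(B)))                           # ~ (3.0, array([0.333, 0.667, 0.667]))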
|
the-stack_106_14241
|
# -*- coding: utf-8 -*-
"""
@file
@brief Defines an action for Mokadi.
"""
from .mokadi_action import MokadiAction
from .mokadi_info import MokadiInfo
from .mokadi_exceptions import MokadiException, MokadiAuthentification
from .mokadi_mails import enumerate_last_mails
from .mokadi_helper import parse_string_int
class MokadiActionMail(MokadiAction):
"""
Action. Mail.
"""
def __init__(self, user, pwd, server, fLOG=None):
"""
Constructor.
@param user login
@param pwd password
@param server server
@param fLOG logging function
"""
MokadiAction.__init__(self, fLOG=fLOG)
self._user = user
self._pwd = pwd
self._server = server
def can_do(self, interpreted, message):
"""
Tells if the class can process the message.
@param interpreted interpreted message
@param message message
@return true if the class can process the message
"""
if len(interpreted) < 2:
return False
word = interpreted[1]
for word in interpreted[1:]:
if word[1] == ":mails:":
return True
return False
def process_interpreted_message(self, interpretation, message):
"""
Process the interpreted message.
@param interpretation interpretation
@param message original message
@return iterator on Info
"""
done = False
stop = -1
keep = -1
good = False
body = False
interpretation0 = interpretation
interpretation = [_ for _ in interpretation if _[1] != ":numero:"]
interpretation_clean = [
_ for _ in interpretation if _[1] != ":stopword:"]
if len(interpretation_clean) == 6:
if interpretation_clean[1][1] == ":verb_voir:" and interpretation_clean[2][1] == ":mails:" and \
interpretation_clean[3][1] == ":int:" and interpretation_clean[4][1] == ":entier:":
keep = parse_string_int(interpretation_clean[3][0])
good = True
body = True
elif len(interpretation) == 5:
if interpretation[1][1] == ":verb_voir:" and interpretation[2][1] == ":int:" and \
interpretation[3][1] == ":mails:":
stop = parse_string_int(interpretation[2][0])
good = True
elif interpretation[1][1] == ":verb_voir:" and interpretation[2][1] == ":mails:" and \
interpretation[3][1] == ":int:":
keep = parse_string_int(interpretation[3][0])
good = True
if not good and len(interpretation_clean) == 4:
if interpretation_clean[1][1] == ":verb_voir:" and interpretation_clean[2][1] == ":mails:":
good = True
if good:
fetch = max(keep + 1, 5)
try:
mails = enumerate_last_mails(
self._user, self._pwd, self._server, fLOG=self.fLOG, nb=fetch)
except MokadiAuthentification:
yield MokadiInfo("error", "Il m'est impossible de me connecter à la boîte mail de {0}.".format(self._user))
try:
for i, mail in enumerate(mails):
if i == stop:
break
if keep not in (-1, i):
continue
self.fLOG(mail.get_name(), "**", mail.get_nb_attachements(),
"**", mail.get_date_str())
h = mail.get_date().hour
yield MokadiInfo("ok", "Mail reçu vers {0} heures de {1}.".format(h, mail.get_name()))
subj = mail.get_field("subject")
if subj is None:
subj = ""
else:
subj = subj.split("\n")[0]
yield MokadiInfo("ok", subj)
nb = mail.get_nb_attachements()
if nb > 0:
yield MokadiInfo("ok", "Ce mail a {0} pièces jointes.".format(nb))
if body:
# The content of the mail includes past answers.
# This should be removed as well as html tags.
for line in mail.body.split("\n"):
yield MokadiInfo("ok", line)
except MokadiAuthentification:
yield MokadiInfo("error", "Il m'est impossible de me connecter à la boîte mail de {0}.".format(self._user))
done = True
if not done:
raise MokadiException(
"Unable to interpret '{0}'\n{1} - {2} - {3}\n.".format(message, len(interpretation0), interpretation, interpretation_clean))
|
the-stack_106_14243
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import os
import sys
import tensorflow as tf
import operator
import glob
import gzip
from collections import defaultdict
import multiprocessing
from functools import partial
tf.app.flags.DEFINE_string('in_files', '', 'pattern to match text input files')
tf.app.flags.DEFINE_string('out_dir', '', 'export tf protos')
tf.app.flags.DEFINE_string('load_vocab', '', 'directory containing vocab files to load')
tf.app.flags.DEFINE_integer('num_threads', 12, 'max number of threads to use for parallel processing')
tf.app.flags.DEFINE_integer('min_count', 10, 'minimum number of times a token must occur to be kept in the vocab')
tf.app.flags.DEFINE_integer('padding', 0, '0: no padding, 1: 0 pad to the right of seq, 2: 0 pad to the left')
tf.app.flags.DEFINE_integer('max_len', 50, 'sequence length to pad to when padding is enabled (assumed default; not set in the original excerpt)')
tf.app.flags.DEFINE_boolean('normalize_digits', False, 'map all digits to 0')
tf.app.flags.DEFINE_boolean('start_end', False, 'add <START> and <END> tokens to sequence')
FLAGS = tf.app.flags.FLAGS
# Helpers for creating Example objects
feature = tf.train.Feature
sequence_example = tf.train.SequenceExample
def features(d): return tf.train.Features(feature=d)
def int64_feature(v): return feature(int64_list=tf.train.Int64List(value=v))
def feature_list(l): return tf.train.FeatureList(feature=l)
def feature_lists(d): return tf.train.FeatureLists(feature_list=d)
queue = multiprocessing.Queue()
queue.put(0)
lock = multiprocessing.Lock()
def update_vocab_counts(line, token_counter, label_map):
parts = line.strip().split('\t')
if len(parts) == 4:
# data format is 'token \t label \t kg_id \t doc_id'
token, label, _, _ = parts
# normalize the digits to all be 0
token_normalized = re.sub(r'(?<!\$ARG)[0-9]', '0', token) \
if FLAGS.normalize_digits else token
token_counter[token_normalized] += 1
if label not in label_map:
label_map[label] = len(label_map)
return 0
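# Example of the expected 4-column TSV input (hypothetical values, for illustration only):
#   Paris<TAB>B-LOC<TAB>/m/05qtj<TAB>doc_0001
# Sequences are separated by blank lines (see process_file below).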
def make_example(token_map, label_map, entity_map, token_strs, label_strs, entity_strs, writer):
if FLAGS.start_end:
token_strs = ['<START>'] + token_strs + ['<END>']
entity_strs = ['<UNK>'] + entity_strs + ['<UNK>']
label_strs = ['<START>'] + label_strs + ['<END>']
tokens = [token_map[t] if t in token_map else token_map['<UNK>'] for t in token_strs]
entities = [entity_map[e] if e in entity_map else entity_map['<UNK>'] for e in entity_strs]
labels = [label_map[l] for l in label_strs]
seq_len = len(tokens)
if FLAGS.padding > 0 and len(tokens) < FLAGS.max_len:
padding = [token_map['<PAD>']] * (FLAGS.max_len - len(tokens))
tokens = tokens + padding if FLAGS.padding == 1 else padding + tokens
labels = labels + padding if FLAGS.padding == 1 else padding + labels
entities = entities + padding if FLAGS.padding == 1 else padding + entities
tokens = [tf.train.Feature(int64_list=tf.train.Int64List(value=[t])) for t in tokens]
labels = [tf.train.Feature(int64_list=tf.train.Int64List(value=[l])) for l in labels]
entities = [tf.train.Feature(int64_list=tf.train.Int64List(value=[e])) for e in entities]
example = sequence_example(
context=features({
'seq_len': int64_feature([seq_len]),
}),
feature_lists=feature_lists({
"tokens": feature_list(tokens),
"ner_labels": feature_list(labels),
"entities": feature_list(entities),
})
)
writer.write(example.SerializeToString())
return 1
def process_file(token_map, label_map, entity_map, total_lines, in_out, log_every=25):
try:
in_f, out_path = in_out
writer = tf.python_io.TFRecordWriter(out_path)
lines_written = 0
print('Converting %s to %s' % (in_f, out_path))
f_reader = gzip.open(in_f, 'rb') if in_f.endswith('.gz') else open(in_f, 'r')
i = 0
line = f_reader.readline().strip()
while line:
try:
token_list = []
label_list = []
entity_list = []
i += 1
# take lines until we reach a blank then create example
while line:
parts = line.strip().split('\t')
token_str, label_str, kg_id, _ = parts
token_normalized = re.sub(r'(?<!\$ARG)[0-9]', '0', token_str) \
if FLAGS.normalize_digits else token_str
token_list.append(token_normalized)
label_list.append(label_str)
entity_list.append(kg_id)
line = f_reader.readline().strip()
i += 1
line = f_reader.readline().strip()
if i % log_every == 0:
if not queue.empty():
lock.acquire()
processed_lines = queue.get(True, .25) + i
i = 0
queue.put(processed_lines, True, .25)
lock.release()
if total_lines > 0:
percent_done = 100 * processed_lines / float(total_lines)
sys.stdout.write('\rProcessing line %d of %d : %2.2f %%'
% (processed_lines, total_lines, percent_done))
else:
sys.stdout.write('\rProcessing line %d' % processed_lines)
sys.stdout.flush()
lines_written += (make_example(token_map, label_map, entity_map,
token_list, label_list, entity_list, writer))
except Exception as e:
print('error', e)
f_reader.close()
writer.close()
print('\nDone processing %s. Wrote %d lines' % (in_f, lines_written))
except KeyboardInterrupt:
return 'KeyboardException'
def tsv_to_examples():
if not os.path.exists(FLAGS.out_dir):
os.makedirs(FLAGS.out_dir)
in_files = sorted(glob.glob(FLAGS.in_files))
out_files = ['%s/%s.proto' % (FLAGS.out_dir, in_f.split('/')[-1]) for in_f in in_files]
total_lines = 0
# iterate over data once to get counts of tokens and entities
token_counter = defaultdict(int)
label_map = {'<PAD>': 0, '<START>': 2, '<END>': 1}
for in_f in in_files:
if in_f:
line_num = 0
errors = 0
print('Updating vocabs for %s' % in_f)
f_reader = gzip.open(in_f, 'rb') if in_f.endswith('.gz') else open(in_f, 'r')
for line in f_reader:
line_num += 1
if line_num % 1000 == 0:
sys.stdout.write('\rProcessing line: %d \t errors: %d ' % (line_num, errors))
sys.stdout.flush()
errors += update_vocab_counts(line, token_counter, label_map)
print(' Done')
f_reader.close()
total_lines += line_num
# remove tokens with < min_count
print('Sorting and filtering vocab maps')
keep_tokens = sorted([(t, c) for t, c in token_counter.iteritems()
if c >= FLAGS.min_count], key=lambda tup: tup[1], reverse=True)
keep_tokens = [t[0] for t in keep_tokens]
# export the string->int maps to file
export_map = [('ner_labels', label_map)]
    # TODO: handle generating a new entities map
entity_map = {'<UNK>': 0}
if FLAGS.load_vocab:
print('Loading vocab from %s' % FLAGS.load_vocab)
with open('%s/token.txt' % FLAGS.load_vocab) as f:
token_map = {l.split('\t')[0]: int(l.split('\t')[1]) for l in f}
with open('%s/entities.txt' % FLAGS.load_vocab) as f:
entity_map = {l.split('\t')[0]: int(l.split('\t')[1]) for l in f}
print('Loaded %d tokens' % (len(token_map)))
else:
# int map all the kept vocab strings
token_map = {t: i for i, t in enumerate(['<PAD>', '<UNK>'] + keep_tokens)}
export_map.append(('token', token_map))
for f_str, id_map in export_map:
print('Exporting vocab maps to %s/%s' % (FLAGS.out_dir, f_str))
with open('%s/%s.txt' % (FLAGS.out_dir, f_str), 'w') as f:
sorted_id_map = sorted(id_map.items(), key=operator.itemgetter(1))
[f.write(s + '\t' + str(i) + '\n') for (s, i) in sorted_id_map]
print('Starting file process threads using %d threads' % FLAGS.num_threads)
pool = multiprocessing.Pool(FLAGS.num_threads)
try:
pool.map_async(partial(process_file, token_map, label_map, entity_map, total_lines),
zip(in_files, out_files)).get(999999)
pool.close()
pool.join()
except KeyboardInterrupt:
pool.terminate()
def main(argv):
print('\n'.join(sorted(["%s : %s" % (str(k), str(v)) for k, v in FLAGS.__dict__['__flags'].iteritems()])))
if FLAGS.out_dir == '':
print('Must supply out_dir')
sys.exit(1)
tsv_to_examples()
if __name__ == '__main__':
tf.app.run()
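# A sketch of the input this script assumes, inferred from process_file() above
# (the values are hypothetical): each line is token<TAB>ner_label<TAB>kg_id<TAB>extra
# and a blank line terminates a sentence, e.g.
#   Barack  B-PER  /m/02mjmr  -
#   Obama   I-PER  /m/02mjmr  -
#   spoke   O      <UNK>      -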
|
the-stack_106_14244
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of the Wapiti project (http://wapiti.sourceforge.io)
# Copyright (C) 2008-2020 Nicolas Surribas
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from urllib.parse import quote
from configparser import ConfigParser
from os.path import join as path_join
from math import ceil
from requests.exceptions import Timeout, ReadTimeout
from wapitiCore.attack.attack import Attack, PayloadType, Mutator
from wapitiCore.language.vulnerability import Vulnerability, Anomaly, _
from wapitiCore.net import web
from wapitiCore.net.xss_utils import generate_payloads, valid_xss_content_type, find_non_exec_parent, has_csp
class mod_permanentxss(Attack):
"""
This class detects permanent (stored) XSS vulnerabilities.
"""
    # Simple payloads that don't rely on their position in the DOM structure:
    # payloads injected after closing a tag attribute value (attrval) or in the
    # content of a tag (text node, like between <p> and </p>).
    # The only tricks here are character encoding, filter bypassing and the like.
    # Ordered from the simplest to the most complex; Wapiti stops on the first one that works.
independant_payloads = []
name = "permanentxss"
require = ["xss"]
PRIORITY = 6
# Attempted payload injection from mod_xss.
# key is tainted value, dict values are (mutated_request, parameter, flags)
TRIED_XSS = {}
# key = xss code, valid = (payload, flags)
SUCCESSFUL_XSS = {}
PAYLOADS_FILE = "xssPayloads.ini"
MSG_VULN = _("Stored XSS vulnerability")
def __init__(self, crawler, persister, logger, attack_options):
Attack.__init__(self, crawler, persister, logger, attack_options)
self.independant_payloads = self.payloads
def attack(self):
"""This method searches XSS which could be permanently stored in the web application"""
get_resources = self.persister.get_links(attack_module=self.name) if self.do_get else []
for original_request in get_resources:
if not valid_xss_content_type(original_request) or original_request.status in (301, 302, 303):
# If that content-type can't be interpreted as HTML by browsers then it is useless
# Same goes for redirections
continue
url = original_request.url
target_req = web.Request(url)
referer = original_request.referer
headers = {}
if referer:
headers["referer"] = referer
if self.verbose >= 1:
print("[+] {}".format(url))
try:
response = self.crawler.send(target_req, headers=headers)
data = response.content
except Timeout:
continue
except OSError as exception:
# TODO: those error messages are useless, don't give any valuable information
print(_("error: {0} while attacking {1}").format(exception.strerror, url))
continue
except Exception as exception:
print(_("error: {0} while attacking {1}").format(exception, url))
continue
# Should we look for taint codes sent with GET in the webpages?
# Exploiting those may imply sending more GET requests
# Search in the page source for every taint code used by mod_xss
for taint in self.TRIED_XSS:
input_request = self.TRIED_XSS[taint][0]
# Such situations should not occur as it would be stupid to block POST (or GET) requests for mod_xss
# and not mod_permanentxss, but it is possible so let's filter that.
if not self.do_get and input_request.method == "GET":
continue
if not self.do_post and input_request.method == "POST":
continue
if taint.lower() in data.lower():
                    # Code found in the webpage!
                    # Did mod_xss see this as a reflected XSS?
if taint in self.SUCCESSFUL_XSS:
# Yes, it means XSS payloads were injected, not just tainted code.
payload, flags = self.SUCCESSFUL_XSS[taint]
if self.check_payload(response, flags, taint):
# If we can find the payload again, this is in fact a stored XSS
get_params = input_request.get_params
post_params = input_request.post_params
file_params = input_request.file_params
referer = input_request.referer
                            # The following trick may seem dirty, but it lets us treat GET and POST
                            # requests the same way.
for params_list in [get_params, post_params, file_params]:
for i in range(len(params_list)):
parameter, value = params_list[i]
parameter = quote(parameter)
if value != taint:
continue
if params_list is file_params:
params_list[i][1][0] = payload
else:
params_list[i][1] = payload
# we found the xss payload again -> stored xss vuln
evil_request = web.Request(
input_request.path,
method=input_request.method,
get_params=get_params,
post_params=post_params,
file_params=file_params,
referer=referer
)
if original_request.path == input_request.path:
description = _(
"Permanent XSS vulnerability found via injection in the parameter {0}"
).format(parameter)
else:
description = _(
"Permanent XSS vulnerability found in {0} by injecting"
" the parameter {1} of {2}"
).format(
original_request.url,
parameter,
input_request.path
)
if has_csp(response):
description += ".\n" + _("Warning: Content-Security-Policy is present!")
self.add_vuln(
request_id=original_request.path_id,
category=Vulnerability.XSS,
level=Vulnerability.HIGH_LEVEL,
request=evil_request,
parameter=parameter,
info=description
)
if parameter == "QUERY_STRING":
injection_msg = Vulnerability.MSG_QS_INJECT
else:
injection_msg = Vulnerability.MSG_PARAM_INJECT
self.log_red("---")
self.log_red(
injection_msg,
self.MSG_VULN,
original_request.path,
parameter
)
if has_csp(response):
self.log_red(_("Warning: Content-Security-Policy is present!"))
self.log_red(Vulnerability.MSG_EVIL_REQUEST)
self.log_red(evil_request.http_repr())
self.log_red("---")
# FIX: search for the next code in the webpage
# Ok the content is stored, but will we be able to inject javascript?
else:
parameter = self.TRIED_XSS[taint][1]
payloads = generate_payloads(response.content, taint, self.independant_payloads)
flags = self.TRIED_XSS[taint][2]
# TODO: check that and make it better
if PayloadType.get in flags:
method = "G"
elif PayloadType.file in flags:
method = "F"
else:
method = "P"
self.attempt_exploit(method, payloads, input_request, parameter, taint, original_request)
yield original_request
def load_require(self, dependencies: list = None):
if dependencies:
for module in dependencies:
if module.name == "xss":
self.SUCCESSFUL_XSS = module.SUCCESSFUL_XSS
self.TRIED_XSS = module.TRIED_XSS
def attempt_exploit(self, method, payloads, injection_request, parameter, taint, output_request):
timeouted = False
page = injection_request.path
saw_internal_error = False
output_url = output_request.url
attack_mutator = Mutator(
methods=method,
payloads=payloads,
qs_inject=self.must_attack_query_string,
parameters=[parameter],
skip=self.options.get("skipped_parameters")
)
for evil_request, xss_param, xss_payload, xss_flags in attack_mutator.mutate(injection_request):
if self.verbose == 2:
print("[¨] {0}".format(evil_request))
try:
self.crawler.send(evil_request)
except ReadTimeout:
if timeouted:
continue
self.log_orange("---")
self.log_orange(Anomaly.MSG_TIMEOUT, page)
self.log_orange(Anomaly.MSG_EVIL_REQUEST)
self.log_orange(evil_request.http_repr())
self.log_orange("---")
if xss_param == "QUERY_STRING":
anom_msg = Anomaly.MSG_QS_TIMEOUT
else:
anom_msg = Anomaly.MSG_PARAM_TIMEOUT.format(xss_param)
self.add_anom(
request_id=injection_request.path_id,
category=Anomaly.RES_CONSUMPTION,
level=Anomaly.MEDIUM_LEVEL,
request=evil_request,
info=anom_msg,
parameter=xss_param
)
timeouted = True
else:
try:
response = self.crawler.send(output_request)
except ReadTimeout:
continue
if (
response.status not in (301, 302, 303) and
valid_xss_content_type(evil_request) and
self.check_payload(response, xss_flags, taint)
):
if page == output_request.path:
description = _(
"Permanent XSS vulnerability found via injection in the parameter {0}"
).format(xss_param)
else:
description = _(
"Permanent XSS vulnerability found in {0} by injecting"
" the parameter {1} of {2}"
).format(
output_request.url,
parameter,
page
)
if has_csp(response):
description += ".\n" + _("Warning: Content-Security-Policy is present!")
self.add_vuln(
request_id=injection_request.path_id,
category=Vulnerability.XSS,
level=Vulnerability.HIGH_LEVEL,
request=evil_request,
parameter=xss_param,
info=description
)
if xss_param == "QUERY_STRING":
injection_msg = Vulnerability.MSG_QS_INJECT
else:
injection_msg = Vulnerability.MSG_PARAM_INJECT
self.log_red("---")
# TODO: a last parameter should give URL used to pass the vulnerable parameter
self.log_red(
injection_msg,
self.MSG_VULN,
output_url,
xss_param
)
if has_csp(response):
self.log_red(_("Warning: Content-Security-Policy is present!"))
self.log_red(Vulnerability.MSG_EVIL_REQUEST)
self.log_red(evil_request.http_repr())
self.log_red("---")
# stop trying payloads and jump to the next parameter
break
elif response.status == 500 and not saw_internal_error:
if xss_param == "QUERY_STRING":
anom_msg = Anomaly.MSG_QS_500
else:
anom_msg = Anomaly.MSG_PARAM_500.format(xss_param)
self.add_anom(
request_id=injection_request.path_id,
category=Anomaly.ERROR_500,
level=Anomaly.HIGH_LEVEL,
request=evil_request,
info=anom_msg,
parameter=xss_param
)
self.log_orange("---")
self.log_orange(Anomaly.MSG_500, page)
self.log_orange(Anomaly.MSG_EVIL_REQUEST)
self.log_orange(evil_request.http_repr())
self.log_orange("---")
saw_internal_error = True
@property
def payloads(self):
"""Load the payloads from the specified file"""
if not self.PAYLOADS_FILE:
return []
payloads = []
config_reader = ConfigParser(interpolation=None)
config_reader.read_file(open(path_join(self.CONFIG_DIR, self.PAYLOADS_FILE)))
for section in config_reader.sections():
payload = config_reader[section]["payload"]
flags = {section}
clean_payload = payload.strip(" \n")
clean_payload = clean_payload.replace("[TAB]", "\t")
clean_payload = clean_payload.replace("[LF]", "\n")
clean_payload = clean_payload.replace(
"[TIME]",
str(int(ceil(self.options["timeout"])) + 1)
)
payload_type = PayloadType.pattern
if "[TIMEOUT]" in clean_payload:
payload_type = PayloadType.time
clean_payload = clean_payload.replace("[TIMEOUT]", "")
flags.add(payload_type)
payloads.append((clean_payload, flags))
return payloads
def check_payload(self, response, flags, taint):
config_reader = ConfigParser(interpolation=None)
config_reader.read_file(open(path_join(self.CONFIG_DIR, self.PAYLOADS_FILE)))
for section in config_reader.sections():
if section in flags:
expected_value = config_reader[section]["value"].replace("__XSS__", taint)
attribute = config_reader[section]["attribute"]
case_sensitive = config_reader[section].getboolean("case_sensitive")
match_type = config_reader[section].get("match_type", "exact")
for tag in response.soup.find_all(config_reader[section]["tag"]):
if find_non_exec_parent(tag):
continue
if attribute == "string" and tag.string:
if case_sensitive:
if expected_value in tag.string:
return True
else:
if expected_value.lower() in tag.string.lower():
return True
elif attribute == "full_string" and tag.string:
if case_sensitive:
if match_type == "exact" and expected_value == tag.string.strip():
return True
elif match_type == "starts_with" and tag.string.strip().startswith(expected_value):
return True
else:
if match_type == "exact" and expected_value.lower() == tag.string.strip().lower():
return True
elif match_type == "starts_with" and \
tag.string.strip().lower().startswith(expected_value.lower()):
return True
else:
# Found attribute specified in .ini file in attributes of the HTML tag
if attribute in tag.attrs:
if case_sensitive:
if match_type == "exact" and tag[attribute] == expected_value:
return True
elif match_type == "starts_with" and tag[attribute].startswith(expected_value):
return True
else:
if match_type == "exact" and tag[attribute].lower() == expected_value.lower():
return True
elif match_type == "starts_with" and \
expected_value.lower().startswith(tag[attribute].lower()):
return True
break
return False
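# A rough sketch (not the file shipped with Wapiti) of what a section of
# xssPayloads.ini is expected to look like, based on the keys read by payloads()
# and check_payload() above; section names are used as flags and __XSS__ is
# replaced by the taint code when checking:
#   [script_alert]
#   payload = <script>alert('__XSS__')</script>
#   tag = script
#   attribute = string
#   value = alert('__XSS__')
#   case_sensitive = yes
#   match_type = exact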
|
the-stack_106_14250
|
import sys
import logging
l = logging.getLogger("angr.engines.engine")
class SimEngine(object):
"""
A SimEngine is a class which understands how to perform execution on a state. This is a base class.
"""
def __init__(self, **kwargs):
self._check_failed = kwargs.get('check_failed')
def process(self, state, *args, **kwargs):
"""
Perform execution with a state.
You should only override this method in a subclass in order to provide the correct method signature and
docstring. You should override the ``_process`` method to do your actual execution.
:param state: The state with which to execute. This state will be copied before
modification.
:param inline: This is an inline execution. Do not bother copying the state.
:param force_addr: Force execution to pretend that we're working at this concrete address
:returns: A SimSuccessors object categorizing the execution's successor states
"""
inline = kwargs.pop('inline', False)
force_addr = kwargs.pop('force_addr', None)
addr = state.se.eval(state._ip) if force_addr is None else force_addr
# make a copy of the initial state for actual processing, if needed
if not inline and o.COW_STATES in state.options:
new_state = state.copy()
else:
new_state = state
# enforce this distinction
old_state = state
del state
# we have now officially begun the stepping process! now is where we "cycle" a state's
# data - move the "present" into the "past" by pushing an entry on the history stack.
# nuance: make sure to copy from the PREVIOUS state to the CURRENT one
        # to avoid creating a dead link in the history, messing up the state hierarchy
new_state.register_plugin('history', old_state.history.make_child())
new_state.history.recent_bbl_addrs.append(addr)
successors = SimSuccessors(addr, old_state)
new_state._inspect('engine_process', when=BP_BEFORE, sim_engine=self, sim_successors=successors)
successors = new_state._inspect_getattr('sim_successors', successors)
try:
self._process(new_state, successors, *args, **kwargs)
except SimException:
if o.EXCEPTION_HANDLING not in old_state.options:
raise
old_state.project._simos.handle_exception(successors, self, *sys.exc_info())
new_state._inspect('engine_process', when=BP_AFTER, sim_successors=successors)
successors = new_state._inspect_getattr('sim_successors', successors)
# downsizing
new_state.inspect.downsize()
# if not TRACK, clear actions on OLD state
#if o.TRACK_ACTION_HISTORY not in old_state.options:
# old_state.history.recent_events = []
return successors
def check(self, state, *args, **kwargs):
"""
Check if this engine can be used for execution on the current state. A callback `check_failure` is called upon
failed checks. Note that the execution can still fail even if check() returns True.
You should only override this method in a subclass in order to provide the correct method signature and
docstring. You should override the ``_check`` method to do your actual execution.
:param SimState state: The state with which to execute.
:param args: Positional arguments that will be passed to process().
:param kwargs: Keyword arguments that will be passed to process().
:return: True if the state can be handled by the current engine, False otherwise.
"""
r = self._check(state, *args, **kwargs)
if not r:
if self._check_failed is not None:
self._check_failed(state, *args, **kwargs)
return r
def _check(self, state, *args, **kwargs):
raise NotImplementedError()
def _process(self, new_state, successors, *args, **kwargs):
raise NotImplementedError
#
# Pickling
#
# CPython cannot pickle methods, which is why we have special handlers here to avoid pickling callback registered
# with SimEngine.
def __setstate__(self, state):
self._check_failed = None
def __getstate__(self):
return { }
from .. import sim_options as o
from ..state_plugins.inspect import BP_BEFORE, BP_AFTER
from .successors import SimSuccessors
from ..errors import SimException
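# A minimal sketch (not part of angr) of how a concrete engine is meant to plug
# into the base class above: subclass SimEngine and override _check/_process, as
# the docstrings describe. The body below is illustrative only.
class _NopEngine(SimEngine):
    def _check(self, state, *args, **kwargs):
        # claim every state; a real engine would inspect `state` first
        return True
    def _process(self, new_state, successors, *args, **kwargs):
        # a real engine would execute the block at the current address and
        # register the resulting states on `successors` (e.g. via add_successor)
        successors.processed = True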
|
the-stack_106_14252
|
import math
import numpy as np
def reconstruction(e,s):
# e = a list of lists of edges in each path [[],[],[]]
# s = a list of sums paired with each list of edges in the paths
all_edges = []
for edges in e:
for i in range(0, len(edges)):
all_edges.append(edges[i])
all_edges = list(set(all_edges))
# as long as all the entries in all_edges are strings or integers, set will do the work
matrix = np.zeros((len(s),len(all_edges)), dtype = np.int)
for path in range(0, len(s)):
for j in range(0, len(all_edges)):
for i in range(0, len(e[path])):
if e[path][i] == all_edges[j]:
matrix[path][j] += 1
answer = np.linalg.lstsq(matrix,s)[0]
print(all_edges,answer)
return (all_edges,answer)
reconstruction([[1,2,3],[3,4,5],[2,5,6]], [3,4,5])
reconstruction([["1.1", "1.2", "1.3"],["1.1","1.2"],["1.2","1.3","1.2"],["1.1","1.6","1.3","1.4"],["1.2","1.4"]],[2,3,4,5,6])
|
the-stack_106_14256
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class AzureQuotaExtensionAPIConfiguration(Configuration):
"""Configuration for AzureQuotaExtensionAPI.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
"""
def __init__(
self,
credential, # type: "TokenCredential"
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
super(AzureQuotaExtensionAPIConfiguration, self).__init__(**kwargs)
self.credential = credential
self.api_version = "2021-03-15-preview"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-quota/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
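# Hypothetical usage sketch (not part of the generated SDK): the configuration is
# normally created by the AzureQuotaExtensionAPI client itself, but it accepts any
# azure-core TokenCredential directly, e.g.
#   from azure.identity import DefaultAzureCredential
#   config = AzureQuotaExtensionAPIConfiguration(DefaultAzureCredential())
#   assert config.api_version == "2021-03-15-preview"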
|
the-stack_106_14260
|
from conans import ConanFile, CMake, tools
import os
import textwrap
required_conan_version = ">=1.33.0"
class DCMTKConan(ConanFile):
name = "dcmtk"
description = "DCMTK is a collection of libraries and applications implementing large parts the DICOM standard"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://dicom.offis.de/dcmtk"
license = "BSD-3-Clause"
topics = "conan", "dcmtk", "dicom", "image"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_applications": [True, False],
"with_multithreading": [True, False],
"charset_conversion": [None, "libiconv", "icu"],
"with_libxml2": [True, False],
"with_zlib": [True, False],
"with_openjpeg": [True, False, "deprecated"],
"with_openssl": [True, False],
"with_libpng": [True, False],
"with_libsndfile": [True, False, "deprecated"],
"with_libtiff": [True, False],
"with_tcpwrappers": [True, False],
"builtin_dictionary": [None, True, False],
"builtin_private_tags": [True, False],
"external_dictionary": [None, True, False],
"wide_io": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_applications": False,
"with_multithreading": True,
"charset_conversion": "libiconv",
"with_libxml2": True,
"with_zlib": True,
"with_openjpeg": "deprecated",
"with_openssl": True,
"with_libpng": True,
"with_libsndfile": "deprecated",
"with_libtiff": True,
"with_tcpwrappers": False,
"builtin_dictionary": None,
"builtin_private_tags": False,
"external_dictionary": None,
"wide_io": False,
}
exports_sources = "CMakeLists.txt", "patches/**"
generators = "cmake", "cmake_find_package"
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
if self.settings.os == "Windows":
del self.options.with_tcpwrappers
# Looking into source code, it appears that OpenJPEG and libsndfile are not used
if self.options.with_openjpeg != "deprecated":
self.output.warn("with_openjpeg option is deprecated, do not use anymore")
if self.options.with_libsndfile != "deprecated":
self.output.warn("with_libsndfile option is deprecated, do not use anymore")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def requirements(self):
if self.options.charset_conversion == "libiconv":
self.requires("libiconv/1.16")
elif self.options.charset_conversion == "icu":
self.requires("icu/68.2")
if self.options.with_libxml2:
self.requires("libxml2/2.9.10")
if self.options.with_zlib:
self.requires("zlib/1.2.11")
if self.options.with_openssl:
# FIXME: Bump openssl. dcmtk build files have a logic to detect
# various openssl API but for some reason it fails with 1.1 API
self.requires("openssl/1.0.2u")
if self.options.with_libpng:
self.requires("libpng/1.6.37")
if self.options.with_libtiff:
self.requires("libtiff/4.2.0")
if self.options.get_safe("with_tcpwrappers"):
self.requires("tcp-wrappers/7.6")
def package_id(self):
del self.info.options.with_openjpeg
del self.info.options.with_libsndfile
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
# DICOM Data Dictionaries are required
self._cmake.definitions["CMAKE_INSTALL_DATADIR"] = self._dcm_datadictionary_path
self._cmake.definitions["BUILD_APPS"] = self.options.with_applications
self._cmake.definitions["DCMTK_WITH_ICONV"] = self.options.charset_conversion == "libiconv"
if self.options.charset_conversion == "libiconv":
self._cmake.definitions["WITH_LIBICONVINC"] = self.deps_cpp_info["libiconv"].rootpath
self._cmake.definitions["DCMTK_WITH_ICU"] = self.options.charset_conversion == "icu"
self._cmake.definitions["DCMTK_WITH_OPENJPEG"] = False
self._cmake.definitions["DCMTK_WITH_OPENSSL"] = self.options.with_openssl
if self.options.with_openssl:
self._cmake.definitions["WITH_OPENSSLINC"] = self.deps_cpp_info["openssl"].rootpath
self._cmake.definitions["DCMTK_WITH_PNG"] = self.options.with_libpng
if self.options.with_libpng:
self._cmake.definitions["WITH_LIBPNGINC"] = self.deps_cpp_info["libpng"].rootpath
self._cmake.definitions["DCMTK_WITH_SNDFILE"] = False
self._cmake.definitions["DCMTK_WITH_THREADS"] = self.options.with_multithreading
self._cmake.definitions["DCMTK_WITH_TIFF"] = self.options.with_libtiff
if self.options.with_libtiff:
self._cmake.definitions["WITH_LIBTIFFINC"] = self.deps_cpp_info["libtiff"].rootpath
if self.settings.os != "Windows":
self._cmake.definitions["DCMTK_WITH_WRAP"] = self.options.with_tcpwrappers
self._cmake.definitions["DCMTK_WITH_XML"] = self.options.with_libxml2
if self.options.with_libxml2:
self._cmake.definitions["WITH_LIBXMLINC"] = self.deps_cpp_info["libxml2"].rootpath
self._cmake.definitions["WITH_LIBXML_SHARED"] = self.options["libxml2"].shared
self._cmake.definitions["DCMTK_WITH_ZLIB"] = self.options.with_zlib
if self.options.with_zlib:
self._cmake.definitions["WITH_ZLIBINC"] = self.deps_cpp_info["zlib"].rootpath
self._cmake.definitions["DCMTK_ENABLE_STL"] = True
self._cmake.definitions["DCMTK_ENABLE_CXX11"] = True
self._cmake.definitions["DCMTK_ENABLE_MANPAGE"] = False
self._cmake.definitions["DCMTK_WITH_DOXYGEN"] = False
self._cmake.definitions["DCMTK_ENABLE_PRIVATE_TAGS"] = self.options.builtin_private_tags
if self.options.external_dictionary is not None:
self._cmake.definitions["DCMTK_ENABLE_EXTERNAL_DICTIONARY"] = self.options.external_dictionary
if self.options.builtin_dictionary is not None:
self._cmake.definitions["DCMTK_ENABLE_BUILTIN_DICTIONARY"] = self.options.builtin_dictionary
self._cmake.definitions["DCMTK_WIDE_CHAR_FILE_IO_FUNCTIONS"] = self.options.wide_io
self._cmake.definitions["DCMTK_WIDE_CHAR_MAIN_FUNCTION"] = self.options.wide_io
if self.settings.os == "Windows":
self._cmake.definitions["DCMTK_OVERWRITE_WIN32_COMPILER_FLAGS"] = False
if self.settings.compiler == "Visual Studio":
self._cmake.definitions["DCMTK_ICONV_FLAGS_ANALYZED"] = True
self._cmake.definitions["DCMTK_COMPILE_WIN32_MULTITHREADED_DLL"] = "MD" in str(self.settings.compiler.runtime)
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def _patch_sources(self):
for patch in self.conan_data["patches"][self.version]:
tools.patch(**patch)
def build(self):
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy(pattern="COPYRIGHT", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "etc"))
tools.rmdir(os.path.join(self.package_folder, "share"))
self._create_cmake_module_alias_targets(
os.path.join(self.package_folder, self._module_file_rel_path),
{target: "DCMTK::{}".format(target) for target in self._dcmtk_components.keys()}
)
@staticmethod
def _create_cmake_module_alias_targets(module_file, targets):
content = ""
for alias, aliased in targets.items():
content += textwrap.dedent("""\
if(TARGET {aliased} AND NOT TARGET {alias})
add_library({alias} INTERFACE IMPORTED)
set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
endif()
""".format(alias=alias, aliased=aliased))
tools.save(module_file, content)
@property
def _module_subfolder(self):
return os.path.join("lib", "cmake")
@property
def _module_file_rel_path(self):
return os.path.join(self._module_subfolder,
"conan-official-{}-targets.cmake".format(self.name))
@property
def _dcmtk_components(self):
def charset_conversion():
if bool(self.options.charset_conversion):
return ["libiconv::libiconv"] if self.options.charset_conversion == "libiconv" else ["icu::icu"]
return []
def zlib():
return ["zlib::zlib"] if self.options.with_zlib else []
def png():
return ["libpng::libpng"] if self.options.with_libpng else []
def tiff():
return ["libtiff::libtiff"] if self.options.with_libtiff else []
def openssl():
return ["openssl::openssl"] if self.options.with_openssl else []
def tcpwrappers():
return ["tcp-wrappers::tcp-wrappers"] if self.options.get_safe("with_tcpwrappers") else []
def xml2():
return ["libxml2::libxml2"] if self.options.with_libxml2 else []
charls = "dcmtkcharls" if tools.Version("3.6.6") <= self.version else "charls"
return {
"ofstd" : charset_conversion(),
"oflog" : ["ofstd"],
"dcmdata" : ["ofstd", "oflog"] + zlib(),
"i2d" : ["dcmdata"],
"dcmimgle": ["ofstd", "oflog", "dcmdata"],
"dcmimage": ["oflog", "dcmdata", "dcmimgle"] + png() + tiff(),
"dcmjpeg" : ["ofstd", "oflog", "dcmdata", "dcmimgle", "dcmimage", "ijg8", "ijg12", "ijg16"],
"ijg8" : [],
"ijg12" : [],
"ijg16" : [],
"dcmjpls" : ["ofstd", "oflog", "dcmdata", "dcmimgle", "dcmimage", charls],
charls : ["ofstd", "oflog"],
"dcmtls" : ["ofstd", "dcmdata", "dcmnet"] + openssl(),
"dcmnet" : ["ofstd", "oflog", "dcmdata"] + tcpwrappers(),
"dcmsr" : ["ofstd", "oflog", "dcmdata", "dcmimgle", "dcmimage"] + xml2(),
"cmr" : ["dcmsr"],
"dcmdsig" : ["ofstd", "dcmdata"] + openssl(),
"dcmwlm" : ["ofstd", "dcmdata", "dcmnet"],
"dcmqrdb" : ["ofstd", "dcmdata", "dcmnet"],
"dcmpstat": ["ofstd", "oflog", "dcmdata", "dcmimgle", "dcmimage", "dcmnet", "dcmdsig", "dcmtls", "dcmsr", "dcmqrdb"] + openssl(),
"dcmrt" : ["ofstd", "oflog", "dcmdata", "dcmimgle"],
"dcmiod" : ["dcmdata", "ofstd", "oflog"],
"dcmfg" : ["dcmiod", "dcmdata", "ofstd", "oflog"],
"dcmseg" : ["dcmfg", "dcmiod", "dcmdata", "ofstd", "oflog"],
"dcmtract": ["dcmiod", "dcmdata", "ofstd", "oflog"],
"dcmpmap" : ["dcmfg", "dcmiod", "dcmdata", "ofstd", "oflog"],
}
@property
def _dcm_datadictionary_path(self):
return os.path.join(self.package_folder, "bin", "share")
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "DCMTK"
self.cpp_info.names["cmake_find_package_multi"] = "DCMTK"
def register_components(components):
for target_lib, requires in components.items():
self.cpp_info.components[target_lib].names["cmake_find_package"] = target_lib
self.cpp_info.components[target_lib].names["cmake_find_package_multi"] = target_lib
self.cpp_info.components[target_lib].builddirs.append(self._module_subfolder)
self.cpp_info.components[target_lib].build_modules["cmake_find_package"] = [self._module_file_rel_path]
self.cpp_info.components[target_lib].build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
self.cpp_info.components[target_lib].libs = [target_lib]
self.cpp_info.components[target_lib].includedirs.append(os.path.join("include", "dcmtk"))
self.cpp_info.components[target_lib].requires = requires
if self.settings.os == "Windows":
self.cpp_info.components["ofstd"].system_libs.extend([
"iphlpapi", "ws2_32", "netapi32", "wsock32"
])
elif self.settings.os == "Linux":
self.cpp_info.components["ofstd"].system_libs.append("m")
if self.options.with_multithreading:
self.cpp_info.components["ofstd"].system_libs.append("pthread")
register_components(self._dcmtk_components)
dcmdictpath = os.path.join(self._dcm_datadictionary_path, "dcmtk", "dicom.dic")
self.output.info("Settings DCMDICTPATH environment variable: {}".format(dcmdictpath))
self.env_info.DCMDICTPATH = dcmdictpath
if self.options.with_applications:
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bin_path))
self.env_info.PATH.append(bin_path)
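# Hypothetical consumer-side sketch (not part of this recipe): a downstream
# conanfile would normally just declare the dependency and set options, e.g.
#   def requirements(self):
#       self.requires("dcmtk/3.6.6")  # version string is illustrative
#   default_options = {"dcmtk:with_applications": True}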
|
the-stack_106_14264
|
"""send -- Construct and send Apple events. """
from .ae import newappleevent, stringsforosstatus, MacOSError
from . import kae
from .aemcodecs import Codecs
__all__ = ['Event', 'EventError']
######################################################################
# PRIVATE
######################################################################
_defaultcodecs = Codecs()
def sendappleevent(evt, flags, timeout):
""" Default function for sending Apple events.
evt : aem.ae.AEDesc -- the AppleEvent to send
flags : int -- send mode flags
timeout : int -- timeout delay
Result : aem.ae.AEDesc -- the reply AppleEvent
"""
return evt.send(flags, timeout)
######################################################################
# PUBLIC
######################################################################
class Event:
"""Represents an Apple event (serialised message)."""
def __init__(self, address, event, params={}, atts={},
transaction=kae.kAnyTransactionID, returnid= kae.kAutoGenerateReturnID,
codecs=_defaultcodecs, createproc=newappleevent, sendproc=sendappleevent):
"""Called by aem.Application.event(); users shouldn't instantiate this class themselves.
address : AEAddressDesc -- the target application
event : bytes -- 8-letter code indicating event's class and id, e.g. b'coregetd'
params : dict -- a dict of form {AE_code:anything,...} containing zero or more event parameters (message arguments)
atts : dict -- a dict of form {AE_code:anything,...} containing zero or more event attributes (event info)
transaction : int -- transaction number (default = kAnyTransactionID)
returnid : int -- reply event's ID (default = kAutoGenerateReturnID)
codecs : Codecs -- user can provide custom parameter & result encoder/decoder (default = standard codecs); supplied by Application class
createproc : function -- function to create a new AppleEvent descriptor
sendproc : function -- function to send an AppleEvent descriptor
"""
self._eventcode = event
self._codecs = codecs
self._sendproc = sendproc
self.AEM_event = createproc(event[:4], event[4:], address, returnid, transaction)
for key, value in atts.items():
self.AEM_event.setattr(key, codecs.pack(value))
for key, value in params.items():
self.AEM_event.setparam(key, codecs.pack(value))
# Public
def send(self, timeout= kae.kAEDefaultTimeout, flags= kae.kAECanSwitchLayer + kae.kAEWaitReply):
"""Send this Apple event (may be called any number of times).
timeout : int | aem.k.DefaultTimeout | aem.k.NoTimeout -- number of ticks to wait for target process
to reply before raising timeout error (default=DefaultTimeout)
flags : int -- bitwise flags [1] indicating how target process should handle event (default=WaitReply)
Result : anything -- value returned by application, if any
[1] aem.k provides the following constants for convenience:
[ aem.k.NoReply | aem.k.QueueReply | aem.k.WaitReply ]
[ aem.k.DontReconnect ]
[ aem.k.WantReceipt ]
[ aem.k.NeverInteract | aem.k.CanInteract | aem.k.AlwaysInteract ]
[ aem.k.CanSwitchLayer ]
"""
try:
replyevent = self._sendproc(self.AEM_event, flags, timeout)
except MacOSError as err: # an OS-level error occurred
if not (self._eventcode == b'aevtquit' and err.args[0] == -609): # Ignore invalid connection error (-609) when quitting
raise EventError(err.args[0]) from err
else: # decode application's reply, if any
if replyevent.type != kae.typeNull:
eventresult = dict([replyevent.getitem(i + 1, kae.typeWildCard)
for i in range(replyevent.count())])
# note: while Apple docs say that both keyErrorNumber and keyErrorString should be
# tested for when determining if an error has occurred, AppleScript tests for keyErrorNumber
# only, so do the same here for compatibility
if kae.keyErrorNumber in eventresult: # an application-level error occurred
# note: uses standard codecs to unpack error info to ensure consistent conversion
errornum = _defaultcodecs.unpack(eventresult[kae.keyErrorNumber])
if errornum != 0: # Stupid Finder returns non-error error number and message for successful move/duplicate command, so just ignore it
errormsg = eventresult.get(kae.keyErrorString)
if errormsg:
errormsg = _defaultcodecs.unpack(errormsg)
raise EventError(errornum, errormsg, eventresult)
if kae.keyAEResult in eventresult: # application has returned a value
# note: unpack result with [optionally] user-specified codecs, allowing clients to customise unpacking (e.g. appscript)
return self._codecs.unpack(eventresult[kae.keyAEResult])
######################################################################
class EventError(MacOSError):
""" Raised by aem.Event.send() when sending an event fails; contains error information
provided by Apple Event Manager or target application.
Notes:
- the 'raw' attribute contains either a dict containing the reply event's
raw parameters, or an empty dict if the error occurred while sending
the outgoing event; used by appscript.CommandError; third-parties
should avoid using it directly
"""
_carbonerrors = { # Following error descriptions are mostly cribbed from AppleScript Language Guide.
# OS errors
-34: "Disk is full.",
-35: "Disk wasn't found.",
-37: "Bad name for file.",
-38: "File wasn't open.",
-39: "End of file error.",
-42: "Too many files open.",
-43: "File wasn't found.",
-44: "Disk is write protected.",
-45: "File is locked.",
-46: "Disk is locked.",
-47: "File is busy.",
-48: "Duplicate file name.",
-49: "File is already open.",
-50: "Parameter error.",
-51: "File reference number error.",
-61: "File not open with write permission.",
-108: "Out of memory.",
-120: "Folder wasn't found.",
-124: "Disk is disconnected.",
-128: "User canceled.",
-192: "A resource wasn't found.",
-600: "Application isn't running.",
-601: "Not enough room to launch application with special requirements.",
-602: "Application is not 32-bit clean.",
-605: "More memory is needed than is specified in the size resource.",
-606: "Application is background-only.",
-607: "Buffer is too small.",
-608: "No outstanding high-level event.",
-609: "Connection is invalid.",
-904: "Not enough system memory to connect to remote application.",
-905: "Remote access is not allowed.",
-906: "Application isn't running or program linking isn't enabled.",
-915: "Can't find remote machine.",
-30720: "Invalid date and time.",
# AE errors
-1700: "Can't make some data into the expected type.",
-1701: "Some parameter is missing for command.",
-1702: "Some data could not be read.",
-1703: "Some data was the wrong type.",
-1704: "Some parameter was invalid.",
-1705: "Operation involving a list item failed.",
-1706: "Need a newer version of the Apple Event Manager.",
-1707: "Event isn't an Apple event.",
-1708: "Application could not handle this command.",
-1709: "AEResetTimer was passed an invalid reply.",
-1710: "Invalid sending mode was passed.",
-1711: "User canceled out of wait loop for reply or receipt.",
-1712: "Apple event timed out.",
-1713: "No user interaction allowed.",
-1714: "Wrong keyword for a special function.",
-1715: "Some parameter wasn't understood.",
-1716: "Unknown Apple event address type.",
-1717: "The handler is not defined.",
-1718: "Reply has not yet arrived.",
-1719: "Can't get reference. Invalid index.",
-1720: "Invalid range.",
-1721: "Wrong number of parameters for command.",
-1723: "Can't get reference. Access not allowed.",
-1725: "Illegal logical operator called.",
-1726: "Illegal comparison or logical.",
-1727: "Expected a reference.",
-1728: "Can't get reference.",
-1729: "Object counting procedure returned a negative count.",
-1730: "Container specified was an empty list.",
-1731: "Unknown object type.",
-1739: "Attempting to perform an invalid operation on a null descriptor.",
# Application scripting errors
-10000: "Apple event handler failed.",
-10001: "Type error.",
-10002: "Invalid key form.",
-10003: "Can't set reference to given value. Access not allowed.",
-10004: "A privilege violation occurred.",
-10005: "The read operation wasn't allowed.",
-10006: "Can't set reference to given value.",
-10007: "The index of the event is too large to be valid.",
-10008: "The specified object is a property, not an element.",
-10009: "Can't supply the requested descriptor type for the data.",
-10010: "The Apple event handler can't handle objects of this class.",
-10011: "Couldn't handle this command because it wasn't part of the current transaction.",
-10012: "The transaction to which this command belonged isn't a valid transaction.",
-10013: "There is no user selection.",
-10014: "Handler only handles single objects.",
-10015: "Can't undo the previous Apple event or user action.",
-10023: "Enumerated value is not allowed for this property.",
-10024: "Class can't be an element of container.",
-10025: "Illegal combination of properties settings.",
}
# Following Cocoa Scripting error descriptions taken from:
# http://developer.apple.com/documentation/Cocoa/Reference/Foundation/ObjC_classic/Classes/NSScriptCommand.html
# http://developer.apple.com/documentation/Cocoa/Reference/Foundation/ObjC_classic/Classes/NSScriptObjectSpecifier.html
_cocoaerrors = (
('NSReceiverEvaluationScriptError', 'The object or objects specified by the direct parameter to a command could not be found.'),
('NSKeySpecifierEvaluationScriptError', 'The object or objects specified by a key (for commands that support key specifiers) could not be found.'),
('NSArgumentEvaluationScriptError', 'The object specified by an argument could not be found.'),
('NSReceiversCantHandleCommandScriptError', "The receivers don't support the command sent to them."),
('NSRequiredArgumentsMissingScriptError', 'An argument (or more than one argument) is missing.'),
('NSArgumentsWrongScriptError', 'An argument (or more than one argument) is of the wrong type or is otherwise invalid.'),
('NSUnknownKeyScriptError', 'An unidentified error occurred; indicates an error in the scripting support of your application.'),
('NSInternalScriptError', 'An unidentified internal error occurred; indicates an error in the scripting support of your application.'),
('NSOperationNotSupportedForKeyScriptError', 'The implementation of a scripting command signaled an error.'),
('NSCannotCreateScriptCommandError', 'Could not create the script command; an invalid or unrecognized Apple event was received.'),
('NSNoSpecifierError', 'No error encountered.'),
('NSNoTopLevelContainersSpecifierError', 'Someone called evaluate with nil.'),
('NSContainerSpecifierError', 'Error evaluating container specifier.'),
('NSUnknownKeySpecifierError', 'Receivers do not understand the key.'),
('NSInvalidIndexSpecifierError', 'Index out of bounds.'),
('NSInternalSpecifierError', 'Other internal error.'),
('NSOperationNotSupportedForKeySpecifierError', 'Attempt made to perform an unsupported operation on some key.'),
)
def __init__(self, number, message=None, raw=None):
MacOSError.__init__(self, number)
self._number, self._message, self._raw = number, str(message or ''), raw
raw = property(lambda self: self._raw or {},
doc="dict -- raw error data from reply event, if any (note: clients should not need to use this directly)")
def __repr__(self):
return "aem.EventError({!r}, {!r}, {!r})".format(self._number, self._message, self._raw)
def __int__(self):
return self._number
def __str__(self):
return "Command failed: {} ({})".format(self.errormessage, self.errornumber)
# basic error info (an error number is always given by AEM/application;
# message is either supplied by application or generated here)
errornumber = property(lambda self: self._number, doc="int -- Mac OS error number")
def errormessage(self):
message = self._message
if self._number > 0 and message:
for name, description in self._cocoaerrors:
if message.startswith(name):
message = '{} ({})'.format(message, description)
break
elif not message:
message = self._carbonerrors.get(self._number)
if not message:
message = stringsforosstatus(self._number)[1] or 'OS error'
return message
errormessage = property(errormessage,
doc="str -- application-supplied/generic error description")
# extended error info (some apps may return additional error info, though most don't)
def _errorinfo(self, key):
if self._raw:
desc = self._raw.get(key)
if desc:
return _defaultcodecs.unpack(desc)
return None
offendingobject = property(lambda self: self._errorinfo(kae.kOSAErrorOffendingObject),
doc="anything | None -- object that caused the error, if given by application")
expectedtype = property(lambda self: self._errorinfo(kae.kOSAErrorExpectedType),
doc="anything | None -- object that caused a coercion error, if given by application")
partialresult = property(lambda self: self._errorinfo(kae.kOSAErrorPartialResult),
doc="anything | None -- part of return value constructed before error occurred, if given by application")
|
the-stack_106_14265
|
import re
from streamer import Streamer
class ChannelChooser(object):
NUM_PER_PAGE = 15
channel_list = []
search_filter = None
def __init__(self, channels):
self.full_channel_list = channels
def get_num_pages(self, channel_list):
        num_pages = (len(channel_list) + self.NUM_PER_PAGE - 1) // self.NUM_PER_PAGE  # ceil, so a partial last page is still reachable
if num_pages < 1:
return 1
else:
return num_pages
def get_user_input(self):
user_input = input()
if user_input == 'q':
exit(0)
elif user_input[:1] == '!':
channel_id = user_input[1:]
self.stream_channel(channel_id)
elif user_input[:1] == 'r':
self.search_filter = None
self.display(self.full_channel_list)
elif str.isdigit(user_input):
try:
page_number = int(user_input)
except:
print("Please enter a valid number. Defaulting to 1")
page_number = 1
self.display(self.channel_list, page_number)
else:
# search
self.search_filter = user_input
p = re.compile(user_input, re.IGNORECASE )
search_list = []
for channel in self.full_channel_list:
if p.match(channel.getName()):
search_list.append(channel)
self.display(search_list)
def stream_channel(self, channel_id):
for channel in self.full_channel_list:
if channel.getChannelId() == channel_id:
Streamer(channel).stream()
def display(self, channels=None, page=1):
if channels is None:
self.channel_list = self.full_channel_list
else:
self.channel_list = channels
if self.search_filter is None:
print("All channels:")
else:
print("Channels matching '%s'" % self.search_filter)
num_pages = self.get_num_pages(self.channel_list)
offset = 0
limit = self.NUM_PER_PAGE
if page > num_pages:
print("Bad page number, only %s pages. defaulting to 1" % int(num_pages))
page = 1
elif page > 0:
offset = page * self.NUM_PER_PAGE - self.NUM_PER_PAGE
channel_counter = 0
num_printed = 0
for channel in self.channel_list:
if channel_counter >= offset and num_printed < limit:
print("%s: %s" % (channel.getChannelId(), channel.getName()))
num_printed += 1
channel_counter += 1
print("Showing page %d of %d" % (page, num_pages))
print("Choose page to show, r to reset, q to quit or ![channelnumber] to broadcast the channel: ")
self.get_user_input()
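# Hypothetical usage sketch: `channels` is expected to be a list of objects exposing
# getChannelId() and getName(); typing ![channelnumber] hands the matching channel
# to Streamer(channel).stream().
#   chooser = ChannelChooser(channels)
#   chooser.display()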
|
the-stack_106_14266
|
# MIT License
# Copyright (c) 2018 Guillaume St-Onge
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from _SamplableSet import *
#wrap some methods to return python type errors
def error_decorator(error_type):
def decorator(func):
def wrapper(*args, **kwargs):
try:
val = func(*args,**kwargs)
except IndexError as err:
raise error_type(err)
return val
return wrapper
return decorator
template_classes = {
'int': IntSamplableSet,
'str': StringSamplableSet,
'2int': Tuple2IntSamplableSet,
'3int': Tuple3IntSamplableSet,
'2str': Tuple2StringSamplableSet,
'3str': Tuple3StringSamplableSet,
'Int': IntSamplableSet,
'String': StringSamplableSet,
'Tuple2Int': Tuple2IntSamplableSet,
'Tuple3Int': Tuple3IntSamplableSet,
'Tuple2String': Tuple2StringSamplableSet,
'Tuple3String': Tuple3StringSamplableSet
}
cpp_methods = ['size', 'total_weight', 'count', 'insert', 'next',
'init_iterator', 'set_weight', 'get_weight', 'empty',
'get_at_iterator', 'erase', 'clear']
class SamplableSet:
"""
This class implements a set which is samplable according to the weight distribution of the elements.
The SamplableSet can be instanciated empty or from an iterable of pairs of elements and weights respectively.
This class is a wrapper around a C++ implementation.
"""
def __init__(self, min_weight, max_weight, elements_weights=None,
cpp_type=None):
"""
Creates a new SamplableSet instance.
Args:
min_weight (float): Minimum weight a given element can have. This is needed for a good repartition of the elements inside the internal tree structure.
max_weight (float): Maximum weight a given element can have. This is needed for a good repartition of the elements inside the internal tree structure.
elements_weights (iterable of iterables or dict, optional): If an iterable, should be yield iterables of 2 items (element, weight) with which the set will be instanciated. If a dict, keys should be the elements and values should be the weights. If not specified, the set will be empty.
cpp_type (str, optional): Type used in the C++ implementation. If 'elements_weights' is specified, the type will be infered from it.
"""
if min_weight <= 0 or max_weight == float('inf') or\
max_weight < min_weight:
raise ValueError('Invalid min_weight or max_weight')
self.max_weight = max_weight
self.min_weight = min_weight
self.cpp_type = cpp_type
# Unpacking
if elements_weights:
if isinstance(elements_weights, dict):
elements_weights = elements_weights.items()
first_element, first_weight = next(iter(elements_weights))
# Inferring cpp_type
if self.cpp_type is None:
self._infer_type(first_element)
# Instanciate the set
if self.cpp_type is not None:
self._samplable_set = template_classes[self.cpp_type](min_weight, max_weight)
self._wrap_methods()
else:
self._wrap_methods_unspecified()
# Initialize the set
if elements_weights:
self[first_element] = first_weight
for element, weight in elements_weights:
self[element] = weight
def _infer_type(self,element):
if isinstance(element, int):
self.cpp_type = 'int'
elif isinstance(element, str):
self.cpp_type = 'str'
elif isinstance(element, tuple):
m = len(element)
if m in {2,3}:
if isinstance(element[0], int):
self.cpp_type = f'{m}int'
elif isinstance(element[0], str):
self.cpp_type = f'{m}str'
else:
raise ValueError('Cannot infer the type from the element')
else:
raise ValueError('Cannot infer the type from the element')
else:
raise ValueError('Cannot infer the type from the element')
def _unspecified_method(self):
raise RuntimeError('The method is undefined until the underlying type is known')
def _wrap_methods_unspecified(self):
"""
Assigns the methods of the C++ class to a dummy method that raises
a RuntimeError
"""
for func_name in cpp_methods:
setattr(self, func_name, self._unspecified_method)
def _wrap_methods(self):
"""
Assigns the methods of the C++ class to the wrapper.
"""
for func_name in cpp_methods:
setattr(self, func_name, getattr(self._samplable_set, func_name))
#decorates method to return python error
self.next = error_decorator(StopIteration)(self.next)
self.init_iterator = error_decorator(StopIteration)(self.init_iterator)
self.get_weight = error_decorator(KeyError)(self.get_weight)
self.cpp_sample = error_decorator(KeyError)(self._samplable_set.sample)
def __contains__(self, element):
return True if self.count(element) else False
def __getitem__(self, element):
return self.get_weight(element)
def __setitem__(self, element, weight):
if self.cpp_type is None:
self._infer_type(element)
#instanciate the set
self._samplable_set = template_classes[self.cpp_type](
self.min_weight,self.max_weight)
self._wrap_methods()
self.set_weight(element,weight)
def __delitem__(self, element):
self.erase(element)
def __str__(self):
if self.cpp_type is None:
outstr = 'SamplableSet of unspecified type'
else:
outstr = f'SamplableSet of {self.cpp_type} '\
+ f'containing {len(self)} element'\
+ ('s' if len(self) > 1 else '')
return outstr
def __repr__(self):
return str(self)
def __len__(self):
return self.size()
def copy(self):
cpp_copy_samplable_set = type(self._samplable_set)(self._samplable_set) # Copy of the C++ class with the copy constructor
wrapped_samplable_set_copy = type(self)(self.min_weight, self.max_weight, cpp_type=self.cpp_type) # New wrapper object to be returned
# Link the wrapper with the wrappee
wrapped_samplable_set_copy._samplable_set = cpp_copy_samplable_set
wrapped_samplable_set_copy._wrap_methods()
return wrapped_samplable_set_copy
def __deepcopy__(self, memo_dict):
return self.copy()
def __copy__(self):
return self.copy()
def __iter__(self):
return self.element_generator()
def sample(self, n_samples=1, replace=True):
"""
Randomly samples the set according to the weights of each element.
Args:
n_samples (int, optional): If equal to 1, returns one element. If greater than 1, returns a generator that will return 'n_samples' elements.
Returns: An element of the set or a generator of 'n_samples' elements.
"""
if n_samples == 1:
x = self.cpp_sample()
if not replace:
self.erase(x[0])
return x
else:
return self.sample_generator(n_samples, replace)
def sample_generator(self, n_samples, replace):
for _ in range(n_samples):
x = self.cpp_sample()
if not replace:
self.erase(x[0])
yield x
def element_generator(self):
try:
self.init_iterator()
while True:
yield self.get_at_iterator()
self.next()
except StopIteration:
pass
@staticmethod
def seed(seed_value):
IntSamplableSet.seed(seed_value) #does not matter which seeds the RNG
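# Hypothetical usage sketch, assuming the compiled _SamplableSet extension is importable:
#   s = SamplableSet(1.0, 100.0, elements_weights={'a': 2.0, 'b': 50.0})
#   element, weight = s.sample()  # weighted random draw, returned as a pair
#   s['c'] = 10.0                 # insert or update a weight (type inferred as 'str')
#   'a' in s                      # membership test via __contains__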
|
the-stack_106_14269
|
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
"""Helper functions for writing tests."""
import asyncio
import json
import logging
import urllib.request
from typing import Dict
from pytest_operator.plugin import OpsTest
logger = logging.getLogger(__name__)
async def get_unit_address(ops_test: OpsTest, app_name: str, unit_num: int) -> str:
"""Get private address of a unit."""
status = await ops_test.model.get_status() # noqa: F821
return status["applications"][app_name]["units"][f"{app_name}/{unit_num}"]["address"]
def interleave(l1: list, l2: list) -> list:
"""Interleave two lists.
>>> interleave([1,2,3], ['a', 'b', 'c'])
[1, 'a', 2, 'b', 3, 'c']
Reference: https://stackoverflow.com/a/11125298/3516684
"""
return [x for t in zip(l1, l2) for x in t]
async def cli_upgrade_from_path_and_wait(
ops_test: OpsTest,
path: str,
alias: str,
resources: Dict[str, str] = None,
wait_for_status: str = None,
):
if resources is None:
resources = {}
resource_pairs = [f"{k}={v}" for k, v in resources.items()]
resource_arg_prefixes = ["--resource"] * len(resource_pairs)
resource_args = interleave(resource_arg_prefixes, resource_pairs)
cmd = [
"juju",
"refresh",
"--path",
path,
alias,
*resource_args,
]
retcode, stdout, stderr = await ops_test.run(*cmd)
assert retcode == 0, f"Upgrade failed: {(stderr or stdout).strip()}"
logger.info(stdout)
await ops_test.model.wait_for_idle(apps=[alias], status=wait_for_status, timeout=120)
class IPAddressWorkaround:
"""Context manager for deploying a charm that needs to have its IP address.
Due to a juju bug, occasionally some charms finish a startup sequence without
having an ip address returned by `bind_address`.
https://bugs.launchpad.net/juju/+bug/1929364
Issuing dummy update_status just to trigger an event, and then restore it.
"""
def __init__(self, ops_test: OpsTest):
self.ops_test = ops_test
async def __aenter__(self):
"""On entry, the update status interval is set to the minimum 10s."""
config = await self.ops_test.model.get_config()
self.revert_to = config["update-status-hook-interval"]
await self.ops_test.model.set_config({"update-status-hook-interval": "10s"})
return self
async def __aexit__(self, exc_type, exc_value, exc_traceback):
"""On exit, the update status interval is reverted to its original value."""
await self.ops_test.model.set_config({"update-status-hook-interval": self.revert_to})
async def get_leader_unit_num(ops_test: OpsTest, app_name: str):
units = ops_test.model.applications[app_name].units
is_leader = [await units[i].is_leader_from_status() for i in range(len(units))]
logger.info("Leaders: %s", is_leader)
return is_leader.index(True)
async def is_leader_elected(ops_test: OpsTest, app_name: str):
units = ops_test.model.applications[app_name].units
return any([await units[i].is_leader_from_status() for i in range(len(units))])
async def block_until_leader_elected(ops_test: OpsTest, app_name: str):
# await ops_test.model.block_until(is_leader_elected)
# block_until does not take async (yet?) https://github.com/juju/python-libjuju/issues/609
while not await is_leader_elected(ops_test, app_name):
await asyncio.sleep(5)
async def is_alertmanager_unit_up(ops_test: OpsTest, app_name: str, unit_num: int):
address = await get_unit_address(ops_test, app_name, unit_num)
url = f"http://{address}:9093"
logger.info("am public address: %s", url)
response = urllib.request.urlopen(f"{url}/api/v2/status", data=None, timeout=2.0)
return response.code == 200 and "versionInfo" in json.loads(response.read())
async def is_alertmanager_up(ops_test: OpsTest, app_name: str):
return all(
[
            await is_alertmanager_unit_up(ops_test, app_name, unit_num)
for unit_num in range(len(ops_test.model.applications[app_name].units))
]
)
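# Hedged usage sketch: a convenience coroutine composing the helpers above. The
# application name is illustrative; a real test would await this with the
# pytest-operator `ops_test` fixture. The leading underscore keeps pytest from
# collecting it as a test.
async def _example_wait_and_check(ops_test: OpsTest, app_name: str = "alertmanager"):
    async with IPAddressWorkaround(ops_test):
        await block_until_leader_elected(ops_test, app_name)
    assert await is_alertmanager_up(ops_test, app_name)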
|
the-stack_106_14272
|
from typing import Any, TYPE_CHECKING
from urllib.parse import urlencode
from dis_snek.client.const import MISSING, Absent
from ..route import Route
from dis_snek.client.utils.serializer import dict_filter_missing
if TYPE_CHECKING:
from dis_snek.models.discord.snowflake import Snowflake_Type
class ScheduledEventsRequests:
request: Any
async def list_schedules_events(self, guild_id: "Snowflake_Type", with_user_count: bool = False) -> list[dict]:
"""
Get the scheduled events for a guild.
parameters:
guild_id: The guild to get scheduled events from
with_user_count: Whether to include the user count in the response
returns:
List of Scheduled Events or None
"""
return await self.request(
Route("GET", f"/guilds/{guild_id}/scheduled-events?with_user_count={with_user_count}")
)
async def get_scheduled_event(
self, guild_id: "Snowflake_Type", scheduled_event_id: "Snowflake_Type", with_user_count: bool = False
) -> dict:
"""
Get a scheduled event for a guild.
parameters:
            guild_id: The guild to get scheduled event from
            scheduled_event_id: The scheduled event to get
            with_user_count: Whether to include the user count in the response
returns:
Scheduled Event or None
"""
return await self.request(
Route("GET", f"/guilds/{guild_id}/scheduled-events/{scheduled_event_id}?with_user_count={with_user_count}")
)
async def create_scheduled_event(
self,
guild_id: "Snowflake_Type",
payload: dict,
reason: Absent[str] = MISSING,
) -> dict:
"""
Create a scheduled event for a guild.
parameters:
guild_id: The guild to create scheduled event from
payload: The scheduled event payload
reason: The reason to be displayed in audit logs
returns:
Scheduled Event or None
"""
return await self.request(Route("POST", f"/guilds/{guild_id}/scheduled-events"), data=payload, reason=reason)
async def modify_scheduled_event(
self,
guild_id: "Snowflake_Type",
scheduled_event_id: "Snowflake_Type",
payload: dict,
reason: Absent[str] = MISSING,
) -> dict:
"""
Modify a scheduled event for a guild.
parameters:
guild_id: The guild to modify scheduled event from
scheduled_event_id: The scheduled event to modify
payload: The payload to modify the scheduled event with
reason: The reason to be displayed in audit logs
returns:
Scheduled Event or None
"""
return await self.request(
Route("PATCH", f"/guilds/{guild_id}/scheduled-events/{scheduled_event_id}"), data=payload, reason=reason
)
async def delete_scheduled_event(
self,
guild_id: "Snowflake_Type",
scheduled_event_id: "Snowflake_Type",
reason: Absent[str] = MISSING,
) -> dict:
"""
Delete a scheduled event for a guild.
parameters:
guild_id: The guild to delete scheduled event from
scheduled_event_id: The scheduled event to delete
reason: The reason to be displayed in audit logs
returns:
Scheduled Event or None
"""
return await self.request(
Route("DELETE", f"/guilds/{guild_id}/scheduled-events/{scheduled_event_id}"), reason=reason
)
async def get_scheduled_event_users(
self,
guild_id: "Snowflake_Type",
scheduled_event_id: "Snowflake_Type",
limit: int = 100,
with_member: bool = False,
before: "Snowflake_Type" = MISSING,
after: "Snowflake_Type" = MISSING,
) -> list[dict]:
"""
Get the users for a scheduled event.
parameters:
guild_id: The guild to get scheduled event users from
scheduled_event_id: The scheduled event to get users from
limit: how many users to receive from the event
with_member: include guild member data if it exists
before: consider only users before given user id
after: consider only users after given user id
returns:
List of Scheduled Event Users or None
"""
query_params = urlencode(
dict_filter_missing({"limit": limit, "with_member": with_member, "before": before, "after": after})
)
return await self.request(
Route(
"GET",
f"/guilds/{guild_id}/scheduled-events/{scheduled_event_id}/users?{query_params}",
)
)
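# Hedged usage sketch: `http` is assumed to be a client object that mixes in
# ScheduledEventsRequests (e.g. the library's HTTP client); the pruning
# criterion and the "user_count" field access are illustrative only.
async def _example_prune_empty_events(http: "ScheduledEventsRequests", guild_id: "Snowflake_Type") -> None:
    events = await http.list_schedules_events(guild_id, with_user_count=True)
    for event in events:
        if event.get("user_count", 0) == 0:
            await http.delete_scheduled_event(guild_id, event["id"], reason="no interested users")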
|
the-stack_106_14273
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenTestQueryModel(object):
def __init__(self):
self._user_name = None
self._user_type = None
@property
def user_name(self):
return self._user_name
@user_name.setter
def user_name(self, value):
self._user_name = value
@property
def user_type(self):
return self._user_type
@user_type.setter
def user_type(self, value):
if isinstance(value, list):
self._user_type = list()
for i in value:
self._user_type.append(i)
def to_alipay_dict(self):
params = dict()
if self.user_name:
if hasattr(self.user_name, 'to_alipay_dict'):
params['user_name'] = self.user_name.to_alipay_dict()
else:
params['user_name'] = self.user_name
if self.user_type:
if isinstance(self.user_type, list):
for i in range(0, len(self.user_type)):
element = self.user_type[i]
if hasattr(element, 'to_alipay_dict'):
self.user_type[i] = element.to_alipay_dict()
if hasattr(self.user_type, 'to_alipay_dict'):
params['user_type'] = self.user_type.to_alipay_dict()
else:
params['user_type'] = self.user_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenTestQueryModel()
if 'user_name' in d:
o.user_name = d['user_name']
if 'user_type' in d:
o.user_type = d['user_type']
return o
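if __name__ == '__main__':
    # Hedged usage sketch: the field values are illustrative; this only shows
    # the serialize / deserialize round trip defined above.
    model = AlipayOpenTestQueryModel()
    model.user_name = 'alice'
    model.user_type = ['buyer', 'seller']
    payload = model.to_alipay_dict()
    print(json.dumps(payload))
    restored = AlipayOpenTestQueryModel.from_alipay_dict(payload)
    assert restored.user_name == 'alice'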
|
the-stack_106_14274
|
"""Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import norm, safe_sparse_dot
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
    smallest value of the scores' dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
sample1, sample2, ... : array_like, sparse matrices
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway`` that should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
def f_classif(X, y):
"""Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
    X : {array-like, sparse matrix} shape = [n_samples, n_features]
        The set of regressors that will be tested sequentially.
    y : array of shape(n_samples)
        The target vector (class labels).
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
See also
--------
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain only non-negative features such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
def f_regression(X, y, center=True):
"""Univariate linear regression tests.
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 3 steps:
1. The regressor of interest and the data are orthogonalized
wrt constant regressors.
2. The cross correlation between data and regressors is computed.
3. It is converted to an F score then to a p-value.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
    X : {array-like, sparse matrix} shape = (n_samples, n_features)
        The set of regressors that will be tested sequentially.
    y : array of shape(n_samples,)
        The target vector.
    center : bool, default=True
        If True, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
"""
if issparse(X) and center:
raise ValueError("center=True only allowed for dense data")
    X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
if center:
y = y - np.mean(y)
X = X.copy('F') # faster in fortran
X -= X.mean(axis=0)
# compute the correlation
corr = safe_sparse_dot(y, X)
# XXX could use corr /= row_norms(X.T) here, but the test doesn't pass
corr /= np.asarray(np.sqrt(safe_sqr(X).sum(axis=0))).ravel()
corr /= norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
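# Hedged usage sketch (not part of scikit-learn's public API): score three
# features of a tiny non-negative toy problem with the univariate tests above.
def _example_score_functions():
    X_demo = np.array([[1., 0., 3.], [2., 1., 0.], [0., 4., 1.], [3., 2., 2.]])
    y_demo = np.array([0, 0, 1, 1])
    f_vals, f_pvals = f_classif(X_demo, y_demo)        # ANOVA F-test per feature
    chi2_vals, chi2_pvals = chi2(X_demo, y_demo)       # chi-squared test per feature
    return f_vals, f_pvals, chi2_vals, chi2_pvals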
######################################################################
# Base classes
class _BaseFilter(BaseEstimator, SelectorMixin):
"""Initialize the univariate feature selection.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
"""
def __init__(self, score_func):
self.score_func = score_func
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'])
if not callable(self.score_func):
raise TypeError("The score function should be a callable, %s (%s) "
"was passed."
% (self.score_func, type(self.score_func)))
self._check_params(X, y)
self.scores_, self.pvalues_ = self.score_func(X, y)
self.scores_ = np.asarray(self.scores_)
self.pvalues_ = np.asarray(self.pvalues_)
return self
def _check_params(self, X, y):
pass
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
"""Select features according to a percentile of the highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
percentile : int, optional, default=10
Percent of features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, percentile=10):
super(SelectPercentile, self).__init__(score_func)
self.percentile = percentile
def _check_params(self, X, y):
if not 0 <= self.percentile <= 100:
raise ValueError("percentile should be >=0, <=100; got %r"
% self.percentile)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
# Cater for NaNs
if self.percentile == 100:
            return np.ones(len(self.scores_), dtype=bool)
        elif self.percentile == 0:
            return np.zeros(len(self.scores_), dtype=bool)
        scores = _clean_nans(self.scores_)
        threshold = stats.scoreatpercentile(scores,
                                            100 - self.percentile)
        mask = scores > threshold
        ties = np.where(scores == threshold)[0]
        if len(ties):
            max_feats = len(scores) * self.percentile // 100
            kept_ties = ties[:max_feats - mask.sum()]
            mask[kept_ties] = True
return mask
class SelectKBest(_BaseFilter):
"""Select features according to the k highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
k : int or "all", optional, default=10
Number of top features to select.
The "all" option bypasses selection, for use in a parameter search.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, k=10):
super(SelectKBest, self).__init__(score_func)
self.k = k
def _check_params(self, X, y):
if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
raise ValueError("k should be >=0, <= n_features; got %r."
"Use k='all' to return all features."
% self.k)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
if self.k == 'all':
return np.ones(self.scores_.shape, dtype=bool)
elif self.k == 0:
return np.zeros(self.scores_.shape, dtype=bool)
else:
scores = _clean_nans(self.scores_)
mask = np.zeros(scores.shape, dtype=bool)
# Request a stable sort. Mergesort takes more memory (~40MB per
# megafeature on x86-64).
mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1
return mask
class SelectFpr(_BaseFilter):
"""Filter: Select the pvalues below alpha based on a FPR test.
FPR test stands for False Positive Rate test. It controls the total
amount of false detections.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest p-value for features to be kept.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFpr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return self.pvalues_ < self.alpha
class SelectFdr(_BaseFilter):
"""Filter: Select the p-values for an estimated false discovery rate
This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound
on the expected false discovery rate.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
References
----------
http://en.wikipedia.org/wiki/False_discovery_rate
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFdr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
n_features = len(self.pvalues_)
sv = np.sort(self.pvalues_)
        selected = sv[sv <= float(self.alpha) / n_features
                      * np.arange(1, n_features + 1)]
if selected.size == 0:
return np.zeros_like(self.pvalues_, dtype=bool)
return self.pvalues_ <= selected.max()
class SelectFwe(_BaseFilter):
"""Filter: Select the p-values corresponding to Family-wise error rate
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFwe, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return (self.pvalues_ < self.alpha / len(self.pvalues_))
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
"""Univariate feature selector with configurable strategy.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}
Feature selection mode.
param : float or int depending on the feature selection mode
Parameter of the corresponding mode.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
"""
_selection_modes = {'percentile': SelectPercentile,
'k_best': SelectKBest,
'fpr': SelectFpr,
'fdr': SelectFdr,
'fwe': SelectFwe}
def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):
super(GenericUnivariateSelect, self).__init__(score_func)
self.mode = mode
self.param = param
def _make_selector(self):
selector = self._selection_modes[self.mode](score_func=self.score_func)
# Now perform some acrobatics to set the right named parameter in
# the selector
possible_params = selector._get_param_names()
possible_params.remove('score_func')
selector.set_params(**{possible_params[0]: self.param})
return selector
def _check_params(self, X, y):
if self.mode not in self._selection_modes:
raise ValueError("The mode passed should be one of %s, %r,"
" (type %s) was passed."
% (self._selection_modes.keys(), self.mode,
type(self.mode)))
self._make_selector()._check_params(X, y)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
selector = self._make_selector()
selector.pvalues_ = self.pvalues_
selector.scores_ = self.scores_
return selector._get_support_mask()
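# Hedged usage sketch (not part of scikit-learn's public API): keep the two
# features of a tiny non-negative toy problem with the highest chi-squared
# scores, using the SelectKBest filter defined above.
def _example_select_k_best():
    X_demo = np.array([[1., 0., 3.], [2., 1., 0.], [0., 4., 1.], [3., 2., 2.]])
    y_demo = np.array([0, 0, 1, 1])
    selector = SelectKBest(chi2, k=2).fit(X_demo, y_demo)
    return selector.get_support()  # boolean mask of the selected columns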
|
the-stack_106_14276
|
#!/usr/bin/env python
# Copyright 2020 Jian Wu
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import torch as th
import torch.nn as nn
from typing import Optional, Dict
from aps.asr.base.attention import padding_mask
from aps.asr.base.encoder import EncRetType
from aps.asr.xfmr.impl import get_xfmr_encoder
from aps.asr.xfmr.pose import get_xfmr_pose
from aps.asr.xfmr.proj import get_xfmr_proj
class TransformerEncoder(nn.Module):
"""
Transformer based encoders. Currently it supports {xfmr|cfmr}_{abs|rel|xl}
The relationship between type of the encoder and positional encoding layer:
{xfmr|cfmr}_abs <=> inp_sin
{xfmr|cfmr}_rel <=> rel
{xfmr|cfmr}_xl <=> sin
"""
def __init__(self,
enc_type: str,
input_size: int,
proj_layer: str = "conv2d",
proj_kwargs: Optional[Dict] = None,
att_dim: int = 512,
nhead: int = 8,
feedforward_dim: int = 2048,
num_layers: int = 6,
radius: int = 128,
scale_embed: bool = False,
pos_dropout: float = 0.1,
att_dropout: float = 0.1,
ffn_dropout: float = 0.1,
kernel_size: int = 16,
post_norm: bool = True,
untie_rel: bool = True):
super(TransformerEncoder, self).__init__()
self.type = enc_type.split("_")[-1]
self.proj = get_xfmr_proj(proj_layer, input_size, att_dim, proj_kwargs)
self.pose = get_xfmr_pose(enc_type,
att_dim,
nhead=nhead,
radius=radius,
dropout=pos_dropout,
scale_embed=scale_embed)
self.encoder = get_xfmr_encoder(enc_type,
num_layers,
att_dim,
nhead,
dim_feedforward=feedforward_dim,
att_dropout=att_dropout,
ffn_dropout=ffn_dropout,
kernel_size=kernel_size,
pre_norm=not post_norm,
untie_rel=untie_rel)
def forward(self, inp_pad: th.Tensor,
inp_len: Optional[th.Tensor]) -> EncRetType:
"""
        Go through the projection, positional encoding and encoder layers
        Args:
            inp_pad: N x Ti x F
            inp_len: N or None
        Return:
            enc_out: N x Ti x D
            inp_len: N or None
"""
inp_len = self.proj.num_frames(inp_len)
enc_inp = self.proj(inp_pad)
src_pad_mask = None if inp_len is None else (padding_mask(inp_len) == 1)
if self.type == "abs":
# enc_inp: N x Ti x D => Ti x N x D
enc_inp = self.pose(enc_inp)
inj_pose = None
else:
# enc_inp: N x Ti x D => Ti x N x D
enc_inp = enc_inp.transpose(0, 1)
nframes = enc_inp.shape[0]
# 2Ti-1 x D
if self.type == "rel":
inj_pose = self.pose(
th.arange(-nframes + 1, nframes, device=enc_inp.device))
else:
inj_pose = self.pose(
th.arange(0, 2 * nframes - 1, 1.0, device=enc_inp.device))
# Ti x N x D
enc_out = self.encoder(enc_inp,
inj_pose=inj_pose,
src_key_padding_mask=src_pad_mask)
# N x Ti x D
return enc_out.transpose(0, 1), inp_len
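# Hedged usage sketch: the "xfmr_abs" encoder type, the 80-dim front-end and
# all shapes below are illustrative; see the class docstring for the encoder
# types this module is expected to support.
def _example_forward():
    encoder = TransformerEncoder("xfmr_abs", 80, att_dim=256, nhead=4, num_layers=2)
    feats = th.randn(2, 100, 80)           # N x Ti x F
    feat_len = th.tensor([100, 80])        # N
    enc_out, out_len = encoder(feats, feat_len)
    return enc_out.shape, out_len          # N x Ti' x 256, Ti' after subsampling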
|
the-stack_106_14277
|
import ctypes as ct
c_float_p = ct.POINTER(ct.c_float)
c_int_p = ct.POINTER(ct.c_int)
class CWALL(ct.Structure):
_fields_ = [
('dim', ct.c_int),
('absorption', ct.c_float),
('normal', ct.c_float * 3),
('n_corners', ct.c_int),
('corners', c_float_p),
('origin', ct.c_float * 3),
('basis', ct.c_float * 6),
('flat_corners', c_float_p),
]
c_wall_p = ct.POINTER(CWALL)
class CROOM(ct.Structure):
_fields_ = [
('dim', ct.c_int),
('n_walls', ct.c_int),
('walls', c_wall_p),
('n_sources', ct.c_int),
('sources', c_float_p),
('parents', c_int_p),
('gen_walls', c_int_p),
('orders', c_int_p),
('attenuations', c_float_p),
('n_obstructing_walls', ct.c_int),
('obstructing_walls', c_int_p),
('n_microphones', ct.c_int),
('microphones', c_float_p),
('is_visible', c_int_p),
]
c_room_p = ct.POINTER(CROOM)
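if __name__ == '__main__':
    # Hedged usage sketch: fill a CWALL for a unit square in the XY plane.
    # Field meanings are inferred from the declarations above, and the flat
    # corner buffer must be kept alive for as long as the wall references it.
    corners = (ct.c_float * 12)(0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0)
    wall = CWALL()
    wall.dim = 3
    wall.absorption = 0.1
    wall.n_corners = 4
    for i, v in enumerate((0.0, 0.0, 1.0)):
        wall.normal[i] = v
    wall.corners = ct.cast(corners, c_float_p)
    print(wall.dim, wall.n_corners, wall.corners[3])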
|
the-stack_106_14279
|
from collections import OrderedDict
import pytest
from rdflib.term import BNode
from rdflib_rdna import rdna
@pytest.fixture
def get_issuer():
issuer = rdna.IdentifierIssuer()
yield issuer
del issuer
def test_rdna_identifierissuer_init(get_issuer):
issuer = get_issuer
assert isinstance(issuer._existing, OrderedDict)
assert len(list(issuer._existing.keys())) == 0
assert issuer.get_id() is not None
def test_rdna_identifierissuer_issue(get_issuer):
issuer = get_issuer
bnode1 = BNode()
id1 = issuer.get_id(bnode1)
assert issuer.get_id(bnode1) == id1
assert id1 == '_:c14n0'
assert issuer.has_id(bnode1) is True
assert len(list(issuer._existing.keys())) == 1
def test_rdna_identifierissuer_clone(get_issuer):
issuer = get_issuer
bnode1 = BNode()
id1 = issuer.get_id(bnode1)
assert len(list(issuer._existing.keys())) == 1
issuerclone = issuer.clone()
assert len(list(issuerclone._existing.keys())) == 1
assert issuerclone.get_id(bnode1) == id1
def test_rdna_identifierissuer_old_ids(get_issuer):
issuer = get_issuer
bnode1 = BNode()
bnode2 = BNode()
id1 = issuer.get_id(bnode1)
id2 = issuer.get_id(bnode2)
assert list(issuer.get_old_ids()) == [bnode1, bnode2]
|
the-stack_106_14281
|
###############################################################################
# Name: haxe.py #
# Purpose: Syntax Definitions for haXe web language #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2008 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""
@summary: Lexer configuration module for haXe web programming language
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: _haxe.py 62364 2009-10-11 01:02:12Z CJP $"
__revision__ = "$Revision: 62364 $"
#-----------------------------------------------------------------------------#
# Imports
import wx.stc as stc
# Local Imports
import synglob
import syndata
import _cpp
#-----------------------------------------------------------------------------#
#---- Keyword Definitions ----#
HAXE_KW = (0, "abstract break case catch class const continue trace do else "
"enum extends finally for function goto if implements import in "
"instanceof int interface new package private public return "
"static super switch this throw throws transient try typeof var "
"void volatile while with" )
HAXE_TYPES = (1, "Bool Enum false Float Int null String true Void ")
#---- End Keyword Definitions ----#
#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
"""SyntaxData object for HaXe"""
def __init__(self, langid):
syndata.SyntaxDataBase.__init__(self, langid)
# Setup
self.SetLexer(stc.STC_LEX_CPP)
self.RegisterFeature(synglob.FEATURE_AUTOINDENT, _cpp.AutoIndenter)
def GetKeywords(self):
"""Returns Specified Keywords List """
return [HAXE_KW, HAXE_TYPES, _cpp.DOC_KEYWORDS]
def GetSyntaxSpec(self):
"""Syntax Specifications """
return _cpp.SYNTAX_ITEMS
def GetProperties(self):
"""Returns a list of Extra Properties to set """
return [_cpp.FOLD,]
def GetCommentPattern(self):
"""Returns a list of characters used to comment a block of code """
return ['//']
|
the-stack_106_14283
|
import math
from collections import deque
import bmesh
import bpy
import numpy
from bpy_extras import view3d_utils
from mathutils import Vector, Matrix, Quaternion
from mathutils.bvhtree import BVHTree
from mathutils.geometry import intersect_line_plane, distance_point_to_plane
from rx import Observable
from sprytile_tools.tool_build import ToolBuild
from sprytile_tools.tool_paint import ToolPaint
from sprytile_tools.tool_fill import ToolFill
import sprytile_uv
from sprytile_uv import UvDataLayers
import sprytile_utils
import sprytile_preview
class DataObjectDict(dict):
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
class VIEW3D_OP_SprytileModalTool(bpy.types.Operator):
"""Tile based mesh creation/UV layout tool"""
bl_idname = "sprytile.modal_tool"
bl_label = "Sprytile Paint"
bl_options = {'REGISTER'}
no_undo = False
addon_keymaps = []
default_keymaps = []
tool_keymaps = {
'MAKE_FACE' : "Sprytile Build Tool Map",
'PAINT' : "Sprytile Paint Tool Map",
'FILL' : "Sprytile Fill Tool Map"
}
draw_preview = False
@staticmethod
def calculate_view_axis(context):
if context.area.type != 'VIEW_3D':
return None, None
region = context.region
rv3d = context.region_data
if rv3d is None:
return None, None
# Get the view ray from center of screen
coord = Vector((int(region.width / 2), int(region.height / 2)))
view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
# Get the up vector. The default scene view camera is pointed
# downward, with up on Y axis. Apply view rotation to get current up
view_up_vector = rv3d.view_rotation @ Vector((0.0, 1.0, 0.0))
plane_normal = sprytile_utils.snap_vector_to_axis(view_vector, mirrored=True)
up_vector = sprytile_utils.snap_vector_to_axis(view_up_vector)
# calculated vectors are not perpendicular, don't set data
if plane_normal.dot(up_vector) != 0.0:
return None, None
return plane_normal, up_vector
@staticmethod
def find_view_axis(context):
scene = context.scene
if scene.sprytile_data.lock_normal is True:
return
plane_normal, up_vector = VIEW3D_OP_SprytileModalTool.calculate_view_axis(context)
if plane_normal is None:
return
scene.sprytile_data.paint_normal_vector = plane_normal
scene.sprytile_data.paint_up_vector = up_vector
if abs(plane_normal.x) > 0:
new_mode = 'X'
elif abs(plane_normal.y) > 0:
new_mode = 'Y'
else:
new_mode = 'Z'
return new_mode
def get_tiledata_from_index(self, face_index):
return VIEW3D_OP_SprytileModalTool.get_face_tiledata(self.bmesh, self.bmesh.faces[face_index])
@staticmethod
def get_face_tiledata(bmesh, face):
grid_id_layer = bmesh.faces.layers.int.get(UvDataLayers.GRID_INDEX)
tile_id_layer = bmesh.faces.layers.int.get(UvDataLayers.GRID_TILE_ID)
if grid_id_layer is None or tile_id_layer is None:
return None, None, None, None, None
grid_id = face[grid_id_layer]
tile_packed_id = face[tile_id_layer]
width = 1
width_layer = bmesh.faces.layers.int.get(UvDataLayers.GRID_SEL_WIDTH)
if width_layer is not None:
width = face[width_layer]
if width is None:
width = 1
height = 1
height_layer = bmesh.faces.layers.int.get(UvDataLayers.GRID_SEL_HEIGHT)
if height_layer is not None:
height = face[height_layer]
if height is None:
height = 1
origin = -1
origin_layer = bmesh.faces.layers.int.get(UvDataLayers.GRID_SEL_ORIGIN)
if origin_layer is not None:
origin = face[origin_layer]
if origin is None:
origin = -1
# For backwards compatibility. Origin/width/height
# did not exist before 0.4.2
if origin == 0 and height == 0 and width == 0:
origin = tile_packed_id
height = max(1, height)
width = max(1, width)
# print("get tile data - grid:{0}, tile_id:{1}, w:{2}, h:{3}, o:{4}"
# .format(grid_id, tile_packed_id, width, height, origin))
return grid_id, tile_packed_id, width, height, origin
def add_virtual_cursor(self, cursor_pos):
cursor_len = len(self.virtual_cursor)
if cursor_len == 0:
self.virtual_cursor.append(cursor_pos)
return
last_pos = self.virtual_cursor[cursor_len - 1]
last_vector = cursor_pos - last_pos
if last_vector.magnitude < 0.1:
return
self.virtual_cursor.append(cursor_pos)
def get_virtual_cursor_vector(self):
cursor_direction = Vector((0.0, 0.0, 0.0))
cursor_len = len(self.virtual_cursor)
if cursor_len <= 1:
return cursor_direction
for idx in range(cursor_len - 1):
segment = self.virtual_cursor[idx + 1] - self.virtual_cursor[idx]
cursor_direction += segment
cursor_direction /= cursor_len
return cursor_direction
def face_to_world_verts(self, context, face_index):
if face_index is None:
pass
face = self.bmesh.faces[face_index]
world_verts = []
for idx, vert in enumerate(face.verts):
vert_world_pos = context.object.matrix_world @ vert.co
world_verts.append(vert_world_pos)
return world_verts
def flow_cursor(self, context, face_index, virtual_cursor):
"""Move the cursor along the given face, using virtual_cursor direction"""
world_verts = self.face_to_world_verts(context, face_index)
self.flow_cursor_verts(context, world_verts, virtual_cursor)
def flow_cursor_verts(self, context, verts, virtual_cursor):
cursor_len = len(self.virtual_cursor)
if cursor_len <= 1:
return None
cursor_direction = self.get_virtual_cursor_vector()
cursor_direction.normalize()
max_dist = -1.0
closest_pos = None
for idx, vert in enumerate(verts):
vert_vector = vert - virtual_cursor
vert_dist = vert_vector.length
vert_vector.normalize()
vert_dot = vert_vector.dot(cursor_direction)
if vert_dot > 0.5 and vert_dist > max_dist:
closest_pos = vert
max_dist = vert_dist
return closest_pos
def raycast_grid_coord(self, context, x, y, up_vector, right_vector, normal, work_layer_mask=0):
"""
        Raycast against the object using grid coordinates around the cursor
:param context:
:param x:
:param y:
:param up_vector:
:param right_vector:
:param normal:
:param work_layer_mask:
:return:
"""
obj = context.object
ray_origin = Vector(context.scene.cursor.location.copy())
ray_origin += (x + 0.5) * right_vector
ray_origin += (y + 0.5) * up_vector
ray_offset = 0.01
ray_origin += normal * ray_offset
ray_direction = -normal
return VIEW3D_OP_SprytileModalTool.raycast_object(obj, ray_origin, ray_direction, ray_dist=ray_offset*2,
work_layer_mask=work_layer_mask)
@staticmethod
def raycast_object(obj, ray_origin, ray_direction, ray_dist=1000.0,
world_normal=False, work_layer_mask=0, pass_dist=0.001):
matrix = obj.matrix_world.copy()
# get the ray relative to the object
matrix_inv = matrix.inverted()
ray_origin_obj = matrix_inv @ ray_origin
ray_target_obj = matrix_inv @ (ray_origin + ray_direction)
ray_direction_obj = ray_target_obj - ray_origin_obj
mesh = bmesh.from_edit_mesh(obj.data)
tree = BVHTree.FromBMesh(mesh)
location, normal, face_index, distance = tree.ray_cast(ray_origin_obj, ray_direction_obj, ray_dist)
if face_index is None:
return None, None, None, None
face = mesh.faces[face_index]
work_layer_id = mesh.faces.layers.int.get(UvDataLayers.WORK_LAYER)
if work_layer_id is None:
return None, None, None, None
work_layer_value = face[work_layer_id]
# Pass through faces under certain conditions
do_pass_through = False
# Layer mask not matching
if work_layer_value != work_layer_mask:
do_pass_through = True
# Hit face is backface
if face.normal.dot(ray_direction) > 0:
do_pass_through = not bpy.context.scene.sprytile_data.allow_backface
# Hit face is hidden
if face.hide:
do_pass_through = True
# Translate location back to world space
location = matrix @ location
if do_pass_through:
# add shift offset if passing through
shift_vec = ray_direction.normalized() * pass_dist
new_ray_origin = location + shift_vec
return VIEW3D_OP_SprytileModalTool.raycast_object(obj, new_ray_origin, ray_direction, work_layer_mask=work_layer_mask)
if world_normal:
normal = matrix @ normal
return location, normal, face_index, distance
def update_bmesh_tree(self, context, update_index=False):
self.bmesh = bmesh.from_edit_mesh(context.object.data)
if update_index:
# Verify layers are created
VIEW3D_OP_SprytileModalTool.verify_bmesh_layers(self.bmesh)
self.bmesh = bmesh.from_edit_mesh(context.object.data)
self.tree = BVHTree.FromBMesh(self.bmesh)
@staticmethod
def verify_bmesh_layers(bmesh):
# Verify layers are created
for layer_name in UvDataLayers.LAYER_NAMES:
layer_data = bmesh.faces.layers.int.get(layer_name)
if layer_data is None:
print('Creating face layer:', layer_name)
bmesh.faces.layers.int.new(layer_name)
for el in [bmesh.faces, bmesh.verts, bmesh.edges]:
el.index_update()
el.ensure_lookup_table()
bmesh.loops.layers.uv.verify()
def construct_face(self, context, grid_coord, grid_size,
tile_xy, tile_origin,
grid_up, grid_right,
up_vector, right_vector, plane_normal,
require_base_layer=False,
work_layer_mask=0,
threshold=None):
"""
Create a new face at grid_coord or remap the existing face
:type work_layer_mask: bitmask integer
:param context:
:param grid_coord: Grid coordinate to create at
:param grid_size: Tile unit size of face
:param tile_xy: Tilegrid coordinate to map
:param tile_origin: Origin of tilegrid coordinate, for mapping data
:param grid_up:
:param grid_right:
:param up_vector:
:param right_vector:
:param plane_normal:
:param require_base_layer:
:param threshold:
:return:
"""
scene = context.scene
data = scene.sprytile_data
# Run a raycast on target work layer mask
hit_loc, hit_normal, face_index, hit_dist = self.raycast_grid_coord(
context, grid_coord[0], grid_coord[1],
grid_up, grid_right, plane_normal,
work_layer_mask=work_layer_mask
)
# Didn't hit target layer, and require base layer
if face_index is None and require_base_layer:
# Check if there is a base layer underneath
base_hit_loc, hit_normal, base_face_index, base_hit_dist = self.raycast_grid_coord(
context, grid_coord[0], grid_coord[1],
grid_up, grid_right, plane_normal
)
# Didn't hit required base layer, do nothing
if base_face_index is None:
return None
# Calculate where the origin of the grid is
grid_origin = scene.cursor.location.copy()
# If doing mesh decal, offset the grid origin
if data.work_layer == 'DECAL_1':
grid_origin += plane_normal * data.mesh_decal_offset
did_build = False
# No face index, assume build face
if face_index is None or face_index < 0:
face_position = grid_origin + grid_coord[0] * grid_right + grid_coord[1] * grid_up
face_verts = sprytile_utils.get_build_vertices(face_position,
grid_right * grid_size[0], grid_up * grid_size[1],
up_vector, right_vector)
face_index = self.create_face(context, face_verts)
did_build = True
if face_index is None or face_index < 0:
return None
# Didn't create face, only want to remap face. Check for coplanarity and dot
if did_build is False:
check_dot = abs(plane_normal.dot(hit_normal))
check_dot -= 1
check_coplanar = distance_point_to_plane(hit_loc, grid_origin, plane_normal)
check_coplanar = abs(check_coplanar) < 0.05
check_dot = abs(check_dot) < 0.05
# Can't remap face
if not check_coplanar or not check_dot:
return None
sprytile_uv.uv_map_face(context, up_vector, right_vector,
tile_xy, tile_origin, face_index,
self.bmesh, grid_size)
if did_build and data.auto_merge:
if threshold is None:
threshold = (1 / data.world_pixels) * 1.25
face = self.bmesh.faces[face_index]
face_position += grid_right * 0.5 + grid_up * 0.5
face_position += plane_normal * 0.01
face_index = self.merge_doubles(context, face, face_position, -plane_normal, threshold)
# Auto merge refreshes the mesh automatically
self.refresh_mesh = not data.auto_merge
return face_index
def merge_doubles(self, context, face, ray_origin, ray_direction, threshold):
face.select = True
work_layer_id = self.bmesh.faces.layers.int.get(UvDataLayers.WORK_LAYER)
work_layer_value = face[work_layer_id]
for check_face in self.bmesh.faces:
check_face.select = check_face[work_layer_id] == work_layer_value
merge_threshold = 0.00
if context.scene.sprytile_data.work_layer != 'BASE':
merge_threshold = 0.01
bpy.ops.mesh.remove_doubles(threshold=merge_threshold, use_unselected=False)
for el in [self.bmesh.faces, self.bmesh.verts, self.bmesh.edges]:
el.index_update()
el.ensure_lookup_table()
self.bmesh.select_flush_mode()
for iter_face in self.bmesh.faces:
iter_face.select = False
# Modified the mesh, refresh and raycast to find the new face index
self.update_bmesh_tree(context)
hit_loc, norm, new_face_idx, hit_dist = self.raycast_object(
context.object,
ray_origin,
ray_direction,
0.02
)
if new_face_idx is not None:
self.bmesh.faces[new_face_idx].select = False
return new_face_idx
def create_face(self, context, world_vertices):
"""
Create a face in the bmesh using the given world space vertices
:param context:
:param world_vertices: Vector array of world space positions
:return:
"""
face_vertices = []
# Convert world space position to object space
world_inv = context.object.matrix_world.copy().inverted()
for face_vtx in world_vertices:
vtx = self.bmesh.verts.new(face_vtx)
vtx.co = world_inv @ vtx.co
face_vertices.append(vtx)
face = self.bmesh.faces.new(face_vertices)
face.normal_update()
for el in [self.bmesh.faces, self.bmesh.verts, self.bmesh.edges]:
el.index_update()
el.ensure_lookup_table()
bmesh.update_edit_mesh(context.object.data, True, True)
# Update the collision BVHTree with new data
self.refresh_mesh = True
return face.index
@staticmethod
def get_face_up_vector(obj, context, face_index, sensitivity=0.1, bias_right=False):
"""
Find the edge of the given face that most closely matches view up vector
:param context:
:param face_index:
:param sensitivity:
:param bias_right:
:return:
"""
# Get the view up vector. The default scene view camera is pointed
# downward, with up on Y axis. Apply view rotation to get current up
rv3d = context.region_data
view_up_vector = rv3d.view_rotation @ Vector((0.0, 1.0, 0.0))
view_right_vector = rv3d.view_rotation @ Vector((1.0, 0.0, 0.0))
data = context.scene.sprytile_data
mesh = bmesh.from_edit_mesh(obj.data)
#if mesh is None or mesh.faces is None:
# self.refresh_mesh = True
# return None, None
world_matrix = context.object.matrix_world
face = mesh.faces[face_index]
# Convert the face normal to world space
normal_inv = context.object.matrix_world.copy().inverted().transposed()
face_normal = normal_inv @ face.normal.copy()
def calc_up_sel_vectors(vtx1, vtx2):
edge_center = (vtx1 + vtx2) / 2
face_center = world_matrix @ face.calc_center_bounds()
# Get the rough heading of the up vector
estimated_up = face_center - edge_center
estimated_up.normalize()
sel_vector = vtx2 - vtx1
sel_vector.normalize()
# Cross the face normal and hint vector to get the up vector
view_up_vector = face_normal.cross(sel_vector)
view_up_vector.normalize()
# If the calculated up faces away from rough up, reverse it
if view_up_vector.dot(estimated_up) < 0:
view_up_vector *= -1
sel_vector *= -1
return view_up_vector, sel_vector
do_hint = data.paint_mode in {'PAINT', 'SET_NORMAL'} and data.paint_hinting
if do_hint:
for edge in face.edges:
if not edge.select:
continue
vtx1 = world_matrix @ edge.verts[0].co
vtx2 = world_matrix @ edge.verts[1].co
view_up_vector, sel_vector = calc_up_sel_vectors(vtx1, vtx2)
return view_up_vector, sel_vector
# if face didn't have any selected edges, use the active edge selection
selection = mesh.select_history.active
if isinstance(selection, bmesh.types.BMEdge):
vtx1 = world_matrix @ selection.verts[0].co.copy()
vtx2 = world_matrix @ selection.verts[1].co.copy()
view_up_vector, sel_vector = calc_up_sel_vectors(vtx1, vtx2)
return view_up_vector, sel_vector
# No edges or edge selection, use normal face up vector finding
# Find the edge of the hit face that most closely matches
# the view up / view right vectors
closest_up = None
closest_up_dot = 2.0
closest_right = None
closest_right_dot = 2.0
idx = -1
for edge in face.edges:
idx += 1
# Move vertices to world space
vtx1 = world_matrix @ edge.verts[0].co
vtx2 = world_matrix @ edge.verts[1].co
edge_vec = vtx2 - vtx1
edge_vec.normalize()
edge_up_dot = 1 - abs(edge_vec.dot(view_up_vector))
edge_right_dot = 1 - abs(edge_vec.dot(view_right_vector))
# print(idx, edge_vec, "up dot", edge_up_dot, "right dot", edge_right_dot)
if edge_up_dot < sensitivity and edge_up_dot < closest_up_dot:
closest_up_dot = edge_up_dot
closest_up = edge_vec
# print("Setting", idx, "as closest up")
if edge_right_dot < sensitivity and edge_right_dot < closest_right_dot:
closest_right_dot = edge_right_dot
closest_right = edge_vec
# print("Setting", idx, "as closest right")
# print("Closest indices: up", closest_up, "right", closest_right)
chosen_up = None
if closest_up is not None and not bias_right:
if closest_up.dot(view_up_vector) < 0:
closest_up *= -1
chosen_up = closest_up
elif closest_right is not None:
if closest_right.dot(view_right_vector) < 0:
closest_right *= -1
chosen_up = face_normal.cross(closest_right)
if do_hint and closest_right is not None:
if closest_right.dot(view_right_vector) < 0:
closest_right *= -1
chosen_up = face_normal.cross(closest_right)
# print("Chosen up", chosen_up)
return chosen_up, closest_right
@staticmethod
def cursor_move_layer(context, direction):
scene = context.scene
target_grid = sprytile_utils.get_grid(context, context.object.sprytile_gridid)
grid_x = target_grid.grid[0]
grid_y = target_grid.grid[1]
layer_move = min(grid_x, grid_y)
layer_move = math.ceil(layer_move/2)
layer_move *= (1 / context.scene.sprytile_data.world_pixels)
plane_normal = scene.sprytile_data.paint_normal_vector.copy()
plane_normal *= layer_move * direction
grid_position = scene.cursor.location + plane_normal
scene.cursor.location = grid_position
def modal(self, context, event):
do_exit = False
sprytile_data = context.scene.sprytile_data
# Check that the mouse is inside the region
region = context.region
coord = Vector((event.mouse_region_x, event.mouse_region_y))
out_of_region = coord.x < 0 or coord.y < 0 or coord.x > region.width or coord.y > region.height
if event.type == 'LEFTMOUSE' and event.value == 'RELEASE':
do_exit = True
if context.object.mode != 'EDIT':
do_exit = True
if do_exit:
self.exit_modal(event, context)
return {'CANCELLED'}
if VIEW3D_OP_SprytileModalTool.no_undo and sprytile_data.is_grid_translate is False:
VIEW3D_OP_SprytileModalTool.no_undo = False
# Mouse in Sprytile UI, eat this event without doing anything
if context.scene.sprytile_ui.use_mouse:
sprytile_preview.clear_preview_data()
return {'RUNNING_MODAL'}
# Mouse move triggers preview drawing
draw_preview = sprytile_data.paint_mode in {'MAKE_FACE', 'FILL', 'PAINT'}
if draw_preview:
if (event.alt or context.scene.sprytile_ui.use_mouse) or sprytile_data.is_snapping:
draw_preview = False
# Refreshing the mesh, preview needs constantly refreshed
# mesh or bad things seem to happen. This can potentially get expensive
#if self.refresh_mesh or self.bmesh.is_valid is False or draw_preview:
# @Blender 2.8 note: this now happens inside the GUI operator so no need to do it here
if self.refresh_mesh or self.bmesh.is_valid is False:
self.update_bmesh_tree(context, True)
self.refresh_mesh = False
# Potentially expensive, test if there is a selected mesh element
if event.type == 'MOUSEMOVE':
sprytile_data.has_selection = False
for v in self.bmesh.verts:
if v.select:
sprytile_data.has_selection = True
break
context.area.tag_redraw()
# If outside the region, pass through
if out_of_region:
# If preview data exists, clear it
if sprytile_preview.preview_verts is not None:
sprytile_preview.clear_preview_data()
return {'PASS_THROUGH'}
modal_return = {'PASS_THROUGH'}
# Process keyboard events, if returned something end here
key_return = self.handle_keys(context, event)
if key_return is not None:
sprytile_preview.clear_preview_data()
modal_return = key_return
# Didn't process keyboard, process mouse now
else:
mouse_return = self.handle_mouse(context, event, draw_preview)
if mouse_return is not None:
modal_return = mouse_return
# Signals tools to draw preview
self.draw_preview = draw_preview and self.refresh_mesh is False
# Clear preview data if not drawing preview
if not self.draw_preview:
sprytile_preview.preview_verts = None
sprytile_preview.preview_uvs = None
# Build the data that will be used by tool observers
region = context.region
rv3d = context.region_data
coord = event.mouse_region_x, event.mouse_region_y
no_data = self.tree is None or rv3d is None
if no_data is False:
# get the ray from the viewport and mouse
ray_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
self.rx_data = DataObjectDict(
context=context,
ray_vector=ray_vector,
ray_origin=ray_origin
)
else:
self.rx_data = None
self.call_tool(event, True, context)
return modal_return
def call_tool(self, event, left_down, context):
# Push the event data out through rx_observer for tool observers
sprytile_data = bpy.context.scene.sprytile_data
# If the selected object does not own the painting material, add a slot for it here
if left_down:
grid = sprytile_utils.get_grid(context, context.object.sprytile_gridid)
if grid is not None:
grid_mat = sprytile_utils.get_grid_material(grid)
if not sprytile_utils.has_material(context.object, grid_mat):
bpy.ops.object.material_slot_add()
context.object.active_material = grid_mat
if self.rx_observer is not None:
self.rx_observer.on_next(
DataObjectDict(
paint_mode=sprytile_data.paint_mode,
event=event,
left_down=left_down,
build_preview=self.draw_preview,
)
)
def handle_mouse(self, context, event, draw_preview):
""""""
# Eat any tweak mouse events, default blender keymap has a translate command on tweak
if event.type in {'EVT_TWEAK_L', 'EVT_TWEAK_M', 'EVT_TWEAK_R'}:
return {'RUNNING_MODAL'}
if 'MOUSE' not in event.type:
return None
#if event.type in {'WHEELUPMOUSE', 'WHEELDOWNMOUSE'}:
# if context.scene.sprytile_data.is_snapping:
# direction = -1 if event.type == 'WHEELUPMOUSE' else 1
# self.cursor_move_layer(context, direction)
# return {'RUNNING_MODAL'}
# no_undo flag is up, process no other mouse events until it is cleared
if VIEW3D_OP_SprytileModalTool.no_undo:
# print("No undo flag is on", event.type, event.value)
clear_types = {'LEFTMOUSE', 'RIGHTMOUSE'}
if event.type in clear_types and event.value == 'RELEASE':
print("Clearing no undo")
self.refresh_mesh = True
VIEW3D_OP_SprytileModalTool.no_undo = False
return {'PASS_THROUGH'} if VIEW3D_OP_SprytileModalTool.no_undo else {'RUNNING_MODAL'}
elif event.type == 'LEFTMOUSE':
# check_modifier = False
# TODO: Support preferences
#addon_prefs = context.preferences.addons[__package__].preferences
#if addon_prefs.tile_picker_key == 'Alt':
# check_modifier = event.alt
#if addon_prefs.tile_picker_key == 'Ctrl':
# check_modifier = event.ctrl
#if addon_prefs.tile_picker_key == 'Shift':
# check_modifier = event.shift
# if event.value == 'PRESS' and check_modifier is True:
# self.find_face_tile(context, event)
return {'RUNNING_MODAL'}
elif event.type == 'MOUSEMOVE':
if draw_preview and not VIEW3D_OP_SprytileModalTool.no_undo and event.type not in self.is_keyboard_list:
self.draw_preview = True
#if context.scene.sprytile_data.is_snapping:
# self.cursor_snap(context, event)
return None
def handle_keys(self, context, event):
"""Process keyboard presses"""
if event.type not in self.is_keyboard_list:
return None
def keymap_is_evt(kmi, evt):
is_mapped_key = kmi.type == event.type and \
kmi.value in {event.value, 'ANY'} and \
kmi.ctrl is event.ctrl and \
kmi.alt is event.alt and \
kmi.shift is event.shift
return is_mapped_key
# Process intercepts for special keymaps
for key_intercept in self.intercept_keys:
key = key_intercept[0]
arg = key_intercept[1]
if not keymap_is_evt(key, event):
continue
# print("Special key is", arg)
if arg == 'move_sel':
sprytile_preview.preview_uvs = None
sprytile_preview.preview_verts = None
VIEW3D_OP_SprytileModalTool.no_undo = True
bpy.ops.sprytile.translate_grid('INVOKE_REGION_WIN')
return {'RUNNING_MODAL'}
if arg == 'sel_mesh':
return {'PASS_THROUGH'}
#sprytile_data = context.scene.sprytile_data
#if event.shift and context.scene.sprytile_data.is_snapping:
# self.cursor_snap(context, event)
# return {'RUNNING_MODAL'}
# Pass through every key event we don't handle ourselves
return {'PASS_THROUGH'}
def execute(self, context):
return self.invoke(context, None)
def invoke(self, context, event):
if context.space_data.type != 'VIEW_3D':
self.report({'WARNING'}, "Active space must be a View3d: {0}".format(context.space_data.type))
return {'CANCELLED'}
obj = context.object
if not obj.visible_get() or obj.type != 'MESH':
self.report({'WARNING'}, "Active object must be a visible mesh")
return {'CANCELLED'}
if len(context.scene.sprytile_mats) < 1:
bpy.ops.sprytile.validate_grids()
if len(context.scene.sprytile_mats) < 1:
self.report({'WARNING'}, "No valid materials")
return {'CANCELLED'}
use_default_grid_id = obj.sprytile_gridid == -1
if sprytile_utils.get_grid(context, obj.sprytile_gridid) is None:
use_default_grid_id = True
if use_default_grid_id:
obj.sprytile_gridid = context.scene.sprytile_mats[0].grids[0].id
addon_prefs = context.preferences.addons[__package__].preferences
auto_adjust = addon_prefs.auto_adjust_viewport_shading
if auto_adjust:
cur_space = context.area.spaces.active
if cur_space.shading.type != 'MATERIAL':
cur_space.shading.type = 'MATERIAL'
self.virtual_cursor = deque([], 3)
VIEW3D_OP_SprytileModalTool.no_undo = False
self.update_bmesh_tree(context)
self.refresh_mesh = False
# Setup Rx Observer and Observables
self.rx_observer = None
observable_source = Observable.create(self.setup_rx_observer)
# Setup multi casting Observable
self.rx_source = observable_source.publish().auto_connect(1)
# Tools receive events from the Observable
self.tools = {
"build": ToolBuild(self, self.rx_source),
"paint": ToolPaint(self, self.rx_source),
"fill": ToolFill(self, self.rx_source)
}
win_mgr = context.window_manager
self.setup_user_keys(context)
win_mgr.modal_handler_add(self)
sprytile_data = context.scene.sprytile_data
sprytile_data.is_snapping = False
context.scene.sprytile_ui.is_dirty = True
        #bpy.ops.sprytile.gui_win('INVOKE_REGION_WIN') #TODO: Re-enable once ui works
#Update view axis
view_axis = self.find_view_axis(context)
if view_axis is not None:
if view_axis != sprytile_data.normal_mode:
sprytile_data.normal_mode = view_axis
sprytile_data.lock_normal = False
self.update_bmesh_tree(context, True)
self.modal(context, event)
return {'RUNNING_MODAL'}
def setup_rx_observer(self, observer):
self.rx_observer = observer
def setup_user_keys(self, context):
"""Find the keymaps to pass through to Blender"""
self.is_keyboard_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',
'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'ZERO', 'ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN', 'EIGHT', 'NINE',
'LEFT_CTRL', 'LEFT_ALT', 'LEFT_SHIFT', 'RIGHT_ALT',
'RIGHT_CTRL', 'RIGHT_SHIFT', 'OSKEY', 'GRLESS', 'ESC', 'TAB', 'RET', 'SPACE',
'LINE_FEED', 'BACK_SPACE', 'DEL', 'SEMI_COLON', 'PERIOD', 'COMMA', 'QUOTE',
'ACCENT_GRAVE', 'MINUS', 'SLASH', 'BACK_SLASH', 'EQUAL', 'LEFT_BRACKET',
'RIGHT_BRACKET', 'LEFT_ARROW', 'DOWN_ARROW', 'RIGHT_ARROW', 'UP_ARROW',
'NUMPAD_2', 'NUMPAD_4', 'NUMPAD_6', 'NUMPAD_8', 'NUMPAD_1', 'NUMPAD_3', 'NUMPAD_5',
'NUMPAD_7', 'NUMPAD_9', 'NUMPAD_PERIOD', 'NUMPAD_SLASH', 'NUMPAD_ASTERIX', 'NUMPAD_0',
'NUMPAD_MINUS', 'NUMPAD_ENTER', 'NUMPAD_PLUS',
'F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12', 'F13',
'F14', 'F15', 'F16', 'F17', 'F18', 'F19', 'PAUSE', 'INSERT', 'HOME', 'PAGE_UP',
'PAGE_DOWN', 'END', 'MEDIA_PLAY', 'MEDIA_STOP', 'MEDIA_FIRST', 'MEDIA_LAST']
self.intercept_keys = []
user_keymaps = context.window_manager.keyconfigs.user.keymaps
def get_keymap_entry(keymap_name, command):
keymap = user_keymaps[keymap_name]
if keymap is None:
return False, None
key_list = keymap.keymap_items
cmd_idx = key_list.find(command)
if cmd_idx < 0:
return True, None
return True, key_list[cmd_idx]
# These keymaps intercept existing shortcuts and repurpose them
keymap_intercept = {
'3D View': [
('view3d.select_circle', 'sel_mesh'),
('transform.translate', 'move_sel')
]
}
for keymap_id in keymap_intercept:
cmd_list = keymap_intercept[keymap_id]
for cmd_data in cmd_list:
cmd = cmd_data[0]
arg = cmd_data[1]
has_map, cmd_entry = get_keymap_entry(keymap_id, cmd)
if not has_map:
break
if cmd_entry is None:
continue
self.intercept_keys.append((cmd_entry, arg))
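        # At this point self.intercept_keys holds (keymap_item, action) pairs, e.g. the user's
        # 'transform.translate' binding mapped to 'move_sel'; handle_keys() checks these entries
        # before deciding whether to pass a key event through to Blender.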
def exit_modal(self, event, context):
self.call_tool(event, False, context)
if self.rx_observer is not None:
self.rx_observer.on_completed()
self.tree = None
self.tools = None
if context.object.mode == 'EDIT':
bmesh.update_edit_mesh(context.object.data, True, True)
# module classes
classes = (
VIEW3D_OP_SprytileModalTool,
)
def register():
for cl in classes:
bpy.utils.register_class(cl)
def unregister():
for cl in classes:
bpy.utils.unregister_class(cl)
if __name__ == '__main__':
register()
|
the-stack_106_14285
|
#!/usr/bin/env python3
#
# Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import subprocess
import shlex
from lib import dockerfile, commands, config_parser
def validate_tools(commands_list):
errors = []
for command in commands_list:
if isinstance(command, commands.Curl):
for tool in command.get_tools():
try:
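                    # curl flags: -s silent, -L follow redirects, -f exit non-zero on HTTP errors,
                    # so an unreachable tool URL surfaces as CalledProcessError below.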
subprocess.run(
["curl", "-sLf", shlex.quote(tool.get_from())],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
check=True)
except subprocess.CalledProcessError as e:
errors.append('Command "{}" failed with exit code {}'.format(e.cmd, e.returncode))
return errors
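
# Example invocation (script and config file names here are illustrative):
#   python3 validate_tools.py --dockerfile-configs my_dockerfile_config.yaml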
parser = argparse.ArgumentParser()
parser.add_argument("--dockerfile-configs", nargs='+', action="append", dest="dockerfile_configs", default=[], required=False, help="yaml file which lists the tools to be tested")
args = parser.parse_args()
dockerfile_config = config_parser.parse_dockerfile_configs(args.dockerfile_configs)
commands_list = config_parser.parse_commands(dockerfile_config)
validation_errors = validate_tools(commands_list)
if len(validation_errors) == 0:
print("Validation of tools was successful")
exit(0)
else:
print("Validation of tools failed with the following errors:\n{}".format('\n'.join(validation_errors)))
exit(1)
|
the-stack_106_14287
|
# -*- coding: utf-8 -*-
# All kernels expect numpy arrays of data.
# Arrays must have two dimensions: the first for the number of data points, the second for the dimension of the data.
'''
Title: MLP of pythonGPLVM
Author: James Hensman
Date: 2009
Code version: 81a4ce9 on 23 Nov 2009
Availability: https://github.com/jameshensman/pythonGPLVM
'''
import numpy as np
class RBF:
"""Radial Basis Funcion (or 'Squared Exponential') kernel, with the same scale in all directions...
    k(x_i, x_j) = \alpha \exp\{ -\gamma \|x_i - x_j\|^2 \}
"""
def __init__(self,alpha,gamma):
self.alpha = np.exp(alpha)
self.gamma = np.exp(gamma)
self.nparams = 2
def set_params(self,new_params):
assert new_params.size == self.nparams
self.alpha,self.gamma = np.exp(new_params).copy().flatten()#try to unpack np array safely.
def get_params(self):
#return np.array([self.alpha, self.gamma])
return np.log(np.array([self.alpha, self.gamma]))
def __call__(self,x1,x2):
N1,D1 = x1.shape
N2,D2 = x2.shape
assert D1==D2, "Vectors must be of matching dimension"
#use broadcasting to avoid for loops.
#should be uber fast
diff = x1.reshape(N1,1,D1)-x2.reshape(1,N2,D2)
diff = self.alpha*np.exp(-np.sum(np.square(diff),-1)*self.gamma)
return diff
def gradients(self,x1):
"""Calculate the gradient of the matrix K wrt the (log of the) free parameters"""
N1,D1 = x1.shape
diff = x1.reshape(N1,1,D1)-x1.reshape(1,N1,D1)
diff = np.sum(np.square(diff),-1)
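        # The kernel parameters are stored as their logs, so by the chain rule
        # dK/dlog(alpha) = alpha*exp(-gamma*D) = K and dK/dlog(gamma) = -alpha*gamma*D*exp(-gamma*D);
        # hence the extra alpha/gamma factors in the active lines below.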
#dalpha = np.exp(-diff*self.gamma)
dalpha = self.alpha*np.exp(-diff*self.gamma)
#dgamma = -self.alpha*diff*np.exp(-diff*self.gamma)
dgamma = -self.alpha*self.gamma*diff*np.exp(-diff*self.gamma)
return (dalpha, dgamma)
def gradients_wrt_data(self,x1,indexn=None,indexd=None):
"""compute the derivative matrix of the kernel wrt the _data_. Crazy
    This returns a list of matrices: each matrix is NxN, and there are N*D of them!"""
N1,D1 = x1.shape
diff = x1.reshape(N1,1,D1)-x1.reshape(1,N1,D1)
diff = np.sum(np.square(diff),-1)
expdiff = np.exp(- self.gamma * diff)
if (indexn==None) and(indexd==None):#calculate all gradients
rets = []
for n in range(N1):
for d in range(D1):
K = np.zeros((N1,N1))
K[n,:] = -2*self.alpha*self.gamma*(x1[n,d]-x1[:,d])*expdiff[n,:]
K[:,n] = K[n,:]
rets.append(K.copy())
return rets
else:
K = np.zeros((N1,N1))
K[indexn,:] = -2*self.alpha*self.gamma*(x1[indexn,indexd]-x1[:,indexd])*expdiff[indexn,:]
K[:,indexn] = K[indexn,:]
return K
class RBF_full:
def __init__(self,alpha,gammas):
self.gammas = np.exp(gammas.flatten())
self.dim = gammas.size
self.alpha = np.exp(alpha)
self.nparams = self.dim+1
def set_params(self,params):
assert params.size==self.nparams
self.alpha = np.exp(params.flatten()[0])
self.gammas = np.exp(params.flatten()[1:])
def get_params(self):
return np.log(np.hstack((self.alpha,self.gammas)))
def __call__(self,x1,x2):
N1,D1 = x1.shape
N2,D2 = x2.shape
assert D1==D2, "Vectors must be of matching dimension"
assert D1==self.dim, "That data does not match the dimensionality of this kernel"
diff = x1.reshape(N1,1,D1)-x2.reshape(1,N2,D2)
diff = self.alpha*np.exp(-np.sum(np.square(diff)*self.gammas,-1))
return diff
def gradients(self,x1):
"""Calculate the gradient of the matrix K wrt the (log of the) free parameters"""
N1,D1 = x1.shape
diff = x1.reshape(N1,1,D1)-x1.reshape(1,N1,D1)
sqdiff = np.sum(np.square(diff)*self.gammas,-1)
expdiff = np.exp(-sqdiff)
grads = [-g*np.square(diff[:,:,i])*self.alpha*expdiff for i,g in enumerate(self.gammas)]
grads.insert(0, self.alpha*expdiff)
return grads
def gradients_wrt_data(self,x1,indexn=None,indexd=None):
"""compute the derivative matrix of the kernel wrt the _data_. Crazy
    This returns a list of matrices: each matrix is NxN, and there are N*D of them!"""
N1,D1 = x1.shape
diff = x1.reshape(N1,1,D1)-x1.reshape(1,N1,D1)
sqdiff = np.sum(np.square(diff)*self.gammas,-1)
expdiff = np.exp(-sqdiff)
if (indexn==None) and(indexd==None):#calculate all gradients
rets = []
for n in range(N1):
for d in range(D1):
K = np.zeros((N1,N1))
K[n,:] = -2*self.alpha*self.gammas[d]*(x1[n,d]-x1[:,d])*expdiff[n,:]
K[:,n] = K[n,:]
rets.append(K.copy())
return rets
else:
K = np.zeros((N1,N1))
K[indexn,:] = -2*self.alpha*self.gammas[indexd]*(x1[indexn,indexd]-x1[:,indexd])*expdiff[indexn,:]
K[:,indexn] = K[indexn,:]
return K.copy()
class linear:
"""effectively the inner product, I think"""
def __init__(self,alpha,bias):
self.alpha = np.exp(alpha)
self.bias = np.exp(bias)
self.nparams = 2
def set_params(self,new_params):
assert new_params.size == self.nparams
self.alpha,self.bias = np.exp(new_params).flatten()#try to unpack np array safely.
def get_params(self):
return np.log(np.array([self.alpha,self.bias]))
def __call__(self,x1,x2):
N1,D1 = x1.shape
N2,D2 = x2.shape
assert D1==D2, "Vectors must be of matching dimension"
prod = x1.reshape(N1,1,D1)*x2.reshape(1,N2,D2)
prod = self.alpha*np.sum(prod,-1) + self.bias
#diff = self.alpha*np.sqrt(np.square(np.sum(diff,-1)))
return prod
def gradients(self,x1):
"""Calculate the gradient of the kernel matrix wrt the (log of the) parameters"""
dalpha = self(x1,x1)-self.bias
dbias = np.ones((x1.shape[0],x1.shape[0]))*self.bias
return dalpha, dbias
class combined:
""" a combined kernel - linear in X and RBF in Y.
    Treats the first dimension linearly, RBF on the remainder.
TODO: specify which dimensions should be linear and which should be RBF"""
def __init__(self,alpha_x,alpha_y,gamma,bias):
self.linear_kernel = linear(alpha_x, bias)
self.RBF_kernel = RBF(alpha_y, gamma)
self.nparams = 4
def set_params(self,new_params):
assert new_params.size == self.nparams
self.linear_kernel.set_params(new_params[:2])
self.RBF_kernel.set_params(new_params[2:])
def __call__(self,x1,x2):
N1,D1 = x1.shape
N2,D2 = x2.shape
assert D1==D2, "Vectors must be of matching dimension"
return self.linear_kernel(x1[:,0:1],x2[:,0:1])*self.RBF_kernel(x1[:,1:],x2[:,1:])
class polynomial:
def __init__(self,alpha,order):
"""Order of the polynomila is considered fixed...TODO: make the order optimisable..."""
self.alpha = alpha
self.order = order
self.nparams = 1
def set_params(self,new_params):
assert new_params.size == self.nparams
self.alpha, = new_params.flatten()
def __call__(self,x1,x2):
N1,D1 = x1.shape
N2,D2 = x2.shape
assert D1==D2, "Vectors must be of matching dimension"
prod = x1.reshape(N1,1,D1)*x2.reshape(1,N2,D2)
prod = self.alpha*np.power(np.sum(prod,-1) + 1, self.order)
return prod
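
# Illustrative usage, not part of the original module (parameter values are arbitrary):
if __name__ == '__main__':
    X = np.random.randn(5, 2)
    kern = RBF(alpha=np.log(1.0), gamma=np.log(0.5))  # constructors take log-parameters
    K = kern(X, X)
    print(K.shape)  # -> (5, 5)
    dalpha, dgamma = kern.gradients(X)  # gradients w.r.t. the log-parameters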
|
the-stack_106_14289
|
# !/usr/bin/env/python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-self-use,too-many-arguments,unused-argument,not-callable,no-member,attribute-defined-outside-init
""" Cifar10 Custom Handler."""
import base64
import io
import json
import logging
import os
from abc import ABC
from base64 import b64encode
from io import BytesIO
import numpy as np
import torch
from PIL import Image
from captum.attr import (
IntegratedGradients, Occlusion, LayerGradCam, LayerAttribution
)
from captum.attr import visualization as viz
from classifier import CIFAR10CLASSIFIER
from matplotlib.colors import LinearSegmentedColormap
from torchvision import transforms
from ts.torch_handler.image_classifier import ImageClassifier
logger = logging.getLogger(__name__)
class CIFAR10Classification(ImageClassifier, ABC):
"""
Base class for all vision handlers
"""
def initialize(self, ctx): # pylint: disable=arguments-differ
"""In this initialize function, the CIFAR10 trained model is loaded and
the Integrated Gradients,occlusion and layer_gradcam Algorithm for
Captum Explanations is initialized here.
Args:
ctx (context): It is a JSON Object containing information
pertaining to the model artifacts parameters.
"""
self.manifest = ctx.manifest
properties = ctx.system_properties
model_dir = properties.get("model_dir")
print("Model dir is {}".format(model_dir))
serialized_file = self.manifest["model"]["serializedFile"]
model_pt_path = os.path.join(model_dir, serialized_file)
self.device = torch.device(
"cuda:" + str(properties.get("gpu_id")) if torch.cuda.is_available(
) else "cpu"
)
self.model = CIFAR10CLASSIFIER()
self.model.load_state_dict(torch.load(model_pt_path))
self.model.to(self.device)
self.model.eval()
self.model.zero_grad()
logger.info("CIFAR10 model from path %s loaded successfully", model_dir)
# Read the mapping file, index to object name
mapping_file_path = os.path.join(model_dir, "class_mapping.json")
if os.path.isfile(mapping_file_path):
print("Mapping file present")
with open(mapping_file_path) as pointer:
self.mapping = json.load(pointer)
else:
print("Mapping file missing")
logger.warning("Missing the class_mapping.json file.")
self.ig = IntegratedGradients(self.model)
self.layer_gradcam = LayerGradCam(
self.model, self.model.model_conv.layer4[2].conv3
)
self.occlusion = Occlusion(self.model)
self.initialized = True
self.image_processing = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
])
def _get_img(self, row):
"""Compat layer: normally the envelope should just return the data
directly, but older version of KFServing envelope and
Torchserve in general didn't have things set up right
"""
if isinstance(row, dict):
image = row.get("data") or row.get("body")
else:
image = row
if isinstance(image, str):
# if the image is a string of bytesarray.
image = base64.b64decode(image)
return image
def preprocess(self, data):
"""The preprocess function of cifar10 program
converts the input data to a float tensor
Args:
data (List): Input data from the request is in the form of a Tensor
Returns:
list : The preprocess function returns
the input image as a list of float tensors.
"""
images = []
for row in data:
image = self._get_img(row)
# If the image is sent as bytesarray
if isinstance(image, (bytearray, bytes)):
image = Image.open(io.BytesIO(image))
image = self.image_processing(image)
else:
# if the image is a list
image = torch.FloatTensor(image)
images.append(image)
return torch.stack(images).to(self.device)
def attribute_image_features(self, algorithm, data, **kwargs):
"""Calculate tensor attributions"""
self.model.zero_grad()
tensor_attributions = algorithm.attribute(data, target=0, **kwargs)
return tensor_attributions
def output_bytes(self, fig):
"""Convert image to bytes"""
fout = BytesIO()
fig.savefig(fout, format="png")
fout.seek(0)
return fout.getvalue()
def get_insights(self, tensor_data, _, target=0):
default_cmap = LinearSegmentedColormap.from_list(
"custom blue",
[(0, "#ffffff"), (0.25, "#0000ff"), (1, "#0000ff")],
N=256,
)
attributions_ig, _ = self.attribute_image_features(
self.ig,
tensor_data,
baselines=tensor_data * 0,
return_convergence_delta=True,
n_steps=15,
)
attributions_occ = self.attribute_image_features(
self.occlusion,
tensor_data,
strides=(3, 8, 8),
sliding_window_shapes=(3, 15, 15),
baselines=tensor_data * 0,
)
attributions_lgc = self.attribute_image_features(
self.layer_gradcam, tensor_data
)
upsamp_attr_lgc = LayerAttribution.interpolate(
attributions_lgc, tensor_data.shape[2:]
)
matplot_viz_ig, _ = viz.visualize_image_attr_multiple(
np.transpose(
attributions_ig.squeeze().cpu().detach().numpy(), (1, 2, 0)
),
np.transpose(
tensor_data.squeeze().cpu().detach().numpy(), (1, 2, 0)
),
use_pyplot=False,
methods=["original_image", "heat_map"],
cmap=default_cmap,
show_colorbar=True,
signs=["all", "positive"],
titles=["Original", "Integrated Gradients"],
)
matplot_viz_occ, _ = viz.visualize_image_attr_multiple(
np.transpose(
attributions_occ.squeeze().cpu().detach().numpy(), (1, 2, 0)
),
np.transpose(
tensor_data.squeeze().cpu().detach().numpy(), (1, 2, 0)
),
[
"original_image",
"heat_map",
"heat_map",
],
["all", "positive", "negative"],
show_colorbar=True,
titles=[
"Original",
"Positive Attribution",
"Negative Attribution",
],
fig_size=(18, 6),
use_pyplot=False,
)
matplot_viz_lgc, _ = viz.visualize_image_attr_multiple(
upsamp_attr_lgc[0].cpu().permute(1, 2, 0).detach().numpy(),
tensor_data.squeeze().permute(1, 2, 0).cpu().numpy(),
use_pyplot=False,
methods=["original_image", "blended_heat_map", "blended_heat_map"],
signs=["all", "positive", "negative"],
show_colorbar=True,
titles=[
"Original",
"Positive Attribution",
"Negative Attribution",
],
fig_size=(18, 6)
)
occ_bytes = self.output_bytes(matplot_viz_occ)
ig_bytes = self.output_bytes(matplot_viz_ig)
lgc_bytes = self.output_bytes(matplot_viz_lgc)
output = [{
"b64": b64encode(row).decode("utf8")
} if isinstance(row, (bytes, bytearray)) else row
for row in [ig_bytes, occ_bytes, lgc_bytes]]
return output
|
the-stack_106_14291
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Expression evaluator for Full Text Search API stub.
An associated ExpressionEvaluator object is created for every scored document in
search results, and that object evaluates all expressions for that document. The
expression syntax is detailed here:
https://developers.google.com/appengine/docs/python/search/overview#Expressions
Usage examples:
# Evaluate one expression for scored_doc
expression = search_service_pb.FieldSpec_Expression()
expression.set_name('total_value')
expression.set_expression('max(0, 3 * value + _score)')
ExpressionEvaluator(scored_doc, inverted_index).Evaluate(expression)
# scored_doc.expressions['total_value'] is now set to the expression result.
# Attach the result of all expressions for documents in scored_docs
for scored_doc in scored_docs:
evaluator = ExpressionEvaluator(scored_doc, inverted_index)
for expression in expression_protos:
evaluator.Evaluate(expression)
Note that this is not used for the production Full Text Search API; this
provides an approximation to the API for local testing with dev_appserver.
"""
from __future__ import division
from builtins import object
from past.utils import old_div
import logging
import math
from google.appengine.datastore import document_pb
from google.appengine.api.search import expression_parser
from google.appengine.api.search import ExpressionParser
from google.appengine.api.search import geo_util
from google.appengine.api.search import query_parser
from google.appengine.api.search import search_util
from google.appengine.api.search.stub import simple_tokenizer
from google.appengine.api.search.stub import tokens
import six
_SNIPPET_PREFIX = '...'
_SNIPPET_SUFFIX = '...'
class QueryExpressionEvaluationError(Exception):
"""ExpressionEvaluation Error that needs to return query as error status."""
class ExpressionEvaluationError(Exception):
"""Exposed version of _ExpressionError."""
class _ExpressionError(Exception):
"""Raised when evaluating an expression fails."""
class ExpressionEvaluator(object):
"""Evaluates an expression on scored documents."""
def __init__(self, document, inverted_index, is_sort_expression=False):
"""Constructor.
Args:
document: The ScoredDocument to evaluate the expression for.
inverted_index: The search index (used for snippeting).
is_sort_expression: The flag indicates if this is a sort expression. Some
operations (such as COUNT) are not supported in sort expressions.
"""
self._doc = document
self._doc_pb = document.document
self._inverted_index = inverted_index
self._tokenizer = simple_tokenizer.SimpleTokenizer(preserve_case=False)
self._case_preserving_tokenizer = simple_tokenizer.SimpleTokenizer(
preserve_case=True)
self._function_table = {
ExpressionParser.ABS: self._Abs,
ExpressionParser.COUNT: self._Count,
ExpressionParser.DISTANCE: self._Distance,
ExpressionParser.GEOPOINT: self._Geopoint,
ExpressionParser.LOG: self._Log,
ExpressionParser.MAX: self._Max,
ExpressionParser.MIN: self._Min,
ExpressionParser.POW: self._Pow,
ExpressionParser.SNIPPET: self._Snippet,
ExpressionParser.SWITCH: self._Unsupported('switch'),
}
self._is_sort_expression = is_sort_expression
@classmethod
def _GetFieldValue(cls, field):
"""Returns the value of a field as the correct type.
Args:
field: The field whose value is extracted. If the given field is None, this
function also returns None. This is to make it easier to chain with
GetFieldInDocument().
Returns:
The value of the field with the correct type (float for number fields,
datetime.datetime for date fields, etc).
Raises:
TypeError: if the type of the field isn't recognized.
"""
if not field:
return None
value_type = field.value().type()
if value_type in search_util.TEXT_DOCUMENT_FIELD_TYPES:
return field.value().string_value()
if value_type == document_pb.FieldValue.DATE:
value = field.value().string_value()
return search_util.DeserializeDate(value)
if value_type == document_pb.FieldValue.NUMBER:
value = field.value().string_value()
return float(value)
if value_type == document_pb.FieldValue.GEO:
value = field.value().geo()
return geo_util.LatLng(value.lat(), value.lng())
raise TypeError('No conversion defined for type %s' % value_type)
def _Min(self, return_type, *nodes):
if return_type == search_util.EXPRESSION_RETURN_TYPE_TEXT:
raise _ExpressionError('Min cannot be converted to a text type')
return min(self._Eval(
node, document_pb.FieldValue.NUMBER) for node in nodes)
def _Max(self, return_type, *nodes):
if return_type == search_util.EXPRESSION_RETURN_TYPE_TEXT:
raise _ExpressionError('Max cannot be converted to a text type')
return max(self._Eval(
node, document_pb.FieldValue.NUMBER) for node in nodes)
def _Abs(self, return_type, node):
if return_type == search_util.EXPRESSION_RETURN_TYPE_TEXT:
raise _ExpressionError('Abs cannot be converted to a text type')
return abs(self._Eval(node, document_pb.FieldValue.NUMBER))
def _Log(self, return_type, node):
if return_type == search_util.EXPRESSION_RETURN_TYPE_TEXT:
raise _ExpressionError('Log cannot be converted to a text type')
return math.log(self._Eval(node, document_pb.FieldValue.NUMBER))
def _Pow(self, return_type, *nodes):
if return_type == search_util.EXPRESSION_RETURN_TYPE_TEXT:
raise _ExpressionError('Pow cannot be converted to a text type')
lhs, rhs = nodes
return pow(self._Eval(lhs, document_pb.FieldValue.NUMBER),
self._Eval(rhs, document_pb.FieldValue.NUMBER))
def _Distance(self, return_type, *nodes):
if return_type == search_util.EXPRESSION_RETURN_TYPE_TEXT:
raise _ExpressionError('Distance cannot be converted to a text type')
lhs, rhs = nodes
return (self._Eval(lhs, document_pb.FieldValue.GEO) -
self._Eval(rhs, document_pb.FieldValue.GEO))
def _Geopoint(self, return_type, *nodes):
if return_type == search_util.EXPRESSION_RETURN_TYPE_TEXT:
raise _ExpressionError('Geopoint cannot be converted to a text type')
latitude, longitude = (self._Eval(
node, document_pb.FieldValue.NUMBER) for node in nodes)
return geo_util.LatLng(latitude, longitude)
def _Count(self, return_type, node):
if node.getType() != ExpressionParser.NAME:
raise _ExpressionError(
'The argument to count() must be a simple field name')
if self._is_sort_expression:
raise query_parser.QueryException(
'Failed to parse sort expression \'count(' + node.getText() +
')\': count() is not supported in sort expressions')
return search_util.GetFieldCountInDocument(
self._doc_pb, query_parser.GetQueryNodeText(node))
def _GenerateSnippet(self, doc_words, position, max_length):
"""Generate a snippet that fills a given length from a list of tokens.
Args:
doc_words: A list of tokens from the document.
position: The index of the highlighted word.
max_length: The maximum length of the output snippet.
Returns:
A summary of the given words with the word at index position highlighted.
"""
snippet = '<b>%s</b>' % doc_words[position]
next_len, prev_len = 0, 0
if position + 1 < len(doc_words):
next_len = len(doc_words[position+1]) + 1
if position > 0:
prev_len = len(doc_words[position-1]) + 1
i = 1
length_offset = len(_SNIPPET_PREFIX) + len(_SNIPPET_SUFFIX)
while (len(snippet) + next_len + prev_len + length_offset < max_length and
(position + i < len(doc_words) or position - i > 0)):
if position + i < len(doc_words):
snippet = '%s %s' % (snippet, doc_words[position+i])
next_len = len(doc_words[position+i]) + 1
else:
next_len = 0
if position - i >= 0:
snippet = '%s %s' % (doc_words[position-i], snippet)
prev_len = len(doc_words[position-i]) + 1
else:
prev_len = 0
i += 1
return '%s%s%s' % (_SNIPPET_PREFIX, snippet, _SNIPPET_SUFFIX)
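  # Illustrative example: _GenerateSnippet(['the', 'quick', 'brown', 'fox'], 2, 40)
  # returns '...quick <b>brown</b> fox...': the highlighted word plus as many
  # neighbouring words as fit within max_length.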
def _Snippet(self, return_type, query, field, *args):
"""Create a snippet given a query and the field to query on.
Args:
query: A query string containing only a bare term (no operators).
field: The field name to query on.
*args: Unused optional arguments. These are not used on dev_appserver.
Returns:
A snippet for the field with the query term bolded.
Raises:
ExpressionEvaluationError: if this is a sort expression.
"""
field = query_parser.GetQueryNodeText(field)
if self._is_sort_expression:
raise ExpressionEvaluationError(
'Failed to parse sort expression \'snippet(' +
query_parser.GetQueryNodeText(query) + ', ' + field +
')\': snippet() is not supported in sort expressions')
schema = self._inverted_index.GetSchema()
if schema.IsType(field, document_pb.FieldValue.NUMBER):
raise ExpressionEvaluationError(
'Failed to parse field expression \'snippet(' +
query_parser.GetQueryNodeText(query) + ', ' + field +
')\': snippet() argument 2 must be text')
terms = self._tokenizer.TokenizeText(
query_parser.GetQueryNodeText(query).strip('"'))
for term in terms:
search_token = tokens.Token(chars='%s:%s' % (field, term.chars))
postings = self._inverted_index.GetPostingsForToken(search_token)
for posting in postings:
if posting.doc_id != self._doc_pb.id() or not posting.positions:
continue
field_val = self._GetFieldValue(
search_util.GetFieldInDocument(self._doc_pb, field))
if not field_val:
continue
doc_words = [token.chars for token in
self._case_preserving_tokenizer.TokenizeText(field_val)]
position = posting.positions[0]
return self._GenerateSnippet(
doc_words, position, search_util.DEFAULT_MAX_SNIPPET_LENGTH)
else:
field_val = self._GetFieldValue(
search_util.GetFieldInDocument(self._doc_pb, field))
if not field_val:
return ''
return '%s...' % field_val[:search_util.DEFAULT_MAX_SNIPPET_LENGTH]
def _Unsupported(self, method):
"""Returns a function that raises an unsupported error when called.
This should be used for methods that are not yet implemented in
dev_appserver but are present in the API. If users call this function, the
expression will be skipped and a warning will be logged.
Args:
method: The name of the method that was called (used for logging).
Returns:
A function that raises a UnsupportedOnDevError when called.
"""
def RaiseUnsupported(*args):
raise search_util.UnsupportedOnDevError(
'%s is currently unsupported on dev_appserver.' % method)
return RaiseUnsupported
def _EvalNumericBinaryOp(self, op, op_name, node, return_type):
"""Evaluate a Numeric Binary operator on the document.
Args:
op: The operator function. Must take exactly two arguments.
op_name: The name of the operator. Used in error messages.
node: The expression AST node representing the operator application.
return_type: The type to retrieve for fields with multiple types
in the expression. Used when the field type is ambiguous and cannot be
inferred from the context. If None, we retrieve the first field type
found in doc list.
Returns:
The result of applying op to node's two children.
Raises:
ValueError: The node does not have exactly two children.
_ExpressionError: The return type is Text.
"""
if return_type == search_util.EXPRESSION_RETURN_TYPE_TEXT:
raise _ExpressionError('Expression cannot be converted to a text type')
if len(node.children) != 2:
raise ValueError('%s operator must always have two arguments' % op_name)
n1, n2 = node.children
return op(self._Eval(n1, document_pb.FieldValue.NUMBER),
self._Eval(n2, document_pb.FieldValue.NUMBER))
def _EvalNumericUnaryOp(self, op, op_name, node, return_type):
"""Evaluate a unary operator on the document.
Args:
op: The operator function. Must take exactly one argument.
op_name: The name of the operator. Used in error messages.
node: The expression AST node representing the operator application.
return_type: The type to retrieve for fields with multiple types
in the expression. Used when the field type is ambiguous and cannot be
inferred from the context. If None, we retrieve the first field type
found in doc list.
Returns:
The result of applying op to node's child.
Raises:
ValueError: The node does not have exactly one child.
_ExpressionError: The return type is Text.
"""
if return_type == search_util.EXPRESSION_RETURN_TYPE_TEXT:
raise _ExpressionError('Expression cannot be converted to a text type')
if len(node.children) != 1:
      raise ValueError('%s operator must always have one argument' % op_name)
return op(self._Eval(node.children[0], document_pb.FieldValue.NUMBER))
def _Eval(self, node, return_type=None):
"""Evaluate an expression node on the document.
Args:
node: The expression AST node representing an expression subtree.
return_type: The type to retrieve for fields with multiple types
in the expression. Used when the field type is ambiguous and cannot be
inferred from the context. If None, we retrieve the first field type
found in doc list.
Returns:
The Python value that maps to the value of node. Types are inferred from
the expression, so expressions with numeric results will return as python
int/long/floats, textual results will be strings, and dates will be
datetimes.
Raises:
_ExpressionError: The expression cannot be evaluated on this document
because either the expression is malformed or the document does not
contain the required fields. Callers of _Eval should catch
_ExpressionErrors and optionally log them; these are not fatal in any way,
and are used to indicate that this expression should not be set on this
document.
"""
if node.getType() in self._function_table:
func = self._function_table[node.getType()]
return func(return_type, *node.children)
if node.getType() == ExpressionParser.PLUS:
return self._EvalNumericBinaryOp(lambda a, b: a + b, 'addition', node,
return_type)
if node.getType() == ExpressionParser.MINUS:
return self._EvalNumericBinaryOp(lambda a, b: a - b, 'subtraction', node,
return_type)
if node.getType() == ExpressionParser.DIV:
return self._EvalNumericBinaryOp(lambda a, b: old_div(a, b), 'division', node,
return_type)
if node.getType() == ExpressionParser.TIMES:
return self._EvalNumericBinaryOp(lambda a, b: a * b,
'multiplication', node, return_type)
if node.getType() == ExpressionParser.NEG:
return self._EvalNumericUnaryOp(lambda a: -a, 'negation', node,
return_type)
if node.getType() in (ExpressionParser.INT, ExpressionParser.FLOAT):
return float(query_parser.GetQueryNodeText(node))
if node.getType() == ExpressionParser.PHRASE:
return query_parser.GetQueryNodeText(node).strip('"')
if node.getType() == ExpressionParser.NAME:
name = query_parser.GetQueryNodeText(node)
if name == '_score':
return self._doc.score
field = search_util.GetFieldInDocument(self._doc_pb, name,
return_type)
if field:
return self._GetFieldValue(field)
raise _ExpressionError('No field %s in document' % name)
raise _ExpressionError('Unable to handle node %s' % node)
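  # For example, the module docstring expression 'max(0, 3 * value + _score)' dispatches to _Max,
  # whose arguments are evaluated through the TIMES/PLUS branches above, with 'value' read from
  # the document fields and '_score' taken from the scored document.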
def ValueOf(self, expression, default_value=None, return_type=None):
"""Returns the value of an expression on a document.
Args:
expression: The expression string.
default_value: The value to return if the expression cannot be evaluated.
return_type: The type the expression should evaluate to. Used to create
multiple sorts for ambiguous expressions. If None, the expression
evaluates to the inferred type or first type of a field it encounters in
a document.
Returns:
The value of the expression on the evaluator's document, or default_value
if the expression cannot be evaluated on the document.
Raises:
ExpressionEvaluationError: sort expression cannot be evaluated
because the expression or default value is malformed. Callers of
ValueOf should catch and return error to user in response.
QueryExpressionEvaluationError: same as ExpressionEvaluationError but
these errors should return query as error status to users.
"""
expression_tree = Parse(expression)
if not expression_tree.getType() and expression_tree.children:
expression_tree = expression_tree.children[0]
name = query_parser.GetQueryNodeText(expression_tree)
schema = self._inverted_index.GetSchema()
if (expression_tree.getType() == ExpressionParser.NAME and
name in schema):
contains_text_result = False
for field_type in schema[name].type_list():
if field_type in search_util.TEXT_DOCUMENT_FIELD_TYPES:
contains_text_result = True
if (schema.IsType(name, document_pb.FieldValue.DATE) and
not contains_text_result):
if isinstance(default_value, six.string_types):
try:
default_value = search_util.DeserializeDate(default_value)
except ValueError:
raise QueryExpressionEvaluationError(
'Default text value is not appropriate for sort expression \'' +
name + '\': failed to parse date \"' + default_value + '\"')
result = default_value
try:
result = self._Eval(expression_tree, return_type=return_type)
except _ExpressionError as e:
logging.debug('Skipping expression %s: %s', expression, e)
except search_util.UnsupportedOnDevError as e:
logging.warning(e.args[0])
return result
def Evaluate(self, expression):
"""Evaluates the expression for a document and attaches the result.
Args:
expression: The Expression protobuffer object.
"""
name = expression.name()
result = self.ValueOf(expression.expression())
if isinstance(result, six.text_type):
result = result.encode('utf-8')
    if result is not None:
self._doc.expressions[name] = result
def Parse(expression):
"""Parse an expression and return its parse tree.
Args:
expression: An expression string.
Returns:
A parse tree for the expression, as generated by expression_parser.
"""
return expression_parser.Parse(expression).tree
|
the-stack_106_14294
|
"""
Settings we use for production. Some of these could eventually be moved into a settings.ini file
"""
from .base_settings import *
from decouple import config, Csv
TELNET_INTERFACES = config('TELNET_INTERFACES', default='45.33.87.194', cast=Csv())
WEBSOCKET_CLIENT_INTERFACE = config('WEBSOCKET_CLIENT_INTERFACE', default='45.33.87.194')
ALLOWED_HOSTS = config('ALLOWED_HOSTS', default='.arxmush.org, .arxgame.org', cast=Csv())
WEBSERVER_PORTS = [(8000, 5001)]
WEBSOCKET_CLIENT_PORT = 8001
SSH_PORTS = [8022]
SSL_PORTS = [4001]
AMP_PORT = 5000
SITE_HEADER = "ArxPrime Admin"
INDEX_TITLE = "ArxPrime Admin"
IDMAPPER_CACHE_MAXSIZE = 4000
CHECK_VPN = True
MAX_CHAR_LIMIT = 8000
######################################################################
# Contrib config
######################################################################
if SEND_GAME_INDEX:
GAME_INDEX_LISTING = {
'game_status': 'beta',
# Optional, comment out or remove if N/A
'game_website': 'http://play.arxgame.org',
'short_description': 'MUX-style game in an original fantasy setting',
# Optional but highly recommended. Markdown is supported.
'long_description': (
"Arx is a MUX-style game in an original low-fantasy setting, "
"inspired by series such as Game of Thrones and The First Law. "
),
'listing_contact': '[email protected]',
# At minimum, specify this or the web_client_url options. Both is fine, too.
'telnet_hostname': 'play.arxgame.org',
'telnet_port': 3000,
# At minimum, specify this or the telnet_* options. Both is fine, too.
'web_client_url': 'http://play.arxgame.org/webclient',
}
|
the-stack_106_14295
|
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger
from keras import backend as K
from keras.models import load_model
from math import ceil
import numpy as np
from matplotlib import pyplot as plt
from models.keras_ssd300 import ssd_300
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from keras_layers.keras_layer_L2Normalization import L2Normalization
from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder
from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast
from data_generator.object_detection_2d_data_generator import DataGenerator
from data_generator.object_detection_2d_geometric_ops import Resize
from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels
from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation
from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms
img_height = 300 # Height of the model input images
img_width = 300 # Width of the model input images
img_channels = 3 # Number of color channels of the model input images
mean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.
swap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.
n_classes = 20 # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO
scales_pascal = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets
scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets
scales = scales_pascal
aspect_ratios = [[1.0, 2.0, 0.5],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5],
[1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters
two_boxes_for_ar1 = True
steps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer.
offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.
clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries
variances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation
normalize_coords = True
K.clear_session() # Clear previous models from memory.
model = ssd_300(image_size=(img_height, img_width, img_channels),
n_classes=n_classes,
mode='training',
l2_regularization=0.0005,
scales=scales,
aspect_ratios_per_layer=aspect_ratios,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
normalize_coords=normalize_coords,
subtract_mean=mean_color,
swap_channels=swap_channels)
# 2: Load some weights into the model.
# TODO: Set the path to the weights you want to load.
weights_path = 'path/to/VGG_ILSVRC_16_layers_fc_reduced.h5'
model.load_weights(weights_path, by_name=True)
# 3: Instantiate an optimizer and the SSD loss function and compile the model.
# If you want to follow the original Caffe implementation, use the preset SGD
# optimizer, otherwise I'd recommend the commented-out Adam optimizer.
#adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
model.compile(optimizer=sgd, loss=ssd_loss.compute_loss)
model_path = '/home/suraj/suraj/project/novus_pilot_deep_learning/ssd_keras_updated/pre-trained-model/mobilenet.h5'
# We need to create an SSDLoss object in order to pass that to the model loader.
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
K.clear_session() # Clear previous models from memory.
model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,
'L2Normalization': L2Normalization,
'compute_loss': ssd_loss.compute_loss})
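# Note: this load_model call replaces the ssd_300 model that was built, weight-loaded and
# compiled above; in practice keep only one of the two initialisation paths.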
train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
# 2: Parse the image and label lists for the training and validation datasets. This can take a while.
# TODO: Set the paths to the datasets here.
# The directories that contain the images.
VOC_2007_images_dir = '../../datasets/VOCdevkit/VOC2007/JPEGImages/'
VOC_2012_images_dir = '../../datasets/VOCdevkit/VOC2012/JPEGImages/'
# The directories that contain the annotations.
VOC_2007_annotations_dir = '../../datasets/VOCdevkit/VOC2007/Annotations/'
VOC_2012_annotations_dir = '../../datasets/VOCdevkit/VOC2012/Annotations/'
# The paths to the image sets.
VOC_2007_train_image_set_filename = '../../datasets/VOCdevkit/VOC2007/ImageSets/Main/train.txt'
VOC_2012_train_image_set_filename = '../../datasets/VOCdevkit/VOC2012/ImageSets/Main/train.txt'
VOC_2007_val_image_set_filename = '../../datasets/VOCdevkit/VOC2007/ImageSets/Main/val.txt'
VOC_2012_val_image_set_filename = '../../datasets/VOCdevkit/VOC2012/ImageSets/Main/val.txt'
VOC_2007_trainval_image_set_filename = '../../datasets/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt'
VOC_2012_trainval_image_set_filename = '../../datasets/VOCdevkit/VOC2012/ImageSets/Main/trainval.txt'
VOC_2007_test_image_set_filename = '../../datasets/VOCdevkit/VOC2007/ImageSets/Main/test.txt'
# The XML parser needs to now what object class names to look for and in which order to map them to integers.
classes = ['background',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor']
train_dataset.parse_xml(images_dirs=[VOC_2007_images_dir,
VOC_2012_images_dir],
image_set_filenames=[VOC_2007_trainval_image_set_filename,
VOC_2012_trainval_image_set_filename],
annotations_dirs=[VOC_2007_annotations_dir,
VOC_2012_annotations_dir],
classes=classes,
include_classes='all',
exclude_truncated=False,
exclude_difficult=False,
ret=False)
val_dataset.parse_xml(images_dirs=[VOC_2007_images_dir],
image_set_filenames=[VOC_2007_test_image_set_filename],
annotations_dirs=[VOC_2007_annotations_dir],
classes=classes,
include_classes='all',
exclude_truncated=False,
exclude_difficult=True,
ret=False)
# Optional: Convert the dataset into an HDF5 dataset. This will require more disk space, but will
# speed up the training. Doing this is not relevant in case you activated the `load_images_into_memory`
# option in the constructor, because in that case the images are in memory already anyway. If you don't
# want to create HDF5 datasets, comment out the subsequent two function calls.
train_dataset.create_hdf5_dataset(file_path='dataset_pascal_voc_07+12_trainval.h5',
resize=False,
variable_image_size=True,
verbose=True)
val_dataset.create_hdf5_dataset(file_path='dataset_pascal_voc_07_test.h5',
resize=False,
variable_image_size=True,
verbose=True)
batch_size = 32 # Change the batch size if you like, or if you run into GPU memory issues.
# 4: Set the image transformations for pre-processing and data augmentation options.
# For the training generator:
ssd_data_augmentation = SSDDataAugmentation(img_height=img_height,
img_width=img_width,
background=mean_color)
# For the validation generator:
convert_to_3_channels = ConvertTo3Channels()
resize = Resize(height=img_height, width=img_width)
# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.
# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.
predictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],
model.get_layer('fc7_mbox_conf').output_shape[1:3],
model.get_layer('conv6_2_mbox_conf').output_shape[1:3],
model.get_layer('conv7_2_mbox_conf').output_shape[1:3],
model.get_layer('conv8_2_mbox_conf').output_shape[1:3],
model.get_layer('conv9_2_mbox_conf').output_shape[1:3]]
ssd_input_encoder = SSDInputEncoder(img_height=img_height,
img_width=img_width,
n_classes=n_classes,
predictor_sizes=predictor_sizes,
scales=scales,
aspect_ratios_per_layer=aspect_ratios,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
matching_type='multi',
pos_iou_threshold=0.5,
neg_iou_limit=0.5,
normalize_coords=normalize_coords)
# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.
train_generator = train_dataset.generate(batch_size=batch_size,
shuffle=True,
transformations=[ssd_data_augmentation],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
val_generator = val_dataset.generate(batch_size=batch_size,
shuffle=False,
transformations=[convert_to_3_channels,
resize],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
# Get the number of samples in the training and validations datasets.
train_dataset_size = train_dataset.get_dataset_size()
val_dataset_size = val_dataset.get_dataset_size()
print("Number of images in the training dataset:\t{:>6}".format(train_dataset_size))
print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size))
def lr_schedule(epoch):
if epoch < 80:
return 0.001
elif epoch < 100:
return 0.0001
else:
return 0.00001
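# Step decay: 1e-3 for epochs < 80, 1e-4 for epochs 80-99, and 1e-5 afterwards.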
# Define model callbacks.
# TODO: Set the filepath under which you want to save the model.
model_checkpoint = ModelCheckpoint(filepath='ssd300_pascal_07+12_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
#model_checkpoint.best =
csv_logger = CSVLogger(filename='ssd300_pascal_07+12_training_log.csv',
separator=',',
append=True)
learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,
verbose=1)
terminate_on_nan = TerminateOnNaN()
callbacks = [model_checkpoint,
csv_logger,
learning_rate_scheduler,
terminate_on_nan]
initial_epoch = 0
final_epoch = 120
steps_per_epoch = 1000
history = model.fit_generator(generator=train_generator,
steps_per_epoch=steps_per_epoch,
epochs=final_epoch,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=ceil(val_dataset_size/batch_size),
initial_epoch=initial_epoch)
|
the-stack_106_14298
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .base_job_parameters import BaseJobParameters
class CreateJobParameters(BaseJobParameters):
"""The parameters used to submit a new Data Lake Analytics job.
All required parameters must be populated in order to send to Azure.
:param type: Required. The job type of the current job (Hive, USql, or
Scope (for internal use only)). Possible values include: 'USql', 'Hive',
'Scope'
:type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType
:param properties: Required. The job specific properties.
:type properties:
~azure.mgmt.datalake.analytics.job.models.CreateJobProperties
:param name: Required. The friendly name of the job to submit.
:type name: str
:param degree_of_parallelism: The degree of parallelism to use for this
job. This must be greater than 0, if set to less than 0 it will default to
1. Default value: 1 .
:type degree_of_parallelism: int
:param priority: The priority value to use for the current job. Lower
numbers have a higher priority. By default, a job has a priority of 1000.
This must be greater than 0.
:type priority: int
:param log_file_patterns: The list of log file name patterns to find in
the logFolder. '*' is the only matching character allowed. Example format:
jobExecution*.log or *mylog*.txt
:type log_file_patterns: list[str]
:param related: The recurring job relationship information properties.
:type related:
~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties
"""
_validation = {
'type': {'required': True},
'properties': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'JobType'},
'properties': {'key': 'properties', 'type': 'CreateJobProperties'},
'name': {'key': 'name', 'type': 'str'},
'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'},
'priority': {'key': 'priority', 'type': 'int'},
'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'},
'related': {'key': 'related', 'type': 'JobRelationshipProperties'},
}
def __init__(self, **kwargs):
super(CreateJobParameters, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.degree_of_parallelism = kwargs.get('degree_of_parallelism', 1)
self.priority = kwargs.get('priority', None)
self.log_file_patterns = kwargs.get('log_file_patterns', None)
self.related = kwargs.get('related', None)
|
the-stack_106_14299
|
import numpy as np
from tqdm import tqdm
from nuimages.nuimages import NuImages
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
from nuscenes.utils.geometry_utils import view_points, transform_matrix
from nuscenes.utils.data_classes import Box
from nuscenes.utils.geometry_utils import BoxVisibility
from pycocotools import mask
from detectron2.structures import Boxes, BoxMode, PolygonMasks
from detectron2.utils.file_io import PathManager
from .. import DatasetCatalog, MetadataCatalog
categories = ['human.pedestrian',
'vehicle.car',
'vehicle.bus',
'vehicle.truck',
'vehicle.cycle',
'vehicle.cycle.withrider']
full_categories = ["animal",
"flat.driveable_surface",
"human.pedestrian.adult",
"human.pedestrian.child",
"human.pedestrian.construction_worker",
"human.pedestrian.personal_mobility",
"human.pedestrian.police_officer",
"human.pedestrian.stroller",
"human.pedestrian.wheelchair",
"movable_object.barrier",
"movable_object.debris",
"movable_object.pushable_pullable",
"movable_object.trafficcone",
"static_object.bicycle_rack",
"vehicle.bicycle",
"vehicle.bus.bendy",
"vehicle.bus.rigid",
"vehicle.car",
"vehicle.construction",
"vehicle.ego",
"vehicle.emergency.ambulance",
"vehicle.emergency.police",
"vehicle.motorcycle",
"vehicle.trailer",
"vehicle.truck"]
categories_mapping = [[2,3,4,5,6,7,8],
[17,18,20,21],
[15,16],
[24,25],
[14,22]]
def convert_categories(cid,categories_mapping):
for i in range(len(categories_mapping)):
if cid in categories_mapping[i]:
return i
return None
def load_nuimages_dicts(path, version, categories = categories):
assert (path[-1] == "/"), "Insert '/' in the end of path"
nuim = NuImages(dataroot='/mnt/disk1/nuImages', version=version, verbose=True, lazy=True)
if categories == None:
categories = [data["name"] for data in nuim.category]
assert (isinstance(categories, list)), "Categories type must be list"
dataset_dicts = []
#idx = 0
# for i in tqdm(range(0, len(nuim.scene))):
# scene = nuim.scene[i]
# scene_rec = nuim.get('scene', scene['token'])
# sample_rec_cur = nuim.get('sample', scene_rec['first_sample_token'])
# Go through all frame in current scene
flag = 1
for idx in tqdm(range(0, len(nuim.sample))):
data = nuim.sample_data[idx]
# if not nuim.get('calibrated_sensor', data['calibrated_sensor_token'])['sensor_token']=="23b8c1e9392446debeb13b9046685257":
# #if not data['filename'][6:17] =="/CAM_FRONT/":
# continue
if not (data['filename'][:17] =="sweeps/CAM_FRONT/" or data['filename'][:18] =="samples/CAM_FRONT/"):
continue
record = {}
record["file_name"] = path + data["filename"]
record["image_id"] = idx
record["height"] = data["height"]
record["width"] = data["width"]
#idx += 1
# Get sample_content
objs = []
if data['is_key_frame']:
#print(version,data['filename'][:18])
#content = nuim.get_sample_content(data['sample_token'])
nuim.load_tables(['object_ann','sample_data','category','attribute'])
#print(nuim.object_ann[0])
objects = []
for i in nuim.object_ann:
if i['sample_data_token']==nuim.sample_data[idx]['token']:
objects.append(i)
#print(boxes)
#boxes = [[11,12,13,14]]
_, segs = nuim.get_segmentation(data['token'])
objnum=1
            for obj_ann in objects:
                # Binary mask for the current instance id in the instance segmentation.
                seg = (segs == objnum)
                seg = seg.astype('uint8')
                objnum += 1
                # Look up the index of this annotation's category in the full category list.
                catid = None
                for j in range(len(nuim.category)):
                    if nuim.category[j]['token'] == obj_ann['category_token']:
                        catid = j
                        break
                catid = convert_categories(catid, categories_mapping)
                if catid is None:
                    continue
                if catid == 4:
                    # Cycles carrying a rider get the dedicated 'withrider' class.
                    if nuim.attribute[0]['token'] in obj_ann['attribute_tokens']:
                        catid = 5
                obj = {
                    "bbox": obj_ann['bbox'],
                    "bbox_mode": BoxMode.XYXY_ABS,
                    "category_id": catid,
                    "iscrowd": 0,
                    "segmentation": mask.encode(np.asarray(seg, order="F"))
                }
                objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
root_path = '/mnt/disk1/nuImages/'
# categories = ['human.pedestrian.adult',
# 'human.pedestrian.child',
# 'human.pedestrian.stroller',
# 'human.pedestrian.personal_mobility',
# 'human.pedestrian.police_officer',
# 'human.pedestrian.construction_worker',
# 'vehicle.car',
# 'vehicle.bus.bendy',
# 'vehicle.bus.rigid',
# 'vehicle.truck',
# 'vehicle.trailer']
#
# dataset = 'nuimages_test'
# version = 'v1.0-test'
#
# get_dicts = lambda p = root_path, c = categories: load_nuimages_dicts(path=p,version = version, categories=c)
# DatasetCatalog.register(dataset,get_dicts)
# MetadataCatalog.get(dataset).thing_classes = categories
# MetadataCatalog.get(dataset).evaluator_type = "coco"
#
# dataset = 'nuimages_train'
# version = 'v1.0-train'
# get_dicts = lambda p = root_path, c = categories: load_nuimages_dicts(path=p,version = version, categories=c)
# DatasetCatalog.register(dataset,get_dicts)
# MetadataCatalog.get(dataset).thing_classes = categories
# MetadataCatalog.get(dataset).evaluator_type = "coco"
#
# dataset = 'nuimages_mini'
# version = 'v1.0-mini'
#
# get_dicts = lambda p = root_path, c = categories: load_nuimages_dicts(path=p,version = version, categories=c)
# DatasetCatalog.register(dataset,get_dicts)
# MetadataCatalog.get(dataset).thing_classes = categories
# MetadataCatalog.get(dataset).evaluator_type = "coco"
|
the-stack_106_14300
|
# code-checked
# server-checked
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.models as models
import os
class ToyNet(nn.Module):
def __init__(self, model_id, project_dir):
super(ToyNet, self).__init__()
self.model_id = model_id
self.project_dir = project_dir
self.create_model_dirs()
input_dim = 1
hidden_dim = 10
output_dim = 1
self.fc1_mean = nn.Linear(input_dim, hidden_dim)
self.fc2_mean = nn.Linear(hidden_dim, hidden_dim)
self.fc3_mean = nn.Linear(hidden_dim, output_dim)
self.fc1_var = nn.Linear(input_dim, hidden_dim)
self.fc2_var = nn.Linear(hidden_dim, hidden_dim)
self.fc3_var = nn.Linear(hidden_dim, output_dim)
def forward(self, x):
# (x has shape (batch_size, input_dim))
mean = F.relu(self.fc1_mean(x)) # (shape: (batch_size, hidden_dim))
mean = F.relu(self.fc2_mean(mean)) # (shape: (batch_size, hidden_dim))
mean = self.fc3_mean(mean) # (shape: batch_size, output_dim))
var = F.relu(self.fc1_var(x)) # (shape: (batch_size, hidden_dim))
var = F.relu(self.fc2_var(var)) # (shape: (batch_size, hidden_dim))
var = self.fc3_var(var) # (shape: batch_size, output_dim))
return (mean, var)
def create_model_dirs(self):
self.logs_dir = self.project_dir + "/training_logs"
self.model_dir = self.logs_dir + "/model_%s" % self.model_id
self.checkpoints_dir = self.model_dir + "/checkpoints"
if not os.path.exists(self.logs_dir):
os.makedirs(self.logs_dir)
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
os.makedirs(self.checkpoints_dir)
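# A minimal usage sketch (not part of the original module; model_id and
# project_dir values are illustrative):
if __name__ == "__main__":
    net = ToyNet(model_id="toy_demo", project_dir="/tmp/toynet_demo")
    x = torch.randn(8, 1)  # (batch_size, input_dim)
    mean, var = net(x)
    print(mean.shape, var.shape)  # torch.Size([8, 1]) torch.Size([8, 1])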
|
the-stack_106_14301
|
from django.shortcuts import render
from selenium import webdriver
from crawler.parser_geneerator import JsonToGetText
from selenium.webdriver.chrome.options import Options
import os
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def parser_post(request):
return_text = {}
if request.method == 'POST':
url = request.POST['url']
html = request.POST['html']
json = request.POST['json']
return_text['URL'] = url
return_text['html'] = html
        if url != '' and html == '':
#driver = webdriver.Chrome(
# executable_path=r'/Users/qq/PycharmProjects/parseltongue/chromedriver/chromedriver')
chrome_bin = os.environ.get('GOOGLE_CHROME_SHIM', None)
chrome_options = Options()
chrome_options.binary_location = chrome_bin
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.get(url)
html = driver.page_source
driver.close()
        if html != '' and json != '':
get_html = JsonToGetText(html, json)
return_text['tag'] = get_html.get_tag_text()
return_text['table'] = get_html.get_table_text()
return render(request, "crawler.html", return_text)
|
the-stack_106_14302
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Keyvault client - adapted from Bluehound code."""
import base64
import json
from typing import Any, List
from azure.core.exceptions import ResourceNotFoundError
from azure.keyvault.secrets import KeyVaultSecret, SecretClient
from azure.mgmt.keyvault import KeyVaultManagementClient
from azure.mgmt.keyvault.models import (
AccessPolicyEntry,
CertificatePermissions,
KeyPermissions,
Permissions,
SecretPermissions,
Sku,
Vault,
VaultCreateOrUpdateParameters,
VaultProperties,
)
from msrestazure.azure_exceptions import CloudError
from .._version import VERSION
from .azure_auth_core import az_connect_core
from .exceptions import MsticpyKeyVaultConfigError, MsticpyKeyVaultMissingSecretError
from .keyvault_settings import KeyVaultSettings
from .utility import export
__version__ = VERSION
__author__ = "Matt Richard, Ian Hellen"
@export
class BHKeyVaultClient:
"""Core KeyVault client."""
_KEYRING_NAME = "keyvault"
def __init__(
self,
tenant_id: str = None,
vault_uri: str = None,
vault_name: str = None,
settings: KeyVaultSettings = None,
**kwargs,
):
"""
Initialize the BHKeyVault client.
Parameters
----------
tenant_id : str
The tenant ID of the service
vault_uri : str, optional
The full URI of the keyvault, by default None
vault_name : str, optional
The name of the keyvault in the public cloud, by default None
auth_methods : List[str]
The authentication methods to use for Key Vault auth
Possible values are:
- "env" - to get authentication details from environment varibales
- "cli" - to use Azure CLI authentication details
- "msi" - to user Managed Service Indenity details
- "interactive" - to prompt for interactive login
authn_type : str, optional
[deprecated - use auth_methods]
Authentication mode, by default 'interactive'
Supported options are:
- 'device' for device code authentication
- 'interactive' for interactive browser authentication
authority : str, optional
The AAD authority - one of 'global', 'usgov', 'de' or 'chi'
authority_uri : str, optional
The AAD authority URI - overrides `authority`
settings : KeyVaultSettings
An instance of KeyVaultSettings containing KV parameters.
debug : bool, optional
            If True, print diagnostic output, by default False
Raises
------
KeyVaultMissingVaultException
No Vault name or URI supplied.
Notes
-----
The parameter values can also be obtained from the
KeyVault section of msticpyconfig.yaml.
"""
self.debug = kwargs.pop("debug", False)
self.settings: KeyVaultSettings = settings or KeyVaultSettings()
self.tenant_id = tenant_id or self.settings.get("tenantid")
if not self.tenant_id:
raise MsticpyKeyVaultConfigError(
"Could not get TenantId from function parameters or configuration.",
"Please add this to the KeyVault section of msticpyconfig.yaml",
title="missing tenant ID value.",
)
self.authn_type = kwargs.pop(
"authn_type", self.settings.get("authntype", "interactive")
)
self.auth_methods = kwargs.pop(
"auth_methods", self.settings.get("auth_methods", ["interactive"])
)
# for authority and authority_uri, any parameters take priority
# and fall back on settings if not specified.
if "authority" in kwargs:
self.settings["authority"] = kwargs.pop("authority")
self.authority_uri = self.settings.get_tenant_authority_host(
authority_uri=kwargs.get("authority_uri"), tenant=self.tenant_id
)
if not vault_uri and not vault_name:
if "vaultname" in self.settings:
vault_name = self.settings["vaultname"]
else:
raise MsticpyKeyVaultConfigError(
"Check that you have specified the right value for VaultName"
+ " in your configuration",
title="Key Vault vault name not found.",
)
if vault_uri:
self.vault_uri = vault_uri
else:
vault_uri = self.settings.keyvault_uri
if vault_uri:
self.vault_uri = vault_uri.format(vault=vault_name)
else:
cloud = self.settings.cloud
raise MsticpyKeyVaultConfigError(
f"Could not determine keyvault URI for national cloud {cloud}.",
"Please verify that you have the correct national cloud"
+ "specified in the KeyVault section of msticpyconfig.yaml",
title="no Key Vault URI for national cloud",
)
if self.debug:
print(f"Using Vault URI {self.vault_uri}")
self.kv_client = self._get_secret_client()
def _get_secret_client(self):
credentials = az_connect_core(auth_methods=self.auth_methods)
# Create a secret client
secret_client = SecretClient(self.vault_uri, credentials.modern)
return secret_client
@property
def secrets(self):
"""Return the list of secret names from the vault."""
return [x.id for x in self.kv_client.list_properties_of_secrets()]
def get_secret(self, secret_name: str) -> Any:
"""
Retrieve a secret from the Vault.
Parameters
----------
secret_name : str
Name of the secret
Returns
-------
Any
The secret value
Raises
------
KeyVaultMissingSecretException
Secret not found in the Vault.
"""
if "/" in secret_name:
# If we're passed the full URL to the secret - extract just the
# name
secret_name = secret_name.rsplit("/", maxsplit=1)[-1]
try:
secret_bundle = self.kv_client.get_secret(name=secret_name)
except ResourceNotFoundError as err:
if self.debug:
print(
"Secret: '%s' missing from vault: %s"
% (secret_name, self.vault_uri)
)
raise MsticpyKeyVaultMissingSecretError(
f"Secret name {secret_name} could not be found in {self.vault_uri}",
f"Provider returned: {err}",
title=f"secret {secret_name} not found.",
) from err
if secret_bundle.value is None or not secret_bundle.value:
if self.debug:
print(
"Secret: '%s' was empty in vault %s" % (secret_name, self.vault_uri)
)
raise MsticpyKeyVaultMissingSecretError(
f"Secret name {secret_name} in {self.vault_uri}",
"has blank or null value.",
title=f"secret {secret_name} empty.",
)
return secret_bundle.value
def set_secret(self, secret_name: str, value: Any) -> KeyVaultSecret:
"""
Set a secret in the Vault.
Parameters
----------
secret_name : str
Name of the secret
value: Any
Secret value
Returns
-------
KeyVaultSecret
The secrets bundle for the secret
"""
if self.debug:
print("Storing %s in %s" % (secret_name, self.vault_uri))
return self.kv_client.set_secret(name=secret_name, value=value)
# pylint: disable=too-many-instance-attributes
@export
class BHKeyVaultMgmtClient:
"""Core KeyVault Management client."""
# pylint: disable=too-many-arguments
def __init__(
self,
tenant_id: str = None,
subscription_id: str = None,
resource_group: str = None,
azure_region: str = None,
settings: KeyVaultSettings = None,
**kwargs,
):
"""
Initialize BH KeyVault Management Client.
Parameters
----------
tenant_id : str, Optional
Tenant ID
subscription_id : str, Optional
Subscription ID
resource_group : str, Optional
Resource Group name
azure_region : str, Optional
Azure region - needed to create a new vault.
By default, None
settings : KeyVaultSettings
An instance of KeyVaultSettings containing KV parameters.
mgmt_uri : str, Optional
The URI for Azure management endpoints.
Notes
-----
The parameter values can also be obtained from the
KeyVault section of msticpyconfig.yaml.
"""
self.debug = kwargs.pop("debug", False)
self.settings: KeyVaultSettings = settings or KeyVaultSettings()
self.tenant_id = tenant_id or self.settings.get("tenantid")
if not self.tenant_id:
raise MsticpyKeyVaultConfigError(
"Could not get TenantId from function parameters or configuration.",
"Please add this to the KeyVault section of msticpyconfig.yaml",
title="missing tenant ID value.",
)
self.subscription_id = subscription_id or self.settings.get("subscriptionid")
if not self.subscription_id:
raise MsticpyKeyVaultConfigError(
"Could not get SubscriptionId from function parameters or configuration.",
"Please add this to the KeyVault section of msticpyconfig.yaml",
title="missing SubscriptionId value.",
)
self._client_uri = kwargs.pop("mgmt_uri", None) or self.settings.mgmt_uri
if not self._client_uri:
cloud = self.settings.cloud
raise MsticpyKeyVaultConfigError(
f"Could not obtain an azure management URI for national cloud {cloud}.",
"Please verify that you have the correct national cloud"
+ "specified in the KeyVault section of msticpyconfig.yaml",
title="no Azure Management URI for national cloud",
)
self.auth_client = az_connect_core()
self.resource_group = resource_group or self.settings.get("resourcegroup")
self.azure_region = azure_region or self.settings.get("azureregion")
# pylint: enable=too-many-arguments
def list_vaults(self) -> List[str]:
"""
Return a list of vaults for the subscription.
Returns
-------
List[str]
Vault names
"""
mgmt = KeyVaultManagementClient(self.auth_client.legacy, self.subscription_id)
return [v.name for v in mgmt.vaults.list()]
def get_vault_uri(self, vault_name: str) -> str:
"""
Return the URI for a vault name.
Parameters
----------
vault_name : str
The Vault name.
Returns
-------
str
Vault URI.
"""
mgmt = KeyVaultManagementClient(self.auth_client.legacy, self.subscription_id)
try:
vault = mgmt.vaults.get(self.resource_group, vault_name)
except (CloudError, ResourceNotFoundError) as cloud_err:
raise MsticpyKeyVaultConfigError(
"Check that you have specified the right value for VaultName"
+ " in your configuration",
f"Error returned from provider was {cloud_err}",
title=f"Key Vault vault '{vault_name}' not found.",
) from cloud_err
return vault.properties.vault_uri
def create_vault(self, vault_name: str) -> Vault:
"""
Create new or update existing vault.
Parameters
----------
vault_name : str
Name of the Vault
Returns
-------
Vault
The Vault object.
"""
if not self.azure_region:
raise MsticpyKeyVaultConfigError(
"Could not get Azure region in which to create the vault.",
"Please add AzureRegion to the KeyVault section of msticpyconfig.yaml",
title="missing AzureRegion value.",
)
parameters = self._get_params()
if not self.resource_group:
raise MsticpyKeyVaultConfigError(
"Could not get Azure resource group in which to create the vault.",
"Please add ResourceGroup to the KeyVault section of msticpyconfig.yaml",
title="missing ResourceGroup value.",
)
mgmt = KeyVaultManagementClient(self.auth_client.legacy, self.subscription_id)
return mgmt.vaults.create_or_update(
self.resource_group, vault_name, parameters
).result()
def _get_params(self):
"""Build the vault parameters block."""
oid = _user_oid(self.auth_client.legacy.token)
sec_perms_all = [perm.value for perm in SecretPermissions]
key_perms_all = [perm.value for perm in KeyPermissions]
cert_perms_all = [perm.value for perm in CertificatePermissions]
permissions = Permissions()
permissions.keys = key_perms_all
permissions.secrets = sec_perms_all
permissions.certificates = cert_perms_all
policy = AccessPolicyEntry(
tenant_id=self.tenant_id, object_id=oid, permissions=permissions
)
properties = VaultProperties(
tenant_id=self.tenant_id,
sku=Sku(name="standard", family="A"),
access_policies=[policy],
)
parameters = VaultCreateOrUpdateParameters(
location=self.azure_region, properties=properties
)
parameters.properties.enabled_for_deployment = True
parameters.properties.enabled_for_disk_encryption = True
parameters.properties.enabled_for_template_deployment = True
return parameters
# pylint: enable=too-many-instance-attributes
def _user_oid(token) -> str:
"""
Return the user Object ID.
Returns
-------
str
User OID.
"""
data = _get_parsed_token_data(token)
return data.get("oid")
def _get_parsed_token_data(token) -> Any:
tok_data = token
tok_data = tok_data.split(".")[1]
tok_data += "=" * ((4 - len(tok_data) % 4) % 4)
return json.loads(base64.b64decode(tok_data))
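# A minimal usage sketch (not part of the original module; the tenant id and vault
# name below are placeholders, and valid Azure credentials are required for one of
# the supported auth methods):
#
#     kv_client = BHKeyVaultClient(
#         tenant_id="00000000-0000-0000-0000-000000000000",
#         vault_name="my-keyvault",
#     )
#     print(kv_client.secrets)                     # list secret identifiers
#     api_key = kv_client.get_secret("my-secret")  # raises if missing or empty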
|
the-stack_106_14303
|
import time
from functools import wraps
def timer(func):
@wraps(func)
def wrapped(*args, **kwargs):
start = time.time()
rv = func(*args, **kwargs)
end = time.time()
print ('Executed %s in %2.2f seconds' % (func.__name__, end - start))
return rv
return wrapped
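# A minimal usage sketch (illustrative function, not part of the original module):
if __name__ == "__main__":
    @timer
    def slow_sum(n):
        return sum(range(n))
    slow_sum(10_000_000)  # prints e.g. "Executed slow_sum in 0.25 seconds"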
|
the-stack_106_14304
|
# -*- coding: utf-8 -*-
"""Solph Optimization Models.
SPDX-FileCopyrightText: Uwe Krien <[email protected]>
SPDX-FileCopyrightText: Simon Hilpert
SPDX-FileCopyrightText: Cord Kaldemeyer
SPDX-FileCopyrightText: gplssm
SPDX-FileCopyrightText: Patrik Schönfeldt
SPDX-License-Identifier: MIT
"""
import logging
import warnings
from pyomo import environ as po
from pyomo.core.plugins.transform.relax_integrality import RelaxIntegrality
from pyomo.opt import SolverFactory
from oemof.solph import blocks
from oemof.solph import processing
from oemof.solph.plumbing import sequence
class BaseModel(po.ConcreteModel):
"""The BaseModel for other solph-models (Model, MultiPeriodModel, etc.)
Parameters
----------
energysystem : EnergySystem object
Object that holds the nodes of an oemof energy system graph
constraint_groups : list (optional)
Solph looks for these groups in the given energy system and uses them
to create the constraints of the optimization problem.
        Defaults to `Model.CONSTRAINT_GROUPS`
objective_weighting : array like (optional)
Weights used for temporal objective function
expressions. If nothing is passed `timeincrement` will be used which
is calculated from the freq length of the energy system timeindex .
auto_construct : boolean
        If this value is true, the sets, variables, constraints, etc. are
        added automatically when instantiating the model. For a sequential
        model building process, set this value to False
and use methods `_add_parent_block_sets`,
`_add_parent_block_variables`, `_add_blocks`, `_add_objective`
Attributes:
-----------
timeincrement : sequence
Time increments.
flows : dict
Flows of the model.
name : str
Name of the model.
es : solph.EnergySystem
Energy system of the model.
meta : `pyomo.opt.results.results_.SolverResults` or None
Solver results.
dual : ... or None
rc : ... or None
"""
CONSTRAINT_GROUPS = []
def __init__(self, energysystem, **kwargs):
super().__init__()
# ######################## Arguments #################################
self.name = kwargs.get("name", type(self).__name__)
self.es = energysystem
self.timeincrement = sequence(
kwargs.get("timeincrement", self.es.timeincrement)
)
if self.timeincrement[0] is None:
try:
self.timeincrement = sequence(
self.es.timeindex.freq.nanos / 3.6e12
)
except AttributeError:
msg = (
"No valid time increment found. Please pass a valid "
"timeincremet parameter or pass an EnergySystem with "
"a valid time index. Please note that a valid time"
"index need to have a 'freq' attribute."
)
raise AttributeError(msg)
self.objective_weighting = kwargs.get(
"objective_weighting", self.timeincrement
)
self._constraint_groups = type(self).CONSTRAINT_GROUPS + kwargs.get(
"constraint_groups", []
)
self._constraint_groups += [
i
for i in self.es.groups
if hasattr(i, "CONSTRAINT_GROUP")
and i not in self._constraint_groups
]
self.flows = self.es.flows()
self.solver_results = None
self.dual = None
self.rc = None
if kwargs.get("auto_construct", True):
self._construct()
def _construct(self):
""" """
self._add_parent_block_sets()
self._add_parent_block_variables()
self._add_child_blocks()
self._add_objective()
def _add_parent_block_sets(self):
""" " Method to create all sets located at the parent block, i.e. the
model itself as they are to be shared across all model components.
"""
pass
def _add_parent_block_variables(self):
""" " Method to create all variables located at the parent block,
i.e. the model itself as these variables are to be shared across
all model components.
"""
pass
def _add_child_blocks(self):
"""Method to add the defined child blocks for components that have
been grouped in the defined constraint groups.
"""
for group in self._constraint_groups:
# create instance for block
block = group()
# Add block to model
self.add_component(str(block), block)
# create constraints etc. related with block for all nodes
# in the group
block._create(group=self.es.groups.get(group))
def _add_objective(self, sense=po.minimize, update=False):
"""Method to sum up all objective expressions from the child blocks
that have been created. This method looks for `_objective_expression`
attribute in the block definition and will call this method to add
their return value to the objective function.
"""
if update:
self.del_component("objective")
expr = 0
for block in self.component_data_objects():
if hasattr(block, "_objective_expression"):
expr += block._objective_expression()
self.objective = po.Objective(sense=sense, expr=expr)
def receive_duals(self):
"""Method sets solver suffix to extract information about dual
variables from solver. Shadow prices (duals) and reduced costs (rc) are
set as attributes of the model.
"""
# shadow prices
self.dual = po.Suffix(direction=po.Suffix.IMPORT)
# reduced costs
self.rc = po.Suffix(direction=po.Suffix.IMPORT)
def results(self):
"""Returns a nested dictionary of the results of this optimization"""
return processing.results(self)
def solve(self, solver="cbc", solver_io="lp", **kwargs):
r"""Takes care of communication with solver to solve the model.
Parameters
----------
solver : string
solver to be used e.g. "glpk","gurobi","cplex"
solver_io : string
pyomo solver interface file format: "lp","python","nl", etc.
\**kwargs : keyword arguments
Possible keys can be set see below:
Other Parameters
----------------
solve_kwargs : dict
Other arguments for the pyomo.opt.SolverFactory.solve() method
Example : {"tee":True}
cmdline_options : dict
Dictionary with command line options for solver e.g.
{"mipgap":"0.01"} results in "--mipgap 0.01"
{"interior":" "} results in "--interior"
Gurobi solver takes numeric parameter values such as
{"method": 2}
"""
solve_kwargs = kwargs.get("solve_kwargs", {})
solver_cmdline_options = kwargs.get("cmdline_options", {})
opt = SolverFactory(solver, solver_io=solver_io)
# set command line options
options = opt.options
for k in solver_cmdline_options:
options[k] = solver_cmdline_options[k]
solver_results = opt.solve(self, **solve_kwargs)
status = solver_results["Solver"][0]["Status"]
termination_condition = solver_results["Solver"][0][
"Termination condition"
]
if status == "ok" and termination_condition == "optimal":
logging.info("Optimization successful...")
else:
msg = (
"Optimization ended with status {0} and termination "
"condition {1}"
)
warnings.warn(
msg.format(status, termination_condition), UserWarning
)
self.es.results = solver_results
self.solver_results = solver_results
return solver_results
def relax_problem(self):
"""Relaxes integer variables to reals of optimization model self."""
relaxer = RelaxIntegrality()
relaxer._apply_to(self)
return self
class Model(BaseModel):
"""An energy system model for operational and investment
optimization.
Parameters
----------
energysystem : EnergySystem object
Object that holds the nodes of an oemof energy system graph
constraint_groups : list
Solph looks for these groups in the given energy system and uses them
to create the constraints of the optimization problem.
        Defaults to `Model.CONSTRAINT_GROUPS`
**The following basic sets are created**:
NODES :
A set with all nodes of the given energy system.
TIMESTEPS :
A set with all timesteps of the given time horizon.
FLOWS :
A 2 dimensional set with all flows. Index: `(source, target)`
**The following basic variables are created**:
flow
Flow from source to target indexed by FLOWS, TIMESTEPS.
Note: Bounds of this variable are set depending on attributes of
the corresponding flow object.
"""
CONSTRAINT_GROUPS = [
blocks.Bus,
blocks.Transformer,
blocks.InvestmentFlow,
blocks.Flow,
blocks.NonConvexFlow,
]
def __init__(self, energysystem, **kwargs):
super().__init__(energysystem, **kwargs)
def _add_parent_block_sets(self):
""" """
# set with all nodes
self.NODES = po.Set(initialize=[n for n in self.es.nodes])
# pyomo set for timesteps of optimization problem
self.TIMESTEPS = po.Set(
initialize=range(len(self.es.timeindex)), ordered=True
)
# previous timesteps
previous_timesteps = [x - 1 for x in self.TIMESTEPS]
previous_timesteps[0] = self.TIMESTEPS.last()
self.previous_timesteps = dict(zip(self.TIMESTEPS, previous_timesteps))
# pyomo set for all flows in the energy system graph
self.FLOWS = po.Set(
initialize=self.flows.keys(), ordered=True, dimen=2
)
self.BIDIRECTIONAL_FLOWS = po.Set(
initialize=[
k
for (k, v) in self.flows.items()
if hasattr(v, "bidirectional")
],
ordered=True,
dimen=2,
within=self.FLOWS,
)
self.UNIDIRECTIONAL_FLOWS = po.Set(
initialize=[
k
for (k, v) in self.flows.items()
if not hasattr(v, "bidirectional")
],
ordered=True,
dimen=2,
within=self.FLOWS,
)
def _add_parent_block_variables(self):
""" """
self.flow = po.Var(self.FLOWS, self.TIMESTEPS, within=po.Reals)
for (o, i) in self.FLOWS:
if self.flows[o, i].nominal_value is not None:
if self.flows[o, i].fix[self.TIMESTEPS[1]] is not None:
for t in self.TIMESTEPS:
self.flow[o, i, t].value = (
self.flows[o, i].fix[t]
* self.flows[o, i].nominal_value
)
self.flow[o, i, t].fix()
else:
for t in self.TIMESTEPS:
self.flow[o, i, t].setub(
self.flows[o, i].max[t]
* self.flows[o, i].nominal_value
)
if not self.flows[o, i].nonconvex:
for t in self.TIMESTEPS:
self.flow[o, i, t].setlb(
self.flows[o, i].min[t]
* self.flows[o, i].nominal_value
)
elif (o, i) in self.UNIDIRECTIONAL_FLOWS:
for t in self.TIMESTEPS:
self.flow[o, i, t].setlb(0)
else:
if (o, i) in self.UNIDIRECTIONAL_FLOWS:
for t in self.TIMESTEPS:
self.flow[o, i, t].setlb(0)
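# A minimal usage sketch (not part of the original module; assumes an already
# populated oemof.solph EnergySystem named `energysystem` and an installed CBC
# solver):
#
#     om = Model(energysystem)
#     om.receive_duals()                  # optional: import duals/reduced costs
#     om.solve(solver="cbc", solve_kwargs={"tee": True})
#     results = om.results()              # nested dictionary of results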
|
the-stack_106_14306
|
# transfer learning with VGG network
from tensorflow.keras.applications.vgg16 import VGG16  # tf.keras variant to match the tf.keras layers below
import tensorflow as tf
from tensorflow.keras import datasets, layers
from sklearn.model_selection import train_test_split
import numpy as np
# load data
(xtrain, ytrain), (xtest, ytest) = datasets.cifar10.load_data()
classes = np.unique(ytrain)
num_class = len(classes)
# split data into train & validate
xtrain, xvalid, ytrain, yvalid = train_test_split(xtrain, ytrain, test_size=0.2, stratify=ytrain)
# normalize
xtrain = xtrain / 255.0
xtest = xtest / 255.0
xvalid = xvalid / 255.0
# prepare network
input_shape = (32, 32, 3)
base_model = VGG16(input_shape=input_shape, include_top=False, weights='imagenet')
# skip training phase in some layers
for layer in base_model.layers:
layer.trainable = False
# Flatten the output layer to 1 dimension
x = layers.Flatten()(base_model.output)
# Add a fully connected layer with 1024 hidden units and ReLU activation
x = layers.Dense(1024, activation='relu')(x)
# Add a final softmax layer for classification
x = layers.Dense(num_class, activation='softmax')(x)
model = tf.keras.models.Model(base_model.input, x)
# compile
learning_rate = 0.001
model.compile(tf.keras.optimizers.RMSprop(lr=learning_rate),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# training & validation
max_epochs = 10
history = model.fit(xtrain, ytrain,
validation_data=(xvalid, yvalid),
steps_per_epoch=100,
epochs=max_epochs)
# testing
loss, acc = model.evaluate(xtest, ytest)
pred = model.predict(xtest)
pred = np.argmax(pred, axis=1)
num_data = len(ytest)
correct = 0
for i in range(num_data):
if pred[i] == ytest[i]:
correct += 1
acc_test = correct / num_data
|
the-stack_106_14307
|
import logging
from rest_framework import serializers
from taggit.serializers import TagListSerializerField
from apps.authentication.models import OnlineUser as User
from apps.authentication.serializers import UserNameSerializer
from apps.gallery.fields import ImageField
from apps.gallery.models import ResponsiveImage
from apps.gallery.serializers import ResponsiveImageSerializer
from .models import Album, Photo, UserTag
logger = logging.getLogger(__name__)
class ResponsiveImagePreviewSerializer(serializers.ModelSerializer):
class Meta:
model = ResponsiveImage
fields = ("id", "thumb", "lg", "md", "sm", "xs")
class PhotoListSerializer(serializers.ModelSerializer):
image = ResponsiveImagePreviewSerializer()
class Meta:
model = Photo
fields = (
"id",
"album",
"created_date",
"title",
"photographer_name",
"image",
"user_tags",
)
read_only = True
class PhotoRetrieveSerializer(serializers.ModelSerializer):
photographer = UserNameSerializer()
tags = TagListSerializerField()
image = ResponsiveImageSerializer()
class Meta:
model = Photo
fields = (
"id",
"album",
"relative_id",
"image",
"created_date",
"title",
"description",
"tags",
"photographer_name",
"photographer",
"user_tags",
)
read_only = True
class PhotoCreateOrUpdateSerializer(serializers.ModelSerializer):
photographer = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(), required=False
)
title = serializers.CharField(required=False, default=None)
tags = TagListSerializerField(required=False)
raw_image = ImageField(required=True)
album = serializers.PrimaryKeyRelatedField(
queryset=Album.objects.all(), required=True
)
class Meta:
model = Photo
fields = (
"id",
"album",
"relative_id",
"image",
"created_date",
"title",
"description",
"tags",
"raw_image",
"photographer_name",
"photographer",
)
read_only_fields = ("image", "created_date")
class AlbumListSerializer(serializers.ModelSerializer):
tags = TagListSerializerField()
cover_photo = PhotoListSerializer()
class Meta:
model = Album
fields = (
"id",
"title",
"description",
"created_date",
"published_date",
"tags",
"public",
"created_by",
"cover_photo",
)
read_only = True
class AlbumRetrieveSerializer(serializers.ModelSerializer):
created_by = UserNameSerializer()
tags = TagListSerializerField()
cover_photo = PhotoRetrieveSerializer()
class Meta:
model = Album
fields = (
"id",
"title",
"description",
"created_date",
"published_date",
"tags",
"photos",
"public",
"created_by",
"cover_photo",
)
read_only = True
class AlbumCreateOrUpdateSerializer(serializers.ModelSerializer):
created_by = serializers.HiddenField(default=serializers.CurrentUserDefault())
tags = TagListSerializerField(required=False)
cover_photo = PhotoRetrieveSerializer(required=False)
published_date = serializers.DateTimeField(required=False)
public = serializers.BooleanField(default=False)
class Meta:
model = Album
fields = (
"id",
"title",
"description",
"created_date",
"published_date",
"tags",
"public",
"created_by",
"cover_photo",
)
class UserTagListSerializer(serializers.ModelSerializer):
class Meta:
model = UserTag
fields = ("id", "user", "created_date", "photo")
read_only = True
class UserTagRetrieveSerializer(serializers.ModelSerializer):
user = UserNameSerializer()
class Meta:
model = UserTag
fields = ("id", "user", "created_date", "photo")
read_only = True
class UserTagCreateSerializer(serializers.ModelSerializer):
user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
photo = serializers.PrimaryKeyRelatedField(queryset=Photo.objects.all())
class Meta:
model = UserTag
fields = ("id", "user", "created_date", "photo")
|
the-stack_106_14308
|
"""
Allow to configure a SCSGate cover.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.scsgate/
"""
import logging
import voluptuous as vol
from homeassistant.components import scsgate
from homeassistant.components.cover import (CoverDevice, PLATFORM_SCHEMA)
from homeassistant.const import (CONF_DEVICES, CONF_NAME)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['scsgate']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DEVICES): vol.Schema({cv.slug: scsgate.SCSGATE_SCHEMA}),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the SCSGate cover."""
devices = config.get(CONF_DEVICES)
covers = []
logger = logging.getLogger(__name__)
if devices:
for _, entity_info in devices.items():
if entity_info[scsgate.CONF_SCS_ID] in scsgate.SCSGATE.devices:
continue
name = entity_info[CONF_NAME]
scs_id = entity_info[scsgate.CONF_SCS_ID]
logger.info("Adding %s scsgate.cover", name)
cover = SCSGateCover(name=name, scs_id=scs_id, logger=logger)
scsgate.SCSGATE.add_device(cover)
covers.append(cover)
add_devices(covers)
class SCSGateCover(CoverDevice):
"""Representation of SCSGate cover."""
def __init__(self, scs_id, name, logger):
"""Initialize the cover."""
self._scs_id = scs_id
self._name = name
self._logger = logger
@property
def scs_id(self):
"""Return the SCSGate ID."""
return self._scs_id
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def is_closed(self):
"""Return if the cover is closed."""
return None
def open_cover(self, **kwargs):
"""Move the cover."""
from scsgate.tasks import RaiseRollerShutterTask
scsgate.SCSGATE.append_task(
RaiseRollerShutterTask(target=self._scs_id))
def close_cover(self, **kwargs):
"""Move the cover down."""
from scsgate.tasks import LowerRollerShutterTask
scsgate.SCSGATE.append_task(
LowerRollerShutterTask(target=self._scs_id))
def stop_cover(self, **kwargs):
"""Stop the cover."""
from scsgate.tasks import HaltRollerShutterTask
scsgate.SCSGATE.append_task(HaltRollerShutterTask(target=self._scs_id))
def process_event(self, message):
"""Handle a SCSGate message related with this cover."""
self._logger.debug("Cover %s, got message %s",
self._scs_id, message.toggled)
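# A minimal configuration sketch (not part of the original module; slug, name and
# scs_id values are illustrative; see the linked documentation for the
# authoritative schema):
#
#   cover:
#     - platform: scsgate
#       devices:
#         living_room_shutter:
#           name: Living room shutter
#           scs_id: "01"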
|
the-stack_106_14311
|
# -*- coding: utf-8 -*-
"""
@File: Patent2VecApp.py
@Description: This is a Patent2Vec Application.
@Author: Chetan Borse
@EMail: [email protected]
@Created_on: 04/05/2017
@License Copyright [2017] [Chetan Borse]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
@python_version: 3.5
===============================================================================
"""
import os
import time
import codecs
import logging
from collections import OrderedDict
from Configuration import config
from Utils.exceptions import PathNotFoundError
from Utils.exceptions import ModelNotFoundError
from Utils.database import Database
from Utils.cleanup import clean
from Preprocessing.preprocessor import PatentDocument
from Model.patent2vec import Patent2Vec
from Model.patent2vec import ConcatenatedPatent2Vec
from Model.patent2vec import AvgPatent2Vec
# Set logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)s [%(levelname)s] %(message)s',)
log = logging.getLogger("Patent2Vec Application")
# Global variables
CPU_CORE = config.CPU_CORE
# SOURCE_DATASET = config.SOURCE_DATASET
SOURCE_DATASET = config.CLUSTERING_BENCHMARK_DATA
TESTING_DATA = config.TESTING_DATA
TEST_DOCUMENT = config.TEST_DOCUMENT
PRETRAINED_EMBEDDING = config.PRETRAINED_EMBEDDING
PATENT2VEC_MODEL = config.PATENT2VEC_MODEL
DOCVECS_MAP = config.DOCVECS_MAP
PATENT_EMBEDDING = config.PATENT_EMBEDDING
PATENT_EMBEDDING_DATABASE = config.PATENT_EMBEDDING_DATABASE
PATENT_EMBEDDING_TABLE = config.PATENT_EMBEDDING_TABLE
PRIMARY_KEY = config.PRIMARY_KEY
FIELDS = config.FIELDS
PATENT_EMBEDDING_INDEX = config.PATENT_EMBEDDING_INDEX
def main():
log.info("*****Patent2Vec Application*****")
# Preprocess patent documents
log.info("Preprocessing patent documents")
patents = PatentDocument(SOURCE_DATASET,
extension="",
enable_pos_tagging=True,
enable_lemmatization=True,
use_conceptualizer=True,
transform_conceptualizer=False,
enable_sampling=True,
train_ratio=1.0,
test_ratio=0.0,
java_options='-mx4096m')
# Create Patent2Vec model
models = OrderedDict()
# PV-DM with average
models["PV_DM_Mean"] = \
Patent2Vec(dm=1, dm_mean=1, dm_concat=0, min_word_count=5, size=500,
context_window_size=8, negative=2, iter=50, workers=CPU_CORE,
use_less_memory=False, docvecs_mapfile=DOCVECS_MAP)
models["PV_DM_Mean"].build(patents)
models["PV_DM_Mean"].intersect_with_pretrained_embedding(PRETRAINED_EMBEDDING,
binary=False)
# models["PV_DM_Mean"].load(PATENT2VEC_MODEL)
# # PV-DM with concatenation
# models["PV_DM_Concatenation"] = \
# Patent2Vec(dm=1, dm_mean=0, dm_concat=1, min_word_count=5, size=500,
# context_window_size=8, negative=2, iter=50, workers=CPU_CORE,
# use_less_memory=False, docvecs_mapfile=DOCVECS_MAP)
# models["PV_DM_Concatenation"].reuse_from(models["PV_DM_Mean"])
# # models["PV_DM_Concatenation"].build(patents)
# # models["PV_DM_Concatenation"].intersect_with_pretrained_embedding(PRETRAINED_EMBEDDING,
# # binary=False)
# # # models["PV_DM_Concatenation"].load(PATENT2VEC_MODEL)
# # PV-DBOW
# models["PV_DBOW"] = \
# Patent2Vec(dm=0, dm_mean=0, dm_concat=0, min_word_count=5, size=500,
# context_window_size=8, negative=2, iter=50, workers=CPU_CORE,
# use_less_memory=False, docvecs_mapfile=DOCVECS_MAP)
# models["PV_DBOW"].reuse_from(models["PV_DM_Mean"])
# # models["PV_DBOW"].build(patents)
# # models["PV_DBOW"].intersect_with_pretrained_embedding(PRETRAINED_EMBEDDING,
# # binary=False)
# # # models["PV_DBOW"].load(PATENT2VEC_MODEL)
# # Mixed models
# models["DBOW + DM with average"] = ConcatenatedPatent2Vec([models["PV_DBOW"],
# models["PV_DM_Mean"]])
# models["DBOW + DM with concatenation"] = ConcatenatedPatent2Vec([models["PV_DBOW"],
# models["PV_DM_Concatenation"]])
for name, model in models.items():
# Train Patent2Vec model
start_time = time.time()
model.train(patents, alpha=0.1, min_alpha=0.0001, passes=10,
fixed_alpha=False)
end_time = time.time()
log.info("Total time elapsed: %r", (end_time-start_time))
# Evaluate Patent2Vec model
model.evaluate()
# Save Patent2Vec model
model.save(model=PATENT2VEC_MODEL)
# Create a database object
db = Database(verbose=True)
# Connect to database
db.connect(in_memory=True)
# Create a new table for storing document embeddings
db.create_table(table=PATENT_EMBEDDING_TABLE,
primary_column=PRIMARY_KEY,
other_columns=FIELDS)
# Save document embeddings
model.save_document_embeddings(document_embeddings=PATENT_EMBEDDING,
rows=len(patents),
columns=500,
database=db,
table_name=PATENT_EMBEDDING_TABLE,
save_patent_category=True,
prepend_document_category=True)
# Test documents
if not os.path.exists(TESTING_DATA):
raise PathNotFoundError("Path does not exist: %s" % TESTING_DATA)
with open(TESTING_DATA, "r") as t:
test_documents = t.readlines()
test_documents = map(lambda x: x.strip(), test_documents)
test_documents = filter(None, test_documents)
# Preprocessed test documents
preprocessed_test_documents = patents.get_preprocessed_corpus(test_documents)
# Predict document embeddings
model.predict(preprocessed_test_documents,
alpha=0.1,
min_alpha=0.0001,
steps=50,
save=True,
database=db,
table_name=PATENT_EMBEDDING_TABLE,
save_patent_category=True,
prepend_document_category=True)
# Create an index on document embedding table
db.create_index(index=PATENT_EMBEDDING_INDEX,
table=PATENT_EMBEDDING_TABLE,
index_by_column=PRIMARY_KEY[0])
# Close database connection
db.close(save_to=PATENT_EMBEDDING_DATABASE)
# Delete temporary training data
model.clean()
# Test document for checking the quality of Patent2Vec model
patents.set_token_only(True)
preprocessed_test_document = patents.get_preprocessed_document(TEST_DOCUMENT)
patents.set_token_only(False)
# Check quality of Patent2Vec model
if preprocessed_test_document is not None:
log.info("Check quality of Patent2Vec model")
log.info("Top matches for test document: %s", TEST_DOCUMENT)
for name, model in models.items():
embedding = model.infer(preprocessed_test_document)
top_matches = model.model.docvecs.most_similar(positive=[embedding],
negative=[],
topn=10)
top_matches = map(lambda x: x[0]+"\t\t"+str(x[1]), top_matches)
for top_match in top_matches:
log.info(top_match)
# Clean all un-necessary files
clean(cleanSample=True,
cleanModel=False,
cleanDocvecs=True,
cleanDatabase=False,
cleanClusters=False,
filter=[])
if __name__ == "__main__":
main()
|
the-stack_106_14312
|
import threading
import time
def s():
    for i in range(1,10):
        print("Square :", i**2)
        time.sleep(5)
def c():
    for i in range(1,10):
        print("Cube :", i**3)
        time.sleep(5)
if __name__=='__main__':
tc=threading.Thread(target=c,args=())
ts=threading.Thread(target=s,args=())
tc.start()
ts.start()
for i in range(1,10):
print("main :",i)
time.sleep(5)
|
the-stack_106_14313
|
# !/usr/bin/python
#
# Example code to go through the hokuyo_30m.bin file, read timestamps and the hits
# in each packet, and plot them.
#
# To call:
#
# python read_hokuyo_30m.py hokuyo_30m.bin
#
import sys
import struct
import numpy as np
import matplotlib.pyplot as plt
def convert(x_s):
scaling = 0.005 # 5 mm
offset = -100.0
x = x_s * scaling + offset
return x
def main(args):
if len(sys.argv) < 2:
print("Please specifiy input bin file")
return 1
# hokuyo_30m always has 1081 hits
num_hits = 1081
# angles for each range observation
rad0 = -135 * (np.pi/180.0)
radstep = 0.25 * (np.pi/180.0)
angles = np.linspace(rad0, rad0 + (num_hits-1)*radstep, num_hits)
    f_bin = open(sys.argv[1], "rb")  # binary mode: packets are parsed with struct.unpack
plt.ion()
while True:
# Read timestamp
utime = struct.unpack('<Q', f_bin.read(8))[0]
print('Timestamp', utime)
r = np.zeros(num_hits)
for i in range(num_hits):
s = struct.unpack('<H', f_bin.read(2))[0]
r[i] = convert(s)
#print s
x = r * np.cos(angles)
y = r * np.sin(angles)
plt.clf()
plt.plot(x, y, '.')
plt.title(utime)
plt.draw()
f_bin.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
the-stack_106_14317
|
###############################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import os
import argparse
import json
import sys
import numpy as np
import torch
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str,
help='JSON file for configuration')
parser.add_argument('-p', '--params', nargs='+', default=[])
parser.add_argument('-f', '--flowtron_path',
help='Path to flowtron state dict', type=str)
parser.add_argument('-w', '--waveglow_path',
help='Path to waveglow state dict', type=str)
parser.add_argument('-t', '--text', help='Text to synthesize', type=str)
parser.add_argument('-i', '--id', help='Speaker id', type=int)
parser.add_argument('-n', '--n_frames', help='Number of frames',
default=400, type=int)
parser.add_argument('-o', "--output_dir", default="results/inferences")
parser.add_argument("-s", "--sigma", default=0.5, type=float)
parser.add_argument("-g", "--gate", default=0.5, type=float)
parser.add_argument("--seed", default=1234, type=int)
return parser.parse_args()
args = parse_args()
with open(args.config) as f:
data = f.read()
global config
config = json.loads(data)
language_config = config["language_config"]
from flowtron import Flowtron
from torch.utils.data import DataLoader
exec("from data" + "_" + language_config["language"] + " import Data")
from train import update_params
sys.path.insert(0, "tacotron2")
sys.path.insert(0, "tacotron2/waveglow")
from glow import WaveGlow
from scipy.io.wavfile import write
def infer(flowtron_path, waveglow_path, output_dir, text, speaker_id, n_frames,
sigma, gate_threshold, seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# load waveglow
waveglow = torch.load(waveglow_path)['model'].cuda().eval()
waveglow.cuda().half()
for k in waveglow.convinv:
k.float()
waveglow.eval()
# load flowtron
model = Flowtron(**model_config).cuda()
checkpoint = torch.load(flowtron_path, map_location='cpu')
if 'model' in checkpoint:
state_dict = checkpoint['model'].state_dict()
else:
state_dict = checkpoint['state_dict']
model.load_state_dict(state_dict)
model.eval()
print("Loaded checkpoint '{}')" .format(flowtron_path))
ignore_keys = ['training_files', 'validation_files']
trainset = Data(
data_config['training_files'],
**dict((k, v) for k, v in data_config.items() if k not in ignore_keys))
speaker_vecs = trainset.get_speaker_id(speaker_id).cuda()
text = trainset.get_text(text).cuda()
speaker_vecs = speaker_vecs[None]
text = text[None]
with torch.no_grad():
residual = torch.cuda.FloatTensor(1, 80, n_frames).normal_() * sigma
mels, attentions = model.infer(
residual, speaker_vecs, text, gate_threshold=gate_threshold)
for k in range(len(attentions)):
attention = torch.cat(attentions[k]).cpu().numpy()
fig, axes = plt.subplots(1, 2, figsize=(16, 4))
axes[0].imshow(mels[0].cpu().numpy(), origin='lower', aspect='auto')
axes[1].imshow(attention[:, 0].transpose(), origin='lower', aspect='auto')
fig.savefig(os.path.join(output_dir, 'sid{}_sigma{}_attnlayer{}.png'.format(speaker_id, sigma, k)))
plt.close("all")
with torch.no_grad():
audio = waveglow.infer(mels.half(), sigma=0.8).float()
audio = audio.cpu().numpy()[0]
# normalize audio for now
audio = audio / np.abs(audio).max()
print(audio.shape)
write(os.path.join(output_dir, 'sid{}_sigma{}.wav'.format(speaker_id, sigma)),
data_config['sampling_rate'], audio)
if __name__ == "__main__":
update_params(config, args.params)
data_config = config["data_config"]
global model_config
model_config = config["model_config"]
# Make directory if it doesn't exist
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
os.chmod(args.output_dir, 0o775)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
infer(args.flowtron_path, args.waveglow_path, args.output_dir, args.text,
args.id, args.n_frames, args.sigma, args.gate, args.seed)
|
the-stack_106_14318
|
from luma.core.interface.serial import spi
from luma.core.render import canvas
from luma.oled.device import sh1106
import requests
import time
import datetime
def get_btc_to_usd():
URL = "https://api.coincap.io/v2/rates/bitcoin"
response = requests.get(url=URL)
    response_dict = response.json()  # parse JSON instead of eval-ing the raw body
current_usd_rate = float(response_dict['data']['rateUsd'])
current_timestamp = response_dict['timestamp']
return current_usd_rate, current_timestamp
def main():
today_last_time = "Unknown"
while True:
now = datetime.datetime.now()
today_date = now.strftime("%d %b %y")
today_time = now.strftime("%H:%M:%S")
if today_time != today_last_time:
today_last_time = today_time
with canvas(device) as draw:
now = datetime.datetime.now()
today_date = now.strftime("%d %b %y")
margin = 4
cx = 30
cy = min(device.height, 64) / 2
draw.text((2 * (cx + margin), cy - 8), today_date, fill="white")
draw.text((2 * (cx + margin), cy), today_time, fill="white")
time.sleep(10)
if __name__ == "__main__":
try:
serial = spi(device=0, port=0, cs_high=True)
device = sh1106(serial)
main()
except KeyboardInterrupt:
pass
|
the-stack_106_14319
|
import numpy as np
import pandas as pd
def create_str_from_list(df, ftr, column_a, column_b):
try:
todos_os_usuarios = ';'.join(list(set(df[df[column_a] == ftr][column_b])))
return '; '.join(set(todos_os_usuarios.split(';')))
except TypeError:
return ''
def single_result_parcer(df, ftr, column_a, columb_b):
n = df.loc[df[column_a] == ftr, columb_b]
if type(n) is str:
return n
else:
return list(n)[0]
#Lecom
def ofertas_lecom(df):
processos = []
etapas = []
ofertas = []
n_de_ofertas = []
vagas = []
for processo in set(df['#Processo']):
processos.append(processo)
etapas.append(single_result_parcer(df, processo, '#Processo', 'Etapa'))
oferta = create_str_from_list(df, processo, '#Processo', 'Ofertas')
ofertas.append(oferta)
        n_de_ofertas.append(len(oferta.split("; ")))
vagas.append(create_str_from_list(df, processo, '#Processo', 'Vagas'))
new_df = pd.DataFrame({
"#Processo": processos,
"Etapa": etapas,
"Ofertas": ofertas,
"Número de Ofertas": n_de_ofertas,
"Vagas": vagas
})
return new_df
def ofertas_access_column(df):
protocolos = []
entidades = []
ofertas = []
n_de_ofertas = []
print(df.head())
for protocolo, row in df.iterrows():
protocolos.append(protocolo)
lista_de_ofertas = []
for column in ["OFERTA_I", "OFERTA_II", "OFERTA_III",
"OFERTA_IV", "OFERTA_V", "OFERTA_VI",
"OFERTA_VII"]:
oferta = df.loc[protocolo, column]
if oferta is not np.NaN:
lista_de_ofertas.append(oferta)
entidades.append(df.loc[protocolo, 'ENTIDADE'])
ofertas.append("; ".join(lista_de_ofertas))
n_de_ofertas.append(len(lista_de_ofertas))
new_df = pd.DataFrame({
"PROTOCOLO": protocolos,
"ENTIDADE": entidades,
"OFERTAS": ofertas,
"NÚMERO DE OFERTAS": n_de_ofertas
})
return new_df
#Access
def ofertas_access(df):
cnpjs = []
cebas = []
entidade = []
municipio = []
uf = []
oferta = []
n_de_ofertas = []
usuario_access = []
for cnpj in set(df['CNPJ']):
cnpjs.append(cnpj)
entidade.append(single_result_parcer(df, cnpj, 'CNPJ', 'ENTIDADE'))
cebas.append(single_result_parcer(df, cnpj, 'CNPJ', 'CEBAS'))
municipio.append(single_result_parcer(df, cnpj, 'CNPJ', 'MUNICIPIO'))
uf.append(single_result_parcer(df, cnpj, 'CNPJ', 'UF'))
lista_de_ofertas = create_str_from_list(df, cnpj, 'CNPJ','OFERTA')
n_de_ofertas.append(len(lista_de_ofertas.split("; ")))
oferta.append(lista_de_ofertas)
usuario_access.append(create_str_from_list(df, cnpj, 'CNPJ', 'USUARIO'))
new_df = pd.DataFrame({
"CNPJ": cnpjs,
"ENTIDADE": entidade,
"CEBAS": cebas,
"MUNICIPIO": municipio,
"UF": uf,
"OFERTA": oferta,
"NÚMERO DE OFERTAS": n_de_ofertas,
"USUARIO": usuario_access
})
return new_df
try:
lecom_df = pd.read_excel("input/ofertas_e_usuarios.xlsx")
new_lecom_df = ofertas_lecom(lecom_df)
new_lecom_df.to_excel('output/ofertas_e_usuarios.xlsx', index=False)
except FileNotFoundError:
print('LECOM não encontrada')
try:
access_df = pd.read_excel("input/ofertas_access.xlsx")
new_access_df = ofertas_access(access_df)
new_access_df.to_excel('output/ofertas_access.xlsx', index=False)
except FileNotFoundError:
print('ACCESS não encontrada')
try:
access_columns_df = pd.read_excel("input/ofertas_access_por_colunas.xlsx", index_col='PROTOCOLO')
new_access_columns_df = ofertas_access_column(access_columns_df)
new_access_columns_df.to_excel('output/ofertas_access_columns.xlsx', index=False)
except FileNotFoundError:
print('ACCESS de coluna não encontrada')
def motivo_indeferimento(df):
motivos = []
processos = []
for processo in set(df['#Processo']):
processos.append(processo)
motivos.append(create_str_from_list(df, processo, "#Processo", 'Exposição dos Motivos'))
new_df = pd.DataFrame({
'#Processos': processos,
'Motivos Indeferimento': motivos
})
return new_df
motivos_df = pd.read_excel("input/motivos.xlsx")
new_motivos_df = motivo_indeferimento(motivos_df)
new_motivos_df.to_excel('output/motivos.xlsx', index=False)
|
the-stack_106_14320
|
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from maskrcnn_benchmark import _C
EPISILON=1e-6
def softmax_focal_loss_cuda(logits, targets, gamma, alpha):
    num_classes = logits.shape[1]  # logits has shape (N, num_classes)
gamma = gamma
alpha = alpha
dtype = targets.dtype
device = targets.device
class_range = torch.arange(0, num_classes, dtype=dtype, device=device).unsqueeze(0)
# print('softmax logits', logits)
p = torch.softmax(logits, dim=1)
# print('softmax_value', p)
t = targets.unsqueeze(1)
# print('softmax tar', t)
term1 = (1 - p) ** gamma * torch.log(p+EPISILON)
    # Class-balance weights; note this hard-codes a 3-class problem.
    alpha = torch.tensor([[1 - alpha, alpha, alpha]]).float().to(logits.device)  # keep balance
# term2 = p ** gamma * torch.log(1 - p+EPISILON)
losses = -(t == class_range).float() * term1 * alpha #- ((t != class_range) * (t >= 0)).float() * term2 * (1 - alpha)
loss_isnan = torch.isnan(losses)
assert torch.sum(loss_isnan) == 0, ['softmax loss', losses]
losses = losses.sum(dim=1, keepdim=True)
return losses
class SoftmaxFocalLoss(nn.Module):
def __init__(self, gamma, alpha):
super(SoftmaxFocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
def forward(self, logits, targets):
        # softmax_focal_loss_cuda uses plain torch ops only, so it also works
        # for CPU tensors; no separate CPU implementation is needed.
        loss = softmax_focal_loss_cuda(logits, targets, self.gamma, self.alpha)
return loss.sum()
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "gamma=" + str(self.gamma)
tmpstr += ", alpha=" + str(self.alpha)
tmpstr += ")"
return tmpstr
|
the-stack_106_14321
|
#Made for getting notification from telegram bot when a user tweets or reply
from telegram.ext.updater import Updater
from telegram.update import Update
from telegram.ext.callbackcontext import CallbackContext
from telegram.ext.commandhandler import CommandHandler
from server import update_username_to_the_data_base as update_db
from server import get_chat_id_from_the_data_base as get_db
import os
#Bot API key
API_key = "Enter your bot APi key"
username_list = get_db()
updater = Updater(API_key, use_context=True)
PORT = int(os.environ.get('PORT', 5000))
#getting the data from the data base if the bot is stopped
telegram_token = API_key
#starting the bot
def start(update: Update, context: CallbackContext):
#checking whether the telegram chat _id is already there
if user_chat_id(update) in username_list.keys():
pass
else:
username_list[user_chat_id(update)] = []
update.message.reply_text("""HELLO!
I am a telegram bot used to notify you when a user name of your choice tweets or replies to a tweet.
/help -> Helps to choose commands.
/usernameList -> Shows the list of usernames.
/addUsername <username> -> Adds username to notify you.
    /removeUsername <username> -> Removes a username so you are no longer notified.""")
#help command to display the commands of the bot
def help(update: Update, context: CallbackContext):
update.message.reply_text("""/help -> Helps to choose commands.
/usernameList -> Shows the list of usernames.
/addUsername <username> -> Adds username to notify you.
    /removeUsername <username> -> Removes a username so you are no longer notified.""")
#getting chat id and returning it
def user_chat_id(update: Update):
user = update.message.chat_id
return str(user)
#displaying the chat id's username list
def username_list_reply(update: Update, context: CallbackContext):
username_list
username_list_str = ""
if username_list[user_chat_id(update)] == []:
update.message.reply_text("""There is no username in the list.
If you need any help /help.""")
else:
twitter_username_list = username_list.get(user_chat_id(update))
for usr in twitter_username_list:
username_list_str += f"{usr}\n"
update.message.reply_text(f"""The usernames are
{username_list_str}""")
#command to add username to the list
def add_username(update: Update, context: CallbackContext):
global username_list
username = str(update.message.text).split(" ")[1].strip()
#for adding "@" symbol before if the telegram user forgot to add
if username[0] != "@":
username = "@" + username
#verifying whether the given username y the user is there in twitter. if the username is there it will return true else false
verification_state = verify_username(username)
#checking whether the user name is already there in the list
if username in username_list[user_chat_id(update)]:
update.message.reply_text(f"""The username {username} is already there in the list.
if you want any help /help.""")
elif verification_state == True:
username_list[user_chat_id(update)].append(username)
update_db(user_chat_id(update), username_list[user_chat_id(update)])
update.message.reply_text(f"""The user name is successfully added to the list of usernames.
if you want any help /help.
https://www.twitter.com/{username}""")
else:
update.message.reply_text("""The username is not found in twitter
Make sure you have typed the correct letters(lower or upper), numbers and special characters.
If you want any help /help.
If you still have error contact the developers @Rakesh or @Arudhran""")
def verify_username(twitter_username):
#verify whether the given username by the user is there in twitter by using API
return True
#command to remove username form the list
def remove_username(update: Update, context: CallbackContext):
global username_list
username = str(update.message.text).split(" ")[1].strip()
if username[0] != "@":
username = "@" + username
#if the list of chat id is empty
if username_list[user_chat_id(update)] == []:
update.message.reply_text("""There is no username in the username list to remove.
If you want any help /help.""")
elif username in username_list[user_chat_id(update)]:
username_list[user_chat_id(update)].remove(username)
update_db(user_chat_id(update), username_list[user_chat_id(update)])
update.message.reply_text("""The username is successfully removed from the list.
If you want any help /help.""")
    #if the given username is not in this chat id's list
else:
update.message.reply_text(f"""There is no username as {username} in the username list.
If you want any help /help.
If you want to see the list of usernames /usernameList""")
#adding handlers
updater.dispatcher.add_handler(CommandHandler('start', start))
updater.dispatcher.add_handler(CommandHandler('help', help))
updater.dispatcher.add_handler(CommandHandler('usernameList', username_list_reply))
updater.dispatcher.add_handler(CommandHandler('addUsername', add_username))
updater.dispatcher.add_handler(CommandHandler('removeUsername', remove_username))
#starting the bot and telling it to keep listening for commands
updater.start_webhook(listen="0.0.0.0",port=int(PORT), url_path=telegram_token)
updater.bot.setWebhook('https://YourHerokuAppName.herokuapp.com/' + telegram_token)
#This code was made by Arudhran:- https://github.com/ArudhranPK/ and Rakesh:- https://github.com/ARKS-INDUSTRY/
|
the-stack_106_14322
|
# The MIT License (MIT)
#
# Copyright (c) 2019 Sergey Makeev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import math
import io
import sys
import os
import signal
import json
import gzip
import hashlib
import time
import fbx
import rbmesh
import logger
from http.server import BaseHTTPRequestHandler, HTTPServer
import email.utils as email_utils
import urllib.request
import urllib.error
def ensure_path_exist(file_path: str) -> str:
dir_name = os.path.dirname(file_path)
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
return dir_name
def detect_asset_type(content: bytes) -> str:
if len(content) > 8:
data_stream = io.BytesIO(content)
header = data_stream.read(8)
ktx_header = b'\xab\x4b\x54\x58\x20\x31\x31\xbb'
if header == ktx_header:
return 'ktx'
    # mesh formats: ascii (versions 1.00 / 1.01), binary (2.00),
    # binary with LODs (3.00), binary with LODs and skinning data (4.00 / 4.01)
    if len(content) > 12:
        data_stream = io.BytesIO(content)
        header = data_stream.read(12)
        mesh_headers = (b'version 1.00', b'version 1.01', b'version 2.00',
                        b'version 3.00', b'version 4.00', b'version 4.01')
        if header in mesh_headers:
            return 'mesh'
if len(content) > 8:
data_stream = io.BytesIO(content)
header = data_stream.read(8)
png_header = b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A'
if header == png_header:
return 'png'
if len(content) > 10:
data_stream = io.BytesIO(content)
header = data_stream.read(1)
_ = data_stream.read(5)
signature = data_stream.read(4)
if header == b'\xFF' and signature == b'\x4A\x46\x49\x46':
return 'jpg'
if len(content) > 32:
data_stream = io.BytesIO(content)
header = data_stream.read(3)
if header == b'\x44\x44\x53':
return 'dds'
return 'raw'
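# Illustrative examples (assuming the file contents are available as bytes):
#   detect_asset_type(open('texture.png', 'rb').read())  -> 'png'
#   detect_asset_type(b'version 4.01' + b'...')           -> 'mesh'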
def fetch_local_asset(file_path: str):
with open(file_path, 'rb') as bin_file:
data = bin_file.read()
bin_file.close()
h256 = hashlib.sha256()
h256.update(data)
return {"hash": h256.hexdigest(),
"cdn_url": file_path,
"ts": int(0),
"code": 200,
"fetched_bytes": len(data),
"payload_bytes": len(data),
"payload": data}, None
def fetch_asset(url: str) -> dict or None:
if not url:
return None, "Invalid URL"
if url.startswith('rbxasset://'):
url = "./built-in/" + url[11:]
return fetch_local_asset(url)
asset_fetch_endpoint = 'https://assetdelivery.roblox.com/v1/asset/?id='
if url.startswith('rbxassetid://'):
url = asset_fetch_endpoint + url[13:]
elif url.startswith('https://www.roblox.com/asset/?id='):
url = asset_fetch_endpoint + url[33:]
elif url.startswith('http://roblox.com/asset/?id='):
url = asset_fetch_endpoint + url[28:]
elif url.startswith('http://www.roblox.com/asset/?id='):
url = asset_fetch_endpoint + url[32:]
try:
request = urllib.request.Request(url)
request.add_header('Roblox-Place-Id', '0')
request.add_header('Accept-Encoding', 'gzip')
request.add_header('User-Agent', 'RobloxStudio/WinInet')
# noinspection PyUnusedLocal
fetched_bytes = 0
response = urllib.request.urlopen(request)
if response.info().get('Content-Encoding') == 'gzip':
compressed_data = response.read()
fetched_bytes = len(compressed_data)
data = gzip.decompress(compressed_data)
else:
data = response.read()
fetched_bytes = len(data)
cdn_url = str(response.geturl())
h256 = hashlib.sha256()
h256.update(data)
html_timestamp = response.info().get('Last-Modified')
timestamp = int(time.mktime(email_utils.parsedate(html_timestamp)))
return {"hash": h256.hexdigest(),
"cdn_url": cdn_url,
"ts": timestamp,
"code": response.getcode(),
"fetched_bytes": fetched_bytes,
"payload_bytes": len(data),
"payload": data}, None
except urllib.error.HTTPError as ex:
logger.warn("Can't fetch asset '" + url + "'")
logger.warn("Code: " + str(ex.getcode()))
logger.warn("Exception: '" + str(ex) + "'")
return None, str(ex)
except ValueError as ex:
logger.warn("ValueError. Can't fetch asset " + url)
logger.warn("Exception: '" + str(ex) + "'")
return None, str(ex)
except urllib.error.URLError as ex:
logger.warn("URLError. Can't fetch asset " + url)
logger.warn("Exception: '" + str(ex) + "'")
return None, str(ex)
def resolve_id_to_reference(object_id: int, id_to_object: dict):
if object_id == -1:
return None
else:
return id_to_object.get(object_id, None)
class SceneDescription:
def __init__(self):
self.textures_folder = ""
self.attachments_layer_id = 0
self.bones_layer_id = 0
self.geos_layer_id = 0
self.accs_layer_id = 0
self.attachments_material_id = 0
class Connection:
def __init__(self, is_active, part0, part1):
self.active = is_active
self.part0 = part0
self.part1 = part1
class CFrame:
def __init__(self):
self.tx = 0
self.ty = 0
self.tz = 0
self.r00 = 1
self.r01 = 0
self.r02 = 0
self.r10 = 0
self.r11 = 1
self.r12 = 0
self.r20 = 0
self.r21 = 0
self.r22 = 1
def cframe_rotation_x(rad: float) -> CFrame:
cos = math.cos(rad)
sin = math.sin(rad)
res = CFrame()
res.r11 = cos
res.r12 = -sin
res.r21 = sin
res.r22 = cos
return res
def cframe_translation(x: float, y: float, z: float) -> CFrame:
res = CFrame()
res.tx = x
res.ty = y
res.tz = z
return res
def cframe_rotation_y(rad: float) -> CFrame:
cos = math.cos(rad)
sin = math.sin(rad)
res = CFrame()
res.r00 = cos
res.r02 = sin
res.r20 = -sin
res.r22 = cos
return res
def cframe_rotation_z(rad: float) -> CFrame:
cos = math.cos(rad)
sin = math.sin(rad)
res = CFrame()
res.r00 = cos
res.r01 = -sin
res.r10 = sin
res.r11 = cos
return res
def cframe_roblox_to_maya(cframe: CFrame) -> CFrame:
res = CFrame()
res.r00 = cframe.r00
res.r01 = cframe.r01
res.r02 = cframe.r02
res.r10 = cframe.r10
res.r11 = cframe.r11
res.r12 = cframe.r12
res.r20 = cframe.r20
res.r21 = cframe.r21
res.r22 = cframe.r22
res.tx = -cframe.tx
res.ty = cframe.ty
res.tz = -cframe.tz
return res
def cframe_inverse(cframe: CFrame) -> CFrame:
res = CFrame()
# transposition
res.r00 = cframe.r00
res.r01 = cframe.r10
res.r02 = cframe.r20
res.r10 = cframe.r01
res.r11 = cframe.r11
res.r12 = cframe.r21
res.r20 = cframe.r02
res.r21 = cframe.r12
res.r22 = cframe.r22
res.tx = -(res.r00 * cframe.tx + res.r01 * cframe.ty + res.r02 * cframe.tz)
res.ty = -(res.r10 * cframe.tx + res.r11 * cframe.ty + res.r12 * cframe.tz)
res.tz = -(res.r20 * cframe.tx + res.r21 * cframe.ty + res.r22 * cframe.tz)
return res
def cframe_multiply(a: CFrame, b: CFrame) -> CFrame:
    # compose two CFrames: 3x3 rotation multiply, plus translation rotated by A and added to A's translation
res = CFrame()
res.r00 = a.r00 * b.r00 + a.r01 * b.r10 + a.r02 * b.r20
res.r01 = a.r00 * b.r01 + a.r01 * b.r11 + a.r02 * b.r21
res.r02 = a.r00 * b.r02 + a.r01 * b.r12 + a.r02 * b.r22
res.r10 = a.r10 * b.r00 + a.r11 * b.r10 + a.r12 * b.r20
res.r11 = a.r10 * b.r01 + a.r11 * b.r11 + a.r12 * b.r21
res.r12 = a.r10 * b.r02 + a.r11 * b.r12 + a.r12 * b.r22
res.r20 = a.r20 * b.r00 + a.r21 * b.r10 + a.r22 * b.r20
res.r21 = a.r20 * b.r01 + a.r21 * b.r11 + a.r22 * b.r21
res.r22 = a.r20 * b.r02 + a.r21 * b.r12 + a.r22 * b.r22
res.tx = a.r00 * b.tx + a.r01 * b.ty + a.r02 * b.tz + a.tx
res.ty = a.r10 * b.tx + a.r11 * b.ty + a.r12 * b.tz + a.ty
res.tz = a.r20 * b.tx + a.r21 * b.ty + a.r22 * b.tz + a.tz
return res
def cframe_transform_pos(cframe: CFrame, x: float, y: float, z: float):
rx = cframe.r00 * x + cframe.r01 * y + cframe.r02 * z + cframe.tx
ry = cframe.r10 * x + cframe.r11 * y + cframe.r12 * z + cframe.ty
rz = cframe.r20 * x + cframe.r21 * y + cframe.r22 * z + cframe.tz
return rx, ry, rz
def cframe_transform_vec(cframe: CFrame, x: float, y: float, z: float):
rx = cframe.r00 * x + cframe.r01 * y + cframe.r02 * z
ry = cframe.r10 * x + cframe.r11 * y + cframe.r12 * z
rz = cframe.r20 * x + cframe.r21 * y + cframe.r22 * z
return rx, ry, rz
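# Quick sanity check for the CFrame helpers above (illustrative only):
#   a = cframe_multiply(cframe_rotation_y(0.5), cframe_translation(1, 2, 3))
#   ident = cframe_multiply(cframe_inverse(a), a)
#   # ident is numerically the identity CFrame: r00, r11, r22 close to 1; tx, ty, tz close to 0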
class Instance:
def __init__(self):
self.name = ""
self.parent = None
self.children = list()
def resolve(self, id_to_object: dict):
self.parent = resolve_id_to_reference(self.parent, id_to_object)
return
class Part(Instance):
def __init__(self):
super().__init__()
self.sx = 1
self.sy = 1
self.sz = 1
self.cframe = CFrame()
def resolve(self, id_to_object: dict):
super().resolve(id_to_object)
return
class MeshPart(Instance):
def __init__(self):
super().__init__()
self.mesh_id = ""
self.mesh_type = ""
self.texture_id = ""
self.cframe = CFrame()
self.texture_blob = None
self.mesh_blob = None
self.offset_x = 0
self.offset_y = 0
self.offset_z = 0
self.scale_x = 1
self.scale_y = 1
self.scale_z = 1
self.size_x = 1
self.size_y = 1
self.size_z = 1
def resolve(self, id_to_object: dict):
super().resolve(id_to_object)
return
class Model(Instance):
def __init__(self):
super().__init__()
self.primary_part = None
def resolve(self, id_to_object: dict):
super().resolve(id_to_object)
self.primary_part = resolve_id_to_reference(self.primary_part, id_to_object)
return
class Bone(Instance):
def __init__(self):
super().__init__()
self.cframe = CFrame()
self.m6d = None
self.cframe_local = None
def resolve(self, id_to_object: dict):
super().resolve(id_to_object)
return
class Attachment(Instance):
def __init__(self):
super().__init__()
self.cframe = CFrame()
self.geo = None
def resolve(self, id_to_object: dict):
super().resolve(id_to_object)
return
class Accessory(Instance):
def __init__(self):
super().__init__()
self.attach_point = CFrame()
class Motor6D(Instance):
def __init__(self):
super().__init__()
self.transform = CFrame()
self.c0 = CFrame()
self.c1 = CFrame()
self.part0 = None
self.part1 = None
def resolve(self, id_to_object: dict):
super().resolve(id_to_object)
self.part0 = resolve_id_to_reference(self.part0, id_to_object)
self.part1 = resolve_id_to_reference(self.part1, id_to_object)
return
class Weld(Instance):
def __init__(self):
super().__init__()
self.part0 = None
self.part1 = None
def resolve(self, id_to_object: dict):
super().resolve(id_to_object)
self.part0 = resolve_id_to_reference(self.part0, id_to_object)
self.part1 = resolve_id_to_reference(self.part1, id_to_object)
return
def get_cframe(json_cframe) -> CFrame:
res = CFrame()
res.tx = json_cframe.get('tx', 0)
res.ty = json_cframe.get('ty', 0)
res.tz = json_cframe.get('tz', 0)
res.r00 = json_cframe.get('r00', 1)
res.r01 = json_cframe.get('r01', 0)
res.r02 = json_cframe.get('r02', 0)
res.r10 = json_cframe.get('r10', 0)
res.r11 = json_cframe.get('r11', 1)
res.r12 = json_cframe.get('r12', 0)
res.r20 = json_cframe.get('r20', 0)
res.r21 = json_cframe.get('r21', 0)
res.r22 = json_cframe.get('r22', 1)
return res
def parse_model_desc(model_desc) -> Instance or None:
objects = list()
id_to_object = dict()
# 1st pass - parse desc and instantiate objects
for key, dm_object in model_desc.items():
obj = None
obj_class = dm_object.get('Class', None)
assert obj_class is not None
if obj_class == "Model":
obj = Model()
obj.primary_part = dm_object.get('PrimaryPart', -1)
elif obj_class == "Part":
obj = Part()
obj.cframe = get_cframe(dm_object.get('CFrame', CFrame()))
obj.sx = dm_object.get('SizeX', 1)
obj.sy = dm_object.get('SizeY', 1)
obj.sz = dm_object.get('SizeZ', 1)
elif obj_class == "MeshPart":
obj = MeshPart()
obj.mesh_id = dm_object.get('MeshId', '')
obj.texture_id = dm_object.get('TextureId', '')
obj.mesh_type = dm_object.get('MeshType', 'Unsupported')
obj.cframe = get_cframe(dm_object.get('CFrame', CFrame()))
            obj.offset_x = dm_object.get('OffsetX', 0)
            obj.offset_y = dm_object.get('OffsetY', 0)
            obj.offset_z = dm_object.get('OffsetZ', 0)
obj.scale_x = dm_object.get('ScaleX', 1)
obj.scale_y = dm_object.get('ScaleY', 1)
obj.scale_z = dm_object.get('ScaleZ', 1)
obj.size_x = dm_object.get('SizeX', 1)
obj.size_y = dm_object.get('SizeY', 1)
obj.size_z = dm_object.get('SizeZ', 1)
elif obj_class == "Bone":
obj = Bone()
obj.cframe = get_cframe(dm_object.get('CFrame', CFrame()))
elif obj_class == "Attachment":
obj = Attachment()
obj.cframe = get_cframe(dm_object.get('CFrame', CFrame()))
elif obj_class == "WeldConstraint":
obj = Weld()
obj.part0 = dm_object.get('Part0', -1)
obj.part1 = dm_object.get('Part1', -1)
elif obj_class == "Motor6D":
obj = Motor6D()
obj.part0 = dm_object.get('Part0', -1)
obj.part1 = dm_object.get('Part1', -1)
obj.c0 = get_cframe(dm_object.get('C0', CFrame()))
obj.c1 = get_cframe(dm_object.get('C1', CFrame()))
obj.transform = get_cframe(dm_object.get('Transform', CFrame()))
elif obj_class == "Accessory":
obj = Accessory()
obj.attach_point = get_cframe(dm_object.get('AttachPoint', CFrame()))
else:
logger.fatal("Unknown object type: " + str(obj_class))
assert obj is not None
obj.name = dm_object.get('Name', None)
obj.parent = dm_object.get('Parent', None)
assert obj.name is not None
assert obj.parent is not None
id_to_object[key] = obj
objects.append(obj)
# 2nd pass - resolve numeric IDs to real references (and build hierarchy)
root = None
for obj in objects:
obj.resolve(id_to_object)
if obj.parent is None:
# multi-root objects not supported
assert root is None
root = obj
else:
obj.parent.children.append(obj)
# 3rd pass - fetch actual data from CDN
data_cache = dict()
for obj in objects:
if isinstance(obj, MeshPart):
obj.mesh_blob = data_cache.get(obj.mesh_id, None)
if obj.mesh_blob is None:
logger.message("Fetch mesh: " + obj.mesh_id)
obj.mesh_blob, err = fetch_asset(obj.mesh_id)
data_cache[obj.mesh_id] = obj.mesh_blob
else:
logger.message(" Cached mesh: " + obj.mesh_id)
obj.texture_blob = data_cache.get(obj.texture_id, None)
if obj.texture_blob is None:
logger.message(" Fetch texture: " + obj.texture_id)
obj.texture_blob, err = fetch_asset(obj.texture_id)
data_cache[obj.texture_id] = obj.texture_blob
else:
logger.message(" Cached texture: " + obj.texture_id)
return root
def is_close(x, y, r_tol=1.e-5, a_tol=1.e-8):
return abs(x-y) <= a_tol + r_tol * abs(y)
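# e.g. is_close(1.0, 1.0 + 1e-9) -> True, is_close(1.0, 1.1) -> False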
def get_bone_name_from_m6d(node: Motor6D):
return node.part1.name
def get_fbx_transform(cframe: CFrame) -> fbx.FbxTransform:
xform = fbx.FbxTransform()
xform.px = cframe.tx
xform.py = cframe.ty
xform.pz = cframe.tz
# Computing Euler angles from a rotation matrix
# https://www.gregslabaugh.net/publications/euler.pdf
# R = Rz(phi) * Ry(theta) * Rx(psi)
phi = 0.0
if is_close(cframe.r20, -1.0):
theta = math.pi / 2.0
psi = math.atan2(cframe.r01, cframe.r02)
elif is_close(cframe.r20, 1.0):
theta = -math.pi / 2.0
psi = math.atan2(-cframe.r01, -cframe.r02)
else:
theta = -math.asin(cframe.r20)
cos_theta = math.cos(theta)
psi = math.atan2(cframe.r21 / cos_theta, cframe.r22 / cos_theta)
phi = math.atan2(cframe.r10 / cos_theta, cframe.r00 / cos_theta)
xform.rx = math.degrees(psi)
xform.ry = math.degrees(theta)
xform.rz = math.degrees(phi)
xform.sx = 1.0
xform.sy = 1.0
xform.sz = 1.0
return xform
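# Illustrative check: the identity CFrame maps to a zero transform,
#   get_fbx_transform(CFrame())  # px=py=pz=0, rx=ry=rz=0, sx=sy=sz=1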
def load_mesh(file_name: str) -> rbmesh.Mesh or None:
mesh_handle = open(file_name, 'rb')
mesh_payload = mesh_handle.read()
mesh_handle.close()
mesh = rbmesh.parse_mesh(mesh_payload)
return mesh
def load_mesh_as_fbx_geo(file_name: str, cframe: CFrame):
mesh = load_mesh(file_name)
mesh_transform_vertices(mesh, cframe)
geo = rbmesh.convert_mesh_to_fbx_geometry(mesh, 0)
return geo
def get_texture_name(url: str):
texture_name = "url_resolve_error"
if url.startswith('rbxassetid://'):
texture_name = url[13:]
elif url.startswith('https://www.roblox.com/asset/?id='):
texture_name = url[33:]
elif url.startswith('http://www.roblox.com/asset/?id='):
texture_name = url[32:]
elif url.startswith('http://roblox.com/asset/?id='):
texture_name = url[28:]
texture_name = texture_name.replace(" ", "")
texture_name = texture_name.replace("/", "")
texture_name = texture_name.replace("\\", "")
texture_name = texture_name.replace("?", "")
texture_name = texture_name.replace("%", "")
texture_name = texture_name.replace("*", "")
texture_name = texture_name.replace(":", "")
texture_name = texture_name.replace("|", "")
texture_name = texture_name.replace('"', "")
texture_name = texture_name.replace('<', "")
texture_name = texture_name.replace('>', "")
texture_name = texture_name.replace('.', "")
texture_name = texture_name.replace('@', "")
return texture_name
def append_to_fbx(doc, node, fbx_parent_id: int, desc: SceneDescription):
# noinspection PyUnusedLocal
fbx_id = 0
if isinstance(node, MeshPart):
logger.message("FBX Mesh: " + node.name)
logger.message(" geo: " + node.mesh_id)
logger.message(" img: " + node.texture_id)
xform = get_fbx_transform(node.cframe)
mesh = None
if node.mesh_blob is None:
if node.mesh_type == "Head":
mesh = load_mesh("./built-in/sm_head.mesh")
scale_xz = min(node.scale_x, node.scale_z)
node.scale_x = scale_xz
node.scale_z = scale_xz
node.scale_x = node.scale_x / 1.25
node.scale_y = node.scale_y / 1.25
node.scale_z = node.scale_z / 1.25
elif node.mesh_type == "Sphere":
mesh = load_mesh("./built-in/sm_sphere.mesh")
node.scale_x = node.scale_x / 1.45
node.scale_y = node.scale_y / 1.45
node.scale_z = node.scale_z / 1.45
else:
mesh_payload = node.mesh_blob["payload"]
mesh = rbmesh.parse_mesh(mesh_payload)
if mesh is None:
fbx_id = doc.create_locator(node.name, xform, fbx_parent_id)
else:
mat_id, mat_name = doc.create_material(node.name + "Mat", fbx.FbxColor4(1, 1, 1, 1))
texture_file_name = "empty.png"
if node.texture_blob is not None:
texture_payload = node.texture_blob["payload"]
texture_hash = hashlib.sha256(texture_payload).hexdigest()
texture_ext = detect_asset_type(texture_payload)
# texture_name = get_texture_name(node.texture_id)
texture_name = str(texture_hash)
texture_file_name = texture_name + "." + texture_ext
full_texture_file_name = desc.textures_folder + texture_file_name
ensure_path_exist(full_texture_file_name)
dest_file = open(full_texture_file_name, 'wb')
dest_file.write(texture_payload)
dest_file.close()
doc.create_texture(node.name + "Tex", texture_file_name, mat_id)
mesh_transform_vertices(mesh, cframe_rotation_y(3.14159),
node.offset_x, node.offset_y, node.offset_z,
node.scale_x, node.scale_y, node.scale_z)
geo = rbmesh.convert_mesh_to_fbx_geometry(mesh, 0)
fbx_id = doc.create_mesh(node.name, xform, geo, mat_id, fbx_parent_id)
doc.connect_objects(fbx_id, desc.geos_layer_id)
elif isinstance(node, Bone):
logger.message("FBX Bone: " + node.name)
xform = get_fbx_transform(node.cframe)
if node.cframe_local is not None:
xform = get_fbx_transform(node.cframe_local)
fbx_id = doc.create_bone(node.name, xform, fbx_parent_id)
doc.connect_objects(fbx_id, desc.bones_layer_id)
elif isinstance(node, Attachment):
logger.message("FBX Attachment: " + node.name)
xform = get_fbx_transform(node.cframe)
if node.geo is None:
fbx_id = doc.create_locator(node.name, xform, fbx_parent_id)
else:
fbx_id = doc.create_mesh(node.name, xform, node.geo, desc.attachments_material_id, fbx_parent_id)
doc.connect_objects(fbx_id, desc.attachments_layer_id)
else:
logger.message("FBX Group: " + node.name)
fbx_id = doc.create_group(node.name, fbx_parent_id)
for child in node.children:
append_to_fbx(doc, child, fbx_id, desc)
return
def _get_linearized_tree_recursive(res: list, node: Instance):
res.append(node)
for child in node.children:
_get_linearized_tree_recursive(res, child)
def get_linearized_tree(root: Instance) -> list:
res = list()
res.append(root)
for child in root.children:
_get_linearized_tree_recursive(res, child)
return res
def mesh_transform_vertices(mesh: rbmesh.Mesh, cframe: CFrame,
ox: float = 0, oy: float = 0, oz: float = 0,
sx: float = 1, sy: float = 1, sz: float = 1):
for vertex in mesh.vertices:
x = (vertex.p_x + ox) * sx
y = (vertex.p_y + oy) * sy
z = (vertex.p_z + oz) * sz
vertex.p_x, vertex.p_y, vertex.p_z = cframe_transform_pos(cframe, x, y, z)
nx = vertex.n_x
ny = vertex.n_y
nz = vertex.n_z
vertex.n_x, vertex.n_y, vertex.n_z = cframe_transform_vec(cframe, nx, ny, nz)
return
def export_roblox_model(model_desc) -> str:
root = parse_model_desc(model_desc)
# logger.message(str(root))
file_folder = "./Avatars/" + root.name + "/"
file_name = file_folder + root.name + ".fbx"
rot_y_180 = cframe_rotation_y(3.14159)
spike_pivot = cframe_translation(0, 0.5, 0)
logger.message("Create FBX...")
doc = fbx.FbxDocument(file_name)
sphere_geo = load_mesh_as_fbx_geo("./built-in/sphere.mesh", rot_y_180)
spike_geo = load_mesh_as_fbx_geo("./built-in/spike.mesh", cframe_multiply(rot_y_180, spike_pivot))
scene_desc = SceneDescription()
scene_desc.textures_folder = file_folder
scene_desc.attachments_material_id, _ = doc.create_material("AttachmentMat", fbx.FbxColor4(1, 0.8, 0.8, 1))
scene_desc.attachments_layer_id = doc.create_layer("Attachments", fbx.FbxColor4(1, 0, 0))
scene_desc.bones_layer_id = doc.create_layer("Bones", fbx.FbxColor4(0, 0, 1))
scene_desc.geos_layer_id = doc.create_layer("Geos", fbx.FbxColor4(0, 1, 0))
scene_desc.accs_layer_id = doc.create_layer("Accs", fbx.FbxColor4(1, 1, 0))
root_primary_part = None
scene_center_cframe = CFrame()
if root.primary_part is not None:
root_primary_part = root.primary_part
scene_center_cframe = root.primary_part.cframe
assert root_primary_part is not None
# Accessories handler
accessories = list()
for child in root.children:
if isinstance(child, Accessory):
child.parent = None
accessories.append(child)
logger.message("Accessory: " + child.name)
for accessory in accessories:
root.children.remove(accessory)
# convert part based rig (Motor6Ds) to bone based
nodes = get_linearized_tree(root)
# Step 0. Cover a special case, in R15 case everything should be centered around LowerTorso
for node in nodes:
if isinstance(node, Motor6D) and node.name == "Root":
scene_center_cframe = cframe_multiply(node.part0.cframe, node.c0)
break
scene_center_cframe_inv = cframe_inverse(scene_center_cframe)
# Step 1. Center the scene
logger.message("1. Center scene")
for node in nodes:
if isinstance(node, Part) or isinstance(node, MeshPart) or isinstance(node, Bone):
node.cframe = cframe_multiply(scene_center_cframe_inv, node.cframe)
# Step 2. Generate bones from motor6Ds
logger.message("2. Generate bones")
bones = list()
humanoid_root_bone = Bone()
humanoid_root_bone.name = "HumanoidRootNode"
humanoid_root_bone.parent = None
humanoid_root_bone.cframe = CFrame()
humanoid_root_bone.cframe_local = CFrame()
humanoid_root_bone.m6d = None
bones.append(humanoid_root_bone)
for node in nodes:
# skip HumanoidRootPart
if node == root_primary_part:
continue
if isinstance(node, Motor6D):
bone = Bone()
bone.name = get_bone_name_from_m6d(node)
bone.parent = None
bone.m6d = node
#
# these two matrices below are equal
# get_fbx_transform(cframe_multiply(node.part1.cframe, node.c1))
# get_fbx_transform(cframe_multiply(node.part0.cframe, node.c0))
bone.cframe = cframe_roblox_to_maya(cframe_multiply(node.part0.cframe, node.c0))
bones.append(bone)
# Step 3. Rename geos
logger.message("3. Rename geos")
for node in nodes:
if isinstance(node, Part) or isinstance(node, MeshPart):
node.name = node.name + "_Geo"
# Step 4. Reconstruct hierarchy
logger.message("4. Build hierarchy")
already_connected_parts = dict()
already_connected_parts[root_primary_part] = humanoid_root_bone
bones_to_process = list()
while True:
bones_to_process.clear()
for bone in bones:
# ignore already processed bones
if bone.m6d is None:
continue
parent_bone0 = already_connected_parts.get(bone.m6d.part0, None)
parent_bone1 = already_connected_parts.get(bone.m6d.part1, None)
child_part = None
parent_bone = None
if parent_bone0 is not None:
assert parent_bone1 is None
parent_bone = parent_bone0
child_part = bone.m6d.part1
if parent_bone1 is not None:
assert parent_bone0 is None
parent_bone = parent_bone1
child_part = bone.m6d.part0
if parent_bone is None:
continue
bones_to_process.append((parent_bone, child_part, bone))
for parent_bone, child_part, child_bone in bones_to_process:
logger.message(parent_bone.name + " -> " + child_bone.name + "/" + child_part.name)
child_bone.m6d = None
child_bone.parent = parent_bone
parent_bone.children.append(child_bone)
child_bone.cframe_local = cframe_multiply(cframe_inverse(parent_bone.cframe), child_bone.cframe)
already_connected_parts[child_part] = child_bone
number_of_bones_to_process = 0
for bone in bones:
if bone.m6d is not None:
number_of_bones_to_process += 1
if number_of_bones_to_process == 0:
break
# Step 6. Rotate by 180 degree and add root bones to the FBX scene
for node in nodes:
if isinstance(node, Attachment):
# from Roblox local space to Maya world space
node.cframe = cframe_roblox_to_maya(cframe_multiply(node.parent.cframe, node.cframe))
for node in nodes:
if isinstance(node, Part) or isinstance(node, MeshPart):
# from Roblox world space to Maya world space
node.cframe = cframe_roblox_to_maya(node.cframe)
# Step 7. Attach mesh part to corresponding bones
# a) built attachments list
geom_to_attachments = dict()
for node in nodes:
if isinstance(node, Attachment) and not node.name.endswith("RigAttachment"):
if node.name.endswith("Attachment"):
node.name = node.name[:-10] + "_Att"
parent_geo_name = node.parent.name
geo_attachments = geom_to_attachments.get(parent_geo_name, None)
if not geo_attachments:
geo_attachments = list()
geom_to_attachments[parent_geo_name] = geo_attachments
geo_attachments.append(node)
# b) destroy existing hierarchy (unlink)
for node in nodes:
node.children.clear()
node.parent = None
# c) add geo/attachments to corresponding bones
for bone in bones:
part_name = bone.name + "_Geo"
for node in nodes:
if node.name == part_name and (isinstance(node, Part) or isinstance(node, MeshPart)):
node.cframe = cframe_multiply(cframe_inverse(bone.cframe), node.cframe)
node.parent = bone
bone.children.append(node)
geo_attachments = geom_to_attachments.get(part_name, None)
if geo_attachments:
for attachment in geo_attachments:
if attachment.name == "LeftGrip_Att" or attachment.name == "RightGrip_Att":
#
# https://developer.roblox.com/en-us/articles/using-avatar-importer
#
# The LeftGrip_Att and RightGrip_Att attachments have a 90 deg rotation on the X axis.
# In short, their rotation should be (90, 0, 0).
#
attachment.cframe = cframe_multiply(attachment.cframe, cframe_rotation_x(3.14159))
attachment.geo = spike_geo
else:
attachment.geo = sphere_geo
attachment.cframe = cframe_multiply(cframe_inverse(bone.cframe), attachment.cframe)
attachment.parent = bone
bone.children.append(attachment)
root_bone_id = doc.create_bone("Root", fbx.FbxTransform())
doc.connect_objects(root_bone_id, scene_desc.bones_layer_id)
root_att_id = doc.create_mesh("Root_Att", fbx.FbxTransform(),
sphere_geo, scene_desc.attachments_material_id, root_bone_id)
doc.connect_objects(root_att_id, scene_desc.attachments_layer_id)
append_to_fbx(doc, humanoid_root_bone, root_bone_id, scene_desc)
if len(accessories) > 0:
accessories_id = doc.create_group("Accessories")
doc.connect_objects(accessories_id, scene_desc.accs_layer_id)
for accessory in accessories:
accessory_name = accessory.name
if accessory_name.endswith("Accessory"):
accessory_name = accessory_name[:-9] + "_Acc"
accessory_nodes = get_linearized_tree(accessory)
# move attachments to world space
for accessory_node in accessory_nodes:
if isinstance(accessory_node, Attachment) and accessory_node.parent is not None:
accessory_node.cframe = cframe_multiply(accessory_node.parent.cframe, accessory_node.cframe)
# destroy existing hierarchy
for accessory_node in accessory_nodes:
accessory_node.children.clear()
accessory_node.parent = None
root_accessory_id = doc.create_group(accessory_name, accessories_id)
for accessory_node in accessory_nodes:
if isinstance(accessory_node, MeshPart):
# Center accessory
accessory_node.cframe = cframe_multiply(scene_center_cframe_inv, accessory_node.cframe)
# from Roblox world space to Maya world space
accessory_node.cframe = cframe_roblox_to_maya(accessory_node.cframe)
append_to_fbx(doc, accessory_node, root_accessory_id, scene_desc)
if isinstance(accessory_node, Attachment):
accessory_node.geo = sphere_geo
# Center accessory
accessory_node.cframe = cframe_multiply(scene_center_cframe_inv, accessory_node.cframe)
# from Roblox world space to Maya world space
accessory_node.cframe = cframe_roblox_to_maya(accessory_node.cframe)
append_to_fbx(doc, accessory_node, root_accessory_id, scene_desc)
text = doc.finalize()
logger.message("Save FBX '" + file_name + "'")
ensure_path_exist(file_name)
file_handle = open(file_name, 'w+')
file_handle.write(text)
file_handle.close()
return "Saved file:" + file_name
class ForgeHTTPArtServerRequestHandler(BaseHTTPRequestHandler):
# noinspection PyPep8Naming
def do_POST(self):
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length).decode('utf-8')
model_description = json.loads(body)
# result = fetch_roblox_model_to_disk(model_description)
result = export_roblox_model(model_description)
self.send_response(200)
# Send headers
self.send_header('Content-type', 'text/html')
self.end_headers()
# Write content as utf-8 data
self.wfile.write(bytes(result, "utf8"))
return
# noinspection PyPep8Naming
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
response = "{"
# add accessories
accessories_file = open('./accessories.txt', 'r')
if accessories_file:
response += '"accessories": ['
lines = accessories_file.readlines()
need_comma = False
for line in lines:
ln = line.rstrip()
if not ln.isdigit():
continue
if need_comma:
response += ", "
response += ln
need_comma = True
response += '], '
else:
logger.warn("Can't open accessories.txt")
# add heads
heads_file = open('./heads.txt', 'r')
if heads_file:
response += '"heads": ['
lines = heads_file.readlines()
need_comma = False
for line in lines:
ln = line.rstrip()
if not ln.isdigit():
continue
if need_comma:
response += ", "
response += ln
need_comma = True
response += '], '
else:
logger.warn("Can't open heads.txt")
# add bundles
bundles_file = open('./bundles.txt', 'r')
if bundles_file:
response += '"bundles": ['
lines = bundles_file.readlines()
need_comma = False
for line in lines:
ln = line.rstrip()
if not ln.isdigit():
continue
if need_comma:
response += ", "
response += ln
need_comma = True
response += ']'
else:
logger.warn("Can't open bundles.txt")
response += "}"
self.wfile.write(bytes(response, "utf8"))
return
def signal_handler(_signal, _frame):
logger.message('\nAvatar FBX Exporter Server closed by user request.')
sys.exit(0)
def main():
if sys.version_info[0] != 3:
logger.fatal("Python3 required")
signal.signal(signal.SIGINT, signal_handler)
server_address = ('127.0.0.1', 49999)
httpd = HTTPServer(server_address, ForgeHTTPArtServerRequestHandler)
logger.message('Roblox Avatar FBX Exporter Server "{0}:{1}"'.format(server_address[0], server_address[1]))
logger.message('by Sergey Makeev\n')
logger.message('Press Ctrl+C to exit')
httpd.serve_forever()
main()
|
the-stack_106_14323
|
#!/usr/bin/env python3
# std
from abc import abstractmethod
from typing import Dict
# 3rd
import numpy as np
# ours
from clusterking.result import AbstractResult
from clusterking.worker import AbstractWorker
from clusterking.data.data import Data
class PreprocessorResult(AbstractResult):
def __init__(self, data1, data2):
super().__init__()
self.data1 = data1
self.data2 = data2
class Preprocessor(AbstractWorker):
def __init__(self, name=None):
super().__init__()
self._name = name
@property
def name(self):
if self._name is None:
return str(type(self).__name__)
return self._name
@name.setter
def name(self, value):
self._name = value
def run(self, data1: Data, data2: Data) -> PreprocessorResult:
"""Run.
Args:
data1: "original" :class:`~clusterking.data.data.Data` object
data2: "other" :class:`~clusterking.data.data.Data` object
Returns:
:class:`~PreprocessorResult`
"""
return PreprocessorResult(data1=data1, data2=data2)
class ClusterMatcherResult(PreprocessorResult):
def __init__(self, data1, data2, rename_dct):
super().__init__(data1=data1, data2=data2)
self.rename_dct = rename_dct
class ClusterMatcher(Preprocessor):
"""Cluster names are arbitrary in general, i.e. when trying to compare
two clustered datasets and trying to calculate a figure of merit, we have
to match the names together.
    This is done by this worker class.
"""
def __init__(self, *args, cluster_column="cluster", **kwargs):
super().__init__(*args, **kwargs)
self.cluster_column = cluster_column
@abstractmethod
def run(self, data1: Data, data2: Data) -> ClusterMatcherResult:
"""
Args:
data1: "original" :class:`~clusterking.data.data.Data` object
data2: "other" :class:`~clusterking.data.data.Data` object
Returns:
:class:`~ClusterMatcherResult`
"""
pass
class TrivialClusterMatcher(ClusterMatcher):
"""Thus subclass of :class:`CCMatcher` maps cluster names from the
first clustering to the cluster name of the second that maximizes
the number of sample points that lie in the same cluster.
It also only returns the intersection of the indizes of both Series.
"""
def run(self, data1: Data, data2: Data) -> ClusterMatcherResult:
# todo [perf, low effort, med prio]: for speedup: only use pd.Series of
# clusters
ndata1 = data1.copy(deep=True)
ndata2 = data2.copy(deep=True)
        # 1. Throw out points that are not present in both datasets
index_intersection = set(ndata1.df.index).intersection(
set(ndata2.df.index)
)
ndata1.df = ndata1.df.loc[index_intersection]
ndata2.df = ndata2.df.loc[index_intersection]
# 2. Rename clusters
clusters2 = set(ndata2.df[self.cluster_column])
dct = {}
for cluster2 in clusters2:
mask = ndata2.df[self.cluster_column] == cluster2
most_likely = np.argmax(
np.bincount(ndata1.df[self.cluster_column][mask])
)
dct[cluster2] = most_likely
ndata2.df[self.cluster_column] = ndata2.df[self.cluster_column].map(dct)
return ClusterMatcherResult(data1=ndata1, data2=ndata2, rename_dct=dct)
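# Illustrative usage sketch (assumes two already clustered Data objects d1, d2;
# not part of the original module):
#
#   matcher = TrivialClusterMatcher()
#   result = matcher.run(d1, d2)
#   # result.data2 now carries d1's cluster labels; result.rename_dct maps old -> new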
class FirstComeFirstServe1DClusterMatcher(ClusterMatcher):
"""This subclass of :class:`CCMatcher` works only for 1D parameter spaces.
It simply sorts the first points of each cluster and enumerates them
in order to get a unique name for each cluster."""
def run(self, data1: Data, data2: Data) -> ClusterMatcherResult:
ndata1 = data1.copy(deep=True)
ndata2 = data2.copy(deep=True)
nclusters1 = len(data1.df[self.cluster_column].unique())
nclusters2 = len(data2.df[self.cluster_column].unique())
if nclusters1 != nclusters2:
raise ValueError("Cluster numbers don't match")
order1 = self._get_order_of_clusters(data1)
order2 = self._get_order_of_clusters(data2)
order1_inverted = {value: key for key, value in order1.items()}
rename_dct = {}
for cluster in order2:
rename_dct[cluster] = order1_inverted[order2[cluster]]
ndata2.df[self.cluster_column] = ndata2.df[self.cluster_column].map(
rename_dct
)
return ClusterMatcherResult(
data1=ndata1, data2=ndata2, rename_dct=rename_dct
)
def _get_order_of_clusters(self, data: Data) -> Dict[int, int]:
cluster2min = {}
uclusters = data.df[self.cluster_column].unique()
for ucluster in uclusters:
cluster2min[ucluster] = data.df[
data.df[self.cluster_column] == ucluster
][data.par_cols[0]].min()
sorted_mins = sorted(list(cluster2min.values()))
return {
ucluster: sorted_mins.index(cluster2min[ucluster])
for ucluster in uclusters
}
|
the-stack_106_14325
|
import tweepy
# Consumer keys and access tokens, used for OAuth
consumer_key = 'auYS8IWtVrGYzfLngBjhfR3jT'
consumer_secret = 'eY3TQzpsdrb74znfN4V80mtcK2n0YA36o7GWcZqFLbqCFpprWp'
access_token = '4244605367-uBSzPDdnbLLVpropRgV1UBs3IURxvcsZr0vtrho'
access_token_secret = 'yclXtFvvUi5FobPq0tn9ecAnmKef5UhV4TZVu6K4nTFLX'
# OAuth process, using the keys and tokens
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# Creation of the actual interface, using authentication
api = tweepy.API(auth)
def get_tweets(username):
for status in tweepy.Cursor(api.user_timeline, screen_name='@{0}'.format(username)).items(20):
yield status._json['text']
|
the-stack_106_14327
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import json
from statefun import *
import asyncio
from aiohttp import web
functions = StatefulFunctions()
def serialize_json_utf8(obj) -> bytes:
"""
serialize the given object as a JSON utf-8 bytes.
"""
str = json.dumps(obj, ensure_ascii=False)
return str.encode('utf-8')
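# e.g. serialize_json_utf8({"name": "Jane"}) -> b'{"name": "Jane"}'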
GREET_REQUEST_TYPE = simple_type(typename="example/GreetRequest",
serialize_fn=serialize_json_utf8,
deserialize_fn=json.loads)
@functions.bind(typename="example/person", specs=[ValueSpec(name="visits", type=IntType)])
async def person(context: Context, message: Message):
# update the visit count.
visits = context.storage.visits or 0
visits += 1
context.storage.visits = visits
    # enrich the request with the number of visits.
request = message.as_type(GREET_REQUEST_TYPE)
request['visits'] = visits
# next, we will forward a message to a special greeter function,
    # that will compute a super-duper-personalized greeting based on the
# number of visits that this person has.
context.send(
message_builder(target_typename="example/greeter",
target_id=request['name'],
value=request,
value_type=GREET_REQUEST_TYPE))
@functions.bind(typename="example/greeter")
async def greeter(context, message):
request = message.as_type(GREET_REQUEST_TYPE)
person_name = request['name']
visits = request['visits']
greeting = await compute_fancy_greeting(person_name, visits)
context.send_egress(kafka_egress_message(typename="example/greets",
topic="greetings",
key=person_name,
value=greeting))
async def compute_fancy_greeting(name: str, seen: int):
"""
Compute a personalized greeting, based on the number of times this @name had been seen before.
"""
templates = ["", "Welcome %s", "Nice to see you again %s", "Third time is a charm %s"]
if seen < len(templates):
greeting = templates[seen] % name
else:
greeting = f"Nice to see you at the {seen}-nth time {name}!"
await asyncio.sleep(1)
return greeting
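# e.g. await compute_fancy_greeting("Jane", 1) -> "Welcome Jane" (after a simulated 1s delay)
# and  await compute_fancy_greeting("Jane", 7) -> "Nice to see you at the 7-nth time Jane!"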
#
# Serve the endpoint
#
handler = RequestReplyHandler(functions)
async def handle(request):
req = await request.read()
res = await handler.handle_async(req)
return web.Response(body=res, content_type="application/octet-stream")
app = web.Application()
app.add_routes([web.post('/statefun', handle)])
if __name__ == '__main__':
web.run_app(app, port=8000)
|
the-stack_106_14330
|
#!/usr/bin/python3
# importing custom lib:
import run_latest_version as run
# import default libs:
import os
import time
import re
import json
from pprint import pprint
# this sleep is used:
# - when making new sessions (sleep_short * 2)
# - when selecting a window (window selection is really buggy in the tmux api)
# - when sending a file, after every 200 characters
sleep_short = 0.20
sleep_at_chars = 200
files = run.get_last_version_of_files('../scripts/')
# tmp_config is needed for the tinydb reader
# tinydb reads the db with the open(file, 'a') flag.
# so its trying to open the file with rw persmissions, this will fail if it is read only.
tmp_config = "/tmp/dashboard_tinydb.json"
class Session():
def __init__(self, session_dict, spawn_dir):
# init of an session
self.file_content = {}
self.spawn_dir = spawn_dir
self.make_session(session_dict[0]['session_name'])
self.current_pane = 0
spawned_tabs = {}
win_pane = {}
# loop through the session dict
for counter, row in enumerate(session_dict):
name = row['window_name']
self.actual_window_name = name
if (counter == 0):
self.rename_window(name)
# print(row)
win_pane[name] = row['same_pane']
try:
                # causes a KeyError if not available
win_pane[name]
if win_pane[name] != row['same_pane']:
self.split('v')
self.set_mode('even-vertical')
win_pane[name] = row['same_pane']
except KeyError:
self.set_mode()
self.new_window(name)
win_pane[name] = row['same_pane']
# dont show change dir
#self.send_keys("clear", send_enter = True)
if row['exec_script'] == '':
self.send_keys(
row['pre_arg'] + "\n" + ' '.join(row['args']), send_enter=row['send_enter'])
else:
self.send_file(row['exec_script'], send_enter=row['send_enter'],
pre_arg=row['pre_arg'], args=' '.join(row['args']))
# print(win_pane)
self.set_mode('tiled')
def new_window(self, name):
self.own_os_system(
"(cd " + self.spawn_dir + " ; tmux new-window" + self.dash_t() + " /bin/bash)")
self.current_pane = 0
self.rename_window(name)
self.actual_window(name)
def own_os_system(self, cmd):
# print(cmd)
os.system(cmd)
def set_mode(self, mode='tiled'):
output = "tmux select-layout" + self.dash_t() + "" + mode
# print(output)
self.own_os_system(output)
def make_session(self, session_name):
print('Making session: ' + session_name +
" (manual connect: tmux attach -t " + session_name + ")")
self.session_name = session_name
self.kill_session()
self.own_os_system('tmux -2 new-session -d -s ' + session_name)
def kill_session(self):
output = "tmux kill-session" + self.dash_t() + "2>/dev/null"
# print(output)
self.own_os_system(output)
    # unused function because it is not reliable (tmux window selection is buggy, see note at top of file)
def select_window(self, name):
# time.sleep(sleep_short)
self.own_os_system("tmux select-window" +
self.dash_t()[:-1] + ":" + name)
def rename_window(self, name):
self.own_os_system("tmux rename-window" + self.dash_t() + name)
self.send_keys("cd " + self.spawn_dir, send_enter=True)
self.own_os_system('tmux bind c new-window -c "#{pane_current_path}"')
def actual_window(self, name):
self.actual_window_name = name
def dash_t(self):
return ' -t ' + self.session_name + ' '
def dash_t_with_pane(self):
return ' -t ' + self.session_name + ':' + self.actual_window_name + ' '
def send_keys(self, keys, send_enter=False):
keys_to_send = keys.replace('\n', '" C-m "')
self.send_keys_raw(keys_to_send, send_enter)
def convert_quotes(self, cmd):
filtered_cmd_rows = []
for cmd_row in cmd.split('\n'):
keys_to_send = ""
keys_to_send = cmd_row.replace('"', "`echo '22' | xxd -p -r`")
if r"\`echo '22' | xxd -p -r`" in keys_to_send:
keys_to_send = keys_to_send.replace(
r"\`echo '22' | xxd -p -r`", r"\\`echo '22' | xxd -p -r`")
keys_to_send = keys_to_send.replace('$', '\$')
# rightsplit last semicolon
replace_last = keys_to_send.rsplit(';', 1)
keys_to_send = '\;'.join(replace_last)
filtered_cmd_rows.append(keys_to_send)
return '\n'.join(filtered_cmd_rows)
def send_keys_raw(self, cmd, send_enter=False):
send_string = "tmux send-keys" + self.dash_t() + "\"" + cmd + "\""
if send_enter:
send_string += " C-m"
self.own_os_system(send_string)
# splitting the current pane
def split(self, char):
time.sleep(sleep_short)
self.own_os_system("tmux split-window" + self.dash_t() + "-" + char)
self.send_keys("cd " + self.spawn_dir, send_enter=True)
def move_pane(self, from_nr, to_nr):
send_string = "tmux movep -s " + str(from_nr) + " -t " + str(to_nr)
print(send_string)
self.send_keys(send_string, send_enter=True)
def select_pane(self, pane_number):
self.current_pane = pane_number
send_string = "tmux select-pane" + self.dash_t_with_pane() + "-t " + \
str(pane_number)
# print(send_string)
self.own_os_system(send_string)
def get_file_content(self, file_name_middle_part):
# print(files.get_list())
# path = [string for string in files.get_list() if re.match(re.compile('.*' + file_name_middle_part + '.*'), string)][0]
path = files.get_specific_file(file_name_middle_part)
try:
retval = self.file_content[path]
except KeyError:
with open(path, 'r') as file:
retval = ''.join(file.readlines())
self.file_content[path] = retval
return retval
def send_file(self, filename, send_enter=False, auto_increase=True, pre_arg="", args=""):
# print(filename)
#file_content = files.get_specific_file(filename)
file_content = self.get_file_content(filename)
file_content_plus_args = pre_arg + "\n" + file_content + args
# self.select_pane(self.current_pane)
content_converted = self.convert_quotes(file_content_plus_args)
# print(content_converted)
send_counter = 0
for i in content_converted.split('\n')[:-1]:
send_counter = + len(i)
if send_counter > sleep_at_chars:
# print('sleeping')
send_counter = 0
time.sleep(sleep_short)
self.send_keys(i, send_enter=True)
self.send_keys(content_converted.split('\n')[-1], send_enter)
#self.send_keys(content_converted, send_enter)
if auto_increase:
self.current_pane += 1
# user input function
# returns [is_valid_input, is_empty, input_value] if executed normally
def user_input(input_message):
try:
ret_val = input(input_message)
if ret_val == "":
return [True, True, ""]
return [True, False, str(ret_val)]
except KeyboardInterrupt:
return [False, 0, ""]
def exit_():
os.popen('/usr/bin/rm ' + tmp_config)
print('\nCya next time ;)', end="")
exit(0)
def info():
print("This cli will (re)start sessions by the syntax:")
print("Type:")
print("\tq - to close program")
print("\ta - for spawning all tmux sessions")
print("\t-index- for spawning a specific session")
def create_tmp():
caller_db = files.get_specific_file('caller_db')
# creating a temp file so you dont have to change perms.
cp_func = 'cp ' + caller_db + " " + tmp_config
os.popen(cp_func).readlines()
os.popen("chmod +rw " + tmp_config).readlines()
def interactive_interface(spawn_dir):
print("This is a program for spawning tmux processes.\n")
create_tmp()
db = TinyDB(tmp_config)
query = Query()
all_records = []
db_table_default = db.table('_default')
sections = []
for i in range(len(db_table_default)):
session = db_table_default.get(doc_id=i+1)['session_name']
#print(str(i) , str(db_table_default.get(doc_id=i+1)))
if not (session in sections):
sections.append(session)
number_with_sessions = []
for counter, i in enumerate(sections):
number_with_sessions.append([counter, i])
while 1:
#print(db.search(query.session_name == 'VPN'))
print("The following sessions are available:")
print('--------------------')
for counter, section in enumerate(sections):
print(counter, section)
print('--------------------')
info()
selected_input = user_input(
"Enter the corresponding number to respawn it: ")
if(selected_input[1]):
print("Sorry i didn't understand that.")
continue
if not(selected_input[0]) or selected_input[2][0] == 'q':
exit_()
filterd_input_var = selected_input[2]
if filterd_input_var[0] == 'a':
print('Making all sessions:')
for session in sections:
session_json = db.search(query.session_name == session)
Session(session_json, spawn_dir)
else:
try:
print("Index selected: " + sections[int(filterd_input_var)])
actual_db = db.search(
query.session_name == sections[int(filterd_input_var)])
Session(actual_db, spawn_dir)
time.sleep(sleep_short * 2)
except KeyboardInterrupt:
exit_()
except ValueError:
print("Sorry i didn't understand that.")
# except Exception as e:
# print("an error has accured: " + str(e))
if __name__ == "__main__":
from tinydb import TinyDB, Query
path = '/tmp'
user_path = user_input('Path to spawn tmux clients in:')
if (not(user_path[0])):
exit_()
else:
if not(user_path[1]):
path = user_path[2]
print('using: ' + path)
if (os.path.isdir(path)):
interactive_interface(path)
print("selected path (" + str(path) + ")")
print("doesn't exists. Exiting now.")
|
the-stack_106_14331
|
from matplotlib import pyplot as plt
from cross_correlation_func import cross_correlation_using_fft, compute_shift
from scipy.stats import kurtosis
from statistics import mean, stdev, median
from textwrap import wrap
def drift_confidence(df_resample, out_path, fps, pca=1, save_fig=0):
"""
Args:
df_resample:
out_path:
fps:
pca:
save_fig:
Returns:
"""
if pca:
flow_key = 'diffflow_pca'
acc_key = 'acc_pca'
else:
flow_key = 'diff_flowx'
acc_key = 'accx'
fftshift = cross_correlation_using_fft(df_resample[flow_key].values, df_resample[acc_key].values)
dist = max(abs(fftshift-median(fftshift)))
shift = compute_shift(fftshift)
fx_ay_drift = shift * 1000/fps
fx_ay_conf = dist/stdev(fftshift)
if save_fig:
fig, ax = plt.subplots(2, 4, figsize=(20, 10))
plt.subplot(2, 4, 1)
plt.plot(df_resample['accx'])
plt.plot(df_resample['accy'])
plt.plot(df_resample['accz'])
plt.title('acc x, y, z')
plt.subplot(2, 4, 5)
plt.plot(df_resample['diff_flowx'])
plt.plot(df_resample['diff_flowy'])
plt.title('diff_flow x & y')
plt.subplot(2, 4, 2)
fftshift = cross_correlation_using_fft(df_resample['diff_flowx'].values, df_resample['diff_flowy'].values)
dist = max(abs(fftshift-median(fftshift)))
shift = compute_shift(fftshift)
plt.plot(fftshift)
plt.title("\n".join(wrap('fx fy {:.1f} ms, k{:.1f}, std{:.1f}, dm{:.1f}, ndm{:.1f}'.format(\
shift * 1000/fps, kurtosis(fftshift), stdev(fftshift), dist, dist/stdev(fftshift)), 40)))
plt.subplot(2, 4, 3)
fftshift = cross_correlation_using_fft(df_resample['diff_flowsquare'].values, df_resample['accsquare'].values)
dist = max(abs(fftshift-median(fftshift)))
shift = compute_shift(fftshift)
plt.plot(fftshift)
plt.title("\n".join(wrap('fsq asq {:.1f} ms, k{:.1f}, std{:.1f}, dm{:.1f}, ndm{:.1f}'.format(\
shift * 1000/fps, kurtosis(fftshift), stdev(fftshift), dist, dist/stdev(fftshift)), 40)))
plt.subplot(2, 4, 4)
fftshift = cross_correlation_using_fft(df_resample['diff_flowx'].values, df_resample['accx'].values)
dist = max(abs(fftshift-median(fftshift)))
shift = compute_shift(fftshift)
plt.plot(fftshift)
plt.title("\n".join(wrap('fx ax {:.1f} ms, k{:.1f}, std{:.1f}, dm{:.1f}, ndm{:.1f}'.format(\
shift * 1000/fps, kurtosis(fftshift), stdev(fftshift), dist, dist/stdev(fftshift)), 40)))
plt.subplot(2, 4, 6)
fftshift = cross_correlation_using_fft(df_resample['diff_flowy'].values, df_resample['accz'].values)
dist = max(abs(fftshift-median(fftshift)))
shift = compute_shift(fftshift)
plt.plot(fftshift)
plt.title("\n".join(wrap('fy az {:.1f} ms, k{:.1f}, std{:.1f}, dm{:.1f}, ndm{:.1f}'.format(\
shift * 1000/fps, kurtosis(fftshift), stdev(fftshift), dist, dist/stdev(fftshift)), 40)))
plt.subplot(2, 4, 7)
fftshift = cross_correlation_using_fft(df_resample['diff_flowx'].values, df_resample['accy'].values)
dist = max(abs(fftshift-median(fftshift)))
shift = compute_shift(fftshift)
plt.plot(fftshift)
plt.title("\n".join(wrap(r'fx ay $\bf{{{:.1f}}}$ ms, k{:.1f}, std{:.1f}, dm{:.1f}, ndm$\bf{{{:.1f}}}$'.format(\
shift * 1000/fps, kurtosis(fftshift), stdev(fftshift), dist, dist/stdev(fftshift)), 40)))
fig.tight_layout()
fig.subplots_adjust(top=0.8)
plt.savefig(out_path)
plt.close()
return fx_ay_drift, fx_ay_conf
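# Illustrative call (assumes a resampled DataFrame with the expected acc*/diff_flow* columns):
#   drift_ms, confidence = drift_confidence(df_resample, 'drift.png', fps=30, pca=0, save_fig=0)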
|
the-stack_106_14333
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Update a simple plot as rapidly as possible to measure speed.
"""
## Add path to library (just for examples; you do not need this)
import initExample
from collections import deque
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
import numpy as np
import pyqtgraph as pg
from time import perf_counter
import pyqtgraph.parametertree as ptree
import pyqtgraph.functions as fn
import argparse
# defaults here result in the same configuration as the original PlotSpeedTest
parser = argparse.ArgumentParser()
parser.add_argument('--noise', dest='noise', action='store_true')
parser.add_argument('--no-noise', dest='noise', action='store_false')
parser.set_defaults(noise=True)
parser.add_argument('--nsamples', default=5000, type=int)
parser.add_argument('--frames', default=50, type=int)
parser.add_argument('--fsample', default=1000, type=float)
parser.add_argument('--frequency', default=0, type=float)
parser.add_argument('--amplitude', default=5, type=float)
parser.add_argument('--opengl', dest='use_opengl', action='store_true')
parser.add_argument('--no-opengl', dest='use_opengl', action='store_false')
parser.set_defaults(use_opengl=None)
parser.add_argument('--allow-opengl-toggle', action='store_true',
help="""Allow on-the-fly change of OpenGL setting. This may cause unwanted side effects.
""")
args = parser.parse_args()
if args.use_opengl is not None:
pg.setConfigOption('useOpenGL', args.use_opengl)
pg.setConfigOption('enableExperimental', args.use_opengl)
# don't limit frame rate to vsync
sfmt = QtGui.QSurfaceFormat()
sfmt.setSwapInterval(0)
QtGui.QSurfaceFormat.setDefaultFormat(sfmt)
class MonkeyCurveItem(pg.PlotCurveItem):
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
self.monkey_mode = ''
def setMethod(self, param, value):
self.monkey_mode = value
def paint(self, painter, opt, widget):
if self.monkey_mode not in ['drawPolyline']:
return super().paint(painter, opt, widget)
painter.setRenderHint(painter.RenderHint.Antialiasing, self.opts['antialias'])
painter.setPen(pg.mkPen(self.opts['pen']))
if self.monkey_mode == 'drawPolyline':
painter.drawPolyline(fn.arrayToQPolygonF(self.xData, self.yData))
app = pg.mkQApp("Plot Speed Test")
default_pen = pg.mkPen()
children = [
dict(name='sigopts', title='Signal Options', type='group', children=[
dict(name='noise', type='bool', value=args.noise),
dict(name='nsamples', type='int', limits=[0, None], value=args.nsamples),
dict(name='frames', type='int', limits=[1, None], value=args.frames),
dict(name='fsample', title='sample rate', type='float', value=args.fsample, units='Hz'),
dict(name='frequency', type='float', value=args.frequency, units='Hz'),
dict(name='amplitude', type='float', value=args.amplitude),
]),
dict(name='useOpenGL', type='bool', value=pg.getConfigOption('useOpenGL'),
readonly=not args.allow_opengl_toggle),
dict(name='enableExperimental', type='bool', value=pg.getConfigOption('enableExperimental')),
dict(name='pen', type='pen', value=default_pen),
dict(name='antialias', type='bool', value=pg.getConfigOption('antialias')),
dict(name='connect', type='list', limits=['all', 'pairs', 'finite', 'array'], value='all'),
dict(name='fill', type='bool', value=False),
dict(name='skipFiniteCheck', type='bool', value=False),
dict(name='plotMethod', title='Plot Method', type='list', limits=['pyqtgraph', 'drawPolyline'])
]
params = ptree.Parameter.create(name='Parameters', type='group', children=children)
pt = ptree.ParameterTree(showHeader=False)
pt.setParameters(params)
pw = pg.PlotWidget()
splitter = QtWidgets.QSplitter()
splitter.addWidget(pt)
splitter.addWidget(pw)
splitter.show()
pw.setWindowTitle('pyqtgraph example: PlotSpeedTest')
pw.setLabel('bottom', 'Index', units='B')
curve = MonkeyCurveItem(pen=default_pen, brush='b')
pw.addItem(curve)
rollingAverageSize = 1000
elapsed = deque(maxlen=rollingAverageSize)
def resetTimings(*args):
elapsed.clear()
def makeData(*args):
global data, connect_array, ptr
sigopts = params.child('sigopts')
nsamples = sigopts['nsamples']
frames = sigopts['frames']
Fs = sigopts['fsample']
A = sigopts['amplitude']
F = sigopts['frequency']
ttt = np.arange(frames * nsamples, dtype=np.float64) / Fs
data = A*np.sin(2*np.pi*F*ttt).reshape((frames, nsamples))
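    # data is laid out as (frames, nsamples); update() below draws one frame
    # (one row of this array) per timer tick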
if sigopts['noise']:
data += np.random.normal(size=data.shape)
connect_array = np.ones(data.shape[-1], dtype=bool)
ptr = 0
pw.setRange(QtCore.QRectF(0, -10, nsamples, 20))
def onUseOpenGLChanged(param, enable):
pw.useOpenGL(enable)
def onEnableExperimentalChanged(param, enable):
pg.setConfigOption('enableExperimental', enable)
def onPenChanged(param, pen):
curve.setPen(pen)
def onFillChanged(param, enable):
curve.setFillLevel(0.0 if enable else None)
params.child('sigopts').sigTreeStateChanged.connect(makeData)
params.child('useOpenGL').sigValueChanged.connect(onUseOpenGLChanged)
params.child('enableExperimental').sigValueChanged.connect(onEnableExperimentalChanged)
params.child('pen').sigValueChanged.connect(onPenChanged)
params.child('fill').sigValueChanged.connect(onFillChanged)
params.child('plotMethod').sigValueChanged.connect(curve.setMethod)
params.sigTreeStateChanged.connect(resetTimings)
makeData()
fpsLastUpdate = perf_counter()
def update():
global curve, data, ptr, elapsed, fpsLastUpdate
options = ['antialias', 'connect', 'skipFiniteCheck']
kwds = { k : params[k] for k in options }
if kwds['connect'] == 'array':
kwds['connect'] = connect_array
# Measure
t_start = perf_counter()
curve.setData(data[ptr], **kwds)
app.processEvents(QtCore.QEventLoop.ProcessEventsFlag.AllEvents)
t_end = perf_counter()
elapsed.append(t_end - t_start)
ptr = (ptr + 1) % data.shape[0]
# update fps at most once every 0.2 secs
if t_end - fpsLastUpdate > 0.2:
fpsLastUpdate = t_end
average = np.mean(elapsed)
fps = 1 / average
pw.setTitle('%0.2f fps - %0.1f ms avg' % (fps, average * 1_000))
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
if __name__ == '__main__':
pg.exec()
|
the-stack_106_14334
|
import pytest
import datetime
import decimal
from datetime import date
from dateutil.tz import tzoffset
from decimal import Decimal
from pyticketswitch import utils
from pyticketswitch import exceptions
class TestDateRangeStr:
def test_date_range_str(self):
start_date = date(2016, 6, 21)
end_date = date(2017, 1, 1)
date_range_str = utils.date_range_str(start_date, end_date)
assert date_range_str == '20160621:20170101'
def test_date_range_str_with_datetimes(self):
start_date = datetime.datetime(2016, 6, 21, 19, 30, 15)
end_date = datetime.datetime(2017, 1, 1, 13, 45, 30)
date_range_str = utils.date_range_str(start_date, end_date)
assert date_range_str == '20160621:20170101'
def test_date_range_str_with_no_end_date(self):
start_date = date(2016, 6, 21)
end_date = None
date_range_str = utils.date_range_str(start_date, end_date)
assert date_range_str == '20160621:'
def test_date_range_str_with_no_start_date(self):
start_date = None
end_date = date(2017, 1, 1)
date_range_str = utils.date_range_str(start_date, end_date)
assert date_range_str == ':20170101'
def test_date_range_str_with_no_start_date_or_end_date(self):
start_date = None
end_date = None
date_range_str = utils.date_range_str(start_date, end_date)
assert date_range_str == ''
def test_date_range_str_with_invalid_end_date(self):
start_date = date(2016, 6, 21)
end_date = 'FOOBAR!'
with pytest.raises(exceptions.InvalidParametersError):
utils.date_range_str(start_date, end_date)
def test_date_range_str_with_invalid_start_date(self):
start_date = 'SAUSAGES!'
end_date = date(2017, 1, 1)
with pytest.raises(exceptions.InvalidParametersError):
utils.date_range_str(start_date, end_date)
class TestIsoStrToDatetime:
BST = tzoffset('BST', 3600)
ZULU = tzoffset('ZULU', 0)
def test_with_core_iso(self):
date_str = '2016-09-16T19:30:00+01:00'
dt = utils.isostr_to_datetime(date_str)
assert dt == datetime.datetime(2016, 9, 16, 19, 30, 0, tzinfo=self.BST)
def test_with_core_zulu(self):
date_str = '2016-09-16T19:30:00Z'
dt = utils.isostr_to_datetime(date_str)
assert dt == datetime.datetime(2016, 9, 16, 19, 30, 0, tzinfo=self.ZULU)
def test_with_python_iso(self):
date_str = '2016-09-16T19:30:00+0100'
dt = utils.isostr_to_datetime(date_str)
assert dt == datetime.datetime(2016, 9, 16, 19, 30, 0, tzinfo=self.BST)
def test_with_not_datetime(self):
date_str = 'When the moon is in the forth corner of the jelly bean'
with pytest.raises(ValueError):
utils.isostr_to_datetime(date_str)
def test_with_none(self):
date_str = None
with pytest.raises(ValueError):
utils.isostr_to_datetime(date_str)
def test_with_empty(self):
date_str = ''
with pytest.raises(ValueError):
utils.isostr_to_datetime(date_str)
class TestYYYYToDate:
def test_yyyymmdd_to_date_valid_string(self):
date = utils.yyyymmdd_to_date('20160801')
assert date.year == 2016
assert date.month == 8
assert date.day == 1
def test_yyyymmdd_to_date_invalid_string(self):
with pytest.raises(TypeError):
utils.yyyymmdd_to_date(123)
with pytest.raises(ValueError):
utils.yyyymmdd_to_date('')
with pytest.raises(ValueError):
utils.yyyymmdd_to_date('wrong_date')
class TestSpecificDatesFromAPI:
def test_specific_dates_from_api(self):
api_data = {
'year_2016': {
'nov': {
'day_30': False,
'day_18': False,
},
'oct': {
'day_4': True,
'day_3': True,
'day_2': False,
'day_1': True,
}
}
}
results = utils.specific_dates_from_api_data(api_data)
assert len(results) == 3
assert type(results[0]) == datetime.date
class TestBitmaskToBooleanList:
"""
    NOTE: we are expecting big-endian masks, so the last bit of our mask
    should be the first element in our array
"""
def test_simple_masks(self):
        # 0 == 0b0 so we only have one bit and it's a 0
assert utils.bitmask_to_boolean_list(0) == [False]
        # 1 == 0b1 so we only have one bit and it's a 1
assert utils.bitmask_to_boolean_list(1) == [True]
        # 5 == 0b101 so we're expecting a length of 3 with the bits 1, 0 and 1
assert utils.bitmask_to_boolean_list(5) == [True, False, True]
        # 6 == 0b110 so we're expecting a length of 3 with the bits 0, 1 and 1
assert utils.bitmask_to_boolean_list(6) == [False, True, True]
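# A minimal sketch (the name `bitmask_to_boolean_list_sketch` is ours, not the
# library helper under test) of the big-endian convention described above: the
# least-significant bit of the mask becomes the first element of the list.
def bitmask_to_boolean_list_sketch(mask):
    bits = [bool(int(bit)) for bit in bin(mask)[2:]]
    return list(reversed(bits))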
class TestBitmaskToNumberedList:
def test_simple_masks(self):
        # 0 == 0b0 so we only have one bit and it's a 0
assert utils.bitmask_to_numbered_list(0) == []
        # 1 == 0b1 so we only have one bit and it's a 1
assert utils.bitmask_to_numbered_list(1) == [1]
        # 5 == 0b101 so the 1st and 3rd bits are 1's
assert utils.bitmask_to_numbered_list(5) == [1, 3]
        # 6 == 0b110 so the 2nd and 3rd bits are 1's
assert utils.bitmask_to_numbered_list(6) == [2, 3]
def test_with_none(self):
assert utils.bitmask_to_numbered_list(None) == []
class TestGetPrice:
def test_with_keys(self):
data = {
'min_price': 12.40,
'max_price': 22.00,
}
min_price = utils.get_price(data, 'min_price')
assert min_price == 12.40
max_price = utils.get_price(data, 'max_price')
assert max_price == 22.00
def test_with_float(self):
data = {'price': 12.40}
price = utils.get_price(data, 'price')
assert isinstance(price, float)
assert price == 12.40
def test_with_ints(self):
data = {'price': 12}
price = utils.get_price(data, 'price')
assert isinstance(price, float)
assert price == 12.0
def test_with_str_int(self):
data = {'price': '12'}
price = utils.get_price(data, 'price')
assert isinstance(price, float)
assert price == 12.0
def test_with_str_float(self):
data = {'price': '12.40'}
price = utils.get_price(data, 'price')
assert isinstance(price, float)
assert price == 12.40
def test_with_None(self):
data = {'price': None}
price = utils.get_price(data, 'price')
assert price is None
def test_with_missing_key(self):
data = {'price': 12.40}
max_price = utils.get_price(data, 'max_price')
assert max_price is None
def test_with_zero(self):
data = {'price': 0}
price = utils.get_price(data, 'price')
assert isinstance(price, float)
assert price == 0.0
def test_with_str_zero(self):
data = {'price': '0'}
price = utils.get_price(data, 'price')
assert isinstance(price, float)
assert price == 0
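# A minimal sketch (the name `get_price_sketch` is ours, not the pyticketswitch
# helper under test) of the coercion the tests above describe: a missing key or
# an explicit None stays None, everything else is coerced to float.
def get_price_sketch(data, key):
    value = data.get(key)
    return None if value is None else float(value)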
class TestAddPrices:
def test_adding_two_floats(self):
combined = utils.add_prices(1.0, 1.0)
assert combined == 2.0
def test_adding_two_decimals(self):
combined = utils.add_prices(Decimal('1.0'), Decimal('1.0'))
assert combined == Decimal('2.0')
def test_adding_two_ints(self):
combined = utils.add_prices(1, 1)
assert combined == 2
def test_adding_two_strs(self):
combined = utils.add_prices('1.0', '1.0')
assert combined == '2.0'
def test_adding_inexact_floats(self):
combined = utils.add_prices(1.1, 2.2)
assert combined == 3.3
def test_adding_three_floats(self):
combined = utils.add_prices(1.0, 1.0, 1.0)
assert combined == 3.0
def test_adding_a_decimal_and_a_float(self):
combined = utils.add_prices(Decimal('1.0'), 1.0)
assert combined == Decimal('2.0')
def test_adding_a_float_and_a_decimal(self):
combined = utils.add_prices(1.0, Decimal('1.0'))
assert combined == Decimal('2.0')
def test_adding_a_float_to_none(self):
with pytest.raises(TypeError):
utils.add_prices(1.0, None)
def test_adding_a_decimal_to_none(self):
with pytest.raises(TypeError):
utils.add_prices(Decimal('1.0'), None)
def test_zero_arguments(self):
with pytest.raises(TypeError):
utils.add_prices()
def test_one_argument(self):
with pytest.raises(TypeError):
utils.add_prices(1)
def test_invalid_string_price(self):
with pytest.raises(decimal.InvalidOperation):
utils.add_prices('not a price', 5)
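# A minimal sketch (the name `add_prices_sketch` is ours, not the library
# helper under test) consistent with the behaviour exercised above: every
# operand is routed through Decimal so float rounding errors cancel out, and
# the result is converted back to the type of the first argument unless any
# operand was already a Decimal.
def add_prices_sketch(*prices):
    if len(prices) < 2:
        raise TypeError('at least two prices are required')
    if any(price is None for price in prices):
        raise TypeError('cannot add None to a price')
    total = sum(Decimal(str(price)) for price in prices)
    if any(isinstance(price, Decimal) for price in prices):
        return total
    return type(prices[0])(total)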
class TestFilterNoneFromParameters:
def test_with_parameters(self):
params = {
'foo': 'bar',
'thing': None,
'lol': 'beans',
}
assert utils.filter_none_from_parameters(params) == {
'foo': 'bar',
'lol': 'beans',
}
|
the-stack_106_14336
|
"""
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Parameters
----------
points : (Npoints, Ndims) ndarray of floats
Data point coordinates.
values : (Npoints,) ndarray of float or complex
Data values.
Notes
-----
Uses ``scipy.spatial.cKDTree``
"""
def __init__(self, x, y):
x = _ndim_coords_from_arrays(x)
self._check_init_shape(x, y)
self.tree = cKDTree(x)
self.points = x
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args)
xi = self._check_call_shape(xi)
dist, i = self.tree.query(xi)
return self.values[i]
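# Illustrative usage (not part of the original module): look up the value of
# the nearest known sample at arbitrary query points.
#
#   >>> pts = np.random.rand(100, 2)
#   >>> vals = np.sin(pts[:, 0]) * np.cos(pts[:, 1])
#   >>> interp = NearestNDInterpolator(pts, vals)
#   >>> interp(np.array([0.5, 0.1]), np.array([0.5, 0.9]))  # two query points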
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan):
"""
Interpolate unstructured N-dimensional data.
.. versionadded:: 0.9
Parameters
----------
points : ndarray of floats, shape (N, ndim)
Data point coordinates. Can either be an array of
size (N, ndim), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (N,)
Data values.
xi : ndarray of float, shape (M, ndim)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
tesselate the input point set to n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
Examples
--------
Suppose we want to interpolate the 2-D function
    >>> def func(x, y):
    ...     return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
|
the-stack_106_14337
|
#! /usr/bin/env python
# coding=utf-8
import os
import cv2
import random
import numpy as np
import tensorflow as tf
import tfyolo.core.utils as utils
from tfyolo.core.config import cfg
class Dataset(object):
"""implement Dataset here"""
def __init__(self, FLAGS, is_training: bool, dataset_type: str = "converted_coco"):
self.tiny = FLAGS.tiny
self.strides, self.anchors, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
self.dataset_type = dataset_type
self.annot_path = (
cfg.TRAIN.ANNOT_PATH if is_training else cfg.TEST.ANNOT_PATH
)
self.input_sizes = (
cfg.TRAIN.INPUT_SIZE if is_training else cfg.TEST.INPUT_SIZE
)
self.batch_size = (
cfg.TRAIN.BATCH_SIZE if is_training else cfg.TEST.BATCH_SIZE
)
self.data_aug = cfg.TRAIN.DATA_AUG if is_training else cfg.TEST.DATA_AUG
self.train_input_sizes = cfg.TRAIN.INPUT_SIZE
self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
self.num_classes = len(self.classes)
self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
self.max_bbox_per_scale = 150
self.annotations = self.load_annotations()
self.num_samples = len(self.annotations)
self.num_batchs = int(np.ceil(self.num_samples / self.batch_size))
self.batch_count = 0
def load_annotations(self):
with open(self.annot_path, "r") as f:
txt = f.readlines()
if self.dataset_type == "converted_coco":
annotations = [
line.strip()
for line in txt
if len(line.strip().split()[1:]) != 0
]
elif self.dataset_type == "yolo":
annotations = []
for line in txt:
image_path = line.strip()
root, _ = os.path.splitext(image_path)
with open(root + ".txt") as fd:
boxes = fd.readlines()
string = ""
for box in boxes:
box = box.strip()
box = box.split()
class_num = int(box[0])
center_x = float(box[1])
center_y = float(box[2])
half_width = float(box[3]) / 2
half_height = float(box[4]) / 2
string += " {},{},{},{},{}".format(
center_x - half_width,
center_y - half_height,
center_x + half_width,
center_y + half_height,
class_num,
)
annotations.append(image_path + string)
np.random.shuffle(annotations)
return annotations
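    # Worked example (illustrative): a "yolo" annotation line `0 0.5 0.5 0.2 0.4`
    # (class cx cy w h, all normalised) is rewritten above into the corner string
    # `0.4,0.3,0.6,0.7,0` appended after the image path; parse_annotation() later
    # scales these corners by the image width and height.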
def __iter__(self):
return self
def __next__(self):
with tf.device("/cpu:0"):
# self.train_input_size = random.choice(self.train_input_sizes)
self.train_input_size = cfg.TRAIN.INPUT_SIZE
self.train_output_sizes = self.train_input_size // self.strides
batch_image = np.zeros(
(
self.batch_size,
self.train_input_size,
self.train_input_size,
3,
),
dtype=np.float32,
)
batch_label_sbbox = np.zeros(
(
self.batch_size,
self.train_output_sizes[0],
self.train_output_sizes[0],
self.anchor_per_scale,
5 + self.num_classes,
),
dtype=np.float32,
)
batch_label_mbbox = np.zeros(
(
self.batch_size,
self.train_output_sizes[1],
self.train_output_sizes[1],
self.anchor_per_scale,
5 + self.num_classes,
),
dtype=np.float32,
)
batch_label_lbbox = np.zeros(
(
self.batch_size,
self.train_output_sizes[2],
self.train_output_sizes[2],
self.anchor_per_scale,
5 + self.num_classes,
),
dtype=np.float32,
)
batch_sbboxes = np.zeros(
(self.batch_size, self.max_bbox_per_scale, 4), dtype=np.float32
)
batch_mbboxes = np.zeros(
(self.batch_size, self.max_bbox_per_scale, 4), dtype=np.float32
)
batch_lbboxes = np.zeros(
(self.batch_size, self.max_bbox_per_scale, 4), dtype=np.float32
)
num = 0
if self.batch_count < self.num_batchs:
while num < self.batch_size:
index = self.batch_count * self.batch_size + num
if index >= self.num_samples:
index -= self.num_samples
annotation = self.annotations[index]
image, bboxes = self.parse_annotation(annotation)
(
label_sbbox,
label_mbbox,
label_lbbox,
sbboxes,
mbboxes,
lbboxes,
) = self.preprocess_true_boxes(bboxes)
batch_image[num, :, :, :] = image
batch_label_sbbox[num, :, :, :, :] = label_sbbox
batch_label_mbbox[num, :, :, :, :] = label_mbbox
batch_label_lbbox[num, :, :, :, :] = label_lbbox
batch_sbboxes[num, :, :] = sbboxes
batch_mbboxes[num, :, :] = mbboxes
batch_lbboxes[num, :, :] = lbboxes
num += 1
self.batch_count += 1
batch_smaller_target = batch_label_sbbox, batch_sbboxes
batch_medium_target = batch_label_mbbox, batch_mbboxes
batch_larger_target = batch_label_lbbox, batch_lbboxes
return (
batch_image,
(
batch_smaller_target,
batch_medium_target,
batch_larger_target,
),
)
else:
self.batch_count = 0
np.random.shuffle(self.annotations)
raise StopIteration
def random_horizontal_flip(self, image, bboxes):
if random.random() < 0.5:
_, w, _ = image.shape
image = image[:, ::-1, :]
bboxes[:, [0, 2]] = w - bboxes[:, [2, 0]]
return image, bboxes
def random_crop(self, image, bboxes):
if random.random() < 0.5:
h, w, _ = image.shape
max_bbox = np.concatenate(
[
np.min(bboxes[:, 0:2], axis=0),
np.max(bboxes[:, 2:4], axis=0),
],
axis=-1,
)
max_l_trans = max_bbox[0]
max_u_trans = max_bbox[1]
max_r_trans = w - max_bbox[2]
max_d_trans = h - max_bbox[3]
crop_xmin = max(
0, int(max_bbox[0] - random.uniform(0, max_l_trans))
)
crop_ymin = max(
0, int(max_bbox[1] - random.uniform(0, max_u_trans))
)
            crop_xmax = min(
                w, int(max_bbox[2] + random.uniform(0, max_r_trans))
            )
            crop_ymax = min(
                h, int(max_bbox[3] + random.uniform(0, max_d_trans))
            )
image = image[crop_ymin:crop_ymax, crop_xmin:crop_xmax]
bboxes[:, [0, 2]] = bboxes[:, [0, 2]] - crop_xmin
bboxes[:, [1, 3]] = bboxes[:, [1, 3]] - crop_ymin
return image, bboxes
def random_translate(self, image, bboxes):
if random.random() < 0.5:
h, w, _ = image.shape
max_bbox = np.concatenate(
[
np.min(bboxes[:, 0:2], axis=0),
np.max(bboxes[:, 2:4], axis=0),
],
axis=-1,
)
max_l_trans = max_bbox[0]
max_u_trans = max_bbox[1]
max_r_trans = w - max_bbox[2]
max_d_trans = h - max_bbox[3]
tx = random.uniform(-(max_l_trans - 1), (max_r_trans - 1))
ty = random.uniform(-(max_u_trans - 1), (max_d_trans - 1))
M = np.array([[1, 0, tx], [0, 1, ty]])
image = cv2.warpAffine(image, M, (w, h))
bboxes[:, [0, 2]] = bboxes[:, [0, 2]] + tx
bboxes[:, [1, 3]] = bboxes[:, [1, 3]] + ty
return image, bboxes
def parse_annotation(self, annotation):
line = annotation.split()
image_path = line[0]
if not os.path.exists(image_path):
raise KeyError("%s does not exist ... " % image_path)
image = cv2.imread(image_path)
if self.dataset_type == "converted_coco":
bboxes = np.array(
[list(map(int, box.split(","))) for box in line[1:]]
)
elif self.dataset_type == "yolo":
height, width, _ = image.shape
bboxes = np.array(
[list(map(float, box.split(","))) for box in line[1:]]
)
bboxes = bboxes * np.array([width, height, width, height, 1])
bboxes = bboxes.astype(np.int64)
if self.data_aug:
image, bboxes = self.random_horizontal_flip(
np.copy(image), np.copy(bboxes)
)
image, bboxes = self.random_crop(np.copy(image), np.copy(bboxes))
image, bboxes = self.random_translate(
np.copy(image), np.copy(bboxes)
)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image, bboxes = utils.image_preprocess(
np.copy(image),
[self.train_input_size, self.train_input_size],
np.copy(bboxes),
)
return image, bboxes
def preprocess_true_boxes(self, bboxes):
label = [
np.zeros(
(
self.train_output_sizes[i],
self.train_output_sizes[i],
self.anchor_per_scale,
5 + self.num_classes,
)
)
for i in range(3)
]
bboxes_xywh = [np.zeros((self.max_bbox_per_scale, 4)) for _ in range(3)]
bbox_count = np.zeros((3,))
for bbox in bboxes:
bbox_coor = bbox[:4]
bbox_class_ind = bbox[4]
            onehot = np.zeros(self.num_classes, dtype=np.float64)
onehot[bbox_class_ind] = 1.0
uniform_distribution = np.full(
self.num_classes, 1.0 / self.num_classes
)
deta = 0.01
smooth_onehot = onehot * (1 - deta) + deta * uniform_distribution
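            # e.g. with 16 classes the true class is smoothed to 0.99 + 0.01/16
            # = 0.990625 and every other class gets 0.01/16 = 0.000625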
bbox_xywh = np.concatenate(
[
(bbox_coor[2:] + bbox_coor[:2]) * 0.5,
bbox_coor[2:] - bbox_coor[:2],
],
axis=-1,
)
bbox_xywh_scaled = (
1.0 * bbox_xywh[np.newaxis, :] / self.strides[:, np.newaxis]
)
iou = []
exist_positive = False
for i in range(3):
anchors_xywh = np.zeros((self.anchor_per_scale, 4))
anchors_xywh[:, 0:2] = (
np.floor(bbox_xywh_scaled[i, 0:2]).astype(np.int32) + 0.5
)
anchors_xywh[:, 2:4] = self.anchors[i]
iou_scale = utils.bbox_iou(
bbox_xywh_scaled[i][np.newaxis, :], anchors_xywh
)
iou.append(iou_scale)
iou_mask = iou_scale > 0.3
if np.any(iou_mask):
xind, yind = np.floor(bbox_xywh_scaled[i, 0:2]).astype(
np.int32
)
label[i][yind, xind, iou_mask, :] = 0
label[i][yind, xind, iou_mask, 0:4] = bbox_xywh
label[i][yind, xind, iou_mask, 4:5] = 1.0
label[i][yind, xind, iou_mask, 5:] = smooth_onehot
bbox_ind = int(bbox_count[i] % self.max_bbox_per_scale)
bboxes_xywh[i][bbox_ind, :4] = bbox_xywh
bbox_count[i] += 1
exist_positive = True
if not exist_positive:
best_anchor_ind = np.argmax(np.array(iou).reshape(-1), axis=-1)
best_detect = int(best_anchor_ind / self.anchor_per_scale)
best_anchor = int(best_anchor_ind % self.anchor_per_scale)
xind, yind = np.floor(
bbox_xywh_scaled[best_detect, 0:2]
).astype(np.int32)
label[best_detect][yind, xind, best_anchor, :] = 0
label[best_detect][yind, xind, best_anchor, 0:4] = bbox_xywh
label[best_detect][yind, xind, best_anchor, 4:5] = 1.0
label[best_detect][yind, xind, best_anchor, 5:] = smooth_onehot
bbox_ind = int(
bbox_count[best_detect] % self.max_bbox_per_scale
)
bboxes_xywh[best_detect][bbox_ind, :4] = bbox_xywh
bbox_count[best_detect] += 1
label_sbbox, label_mbbox, label_lbbox = label
sbboxes, mbboxes, lbboxes = bboxes_xywh
return label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes
def __len__(self):
return self.num_batchs
|
the-stack_106_14341
|
from setuptools import setup
package_name = 'examples_rclpy_minimal_publisher'
setup(
name=package_name,
version='0.10.2',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
author='Mikael Arguedas',
author_email='[email protected]',
maintainer='Mikael Arguedas',
maintainer_email='[email protected]',
keywords=['ROS'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='Examples of minimal publishers using rclpy.',
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'publisher_old_school = examples_rclpy_minimal_publisher.publisher_old_school:main',
'publisher_local_function ='
' examples_rclpy_minimal_publisher.publisher_local_function:main',
'publisher_member_function ='
' examples_rclpy_minimal_publisher.publisher_member_function:main',
],
},
)
|
the-stack_106_14342
|
# model settings
model = dict(
type='CascadeRCNN',
num_stages=3,
# pretrained='open-mmlab://resnext101_32x4d',
pretrained=None,
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
# dcn=dict(
# modulated=False,
# groups=32,
# deformable_groups=1,
# fallback_on_stride=False),
# stage_with_dcn=(False, True, True, True),
),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.02, 0.05, 0.1, 0.5, 1.0, 2.0, 10.0, 20.0, 50.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=16,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=16,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=16,
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
])
# model training and testing settings
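# The three bbox heads above and the three rcnn assigners below implement the
# cascade: each stage raises the positive IoU threshold (0.5 -> 0.6 -> 0.7) and
# shrinks the regression target_stds, so later stages refine progressively
# tighter boxes.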
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='OHEMSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='OHEMSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='OHEMSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100),
keep_all_stages=False)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(2048, 905), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 905),
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=1,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=["/data1/bupi_data/round2/sparse_train_2coco_padding_1.json",
"/data1/bupi_data/round2/val_coco.json",
"/data1/bupi_data/round2/crop_val_image/after_slice_coco.json",
"/data1/bupi_data/round2/dense_crop_train_image/crop_dense_train_coco_fixbox.json",
"/data1/bupi_data/round2/sparse_crop_train_image/after_slice_coco.json"
],
img_prefix=["/data1/bupi_data/round2/sparse_trian_2coo_padding/",
"/data1/bupi_data/round2/val/",
"/data1/bupi_data/round2/crop_val_image/defect_image/",
"/data1/bupi_data/round2/dense_crop_train_image/defect/",
"/data1/bupi_data/round2/sparse_crop_train_image/defect_image/",
],
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = '/data1/lgj/bupi/round2/work_dirs/resnext101_data_aug/'
load_from = "/data1/lgj/bupi/round2/pretrained_model/epoch_12.pth"
# load_from = None
resume_from = None
workflow = [('train', 1)]
gpus_id = '0,1'
gpus_num = 2
|
the-stack_106_14343
|
from rl_coach.agents.actor_critic_agent import ActorCriticAgentParameters
from rl_coach.agents.policy_optimization_agent import PolicyGradientRescaler
from rl_coach.base_parameters import VisualizationParameters, PresetValidationParameters
from rl_coach.core_types import TrainingSteps, EnvironmentEpisodes, EnvironmentSteps, RunPhase
from rl_coach.environments.environment import SelectedPhaseOnlyDumpMethod, MaxDumpMethod
from rl_coach.environments.gym_environment import MujocoInputFilter, Mujoco
from rl_coach.exploration_policies.categorical import CategoricalParameters
from rl_coach.filters.reward.reward_rescale_filter import RewardRescaleFilter
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
####################
# Graph Scheduling #
####################
schedule_params = ScheduleParameters()
schedule_params.improve_steps = TrainingSteps(10000000000)
schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(10)
schedule_params.evaluation_steps = EnvironmentEpisodes(1)
schedule_params.heatup_steps = EnvironmentSteps(0)
#########
# Agent #
#########
agent_params = ActorCriticAgentParameters()
agent_params.algorithm.policy_gradient_rescaler = PolicyGradientRescaler.GAE
agent_params.algorithm.discount = 0.99
agent_params.algorithm.apply_gradients_every_x_episodes = 1
agent_params.algorithm.num_steps_between_gradient_updates = 5
agent_params.algorithm.gae_lambda = 1
agent_params.algorithm.beta_entropy = 0.01
agent_params.network_wrappers['main'].optimizer_type = 'Adam'
agent_params.network_wrappers['main'].learning_rate = 0.0001
agent_params.input_filter = MujocoInputFilter()
agent_params.input_filter.add_reward_filter('rescale', RewardRescaleFilter(1/200.))
agent_params.exploration = CategoricalParameters()
###############
# Environment #
###############
env_params = Mujoco()
env_params.level = 'CartPole-v0'
vis_params = VisualizationParameters()
vis_params.video_dump_methods = [SelectedPhaseOnlyDumpMethod(RunPhase.TEST), MaxDumpMethod()]
vis_params.dump_mp4 = False
########
# Test #
########
preset_validation_params = PresetValidationParameters()
preset_validation_params.test = True
preset_validation_params.min_reward_threshold = 150
preset_validation_params.max_episodes_to_achieve_reward = 300
preset_validation_params.num_workers = 8
graph_manager = BasicRLGraphManager(agent_params=agent_params, env_params=env_params,
schedule_params=schedule_params, vis_params=vis_params,
preset_validation_params=preset_validation_params)
|
the-stack_106_14346
|
import json # note: ujson fails this test due to float equality
import numpy as np
import pytest
from environments.mujoco.rand_param_envs.gym.spaces import (
Tuple,
Box,
Discrete,
MultiDiscrete,
)
@pytest.mark.parametrize(
"space",
[
Discrete(3),
Tuple([Discrete(5), Discrete(10)]),
Tuple([Discrete(5), Box(np.array([0, 0]), np.array([1, 5]))]),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
MultiDiscrete([[0, 1], [0, 1], [0, 100]]),
],
)
def test_roundtripping(space):
sample_1 = space.sample()
sample_2 = space.sample()
assert space.contains(sample_1)
assert space.contains(sample_2)
json_rep = space.to_jsonable([sample_1, sample_2])
json_roundtripped = json.loads(json.dumps(json_rep))
samples_after_roundtrip = space.from_jsonable(json_roundtripped)
sample_1_prime, sample_2_prime = samples_after_roundtrip
s1 = space.to_jsonable([sample_1])
s1p = space.to_jsonable([sample_1_prime])
s2 = space.to_jsonable([sample_2])
s2p = space.to_jsonable([sample_2_prime])
assert s1 == s1p, "Expected {} to equal {}".format(s1, s1p)
assert s2 == s2p, "Expected {} to equal {}".format(s2, s2p)
|
the-stack_106_14347
|
class DynamodbDecoder:
RESERVED_FIELDS = [
"uid",
"_id",
"_type",
"_source",
"_all",
"_parent",
"_fieldnames",
"_routing",
"_index",
"_size",
"_timestamp",
"_ttl",
]
@staticmethod
def decode_json(node):
data = {}
data["M"] = node
return DynamodbDecoder.decode_value(data, True)
@staticmethod
def decode_value(node, force_num=False):
for key, value in list(node.items()):
if key == "NULL":
return None
if key in ("S", "BOOL"):
return value
if key == "N":
if force_num:
return float(value)
return value
if key == "M":
data = {}
for key1, value1 in list(value.items()):
if key1 in DynamodbDecoder.RESERVED_FIELDS:
key1 = key1.replace("_", "__", 1)
data[key1] = DynamodbDecoder.decode_value(value1, True)
return data
if key in ("BS", "L"):
data = []
for item in value:
data.append(DynamodbDecoder.decode_value(item))
return data
if key == "SS":
data = []
for item in value:
data.append(item)
return data
if key == "NS":
data = []
for item in value:
if force_num:
data.append(float(item))
else:
data.append(item)
return data
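# Illustrative usage sketch (the item below is made up): decode_json unwraps a
# raw DynamoDB item into plain Python values, forcing numbers to float and
# doubling the leading underscore of keys that collide with RESERVED_FIELDS.
if __name__ == "__main__":
    raw_item = {
        "title": {"S": "example"},
        "count": {"N": "3"},
        "tags": {"SS": ["a", "b"]},
        "_id": {"S": "abc123"},
    }
    # -> {'title': 'example', 'count': 3.0, 'tags': ['a', 'b'], '__id': 'abc123'}
    print(DynamodbDecoder.decode_json(raw_item))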
|
the-stack_106_14349
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core classes and core ops for LabeledTensor.
Core ops are ops which will eventually be called by LabeledTensor methods,
and ops which a core op depends upon.
For example, `add` is a core op because we'll eventually support the `+`
operator.
Non-core ops should go in `ops.py`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import numbers
import types
import numpy as np
from six import binary_type
from six import string_types
from six import text_type
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# pylint: disable=invalid-name
# Types coercible to Axis.labels
# We use this instead of collections.Sequence to exclude strings.
LabelsLike = tc.Union(np.ndarray, range, list, tuple)
# Types coercible to a tf.Dimension
DimensionLike = tc.Optional(tc.Union(tensor_shape.Dimension, int))
# Types usable for axis values
AxisValue = tc.Union(LabelsLike, DimensionLike)
# Valid scalar values for TensorFlow
Scalar = tc.Union(numbers.Number, bool, binary_type, text_type)
# pylint: enable=invalid-name
class Axis(object):
"""Size and label information for an axis.
Axis contains either a tf.Dimension indicating the size of an axis,
or a tuple of tick labels for the axis.
If tick labels are provided, they must be unique.
"""
@tc.accepts(object, string_types, AxisValue)
def __init__(self, name, value):
"""Construct an Axis.
Args:
name: Name of the axis.
value: Either None, an int or tf.Dimension giving the size of the axis,
or a sequence that is not a string additionally providing coordinate
(tick) labels.
Raises:
ValueError: If the user provides labels with duplicate values.
"""
if isinstance(value, tensor_shape.Dimension):
dimension = value
labels = None
elif isinstance(value, int) or value is None:
dimension = tensor_shape.Dimension(value)
labels = None
else:
dimension = tensor_shape.Dimension(len(value))
labels = tuple(value)
if dimension.value == 0:
# Treat a zero-length axis as if it has labels.
labels = ()
if labels is not None:
index = dict(zip(labels, range(len(labels))))
if len(index) != len(labels):
raise ValueError('Tick labels must be unique, but got {}'
.format(labels))
else:
index = None
self._name = name # type: string_types
self._dimension = dimension # type: tensor_shape.Dimension
self._labels = labels # type: Optional[tuple]
self._index = index # type: Optional[Dict[Any, int]]
@property
@tc.returns(string_types)
def name(self):
return self._name
@tc.returns(string_types)
def __repr__(self):
# Axis('x', Dimension(2))
    # TODO(shoyer): make very long reprs more succinct?
return "%s('%s', %r)" % (type(self).__name__, self.name, self.value)
@tc.returns(bool)
def __eq__(self, other):
return (isinstance(other, Axis) and self.name == other.name and
self.size == other.size and self.labels == other.labels)
def __hash__(self):
return hash((self.name, self.size, self.labels))
@tc.returns(bool)
def __ne__(self, other):
return not self == other
@tc.returns(int)
def __len__(self):
size = self.size
if size is None:
raise ValueError('axis %r has unknown length' % self.name)
return size
@property
@tc.returns(tc.Optional(tensor_shape.Dimension))
def dimension(self):
return self._dimension
@property
@tc.returns(tc.Optional(int))
def size(self):
return self._dimension.value
@property
@tc.returns(tc.Union(tuple, tensor_shape.Dimension))
def value(self):
"""Returns the tf.Dimension or tuple specifying axis ticks."""
if self.labels is None:
return self.dimension
else:
return self.labels
@property
@tc.returns(tc.Optional(tuple))
def labels(self):
"""Returns the tuple containing coordinate labels, else None."""
return self._labels
def index(self, value):
"""Returns the integer position of the given tick label."""
if self._index is None:
raise ValueError('Axis does not have tick labels')
return self._index[value]
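# Example (illustrative, not part of the original module): an Axis can wrap a
# bare size or a sequence of tick labels; only the labelled form supports
# index().
#
#   >>> Axis('x', 3)
#   Axis('x', Dimension(3))
#   >>> Axis('channel', ['r', 'g', 'b']).index('g')
#   1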
# tc class for anything that can be coerced into an Axis
# pylint: disable=invalid-name
AxisLike = tc.Union(Axis, tc.Tuple(string_types, AxisValue))
# pylint: enable=invalid-name
@tc.returns(Axis)
@tc.accepts(AxisLike)
def as_axis(axis_data):
"""Convert an AxisLike object into an Axis.
Args:
axis_data: Axis object or tuple (axis_name, axis_value) describing an axis.
Returns:
Axis object. This may be the original object if axis_data is an Axis.
"""
if isinstance(axis_data, Axis):
axis = axis_data
else:
axis = Axis(*axis_data)
return axis
class Axes(collections.Mapping):
"""Axis names and indices for a tensor.
It is an ordered mapping, with keys given by axis name and values given
  by Axis objects. Duplicate axis names are not allowed.
"""
@tc.accepts(object, tc.List(AxisLike))
def __init__(self, axes):
"""Construct an Axes.
Args:
axes: A list of Axis objects or (axis_name, axis_value) tuples.
Raises:
ValueError: If the user provides empty or duplicate axis names.
"""
self._axes = collections.OrderedDict()
for axis_data in axes:
axis = as_axis(axis_data)
name = axis.name
if name in self._axes:
raise ValueError('Duplicate axis name: %s' % name)
self._axes[name] = axis
def __iter__(self):
return iter(self._axes)
@tc.returns(string_types)
def __repr__(self):
# Axes([('x', Dimension(2)),
# ('y', ['a', 'b', 'c']),
# ('z', Dimension(4))])
cls_name = type(self).__name__
values = ["('%s', %r)" % (v.name, v.value) for v in self._axes.values()]
values_repr = (',\n' + ' ' * len(cls_name + '([')).join(values)
return '%s([%s])' % (cls_name, values_repr)
@tc.returns(Axis)
@tc.accepts(object, string_types)
def __getitem__(self, name):
return self._axes[name]
@tc.returns(bool)
def __contains__(self, name):
return name in self._axes
@tc.returns(int)
def __len__(self):
return len(self._axes)
def __hash__(self):
return hash(tuple(self.items()))
@tc.accepts(object, string_types)
def remove(self, axis_name):
"""Creates a new Axes object without the given axis."""
if axis_name not in self:
raise KeyError(axis_name)
remaining_axes = [axis for axis in self.values() if axis.name != axis_name]
return Axes(remaining_axes)
class LabeledTensor(object):
"""A tensor with annotated axes.
It has the following invariants:
1) The dimensionality of the tensor is equal to the number of elements
in axes.
2) The number of coordinate values in the ith dimension is equal to the
size of the tensor in the ith dimension.
Attributes:
tensor: tf.Tensor containing the data.
axes: lt.Axes containing axis names and coordinate labels.
"""
@tc.accepts(object, ops.Tensor,
tc.Union(Axes, tc.Collection(tc.Union(string_types, AxisLike))))
def __init__(self, tensor, axes):
"""Construct a LabeledTenor.
Args:
tensor: The underlying tensor containing the data.
axes: An Axes object, or a collection of strings, Axis objects or tuples
of (name, value) pairs indicating the axes.
Raises:
ValueError: If the provided axes do not satisfy the class invariants.
"""
self._tensor = tensor
shape = tensor.get_shape()
if isinstance(axes, Axes):
unvalidated_axes = axes
else:
mutable_axes = []
for position, axis_like in enumerate(axes):
if isinstance(axis_like, string_types):
          # The coordinates for this axis are unlabeled.
# Infer the size of the axis.
value = shape[position]
axis_like = (axis_like, value)
mutable_axes.append(axis_like)
      # Construct the Axes object, which will additionally validate the contents
# of the object.
unvalidated_axes = Axes(mutable_axes)
# Check our invariants.
# First, the rank of the tensor must be equal to the number of axes.
if len(shape) != len(unvalidated_axes):
raise ValueError('Tensor rank was not equal to the number of axes: %r, %r'
% (shape, unvalidated_axes))
# Second, the size of each tensor dimension must match the size of the
# corresponding indices.
for (d, axis) in zip(shape, unvalidated_axes.values()):
if d != axis.size:
raise ValueError(
'Provided axis size %d does not match tensor dimension size %d' %
(axis.size, d))
self._axes = unvalidated_axes
def __repr__(self):
# <LabeledTensor 'foo' shape=(2, 3, 4) dtype=float32
# axes=[('x', Dimension(2)),
# ('y', ('a', 'b', 'c'),
# ('z', Dimension(4))]>
axes = ["('%s', %r)" % (v.name, v.value) for v in self.axes.values()]
axes_repr = (',\n' + ' ' * len(' axes=[')).join(axes)
return ("<%s '%s' shape=%s dtype=%s\n axes=[%s]>" %
(type(self).__name__, self.tensor.name, self.tensor.get_shape(),
self.tensor.dtype.name, axes_repr))
@property
def tensor(self):
return self._tensor
def _as_graph_element(self):
"""Support tf.Graph.as_graph_element on LabeledTensor objects.
This allows operations such as tf.name_scope to take labeled tensors.
Returns:
self.tensor
"""
return self.tensor
@property
def axes(self):
return self._axes
# properties/methods directly borrowed from tf.Tensor:
@property
def dtype(self):
return self._tensor.dtype
@property
def name(self):
return self._tensor.name
def get_shape(self):
"""Returns the TensorShape that represents the shape of this tensor.
See tf.Tensor.get_shape().
Returns:
A TensorShape representing the shape of this tensor.
"""
return self._tensor.get_shape()
# TODO(shoyer): consider how/if to implement .eval(). Maybe it should return
# an xarray.DataArray?
def __getitem__(self, key):
# This should work exactly like tf.Tensor.__getitem__, except it preserves
# labels.
if not isinstance(key, tuple):
key = (key,)
if len(key) != len(self.axes):
raise ValueError('indexer %r must have the same length as the Tensor '
'rank (%r)' % (key, len(self.axes)))
selection = {a: k for a, k in zip(self.axes.keys(), key)}
return slice_function(self, selection)
# special methods for overloading arithmetic operations:
def __abs__(self):
return abs_function(self)
def __neg__(self):
return neg(self)
def __pos__(self):
return self
def __add__(self, other):
return add(self, other)
def __radd__(self, other):
return add(other, self)
def __sub__(self, other):
return sub(self, other)
def __rsub__(self, other):
return sub(other, self)
def __mul__(self, other):
return mul(self, other)
def __rmul__(self, other):
return mul(other, self)
def __truediv__(self, other):
return div(self, other)
__div__ = __truediv__
def __rtruediv__(self, other):
return div(other, self)
__rdiv__ = __rtruediv__
def __mod__(self, other):
return mod(self, other)
def __rmod__(self, other):
return mod(other, self)
def __pow__(self, other):
return pow_function(self, other)
def __rpow__(self, other):
return pow_function(other, self)
# logical operations:
def __invert__(self):
return logical_not(self)
def __and__(self, other):
return logical_and(self, other)
def __or__(self, other):
return logical_or(self, other)
def __xor__(self, other):
return logical_xor(self, other)
# boolean operations:
def __lt__(self, other):
return less(self, other)
def __le__(self, other):
return less_equal(self, other)
def __gt__(self, other):
return greater(self, other)
def __ge__(self, other):
return greater_equal(self, other)
def __eq__(self, other):
# for consistency with tf.Tensor
if not isinstance(other, LabeledTensor):
return False
return self.tensor == other.tensor and self.axes == other.axes
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.tensor, self.axes))
# typecheck type abbreviations:
# abbreviations for third-party types with very long reprs
tc.register_type_abbreviation(tensor_shape.Dimension, 'tensorflow.Dimension')
tc.register_type_abbreviation(ops.Tensor, 'tensorflow.Tensor')
tc.register_type_abbreviation(dtypes.DType, 'tensorflow.DType')
# core LabeledTensor types
tc.register_type_abbreviation(Axis, 'labeled_tensor.Axis')
tc.register_type_abbreviation(Axes, 'labeled_tensor.Axes')
tc.register_type_abbreviation(LabeledTensor, 'labeled_tensor.LabeledTensor')
@tc.returns(ops.Tensor)
@tc.accepts(LabeledTensor)
def _convert_labeled_tensor_to_tensor(value, *args, **kwargs):
# call ops.convert_to_tensor to handle optional arguments appropriately
return ops.internal_convert_to_tensor(value.tensor, *args, **kwargs)
ops.register_tensor_conversion_function(LabeledTensor,
_convert_labeled_tensor_to_tensor)
# tc class for anything that can be coerced into a LabeledTensor
# pylint: disable=invalid-name
LabeledTensorLike = tc.Union(LabeledTensor, ops.Tensor, np.ndarray, Scalar)
# pylint: enable=invalid-name
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, object, tc.Optional(string_types))
def convert_to_labeled_tensor(value, dtype=None, name=None):
"""Converts the given `value` to a `LabeledTensor`.
This function accepts `LabeledTensor` objects, 0-dimensional `Tensor` objects
and numpy arrays, and Python scalars. Higher dimensional unlabeled tensors
must use the `LabeledTensor` constructor explicitly.
Args:
value: Object to convert.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of value.
name: Optional name to use if a new Tensor is created.
Returns:
`value` converted into a `LabeledTensor` object.
Raises:
ValueError: If the output would have rank>0 but the input was not already a
`LabeledTensor`.
"""
# TODO(shoyer): consider extending to accept xarray.DataArray as input.
if isinstance(value, LabeledTensor):
axes = value.axes.values()
value = value.tensor
else:
axes = []
# We call convert_to_tensor even for LabeledTensor input because it also
# checks to make sure the dtype argument is compatible.
tensor = ops.convert_to_tensor(value, dtype=dtype, name=name)
if len(tensor.get_shape()) != len(axes):
raise ValueError('cannot automatically convert unlabeled arrays or tensors '
'with rank>0 into LabeledTensors: %r' % value)
return LabeledTensor(tensor, axes)
@tc.returns(Axis)
@tc.accepts(tc.Collection(Axis))
def concat_axes(axes):
"""Concatenate a list of Axes.
Args:
axes: A collection of Axis objects.
Returns:
The concatenation of the axes.
If all axes have labels, the result has the concatenation of the labels.
Else, the result has no labels, and its size is the sum of the sizes
of the axes.
Raises:
ValueError: If `others` is not a collection of Axes or if it is empty.
"""
if not axes:
raise ValueError('axes must not be empty')
for a in axes:
if not isinstance(a, Axis):
raise ValueError('Expected an Axis, but got %r of type %r' % (a, type(a)))
names = set(a.name for a in axes)
if len(names) > 1:
raise ValueError('axes do not all have the same name: %r' % names)
name, = names
all_have_labels = all(a.labels is not None for a in axes)
any_has_unknown_size = any(a.size is None for a in axes)
if all_have_labels:
value = tuple(label for a in axes for label in a.labels)
elif any_has_unknown_size:
value = None
else:
value = sum(len(a) for a in axes)
return Axis(name, value)
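# Example (illustrative, not part of the original module): axes with the same
# name concatenate their labels when all of them are labelled, and fall back to
# summed sizes otherwise.
#
#   >>> concat_axes([Axis('x', ['a', 'b']), Axis('x', ['c'])])
#   Axis('x', ('a', 'b', 'c'))
#   >>> concat_axes([Axis('x', 2), Axis('x', 3)])
#   Axis('x', Dimension(5))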
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, tc.Optional(string_types))
def identity(labeled_tensor, name=None):
"""The identity op.
See tf.identity.
Args:
labeled_tensor: The input tensor.
name: Optional op name.
Returns:
The tensor.
"""
with ops.name_scope(name, 'lt_identity', [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
return LabeledTensor(
array_ops.identity(
labeled_tensor.tensor, name=scope),
labeled_tensor.axes)
# We don't call this slice because that shadows a built-in. Instead, we alias
# this to lt.slice in __init__.py.
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike,
tc.Mapping(string_types, tc.Union(int, slice)),
tc.Optional(string_types))
def slice_function(labeled_tensor, selection, name=None):
"""Slice out a subset of the tensor.
This is an analog of tf.slice.
For example:
>>> tensor = tf.reshape(tf.range(0, 6), [3, 2])
>>> labeled_tensor = lt.LabeledTensor(tensor, ['a', ('b', ['foo', 'bar'])])
>>> lt.slice(labeled_tensor, {'a': slice(0, 2), 'b': 1})
<LabeledTensor 'lt_slice:...' shape=(2,) dtype=int32
axes=[('a', Dimension(2))]>
Args:
labeled_tensor: The input tensor.
selection: A dictionary of type str -> Union(int, slice of int) mapping
axis names to sub-selections.
name: Optional op name.
Returns:
The slice as a `LabeledTensor`.
"""
with ops.name_scope(name, 'lt_slice', [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
slices = []
for axis_name in labeled_tensor.axes:
if axis_name not in selection:
# We're not sub-selecting this axis, so use the full slice.
slices.append(slice(None))
else:
slices.append(selection[axis_name])
sliced_tensor = labeled_tensor.tensor[tuple(slices)]
sliced_axes = []
for axis, s in zip(labeled_tensor.axes.values(), slices):
# We sub-select this axis's index with the slice s.
# `s` is either an int or a proper slice.
if isinstance(s, slice):
if axis.labels is None:
# We're not tracking coordinate names for this axis.
sliced_axes.append(axis.name)
else:
sliced_axes.append((axis.name, axis.labels[s]))
else:
# If the slice is an int this dimension now has size 1, so we remove it.
assert isinstance(s, int)
return LabeledTensor(
array_ops.identity(
sliced_tensor, name=scope), sliced_axes)
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike,
tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def transpose(labeled_tensor, axis_order=None, name=None):
"""Permute a tensor's axes.
See tf.transpose.
Args:
labeled_tensor: The input tensor.
axis_order: Optional desired axis order, as a list of names. By default, the
order of axes is reversed.
name: Optional op name.
Returns:
The permuted tensor.
Raises:
ValueError: If axis_order isn't a permutation of the existing axes.
"""
with ops.name_scope(name, 'lt_transpose', [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
original_order = list(labeled_tensor.axes.keys())
if axis_order is None:
axis_order = list(reversed(original_order))
elif sorted(axis_order) != sorted(original_order):
raise ValueError(
'The new axis order must have the same names as the original axes, '
'but the new order is %r while the original order is %r' %
(axis_order, original_order))
axis_names = list(labeled_tensor.axes.keys())
permutation = [axis_names.index(n) for n in axis_order]
# Note: TensorFlow doesn't copy data for the identity transpose.
transpose_tensor = array_ops.transpose(
labeled_tensor.tensor, permutation, name=scope)
permuted_axes = [labeled_tensor.axes[n] for n in axis_order]
return LabeledTensor(transpose_tensor, permuted_axes)
@tc.returns(LabeledTensor)
@tc.accepts(
LabeledTensorLike,
tc.Collection(
tc.Union(string_types, tc.Tuple(string_types, collections.Hashable))),
tc.Optional(string_types))
def expand_dims(labeled_tensor, axes, name=None):
"""Insert dimensions of size 1.
See tf.expand_dims.
Args:
labeled_tensor: The input tensor.
axes: The desired axis names as strings or tuples of (name, label),
where `label` is the coordinate name for the new dimension `name`.
These must include the existing axis names, and the existing names must
appear in the same order in this list as they do in the input tensor.
name: Optional op name.
Returns:
A tensor with an axis for each axis in axes.
New axes are created with size 1 and do not have labeled coordinates.
Raises:
AxisOrderError: If axis names don't appear in the same order in axes
and the labeled tensor.
"""
with ops.name_scope(name, 'lt_expand_dims', [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
axis_names = [a if isinstance(a, string_types) else a[0] for a in axes]
check_axis_order(labeled_tensor, axis_names)
reshaped_axes = []
shape = []
for axis_spec in axes:
if axis_spec in labeled_tensor.axes:
axis = labeled_tensor.axes[axis_spec]
reshaped_axes.append(axis)
shape.append(-1 if axis.size is None else axis.size)
else:
if isinstance(axis_spec, string_types):
reshaped_axes.append((axis_spec, 1))
else:
(name, label) = axis_spec
reshaped_axes.append((name, (label,)))
shape.append(1)
reshaped_tensor = array_ops.reshape(
labeled_tensor.tensor, shape, name=scope)
return LabeledTensor(reshaped_tensor, reshaped_axes)
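# Illustrative sketch (added for clarity; not part of the original module):
# expand_dims inserts size-1 axes while preserving existing ones, e.g. for a
# LabeledTensor `lt_xz` with axes ['x', 'z'],
#   expand_dims(lt_xz, ['x', 'y', 'z'])   # -> axes ['x', ('y', 1), 'z']
# The existing names must already appear in the same relative order, otherwise
# check_axis_order raises AxisOrderError.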
# This should only be added to a graph collection once.
_AXIS_ORDER_KEY = ('__axis_order',)
@tc.returns(tc.Optional(tc.List(string_types)))
def get_axis_order():
"""Get the axis_order set by any containing axis_order_scope.
Returns:
List of strings giving an order to use for axis names, or None, if no axis
order is set.
"""
# By storing axis_order in the graph, we can ensure that axis_order_scope is
# thread-safe.
axis_order_list = ops.get_collection(_AXIS_ORDER_KEY)
if axis_order_list:
axis_order, = axis_order_list
else:
axis_order = None
return axis_order
@tc.accepts(tc.Optional(tc.List(string_types)))
def _set_axis_order(axis_order):
axis_order_list = ops.get_collection_ref(_AXIS_ORDER_KEY)
if axis_order_list:
axis_order_list[0] = axis_order
else:
axis_order_list.append(axis_order)
@contextlib.contextmanager
@tc.accepts(tc.Optional(tc.List(string_types)))
def axis_order_scope(axis_order=None):
"""Set axis order for the result of broadcasting operations within a scope.
This allows you to ensure that tensors resulting from arithmetic have a
predictable axis order.
Example usage:
with lt.axis_order_scope(['x', 'y', 'z']):
# result is guaranteed to have the correct axis order
result = w + b
You can nest scopes, in which case only the inner-most scope applies, e.g.,
    with lt.axis_order_scope(['x', 'y', 'z']):
      with lt.axis_order_scope():
        result = w + b  # uses the default (left-most) axis ordering
Args:
axis_order: optional list of strings providing axis names. By default,
creates a scope without axis order.
Yields:
The provided axis_order or `None`.
"""
original_axis_order = get_axis_order()
_set_axis_order(axis_order)
try:
yield axis_order
finally:
_set_axis_order(original_axis_order)
@tc.returns(tc.List(string_types))
def _get_valid_axis_order():
axis_order = get_axis_order()
if axis_order is None:
raise AxisOrderError('an explicit axis order must be provided with the '
'axis_order argument or by using an axis_order_scope')
return axis_order
class AxisOrderError(ValueError):
"""Error class for cases where there is no valid axis order."""
# TODO(shoyer): should this function accept a list of labeled tensors instead?
@tc.returns(type(None))
@tc.accepts(LabeledTensorLike, tc.Optional(tc.Collection(string_types)))
def check_axis_order(labeled_tensor, axis_order=None):
"""Verify that the given tensor has a consistent axis order.
Args:
labeled_tensor: The input tensor. All axes on this tensor must appear in
axis_order.
axis_order: Optional desired axis order, as a list of names. If not
provided, defaults to the current axis_order_scope (if set).
Raises:
AxisOrderError: If the axis_order is unavailable, inconsistent or does not
include all existing axes.
"""
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
if axis_order is None:
axis_order = _get_valid_axis_order()
relevant_axis_order = [a for a in axis_order if a in labeled_tensor.axes]
if len(relevant_axis_order) < len(labeled_tensor.axes):
raise AxisOrderError(
'not all axis names appear in the required axis order %r: %r' %
(axis_order, labeled_tensor))
if relevant_axis_order != list(labeled_tensor.axes):
raise AxisOrderError(
'axes on a labeled tensor do not appear in the same order as the '
'required axis order %r: %r' % (axis_order, labeled_tensor))
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike,
tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def impose_axis_order(labeled_tensor, axis_order=None, name=None):
"""Impose desired axis order on a labeled tensor.
Args:
labeled_tensor: The input tensor.
axis_order: Optional desired axis order, as a list of names. If not
provided, defaults to the current axis_order_scope (if set).
name: Optional op name.
Returns:
Labeled tensor with possibly transposed axes.
Raises:
AxisOrderError: If no axis_order is provided or axis_order does not contain
all axes on the input tensor.
"""
with ops.name_scope(name, 'lt_impose_axis_order', [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
if axis_order is None:
axis_order = _get_valid_axis_order()
relevant_axis_order = [a for a in axis_order if a in labeled_tensor.axes]
return transpose(labeled_tensor, relevant_axis_order, name=scope)
@tc.returns(tc.Optional(list))
@tc.accepts(list, list)
def _find_consistent_ordering(a, b):
"""Find the left-most consistent ordering between two lists of unique items.
A consistent ordering combines all elements in both a and b while keeping all
elements in their original order in both inputs. The left-most consistent
ordering orders elements from `a` not found in `b` before elements in `b` not
found in `a`.
For example, given ['x', 'z'] and ['y', 'z'], both ['x', 'y', 'z'] and ['y',
'x', 'z'] are consistent orderings because each of the inputs appears in
each consistent ordering in the same order, and ['x', 'y', 'z'] is the
left-most, because 'x' appears only in `a` and 'y' appears only in `b`. In
contrast, there is no consistent ordering between ['x', 'y'] and ['y', 'x'].
Args:
a: list with unique elements.
b: list with unique elements.
Returns:
List containing all elements in either a or b, or None, if no consistent
ordering exists.
"""
a_set = set(a)
b_set = set(b)
i = 0
j = 0
ordering = []
while i < len(a) and j < len(b):
if a[i] not in b_set:
ordering.append(a[i])
i += 1
elif b[j] not in a_set:
ordering.append(b[j])
j += 1
elif a[i] == b[j]:
ordering.append(a[i])
i += 1
j += 1
else:
return None
ordering.extend(a[i:])
ordering.extend(b[j:])
return ordering
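# Illustrative sketch (added for clarity; not part of the original module):
# the left-most consistent ordering merges two axis-name lists while keeping
# each input's relative order, placing names found only in `a` first:
#
#   _find_consistent_ordering(['x', 'z'], ['y', 'z'])  # -> ['x', 'y', 'z']
#   _find_consistent_ordering(['x', 'y'], ['y', 'x'])  # -> None (no such order)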
@tc.returns(LabeledTensor, LabeledTensor, Axes)
@tc.accepts(LabeledTensorLike, LabeledTensorLike, tc.Optional(string_types))
def align(labeled_tensor_0, labeled_tensor_1, name=None):
"""Align the axes of two tensors so they may be broadcast to each other.
Axes are ordered by the current axis order scope, if present, or by the left-
most consistent ordering. An exception is raised if it is impossible to align
the tensors without a transpose (align never copies the input data).
Example usage:
>>> a = lt.LabeledTensor(tf.ones((2, 4)), ['x', 'z'])
>>> b = lt.LabeledTensor(tf.ones((3, 4)), ['y', 'z'])
>>> a2, b2, axes = lt.align(a, b)
>>> a2
<LabeledTensor 'lt_align_1/lt_align_1/0:...' shape=(2, 1, 4) dtype=float32
axes=[('x', Dimension(2)),
('y', Dimension(1)),
('z', Dimension(4))]>
>>> b2
<LabeledTensor 'lt_align_1/lt_align_1/1:...' shape=(1, 3, 4) dtype=float32
axes=[('x', Dimension(1)),
('y', Dimension(3)),
('z', Dimension(4))]>
>>> axes
Axes([('x', Dimension(2)),
('y', Dimension(3)),
('z', Dimension(4))])
Args:
labeled_tensor_0: An input tensor.
labeled_tensor_1: An input tensor.
name: Optional op name.
Returns:
The aligned tensors and the axes the resulting tensor would have if the two
aligned tensors were broadcast to each other. The aligned tensors have the
same rank but not necessarily the same shape, with axes in the same order.
Raises:
ValueError: If axes with the same name on the inputs are not equal.
AxisOrderError: If there is no way to reshape the input tensors into the
output without a transpose.
"""
with ops.name_scope(name, 'lt_align',
[labeled_tensor_0, labeled_tensor_1]) as scope:
labeled_tensor_0 = convert_to_labeled_tensor(labeled_tensor_0)
labeled_tensor_1 = convert_to_labeled_tensor(labeled_tensor_1)
axes_0 = labeled_tensor_0.axes
axes_1 = labeled_tensor_1.axes
for axis_name in axes_0:
if axis_name in axes_1:
if axes_0[axis_name] != axes_1[axis_name]:
raise ValueError('Mismatched %r axis on input tensors: %r and %r' %
(axis_name, axes_0[axis_name], axes_1[axis_name]))
axis_scope_order = get_axis_order()
if axis_scope_order is not None:
# we are in an axis_order_scope
axis_names_set = set(axes_0) | set(axes_1)
new_axis_names = [a for a in axis_scope_order if a in axis_names_set]
check_axis_order(labeled_tensor_0, axis_scope_order)
check_axis_order(labeled_tensor_1, axis_scope_order)
else:
# attempt to find a consistent ordering
new_axis_names = _find_consistent_ordering(list(axes_0), list(axes_1))
if new_axis_names is None:
raise AxisOrderError(
'No consistent axis order allows for aligning tensors with axis '
'orders %r and %r without copying data. Use transpose or '
            'impose_axis_order to reorder axes on one or more of the inputs.' %
(axes_0.keys(), axes_1.keys()))
labeled_tensor_0 = expand_dims(
labeled_tensor_0, new_axis_names, name=scope + '0')
labeled_tensor_1 = expand_dims(
labeled_tensor_1, new_axis_names, name=scope + '1')
broadcast_axes = []
for axis_name in new_axis_names:
if axis_name in axes_0:
broadcast_axes.append(axes_0[axis_name])
else:
broadcast_axes.append(axes_1[axis_name])
return labeled_tensor_0, labeled_tensor_1, Axes(broadcast_axes)
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_unary_op(op_name, elementwise_function):
"""Define a unary operation for labeled tensors.
Args:
op_name: string name of the TensorFlow op.
elementwise_function: function to call to evaluate the op on a single
tf.Tensor object. This function must accept two arguments: a tf.Tensor
object, and an optional `name`.
Returns:
Function defining the given op that acts on LabeledTensors.
"""
default_name = 'lt_%s' % op_name
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, tc.Optional(string_types))
def op(labeled_tensor, name=None):
"""LabeledTensor version of `tf.{op_name}`.
See `tf.{op_name}` for full details.
Args:
labeled_tensor: Input tensor.
name: Optional op name.
Returns:
A LabeledTensor with result of applying `tf.{op_name}` elementwise.
"""
with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
result_tensor = elementwise_function(labeled_tensor.tensor, name=scope)
return LabeledTensor(result_tensor, labeled_tensor.axes)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
abs_function = define_unary_op('abs', math_ops.abs)
neg = define_unary_op('neg', math_ops.negative)
sign = define_unary_op('sign', math_ops.sign)
reciprocal = define_unary_op('reciprocal', math_ops.reciprocal)
square = define_unary_op('square', math_ops.square)
round_function = define_unary_op('round', math_ops.round)
sqrt = define_unary_op('sqrt', math_ops.sqrt)
rsqrt = define_unary_op('rsqrt', math_ops.rsqrt)
exp = define_unary_op('exp', math_ops.exp)
log = define_unary_op('log', math_ops.log)
ceil = define_unary_op('ceil', math_ops.ceil)
floor = define_unary_op('floor', math_ops.floor)
cos = define_unary_op('cos', math_ops.cos)
sin = define_unary_op('sin', math_ops.sin)
tan = define_unary_op('tan', math_ops.tan)
acos = define_unary_op('acos', math_ops.acos)
asin = define_unary_op('asin', math_ops.asin)
atan = define_unary_op('atan', math_ops.atan)
lgamma = define_unary_op('lgamma', math_ops.lgamma)
digamma = define_unary_op('digamma', math_ops.digamma)
erf = define_unary_op('erf', math_ops.erf)
erfc = define_unary_op('erfc', math_ops.erfc)
logical_not = define_unary_op('logical_not', math_ops.logical_not)
tanh = define_unary_op('tanh', math_ops.tanh)
sigmoid = define_unary_op('sigmoid', math_ops.sigmoid)
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_binary_op(op_name, elementwise_function):
"""Define a binary operation that broadcasts labeled tensors.
Args:
op_name: string name of the TensorFlow op.
elementwise_function: function to call to evaluate the op on tf.Tensor
objects. This function must accept three arguments: two tf.Tensor objects,
and an optional `name`.
Returns:
Function defining the given op that acts on LabeledTensors.
"""
default_name = 'lt_%s' % op_name
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, LabeledTensorLike, tc.Optional(string_types))
def op(labeled_tensor_0, labeled_tensor_1, name=None):
"""LabeledTensor version of `tf.{op_name}` with label based alignment.
See `tf.{op_name}` for full details.
Args:
labeled_tensor_0: Input tensor.
labeled_tensor_1: Input tensor.
name: Optional op name.
Returns:
A LabeledTensor with result of applying `tf.{op_name}` elementwise.
"""
with ops.name_scope(name, default_name,
[labeled_tensor_0, labeled_tensor_1]) as scope:
align_0, align_1, broadcast_axes = align(labeled_tensor_0,
labeled_tensor_1)
tensor = elementwise_function(align_0.tensor, align_1.tensor, name=scope)
return LabeledTensor(tensor, broadcast_axes)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
add = define_binary_op('add', math_ops.add)
sub = define_binary_op('sub', math_ops.subtract)
mul = define_binary_op('mul', math_ops.multiply)
div = define_binary_op('div', math_ops.div)
mod = define_binary_op('mod', math_ops.mod)
pow_function = define_binary_op('pow', math_ops.pow)
equal = define_binary_op('equal', math_ops.equal)
greater = define_binary_op('greater', math_ops.greater)
greater_equal = define_binary_op('greater_equal', math_ops.greater_equal)
not_equal = define_binary_op('not_equal', math_ops.not_equal)
less = define_binary_op('less', math_ops.less)
less_equal = define_binary_op('less_equal', math_ops.less_equal)
logical_and = define_binary_op('logical_and', math_ops.logical_and)
logical_or = define_binary_op('logical_or', math_ops.logical_or)
logical_xor = define_binary_op('logical_xor', math_ops.logical_xor)
maximum = define_binary_op('maximum', math_ops.maximum)
minimum = define_binary_op('minimum', math_ops.minimum)
squared_difference = define_binary_op('squared_difference',
math_ops.squared_difference)
igamma = define_binary_op('igamma', math_ops.igamma)
igammac = define_binary_op('igammac', math_ops.igammac)
zeta = define_binary_op('zeta', math_ops.zeta)
polygamma = define_binary_op('polygamma', math_ops.polygamma)
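# Illustrative usage sketch (added for clarity; not part of the original
# module, and assuming the usual TensorFlow graph context): the binary ops
# defined above align axes by name before broadcasting, mirroring the doctest
# in `align`:
#
#   a = LabeledTensor(array_ops.ones((2, 4)), ['x', 'z'])
#   b = LabeledTensor(array_ops.ones((3, 4)), ['y', 'z'])
#   c = add(a, b)  # axes: [('x', 2), ('y', 3), ('z', 4)]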
|
the-stack_106_14350
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved
# Copyright (c) 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import time
from oslo.config import cfg
from quantumclient.common import exceptions as qexceptions
from nova.compute import instance_types
from nova import conductor
from nova import context
from nova.db import base
from nova import exception
from nova.network import api as network_api
from nova.network import model as network_model
from nova.network import quantumv2
from nova.network.security_group import openstack_driver
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from quantumclient.quantum import v2_0 as quantumv20
quantum_opts = [
cfg.StrOpt('quantum_url',
default='http://127.0.0.1:9696',
help='URL for connecting to quantum'),
cfg.IntOpt('quantum_url_timeout',
default=30,
help='timeout value for connecting to quantum in seconds'),
cfg.StrOpt('quantum_admin_username',
help='username for connecting to quantum in admin context'),
cfg.StrOpt('quantum_admin_password',
help='password for connecting to quantum in admin context',
secret=True),
cfg.StrOpt('quantum_admin_tenant_name',
help='tenant name for connecting to quantum in admin context'),
cfg.StrOpt('quantum_region_name',
help='region name for connecting to quantum in admin context'),
cfg.StrOpt('quantum_admin_auth_url',
default='http://localhost:5000/v2.0',
help='auth url for connecting to quantum in admin context'),
cfg.BoolOpt('quantum_api_insecure',
default=False,
help='if set, ignore any SSL validation issues'),
cfg.StrOpt('quantum_auth_strategy',
default='keystone',
help='auth strategy for connecting to '
'quantum in admin context'),
# TODO(berrange) temporary hack until Quantum can pass over the
# name of the OVS bridge it is configured with
cfg.StrOpt('quantum_ovs_bridge',
default='br-int',
help='Name of Integration Bridge used by Open vSwitch'),
cfg.IntOpt('quantum_extension_sync_interval',
default=600,
help='Number of seconds before querying quantum for'
' extensions'),
cfg.StrOpt('quantum_default_private_network',
help='Name of Private Network used by this host'),
]
CONF = cfg.CONF
CONF.register_opts(quantum_opts)
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('flat_injected', 'nova.network.manager')
LOG = logging.getLogger(__name__)
NET_EXTERNAL = 'router:external'
refresh_cache = network_api.refresh_cache
update_instance_info_cache = network_api.update_instance_cache_with_nw_info
class API(base.Base):
"""API for interacting with the quantum 2.x API."""
conductor_api = conductor.API()
security_group_api = openstack_driver.get_openstack_security_group_driver()
def __init__(self):
super(API, self).__init__()
self.last_quantum_extension_sync = None
self.extensions = {}
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures."""
def _get_available_networks(self, context, project_id,
net_ids=None):
"""Return a network list available for the tenant.
The list contains networks owned by the tenant and public networks.
If net_ids specified, it searches networks with requested IDs only.
"""
quantum = quantumv2.get_client(context)
# If user has specified to attach instance only to specific
# networks, add them to **search_opts
# (1) Retrieve non-public network list owned by the tenant.
search_opts = {"tenant_id": project_id, 'shared': False}
if net_ids:
search_opts['id'] = net_ids
nets = quantum.list_networks(**search_opts).get('networks', [])
# (2) Retrieve public network list.
search_opts = {'shared': True}
if net_ids:
search_opts['id'] = net_ids
nets += quantum.list_networks(**search_opts).get('networks', [])
_ensure_requested_network_ordering(
lambda x: x['id'],
nets,
net_ids)
return nets
@refresh_cache
def allocate_for_instance(self, context, instance, **kwargs):
"""Allocate network resources for the instance.
TODO(someone): document the rest of these parameters.
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
NB: QuantumV2 currently assigns hypervisor supplied MAC addresses
to arbitrary networks, which requires openflow switches to
function correctly if more than one network is being used with
the bare metal hypervisor (which is the only one known to limit
MAC addresses).
"""
hypervisor_macs = kwargs.get('macs', None)
available_macs = None
if hypervisor_macs is not None:
# Make a copy we can mutate: records macs that have not been used
# to create a port on a network. If we find a mac with a
# pre-allocated port we also remove it from this set.
available_macs = set(hypervisor_macs)
quantum = quantumv2.get_client(context)
LOG.debug(_('allocate_for_instance() for %s'),
instance['display_name'])
if not instance['project_id']:
msg = _('empty project id for instance %s')
raise exception.InvalidInput(
reason=msg % instance['display_name'])
requested_networks = kwargs.get('requested_networks')
ports = {}
fixed_ips = {}
net_ids = []
if requested_networks:
for network_id, fixed_ip, port_id in requested_networks:
if port_id:
port = quantum.show_port(port_id)['port']
if hypervisor_macs is not None:
if port['mac_address'] not in hypervisor_macs:
raise exception.PortNotUsable(port_id=port_id,
instance=instance['display_name'])
else:
# Don't try to use this MAC if we need to create a
# port on the fly later. Identical MACs may be
# configured by users into multiple ports so we
# discard rather than popping.
available_macs.discard(port['mac_address'])
network_id = port['network_id']
ports[network_id] = port
elif fixed_ip and network_id:
fixed_ips[network_id] = fixed_ip
if network_id:
net_ids.append(network_id)
private_net_id = quantumv20.find_resourceid_by_name_or_id(
quantum, 'network', CONF.quantum_default_private_network)
if not private_net_id:
raise Exception(_('Default Private Network ID Not Found'))
else:
net_ids.append(private_net_id)
nets = self._get_available_networks(context, instance['project_id'],
net_ids)
security_groups = kwargs.get('security_groups', [])
security_group_ids = []
# TODO(arosen) Should optimize more to do direct query for security
# group if len(security_groups) == 1
if len(security_groups):
search_opts = {'tenant_id': instance['project_id']}
user_security_groups = quantum.list_security_groups(
**search_opts).get('security_groups')
for security_group in security_groups:
name_match = None
uuid_match = None
for user_security_group in user_security_groups:
if user_security_group['name'] == security_group:
if name_match:
msg = (_("Multiple security groups found matching"
" '%s'. Use an ID to be more specific."),
security_group)
raise exception.NoUniqueMatch(msg)
name_match = user_security_group['id']
if user_security_group['id'] == security_group:
uuid_match = user_security_group['id']
# If a user names the security group the same as
# another's security groups uuid, the name takes priority.
if not name_match and not uuid_match:
raise exception.SecurityGroupNotFound(
security_group_id=security_group)
elif name_match:
security_group_ids.append(name_match)
elif uuid_match:
security_group_ids.append(uuid_match)
touched_port_ids = []
created_port_ids = []
for network in nets:
# If security groups are requested on an instance then the
            # network must have a subnet associated with it. Some plugins
# implement the port-security extension which requires
# 'port_security_enabled' to be True for security groups.
# That is why True is returned if 'port_security_enabled'
# is not found.
if (security_groups and not (
network['subnets']
and network.get('port_security_enabled', True))):
raise exception.SecurityGroupCannotBeApplied()
network_id = network['id']
zone = 'compute:%s' % instance['availability_zone']
port_req_body = {'port': {'device_id': instance['uuid'],
'device_owner': zone}}
try:
port = ports.get(network_id)
if port:
quantum.update_port(port['id'], port_req_body)
touched_port_ids.append(port['id'])
else:
fixed_ip = fixed_ips.get(network_id)
if fixed_ip:
port_req_body['port']['fixed_ips'] = [{'ip_address':
fixed_ip}]
port_req_body['port']['network_id'] = network_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = instance['project_id']
if security_group_ids:
port_req_body['port']['security_groups'] = (
security_group_ids)
if available_macs is not None:
if not available_macs:
raise exception.PortNotFree(
instance=instance['display_name'])
mac_address = available_macs.pop()
port_req_body['port']['mac_address'] = mac_address
self._populate_quantum_extension_values(instance,
port_req_body)
created_port_ids.append(
quantum.create_port(port_req_body)['port']['id'])
except Exception:
with excutils.save_and_reraise_exception():
for port_id in touched_port_ids:
port_in_server = quantum.show_port(port_id).get('port')
if not port_in_server:
raise Exception(_('Port not found'))
port_req_body = {'port': {'device_id': None}}
quantum.update_port(port_id, port_req_body)
for port_id in created_port_ids:
try:
quantum.delete_port(port_id)
except Exception as ex:
msg = _("Fail to delete port %(portid)s with"
" failure: %(exception)s")
LOG.debug(msg, {'portid': port_id,
'exception': ex})
self.trigger_security_group_members_refresh(context, instance)
self.trigger_instance_add_security_group_refresh(context, instance)
nw_info = self._get_instance_nw_info(context, instance, networks=nets)
# NOTE(danms): Only return info about ports we created in this run.
# In the initial allocation case, this will be everything we created,
# and in later runs will only be what was created that time. Thus,
# this only affects the attach case, not the original use for this
# method.
return network_model.NetworkInfo([port for port in nw_info
if port['id'] in created_port_ids +
touched_port_ids])
def _refresh_quantum_extensions_cache(self):
if (not self.last_quantum_extension_sync or
((time.time() - self.last_quantum_extension_sync)
>= CONF.quantum_extension_sync_interval)):
quantum = quantumv2.get_client(context.get_admin_context())
extensions_list = quantum.list_extensions()['extensions']
self.last_quantum_extension_sync = time.time()
self.extensions.clear()
self.extensions = dict((ext['name'], ext)
for ext in extensions_list)
def _populate_quantum_extension_values(self, instance, port_req_body):
self._refresh_quantum_extensions_cache()
if 'nvp-qos' in self.extensions:
instance_type = instance_types.extract_instance_type(instance)
rxtx_factor = instance_type.get('rxtx_factor')
port_req_body['port']['rxtx_factor'] = rxtx_factor
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocate all network resources related to the instance."""
LOG.debug(_('deallocate_for_instance() for %s'),
instance['display_name'])
search_opts = {'device_id': instance['uuid']}
data = quantumv2.get_client(context).list_ports(**search_opts)
ports = data.get('ports', [])
for port in ports:
try:
quantumv2.get_client(context).delete_port(port['id'])
except Exception as ex:
LOG.exception(_("Failed to delete quantum port %(portid)s ")
% {'portid': port['id']})
self.trigger_security_group_members_refresh(context, instance)
self.trigger_instance_remove_security_group_refresh(context, instance)
@refresh_cache
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None,
conductor_api=None):
return self.allocate_for_instance(context, instance,
requested_networks=[(network_id, requested_ip, port_id)],
conductor_api=conductor_api)
@refresh_cache
def deallocate_port_for_instance(self, context, instance, port_id,
conductor_api=None):
try:
quantumv2.get_client(context).delete_port(port_id)
except Exception as ex:
LOG.exception(_("Failed to delete quantum port %(port_id)s ") %
locals())
self.trigger_security_group_members_refresh(context, instance)
self.trigger_instance_remove_security_group_refresh(context, instance)
return self._get_instance_nw_info(context, instance)
def list_ports(self, context, **search_opts):
return quantumv2.get_client(context).list_ports(**search_opts)
def show_port(self, context, port_id):
return quantumv2.get_client(context).show_port(port_id)
def get_instance_nw_info(self, context, instance, conductor_api=None,
networks=None):
result = self._get_instance_nw_info(context, instance, networks)
update_instance_info_cache(self, context, instance, result,
conductor_api)
return result
def _get_instance_nw_info(self, context, instance, networks=None):
LOG.debug(_('get_instance_nw_info() for %s'),
instance['display_name'])
nw_info = self._build_network_info_model(context, instance, networks)
return network_model.NetworkInfo.hydrate(nw_info)
@refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id,
conductor_api=None):
"""Add a fixed ip to the instance from specified network."""
search_opts = {'network_id': network_id}
data = quantumv2.get_client(context).list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
if not ipam_subnets:
raise exception.NetworkNotFoundForInstance(
instance_id=instance['uuid'])
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone,
'network_id': network_id}
data = quantumv2.get_client(context).list_ports(**search_opts)
ports = data['ports']
for p in ports:
for subnet in ipam_subnets:
fixed_ips = p['fixed_ips']
fixed_ips.append({'subnet_id': subnet['id']})
port_req_body = {'port': {'fixed_ips': fixed_ips}}
try:
quantumv2.get_client(context).update_port(p['id'],
port_req_body)
return
except Exception as ex:
msg = _("Unable to update port %(portid)s on subnet "
"%(subnet_id)s with failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'],
'subnet_id': subnet['id'],
'exception': ex})
raise exception.NetworkNotFoundForInstance(
instance_id=instance['uuid'])
@refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address,
conductor_api=None):
"""Remove a fixed ip from the instance."""
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
data = quantumv2.get_client(context).list_ports(**search_opts)
ports = data['ports']
for p in ports:
fixed_ips = p['fixed_ips']
new_fixed_ips = []
for fixed_ip in fixed_ips:
if fixed_ip['ip_address'] != address:
new_fixed_ips.append(fixed_ip)
port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
try:
quantumv2.get_client(context).update_port(p['id'],
port_req_body)
except Exception as ex:
msg = _("Unable to update port %(portid)s with"
" failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'], 'exception': ex})
return
raise exception.FixedIpNotFoundForSpecificInstance(
instance_uuid=instance['uuid'], ip=address)
def validate_networks(self, context, requested_networks):
"""Validate that the tenant can use the requested networks."""
LOG.debug(_('validate_networks() for %s'),
requested_networks)
if not requested_networks:
return
net_ids = []
for (net_id, _i, port_id) in requested_networks:
if port_id:
port = (quantumv2.get_client(context)
.show_port(port_id)
.get('port'))
if not port:
raise exception.PortNotFound(port_id=port_id)
if port.get('device_id', None):
raise exception.PortInUse(port_id=port_id)
net_id = port['network_id']
if net_id in net_ids:
raise exception.NetworkDuplicated(network_id=net_id)
net_ids.append(net_id)
nets = self._get_available_networks(context, context.project_id,
net_ids)
if len(nets) != len(net_ids):
            requested_netid_set = set(net_ids)
            returned_netid_set = set([net['id'] for net in nets])
            lostid_set = requested_netid_set - returned_netid_set
id_str = ''
for _id in lostid_set:
id_str = id_str and id_str + ', ' + _id or _id
raise exception.NetworkNotFound(network_id=id_str)
def _get_instance_uuids_by_ip(self, context, address):
"""Retrieve instance uuids associated with the given ip address.
:returns: A list of dicts containing the uuids keyed by 'instance_uuid'
e.g. [{'instance_uuid': uuid}, ...]
"""
search_opts = {"fixed_ips": 'ip_address=%s' % address}
data = quantumv2.get_client(context).list_ports(**search_opts)
ports = data.get('ports', [])
return [{'instance_uuid': port['device_id']} for port in ports
if port['device_id']]
def get_instance_uuids_by_ip_filter(self, context, filters):
"""Return a list of dicts in the form of
[{'instance_uuid': uuid}] that matched the ip filter.
"""
# filters['ip'] is composed as '^%s$' % fixed_ip.replace('.', '\\.')
ip = filters.get('ip')
        # we remove the ^, $ and \ characters from the ip filter
if ip[0] == '^':
ip = ip[1:]
if ip[-1] == '$':
ip = ip[:-1]
ip = ip.replace('\\.', '.')
return self._get_instance_uuids_by_ip(context, ip)
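    # Illustrative sketch (added for clarity; not part of the original class,
    # and the address below is only an example): given the regex-style filter
    # nova composes, e.g.
    #   filters = {'ip': '^10\\.0\\.0\\.2$'}
    # the method above reduces it to the plain address '10.0.0.2' before
    # asking quantum for ports with that fixed ip.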
def trigger_instance_add_security_group_refresh(self, context,
instance_ref):
admin_context = context.elevated()
for group in instance_ref['security_groups']:
self.conductor_api.security_groups_trigger_handler(context,
'instance_add_security_group', instance_ref, group['name'])
def trigger_instance_remove_security_group_refresh(self, context,
instance_ref):
admin_context = context.elevated()
for group in instance_ref['security_groups']:
self.conductor_api.security_groups_trigger_handler(context,
'instance_remove_security_group', instance_ref, group['name'])
def trigger_security_group_members_refresh(self, context, instance_ref):
admin_context = context.elevated()
group_ids = [group['id'] for group in instance_ref['security_groups']]
self.conductor_api.security_groups_trigger_members_refresh(
admin_context, group_ids)
self.conductor_api.security_groups_trigger_handler(admin_context,
'security_group_members', group_ids)
def _get_port_id_by_fixed_address(self, client,
instance, address):
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone}
data = client.list_ports(**search_opts)
ports = data['ports']
port_id = None
for p in ports:
for ip in p['fixed_ips']:
if ip['ip_address'] == address:
port_id = p['id']
break
if not port_id:
raise exception.FixedIpNotFoundForAddress(address=address)
return port_id
@refresh_cache
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associate a floating ip with a fixed ip."""
# Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
client = quantumv2.get_client(context)
port_id = self._get_port_id_by_fixed_address(client, instance,
fixed_address)
fip = self._get_floating_ip_by_address(client, floating_address)
param = {'port_id': port_id,
'fixed_ip_address': fixed_address}
client.update_floatingip(fip['id'], {'floatingip': param})
def get_all(self, context):
client = quantumv2.get_client(context)
networks = client.list_networks().get('networks') or {}
for network in networks:
network['label'] = network['name']
return networks
def get(self, context, network_uuid):
client = quantumv2.get_client(context)
network = client.show_network(network_uuid).get('network') or {}
network['label'] = network['name']
return network
def delete(self, context, network_uuid):
raise NotImplementedError()
def disassociate(self, context, network_uuid):
raise NotImplementedError()
def get_fixed_ip(self, context, id):
raise NotImplementedError()
def get_fixed_ip_by_address(self, context, address):
uuid_maps = self._get_instance_uuids_by_ip(context, address)
if len(uuid_maps) == 1:
return uuid_maps[0]
elif not uuid_maps:
raise exception.FixedIpNotFoundForAddress(address=address)
else:
raise exception.FixedIpAssociatedWithMultipleInstances(
address=address)
def _setup_net_dict(self, client, network_id):
if not network_id:
return {}
pool = client.show_network(network_id)['network']
return {pool['id']: pool}
def _setup_port_dict(self, client, port_id):
if not port_id:
return {}
port = client.show_port(port_id)['port']
return {port['id']: port}
def _setup_pools_dict(self, client):
pools = self._get_floating_ip_pools(client)
return dict([(i['id'], i) for i in pools])
def _setup_ports_dict(self, client, project_id=None):
search_opts = {'tenant_id': project_id} if project_id else {}
ports = client.list_ports(**search_opts)['ports']
return dict([(p['id'], p) for p in ports])
def get_floating_ip(self, context, id):
client = quantumv2.get_client(context)
fip = client.show_floatingip(id)['floatingip']
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(client, fip['port_id'])
return self._format_floating_ip_model(fip, pool_dict, port_dict)
def _get_floating_ip_pools(self, client, project_id=None):
search_opts = {NET_EXTERNAL: True}
if project_id:
search_opts.update({'tenant_id': project_id})
data = client.list_networks(**search_opts)
return data['networks']
def get_floating_ip_pools(self, context):
client = quantumv2.get_client(context)
pools = self._get_floating_ip_pools(client)
return [{'name': n['name'] or n['id']} for n in pools]
def _format_floating_ip_model(self, fip, pool_dict, port_dict):
pool = pool_dict[fip['floating_network_id']]
result = {'id': fip['id'],
'address': fip['floating_ip_address'],
'pool': pool['name'] or pool['id'],
'project_id': fip['tenant_id'],
# In Quantum v2, an exact fixed_ip_id does not exist.
'fixed_ip_id': fip['port_id'],
}
# In Quantum v2 API fixed_ip_address and instance uuid
# (= device_id) are known here, so pass it as a result.
result['fixed_ip'] = {'address': fip['fixed_ip_address']}
if fip['port_id']:
instance_uuid = port_dict[fip['port_id']]['device_id']
result['instance'] = {'uuid': instance_uuid}
else:
result['instance'] = None
return result
def get_floating_ip_by_address(self, context, address):
client = quantumv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(client, fip['port_id'])
return self._format_floating_ip_model(fip, pool_dict, port_dict)
def get_floating_ips_by_project(self, context):
client = quantumv2.get_client(context)
project_id = context.project_id
fips = client.list_floatingips(tenant_id=project_id)['floatingips']
pool_dict = self._setup_pools_dict(client)
port_dict = self._setup_ports_dict(client, project_id)
return [self._format_floating_ip_model(fip, pool_dict, port_dict)
for fip in fips]
def get_floating_ips_by_fixed_address(self, context, fixed_address):
return []
def get_instance_id_by_floating_address(self, context, address):
"""Returns the instance id a floating ip's fixed ip is allocated to."""
client = quantumv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
return None
port = client.show_port(fip['port_id'])['port']
return port['device_id']
def get_vifs_by_instance(self, context, instance):
raise NotImplementedError()
def get_vif_by_mac_address(self, context, mac_address):
raise NotImplementedError()
def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
search_opts = {NET_EXTERNAL: True, 'fields': 'id'}
if uuidutils.is_uuid_like(name_or_id):
search_opts.update({'id': name_or_id})
else:
search_opts.update({'name': name_or_id})
data = client.list_networks(**search_opts)
nets = data['networks']
if len(nets) == 1:
return nets[0]['id']
elif len(nets) == 0:
raise exception.FloatingIpPoolNotFound()
else:
msg = (_("Multiple floating IP pools matches found for name '%s'")
% name_or_id)
raise exception.NovaException(message=msg)
def allocate_floating_ip(self, context, pool=None):
"""Add a floating ip to a project from a pool."""
client = quantumv2.get_client(context)
pool = pool or CONF.default_floating_pool
pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)
# TODO(amotoki): handle exception during create_floatingip()
        # At this point it is ensured that a network for the pool exists.
# quota error may be returned.
param = {'floatingip': {'floating_network_id': pool_id}}
fip = client.create_floatingip(param)
return fip['floatingip']['floating_ip_address']
def _get_floating_ip_by_address(self, client, address):
"""Get floatingip from floating ip address."""
data = client.list_floatingips(floating_ip_address=address)
fips = data['floatingips']
if len(fips) == 0:
raise exception.FloatingIpNotFoundForAddress(address=address)
elif len(fips) > 1:
raise exception.FloatingIpMultipleFoundForAddress(address=address)
return fips[0]
def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port):
"""Get floatingips from fixed ip and port."""
try:
data = client.list_floatingips(fixed_ip_address=fixed_ip,
port_id=port)
# If a quantum plugin does not implement the L3 API a 404 from
# list_floatingips will be raised.
except qexceptions.QuantumClientException as e:
if e.status_code == 404:
return []
raise
return data['floatingips']
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Remove a floating ip with the given address from a project."""
# Note(amotoki): We cannot handle a case where multiple pools
# have overlapping IP address range. In this case we cannot use
# 'address' as a unique key.
# This is a limitation of the current nova.
# Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
client = quantumv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if fip['port_id']:
raise exception.FloatingIpAssociated(address=address)
client.delete_floatingip(fip['id'])
@refresh_cache
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociate a floating ip from the instance."""
# Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
client = quantumv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance."""
        # NOTE(wenjianhn): just pass so that migrating an instance doesn't
        # raise for now.
pass
def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance."""
        # NOTE(wenjianhn): just pass so that migrating an instance doesn't
        # raise for now.
pass
def add_network_to_project(self, context, project_id, network_uuid=None):
"""Force add a network to the project."""
raise NotImplementedError()
def _build_network_info_model(self, context, instance, networks=None):
search_opts = {'tenant_id': instance['project_id'],
'device_id': instance['uuid'], }
client = quantumv2.get_client(context, admin=True)
data = client.list_ports(**search_opts)
ports = data.get('ports', [])
if networks is None:
networks = self._get_available_networks(context,
instance['project_id'])
else:
# ensure ports are in preferred network order
_ensure_requested_network_ordering(
lambda x: x['network_id'],
ports,
[n['id'] for n in networks])
nw_info = network_model.NetworkInfo()
for port in ports:
network_name = None
for net in networks:
if port['network_id'] == net['id']:
network_name = net['name']
break
if network_name is None:
                raise exception.NotFound(_('Network %(net)s for '
                                           'port %(port_id)s not found!') %
                                         {'net': port['network_id'],
                                          'port_id': port['id']})
network_IPs = []
for fixed_ip in port['fixed_ips']:
fixed = network_model.FixedIP(address=fixed_ip['ip_address'])
floats = self._get_floating_ips_by_fixed_and_port(
client, fixed_ip['ip_address'], port['id'])
for ip in floats:
fip = network_model.IP(address=ip['floating_ip_address'],
type='floating')
fixed.add_floating_ip(fip)
network_IPs.append(fixed)
subnets = self._get_subnets_from_port(context, port)
for subnet in subnets:
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
bridge = None
ovs_interfaceid = None
# Network model metadata
should_create_bridge = None
vif_type = port.get('binding:vif_type')
# TODO(berrange) Quantum should pass the bridge name
# in another binding metadata field
if vif_type == network_model.VIF_TYPE_OVS:
bridge = CONF.quantum_ovs_bridge
ovs_interfaceid = port['id']
elif vif_type == network_model.VIF_TYPE_BRIDGE:
bridge = "brq" + port['network_id']
should_create_bridge = True
if bridge is not None:
bridge = bridge[:network_model.NIC_NAME_LEN]
devname = "tap" + port['id']
devname = devname[:network_model.NIC_NAME_LEN]
network = network_model.Network(
id=port['network_id'],
bridge=bridge,
injected=CONF.flat_injected,
label=network_name,
tenant_id=net['tenant_id']
)
network['subnets'] = subnets
if should_create_bridge is not None:
network['should_create_bridge'] = should_create_bridge
nw_info.append(network_model.VIF(
id=port['id'],
address=port['mac_address'],
network=network,
type=port.get('binding:vif_type'),
ovs_interfaceid=ovs_interfaceid,
devname=devname))
return nw_info
def _get_subnets_from_port(self, context, port):
"""Return the subnets for a given port."""
fixed_ips = port['fixed_ips']
# No fixed_ips for the port means there is no subnet associated
# with the network the port is created on.
# Since list_subnets(id=[]) returns all subnets visible for the
        # current tenant, the returned subnets may contain ones that are not
        # related to the port. To avoid this, the method returns here.
if not fixed_ips:
return []
search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]}
data = quantumv2.get_client(context).list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
subnets = []
for subnet in ipam_subnets:
subnet_dict = {'cidr': subnet['cidr'],
'gateway': network_model.IP(
address=subnet['gateway_ip'],
type='gateway'),
}
# attempt to populate DHCP server field
search_opts = {'network_id': subnet['network_id'],
'device_owner': 'network:dhcp'}
data = quantumv2.get_client(context).list_ports(**search_opts)
dhcp_ports = data.get('ports', [])
for p in dhcp_ports:
for ip_pair in p['fixed_ips']:
if ip_pair['subnet_id'] == subnet['id']:
subnet_dict['dhcp_server'] = ip_pair['ip_address']
break
subnet_object = network_model.Subnet(**subnet_dict)
for dns in subnet.get('dns_nameservers', []):
subnet_object.add_dns(
network_model.IP(address=dns, type='dns'))
# TODO(gongysh) get the routes for this subnet
subnets.append(subnet_object)
return subnets
def get_dns_domains(self, context):
"""Return a list of available dns domains.
These can be used to create DNS entries for floating ips.
"""
raise NotImplementedError()
def add_dns_entry(self, context, address, name, dns_type, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def modify_dns_entry(self, context, name, address, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def delete_dns_entry(self, context, name, domain):
"""Delete the specified dns entry."""
raise NotImplementedError()
def delete_dns_domain(self, context, domain):
"""Delete the specified dns domain."""
raise NotImplementedError()
def get_dns_entries_by_address(self, context, address, domain):
"""Get entries for address and domain."""
raise NotImplementedError()
def get_dns_entries_by_name(self, context, name, domain):
"""Get entries for name and domain."""
raise NotImplementedError()
def create_private_dns_domain(self, context, domain, availability_zone):
"""Create a private DNS domain with nova availability zone."""
raise NotImplementedError()
def create_public_dns_domain(self, context, domain, project=None):
"""Create a private DNS domain with optional nova project."""
raise NotImplementedError()
def _ensure_requested_network_ordering(accessor, unordered, preferred):
"""Sort a list with respect to the preferred network ordering."""
if preferred:
unordered.sort(key=lambda i: preferred.index(accessor(i)))
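# Illustrative sketch (added for clarity; not part of the original module,
# and the network ids below are hypothetical): with
# preferred = ['net-b', 'net-a'], a list of port dicts is re-ordered in place
# so ports on 'net-b' come before those on 'net-a':
#
#   _ensure_requested_network_ordering(lambda p: p['network_id'],
#                                      ports, ['net-b', 'net-a'])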
|
the-stack_106_14351
|
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A test rule that compares two binary files.
The rule uses a Bash command (diff) on Linux/macOS/non-Windows, and a cmd.exe
command (fc.exe) on Windows (no Bash is required).
"""
load("//lib:shell.bzl", "shell")
def _runfiles_path(f):
if f.root.path:
return f.path[len(f.root.path) + 1:] # generated file
else:
return f.path # source file
def _diff_test_impl(ctx):
if ctx.attr.is_windows:
test_bin = ctx.actions.declare_file(ctx.label.name + "-test.bat")
ctx.actions.write(
output = test_bin,
content = """@rem Generated by diff_test.bzl, do not edit.
@echo off
SETLOCAL ENABLEEXTENSIONS
SETLOCAL ENABLEDELAYEDEXPANSION
set MF=%RUNFILES_MANIFEST_FILE:/=\\%
set PATH=%SYSTEMROOT%\\system32
set F1={file1}
set F2={file2}
if "!F1:~0,9!" equ "external/" (set F1=!F1:~9!) else (set F1=!TEST_WORKSPACE!/!F1!)
if "!F2:~0,9!" equ "external/" (set F2=!F2:~9!) else (set F2=!TEST_WORKSPACE!/!F2!)
for /F "tokens=2* usebackq" %%i in (`findstr.exe /l /c:"!F1! " "%MF%"`) do (
set RF1=%%i
set RF1=!RF1:/=\\!
)
if "!RF1!" equ "" (
if exist "{file1}" (
set RF1="{file1}"
set RF1=!RF1:/=\\!
) else (
echo>&2 ERROR: !F1! not found
exit /b 1
)
)
for /F "tokens=2* usebackq" %%i in (`findstr.exe /l /c:"!F2! " "%MF%"`) do (
set RF2=%%i
set RF2=!RF2:/=\\!
)
if "!RF2!" equ "" (
if exist "{file2}" (
set RF2="{file2}"
set RF2=!RF2:/=\\!
) else (
echo>&2 ERROR: !F2! not found
exit /b 1
)
)
fc.exe 2>NUL 1>NUL /B "!RF1!" "!RF2!"
if %ERRORLEVEL% neq 0 (
if %ERRORLEVEL% equ 1 (
echo>&2 FAIL: files "{file1}" and "{file2}" differ. {fail_msg}
exit /b 1
) else (
fc.exe /B "!RF1!" "!RF2!"
exit /b %errorlevel%
)
)
""".format(
# TODO(arostovtsev): use shell.escape_for_bat when https://github.com/bazelbuild/bazel-skylib/pull/363 is merged
fail_msg = ctx.attr.failure_message,
file1 = _runfiles_path(ctx.file.file1),
file2 = _runfiles_path(ctx.file.file2),
),
is_executable = True,
)
else:
test_bin = ctx.actions.declare_file(ctx.label.name + "-test.sh")
ctx.actions.write(
output = test_bin,
content = r"""#!/usr/bin/env bash
set -euo pipefail
F1="{file1}"
F2="{file2}"
[[ "$F1" =~ ^external/* ]] && F1="${{F1#external/}}" || F1="$TEST_WORKSPACE/$F1"
[[ "$F2" =~ ^external/* ]] && F2="${{F2#external/}}" || F2="$TEST_WORKSPACE/$F2"
if [[ -d "${{RUNFILES_DIR:-/dev/null}}" && "${{RUNFILES_MANIFEST_ONLY:-}}" != 1 ]]; then
RF1="$RUNFILES_DIR/$F1"
RF2="$RUNFILES_DIR/$F2"
elif [[ -f "${{RUNFILES_MANIFEST_FILE:-/dev/null}}" ]]; then
RF1="$(grep -F -m1 "$F1 " "$RUNFILES_MANIFEST_FILE" | sed 's/^[^ ]* //')"
RF2="$(grep -F -m1 "$F2 " "$RUNFILES_MANIFEST_FILE" | sed 's/^[^ ]* //')"
elif [[ -f "$TEST_SRCDIR/$F1" && -f "$TEST_SRCDIR/$F2" ]]; then
RF1="$TEST_SRCDIR/$F1"
RF2="$TEST_SRCDIR/$F2"
else
echo >&2 "ERROR: could not find \"{file1}\" and \"{file2}\""
exit 1
fi
if ! diff "$RF1" "$RF2"; then
echo >&2 "FAIL: files \"{file1}\" and \"{file2}\" differ. "{fail_msg}
exit 1
fi
""".format(
fail_msg = shell.quote(ctx.attr.failure_message),
file1 = _runfiles_path(ctx.file.file1),
file2 = _runfiles_path(ctx.file.file2),
),
is_executable = True,
)
return DefaultInfo(
executable = test_bin,
files = depset(direct = [test_bin]),
runfiles = ctx.runfiles(files = [test_bin, ctx.file.file1, ctx.file.file2]),
)
_diff_test = rule(
attrs = {
"failure_message": attr.string(),
"file1": attr.label(
allow_single_file = True,
mandatory = True,
),
"file2": attr.label(
allow_single_file = True,
mandatory = True,
),
"is_windows": attr.bool(mandatory = True),
},
test = True,
implementation = _diff_test_impl,
)
def diff_test(name, file1, file2, failure_message = None, **kwargs):
"""A test that compares two files.
The test succeeds if the files' contents match.
Args:
name: The name of the test rule.
file1: Label of the file to compare to <code>file2</code>.
file2: Label of the file to compare to <code>file1</code>.
failure_message: Additional message to log if the files' contents do not match.
**kwargs: The <a href="https://docs.bazel.build/versions/main/be/common-definitions.html#common-attributes-tests">common attributes for tests</a>.
"""
_diff_test(
name = name,
file1 = file1,
file2 = file2,
failure_message = failure_message,
is_windows = select({
"@bazel_tools//src/conditions:host_windows": True,
"//conditions:default": False,
}),
**kwargs
)
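# Illustrative BUILD usage sketch (added for clarity; the load label and file
# targets below are assumptions, not part of this rule definition):
#
#   load("//rules:diff_test.bzl", "diff_test")
#
#   diff_test(
#       name = "check_golden",
#       file1 = ":generated.txt",
#       file2 = "testdata/golden.txt",
#       failure_message = "Please regenerate the golden file.",
#   )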
|
the-stack_106_14352
|
from typing import List
from ruamel import yaml
import great_expectations as ge
from great_expectations.core.batch import Batch, BatchRequest
context = ge.get_context()
datasource_config = {
"name": "my_gcs_datasource",
"class_name": "Datasource",
"execution_engine": {"class_name": "PandasExecutionEngine"},
"data_connectors": {
"configured_data_connector_name": {
"class_name": "ConfiguredAssetGCSDataConnector",
"bucket_or_name": "<YOUR_GCS_BUCKET_HERE>",
"prefix": "<BUCKET_PATH_TO_DATA>",
"default_regex": {
"pattern": "data/taxi_yellow_trip_data_samples/yellow_trip_data_sample_(\\d{4})-(\\d{2})\\.csv",
"group_names": ["year", "month"],
},
"assets": {"taxi_data": None},
}
},
}
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
datasource_config["data_connectors"]["configured_data_connector_name"][
"bucket_or_name"
] = "superconductive-integration-tests"
datasource_config["data_connectors"]["configured_data_connector_name"][
"prefix"
] = "data/taxi_yellow_trip_data_samples/"
context.test_yaml_config(yaml.dump(datasource_config))
context.add_datasource(**datasource_config)
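# Illustrative note (added for clarity; the object name below is an assumed
# example, not taken from the bucket listing): with the default_regex above,
# an object such as
#   data/taxi_yellow_trip_data_samples/yellow_trip_data_sample_2019-01.csv
# maps to the "taxi_data" asset, with the regex groups ("year", "month")
# identifying the individual batch.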
# Here is a BatchRequest naming a data_asset
batch_request = BatchRequest(
datasource_name="my_gcs_datasource",
data_connector_name="configured_data_connector_name",
data_asset_name="<YOUR_DATA_ASSET_NAME>",
)
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your data asset name directly in the BatchRequest above.
batch_request.data_asset_name = "taxi_data"
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
assert [ds["name"] for ds in context.list_datasources()] == ["my_gcs_datasource"]
assert set(
context.get_available_data_asset_names()["my_gcs_datasource"][
"configured_data_connector_name"
]
) == {"taxi_data"}
batch_list: List[Batch] = context.get_batch_list(batch_request=batch_request)
assert len(batch_list) == 3
batch: Batch = batch_list[0]
assert batch.data.dataframe.shape[0] == 10000
|
the-stack_106_14355
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google Dataflow operators.
"""
import copy
import re
from contextlib import ExitStack
from enum import Enum
from typing import Any, Dict, List, Optional
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.dataflow import DEFAULT_DATAFLOW_LOCATION, DataflowHook
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.utils.decorators import apply_defaults
from airflow.version import version
class CheckJobRunning(Enum):
"""
Helper enum for choosing what to do if job is already running
IgnoreJob - do not check if running
FinishIfRunning - finish current dag run with no action
WaitForRun - wait for job to finish and then continue with new job
"""
IgnoreJob = 1
FinishIfRunning = 2
WaitForRun = 3
class DataflowCreateJavaJobOperator(BaseOperator):
"""
Start a Java Cloud DataFlow batch job. The parameters of the operation
will be passed to the job.
**Example**: ::
default_args = {
'owner': 'airflow',
'depends_on_past': False,
            'start_date': datetime(2016, 8, 1),
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=30),
'dataflow_default_options': {
'project': 'my-gcp-project',
'zone': 'us-central1-f',
'stagingLocation': 'gs://bucket/tmp/dataflow/staging/',
}
}
dag = DAG('test-dag', default_args=default_args)
task = DataFlowJavaOperator(
gcp_conn_id='gcp_default',
task_id='normalize-cal',
jar='{{var.value.gcp_dataflow_base}}pipeline-ingress-cal-normalize-1.0.jar',
options={
'autoscalingAlgorithm': 'BASIC',
'maxNumWorkers': '50',
'start': '{{ds}}',
'partitionType': 'DAY'
},
dag=dag)
.. seealso::
For more detail on job submission have a look at the reference:
https://cloud.google.com/dataflow/pipelines/specifying-exec-params
:param jar: The reference to a self executing DataFlow jar (templated).
:type jar: str
:param job_name: The 'jobName' to use when executing the DataFlow job
(templated). This ends up being set in the pipeline options, so any entry
with key ``'jobName'`` in ``options`` will be overwritten.
:type job_name: str
:param dataflow_default_options: Map of default job options.
:type dataflow_default_options: dict
    :param options: Map of job specific options. The keys are option names and
        the values can contain different types:
* If the value is None, the single option - ``--key`` (without value) will be added.
* If the value is False, this option will be skipped
* If the value is True, the single option - ``--key`` (without value) will be added.
* If the value is list, the many options will be added for each key.
If the value is ``['A', 'B']`` and the key is ``key`` then the ``--key=A --key-B`` options
will be left
* Other value types will be replaced with the Python textual representation.
When defining labels (``labels`` option), you can also provide a dictionary.
:type options: dict
:param project_id: Optional, the GCP project ID in which to start a job.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param location: Job location.
:type location: str
:param gcp_conn_id: The connection ID to use connecting to Google Cloud
Platform.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param poll_sleep: The time in seconds to sleep between polling Google
Cloud Platform for the dataflow job status while the job is in the
JOB_STATE_RUNNING state.
:type poll_sleep: int
:param job_class: The name of the dataflow job class to be executed, it
is often not the main class configured in the dataflow jar file.
:type job_class: str
:param multiple_jobs: If pipeline creates multiple jobs then monitor all jobs
:type multiple_jobs: boolean
:param check_if_running: before running job, validate that a previous run is not in process
:type check_if_running: CheckJobRunning(IgnoreJob = do not check if running, FinishIfRunning=
        if job is running finish with nothing, WaitForRun= wait until the job finishes and then run the new job)
``jar``, ``options``, and ``job_name`` are templated so you can use variables in them.
Note that both
``dataflow_default_options`` and ``options`` will be merged to specify pipeline
    execution parameters, and ``dataflow_default_options`` is expected to hold
    high-level options, for instance project and zone information, which
apply to all dataflow operators in the DAG.
It's a good practice to define dataflow_* parameters in the default_args of the dag
like the project, zone and staging location.
.. code-block:: python
default_args = {
'dataflow_default_options': {
'zone': 'europe-west1-d',
'stagingLocation': 'gs://my-staging-bucket/staging/'
}
}
You need to pass the path to your dataflow as a file reference with the ``jar``
parameter, the jar needs to be a self executing jar (see documentation here:
https://beam.apache.org/documentation/runners/dataflow/#self-executing-jar).
Use ``options`` to pass on options to your job.
.. code-block:: python
        t1 = DataflowCreateJavaJobOperator(
task_id='dataflow_example',
jar='{{var.value.gcp_dataflow_base}}pipeline/build/libs/pipeline-example-1.0.jar',
options={
'autoscalingAlgorithm': 'BASIC',
'maxNumWorkers': '50',
'start': '{{ds}}',
'partitionType': 'DAY',
'labels': {'foo' : 'bar'}
},
gcp_conn_id='airflow-conn-id',
            dag=my_dag)
"""
template_fields = ['options', 'jar', 'job_name']
ui_color = '#0273d4'
# pylint: disable=too-many-arguments
@apply_defaults
def __init__(
self,
jar: str,
job_name: str = '{{task.task_id}}',
dataflow_default_options: Optional[dict] = None,
options: Optional[dict] = None,
project_id: Optional[str] = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
poll_sleep: int = 10,
job_class: Optional[str] = None,
check_if_running: CheckJobRunning = CheckJobRunning.WaitForRun,
multiple_jobs: Optional[bool] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
dataflow_default_options = dataflow_default_options or {}
options = options or {}
options.setdefault('labels', {}).update(
{'airflow-version': 'v' + version.replace('.', '-').replace('+', '-')})
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.jar = jar
self.multiple_jobs = multiple_jobs
self.job_name = job_name
self.dataflow_default_options = dataflow_default_options
self.options = options
self.poll_sleep = poll_sleep
self.job_class = job_class
self.check_if_running = check_if_running
self.job_id = None
self.hook = None
def execute(self, context):
self.hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
poll_sleep=self.poll_sleep
)
dataflow_options = copy.copy(self.dataflow_default_options)
dataflow_options.update(self.options)
is_running = False
if self.check_if_running != CheckJobRunning.IgnoreJob:
is_running = self.hook.is_job_dataflow_running(
name=self.job_name,
variables=dataflow_options,
project_id=self.project_id,
location=self.location
)
while is_running and self.check_if_running == CheckJobRunning.WaitForRun:
is_running = self.hook.is_job_dataflow_running(
name=self.job_name, variables=dataflow_options, project_id=self.project_id,
location=self.location
)
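        # If a matching job is still running at this point, check_if_running must be
        # FinishIfRunning, so the operator finishes without submitting a new job.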
if not is_running:
with ExitStack() as exit_stack:
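                # If the jar is stored in GCS, stage it to a temporary local file first;
                # the ExitStack keeps the temporary file alive for the rest of this block.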
if self.jar.lower().startswith('gs://'):
gcs_hook = GCSHook(self.gcp_conn_id, self.delegate_to)
tmp_gcs_file = exit_stack.enter_context( # pylint: disable=no-member
gcs_hook.provide_file(object_url=self.jar)
)
self.jar = tmp_gcs_file.name
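                # The hook reports the new Dataflow job id through this callback so that
                # on_kill() can cancel the job if the task is interrupted.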
def set_current_job_id(job_id):
self.job_id = job_id
self.hook.start_java_dataflow(
job_name=self.job_name,
variables=dataflow_options,
jar=self.jar,
job_class=self.job_class,
append_job_name=True,
multiple_jobs=self.multiple_jobs,
on_new_job_id_callback=set_current_job_id,
project_id=self.project_id,
location=self.location
)
def on_kill(self) -> None:
self.log.info("On kill.")
if self.job_id:
self.hook.cancel_job(job_id=self.job_id, project_id=self.project_id)
class DataflowTemplatedJobStartOperator(BaseOperator):
"""
Start a Templated Cloud DataFlow batch job. The parameters of the operation
will be passed to the job.
:param template: The reference to the DataFlow template.
:type template: str
:param job_name: The 'jobName' to use when executing the DataFlow template
(templated).
:param options: Map of job runtime environment options.
.. seealso::
For more information on possible configurations, look at the API documentation
`https://cloud.google.com/dataflow/pipelines/specifying-exec-params
<https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment>`__
:type options: dict
:param dataflow_default_options: Map of default job environment options.
:type dataflow_default_options: dict
:param parameters: Map of job specific parameters for the template.
:type parameters: dict
:param project_id: Optional, the GCP project ID in which to start a job.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param location: Job location.
:type location: str
:param gcp_conn_id: The connection ID to use connecting to Google Cloud
Platform.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param poll_sleep: The time in seconds to sleep between polling Google
Cloud Platform for the dataflow job status while the job is in the
JOB_STATE_RUNNING state.
:type poll_sleep: int
It's a good practice to define dataflow_* parameters in the default_args of the dag
like the project, zone and staging location.
.. seealso::
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/LaunchTemplateParameters
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment
.. code-block:: python
default_args = {
'dataflow_default_options': {
'zone': 'europe-west1-d',
'tempLocation': 'gs://my-staging-bucket/staging/',
            }
        }
You need to pass the path to your dataflow template as a file reference with the
``template`` parameter. Use ``parameters`` to pass on parameters to your job.
Use ``environment`` to pass on runtime environment variables to your job.
.. code-block:: python
        t1 = DataflowTemplatedJobStartOperator(
task_id='dataflow_example',
template='{{var.value.gcp_dataflow_base}}',
parameters={
'inputFile': "gs://bucket/input/my_input.txt",
'outputFile': "gs://bucket/output/my_output.txt"
},
gcp_conn_id='airflow-conn-id',
            dag=my_dag)
``template``, ``dataflow_default_options``, ``parameters``, and ``job_name`` are
templated so you can use variables in them.
Note that ``dataflow_default_options`` is expected to save high-level options
for project information, which apply to all dataflow operators in the DAG.
.. seealso::
https://cloud.google.com/dataflow/docs/reference/rest/v1b3
/LaunchTemplateParameters
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment
For more detail on job template execution have a look at the reference:
https://cloud.google.com/dataflow/docs/templates/executing-templates
"""
template_fields = [
'template',
'job_name',
'options',
'parameters',
'project_id',
'location',
'gcp_conn_id'
]
ui_color = '#0273d4'
@apply_defaults
def __init__( # pylint: disable=too-many-arguments
self,
template: str,
job_name: str = '{{task.task_id}}',
options: Optional[Dict[str, Any]] = None,
dataflow_default_options: Optional[Dict[str, Any]] = None,
parameters: Optional[Dict[str, str]] = None,
project_id: Optional[str] = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
poll_sleep: int = 10,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.template = template
self.job_name = job_name
self.options = options or {}
self.dataflow_default_options = dataflow_default_options or {}
self.parameters = parameters or {}
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.poll_sleep = poll_sleep
self.job_id = None
self.hook: Optional[DataflowHook] = None
def execute(self, context):
self.hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
poll_sleep=self.poll_sleep
)
def set_current_job_id(job_id):
self.job_id = job_id
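        # Task-level ``options`` override any keys also present in ``dataflow_default_options``.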
options = self.dataflow_default_options
options.update(self.options)
job = self.hook.start_template_dataflow(
job_name=self.job_name,
variables=options,
parameters=self.parameters,
dataflow_template=self.template,
on_new_job_id_callback=set_current_job_id,
project_id=self.project_id,
location=self.location
)
return job
def on_kill(self) -> None:
self.log.info("On kill.")
if self.job_id:
self.hook.cancel_job(job_id=self.job_id, project_id=self.project_id)
class DataflowCreatePythonJobOperator(BaseOperator):
"""
Launching Cloud Dataflow jobs written in python. Note that both
dataflow_default_options and options will be merged to specify pipeline
    execution parameters, and dataflow_default_options is expected to hold
    high-level options, for instance project and zone information, which
apply to all dataflow operators in the DAG.
.. seealso::
For more detail on job submission have a look at the reference:
https://cloud.google.com/dataflow/pipelines/specifying-exec-params
:param py_file: Reference to the python dataflow pipeline file.py, e.g.,
/some/local/file/path/to/your/python/pipeline/file. (templated)
:type py_file: str
:param job_name: The 'job_name' to use when executing the DataFlow job
(templated). This ends up being set in the pipeline options, so any entry
with key ``'jobName'`` or ``'job_name'`` in ``options`` will be overwritten.
:type job_name: str
:param py_options: Additional python options, e.g., ["-m", "-v"].
:type py_options: list[str]
:param dataflow_default_options: Map of default job options.
:type dataflow_default_options: dict
    :param options: Map of job specific options. The key is the option name; the value can be of
        different types:
        * If the value is None, the single option - ``--key`` (without value) will be added.
        * If the value is False, this option will be skipped.
        * If the value is True, the single option - ``--key`` (without value) will be added.
        * If the value is a list, the option will be repeated for each element:
          if the value is ``['A', 'B']`` and the key is ``key``, then the ``--key=A --key=B``
          options will be added.
        * Other value types will be replaced with their Python textual representation.
        When defining labels (``labels`` option), you can also provide a dictionary.
:type options: dict
:param py_interpreter: Python version of the beam pipeline.
        If None, this defaults to python3.
To track python versions supported by beam and related
issues check: https://issues.apache.org/jira/browse/BEAM-1251
:type py_interpreter: str
:param py_requirements: Additional python package(s) to install.
        If a value is passed to this parameter, a new virtual environment will be created with
        the additional packages installed.
You could also install the apache_beam package if it is not installed on your system or you want
to use a different version.
:type py_requirements: List[str]
:param py_system_site_packages: Whether to include system_site_packages in your virtualenv.
See virtualenv documentation for more information.
This option is only relevant if the ``py_requirements`` parameter is passed.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: str
:param project_id: Optional, the GCP project ID in which to start a job.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param location: Job location.
:type location: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param poll_sleep: The time in seconds to sleep between polling Google
Cloud Platform for the dataflow job status while the job is in the
JOB_STATE_RUNNING state.
:type poll_sleep: int
"""
template_fields = ['options', 'dataflow_default_options', 'job_name', 'py_file']
@apply_defaults
def __init__( # pylint: disable=too-many-arguments
self,
py_file: str,
job_name: str = '{{task.task_id}}',
dataflow_default_options: Optional[dict] = None,
options: Optional[dict] = None,
py_interpreter: str = "python3",
py_options: Optional[List[str]] = None,
py_requirements: Optional[List[str]] = None,
py_system_site_packages: bool = False,
project_id: Optional[str] = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
poll_sleep: int = 10,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.py_file = py_file
self.job_name = job_name
self.py_options = py_options or []
self.dataflow_default_options = dataflow_default_options or {}
self.options = options or {}
self.options.setdefault('labels', {}).update(
{'airflow-version': 'v' + version.replace('.', '-').replace('+', '-')})
self.py_interpreter = py_interpreter
self.py_requirements = py_requirements or []
self.py_system_site_packages = py_system_site_packages
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.poll_sleep = poll_sleep
self.job_id = None
self.hook = None
def execute(self, context):
"""Execute the python dataflow job."""
with ExitStack() as exit_stack:
if self.py_file.lower().startswith('gs://'):
gcs_hook = GCSHook(self.gcp_conn_id, self.delegate_to)
tmp_gcs_file = exit_stack.enter_context( # pylint: disable=no-member
gcs_hook.provide_file(object_url=self.py_file)
)
self.py_file = tmp_gcs_file.name
self.hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
poll_sleep=self.poll_sleep
)
dataflow_options = self.dataflow_default_options.copy()
dataflow_options.update(self.options)
# Convert argument names from lowerCamelCase to snake case.
camel_to_snake = lambda name: re.sub(r'[A-Z]', lambda x: '_' + x.group(0).lower(), name)
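        # e.g. 'maxNumWorkers' -> 'max_num_workers', 'tempLocation' -> 'temp_location'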
formatted_options = {camel_to_snake(key): dataflow_options[key]
for key in dataflow_options}
def set_current_job_id(job_id):
self.job_id = job_id
self.hook.start_python_dataflow(
job_name=self.job_name,
variables=formatted_options,
dataflow=self.py_file,
py_options=self.py_options,
py_interpreter=self.py_interpreter,
py_requirements=self.py_requirements,
py_system_site_packages=self.py_system_site_packages,
on_new_job_id_callback=set_current_job_id,
project_id=self.project_id,
location=self.location,
)
def on_kill(self) -> None:
self.log.info("On kill.")
if self.job_id:
self.hook.cancel_job(job_id=self.job_id, project_id=self.project_id)
|
the-stack_106_14358
|
#!/usr/bin/python3
import sys
import os
import argparse
import traceback
import time
import logging
import zipfile
import json
import datetime
import dateutil.parser
import shutil
import multiprocessing
import numpy as np
def get_numpy_npz_headers(filename):
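  """Read only the .npy headers of each array inside an .npz file and return a dict mapping
  array name -> (shape, is_fortran, dtype); returns None if any member array is unreadable."""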
with zipfile.ZipFile(filename) as z:
wasbad = False
numrows = 0
npzheaders = {}
for subfilename in z.namelist():
npyfile = z.open(subfilename)
try:
version = np.lib.format.read_magic(npyfile)
except ValueError:
wasbad = True
print("WARNING: bad file, skipping it: %s (bad array %s)" % (filename,subfilename))
else:
(shape, is_fortran, dtype) = np.lib.format._read_array_header(npyfile,version)
npzheaders[subfilename] = (shape, is_fortran, dtype)
if wasbad:
return None
return npzheaders
def is_temp_npz_like(filename):
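  # Heuristic: filenames containing an underscore are treated as temporary/partial files.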
return "_" in filename
def summarize_dir(dirpath):
filenames = [filename for filename in os.listdir(dirpath) if filename.endswith('.npz')]
num_rows_this_dir = 0
filename_mtime_num_rowss = []
for filename in filenames:
filepath = os.path.join(dirpath,filename)
mtime = os.path.getmtime(filepath)
    # Files that look like temp files are recorded with a warning and no row count
if is_temp_npz_like(filename):
print("WARNING: file looks like a temp file: ", filepath)
filename_mtime_num_rowss.append((filename,mtime,None))
continue
try:
npheaders = get_numpy_npz_headers(filepath)
except PermissionError:
print("WARNING: No permissions for reading file: ", filepath)
filename_mtime_num_rowss.append((filename,mtime,None))
continue
except zipfile.BadZipFile:
print("WARNING: Bad zip file: ", filepath)
filename_mtime_num_rowss.append((filename,mtime,None))
continue
if npheaders is None or len(npheaders) <= 0:
print("WARNING: bad npz headers for file: ", filepath)
filename_mtime_num_rowss.append((filename,mtime,None))
continue
(shape, is_fortran, dtype) = npheaders["binaryInputNCHWPacked"]
num_rows = shape[0]
num_rows_this_dir += num_rows
filename_mtime_num_rowss.append((filename,mtime,num_rows))
print("Summarizing new dir with %d rows: %s" % (num_rows_this_dir,dirpath),flush=True)
return (dirpath, filename_mtime_num_rowss, num_rows_this_dir)
class TimeStuff(object):
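  """Context manager that prints when a task starts and how long it took to finish."""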
def __init__(self,taskstr):
self.taskstr = taskstr
def __enter__(self):
print("Beginning: %s" % self.taskstr, flush=True)
self.t0 = time.time()
def __exit__(self, exception_type, exception_val, trace):
self.t1 = time.time()
print("Finished: %s in %s seconds" % (self.taskstr, str(self.t1 - self.t0)), flush=True)
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Shuffle data files')
parser.add_argument('dirs', metavar='DIR', nargs='+', help='Directories of training data files')
parser.add_argument('-old-summary-file-to-assume-correct', required=False, help='Summary json file for directory contents')
parser.add_argument('-new-summary-file', required=True, help='Summary json file for directory contents')
parser.add_argument('-num-parallel-processes', required=False, type=int, help='Number of parallel processes to use, default 4')
args = parser.parse_args()
dirs = args.dirs
old_summary_file_to_assume_correct = args.old_summary_file_to_assume_correct
new_summary_file = args.new_summary_file
num_processes = 4
if args.num_parallel_processes is not None:
num_processes = args.num_parallel_processes
summary_data_by_dirpath = {}
if old_summary_file_to_assume_correct is not None and os.path.exists(old_summary_file_to_assume_correct):
with TimeStuff("Loading " + old_summary_file_to_assume_correct):
with open(old_summary_file_to_assume_correct) as fp:
summary_data_by_dirpath = json.load(fp)
dirs_to_handle = []
with TimeStuff("Finding files"):
for d in dirs:
for (path,dirnames,filenames) in os.walk(d, followlinks=True):
had_no_dirnames = len(dirnames) == 0
i = 0
while i < len(dirnames):
dirname = dirnames[i]
dirpath = os.path.normpath(os.path.join(path, dirname))
if dirpath in summary_data_by_dirpath:
del dirnames[i]
i -= 1
elif dirname == "tdata":
del dirnames[i]
i -= 1
dirs_to_handle.append(dirpath)
else:
parseddate = None
try:
parseddate = dateutil.parser.parse(dirname)
except ValueError:
parseddate = None
if parseddate is not None and parseddate < datetime.datetime.now() - datetime.timedelta(days=2.0):
del dirnames[i]
i -= 1
dirs_to_handle.append(dirpath)
i += 1
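  # Summarize each queued directory in parallel; each worker returns
  # (dirpath, [(filename, mtime, num_rows), ...], total_rows_in_that_dir).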
with TimeStuff("Parallel summarizing %d dirs" % len(dirs_to_handle)):
with multiprocessing.Pool(num_processes) as pool:
results = pool.map(summarize_dir,dirs_to_handle)
num_total_rows = 0
with TimeStuff("Merging %d results" % len(results)):
for result in results:
if result is None:
continue
(dirpath, filename_mtime_num_rowss, num_rows_this_dir) = result
num_total_rows += num_rows_this_dir
summary_data_by_dirpath[os.path.abspath(dirpath)] = filename_mtime_num_rowss
if len(dirs_to_handle) == 0 and old_summary_file_to_assume_correct is not None and os.path.exists(old_summary_file_to_assume_correct):
shutil.copy(old_summary_file_to_assume_correct,new_summary_file)
print("Not writing any new summary, no results, just copying old file")
else:
with TimeStuff("Writing result"):
with open(new_summary_file,"w") as fp:
json.dump(summary_data_by_dirpath,fp)
print("Summary file written adding %d additional rows: %s" % (num_total_rows,new_summary_file),flush=True)
print("Done computing new summary",flush=True)
sys.stdout.flush()
|