the-stack_106_29496 | import pytest
import approvaltests
pytest_plugins = 'pytester'
_DEFAULT_REPORTER = approvaltests.get_default_reporter()
@pytest.fixture(autouse=True)
def reset_approvaltests_config(request):
approvaltests.set_default_reporter(_DEFAULT_REPORTER)
for option_name in [o for o in vars(request.config.option) if "approvaltests" in o]:
setattr(request.config.option, option_name, None)
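# A minimal sketch (an assumption, not part of this conftest) of how the
# `pytester` plugin enabled above is typically exercised in the plugin's tests:
#
#   def test_plugin_smoke(pytester):
#       pytester.makepyfile("def test_ok(): assert True")
#       result = pytester.runpytest()
#       result.assert_outcomes(passed=1)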
|
the-stack_106_29497 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains SFTP to Azure Blob Storage operator."""
import os
import sys
from collections import namedtuple
from tempfile import NamedTemporaryFile
from typing import Dict, List, Optional, Tuple
if sys.version_info >= (3, 8):
from functools import cached_property
else:
from cached_property import cached_property
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.wasb import WasbHook
from airflow.providers.sftp.hooks.sftp import SFTPHook
WILDCARD = "*"
SftpFile = namedtuple('SftpFile', 'sftp_file_path, blob_name')
class SFTPToWasbOperator(BaseOperator):
"""
Transfer files to Azure Blob Storage from SFTP server.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SFTPToWasbOperator`
:param sftp_source_path: The sftp remote path. This is the specified file path
for downloading the single file or multiple files from the SFTP server.
You can use only one wildcard within your path. The wildcard can appear
inside the path or at the end of the path.
:type sftp_source_path: str
:param container_name: Name of the container.
:type container_name: str
:param blob_prefix: Prefix to name a blob.
:type blob_prefix: str
:param sftp_conn_id: The sftp connection id. The name or identifier for
establishing a connection to the SFTP server.
:type sftp_conn_id: str
:param wasb_conn_id: Reference to the wasb connection.
:type wasb_conn_id: str
:param load_options: Optional keyword arguments that
``WasbHook.load_file()`` takes.
:type load_options: dict
    :param move_object: When move_object is True, the object is moved instead
        of copied to the new location. This is the equivalent of a mv command
        as opposed to a cp command.
    :type move_object: bool
    :param wasb_overwrite_object: Whether the blob to be uploaded
        should overwrite the current data.
        When wasb_overwrite_object is True, it will overwrite the existing data.
        If set to False, the operation might fail with
        ResourceExistsError in case a blob object already exists.
    :type wasb_overwrite_object: bool
"""
template_fields = ("sftp_source_path", "container_name", "blob_prefix")
def __init__(
self,
*,
sftp_source_path: str,
container_name: str,
blob_prefix: str = "",
sftp_conn_id: str = "sftp_default",
wasb_conn_id: str = 'wasb_default',
load_options: Optional[Dict] = None,
move_object: bool = False,
wasb_overwrite_object: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sftp_source_path = sftp_source_path
self.blob_prefix = blob_prefix
self.sftp_conn_id = sftp_conn_id
        self.wasb_conn_id = wasb_conn_id
        self.container_name = container_name
self.load_options = load_options or {"overwrite": wasb_overwrite_object}
self.move_object = move_object
def dry_run(self) -> None:
super().dry_run()
sftp_files: List[SftpFile] = self.get_sftp_files_map()
for file in sftp_files:
self.log.info(
'Process will upload file from (SFTP) %s to wasb://%s as %s',
file.sftp_file_path,
self.container_name,
file.blob_name,
)
if self.move_object:
self.log.info("Executing delete of %s", file)
def execute(self, context: Dict) -> None:
"""Upload a file from SFTP to Azure Blob Storage."""
sftp_files: List[SftpFile] = self.get_sftp_files_map()
uploaded_files = self.copy_files_to_wasb(sftp_files)
if self.move_object:
self.delete_files(uploaded_files)
def get_sftp_files_map(self) -> List[SftpFile]:
"""Get SFTP files from the source path, it may use a WILDCARD to this end."""
sftp_files = []
sftp_complete_path, prefix, delimiter = self.get_tree_behavior()
found_files, _, _ = self.sftp_hook.get_tree_map(
sftp_complete_path, prefix=prefix, delimiter=delimiter
)
self.log.info("Found %s files at sftp source path: %s", str(len(found_files)), self.sftp_source_path)
for file in found_files:
future_blob_name = self.get_full_path_blob(file)
sftp_files.append(SftpFile(file, future_blob_name))
return sftp_files
def get_tree_behavior(self) -> Tuple[str, Optional[str], Optional[str]]:
"""Extracts from source path the tree behavior to interact with the remote folder"""
self.check_wildcards_limit()
if self.source_path_contains_wildcard:
prefix, delimiter = self.sftp_source_path.split(WILDCARD, 1)
sftp_complete_path = os.path.dirname(prefix)
return sftp_complete_path, prefix, delimiter
return self.sftp_source_path, None, None
def check_wildcards_limit(self) -> None:
"""Check if there are multiple wildcards used in the SFTP source path."""
total_wildcards = self.sftp_source_path.count(WILDCARD)
if total_wildcards > 1:
raise AirflowException(
"Only one wildcard '*' is allowed in sftp_source_path parameter. "
f"Found {total_wildcards} in {self.sftp_source_path}."
)
@property
def source_path_contains_wildcard(self) -> bool:
"""Checks if the SFTP source path contains a wildcard."""
return WILDCARD in self.sftp_source_path
@cached_property
def sftp_hook(self) -> SFTPHook:
"""Property of sftp hook to be re-used."""
return SFTPHook(self.sftp_conn_id)
def get_full_path_blob(self, file: str) -> str:
"""Get a blob name based on the previous name and a blob_prefix variable"""
return self.blob_prefix + os.path.basename(file)
def copy_files_to_wasb(self, sftp_files: List[SftpFile]) -> List[str]:
"""Upload a list of files from sftp_files to Azure Blob Storage with a new Blob Name."""
uploaded_files = []
wasb_hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
for file in sftp_files:
with NamedTemporaryFile("w") as tmp:
self.sftp_hook.retrieve_file(file.sftp_file_path, tmp.name)
self.log.info(
'Uploading %s to wasb://%s as %s',
file.sftp_file_path,
self.container_name,
file.blob_name,
)
wasb_hook.load_file(tmp.name, self.container_name, file.blob_name, **self.load_options)
uploaded_files.append(file.sftp_file_path)
return uploaded_files
def delete_files(self, uploaded_files: List[str]) -> None:
"""Delete files at SFTP which have been moved to Azure Blob Storage."""
for sftp_file_path in uploaded_files:
self.log.info("Executing delete of %s", sftp_file_path)
self.sftp_hook.delete_file(sftp_file_path)
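# A minimal usage sketch (not from the provider source; the DAG id, paths, and
# option values below are illustrative assumptions):
#
#   from datetime import datetime
#   from airflow import DAG
#
#   with DAG("sftp_to_wasb_example", start_date=datetime(2021, 1, 1),
#            schedule_interval=None) as dag:
#       transfer = SFTPToWasbOperator(
#           task_id="transfer_files",
#           sftp_source_path="/incoming/*.csv",   # at most one wildcard
#           container_name="landing-zone",
#           blob_prefix="sftp/",
#           move_object=True,                     # delete from SFTP after upload
#       )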
|
the-stack_106_29499 | import json
from json.decoder import JSONDecodeError
import requests
from .Device import Device
from .Spotify_auth import Spotify_Auth
spotify_token, spotify = Spotify_Auth()
class CurrentTrack:
def __init__(self):
pass
    def get_track_info(self) -> dict:
        """Will return the currently playing track JSON
        Returns:
            dict: JSON containing the currently playing track information
"""
# this try/except is necessary when we boot up the server
# and we don't have a session on Spotify, Flask returns an error
try:
track_info = requests.get(
"https://api.spotify.com/v1/me/player/currently-playing",
headers={
"Content-Type": "application/json",
"Authorization": f"Bearer {spotify_token}",
},
)
except JSONDecodeError:
return
if track_info:
return track_info.json()
else:
return
def get_artist_name(self) -> str:
"""Will return the name of the artist currently playing
Returns:
str: Name of the artist currently playing
"""
json = self.get_track_info()
return json["item"]["album"]["artists"][0]["name"]
def get_name_and_cover(self) -> tuple:
"""Will return the name and cover of the currently playing track
Returns:
tuple: (name of the currently playing track, cover of the currently playing track)
"""
json = self.get_track_info()
return (json["item"]["name"], json["item"]["album"]["images"][0]["url"])
def get_ids_for_recomendation(self) -> tuple:
"""Will return the tuple containing the ids recommended songs based on the currently playing
Returns:
tuple: IDs of the recommended songs
"""
artists_id = []
json = self.get_track_info()
for i in range(len(json["item"]["artists"])):
artists_id.append(json["item"]["artists"][i]["id"])
if len(artists_id) > 5:
artists_id = [artists_id[i] for i in range(4)]
# id of artists on track, id of the track
return (artists_id, [json["item"]["id"]])
def get_uris_recomended_songs(self, num_of_songs: int = 20) -> list:
"""Will convert the tuple of recommended ids to uris
Args:
num_of_songs (int, optional): Number of songs you want to convert. Defaults to 20.
Raises:
ValueError: If the number of songs you want to convert is greater than 100
Returns:
list: Contains the recommended uris
"""
# the 100 here is API limit
if int(num_of_songs) > 100:
raise ValueError(
"Number of recommended songs cant be more than 100")
artists_ids, song_id = self.get_ids_for_recomendation()
recom = spotify.recommendations(
artist_ids=artists_ids, track_ids=song_id, limit=num_of_songs
).tracks
return [recom_song.uri for recom_song in recom]
def add_recomended_to_queue(self, device: str, num_of_songs: int = 20) -> None:
"""Will add recommended song to the queue
Args:
device (str, optional): Name of the device you want to add songs to queue to. Defaults to "MYPC".
num_of_songs (int, optional): Number of devices you want to add to queue. Defaults to 20.
"""
uris = self.get_uris_recomended_songs(num_of_songs)
device_id = Device.get_id(device)
        for uri in uris:
            spotify.playback_queue_add(uri=uri, device_id=device_id)
        return
if __name__ == "__main__":
print(CurrentTrack().get_track_info())
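# A further usage sketch (assumes a Spotify Connect device named "MYPC" is
# available; the device name is illustrative):
#
#   CurrentTrack().add_recomended_to_queue(device="MYPC", num_of_songs=10)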
|
the-stack_106_29500 | # Step 1 & 2 are borrowed from View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition repo
# Step 1: Get raw skeleton data
import logging
import os
import os.path as osp
import pickle
import numpy as np
from tqdm import tqdm
def get_raw_bodies_data(skes_path, ske_name, frames_drop_skes, frames_drop_logger):
"""
Get raw bodies data from a skeleton sequence.
Each body's data is a dict that contains the following keys:
- joints: raw 3D joints positions. Shape: (num_frames x 25, 3)
- colors: raw 2D color locations. Shape: (num_frames, 25, 2)
- interval: a list which stores the frame indices of this body.
- motion: motion amount (only for the sequence with 2 or more bodyIDs).
Return:
a dict for a skeleton sequence with 3 key-value pairs:
- name: the skeleton filename.
- data: a dict which stores raw data of each body.
- num_frames: the number of valid frames.
"""
ske_file = osp.join(skes_path, ske_name + '.skeleton')
assert osp.exists(ske_file), 'Error: Skeleton file %s not found' % ske_file
# Read all data from .skeleton file into a list (in string format)
# print('Reading data from %s' % ske_file[-29:])
with open(ske_file, 'r') as fr:
str_data = fr.readlines()
num_frames = int(str_data[0].strip('\r\n'))
frames_drop = []
bodies_data = dict()
valid_frames = -1 # 0-based index
current_line = 1
for f in range(num_frames):
num_bodies = int(str_data[current_line].strip('\r\n'))
current_line += 1
if num_bodies == 0: # no data in this frame, drop it
frames_drop.append(f) # 0-based index
continue
valid_frames += 1
joints = np.zeros((num_bodies, 25, 3), dtype=np.float32)
colors = np.zeros((num_bodies, 25, 2), dtype=np.float32)
for b in range(num_bodies):
bodyID = str_data[current_line].strip('\r\n').split()[0]
current_line += 1
num_joints = int(str_data[current_line].strip('\r\n')) # 25 joints
current_line += 1
for j in range(num_joints):
temp_str = str_data[current_line].strip('\r\n').split()
joints[b, j, :] = np.array(temp_str[:3], dtype=np.float32)
colors[b, j, :] = np.array(temp_str[5:7], dtype=np.float32)
current_line += 1
if bodyID not in bodies_data: # Add a new body's data
body_data = dict()
body_data['joints'] = joints[b] # ndarray: (25, 3)
body_data['colors'] = colors[b, np.newaxis] # ndarray: (1, 25, 2)
body_data['interval'] = [valid_frames] # the index of the first frame
else: # Update an already existed body's data
body_data = bodies_data[bodyID]
# Stack each body's data of each frame along the frame order
body_data['joints'] = np.vstack((body_data['joints'], joints[b]))
body_data['colors'] = np.vstack((body_data['colors'], colors[b, np.newaxis]))
pre_frame_idx = body_data['interval'][-1]
body_data['interval'].append(pre_frame_idx + 1) # add a new frame index
bodies_data[bodyID] = body_data # Update bodies_data
num_frames_drop = len(frames_drop)
assert num_frames_drop < num_frames, \
'Error: All frames data (%d) of %s is missing or lost' % (num_frames, ske_name)
if num_frames_drop > 0:
frames_drop_skes[ske_name] = np.array(frames_drop, dtype=np.int)
frames_drop_logger.info('{}: {} frames missed: {}\n'.format(ske_name, num_frames_drop,
frames_drop))
# Calculate motion (only for the sequence with 2 or more bodyIDs)
if len(bodies_data) > 1:
for body_data in bodies_data.values():
body_data['motion'] = np.sum(np.var(body_data['joints'], axis=0))
return {'name': ske_name, 'data': bodies_data, 'num_frames': num_frames - num_frames_drop}
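# Illustrative sketch of the dict returned by get_raw_bodies_data() (values are
# made up; shapes follow the docstring above):
#
#   {
#       'name': 'S001C001P001R001A001',
#       'data': {
#           '72057594037931101': {            # bodyID -> body_data
#               'joints':   ...,              # ndarray, shape (num_valid_frames * 25, 3)
#               'colors':   ...,              # ndarray, shape (num_valid_frames, 25, 2)
#               'interval': [0, 1, 2, ...],   # valid frame indices for this body
#               'motion':   0.85,             # only present with 2+ bodyIDs
#           },
#       },
#       'num_frames': 103,                    # num_frames - num_frames_drop
#   }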
def get_raw_skes_data():
# # save_path = './data'
# # skes_path = '/data/pengfei/NTU/nturgb+d_skeletons/'
# stat_path = osp.join(save_path, 'statistics')
#
# skes_name_file = osp.join(stat_path, 'skes_available_name.txt')
# save_data_pkl = osp.join(save_path, 'raw_skes_data.pkl')
# frames_drop_pkl = osp.join(save_path, 'frames_drop_skes.pkl')
#
# frames_drop_logger = logging.getLogger('frames_drop')
# frames_drop_logger.setLevel(logging.INFO)
# frames_drop_logger.addHandler(logging.FileHandler(osp.join(save_path, 'frames_drop.log')))
# frames_drop_skes = dict()
skes_name = np.load(skes_name_file)
# skes_name = np.array(skes_name_file, dtype=str)
num_files = skes_name.size
print('Found %d available skeleton files.' % num_files)
raw_skes_data = []
frames_cnt = np.zeros(num_files, dtype=np.int)
for (idx, ske_name) in enumerate(tqdm(skes_name)):
bodies_data = get_raw_bodies_data(skes_path, ske_name, frames_drop_skes, frames_drop_logger)
raw_skes_data.append(bodies_data)
frames_cnt[idx] = bodies_data['num_frames']
# if (idx + 1) % 1000 == 0:
# print('Processed: %.2f%% (%d / %d)' % \
# (100.0 * (idx + 1) / num_files, idx + 1, num_files))
with open(save_data_pkl, 'wb') as fw:
pickle.dump(raw_skes_data, fw, pickle.HIGHEST_PROTOCOL)
np.savetxt(osp.join(save_path, 'raw_data', 'frames_cnt.txt'), frames_cnt, fmt='%d')
print('Saved raw bodies data into %s' % save_data_pkl)
print('Total frames: %d' % np.sum(frames_cnt))
with open(frames_drop_pkl, 'wb') as fw:
pickle.dump(frames_drop_skes, fw, pickle.HIGHEST_PROTOCOL)
save_path = './'
skes_path = '/home/archive/yekenan/code/TC_LAAU_BD/data/nturaw/nturgb+d_skeletons/'
stat_path = osp.join(save_path, 'data/nturaw')
def file_name(file_dir, exts=('skeleton',)):
L = []
for dirpath, dirnames, filenames in os.walk(file_dir):
for file in filenames:
if str.lower(os.path.splitext(file)[1][1:]) in exts:
L.append(os.path.join(dirpath, file))
return L
# skes_name_file = file_name(skes_path)
# skes_names = np.array(skes_name_file)
# np.savetxt(osp.join(stat_path, 'skes_available_name.txt'), skes_names)
if not osp.exists('./raw_data'):
os.makedirs('./raw_data')
skes_name_file = osp.join(stat_path, 'skes_available_name.npy')
save_data_pkl = osp.join(save_path, 'raw_data', 'raw_skes_data.pkl')
frames_drop_pkl = osp.join(save_path, 'raw_data', 'frames_drop_skes.pkl')
frames_drop_logger = logging.getLogger('frames_drop')
frames_drop_logger.setLevel(logging.INFO)
frames_drop_logger.addHandler(logging.FileHandler(osp.join(save_path, 'raw_data', 'frames_drop.log')))
# frames_drop_skes = dict()
# get_raw_skes_data()
#
# with open(frames_drop_pkl, 'wb') as fw:
# pickle.dump(frames_drop_skes, fw, pickle.HIGHEST_PROTOCOL)
# Step 2: Denoise the skeleton data
root_path = './'
raw_data_file = osp.join(root_path, 'raw_data', 'raw_skes_data.pkl')
save_path = osp.join(root_path, 'denoised_data')
if not osp.exists(save_path):
os.mkdir(save_path)
rgb_ske_path = osp.join(save_path, 'rgb+ske')
if not osp.exists(rgb_ske_path):
os.mkdir(rgb_ske_path)
actors_info_dir = osp.join(save_path, 'actors_info')
if not osp.exists(actors_info_dir):
os.mkdir(actors_info_dir)
missing_count = 0
noise_len_thres = 11
noise_spr_thres1 = 0.8
noise_spr_thres2 = 0.69754
noise_mot_thres_lo = 0.089925
noise_mot_thres_hi = 2
noise_len_logger = logging.getLogger('noise_length')
noise_len_logger.setLevel(logging.INFO)
noise_len_logger.addHandler(logging.FileHandler(osp.join(save_path, 'noise_length.log')))
noise_len_logger.info('{:^20}\t{:^17}\t{:^8}\t{}'.format('Skeleton', 'bodyID', 'Motion', 'Length'))
noise_spr_logger = logging.getLogger('noise_spread')
noise_spr_logger.setLevel(logging.INFO)
noise_spr_logger.addHandler(logging.FileHandler(osp.join(save_path, 'noise_spread.log')))
noise_spr_logger.info('{:^20}\t{:^17}\t{:^8}\t{:^8}'.format('Skeleton', 'bodyID', 'Motion', 'Rate'))
noise_mot_logger = logging.getLogger('noise_motion')
noise_mot_logger.setLevel(logging.INFO)
noise_mot_logger.addHandler(logging.FileHandler(osp.join(save_path, 'noise_motion.log')))
noise_mot_logger.info('{:^20}\t{:^17}\t{:^8}'.format('Skeleton', 'bodyID', 'Motion'))
fail_logger_1 = logging.getLogger('noise_outliers_1')
fail_logger_1.setLevel(logging.INFO)
fail_logger_1.addHandler(logging.FileHandler(osp.join(save_path, 'denoised_failed_1.log')))
fail_logger_2 = logging.getLogger('noise_outliers_2')
fail_logger_2.setLevel(logging.INFO)
fail_logger_2.addHandler(logging.FileHandler(osp.join(save_path, 'denoised_failed_2.log')))
missing_skes_logger = logging.getLogger('missing_frames')
missing_skes_logger.setLevel(logging.INFO)
missing_skes_logger.addHandler(logging.FileHandler(osp.join(save_path, 'missing_skes.log')))
missing_skes_logger.info('{:^20}\t{}\t{}'.format('Skeleton', 'num_frames', 'num_missing'))
missing_skes_logger1 = logging.getLogger('missing_frames_1')
missing_skes_logger1.setLevel(logging.INFO)
missing_skes_logger1.addHandler(logging.FileHandler(osp.join(save_path, 'missing_skes_1.log')))
missing_skes_logger1.info('{:^20}\t{}\t{}\t{}\t{}\t{}'.format('Skeleton', 'num_frames', 'Actor1',
'Actor2', 'Start', 'End'))
missing_skes_logger2 = logging.getLogger('missing_frames_2')
missing_skes_logger2.setLevel(logging.INFO)
missing_skes_logger2.addHandler(logging.FileHandler(osp.join(save_path, 'missing_skes_2.log')))
missing_skes_logger2.info('{:^20}\t{}\t{}\t{}'.format('Skeleton', 'num_frames', 'Actor1', 'Actor2'))
def denoising_by_length(ske_name, bodies_data):
"""
Denoising data based on the frame length for each bodyID.
    Filter out any bodyID whose length is less than or equal to the predefined threshold.
"""
noise_info = str()
new_bodies_data = bodies_data.copy()
for (bodyID, body_data) in new_bodies_data.items():
length = len(body_data['interval'])
if length <= noise_len_thres:
noise_info += 'Filter out: %s, %d (length).\n' % (bodyID, length)
noise_len_logger.info('{}\t{}\t{:.6f}\t{:^6d}'.format(ske_name, bodyID,
body_data['motion'], length))
del bodies_data[bodyID]
if noise_info != '':
noise_info += '\n'
return bodies_data, noise_info
def get_valid_frames_by_spread(points):
"""
Find the valid (or reasonable) frames (index) based on the spread of X and Y.
:param points: joints or colors
"""
num_frames = points.shape[0]
valid_frames = []
for i in range(num_frames):
x = points[i, :, 0]
y = points[i, :, 1]
if (x.max() - x.min()) <= noise_spr_thres1 * (y.max() - y.min()): # 0.8
valid_frames.append(i)
return valid_frames
def denoising_by_spread(ske_name, bodies_data):
"""
Denoising data based on the spread of Y value and X value.
    Filter out any bodyID whose ratio of noisy frames is higher than the predefined
threshold.
bodies_data: contains at least 2 bodyIDs
"""
noise_info = str()
denoised_by_spr = False # mark if this sequence has been processed by spread.
new_bodies_data = bodies_data.copy()
# for (bodyID, body_data) in bodies_data.items():
for (bodyID, body_data) in new_bodies_data.items():
if len(bodies_data) == 1:
break
valid_frames = get_valid_frames_by_spread(body_data['joints'].reshape(-1, 25, 3))
num_frames = len(body_data['interval'])
num_noise = num_frames - len(valid_frames)
if num_noise == 0:
continue
ratio = num_noise / float(num_frames)
motion = body_data['motion']
if ratio >= noise_spr_thres2: # 0.69754
del bodies_data[bodyID]
denoised_by_spr = True
noise_info += 'Filter out: %s (spread rate >= %.2f).\n' % (bodyID, noise_spr_thres2)
noise_spr_logger.info('%s\t%s\t%.6f\t%.6f' % (ske_name, bodyID, motion, ratio))
else: # Update motion
joints = body_data['joints'].reshape(-1, 25, 3)[valid_frames]
body_data['motion'] = min(motion, np.sum(np.var(joints.reshape(-1, 3), axis=0)))
noise_info += '%s: motion %.6f -> %.6f\n' % (bodyID, motion, body_data['motion'])
# TODO: Consider removing noisy frames for each bodyID
if noise_info != '':
noise_info += '\n'
return bodies_data, noise_info, denoised_by_spr
def denoising_by_motion(ske_name, bodies_data, bodies_motion):
"""
    Filter out any bodyID whose motion is outside the predefined interval
"""
# Sort bodies based on the motion, return a list of tuples
# bodies_motion = sorted(bodies_motion.items(), key=lambda x, y: cmp(x[1], y[1]), reverse=True)
bodies_motion = sorted(bodies_motion.items(), key=lambda x: x[1], reverse=True)
# Reserve the body data with the largest motion
denoised_bodies_data = [(bodies_motion[0][0], bodies_data[bodies_motion[0][0]])]
noise_info = str()
for (bodyID, motion) in bodies_motion[1:]:
if (motion < noise_mot_thres_lo) or (motion > noise_mot_thres_hi):
noise_info += 'Filter out: %s, %.6f (motion).\n' % (bodyID, motion)
noise_mot_logger.info('{}\t{}\t{:.6f}'.format(ske_name, bodyID, motion))
else:
denoised_bodies_data.append((bodyID, bodies_data[bodyID]))
if noise_info != '':
noise_info += '\n'
return denoised_bodies_data, noise_info
def denoising_bodies_data(bodies_data):
"""
Denoising data based on some heuristic methods, not necessarily correct for all samples.
Return:
denoised_bodies_data (list): tuple: (bodyID, body_data).
"""
ske_name = bodies_data['name']
bodies_data = bodies_data['data']
# Step 1: Denoising based on frame length.
bodies_data, noise_info_len = denoising_by_length(ske_name, bodies_data)
if len(bodies_data) == 1: # only has one bodyID left after step 1
return bodies_data.items(), noise_info_len
# Step 2: Denoising based on spread.
bodies_data, noise_info_spr, denoised_by_spr = denoising_by_spread(ske_name, bodies_data)
if len(bodies_data) == 1:
return bodies_data.items(), noise_info_len + noise_info_spr
bodies_motion = dict() # get body motion
for (bodyID, body_data) in bodies_data.items():
bodies_motion[bodyID] = body_data['motion']
# Sort bodies based on the motion
# bodies_motion = sorted(bodies_motion.items(), key=lambda x, y: cmp(x[1], y[1]), reverse=True)
bodies_motion = sorted(bodies_motion.items(), key=lambda x: x[1], reverse=True)
denoised_bodies_data = list()
for (bodyID, _) in bodies_motion:
denoised_bodies_data.append((bodyID, bodies_data[bodyID]))
return denoised_bodies_data, noise_info_len + noise_info_spr
# TODO: Consider denoising further by integrating motion method
# if denoised_by_spr: # this sequence has been denoised by spread
# bodies_motion = sorted(bodies_motion.items(), lambda x, y: cmp(x[1], y[1]), reverse=True)
# denoised_bodies_data = list()
# for (bodyID, _) in bodies_motion:
# denoised_bodies_data.append((bodyID, bodies_data[bodyID]))
# return denoised_bodies_data, noise_info
# Step 3: Denoising based on motion
# bodies_data, noise_info = denoising_by_motion(ske_name, bodies_data, bodies_motion)
# return bodies_data, noise_info
def get_one_actor_points(body_data, num_frames):
"""
Get joints and colors for only one actor.
For joints, each frame contains 75 X-Y-Z coordinates.
For colors, each frame contains 25 x 2 (X, Y) coordinates.
"""
joints = np.zeros((num_frames, 75), dtype=np.float32)
colors = np.ones((num_frames, 1, 25, 2), dtype=np.float32) * np.nan
start, end = body_data['interval'][0], body_data['interval'][-1]
joints[start:end + 1] = body_data['joints'].reshape(-1, 75)
colors[start:end + 1, 0] = body_data['colors']
return joints, colors
def remove_missing_frames(ske_name, joints, colors):
"""
    Cut off missing frames in which all joint positions are 0s
For the sequence with 2 actors' data, also record the number of missing frames for
actor1 and actor2, respectively (for debug).
"""
num_frames = joints.shape[0]
num_bodies = colors.shape[1] # 1 or 2
if num_bodies == 2: # DEBUG
missing_indices_1 = np.where(joints[:, :75].sum(axis=1) == 0)[0]
missing_indices_2 = np.where(joints[:, 75:].sum(axis=1) == 0)[0]
cnt1 = len(missing_indices_1)
cnt2 = len(missing_indices_2)
start = 1 if 0 in missing_indices_1 else 0
end = 1 if num_frames - 1 in missing_indices_1 else 0
if max(cnt1, cnt2) > 0:
if cnt1 > cnt2:
info = '{}\t{:^10d}\t{:^6d}\t{:^6d}\t{:^5d}\t{:^3d}'.format(ske_name, num_frames,
cnt1, cnt2, start, end)
missing_skes_logger1.info(info)
else:
info = '{}\t{:^10d}\t{:^6d}\t{:^6d}'.format(ske_name, num_frames, cnt1, cnt2)
missing_skes_logger2.info(info)
# Find valid frame indices that the data is not missing or lost
# For two-subjects action, this means both data of actor1 and actor2 is missing.
valid_indices = np.where(joints.sum(axis=1) != 0)[0] # 0-based index
missing_indices = np.where(joints.sum(axis=1) == 0)[0]
num_missing = len(missing_indices)
if num_missing > 0: # Update joints and colors
joints = joints[valid_indices]
colors[missing_indices] = np.nan
global missing_count
missing_count += 1
missing_skes_logger.info('{}\t{:^10d}\t{:^11d}'.format(ske_name, num_frames, num_missing))
return joints, colors
def get_bodies_info(bodies_data):
bodies_info = '{:^17}\t{}\t{:^8}\n'.format('bodyID', 'Interval', 'Motion')
for (bodyID, body_data) in bodies_data.items():
start, end = body_data['interval'][0], body_data['interval'][-1]
bodies_info += '{}\t{:^8}\t{:f}\n'.format(bodyID, str([start, end]), body_data['motion'])
return bodies_info + '\n'
def get_two_actors_points(bodies_data):
"""
Get the first and second actor's joints positions and colors locations.
# Arguments:
bodies_data (dict): 3 key-value pairs: 'name', 'data', 'num_frames'.
bodies_data['data'] is also a dict, while the key is bodyID, the value is
the corresponding body_data which is also a dict with 4 keys:
- joints: raw 3D joints positions. Shape: (num_frames x 25, 3)
- colors: raw 2D color locations. Shape: (num_frames, 25, 2)
- interval: a list which records the frame indices.
- motion: motion amount
# Return:
joints, colors.
"""
ske_name = bodies_data['name']
label = int(ske_name[-2:])
num_frames = bodies_data['num_frames']
bodies_info = get_bodies_info(bodies_data['data'])
bodies_data, noise_info = denoising_bodies_data(bodies_data) # Denoising data
bodies_info += noise_info
bodies_data = list(bodies_data)
if len(bodies_data) == 1: # Only left one actor after denoising
if label >= 50: # DEBUG: Denoising failed for two-subjects action
fail_logger_2.info(ske_name)
bodyID, body_data = bodies_data[0]
joints, colors = get_one_actor_points(body_data, num_frames)
bodies_info += 'Main actor: %s' % bodyID
else:
if label < 50: # DEBUG: Denoising failed for one-subject action
fail_logger_1.info(ske_name)
joints = np.zeros((num_frames, 75), dtype=np.float32)
colors = np.ones((num_frames, 2, 25, 2), dtype=np.float32) * np.nan
bodyID, actor1 = bodies_data[0] # the 1st actor with largest motion
start1, end1 = actor1['interval'][0], actor1['interval'][-1]
joints[start1:end1 + 1, :] = actor1['joints'].reshape(-1, 75)
colors[start1:end1 + 1, 0] = actor1['colors']
actor1_info = '{:^17}\t{}\t{:^8}\n'.format('Actor1', 'Interval', 'Motion') + \
'{}\t{:^8}\t{:f}\n'.format(bodyID, str([start1, end1]), actor1['motion'])
del bodies_data[0]
# actor2_info = '{:^17}\t{}\t{:^8}\n'.format('Actor2', 'Interval', 'Motion')
# start2, end2 = [0, 0] # initial interval for actor2 (virtual)
# while len(bodies_data) > 0:
# bodyID, actor = bodies_data[0]
# start, end = actor['interval'][0], actor['interval'][-1]
# if min(end1, end) - max(start1, start) <= 0: # no overlap with actor1
# joints[start:end + 1, :75] = actor['joints'].reshape(-1, 75)
# colors[start:end + 1, 0] = actor['colors']
# actor1_info += '{}\t{:^8}\t{:f}\n'.format(bodyID, str([start, end]), actor['motion'])
# # Update the interval of actor1
# start1 = min(start, start1)
# end1 = max(end, end1)
# elif min(end2, end) - max(start2, start) <= 0: # no overlap with actor2
# joints[start:end + 1, 75:] = actor['joints'].reshape(-1, 75)
# colors[start:end + 1, 1] = actor['colors']
# actor2_info += '{}\t{:^8}\t{:f}\n'.format(bodyID, str([start, end]), actor['motion'])
# # Update the interval of actor2
# start2 = min(start, start2)
# end2 = max(end, end2)
# del bodies_data[0]
bodies_info += ('\n' + actor1_info) # + '\n' + actor2_info)
with open(osp.join(actors_info_dir, ske_name + '.txt'), 'w') as fw:
fw.write(bodies_info + '\n')
return joints, colors
def get_raw_denoised_data():
"""
Get denoised data (joints positions and color locations) from raw skeleton sequences.
For each frame of a skeleton sequence, an actor's 3D positions of 25 joints represented
by an 2D array (shape: 25 x 3) is reshaped into a 75-dim vector by concatenating each
3-dim (x, y, z) coordinates along the row dimension in joint order. Each frame contains
two actor's joints positions constituting a 150-dim vector. If there is only one actor,
then the last 75 values are filled with zeros. Otherwise, select the main actor and the
second actor based on the motion amount. Each 150-dim vector as a row vector is put into
a 2D numpy array where the number of rows equals the number of valid frames. All such
2D arrays are put into a list and finally the list is serialized into a cPickle file.
For the skeleton sequence which contains two or more actors (mostly corresponds to the
last 11 classes), the filename and actors' information are recorded into log files.
For better understanding, also generate RGB+skeleton videos for visualization.
"""
with open(raw_data_file, 'rb') as fr: # load raw skeletons data
raw_skes_data = pickle.load(fr)
num_skes = len(raw_skes_data)
print('Found %d available skeleton sequences.' % num_skes)
raw_denoised_joints = []
raw_denoised_colors = []
frames_cnt = []
for (idx, bodies_data) in enumerate(tqdm(raw_skes_data)):
ske_name = bodies_data['name']
# print('Processing %s' % ske_name)
num_bodies = len(bodies_data['data'])
if num_bodies == 1: # only 1 actor
num_frames = bodies_data['num_frames']
body_data = list(bodies_data['data'].values())[0]
joints, colors = get_one_actor_points(body_data, num_frames)
else: # more than 1 actor, select two main actors
joints, colors = get_two_actors_points(bodies_data)
# Remove missing frames
joints, colors = remove_missing_frames(ske_name, joints, colors)
num_frames = joints.shape[0] # Update
# Visualize selected actors' skeletons on RGB videos.
raw_denoised_joints.append(joints)
raw_denoised_colors.append(colors)
frames_cnt.append(num_frames)
# if (idx + 1) % 1000 == 0:
# print('Processed: %.2f%% (%d / %d), ' % \
# (100.0 * (idx + 1) / num_skes, idx + 1, num_skes) + \
# 'Missing count: %d' % missing_count)
raw_skes_joints_pkl = osp.join(save_path, 'raw_denoised_joints.pkl')
with open(raw_skes_joints_pkl, 'wb') as f:
pickle.dump(raw_denoised_joints, f, pickle.HIGHEST_PROTOCOL)
raw_skes_colors_pkl = osp.join(save_path, 'raw_denoised_colors.pkl')
with open(raw_skes_colors_pkl, 'wb') as f:
pickle.dump(raw_denoised_colors, f, pickle.HIGHEST_PROTOCOL)
frames_cnt = np.array(frames_cnt, dtype=np.int)
np.savetxt(osp.join(save_path, 'frames_cnt.txt'), frames_cnt, fmt='%d')
print('Saved raw denoised positions of {} frames into {}'.format(np.sum(frames_cnt),
raw_skes_joints_pkl))
print('Found %d files that have missing data' % missing_count)
# get_raw_denoised_data()
# Step 3: Train-Test Split by Cross-View and Cross-Subject
root_path = './'
stat_path = './data/nturaw'
setup_file = osp.join(stat_path, 'setup.txt')
camera_file = osp.join(stat_path, 'camera.txt')
performer_file = osp.join(stat_path, 'performer.txt')
replication_file = osp.join(stat_path, 'replication.txt')
label_file = osp.join(stat_path, 'label.txt')
skes_name_file = osp.join(stat_path, 'skes_available_name.txt')
denoised_path = osp.join(root_path, 'denoised_data')
raw_skes_joints_pkl = osp.join(denoised_path, 'raw_denoised_joints.pkl')
frames_file = osp.join(denoised_path, 'frames_cnt.txt')
save_path = './'
with open(raw_skes_joints_pkl, 'rb') as fr:
skes_joints = pickle.load(fr)
print(len(skes_joints))
print(skes_joints[0].shape)
with open(raw_skes_joints_pkl, 'rb') as fr:
skes_joints = pickle.load(fr)
raw_data_path = osp.join(root_path, 'raw_data/raw_skes_data.pkl')
print(raw_data_path)
with open(raw_data_path, 'rb') as fr:
raw_skes_joints = pickle.load(fr)
print(len(skes_joints) == len(raw_skes_joints))
label = []
for d in raw_skes_joints:
label.append(int(d['name'][-3:]))
# print(label)
for i in range(len(skes_joints)):
if skes_joints[i].shape[1] != 75:
print("invalid", skes_joints[i].shape[0])
camera_file = osp.join(stat_path, 'camera.txt')
performer_file = osp.join(stat_path, 'performer.txt')
# camera = np.loadtxt(camera_file, dtype=np.int) # camera id: 1, 2, 3
# performer = np.loadtxt(performer_file, dtype=np.int) # subject id: 1~40
#
#
# print(sum(camera == 3))
#
# print(sum(performer==38))
import re
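# NTU skeleton filenames encode setup (S), camera (C), performer (P),
# replication (R) and action (A), e.g. 'S001C002P003R002A060'; the regex below
# captures these five numeric fields so the splits can key off setup/performer
# and use the action number as the label.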
# Cross Step
train_skel_crv = []
test_skel_crv = []
for i in tqdm(range(len(skes_joints))):
groups = re.match(r'S([0-9]+)C([0-9]+)P([0-9]+)R([0-9]+)A([0-9]+)', raw_skes_joints[i]['name'])
groups = groups.groups()
if int(groups[0]) % 2 == 1:
test_data = skes_joints[i]
test_label = int(groups[-1])
test_skel_crv.append({'input': test_data.tolist(), 'label': test_label})
else:
train_data = skes_joints[i]
train_label = int(groups[-1])
train_skel_crv.append({'input': train_data.tolist(), 'label': train_label})
print(len(train_skel_crv))
import pickle
with open("./data/ntu120/denoised_data/cross_step_data/raw_train_data.pkl", "wb") as fout:
pickle.dump(train_skel_crv, fout)
with open("./data/ntu120/denoised_data/cross_step_data/raw_test_data.pkl", "wb") as fout:
pickle.dump(test_skel_crv, fout)
# Cross Subject
tr_sub = set(
[1, 2, 4, 5, 8, 9, 13, 14, 15, 16, 17, 18, 19, 25, 27, 28, 31, 34, 35, 38, 45, 46, 47, 49, 50, 52, 53, 54, 55, 56,
57, 58, 59, 70, 74, 78, 80, 81, 82, 83, 84, 85, 86, 89, 91, 92, 93, 94, 95, 97, 98, 100, 103])
train_skel_crs = []
test_skel_crs = []
for i in tqdm(range(len(skes_joints))):
groups = re.match(r'S([0-9]+)C([0-9]+)P([0-9]+)R([0-9]+)A([0-9]+)', raw_skes_joints[i]['name'])
groups = groups.groups()
if int(groups[2]) not in tr_sub:
test_data = skes_joints[i]
test_label = int(groups[-1])
test_skel_crs.append({'input': test_data.tolist(), 'label': test_label})
else:
train_data = skes_joints[i]
train_label = int(groups[-1])
train_skel_crs.append({'input': train_data.tolist(), 'label': train_label})
print(len(train_skel_crs), len(test_skel_crs))
with open("./data/ntu120/denoised_data/cross_subject_data/raw_train_data.pkl", "wb") as fout:
pickle.dump(train_skel_crs, fout)
with open("./data/ntu120/denoised_data/cross_subject_data/raw_test_data.pkl", "wb") as fout:
pickle.dump(test_skel_crs, fout)
|
the-stack_106_29502 | # -*- coding: utf-8 -*-
import pprint
import requests
from argh.decorators import arg
from lain_cli.auth import SSOAccess, authorize_and_check, get_auth_header
from lain_cli.utils import (TwoLevelCommandBase, check_phase, get_domain,
lain_yaml)
from lain_sdk.util import error, info
class MaintainerCommands(TwoLevelCommandBase):
'''
    allows maintaining the maintainers of an app in lain; only used when auth is enabled
'''
@classmethod
def subcommands(self):
return [self.show, self.add, self.delete]
@classmethod
def namespace(self):
return "maintainer"
@classmethod
def help_message(self):
return "maintainer operation: including add, delete or show maintainers of the app"
@classmethod
@arg('phase', help="lain cluster phase id, can be added by lain config save")
def show(cls, phase, username=None):
"""
        show the maintainer list, or a specific maintainer's details, of the app in the given phase
username: sso username
"""
check_phase(phase)
yml = lain_yaml(ignore_prepare=True)
authorize_and_check(phase, yml.appname)
auth_header = get_auth_header(SSOAccess.get_token(phase))
console = "console.%s" % get_domain(phase)
maintainer_url = "http://%s/api/v1/repos/%s/maintainers/" % (
console, yml.appname)
if username:
maintainer_url += '%s/' % username
show_response = requests.get(maintainer_url, headers=auth_header)
if show_response.status_code < 300:
info("maintainer detail:")
pprint.pprint(show_response.json())
else:
error("shit happened : %s" % show_response.text)
@classmethod
@arg('phase', help="lain cluster phase id, can be added by lain config save")
@arg('username', help="sso username to add")
@arg('role', help='role to assigned to the user', choices=['admin', 'normal'])
def add(cls, phase, username, role):
"""
        add a maintainer for the given phase
"""
check_phase(phase)
yml = lain_yaml(ignore_prepare=True)
authorize_and_check(phase, yml.appname)
auth_header = get_auth_header(SSOAccess.get_token(phase))
console = "console.%s" % get_domain(phase)
maintainer_url = "http://%s/api/v1/repos/%s/maintainers/" % (
console, yml.appname)
payload = {"username": username,
"role": role}
add_response = requests.post(maintainer_url, headers=auth_header, json=payload)
if add_response.status_code < 300:
info("add successfully.")
else:
error("shit happened : %s" % add_response.text)
@classmethod
@arg('phase', help="lain cluster phase id, can be added by lain config save")
@arg('username', help="sso username")
def delete(cls, phase, username):
"""
        delete a maintainer for the given phase
"""
check_phase(phase)
yml = lain_yaml(ignore_prepare=True)
authorize_and_check(phase, yml.appname)
auth_header = get_auth_header(SSOAccess.get_token(phase))
console = "console.%s" % get_domain(phase)
maintainer_url = "http://%s/api/v1/repos/%s/maintainers/%s/" % (
console, yml.appname, username)
delete_response = requests.delete(maintainer_url, headers=auth_header)
if delete_response.status_code < 300:
info("delete successfully.")
else:
error("shit happened : %s" % delete_response.text)
|
the-stack_106_29503 | from enum import Enum, auto
from BoschShcPy.base import Base
from BoschShcPy.base_list import BaseList
from BoschShcPy.client import ErrorException
from BoschShcPy.device import Device, status_rx
class operation_state(Enum):
SYSTEM_DISARMED = auto()
SYSTEM_ARMING = auto()
SYSTEM_ARMED = auto()
MUTE_ALARM = auto()
operation_state_rx = {'SYSTEM_DISARMED': operation_state.SYSTEM_DISARMED,
'SYSTEM_ARMING': operation_state.SYSTEM_ARMING,
'SYSTEM_ARMED': operation_state.SYSTEM_ARMED,
'MUTE_ALARM': operation_state.MUTE_ALARM}
operation_state_tx = {operation_state.SYSTEM_DISARMED: 'SYSTEM_DISARMED',
operation_state.SYSTEM_ARMING: 'SYSTEM_ARMING',
operation_state.SYSTEM_ARMED: 'SYSTEM_ARMED',
operation_state.MUTE_ALARM: 'MUTE_ALARM'}
class IntrusionDetection(Base):
def __init__(self, client, device, id, name=None):
self.client = client
self.device = device
self.id = id
self.name = name
self.value = 'SYSTEM_DISARMED'
self.actuators = []
self.triggers = []
self.remainingTimeUntilArmed = None
self.armActivationDelayTime = None
self.alarmActivationDelayTime = None
# self.update()
@property
def get_state(self):
"""Retrieve state of Intrusion Detection."""
return operation_state_rx[self.value]
@property
def get_name(self):
"""Retrieve name of Intrusion Detection"""
return self.name
@property
def get_id(self):
"""Retrieve id of Intrusion Detection"""
return self.id
@property
def get_device(self):
"""Retrieve device of Intrusion Detection"""
return self.device
def update(self):
try:
self.load( self.client.request("smarthome/devices/intrusionDetectionSystem/services/IntrusionDetectionControl/state") )
return True
except ErrorException:
return False
def setOperationState(self, operation_state):
"""Set a new operation state of the intrusion detection system."""
"""Operation states are SYSTEM_ARMED, SYSTEM_DISARMED, MUTE_ALARM"""
if operation_state == operation_state.SYSTEM_ARMING:
return False
data={'@type':'intrusionDetectionControlState', 'value': operation_state_tx[operation_state]}
try:
self.client.request("smarthome/devices/intrusionDetectionSystem/services/IntrusionDetectionControl/state", method='PUT', params=data)
# self.value = operation_state_tx[operation_state]
# self.update()
return True
except ErrorException:
return False
def disarm(self):
"""Disarm the intrusion detection system."""
return self.setOperationState(operation_state.SYSTEM_DISARMED)
def arm(self):
"""Arm the intrusion detection system."""
return self.setOperationState(operation_state.SYSTEM_ARMED)
def arm_activation_delay(self, seconds):
"""Set the arm activation delay time the intrusion detection system."""
        if self.get_state == operation_state.SYSTEM_ARMING:
return False
data = {'@type': 'intrusionDetectionControlState',
'armActivationDelayTime': seconds}
try:
self.client.request("smarthome/devices/intrusionDetectionSystem/services/IntrusionDetectionControl/state", method='PUT', params=data)
return True
except ErrorException:
return False
def arm_instant(self):
"""Set an instant arm activation of the intrusion detection system."""
        if self.get_state == operation_state.SYSTEM_ARMING:
return False
delay_time = self.armActivationDelayTime
self.arm_activation_delay(1)
self.arm()
self.arm_activation_delay(delay_time)
def mute_alarm(self):
"""Mute the alarm of the intrusion detection system."""
return self.setOperationState(operation_state.MUTE_ALARM)
def trigger(self):
# not implemented yet
print("Trigger alarm simulation")
def __str__(self):
return "\n".join([
'Intrusion Detection:',
' Value : %s' % self.value,
' Actuators : %s' % self.actuators,
' Triggers : %s' % self.triggers,
' remainingTimeUntilArmed : %s' % self.remainingTimeUntilArmed,
' armActivationDelayTime : %s' % self.armActivationDelayTime,
' alarmActivationDelayTime : %s' % self.alarmActivationDelayTime,
])
def register_polling(self, client, callback):
client.register_device(self, callback)
def initialize_intrusion_detection(client, device_list):
"""Helper function to initialize intrusion detection given from a device list."""
for item in device_list.items:
if item.deviceModel == "INTRUSION_DETECTION_SYSTEM":
return IntrusionDetection(client, item, item.id, item.name)
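# A minimal usage sketch (assumes a configured BoschShcPy client and a device
# list already fetched from the controller; the flow is illustrative):
#
#   intrusion = initialize_intrusion_detection(client, device_list)
#   if intrusion is not None and intrusion.update():
#       print(intrusion)
#       intrusion.arm()      # SYSTEM_ARMING, then SYSTEM_ARMED after the delay
#       ...
#       intrusion.disarm()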
|
the-stack_106_29505 | class Win:
'''
    This class defines a type of win in the game. This is the list to be parsed when running
    the AI in order to find the win that is most probable for the player. The
    characteristics of the class are similar to those of the player class
'''
def __init__(self):
        #I haven't included monopolies and year of plenty since they are very variable
#and their use changes based on the iterations of the game
self.devCardDict = {
"knight": 0,
"victoryPoint": 0,
"roadBuilding": 0
}
#amount of resources required to get that victory
self.resourceDict = {
"wheat": 0,
"sheep": 0,
"brick": 0,
"ore": 0,
"wood": 0
}
#number of Settlements and Cities required in that win
self.numSettlements = 0
self.numCities = 0
        #bool based on whether or not a win required largest Army
self.largestArmy = False
#bool based on whether or not win requires longest road as well as minimum roads
#needed to win
self.longestRoad = False
self.minRoadsNeeded = 0
#total victory points in this win
self.totalVicPts = 0
self.totalCost = 0
#a number to help keep track of this exact win
self.number = 0
'''
when you print the string you will get the number
'''
def __str__(self):
return str(self.number)
'''
defines the type of win/ what the player uses to win the game.
uses an input of similar dummy dictionaries and bools from the main AI function
'''
def setWinType(self, vicPts, army , road ,setts, cits):
#if army has 2 vic points, then the win requires largest army
if army == 2:
self.devCardDict["knight"] = 3
self.largestArmy = True
        #if road has 2 vic points, then the win requires longest road
if road == 2:
self.longestRoad = True
#copies over the number of dev card victory points
self.devCardDict["victoryPoint"] = vicPts
#number of Settlements and cities in the win, cities divided by two, since they
#are worth double
self.numSettlements = setts
self.numCities = int(cits/2)
#set total victory points in this win
self.totalVicPts = vicPts + army + road +setts + cits
'''
sets the information for roads
    including minimum roads needed and how many road building cards were used
'''
def setRoads(self, minR, rBuilding):
self.minRoadsNeeded = minR
self.devCardDict["roadBuilding"] = rBuilding
'''
    sets the arbitrary counter, just to help with access and debugging in the future
'''
def setNumber(self, counter):
self.number = counter
'''
    calculate the total resources required to accomplish that particular winning strategy
returns the dictionary of the resources
'''
def calcResourceCost(self):
#sum of all dev cards required in the win
devSum = self.devCardDict["knight"] + self.devCardDict["victoryPoint"] + self.devCardDict["roadBuilding"]
        #adds necessary resources for all the dev cards
for i in range(0, devSum):
self.resourceDict["sheep"] = self.resourceDict["sheep"] + 1
self.resourceDict["ore"] = self.resourceDict["ore"] + 1
self.resourceDict["wheat"] = self.resourceDict["wheat"] + 1
        #add the necessary resources for all the settlements plus the cities minus 2
#this is because all cities were settlements once
#also we start with 2 settlements that are free
for i in range(0, self.numSettlements + self.numCities - 2):
self.resourceDict["sheep"] = self.resourceDict["sheep"] + 1
self.resourceDict["wood"] = self.resourceDict["wood"] + 1
self.resourceDict["brick"] = self.resourceDict["brick"] + 1
self.resourceDict["wheat"] = self.resourceDict["wheat"] + 1
        #add necessary resources for all the Cities
for i in range(0, self.numCities):
self.resourceDict["ore"] = self.resourceDict["ore"] + 3
self.resourceDict["wheat"] = self.resourceDict["wheat"] + 2
        #add necessary resources for all roads built
#minus road building since we accounted for that earlier
#minus the 2 roads we started with
for i in range(0, self.minRoadsNeeded - (2*self.devCardDict["roadBuilding"]) -2):
self.resourceDict["wood"] = self.resourceDict["wood"] + 1
self.resourceDict["brick"] = self.resourceDict["brick"] + 1
return self.resourceDict
'''
calculates the total cost by summing the resource cards required for the victory
'''
def calcTotalCost(self):
for key,val in self.resourceDict.items():
self.totalCost = self.totalCost + val
'''
for i in range(0, self.devCardDict["knight"]):
self.totalCost = self.totalCost -1
'''
return self.totalCost
'''
calculates the victory points in this particular solution
'''
def vicPoints(self):
self.totalVicPts = self.devCardDict["victoryPoint"] + self.numSettlements
self.totalVicPts = self.totalVicPts + (2* self.numCities)
if self.longestRoad == True:
self.totalVicPts = self.totalVicPts + 2
if self.largestArmy == True:
self.totalVicPts = self.totalVicPts + 2
'''
    makes sure that victory points are at least 10
'''
def sanityCheck(self):
if (self.totalVicPts >=10):
return True
return False
'''
    defines the less-than operator used for comparisons and sorting
'''
def __lt__(self, other):
return self.totalCost < other.totalCost
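# A minimal usage sketch (illustrative values, not part of the game engine):
#
#   win = Win()
#   win.setNumber(1)
#   win.setWinType(vicPts=1, army=2, road=0, setts=3, cits=4)  # 1+2+0+3+4 = 10
#   win.setRoads(minR=6, rBuilding=0)
#   win.calcResourceCost()
#   print(win.calcTotalCost(), win.sanityCheck())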
|
the-stack_106_29507 | import requests
from lxml import etree
url="https://www.w3schools.com/xml/simple.xml"
response = requests.get(url).content
tree = etree.XML(response)
print(tree)
print(type(tree))
#iter through all elements found in Tree
for element in tree.iter():
print("%s - %s" % (element.tag, element.text))
#iter through selected elements found in Tree
for element in tree.iter('calories','name'):
print("%s - %s" % (element.tag, element.text))
|
the-stack_106_29508 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1.schema.predict.instance",
manifest={"ImageClassificationPredictionInstance",},
)
class ImageClassificationPredictionInstance(proto.Message):
r"""Prediction input format for Image Classification.
Attributes:
content (str):
The image bytes or GCS URI to make the
prediction on.
mime_type (str):
The MIME type of the content of the image.
Only the images in below listed MIME types are
supported. - image/jpeg
- image/gif
- image/png
- image/webp
- image/bmp
- image/tiff
- image/vnd.microsoft.icon
"""
content = proto.Field(proto.STRING, number=1,)
mime_type = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
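# A minimal construction sketch (the values below are illustrative):
#
#   instance = ImageClassificationPredictionInstance(
#       content="gs://my-bucket/cat.jpg",   # or base64-encoded image bytes
#       mime_type="image/jpeg",
#   )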
|
the-stack_106_29510 | # -*- coding: utf-8 -*-
# Copyright (c) 2018 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from base64 import b64decode
from calvin.requests import calvinresponse
from calvin.utilities.calvin_callback import CalvinCB
from calvin.utilities.issuetracker import IssueTracker
from calvin.utilities.calvinlogger import get_logger
from calvin.utilities.security import security_enabled
_log = get_logger(__name__)
def authentication_decorator(func):
@functools.wraps(func)
def wrapper(self, handle, connection, match, data, hdr):
def _unauthorized_response():
self.send_response(handle, connection, None, status=calvinresponse.UNAUTHORIZED)
def _handle_authentication_decision(authentication_decision):
_log.debug("control_apis.authentication::_handle_authentication_decision, authentication_decision={}".format(authentication_decision))
if not authentication_decision:
_unauthorized_response()
return
try:
self.security.check_security_policy(
CalvinCB(_handle_policy_decision),
element_type="control_interface",
element_value=arguments['func'].__name__
)
except Exception as exc:
_log.exception("Failed to check security policy, exc={}".format(exc))
_unauthorized_response()
def _handle_policy_decision(access_decision):
if not access_decision:
_unauthorized_response()
return
# Yay! We made it!
func(self, handle, connection, match, data, hdr)
#
# Exit early if security disabled
#
if not security_enabled():
func(self, handle, connection, match, data, hdr)
return
#
# Verify the request against credentials and policy
#
credentials = None
arguments={'func':func, 'self':self, 'handle':handle, 'connection':connection, 'match':match, 'data':data, 'hdr':hdr}
try:
if 'authorization' in hdr:
                # Parse "Basic <base64>" by splitting on the first space;
                # str.strip('Basic ') would also strip legitimate leading and
                # trailing base64 characters from the credentials.
                cred = b64decode(hdr['authorization'].split(' ', 1)[-1]).split(':')
                credentials = {'user': cred[0], 'password': cred[1]}
except TypeError as err:
_log.error("_verify_permission: Missing or wrongly formatted credentials in request header")
# Continue without credentials, policy might allow access
try:
self.security.authenticate_subject(credentials, callback=CalvinCB(_handle_authentication_decision))
except Exception as exc:
_log.exception("Failed to authenticate the subject, exc={}".format(exc))
_unauthorized_response()
return wrapper
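# A minimal usage sketch (the handler name and class are illustrative; the
# decorator expects Calvin's control-API handler calling convention):
#
#   class ControlAPI(object):
#       @authentication_decorator
#       def handle_get_actors(self, handle, connection, match, data, hdr):
#           ...  # reached only if authentication and policy checks pass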
|
the-stack_106_29513 | '''
lookup_tab.py: implementation of the "Look up records" tab
Copyright
---------
Copyright (c) 2021-2022 by the California Institute of Technology. This code
is open-source software released under a 3-clause BSD license. Please see the
file "LICENSE" for more information.
'''
from commonpy.data_utils import unique, pluralized, flattened
from commonpy.exceptions import Interrupted
from commonpy.file_utils import exists, readable
from commonpy.interrupt import wait, interrupt, interrupted, reset_interrupts
import json
from pprint import pformat
from pywebio.input import input, select, checkbox, radio
from pywebio.input import NUMBER, TEXT, input_update, input_group
from pywebio.output import put_text, put_markdown, put_row, put_html
from pywebio.output import toast, popup, close_popup, put_buttons, put_button, put_error
from pywebio.output import use_scope, set_scope, clear, remove, put_warning
from pywebio.output import put_success, put_info, put_table, put_grid, span
from pywebio.output import put_tabs, put_image, put_scrollable, put_code, put_link
from pywebio.output import put_processbar, set_processbar, put_loading
from pywebio.output import put_column, put_scope, clear_scope, get_scope
from pywebio.pin import pin, pin_wait_change, put_input, put_actions
from pywebio.pin import put_textarea, put_radio, put_checkbox, put_select
from pywebio.session import run_js, eval_js
from sidetrack import set_debug, log
import threading
from foliage.base_tab import FoliageTab
from foliage.export import export_records
from foliage.folio import Folio, RecordKind, IdKind, TypeKind
from foliage.folio import unique_identifiers
from foliage.ui import confirm, notify, user_file, stop_processbar
from foliage.ui import tell_success, tell_warning, tell_failure
from foliage.ui import note_info, note_warn, note_error, PROGRESS_BOX
# Tab definition class.
# .............................................................................
class LookupTab(FoliageTab):
def contents(self):
return {'title': 'Look up records', 'content': tab_contents()}
def pin_watchers(self):
return {}
# Tab layout.
# .............................................................................
def tab_contents():
log(f'generating lookup tab contents')
return [
put_grid([[
put_markdown('Input one or more item barcode, item id, item hrid,'
+ ' instance id, instance hrid, instance accession'
+ ' number, loan id, loan hrid, user id, or user'
+ ' barcode below, or upload a file containing them.'),
put_button('Upload', outline = True,
onclick = lambda: load_file()).style('text-align: right'),
]], cell_widths = 'auto 100px'),
put_textarea('textbox_find', rows = 4),
put_grid([[
put_radio('select_kind', inline = True,
label = 'Kind of record to retrieve:',
options = [ ('Item', RecordKind.ITEM, True),
('Holdings', RecordKind.HOLDINGS),
('Instance', RecordKind.INSTANCE),
('Loan', RecordKind.LOAN),
('User', RecordKind.USER)]),
put_checkbox("open_loans", inline = True,
options = [('Search open loans only', True, True)],
help_text = ('When searching for loans (and users,'
+ ' based on loans), limit searches to'
+ ' open loans only. Deselect'
+ ' to search all loans.')),
]], cell_widths = '54% 46%'),
put_grid([[
put_grid([[
put_text('Format in which to display records:'),
],[
put_radio('show_raw', inline = True,
options = [('Summary', 'summary', True),
('Raw data', 'json')]),
]]),
put_checkbox("inventory_api", inline = True,
options = [('Use inventory API for items and instances',
True, True)],
help_text = ("FOLIO's Inventory API shows more fields but"
+ ' some values are computed. Deselect to'
+ ' get pure records from the storage API.')),
]], cell_widths = '54% 46%'),
put_row([
put_button('Look up records', onclick = lambda: do_find()),
put_text(''), # Adds a column, pushing next item to the right.
put_button('Clear', outline = True,
onclick = lambda: clear_tab()).style('text-align: right')
])
]
# Tab implementation.
# .............................................................................
_interrupted = False
_running = False
_last_textbox = ''
_last_results = {}
_last_kind = None
_last_inventory_api = True
_last_open_loans = True
_location_map = None
def load_file():
log(f'user requesting file upload')
if (contents := user_file('Upload a file containing identifiers')):
pin.textbox_find = contents
def init_location_map():
global _location_map
if _location_map is None:
folio = Folio()
_location_map = {x.data['id']:x.data['name']
for x in folio.types(TypeKind.LOCATION)}
def inputs_are_unchanged():
global _last_textbox
global _last_kind
global _last_inventory_api
global _last_open_loans
unchanged = (pin.textbox_find == _last_textbox
and pin.select_kind == _last_kind
and pin.inventory_api == _last_inventory_api
and pin.open_loans == _last_open_loans)
log(f'field values are considered {"unchanged" if unchanged else "changed"}')
return unchanged
def clear_tab():
    global _last_textbox
    global _last_inventory_api
    global _last_open_loans
log(f'clearing tab')
clear('output')
pin.textbox_find = ''
pin.inventory_api = [True]
_last_textbox = ''
_last_inventory_api = [True]
_last_open_loans = [True]
def stop():
'''Stop an ongoing lookup by setting the _interrupted flag.'''
global _interrupted
global _last_textbox
log(f'stopping')
_interrupted = True
_last_textbox = ''
stop_processbar()
interrupt()
with use_scope('output'):
tell_warning('**Stopping** ...')
def reset():
# Reset to state where we can run new operations.
global _interrupted
_interrupted = False
reset_interrupts()
def enable_lookup_button(state):
'''Enable the "look up records" button if True, disable if False.'''
action = 'removeClass' if state else 'addClass'
eval_js(f'''$("button:contains('Look up records')").{action}("disabled-button");''')
def wait_if_running():
'''Check if the run state is running; if it is, wait until it changes.'''
# The _running variable is set by do_find() when it's finished, and it's
# set even if it gets interrupted. This is what we cue from.
global _running
if not _running:
return
enable_lookup_button(False)
stop()
# Wait in case an ongoing lookup is running, but don't wait forever.
wait_count = 10
while _running and wait_count > 0:
# If the user clicks multiple times rapidly, the exception raised for
# interrupts starts to cascade. Wrap with a try-except to avoid this.
try:
wait(1)
wait_count -= 1
except Interrupted:
continue
enable_lookup_button(True)
# Summary of the basic flow of control:
#
# User clicks "look up records", thus invoking do_find().
# We show progress bar & stop button while lookup is running.
# Possible scenarios:
# 1) process finishes normally
# 2) user clicks stop button
# 3) user clicks "look up records" button while lookup is running
def do_find():
global _last_results
global _last_textbox
global _last_kind
global _last_inventory_api
global _last_open_loans
global _location_map
global _interrupted
global _running
log('do_find invoked')
wait_if_running()
reset()
# Normally we'd want to find out if they input any identifiers, but I want
# to detect *any* change to the input box, so this is a lower-level test.
if not pin.textbox_find.strip():
note_error('Please input at least one barcode or other id.')
return
identifiers = unique_identifiers(pin.textbox_find)
if not identifiers:
note_error('The input does not appear to contain FOLIO identifiers.')
return
reuse_results = False
if inputs_are_unchanged() and user_wants_reuse():
reuse_results = True
else:
_last_results = {}
_last_textbox = pin.textbox_find
_last_kind = pin.select_kind
_last_inventory_api = pin.inventory_api
_last_open_loans = pin.open_loans
kind_wanted = pin.select_kind
steps = len(identifiers) + 1
folio = Folio()
init_location_map()
total_found = 0
with use_scope('output', clear = True):
put_grid([[
put_scope('current_activity', [
put_markdown(f'_Certain lookups take a long time. Please be patient._'
).style('color: DarkOrange; margin-bottom: 0')]),
], [
put_processbar('bar', init = 1/steps).style('margin-top: 11px'),
put_button('Stop', outline = True, color = 'danger',
onclick = lambda: stop()).style('text-align: right'),
]], cell_widths = '85% 15%').style(PROGRESS_BOX)
# The staff want to see location names, so we need to get the mapping.
_running = True
for count, id in enumerate(identifiers, start = 2):
if _interrupted:
break
try:
# Figure out what kind of identifier we were given.
id_kind = folio.id_kind(id)
if id_kind is IdKind.UNKNOWN:
tell_failure(f'Unrecognized identifier: **{id}**.')
continue
if reuse_results:
records = _last_results.get(id)
else:
records = folio.related_records(id, id_kind, kind_wanted,
pin.inventory_api, pin.open_loans)
_last_results[id] = records
if not records or len(records) == 0:
tell_failure(f'No {kind_wanted} record(s) found for {id_kind} **{id}**.')
continue
# Report the results & how we got them.
source = 'storage'
if pin.inventory_api and kind_wanted in ['item', 'instance']:
source = 'inventory'
this = pluralized(kind_wanted + f' {source} record', records, True)
how = f'by searching for {id_kind} **{id}**.'
tell_success(f'Found {this} {how}')
show_index = (len(records) > 1)
for index, record in enumerate(records, start = 1):
print_record(record, id, index, show_index, pin.show_raw == 'json')
total_found += len(records)
except Interrupted as ex:
log('stopping due to interruption')
_interrupted = True
except Exception as ex:
import traceback
log('Exception info: ' + str(ex) + '\n' + traceback.format_exc())
tell_failure(f'Error: ' + str(ex))
stop_processbar()
return
finally:
if not _interrupted:
set_processbar('bar', count/steps)
stop_processbar()
clear_scope('current_activity')
if _interrupted:
tell_warning('**Stopped**.')
else:
summary = (f'Found {total_found} {kind_wanted} records by looking up '
+ pluralized('unique identifier', identifiers, True)
+ '.')
put_grid([[
put_markdown(summary).style('margin-top: 6px'),
put_button('Export', outline = True,
onclick = lambda: do_export(_last_results, kind_wanted),
).style('text-align: right')
]]).style('margin: 1.5em 17px auto 17px')
_running = False
def field(record, field_name, subfield_name = None, list_joiner = ', '):
if field_name not in record.data:
return ''
if subfield_name:
if subfield_name not in record.data[field_name]:
return ''
value = record.data[field_name][subfield_name]
else:
value = record.data[field_name]
if isinstance(value, list) and list_joiner:
return list_joiner.join(str(x) for x in value)
else:
return str(value)
def location(record, field_name):
global _location_map
if field_name not in record.data:
return ''
location_data = record.data[field_name]
if isinstance(location_data, dict):
if 'name' in location_data:
return f'{location_data["name"]} ({location_data["id"]})'
else:
return location_data["id"]
elif location_data and location_data in _location_map:
return f'{_location_map[location_data]} ({location_data})'
return '(unknown location)'
def notes(record, field_name):
if field_name not in record.data:
return ''
notes = record.data[field_name]
if isinstance(notes, str):
return notes
elif isinstance(notes, list):
if len(notes) == 0:
return ''
elif isinstance(notes[0], dict):
return '\n'.join(n['note'] for n in notes)
else:
return '\n'.join(str(note) for note in notes)
else:
return notes
def print_record(record, identifier, index, show_index, show_raw):
log(f'printing {record.kind} record {record.id}')
if show_index:
put_markdown(f'{record.kind.title()} record #{index}:')
if show_raw:
put_code(pformat(record.data, indent = 2))
elif record.kind is RecordKind.ITEM:
# Caution: left-hand values contain nonbreaking spaces (invisible here).
if 'title' in record.data:
# Inventory record version.
put_table([
['Title' , field(record, 'title')],
['Barcode' , field(record, 'barcode')],
['Call number' , field(record, 'callNumber')],
[f'{record.kind.title()} id' , field(record, 'id')],
['Effective location' , location(record, 'effectiveLocation')],
['Permanent location' , location(record, 'permanentLocation')],
['Status' , field(record, 'status', 'name')],
['Tags' , field(record, 'tags', 'tagsList')],
['Notes' , notes(record, 'notes')],
['HRID' , field(record, 'hrid')],
['Created' , field(record, 'metadata', 'createdDate')],
['Updated' , field(record, 'metadata', 'updatedDate')],
]).style('font-size: 90%; margin: auto 17px 1.5em 17px')
else:
# Storage record version.
put_table([
['Barcode' , field(record, 'barcode')],
['Call number' , field(record, 'itemLevelCallNumber')],
[f'{record.kind.title()} id' , field(record, 'id')],
['Effective location' , location(record, 'effectiveLocationId')],
['Permanent location' , location(record, 'permanentLocationId')],
['Tags' , field(record, 'tags', 'tagsList')],
['Notes' , notes(record, 'notes')],
['HRID' , field(record, 'hrid')],
['Created' , field(record, 'metadata', 'createdDate')],
['Updated' , field(record, 'metadata', 'updatedDate')],
]).style('font-size: 90%; margin: auto 17px 1.5em 17px')
elif record.kind is RecordKind.INSTANCE:
# Caution: left-hand values contain nonbreaking spaces (invisible here).
if field(record, 'classifications'):
call_number = record.data['classifications'][0]['classificationNumber']
else:
call_number = ''
if 'tags' in record.data:
put_table([
['Title' , field(record, 'title')],
['Call number' , call_number],
[f'{record.kind.title()} id' , field(record, 'id')],
['Tags' , field(record, 'tags', 'tagsList')],
['Notes' , notes(record, 'notes')],
['HRID' , field(record, 'hrid')],
['Created' , field(record, 'metadata', 'createdDate')],
['Updated' , field(record, 'metadata', 'updatedDate')],
]).style('font-size: 90%; margin: auto 17px 1.5em 17px')
else:
put_table([
['Title' , field(record, 'title')],
['Call number' , call_number],
[f'{record.kind.title()} id' , field(record, 'id')],
['HRID' , field(record, 'hrid')],
['Notes' , notes(record, 'notes')],
['Created' , field(record, 'metadata', 'createdDate')],
['Updated' , field(record, 'metadata', 'updatedDate')],
]).style('font-size: 90%; margin: auto 17px 1.5em 17px')
elif record.kind is RecordKind.HOLDINGS:
# Caution: left-hand values contain nonbreaking spaces (invisible here).
if 'effectiveLocationId' in record.data:
put_table([
[f'{record.kind.title()} id' , field(record, 'id')],
['HRID' , field(record, 'hrid')],
['Holdings type id' , field(record, 'holdingsTypeId')],
['Instance id' , field(record, 'instanceId')],
['Effective location' , location(record, 'effectiveLocationId')],
['Permanent location' , location(record, 'permanentLocationId')],
['Created' , field(record, 'metadata', 'createdDate')],
['Updated' , field(record, 'metadata', 'updatedDate')],
]).style('font-size: 90%; margin: auto 17px 1.5em 17px')
else:
put_table([
[f'{record.kind.title()} id' , field(record, 'id')],
['HRID' , field(record, 'hrid')],
['Holdings type id' , field(record, 'holdingsTypeId')],
['Instance id' , field(record, 'instanceId')],
['Effective location' , ''],
['Permanent location' , location(record, 'permanentLocationId')],
['Created' , field(record, 'metadata', 'createdDate')],
['Updated' , field(record, 'metadata', 'updatedDate')],
]).style('font-size: 90%; margin: auto 17px 1.5em 17px')
elif record.kind is RecordKind.USER:
# Caution: left-hand values contain nonbreaking spaces (invisible here).
put_table([
['Username' , field(record, 'username')],
['Barcode' , field(record, 'barcode')],
[f'{record.kind.title()} id' , field(record, 'id')],
['Patron group' , field(record, 'patronGroup')],
['Created' , field(record, 'metadata', 'createdDate')],
['Updated' , field(record, 'metadata', 'updatedDate')],
]).style('font-size: 90%; margin: auto 17px 1.5em 17px')
elif record.kind is RecordKind.LOAN:
if 'userId' in record.data:
put_table([
[f'{record.kind.title()} id' , field(record, 'id')],
['Status', field(record, 'status', 'name')],
['User id' , field(record, 'userId')],
['Item id' , field(record, 'itemId')],
['Loan date' , field(record, 'loanDate')],
['Due date' , field(record, 'dueDate')],
['Created' , field(record, 'metadata', 'createdDate')],
['Updated' , field(record, 'metadata', 'updatedDate')],
]).style('font-size: 90%; margin: auto 17px 1.5em 17px')
else:
put_table([
[f'{record.kind.title()} id' , field(record, 'id')],
['Status', field(record, 'status', 'name')],
['User id' , ''],
['Item id' , field(record, 'itemId')],
['Loan date' , field(record, 'loanDate')],
['Due date' , field(record, 'dueDate')],
['Created' , field(record, 'metadata', 'createdDate')],
['Updated' , field(record, 'metadata', 'updatedDate')],
]).style('font-size: 90%; margin: auto 17px 1.5em 17px')
def user_wants_reuse():
event = threading.Event()
answer = False
def clk(val):
nonlocal answer
answer = val
event.set()
pins = [
put_text('The list of identifiers and the kind of record to retrieve'
+ ' are unchanged from the previous lookup. Should the results'
+ ' be reused, or should the identifiers be looked up again?'),
put_html('<br>'),
put_buttons([
{'label': 'Reuse the results', 'value': True},
{'label': 'Search again', 'value': False, 'color': 'secondary'},
], onclick = clk).style('float: left')
]
popup(title = 'Should results be reused?', content = pins, closable = False)
event.wait()
close_popup()
wait(0.5) # Give time for popup to go away.
return answer
def do_export(results, record_kind):
log(f'exporting {record_kind} {pluralized("record", results, True)}')
# Results is a dictionary; each value is a list of records. Unwind it.
all_records = [item for value in results.values() for item in value]
export_records(all_records, record_kind)
the-stack_106_29515 | import flask
from flask import request, jsonify
app = flask.Flask(__name__)
app.config["DEBUG"] = True
# Create some test data for our catalog in the form of a list of dictionaries.
books = [
{'id': 0,
'title': 'A Fire Upon the Deep',
'author': 'Vernor Vinge',
'first_sentence': 'The coldsleep itself was dreamless.',
'year_published': '1992'},
{'id': 1,
'title': 'The Ones Who Walk Away From Omelas',
'author': 'Ursula K. Le Guin',
'first_sentence': 'With a clamor of bells that set the swallows soaring, the Festival of Summer came to the city Omelas, bright-towered by the sea.',
'published': '1973'},
{'id': 2,
'title': 'Dhalgren',
'author': 'Samuel R. Delany',
'first_sentence': 'to wound the autumnal city.',
'published': '1975'}
]
@app.route('/', methods=['GET'])
def home():
return '''<h1>Distant Reading Archive</h1>
<p>A prototype API for distant reading of science fiction novels.</p>'''
# A route to return all of the available entries in our catalog.
@app.route('/api/v1/resources/books/all', methods=['GET'])
def api_all():
return jsonify(books)
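# Example request once the server is running (host/port assumed to be Flask's
# defaults, http://127.0.0.1:5000):
#
#   curl http://127.0.0.1:5000/api/v1/resources/books/all
#
# This returns the `books` list above as JSON.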
app.run()
the-stack_106_29516 | """Platform for interfacing to Tentalux lighting fixtures.
light, scene, camera
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/tentalux/
"""
import logging
import voluptuous as vol
import requests
import json
import time
from threading import Thread
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_NAME, EVENT_HOMEASSISTANT_STOP)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import discovery
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
TENTALUX_CONTROLLER = 'tentalux_controller'
TENTALUX_DEVICES = 'tentalux_devices'
DOMAIN = 'tentalux'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
}),
}, extra=vol.ALLOW_EXTRA)
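# A minimal configuration.yaml entry matching CONFIG_SCHEMA above (the host and
# port values here are placeholders, not real defaults):
#
#   tentalux:
#     host: 192.168.1.50
#     port: 8080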
POLLING_FREQ = .5
def setup(hass, base_config):
"""Start Tentalux platform."""
config = base_config.get(DOMAIN)
host = config[CONF_HOST]
port = config[CONF_PORT]
name = 'tentalux'
controller = TentaluxController(host, port)
controller.connect()
devs = {'light': [], 'scene': [], 'camera': []}
# One light for every tentacle
for arm in range(controller.arms):
dev_name = '%s arm %d' % (name, arm)
devs['light'].append((dev_name, arm))
# Create scenes for each pose and movement
for pose in controller.get_poses():
devs['scene'].append(('%s %s' % (name, pose), pose))
# Create camera
devs['camera'].append(('%s camera' % name, controller.get_camera_url()))
hass.data[TENTALUX_CONTROLLER] = controller
hass.data[TENTALUX_DEVICES] = devs
for component in devs.keys():
discovery.load_platform(hass, component, DOMAIN, None, base_config)
def cleanup(event):
controller.close()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, cleanup)
return True
class TentaluxController(Thread):
"""Interface between HASS and Tentalux."""
arms = None
ARBs = None
def __init__(self, host, port):
"""Host and port of Tentalux."""
Thread.__init__(self)
self._host = host
self._port = port
self._url = '%s:%d' % (host, port)
self._subscribers = []
self._running = False
def connect(self):
"""Start the status polling."""
self.status()
self.start()
def run(self):
self._running = True
while self._running:
self.status()
time.sleep(POLLING_FREQ)
def subscribe(self, device):
"""Add a device to subscribe to events."""
self._subscribers.append(device)
    def control_some(self, data=None):
        """Send a command to Tentalux and get status."""
        if data is None:
            data = [{}]  # avoid a mutable default argument
        response = requests.post(self._url + '/controlsome', data={'data': json.dumps(data)})
        self._process_state(response)
    def status(self):
        """Request status."""
        response = requests.post(self._url + '/status')
        self._process_state(response)
def _process_state(self, response):
"""Process Tentalux state information."""
data = response.json()
self.ARBs = data['ARBs']
self.arms = len(self.ARBs)
# Tell all related components that data may have changed.
for sub in self._subscribers:
if sub.callback():
sub.schedule_update_ha_state()
def get_poses(self):
"""Query Tentalux poses."""
response = requests.get(self._url + '/q_poses')
data = response.json()
return data
def set_pose(self, pose):
"""Set a Tentalux pose (scene)."""
response = requests.post(self._url + '/s_pose', data = {'pose': pose})
self._process_state(response)
def get_camera_url(self):
"""URL of the camera image."""
return self._url + '/camera'
def close(self):
"""Close the connection."""
self._running = False
time.sleep(POLLING_FREQ)
class TentaluxDevice(Entity):
"""Base class of a tentalux device."""
def __init__(self, controller, name):
"""Controller and name of the device."""
self._name = name
self._controller = controller
async def async_added_to_hass(self):
"""Register callback."""
self.hass.async_add_job(self._controller.subscribe, self)
@property
def name(self):
"""Device name."""
return self._name
@property
def should_poll(self):
"""No need to poll."""
return False
def callback(self):
"""Run when device potentially changes state."""
return False
|
the-stack_106_29518 | #-*- coding:utf-8 -*-
import os
import time
import threading
import tkinter as tk
from tkinter import ttk
from tc_ui_front import *
import tkinter.filedialog as tkf
import tkinter.messagebox as tkm
def func_thrd_ExecuteCommand():
time.sleep(0.01)
text.delete(0.0,tk.END)
def handle_Input(event):
if event.keycode==13:
global lines
txt=text.get("0.0", "end")
tree.insert("",lines,text="" ,values=(txt,lines))
tree.yview_moveto(1)
lines=lines+1
thrd_once=threading.Thread(target=func_thrd_ExecuteCommand)
thrd_once.start()
def callRB():
global lines
    tree.insert("",lines,text="" ,values=(foo.get(),'train','text'))
tree.yview_moveto(1)
lines=lines+1
def filecallback():
    global lines
    #tkm.showinfo('', 'Please choose a configuration file')
    filename = tkf.askopenfilename()
    if filename!='':
        text.insert(0.0,filename)
        tree.insert("",lines,text="" ,values=(filename,'Selected file:',os.path.basename(filename)))
    else:
        tree.insert("",lines,text="" ,values=('Choose a file / enter text to predict','none','none'))
    tree.yview_moveto(1)
    lines=lines+1
def traincallback():
global lines
    tree.insert("",lines,text="" ,values=('training text','train','text'))
tree.yview_moveto(1)
lines=lines+1
items=tree.get_children()
tree.selection_set(items[len(items)-1])
def testcallback():
global lines
    tree.insert("",lines,text="" ,values=('test text','file','text'))
tree.yview_moveto(1)
lines=lines+1
def predcallback():
global lines
    tree.insert("",lines,text="" ,values=('prediction text','file','text'))
tree.yview_moveto(1)
lines=lines+1
if __name__ == "__main__":
window = tk.Tk()
window.title('my window')
window.geometry('500x500')
window.update()
window.rowconfigure(0,weight=1)
window.columnconfigure(0,weight=1)
text = tk.Text(window,height=1)
text.bind('<Key>',func=handle_Input)
text.pack(side='top',fill='x')
#text.grid(row=1,columnspan=4,sticky='ew')
frame=tk.Frame(window)
frame.pack(side='top',fill='x')
btn_file = tk.Button(frame, text ="...", command = filecallback)
btn_file.grid(row=0,column=0)
#btn_file.grid(row=1,column=4,sticky='we')
    btn_train = tk.Button(frame, text ="Train", command = traincallback)
    btn_train.grid(row=0,column=1)
    #btn_train.grid(row=2,column=2,sticky='we')
    btn_test = tk.Button(frame, text ="Test", command = testcallback)
    btn_test.grid(row=0,column=2)
    #btn_test.grid(row=2,column=3,sticky='we')
    btn_predict = tk.Button(frame, text ="Predict", command = predcallback)
btn_predict.grid(row=0,column=3)
#btn_predict.grid(row=2,column=4,sticky='we')
#frame.grid(row=2,column=1)
#frame.rowconfigure(0,weight=1)
#frame.columnconfigure(0,weight=1)
foo=tk.IntVar(window)
i=4
    for t, v in [('Convolutional neural network', 1), ('Naive Bayes', 2), ('Logistic regression', 3)]:
r = tk.Radiobutton(frame, text=t, value=v,variable=foo,command=callRB)
#r.grid(row=0,column=i,sticky='w')
r.grid(row=0,column=i)
i+=1
lines=0
foo.set(1)
h=window.winfo_height()-text.winfo_height()-frame.winfo_height()
tree=ttk.Treeview(window,show="headings",height=h,selectmode='browse')
tree["columns"]=("text","classification","other")
tree.column("text",width=int(window.winfo_width()*3/5))
tree.column("classification",width=int(window.winfo_width()/5))
tree.column("other",width=int(window.winfo_width()/5))
    tree.heading("text",text="Output 1",anchor = 'w')
    tree.heading("classification",anchor = 'w',text='Output 2')
    tree.heading("other",anchor = 'w',text='Output 3')
#tree.grid(row=4,columnspan=4,sticky='nsew')
vbar = ttk.Scrollbar(window,orient=tk.VERTICAL,command=tree.yview)
#vbar.grid(row=4,column=4,sticky='ns')
vbar.pack(side='right',fill='y')
tree.configure(yscrollcommand=vbar.set)
tree.pack(side='bottom',fill='both')
for j in range(100):
        tree.insert("",lines,text="" ,values=(lines,'file','text'))
lines=lines+1
window.mainloop()
|
the-stack_106_29520 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image operations for RaggedTensors."""
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import dispatch
@dispatch.dispatch_for_api(image_ops.resize_images_v2)
def resize_images_v2(images: ragged_tensor.RaggedTensor,
size,
method=image_ops.ResizeMethod.BILINEAR,
preserve_aspect_ratio=False,
antialias=False,
name=None):
"""RaggedTensor dispatcher for tf.image.resize (tf-v2)."""
with ops.name_scope(name, "RaggedResizeImages", [images, size]):
return _resize_images(
image_ops.resize_images_v2,
images,
size,
method=method,
preserve_aspect_ratio=preserve_aspect_ratio,
antialias=antialias)
@dispatch.dispatch_for_api(image_ops.resize_images)
def resize_images_v1(images: ragged_tensor.RaggedTensor,
size,
method=image_ops.ResizeMethodV1.BILINEAR,
align_corners=False,
preserve_aspect_ratio=False,
name=None):
"""RaggedTensor dispatcher for tf.image.resize (tf-v1)."""
with ops.name_scope(name, "RaggedResizeImages", [images, size]):
return _resize_images(
image_ops.resize_images,
images,
size,
method=method,
preserve_aspect_ratio=preserve_aspect_ratio,
align_corners=align_corners)
def _resize_images(resize_op, images, size, **kwargs):
"""RaggedTensor dispatcher for tf.image.resize."""
if images.shape.rank != 4:
raise ValueError(
"tf.image.resize: images.shape.rank must be 4 if images is ragged.")
# Determine the output shape (excluding the batch dimension).
static_batch_size = tensor_shape.dimension_value(images.shape[0])
size = ops.convert_to_tensor(size, dtypes.int32, "size")
size_as_shape = tensor_util.constant_value_as_shape(size).with_rank(2)
out_shape = size_as_shape + images.shape[-1:]
out_spec = tensor_spec.TensorSpec(out_shape, dtypes.float32)
def resize_one(image):
if isinstance(image, ragged_tensor.RaggedTensor):
image = image.to_tensor()
return resize_op(image, size, **kwargs)
def resize_with_map():
return map_fn.map_fn_v2(resize_one, images, fn_output_signature=out_spec)
def empty_result():
channels = array_ops.shape(images.flat_values)[-1:]
return array_ops.zeros(array_ops.concat([[0], size, channels], axis=0))
if static_batch_size == 0:
return empty_result()
elif static_batch_size is not None:
return resize_with_map()
else:
empty_batch = math_ops.equal(images.nrows(), 0)
return control_flow_ops.cond(empty_batch, empty_result, resize_with_map)
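# Rough usage sketch (not part of this module; shapes and values are purely
# illustrative). With the dispatchers above registered, tf.image.resize accepts
# a ragged batch of variable-size images and returns a dense batch:
#
#   images = tf.RaggedTensor.from_tensor(tf.zeros([2, 5, 7, 3]), ragged_rank=2)
#   resized = tf.image.resize(images, size=[32, 32])  # dense [2, 32, 32, 3]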
|
the-stack_106_29522 | ##########################################################
# pytorch-kaldi v.0.1
# Mirco Ravanelli, Titouan Parcollet
# Mila, University of Montreal
# October 2018
##########################################################
import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
from distutils.util import strtobool
import math
import json
# uncomment below if you want to use SRU
# and you need to install SRU: pip install sru[cuda].
# or you can install it from source code: https://github.com/taolei87/sru.
# import sru
class LayerNorm(nn.Module):
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(features))
self.beta = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
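# Note: this LayerNorm normalizes over the last dimension only, so it works on
# any input shaped (..., features), e.g. (time, batch, features) in the models below.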
def act_fun(act_type):
if act_type == "relu":
return nn.ReLU()
if act_type == "tanh":
return nn.Tanh()
if act_type == "sigmoid":
return nn.Sigmoid()
if act_type == "leaky_relu":
return nn.LeakyReLU(0.2)
if act_type == "elu":
return nn.ELU()
if act_type == "softmax":
return nn.LogSoftmax(dim=1)
if act_type == "linear":
        # initialized like this, but not used in forward!
return nn.LeakyReLU(1)
class MLP(nn.Module):
def __init__(self, options, inp_dim):
super(MLP, self).__init__()
self.input_dim = inp_dim
self.dnn_lay = list(map(int, options["dnn_lay"].split(",")))
self.dnn_drop = list(map(float, options["dnn_drop"].split(",")))
self.dnn_use_batchnorm = list(
map(strtobool, options["dnn_use_batchnorm"].split(",")))
self.dnn_use_laynorm = list(
map(strtobool, options["dnn_use_laynorm"].split(",")))
self.dnn_use_laynorm_inp = strtobool(options["dnn_use_laynorm_inp"])
self.dnn_use_batchnorm_inp = strtobool(
options["dnn_use_batchnorm_inp"])
self.dnn_act = options["dnn_act"].split(",")
self.wx = nn.ModuleList([])
self.bn = nn.ModuleList([])
self.ln = nn.ModuleList([])
self.act = nn.ModuleList([])
self.drop = nn.ModuleList([])
# input layer normalization
if self.dnn_use_laynorm_inp:
self.ln0 = LayerNorm(self.input_dim)
# input batch normalization
if self.dnn_use_batchnorm_inp:
self.bn0 = nn.BatchNorm1d(self.input_dim, momentum=0.05)
self.N_dnn_lay = len(self.dnn_lay)
current_input = self.input_dim
# Initialization of hidden layers
for i in range(self.N_dnn_lay):
# dropout
self.drop.append(nn.Dropout(p=self.dnn_drop[i]))
# activation
self.act.append(act_fun(self.dnn_act[i]))
add_bias = True
# layer norm initialization
self.ln.append(LayerNorm(self.dnn_lay[i]))
self.bn.append(nn.BatchNorm1d(self.dnn_lay[i], momentum=0.05))
if self.dnn_use_laynorm[i] or self.dnn_use_batchnorm[i]:
add_bias = False
# Linear operations
self.wx.append(
nn.Linear(current_input, self.dnn_lay[i], bias=add_bias))
# weight initialization
self.wx[i].weight = torch.nn.Parameter(
torch.Tensor(self.dnn_lay[i], current_input).uniform_(
-np.sqrt(0.01 / (current_input + self.dnn_lay[i])),
np.sqrt(0.01 / (current_input + self.dnn_lay[i])),
)
)
self.wx[i].bias = torch.nn.Parameter(torch.zeros(self.dnn_lay[i]))
current_input = self.dnn_lay[i]
self.out_dim = current_input
def forward(self, x):
# Applying Layer/Batch Norm
if bool(self.dnn_use_laynorm_inp):
x = self.ln0((x))
if bool(self.dnn_use_batchnorm_inp):
x = self.bn0((x))
for i in range(self.N_dnn_lay):
if self.dnn_use_laynorm[i] and not (self.dnn_use_batchnorm[i]):
x = self.drop[i](self.act[i](self.ln[i](self.wx[i](x))))
if self.dnn_use_batchnorm[i] and not (self.dnn_use_laynorm[i]):
x = self.drop[i](self.act[i](self.bn[i](self.wx[i](x))))
if self.dnn_use_batchnorm[i] == True and self.dnn_use_laynorm[i] == True:
x = self.drop[i](self.act[i](
self.bn[i](self.ln[i](self.wx[i](x)))))
if self.dnn_use_batchnorm[i] == False and self.dnn_use_laynorm[i] == False:
x = self.drop[i](self.act[i](self.wx[i](x)))
return x
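# Example of instantiating the MLP above outside pytorch-kaldi's config
# machinery (all option values are illustrative; normally they come from the
# [architecture] section of a .cfg file):
#
#   opts = {"dnn_lay": "1024,1024", "dnn_drop": "0.15,0.15",
#           "dnn_use_batchnorm": "True,True", "dnn_use_laynorm": "False,False",
#           "dnn_use_laynorm_inp": "False", "dnn_use_batchnorm_inp": "False",
#           "dnn_act": "relu,relu"}
#   net = MLP(opts, inp_dim=440)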
class LSTM_cudnn(nn.Module):
def __init__(self, options, inp_dim):
super(LSTM_cudnn, self).__init__()
self.input_dim = inp_dim
self.hidden_size = int(options["hidden_size"])
self.num_layers = int(options["num_layers"])
self.bias = bool(strtobool(options["bias"]))
self.batch_first = bool(strtobool(options["batch_first"]))
self.dropout = float(options["dropout"])
self.bidirectional = bool(strtobool(options["bidirectional"]))
self.lstm = nn.ModuleList(
[
nn.LSTM(
self.input_dim,
self.hidden_size,
self.num_layers,
bias=self.bias,
dropout=self.dropout,
bidirectional=self.bidirectional,
)
]
)
for name, param in self.lstm[0].named_parameters():
if 'weight_hh' in name:
if self.batch_first:
nn.init.orthogonal_(param)
elif 'bias' in name:
nn.init.zeros_(param)
self.out_dim = self.hidden_size + self.bidirectional * self.hidden_size
def forward(self, x):
if self.bidirectional:
h0 = torch.zeros(self.num_layers * 2, x.shape[1], self.hidden_size)
c0 = torch.zeros(self.num_layers * 2, x.shape[1], self.hidden_size)
else:
h0 = torch.zeros(self.num_layers, x.shape[1], self.hidden_size)
c0 = torch.zeros(self.num_layers, x.shape[1], self.hidden_size)
if x.is_cuda:
h0 = h0.cuda()
c0 = c0.cuda()
output, (hn, cn) = self.lstm[0](x, (h0, c0))
return output
class GRU_cudnn(nn.Module):
def __init__(self, options, inp_dim):
super(GRU_cudnn, self).__init__()
self.input_dim = inp_dim
self.hidden_size = int(options["hidden_size"])
self.num_layers = int(options["num_layers"])
self.bias = bool(strtobool(options["bias"]))
self.batch_first = bool(strtobool(options["batch_first"]))
self.dropout = float(options["dropout"])
self.bidirectional = bool(strtobool(options["bidirectional"]))
self.gru = nn.ModuleList(
[
nn.GRU(
self.input_dim,
self.hidden_size,
self.num_layers,
bias=self.bias,
dropout=self.dropout,
bidirectional=self.bidirectional,
)
]
)
for name, param in self.gru[0].named_parameters():
if 'weight_hh' in name:
nn.init.orthogonal_(param)
elif 'weight_ih' in name:
nn.init.xavier_uniform_(param)
elif 'bias' in name:
nn.init.zeros_(param)
self.out_dim = self.hidden_size + self.bidirectional * self.hidden_size
def forward(self, x):
if self.bidirectional:
h0 = torch.zeros(self.num_layers * 2, x.shape[1], self.hidden_size)
else:
h0 = torch.zeros(self.num_layers, x.shape[1], self.hidden_size)
if x.is_cuda:
h0 = h0.cuda()
output, hn = self.gru[0](x, h0)
return output
class RNN_cudnn(nn.Module):
def __init__(self, options, inp_dim):
super(RNN_cudnn, self).__init__()
self.input_dim = inp_dim
self.hidden_size = int(options["hidden_size"])
self.num_layers = int(options["num_layers"])
self.nonlinearity = options["nonlinearity"]
self.bias = bool(strtobool(options["bias"]))
self.batch_first = bool(strtobool(options["batch_first"]))
self.dropout = float(options["dropout"])
self.bidirectional = bool(strtobool(options["bidirectional"]))
self.rnn = nn.ModuleList(
[
nn.RNN(
self.input_dim,
self.hidden_size,
self.num_layers,
nonlinearity=self.nonlinearity,
bias=self.bias,
dropout=self.dropout,
bidirectional=self.bidirectional,
)
]
)
self.out_dim = self.hidden_size + self.bidirectional * self.hidden_size
def forward(self, x):
if self.bidirectional:
h0 = torch.zeros(self.num_layers * 2, x.shape[1], self.hidden_size)
else:
h0 = torch.zeros(self.num_layers, x.shape[1], self.hidden_size)
if x.is_cuda:
h0 = h0.cuda()
output, hn = self.rnn[0](x, h0)
return output
class LSTM(nn.Module):
def __init__(self, options, inp_dim):
super(LSTM, self).__init__()
# Reading parameters
self.input_dim = inp_dim
self.lstm_lay = list(map(int, options["lstm_lay"].split(",")))
self.lstm_drop = list(map(float, options["lstm_drop"].split(",")))
self.lstm_use_batchnorm = list(
map(strtobool, options["lstm_use_batchnorm"].split(",")))
self.lstm_use_laynorm = list(
map(strtobool, options["lstm_use_laynorm"].split(",")))
self.lstm_use_laynorm_inp = strtobool(options["lstm_use_laynorm_inp"])
self.lstm_use_batchnorm_inp = strtobool(
options["lstm_use_batchnorm_inp"])
self.lstm_act = options["lstm_act"].split(",")
self.lstm_orthinit = strtobool(options["lstm_orthinit"])
self.bidir = strtobool(options["lstm_bidir"])
self.use_cuda = strtobool(options["use_cuda"])
self.to_do = options["to_do"]
if self.to_do == "train":
self.test_flag = False
else:
self.test_flag = True
# List initialization
self.wfx = nn.ModuleList([]) # Forget
self.ufh = nn.ModuleList([]) # Forget
self.wix = nn.ModuleList([]) # Input
self.uih = nn.ModuleList([]) # Input
self.wox = nn.ModuleList([]) # Output
self.uoh = nn.ModuleList([]) # Output
self.wcx = nn.ModuleList([]) # Cell state
self.uch = nn.ModuleList([]) # Cell state
self.ln = nn.ModuleList([]) # Layer Norm
self.bn_wfx = nn.ModuleList([]) # Batch Norm
self.bn_wix = nn.ModuleList([]) # Batch Norm
self.bn_wox = nn.ModuleList([]) # Batch Norm
self.bn_wcx = nn.ModuleList([]) # Batch Norm
self.act = nn.ModuleList([]) # Activations
# Input layer normalization
if self.lstm_use_laynorm_inp:
self.ln0 = LayerNorm(self.input_dim)
# Input batch normalization
if self.lstm_use_batchnorm_inp:
self.bn0 = nn.BatchNorm1d(self.input_dim, momentum=0.05)
self.N_lstm_lay = len(self.lstm_lay)
current_input = self.input_dim
# Initialization of hidden layers
for i in range(self.N_lstm_lay):
# Activations
self.act.append(act_fun(self.lstm_act[i]))
add_bias = True
if self.lstm_use_laynorm[i] or self.lstm_use_batchnorm[i]:
add_bias = False
# Feed-forward connections
self.wfx.append(
nn.Linear(current_input, self.lstm_lay[i], bias=add_bias))
self.wix.append(
nn.Linear(current_input, self.lstm_lay[i], bias=add_bias))
self.wox.append(
nn.Linear(current_input, self.lstm_lay[i], bias=add_bias))
self.wcx.append(
nn.Linear(current_input, self.lstm_lay[i], bias=add_bias))
# Recurrent connections
self.ufh.append(
nn.Linear(self.lstm_lay[i], self.lstm_lay[i], bias=False))
self.uih.append(
nn.Linear(self.lstm_lay[i], self.lstm_lay[i], bias=False))
self.uoh.append(
nn.Linear(self.lstm_lay[i], self.lstm_lay[i], bias=False))
self.uch.append(
nn.Linear(self.lstm_lay[i], self.lstm_lay[i], bias=False))
if self.lstm_orthinit:
nn.init.orthogonal_(self.ufh[i].weight)
nn.init.orthogonal_(self.uih[i].weight)
nn.init.orthogonal_(self.uoh[i].weight)
nn.init.orthogonal_(self.uch[i].weight)
# batch norm initialization
self.bn_wfx.append(nn.BatchNorm1d(self.lstm_lay[i], momentum=0.05))
self.bn_wix.append(nn.BatchNorm1d(self.lstm_lay[i], momentum=0.05))
self.bn_wox.append(nn.BatchNorm1d(self.lstm_lay[i], momentum=0.05))
self.bn_wcx.append(nn.BatchNorm1d(self.lstm_lay[i], momentum=0.05))
self.ln.append(LayerNorm(self.lstm_lay[i]))
if self.bidir:
current_input = 2 * self.lstm_lay[i]
else:
current_input = self.lstm_lay[i]
self.out_dim = self.lstm_lay[i] + self.bidir * self.lstm_lay[i]
def forward(self, x):
# Applying Layer/Batch Norm
if bool(self.lstm_use_laynorm_inp):
x = self.ln0((x))
if bool(self.lstm_use_batchnorm_inp):
x_bn = self.bn0(x.view(x.shape[0] * x.shape[1], x.shape[2]))
x = x_bn.view(x.shape[0], x.shape[1], x.shape[2])
for i in range(self.N_lstm_lay):
# Initial state and concatenation
if self.bidir:
h_init = torch.zeros(2 * x.shape[1], self.lstm_lay[i])
x = torch.cat([x, flip(x, 0)], 1)
else:
h_init = torch.zeros(x.shape[1], self.lstm_lay[i])
            # Drop mask initialization (same mask for all time steps)
if self.test_flag == False:
drop_mask = torch.bernoulli(torch.Tensor(
h_init.shape[0], h_init.shape[1]).fill_(1 - self.lstm_drop[i]))
else:
drop_mask = torch.FloatTensor([1 - self.lstm_drop[i]])
if self.use_cuda:
h_init = h_init.cuda()
drop_mask = drop_mask.cuda()
# Feed-forward affine transformations (all steps in parallel)
wfx_out = self.wfx[i](x)
wix_out = self.wix[i](x)
wox_out = self.wox[i](x)
wcx_out = self.wcx[i](x)
            # Apply batch norm if needed (all steps in parallel)
if self.lstm_use_batchnorm[i]:
wfx_out_bn = self.bn_wfx[i](wfx_out.view(
wfx_out.shape[0] * wfx_out.shape[1], wfx_out.shape[2]))
wfx_out = wfx_out_bn.view(
wfx_out.shape[0], wfx_out.shape[1], wfx_out.shape[2])
wix_out_bn = self.bn_wix[i](wix_out.view(
wix_out.shape[0] * wix_out.shape[1], wix_out.shape[2]))
wix_out = wix_out_bn.view(
wix_out.shape[0], wix_out.shape[1], wix_out.shape[2])
wox_out_bn = self.bn_wox[i](wox_out.view(
wox_out.shape[0] * wox_out.shape[1], wox_out.shape[2]))
wox_out = wox_out_bn.view(
wox_out.shape[0], wox_out.shape[1], wox_out.shape[2])
wcx_out_bn = self.bn_wcx[i](wcx_out.view(
wcx_out.shape[0] * wcx_out.shape[1], wcx_out.shape[2]))
wcx_out = wcx_out_bn.view(
wcx_out.shape[0], wcx_out.shape[1], wcx_out.shape[2])
# Processing time steps
hiddens = []
ct = h_init
ht = h_init
for k in range(x.shape[0]):
# LSTM equations
ft = torch.sigmoid(wfx_out[k] + self.ufh[i](ht))
it = torch.sigmoid(wix_out[k] + self.uih[i](ht))
ot = torch.sigmoid(wox_out[k] + self.uoh[i](ht))
ct = it * self.act[i](wcx_out[k] + self.uch[i]
(ht)) * drop_mask + ft * ct
ht = ot * self.act[i](ct)
if self.lstm_use_laynorm[i]:
ht = self.ln[i](ht)
hiddens.append(ht)
# Stacking hidden states
h = torch.stack(hiddens)
# Bidirectional concatenations
if self.bidir:
h_f = h[:, 0: int(x.shape[1] / 2)]
h_b = flip(h[:, int(x.shape[1] / 2) : x.shape[1]].contiguous(), 0)
h = torch.cat([h_f, h_b], 2)
# Setup x for the next hidden layer
x = h
return x
class GRU(nn.Module):
def __init__(self, options, inp_dim):
super(GRU, self).__init__()
# Reading parameters
self.input_dim = inp_dim
self.gru_lay = list(map(int, options["gru_lay"].split(",")))
self.gru_drop = list(map(float, options["gru_drop"].split(",")))
self.gru_use_batchnorm = list(
map(strtobool, options["gru_use_batchnorm"].split(",")))
self.gru_use_laynorm = list(
map(strtobool, options["gru_use_laynorm"].split(",")))
self.gru_use_laynorm_inp = strtobool(options["gru_use_laynorm_inp"])
self.gru_use_batchnorm_inp = strtobool(
options["gru_use_batchnorm_inp"])
self.gru_orthinit = strtobool(options["gru_orthinit"])
self.gru_act = options["gru_act"].split(",")
self.bidir = strtobool(options["gru_bidir"])
self.use_cuda = strtobool(options["use_cuda"])
self.to_do = options["to_do"]
if self.to_do == "train":
self.test_flag = False
else:
self.test_flag = True
# List initialization
self.wh = nn.ModuleList([])
self.uh = nn.ModuleList([])
self.wz = nn.ModuleList([]) # Update Gate
self.uz = nn.ModuleList([]) # Update Gate
self.wr = nn.ModuleList([]) # Reset Gate
self.ur = nn.ModuleList([]) # Reset Gate
self.ln = nn.ModuleList([]) # Layer Norm
self.bn_wh = nn.ModuleList([]) # Batch Norm
self.bn_wz = nn.ModuleList([]) # Batch Norm
self.bn_wr = nn.ModuleList([]) # Batch Norm
self.act = nn.ModuleList([]) # Activations
# Input layer normalization
if self.gru_use_laynorm_inp:
self.ln0 = LayerNorm(self.input_dim)
# Input batch normalization
if self.gru_use_batchnorm_inp:
self.bn0 = nn.BatchNorm1d(self.input_dim, momentum=0.05)
self.N_gru_lay = len(self.gru_lay)
current_input = self.input_dim
# Initialization of hidden layers
for i in range(self.N_gru_lay):
# Activations
self.act.append(act_fun(self.gru_act[i]))
add_bias = True
if self.gru_use_laynorm[i] or self.gru_use_batchnorm[i]:
add_bias = False
# Feed-forward connections
self.wh.append(
nn.Linear(current_input, self.gru_lay[i], bias=add_bias))
self.wz.append(
nn.Linear(current_input, self.gru_lay[i], bias=add_bias))
self.wr.append(
nn.Linear(current_input, self.gru_lay[i], bias=add_bias))
# Recurrent connections
self.uh.append(
nn.Linear(self.gru_lay[i], self.gru_lay[i], bias=False))
self.uz.append(
nn.Linear(self.gru_lay[i], self.gru_lay[i], bias=False))
self.ur.append(
nn.Linear(self.gru_lay[i], self.gru_lay[i], bias=False))
if self.gru_orthinit:
nn.init.orthogonal_(self.uh[i].weight)
nn.init.orthogonal_(self.uz[i].weight)
nn.init.orthogonal_(self.ur[i].weight)
# batch norm initialization
self.bn_wh.append(nn.BatchNorm1d(self.gru_lay[i], momentum=0.05))
self.bn_wz.append(nn.BatchNorm1d(self.gru_lay[i], momentum=0.05))
self.bn_wr.append(nn.BatchNorm1d(self.gru_lay[i], momentum=0.05))
self.ln.append(LayerNorm(self.gru_lay[i]))
if self.bidir:
current_input = 2 * self.gru_lay[i]
else:
current_input = self.gru_lay[i]
self.out_dim = self.gru_lay[i] + self.bidir * self.gru_lay[i]
def forward(self, x):
# Applying Layer/Batch Norm
if bool(self.gru_use_laynorm_inp):
x = self.ln0((x))
if bool(self.gru_use_batchnorm_inp):
x_bn = self.bn0(x.view(x.shape[0] * x.shape[1], x.shape[2]))
x = x_bn.view(x.shape[0], x.shape[1], x.shape[2])
for i in range(self.N_gru_lay):
# Initial state and concatenation
if self.bidir:
h_init = torch.zeros(2 * x.shape[1], self.gru_lay[i])
x = torch.cat([x, flip(x, 0)], 1)
else:
h_init = torch.zeros(x.shape[1], self.gru_lay[i])
            # Drop mask initialization (same mask for all time steps)
if self.test_flag == False:
drop_mask = torch.bernoulli(torch.Tensor(
h_init.shape[0], h_init.shape[1]).fill_(1 - self.gru_drop[i]))
else:
drop_mask = torch.FloatTensor([1 - self.gru_drop[i]])
if self.use_cuda:
h_init = h_init.cuda()
drop_mask = drop_mask.cuda()
# Feed-forward affine transformations (all steps in parallel)
wh_out = self.wh[i](x)
wz_out = self.wz[i](x)
wr_out = self.wr[i](x)
            # Apply batch norm if needed (all steps in parallel)
if self.gru_use_batchnorm[i]:
wh_out_bn = self.bn_wh[i](wh_out.view(
wh_out.shape[0] * wh_out.shape[1], wh_out.shape[2]))
wh_out = wh_out_bn.view(
wh_out.shape[0], wh_out.shape[1], wh_out.shape[2])
wz_out_bn = self.bn_wz[i](wz_out.view(
wz_out.shape[0] * wz_out.shape[1], wz_out.shape[2]))
wz_out = wz_out_bn.view(
wz_out.shape[0], wz_out.shape[1], wz_out.shape[2])
wr_out_bn = self.bn_wr[i](wr_out.view(
wr_out.shape[0] * wr_out.shape[1], wr_out.shape[2]))
wr_out = wr_out_bn.view(
wr_out.shape[0], wr_out.shape[1], wr_out.shape[2])
# Processing time steps
hiddens = []
ht = h_init
for k in range(x.shape[0]):
# gru equation
zt = torch.sigmoid(wz_out[k] + self.uz[i](ht))
rt = torch.sigmoid(wr_out[k] + self.ur[i](ht))
at = wh_out[k] + self.uh[i](rt * ht)
hcand = self.act[i](at) * drop_mask
ht = zt * ht + (1 - zt) * hcand
if self.gru_use_laynorm[i]:
ht = self.ln[i](ht)
hiddens.append(ht)
# Stacking hidden states
h = torch.stack(hiddens)
# Bidirectional concatenations
if self.bidir:
h_f = h[:, 0: int(x.shape[1] / 2)]
h_b = flip(h[:, int(x.shape[1] / 2) : x.shape[1]].contiguous(), 0)
h = torch.cat([h_f, h_b], 2)
# Setup x for the next hidden layer
x = h
return x
class logMelFb(nn.Module):
def __init__(self, options, inp_dim):
super(logMelFb, self).__init__()
import torchaudio
self._sample_rate = int(options["logmelfb_nr_sample_rate"])
self._nr_of_filters = int(options["logmelfb_nr_filt"])
self._stft_window_size = int(options["logmelfb_stft_window_size"])
self._stft_window_shift = int(options["logmelfb_stft_window_shift"])
self._use_cuda = strtobool(options["use_cuda"])
self.out_dim = self._nr_of_filters
self._mspec = torchaudio.transforms.MelSpectrogram(
sr=self._sample_rate,
n_fft=self._stft_window_size,
ws=self._stft_window_size,
hop=self._stft_window_shift,
n_mels=self._nr_of_filters,
)
def forward(self, x):
def _safe_log(inp, epsilon=1e-20):
eps = torch.FloatTensor([epsilon])
if self._use_cuda:
eps = eps.cuda()
log_inp = torch.log10(torch.max(inp, eps.expand_as(inp)))
return log_inp
        assert x.shape[-1] == 1, "Multi channel time signal processing not supported yet"
x_reshape_for_stft = torch.squeeze(x, -1).transpose(0, 1)
if self._use_cuda:
window = self._mspec.window(self._stft_window_size).cuda()
else:
window = self._mspec.window(self._stft_window_size)
x_stft = torch.stft(
x_reshape_for_stft, self._stft_window_size, hop_length=self._stft_window_shift, center=False, window=window
)
x_power_stft = x_stft.pow(2).sum(-1)
x_power_stft_reshape_for_filterbank_mult = x_power_stft.transpose(1, 2)
mel_spec = self._mspec.fm(
x_power_stft_reshape_for_filterbank_mult).transpose(0, 1)
log_mel_spec = _safe_log(mel_spec)
out = log_mel_spec
return out
class channel_averaging(nn.Module):
def __init__(self, options, inp_dim):
super(channel_averaging, self).__init__()
self._use_cuda = strtobool(options["use_cuda"])
channel_weights = [float(e)
for e in options["chAvg_channelWeights"].split(",")]
self._nr_of_channels = len(channel_weights)
numpy_weights = np.asarray(
channel_weights, dtype=np.float32) * 1.0 / np.sum(channel_weights)
self._weights = torch.from_numpy(numpy_weights)
if self._use_cuda:
self._weights = self._weights.cuda()
self.out_dim = 1
def forward(self, x):
assert self._nr_of_channels == x.shape[-1]
out = torch.einsum("tbc,c->tb", x, self._weights).unsqueeze(-1)
return out
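# channel_averaging collapses the channel dimension of a (time, batch, channels)
# multi-microphone signal into a single weighted-average channel, using the
# normalized chAvg_channelWeights from the config.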
class fusionRNN_jit(torch.jit.ScriptModule):
def __init__(self, options, inp_dim):
super(fusionRNN_jit, self).__init__()
# Reading parameters
input_size = inp_dim
hidden_size = list(map(int, options["fusionRNN_lay"].split(",")))[0]
dropout = list(map(float, options["fusionRNN_drop"].split(",")))[0]
num_layers = len(list(map(int, options["fusionRNN_lay"].split(","))))
batch_size = int(options["batches"])
        # Take the first configured value as a boolean; a bare map object would
        # always evaluate as True in the `if self.do_fusion:` checks below.
        self.do_fusion = bool(list(map(
            strtobool, options["fusionRNN_do_fusion"].split(",")))[0])
self.act = str(options["fusionRNN_fusion_act"])
self.reduce = str(options["fusionRNN_fusion_reduce"])
self.fusion_layer_size = int(options["fusionRNN_fusion_layer_size"])
self.to_do = options["to_do"]
self.number_of_mic = int(options["fusionRNN_number_of_mic"])
self.save_mic = self.number_of_mic
bidirectional = True
self.out_dim = 2 * hidden_size
current_dim = int(input_size)
self.model = torch.nn.ModuleList([])
if self.to_do == "train":
self.training = True
else:
self.training = False
for i in range(num_layers):
rnn_lay = liGRU_layer(
current_dim,
hidden_size,
num_layers,
batch_size,
dropout=dropout,
bidirectional=bidirectional,
device="cuda",
do_fusion=self.do_fusion,
fusion_layer_size=self.fusion_layer_size,
number_of_mic=self.number_of_mic,
act=self.act,
reduce=self.reduce
)
if i == 0:
if self.do_fusion:
if bidirectional:
current_dim = (self.fusion_layer_size //
self.save_mic) * 2
else:
current_dim = self.fusion_layer_size // self.save_mic
# We need to reset the number of mic for the next layers so it is divided by 1
self.number_of_mic = 1
else:
if bidirectional:
current_dim = hidden_size * 2
else:
current_dim = hidden_size
self.do_fusion = False # DO NOT APPLY FUSION ON THE NEXT LAYERS
else:
if bidirectional:
current_dim = hidden_size * 2
else:
                    current_dim = hidden_size
self.model.append(rnn_lay)
@torch.jit.script_method
def forward(self, x):
# type: (Tensor) -> Tensor
for ligru_lay in self.model:
x = ligru_lay(x)
return x
class liGRU_layer(torch.jit.ScriptModule):
def __init__(
self,
input_size,
hidden_size,
num_layers,
batch_size,
dropout=0.0,
nonlinearity="relu",
bidirectional=True,
device="cuda",
do_fusion=False,
fusion_layer_size=64,
number_of_mic=1,
act="relu",
reduce="mean",
):
super(liGRU_layer, self).__init__()
self.hidden_size = int(hidden_size)
self.input_size = int(input_size)
self.batch_size = batch_size
self.bidirectional = bidirectional
self.dropout = dropout
self.device = device
self.do_fusion = do_fusion
self.fusion_layer_size = fusion_layer_size
self.number_of_mic = number_of_mic
self.act = act
self.reduce = reduce
if self.do_fusion:
self.hidden_size = self.fusion_layer_size // self.number_of_mic
if self.do_fusion:
self.wz = FusionLinearConv(
self.input_size, self.hidden_size, bias=True, number_of_mic=self.number_of_mic, act=self.act, reduce=self.reduce
).to(device)
self.wh = FusionLinearConv(
self.input_size, self.hidden_size, bias=True, number_of_mic=self.number_of_mic, act=self.act, reduce=self.reduce
).to(device)
else:
self.wz = nn.Linear(
self.input_size, self.hidden_size, bias=True
).to(device)
self.wh = nn.Linear(
self.input_size, self.hidden_size, bias=True
).to(device)
self.wz.bias.data.fill_(0)
torch.nn.init.xavier_normal_(self.wz.weight.data)
self.wh.bias.data.fill_(0)
torch.nn.init.xavier_normal_(self.wh.weight.data)
self.u = nn.Linear(
self.hidden_size, 2 * self.hidden_size, bias=False
).to(device)
# Adding orthogonal initialization for recurrent connection
nn.init.orthogonal_(self.u.weight)
self.bn_wh = nn.BatchNorm1d(self.hidden_size, momentum=0.05).to(
device
)
self.bn_wz = nn.BatchNorm1d(self.hidden_size, momentum=0.05).to(
device
)
self.drop = torch.nn.Dropout(p=self.dropout, inplace=False).to(device)
self.drop_mask_te = torch.tensor([1.0], device=device).float()
self.N_drop_masks = 100
self.drop_mask_cnt = 0
# Setting the activation function
self.act = torch.nn.ReLU().to(device)
@torch.jit.script_method
def forward(self, x):
# type: (Tensor) -> Tensor
if self.bidirectional:
x_flip = x.flip(0)
x = torch.cat([x, x_flip], dim=1)
# Feed-forward affine transformations (all steps in parallel)
wz = self.wz(x)
wh = self.wh(x)
# Apply batch normalization
wz_bn = self.bn_wz(wz.view(wz.shape[0] * wz.shape[1], wz.shape[2]))
wh_bn = self.bn_wh(wh.view(wh.shape[0] * wh.shape[1], wh.shape[2]))
wz = wz_bn.view(wz.shape[0], wz.shape[1], wz.shape[2])
wh = wh_bn.view(wh.shape[0], wh.shape[1], wh.shape[2])
# Processing time steps
h = self.ligru_cell(wz, wh)
if self.bidirectional:
h_f, h_b = h.chunk(2, dim=1)
h_b = h_b.flip(0)
h = torch.cat([h_f, h_b], dim=2)
return h
@torch.jit.script_method
def ligru_cell(self, wz, wh):
# type: (Tensor, Tensor) -> Tensor
if self.bidirectional:
h_init = torch.zeros(
2 * self.batch_size,
self.hidden_size,
device="cuda",
)
drop_masks_i = self.drop(
torch.ones(
self.N_drop_masks,
2 * self.batch_size,
self.hidden_size,
device="cuda",
)
).data
else:
h_init = torch.zeros(
self.batch_size,
self.hidden_size,
device="cuda",
)
drop_masks_i = self.drop(
torch.ones(
self.N_drop_masks,
self.batch_size,
self.hidden_size,
device="cuda",
)
).data
hiddens = []
ht = h_init
if self.training:
drop_mask = drop_masks_i[self.drop_mask_cnt]
self.drop_mask_cnt = self.drop_mask_cnt + 1
if self.drop_mask_cnt >= self.N_drop_masks:
self.drop_mask_cnt = 0
if self.bidirectional:
drop_masks_i = (
self.drop(
torch.ones(
self.N_drop_masks,
2 * self.batch_size,
self.hidden_size,
)
)
.to(self.device)
.data
)
else:
drop_masks_i = (
self.drop(
torch.ones(
self.N_drop_masks,
self.batch_size,
self.hidden_size,
)
)
.to(self.device)
.data
)
else:
drop_mask = self.drop_mask_te
for k in range(wh.shape[0]):
uz, uh = self.u(ht).chunk(2, 1)
at = wh[k] + uh
zt = wz[k] + uz
# ligru equation
zt = torch.sigmoid(zt)
hcand = self.act(at) * drop_mask
ht = zt * ht + (1 - zt) * hcand
hiddens.append(ht)
# Stacking hidden states
h = torch.stack(hiddens)
return h
class liGRU(nn.Module):
def __init__(self, options, inp_dim):
super(liGRU, self).__init__()
# Reading parameters
self.input_dim = inp_dim
self.ligru_lay = list(map(int, options["ligru_lay"].split(",")))
self.ligru_drop = list(map(float, options["ligru_drop"].split(",")))
self.ligru_use_batchnorm = list(
map(strtobool, options["ligru_use_batchnorm"].split(",")))
self.ligru_use_laynorm = list(
map(strtobool, options["ligru_use_laynorm"].split(",")))
self.ligru_use_laynorm_inp = strtobool(
options["ligru_use_laynorm_inp"])
self.ligru_use_batchnorm_inp = strtobool(
options["ligru_use_batchnorm_inp"])
self.ligru_orthinit = strtobool(options["ligru_orthinit"])
self.ligru_act = options["ligru_act"].split(",")
self.bidir = strtobool(options["ligru_bidir"])
self.use_cuda = strtobool(options["use_cuda"])
self.to_do = options["to_do"]
if self.to_do == "train":
self.test_flag = False
else:
self.test_flag = True
# List initialization
self.wh = nn.ModuleList([])
self.uh = nn.ModuleList([])
self.wz = nn.ModuleList([]) # Update Gate
self.uz = nn.ModuleList([]) # Update Gate
self.ln = nn.ModuleList([]) # Layer Norm
self.bn_wh = nn.ModuleList([]) # Batch Norm
self.bn_wz = nn.ModuleList([]) # Batch Norm
self.act = nn.ModuleList([]) # Activations
# Input layer normalization
if self.ligru_use_laynorm_inp:
self.ln0 = LayerNorm(self.input_dim)
# Input batch normalization
if self.ligru_use_batchnorm_inp:
self.bn0 = nn.BatchNorm1d(self.input_dim, momentum=0.05)
self.N_ligru_lay = len(self.ligru_lay)
current_input = self.input_dim
# Initialization of hidden layers
for i in range(self.N_ligru_lay):
# Activations
self.act.append(act_fun(self.ligru_act[i]))
add_bias = True
if self.ligru_use_laynorm[i] or self.ligru_use_batchnorm[i]:
add_bias = False
# Feed-forward connections
self.wh.append(
nn.Linear(current_input, self.ligru_lay[i], bias=add_bias))
self.wz.append(
nn.Linear(current_input, self.ligru_lay[i], bias=add_bias))
# Recurrent connections
self.uh.append(
nn.Linear(self.ligru_lay[i], self.ligru_lay[i], bias=False))
self.uz.append(
nn.Linear(self.ligru_lay[i], self.ligru_lay[i], bias=False))
if self.ligru_orthinit:
nn.init.orthogonal_(self.uh[i].weight)
nn.init.orthogonal_(self.uz[i].weight)
# batch norm initialization
self.bn_wh.append(nn.BatchNorm1d(self.ligru_lay[i], momentum=0.05))
self.bn_wz.append(nn.BatchNorm1d(self.ligru_lay[i], momentum=0.05))
self.ln.append(LayerNorm(self.ligru_lay[i]))
if self.bidir:
current_input = 2 * self.ligru_lay[i]
else:
current_input = self.ligru_lay[i]
self.out_dim = self.ligru_lay[i] + self.bidir * self.ligru_lay[i]
def forward(self, x):
# Applying Layer/Batch Norm
if bool(self.ligru_use_laynorm_inp):
x = self.ln0((x))
if bool(self.ligru_use_batchnorm_inp):
x_bn = self.bn0(x.view(x.shape[0] * x.shape[1], x.shape[2]))
x = x_bn.view(x.shape[0], x.shape[1], x.shape[2])
for i in range(self.N_ligru_lay):
# Initial state and concatenation
if self.bidir:
h_init = torch.zeros(2 * x.shape[1], self.ligru_lay[i])
x = torch.cat([x, flip(x, 0)], 1)
else:
h_init = torch.zeros(x.shape[1], self.ligru_lay[i])
            # Drop mask initialization (same mask for all time steps)
if self.test_flag == False:
drop_mask = torch.bernoulli(
torch.Tensor(h_init.shape[0], h_init.shape[1]).fill_(
1 - self.ligru_drop[i])
)
else:
drop_mask = torch.FloatTensor([1 - self.ligru_drop[i]])
if self.use_cuda:
h_init = h_init.cuda()
drop_mask = drop_mask.cuda()
# Feed-forward affine transformations (all steps in parallel)
wh_out = self.wh[i](x)
wz_out = self.wz[i](x)
            # Apply batch norm if needed (all steps in parallel)
if self.ligru_use_batchnorm[i]:
wh_out_bn = self.bn_wh[i](wh_out.view(
wh_out.shape[0] * wh_out.shape[1], wh_out.shape[2]))
wh_out = wh_out_bn.view(
wh_out.shape[0], wh_out.shape[1], wh_out.shape[2])
wz_out_bn = self.bn_wz[i](wz_out.view(
wz_out.shape[0] * wz_out.shape[1], wz_out.shape[2]))
wz_out = wz_out_bn.view(
wz_out.shape[0], wz_out.shape[1], wz_out.shape[2])
# Processing time steps
hiddens = []
ht = h_init
for k in range(x.shape[0]):
# ligru equation
zt = torch.sigmoid(wz_out[k] + self.uz[i](ht))
at = wh_out[k] + self.uh[i](ht)
hcand = self.act[i](at) * drop_mask
ht = zt * ht + (1 - zt) * hcand
if self.ligru_use_laynorm[i]:
ht = self.ln[i](ht)
hiddens.append(ht)
# Stacking hidden states
h = torch.stack(hiddens)
# Bidirectional concatenations
if self.bidir:
h_f = h[:, 0: int(x.shape[1] / 2)]
h_b = flip(h[:, int(x.shape[1] / 2) : x.shape[1]].contiguous(), 0)
h = torch.cat([h_f, h_b], 2)
# Setup x for the next hidden layer
x = h
return x
class minimalGRU(nn.Module):
def __init__(self, options, inp_dim):
super(minimalGRU, self).__init__()
# Reading parameters
self.input_dim = inp_dim
self.minimalgru_lay = list(
map(int, options["minimalgru_lay"].split(",")))
self.minimalgru_drop = list(
map(float, options["minimalgru_drop"].split(",")))
self.minimalgru_use_batchnorm = list(
map(strtobool, options["minimalgru_use_batchnorm"].split(",")))
self.minimalgru_use_laynorm = list(
map(strtobool, options["minimalgru_use_laynorm"].split(",")))
self.minimalgru_use_laynorm_inp = strtobool(
options["minimalgru_use_laynorm_inp"])
self.minimalgru_use_batchnorm_inp = strtobool(
options["minimalgru_use_batchnorm_inp"])
self.minimalgru_orthinit = strtobool(options["minimalgru_orthinit"])
self.minimalgru_act = options["minimalgru_act"].split(",")
self.bidir = strtobool(options["minimalgru_bidir"])
self.use_cuda = strtobool(options["use_cuda"])
self.to_do = options["to_do"]
if self.to_do == "train":
self.test_flag = False
else:
self.test_flag = True
# List initialization
self.wh = nn.ModuleList([])
self.uh = nn.ModuleList([])
self.wz = nn.ModuleList([]) # Update Gate
self.uz = nn.ModuleList([]) # Update Gate
self.ln = nn.ModuleList([]) # Layer Norm
self.bn_wh = nn.ModuleList([]) # Batch Norm
self.bn_wz = nn.ModuleList([]) # Batch Norm
self.act = nn.ModuleList([]) # Activations
# Input layer normalization
if self.minimalgru_use_laynorm_inp:
self.ln0 = LayerNorm(self.input_dim)
# Input batch normalization
if self.minimalgru_use_batchnorm_inp:
self.bn0 = nn.BatchNorm1d(self.input_dim, momentum=0.05)
self.N_minimalgru_lay = len(self.minimalgru_lay)
current_input = self.input_dim
# Initialization of hidden layers
for i in range(self.N_minimalgru_lay):
# Activations
self.act.append(act_fun(self.minimalgru_act[i]))
add_bias = True
if self.minimalgru_use_laynorm[i] or self.minimalgru_use_batchnorm[i]:
add_bias = False
# Feed-forward connections
self.wh.append(
nn.Linear(current_input, self.minimalgru_lay[i], bias=add_bias))
self.wz.append(
nn.Linear(current_input, self.minimalgru_lay[i], bias=add_bias))
# Recurrent connections
self.uh.append(
nn.Linear(self.minimalgru_lay[i], self.minimalgru_lay[i], bias=False))
self.uz.append(
nn.Linear(self.minimalgru_lay[i], self.minimalgru_lay[i], bias=False))
if self.minimalgru_orthinit:
nn.init.orthogonal_(self.uh[i].weight)
nn.init.orthogonal_(self.uz[i].weight)
# batch norm initialization
self.bn_wh.append(nn.BatchNorm1d(
self.minimalgru_lay[i], momentum=0.05))
self.bn_wz.append(nn.BatchNorm1d(
self.minimalgru_lay[i], momentum=0.05))
self.ln.append(LayerNorm(self.minimalgru_lay[i]))
if self.bidir:
current_input = 2 * self.minimalgru_lay[i]
else:
current_input = self.minimalgru_lay[i]
self.out_dim = self.minimalgru_lay[i] + \
self.bidir * self.minimalgru_lay[i]
def forward(self, x):
# Applying Layer/Batch Norm
if bool(self.minimalgru_use_laynorm_inp):
x = self.ln0((x))
if bool(self.minimalgru_use_batchnorm_inp):
x_bn = self.bn0(x.view(x.shape[0] * x.shape[1], x.shape[2]))
x = x_bn.view(x.shape[0], x.shape[1], x.shape[2])
for i in range(self.N_minimalgru_lay):
# Initial state and concatenation
if self.bidir:
h_init = torch.zeros(2 * x.shape[1], self.minimalgru_lay[i])
x = torch.cat([x, flip(x, 0)], 1)
else:
h_init = torch.zeros(x.shape[1], self.minimalgru_lay[i])
# Drop mask initialization (same mask for all time steps)
if self.test_flag == False:
drop_mask = torch.bernoulli(
torch.Tensor(h_init.shape[0], h_init.shape[1]).fill_(
1 - self.minimalgru_drop[i])
)
else:
drop_mask = torch.FloatTensor([1 - self.minimalgru_drop[i]])
if self.use_cuda:
h_init = h_init.cuda()
drop_mask = drop_mask.cuda()
# Feed-forward affine transformations (all steps in parallel)
wh_out = self.wh[i](x)
wz_out = self.wz[i](x)
# Apply batch norm if needed (all steps in parallel)
if self.minimalgru_use_batchnorm[i]:
wh_out_bn = self.bn_wh[i](wh_out.view(
wh_out.shape[0] * wh_out.shape[1], wh_out.shape[2]))
wh_out = wh_out_bn.view(
wh_out.shape[0], wh_out.shape[1], wh_out.shape[2])
wz_out_bn = self.bn_wz[i](wz_out.view(
wz_out.shape[0] * wz_out.shape[1], wz_out.shape[2]))
wz_out = wz_out_bn.view(
wz_out.shape[0], wz_out.shape[1], wz_out.shape[2])
# Processing time steps
hiddens = []
ht = h_init
for k in range(x.shape[0]):
# minimalgru equation
zt = torch.sigmoid(wz_out[k] + self.uz[i](ht))
at = wh_out[k] + self.uh[i](zt * ht)
hcand = self.act[i](at) * drop_mask
ht = zt * ht + (1 - zt) * hcand
if self.minimalgru_use_laynorm[i]:
ht = self.ln[i](ht)
hiddens.append(ht)
# Stacking hidden states
h = torch.stack(hiddens)
# Bidirectional concatenations
if self.bidir:
h_f = h[:, 0: int(x.shape[1] / 2)]
h_b = flip(h[:, int(x.shape[1] / 2) : x.shape[1]].contiguous(), 0)
h = torch.cat([h_f, h_b], 2)
# Setup x for the next hidden layer
x = h
return x
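# A minimal usage sketch (assumes a pytorch-kaldi-style options dict with
# string values, plus the act_fun/LayerNorm helpers defined earlier in this file):
#
#   options = {
#       "minimalgru_lay": "550,550", "minimalgru_drop": "0.2,0.2",
#       "minimalgru_use_batchnorm": "True,True", "minimalgru_use_laynorm": "False,False",
#       "minimalgru_use_laynorm_inp": "False", "minimalgru_use_batchnorm_inp": "False",
#       "minimalgru_orthinit": "True", "minimalgru_act": "relu,relu",
#       "minimalgru_bidir": "True", "use_cuda": "False", "to_do": "train",
#   }
#   net = minimalGRU(options, inp_dim=40)
#   out = net(torch.randn(500, 8, 40))  # (time, batch, feat) -> (time, batch, net.out_dim)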
class RNN(nn.Module):
def __init__(self, options, inp_dim):
super(RNN, self).__init__()
# Reading parameters
self.input_dim = inp_dim
self.rnn_lay = list(map(int, options["rnn_lay"].split(",")))
self.rnn_drop = list(map(float, options["rnn_drop"].split(",")))
self.rnn_use_batchnorm = list(
map(strtobool, options["rnn_use_batchnorm"].split(",")))
self.rnn_use_laynorm = list(
map(strtobool, options["rnn_use_laynorm"].split(",")))
self.rnn_use_laynorm_inp = strtobool(options["rnn_use_laynorm_inp"])
self.rnn_use_batchnorm_inp = strtobool(
options["rnn_use_batchnorm_inp"])
self.rnn_orthinit = strtobool(options["rnn_orthinit"])
self.rnn_act = options["rnn_act"].split(",")
self.bidir = strtobool(options["rnn_bidir"])
self.use_cuda = strtobool(options["use_cuda"])
self.to_do = options["to_do"]
if self.to_do == "train":
self.test_flag = False
else:
self.test_flag = True
# List initialization
self.wh = nn.ModuleList([])
self.uh = nn.ModuleList([])
self.ln = nn.ModuleList([]) # Layer Norm
self.bn_wh = nn.ModuleList([]) # Batch Norm
self.act = nn.ModuleList([]) # Activations
# Input layer normalization
if self.rnn_use_laynorm_inp:
self.ln0 = LayerNorm(self.input_dim)
# Input batch normalization
if self.rnn_use_batchnorm_inp:
self.bn0 = nn.BatchNorm1d(self.input_dim, momentum=0.05)
self.N_rnn_lay = len(self.rnn_lay)
current_input = self.input_dim
# Initialization of hidden layers
for i in range(self.N_rnn_lay):
# Activations
self.act.append(act_fun(self.rnn_act[i]))
add_bias = True
if self.rnn_use_laynorm[i] or self.rnn_use_batchnorm[i]:
add_bias = False
# Feed-forward connections
self.wh.append(
nn.Linear(current_input, self.rnn_lay[i], bias=add_bias))
# Recurrent connections
self.uh.append(
nn.Linear(self.rnn_lay[i], self.rnn_lay[i], bias=False))
if self.rnn_orthinit:
nn.init.orthogonal_(self.uh[i].weight)
# batch norm initialization
self.bn_wh.append(nn.BatchNorm1d(self.rnn_lay[i], momentum=0.05))
self.ln.append(LayerNorm(self.rnn_lay[i]))
if self.bidir:
current_input = 2 * self.rnn_lay[i]
else:
current_input = self.rnn_lay[i]
self.out_dim = self.rnn_lay[i] + self.bidir * self.rnn_lay[i]
def forward(self, x):
# Applying Layer/Batch Norm
if bool(self.rnn_use_laynorm_inp):
x = self.ln0((x))
if bool(self.rnn_use_batchnorm_inp):
x_bn = self.bn0(x.view(x.shape[0] * x.shape[1], x.shape[2]))
x = x_bn.view(x.shape[0], x.shape[1], x.shape[2])
for i in range(self.N_rnn_lay):
# Initial state and concatenation
if self.bidir:
h_init = torch.zeros(2 * x.shape[1], self.rnn_lay[i])
x = torch.cat([x, flip(x, 0)], 1)
else:
h_init = torch.zeros(x.shape[1], self.rnn_lay[i])
# Drop mask initialization (same mask for all time steps)
if self.test_flag == False:
drop_mask = torch.bernoulli(torch.Tensor(
h_init.shape[0], h_init.shape[1]).fill_(1 - self.rnn_drop[i]))
else:
drop_mask = torch.FloatTensor([1 - self.rnn_drop[i]])
if self.use_cuda:
h_init = h_init.cuda()
drop_mask = drop_mask.cuda()
# Feed-forward affine transformations (all steps in parallel)
wh_out = self.wh[i](x)
# Apply batch norm if needed (all steps in parallel)
if self.rnn_use_batchnorm[i]:
wh_out_bn = self.bn_wh[i](wh_out.view(
wh_out.shape[0] * wh_out.shape[1], wh_out.shape[2]))
wh_out = wh_out_bn.view(
wh_out.shape[0], wh_out.shape[1], wh_out.shape[2])
# Processing time steps
hiddens = []
ht = h_init
for k in range(x.shape[0]):
# rnn equation
at = wh_out[k] + self.uh[i](ht)
ht = self.act[i](at) * drop_mask
if self.rnn_use_laynorm[i]:
ht = self.ln[i](ht)
hiddens.append(ht)
# Stacking hidden states
h = torch.stack(hiddens)
# Bidirectional concatenations
if self.bidir:
h_f = h[:, 0: int(x.shape[1] / 2)]
h_b = flip(h[:, int(x.shape[1] / 2) : x.shape[1]].contiguous(), 0)
h = torch.cat([h_f, h_b], 2)
# Setup x for the next hidden layer
x = h
return x
class CNN(nn.Module):
def __init__(self, options, inp_dim):
super(CNN, self).__init__()
# Reading parameters
self.input_dim = inp_dim
self.cnn_N_filt = list(map(int, options["cnn_N_filt"].split(",")))
self.cnn_len_filt = list(map(int, options["cnn_len_filt"].split(",")))
self.cnn_max_pool_len = list(
map(int, options["cnn_max_pool_len"].split(",")))
self.cnn_act = options["cnn_act"].split(",")
self.cnn_drop = list(map(float, options["cnn_drop"].split(",")))
self.cnn_use_laynorm = list(
map(strtobool, options["cnn_use_laynorm"].split(",")))
self.cnn_use_batchnorm = list(
map(strtobool, options["cnn_use_batchnorm"].split(",")))
self.cnn_use_laynorm_inp = strtobool(options["cnn_use_laynorm_inp"])
self.cnn_use_batchnorm_inp = strtobool(
options["cnn_use_batchnorm_inp"])
self.N_cnn_lay = len(self.cnn_N_filt)
self.conv = nn.ModuleList([])
self.bn = nn.ModuleList([])
self.ln = nn.ModuleList([])
self.act = nn.ModuleList([])
self.drop = nn.ModuleList([])
if self.cnn_use_laynorm_inp:
self.ln0 = LayerNorm(self.input_dim)
if self.cnn_use_batchnorm_inp:
self.bn0 = nn.BatchNorm1d([self.input_dim], momentum=0.05)
current_input = self.input_dim
for i in range(self.N_cnn_lay):
N_filt = int(self.cnn_N_filt[i])
len_filt = int(self.cnn_len_filt[i])
# dropout
self.drop.append(nn.Dropout(p=self.cnn_drop[i]))
# activation
self.act.append(act_fun(self.cnn_act[i]))
# layer norm initialization
self.ln.append(
LayerNorm([N_filt, int(
(current_input - self.cnn_len_filt[i] + 1) / self.cnn_max_pool_len[i])])
)
self.bn.append(
nn.BatchNorm1d(
N_filt, int((current_input - self.cnn_len_filt[i] + 1) / self.cnn_max_pool_len[i]), momentum=0.05
)
)
if i == 0:
self.conv.append(nn.Conv1d(1, N_filt, len_filt))
else:
self.conv.append(
nn.Conv1d(self.cnn_N_filt[i - 1], self.cnn_N_filt[i], self.cnn_len_filt[i]))
current_input = int(
(current_input - self.cnn_len_filt[i] + 1) / self.cnn_max_pool_len[i])
self.out_dim = current_input * N_filt
def forward(self, x):
batch = x.shape[0]
seq_len = x.shape[1]
if bool(self.cnn_use_laynorm_inp):
x = self.ln0((x))
if bool(self.cnn_use_batchnorm_inp):
x = self.bn0((x))
x = x.view(batch, 1, seq_len)
for i in range(self.N_cnn_lay):
if self.cnn_use_laynorm[i]:
x = self.drop[i](self.act[i](self.ln[i](
F.max_pool1d(self.conv[i](x), self.cnn_max_pool_len[i]))))
if self.cnn_use_batchnorm[i]:
x = self.drop[i](self.act[i](self.bn[i](
F.max_pool1d(self.conv[i](x), self.cnn_max_pool_len[i]))))
if self.cnn_use_batchnorm[i] == False and self.cnn_use_laynorm[i] == False:
x = self.drop[i](self.act[i](F.max_pool1d(
self.conv[i](x), self.cnn_max_pool_len[i])))
x = x.view(batch, -1)
return x
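# A minimal usage sketch (assumes raw-waveform chunks of inp_dim samples per row;
# the option values below are illustrative, not prescribed settings):
#
#   options = {
#       "cnn_N_filt": "80,60", "cnn_len_filt": "251,5", "cnn_max_pool_len": "3,3",
#       "cnn_act": "relu,relu", "cnn_drop": "0.0,0.0",
#       "cnn_use_laynorm": "True,True", "cnn_use_batchnorm": "False,False",
#       "cnn_use_laynorm_inp": "True", "cnn_use_batchnorm_inp": "False",
#   }
#   cnn = CNN(options, inp_dim=3200)      # e.g. 200 ms chunks at 16 kHz
#   out = cnn(torch.randn(16, 3200))      # -> (16, cnn.out_dim)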
class SincNet(nn.Module):
def __init__(self, options, inp_dim):
super(SincNet, self).__init__()
# Reading parameters
self.input_dim = inp_dim
self.sinc_N_filt = list(map(int, options["sinc_N_filt"].split(",")))
self.sinc_len_filt = list(
map(int, options["sinc_len_filt"].split(",")))
self.sinc_max_pool_len = list(
map(int, options["sinc_max_pool_len"].split(",")))
self.sinc_act = options["sinc_act"].split(",")
self.sinc_drop = list(map(float, options["sinc_drop"].split(",")))
self.sinc_use_laynorm = list(
map(strtobool, options["sinc_use_laynorm"].split(",")))
self.sinc_use_batchnorm = list(
map(strtobool, options["sinc_use_batchnorm"].split(",")))
self.sinc_use_laynorm_inp = strtobool(options["sinc_use_laynorm_inp"])
self.sinc_use_batchnorm_inp = strtobool(
options["sinc_use_batchnorm_inp"])
self.N_sinc_lay = len(self.sinc_N_filt)
self.sinc_sample_rate = int(options["sinc_sample_rate"])
self.sinc_min_low_hz = int(options["sinc_min_low_hz"])
self.sinc_min_band_hz = int(options["sinc_min_band_hz"])
self.conv = nn.ModuleList([])
self.bn = nn.ModuleList([])
self.ln = nn.ModuleList([])
self.act = nn.ModuleList([])
self.drop = nn.ModuleList([])
if self.sinc_use_laynorm_inp:
self.ln0 = LayerNorm(self.input_dim)
if self.sinc_use_batchnorm_inp:
self.bn0 = nn.BatchNorm1d([self.input_dim], momentum=0.05)
current_input = self.input_dim
for i in range(self.N_sinc_lay):
N_filt = int(self.sinc_N_filt[i])
len_filt = int(self.sinc_len_filt[i])
# dropout
self.drop.append(nn.Dropout(p=self.sinc_drop[i]))
# activation
self.act.append(act_fun(self.sinc_act[i]))
# layer norm initialization
self.ln.append(
LayerNorm([N_filt, int(
(current_input - self.sinc_len_filt[i] + 1) / self.sinc_max_pool_len[i])])
)
self.bn.append(
nn.BatchNorm1d(
N_filt, int((current_input - self.sinc_len_filt[i] + 1) / self.sinc_max_pool_len[i]), momentum=0.05
)
)
if i == 0:
self.conv.append(
SincConv(
1,
N_filt,
len_filt,
sample_rate=self.sinc_sample_rate,
min_low_hz=self.sinc_min_low_hz,
min_band_hz=self.sinc_min_band_hz,
)
)
else:
self.conv.append(
nn.Conv1d(self.sinc_N_filt[i - 1], self.sinc_N_filt[i], self.sinc_len_filt[i]))
current_input = int(
(current_input - self.sinc_len_filt[i] + 1) / self.sinc_max_pool_len[i])
self.out_dim = current_input * N_filt
def forward(self, x):
batch = x.shape[0]
seq_len = x.shape[1]
if bool(self.sinc_use_laynorm_inp):
x = self.ln0(x)
if bool(self.sinc_use_batchnorm_inp):
x = self.bn0(x)
x = x.view(batch, 1, seq_len)
for i in range(self.N_sinc_lay):
if self.sinc_use_laynorm[i]:
x = self.drop[i](self.act[i](self.ln[i](
F.max_pool1d(self.conv[i](x), self.sinc_max_pool_len[i]))))
if self.sinc_use_batchnorm[i]:
x = self.drop[i](self.act[i](self.bn[i](
F.max_pool1d(self.conv[i](x), self.sinc_max_pool_len[i]))))
if self.sinc_use_batchnorm[i] == False and self.sinc_use_laynorm[i] == False:
x = self.drop[i](self.act[i](F.max_pool1d(
self.conv[i](x), self.sinc_max_pool_len[i])))
x = x.view(batch, -1)
return x
class SincConv(nn.Module):
"""Sinc-based convolution
Parameters
----------
in_channels : `int`
Number of input channels. Must be 1.
out_channels : `int`
Number of filters.
kernel_size : `int`
Filter length.
sample_rate : `int`, optional
Sample rate. Defaults to 16000.
Usage
-----
See `torch.nn.Conv1d`
Reference
---------
Mirco Ravanelli, Yoshua Bengio,
"Speaker Recognition from raw waveform with SincNet".
https://arxiv.org/abs/1808.00158
"""
@staticmethod
def to_mel(hz):
return 2595 * np.log10(1 + hz / 700)
@staticmethod
def to_hz(mel):
return 700 * (10 ** (mel / 2595) - 1)
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
bias=False,
groups=1,
sample_rate=16000,
min_low_hz=50,
min_band_hz=50,
):
super(SincConv, self).__init__()
if in_channels != 1:
# msg = (f'SincConv only support one input channel '
# f'(here, in_channels = {in_channels:d}).')
msg = "SincConv only support one input channel (here, in_channels = {%i})" % (
in_channels)
raise ValueError(msg)
self.out_channels = out_channels
self.kernel_size = kernel_size
# Forcing the filters to be odd (i.e., perfectly symmetric)
if kernel_size % 2 == 0:
self.kernel_size = self.kernel_size + 1
self.stride = stride
self.padding = padding
self.dilation = dilation
if bias:
raise ValueError("SincConv does not support bias.")
if groups > 1:
raise ValueError("SincConv does not support groups.")
self.sample_rate = sample_rate
self.min_low_hz = min_low_hz
self.min_band_hz = min_band_hz
# initialize filterbanks such that they are equally spaced in Mel scale
low_hz = 30
high_hz = self.sample_rate / 2 - (self.min_low_hz + self.min_band_hz)
mel = np.linspace(self.to_mel(low_hz), self.to_mel(
high_hz), self.out_channels + 1)
hz = self.to_hz(mel) / self.sample_rate
# filter lower frequency (out_channels, 1)
self.low_hz_ = nn.Parameter(torch.Tensor(hz[:-1]).view(-1, 1))
# filter frequency band (out_channels, 1)
self.band_hz_ = nn.Parameter(torch.Tensor(np.diff(hz)).view(-1, 1))
# Hamming window
# self.window_ = torch.hamming_window(self.kernel_size)
n_lin = torch.linspace(0, self.kernel_size, steps=self.kernel_size)
self.window_ = 0.54 - 0.46 * \
torch.cos(2 * math.pi * n_lin / self.kernel_size)
# (kernel_size, 1)
n = (self.kernel_size - 1) / 2
self.n_ = torch.arange(-n, n + 1).view(1, -1) / self.sample_rate
def sinc(self, x):
# Numerically stable definition
x_left = x[:, 0: int((x.shape[1] - 1) / 2)]
y_left = torch.sin(x_left) / x_left
y_right = torch.flip(y_left, dims=[1])
sinc = torch.cat(
[y_left, torch.ones([x.shape[0], 1]).to(x.device), y_right], dim=1)
return sinc
def forward(self, waveforms):
"""
Parameters
----------
waveforms : `torch.Tensor` (batch_size, 1, n_samples)
Batch of waveforms.
Returns
-------
features : `torch.Tensor` (batch_size, out_channels, n_samples_out)
Batch of sinc filters activations.
"""
self.n_ = self.n_.to(waveforms.device)
self.window_ = self.window_.to(waveforms.device)
low = self.min_low_hz / self.sample_rate + torch.abs(self.low_hz_)
high = low + self.min_band_hz / \
self.sample_rate + torch.abs(self.band_hz_)
f_times_t = torch.matmul(low, self.n_)
low_pass1 = 2 * low * \
self.sinc(2 * math.pi * f_times_t * self.sample_rate)
f_times_t = torch.matmul(high, self.n_)
low_pass2 = 2 * high * \
self.sinc(2 * math.pi * f_times_t * self.sample_rate)
band_pass = low_pass2 - low_pass1
max_, _ = torch.max(band_pass, dim=1, keepdim=True)
band_pass = band_pass / max_
self.filters = (
band_pass * self.window_).view(self.out_channels, 1, self.kernel_size)
return F.conv1d(
waveforms,
self.filters,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
bias=None,
groups=1,
)
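# A minimal usage sketch: SincConv acts like an nn.Conv1d over a mono waveform,
# but each kernel is a parametrised band-pass filter (the values below are
# illustrative assumptions, not prescribed settings):
#
#   sinc = SincConv(1, 80, 251, sample_rate=16000)
#   y = sinc(torch.randn(4, 1, 3200))     # -> (4, 80, 3200 - 251 + 1)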
class SincConv_fast(nn.Module):
"""Sinc-based convolution
Parameters
----------
in_channels : `int`
Number of input channels. Must be 1.
out_channels : `int`
Number of filters.
kernel_size : `int`
Filter length.
sample_rate : `int`, optional
Sample rate. Defaults to 16000.
Usage
-----
See `torch.nn.Conv1d`
Reference
---------
Mirco Ravanelli, Yoshua Bengio,
"Speaker Recognition from raw waveform with SincNet".
https://arxiv.org/abs/1808.00158
"""
@staticmethod
def to_mel(hz):
return 2595 * np.log10(1 + hz / 700)
@staticmethod
def to_hz(mel):
return 700 * (10 ** (mel / 2595) - 1)
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
bias=False,
groups=1,
sample_rate=16000,
min_low_hz=50,
min_band_hz=50,
):
super(SincConv_fast, self).__init__()
if in_channels != 1:
# msg = (f'SincConv only support one input channel '
# f'(here, in_channels = {in_channels:d}).')
msg = "SincConv only support one input channel (here, in_channels = {%i})" % (
in_channels)
raise ValueError(msg)
self.out_channels = out_channels
self.kernel_size = kernel_size
# Forcing the filters to be odd (i.e., perfectly symmetric)
if kernel_size % 2 == 0:
self.kernel_size = self.kernel_size + 1
self.stride = stride
self.padding = padding
self.dilation = dilation
if bias:
raise ValueError("SincConv does not support bias.")
if groups > 1:
raise ValueError("SincConv does not support groups.")
self.sample_rate = sample_rate
self.min_low_hz = min_low_hz
self.min_band_hz = min_band_hz
# initialize filterbanks such that they are equally spaced in Mel scale
low_hz = 30
high_hz = self.sample_rate / 2 - (self.min_low_hz + self.min_band_hz)
mel = np.linspace(self.to_mel(low_hz), self.to_mel(
high_hz), self.out_channels + 1)
hz = self.to_hz(mel)
# filter lower frequency (out_channels, 1)
self.low_hz_ = nn.Parameter(torch.Tensor(hz[:-1]).view(-1, 1))
# filter frequency band (out_channels, 1)
self.band_hz_ = nn.Parameter(torch.Tensor(np.diff(hz)).view(-1, 1))
# Hamming window
# self.window_ = torch.hamming_window(self.kernel_size)
n_lin = torch.linspace(
0, (self.kernel_size / 2) - 1, steps=int((self.kernel_size / 2))
) # computing only half of the window
self.window_ = 0.54 - 0.46 * \
torch.cos(2 * math.pi * n_lin / self.kernel_size)
# (kernel_size, 1)
n = (self.kernel_size - 1) / 2.0
self.n_ = (
2 * math.pi * torch.arange(-n, 0).view(1, -1) / self.sample_rate
) # Due to symmetry, I only need half of the time axes
def forward(self, waveforms):
"""
Parameters
----------
waveforms : `torch.Tensor` (batch_size, 1, n_samples)
Batch of waveforms.
Returns
-------
features : `torch.Tensor` (batch_size, out_channels, n_samples_out)
Batch of sinc filters activations.
"""
self.n_ = self.n_.to(waveforms.device)
self.window_ = self.window_.to(waveforms.device)
low = self.min_low_hz + torch.abs(self.low_hz_)
high = torch.clamp(low + self.min_band_hz + torch.abs(self.band_hz_),
self.min_low_hz, self.sample_rate / 2)
band = (high - low)[:, 0]
f_times_t_low = torch.matmul(low, self.n_)
f_times_t_high = torch.matmul(high, self.n_)
band_pass_left = (
(torch.sin(f_times_t_high) - torch.sin(f_times_t_low)) / (self.n_ / 2)
) * self.window_ # Equivalent of Eq. 4 of the reference paper (Speaker Recognition from Raw Waveform with SincNet): the sinc is expanded and the terms simplified, which avoids several useless computations.
band_pass_center = 2 * band.view(-1, 1)
band_pass_right = torch.flip(band_pass_left, dims=[1])
band_pass = torch.cat(
[band_pass_left, band_pass_center, band_pass_right], dim=1)
band_pass = band_pass / (2 * band[:, None])
self.filters = (band_pass).view(self.out_channels, 1, self.kernel_size)
return F.conv1d(
waveforms,
self.filters,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
bias=None,
groups=1,
)
def flip(x, dim):
xsize = x.size()
dim = x.dim() + dim if dim < 0 else dim
x = x.contiguous()
x = x.view(-1, *xsize[dim:])
x = x.view(x.size(0), x.size(1), -1)[
:, getattr(torch.arange(x.size(1) - 1, -1, -1), ("cpu", "cuda")[x.is_cuda])().long(), :
]
return x.view(xsize)
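# flip(x, dim) reverses x along dimension `dim`; on PyTorch versions that
# provide torch.flip it is equivalent to torch.flip(x, dims=[dim]), e.g.:
#
#   x = torch.arange(6).view(3, 2, 1)
#   assert torch.equal(flip(x, 0), torch.flip(x, dims=[0]))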
class SRU(nn.Module):
def __init__(self, options, inp_dim):
super(SRU, self).__init__()
self.input_dim = inp_dim
self.hidden_size = int(options["sru_hidden_size"])
self.num_layers = int(options["sru_num_layers"])
self.dropout = float(options["sru_dropout"])
self.rnn_dropout = float(options["sru_rnn_dropout"])
self.use_tanh = bool(strtobool(options["sru_use_tanh"]))
self.use_relu = bool(strtobool(options["sru_use_relu"]))
self.use_selu = bool(strtobool(options["sru_use_selu"]))
self.weight_norm = bool(strtobool(options["sru_weight_norm"]))
self.layer_norm = bool(strtobool(options["sru_layer_norm"]))
self.bidirectional = bool(strtobool(options["sru_bidirectional"]))
self.is_input_normalized = bool(
strtobool(options["sru_is_input_normalized"]))
self.has_skip_term = bool(strtobool(options["sru_has_skip_term"]))
self.rescale = bool(strtobool(options["sru_rescale"]))
self.highway_bias = float(options["sru_highway_bias"])
self.n_proj = int(options["sru_n_proj"])
self.sru = sru.SRU(
self.input_dim,
self.hidden_size,
num_layers=self.num_layers,
dropout=self.dropout,
rnn_dropout=self.rnn_dropout,
bidirectional=self.bidirectional,
n_proj=self.n_proj,
use_tanh=self.use_tanh,
use_selu=self.use_selu,
use_relu=self.use_relu,
weight_norm=self.weight_norm,
layer_norm=self.layer_norm,
has_skip_term=self.has_skip_term,
is_input_normalized=self.is_input_normalized,
highway_bias=self.highway_bias,
rescale=self.rescale,
)
self.out_dim = self.hidden_size + self.bidirectional * self.hidden_size
def forward(self, x):
if self.bidirectional:
h0 = torch.zeros(self.num_layers, x.shape[1], self.hidden_size * 2)
else:
h0 = torch.zeros(self.num_layers, x.shape[1], self.hidden_size)
if x.is_cuda:
h0 = h0.cuda()
output, hn = self.sru(x, c0=h0)
return output
class PASE(nn.Module):
def __init__(self, options, inp_dim):
super(PASE, self).__init__()
# To use PASE within PyTorch-Kaldi, please clone the current PASE repository: https://github.com/santi-pdp/pase
# Note that you have to clone the dev branch.
# Take a look at the requirements (requirements.txt) and install anything missing in your environment. An important requirement is QRNN (https://github.com/salesforce/pytorch-qrnn).
# Before starting to work with PASE, it may make sense to run a quick test with QRNN on its own (see the "usage" section of the QRNN repository).
# Remember to install pase so that it can be used outside the pase folder. To do so, go into the pase folder and type:
# "python setup.py install"
from pase.models.frontend import wf_builder
self.input_dim = inp_dim
self.pase_cfg = options["pase_cfg"]
self.pase_model = options["pase_model"]
self.pase = wf_builder(self.pase_cfg)
self.pase.load_pretrained(
self.pase_model, load_last=True, verbose=True)
# Reading the out_dim from the config file:
with open(self.pase_cfg) as json_file:
config = json.load(json_file)
self.out_dim = int(config["emb_dim"])
def forward(self, x):
x = x.unsqueeze(0).unsqueeze(0)
output = self.pase(x)
return output
class FusionLinearConv(nn.Module):
r"""Applies a FusionLayer as described in:
'FusionRNN: Shared Neural Parameters for
Multi-Channel Distant Speech Recognition', Titouan P. et al.
Input channels are expected to be concatenated along the last dimension
"""
def __init__(self, in_features, out_features, number_of_mic=1, bias=True, seed=None, act="leaky", reduce="sum"):
super(FusionLinearConv, self).__init__()
self.in_features = in_features // number_of_mic
self.out_features = out_features
self.number_of_mic = number_of_mic
self.reduce = reduce
if act == "leaky_relu":
self.act_function = nn.LeakyReLU()
elif act == "prelu":
self.act_function = nn.PReLU()
elif act == "relu":
self.act_function = nn.ReLU()
else:
self.act_function = nn.Tanh()
self.conv = nn.Conv1d(1, self.out_features, kernel_size=self.in_features,
stride=self.in_features, bias=True, padding=0)
self.conv.bias.data.fill_(0)
torch.nn.init.xavier_normal_(self.conv.weight.data)
def forward(self, input):
orig_shape = input.shape
out = self.act_function(
self.conv(input.view(orig_shape[0]*orig_shape[1], 1, -1)))
if self.reduce == "mean":
out = torch.mean(out, dim=-1)
else:
out = torch.sum(out, dim=-1)
return out.view(orig_shape[0], orig_shape[1], -1)
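# A minimal usage sketch (assumes two 512-dim microphone streams concatenated
# along the last dimension; parameter values are illustrative):
#
#   fusion = FusionLinearConv(in_features=1024, out_features=512,
#                             number_of_mic=2, act="relu", reduce="sum")
#   y = fusion(torch.randn(500, 8, 1024))   # (time, batch, 2*512) -> (500, 8, 512)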
class RNNP(nn.Module):
"""RNN with projection layer module
"""
def __init__(self, options, inp_dim):
super(RNNP, self).__init__()
typ = options["rnnp_typ"]
elayers = int(options["rnnp_elayers"])
cdim = int(options["rnnp_eunits"])
hdim = int(options["rnnp_eprojs"])
dropout = float(options["rnnp_drop"])
bidir = typ[0] == "b"
RNN = torch.nn.LSTM if "lstm" in typ else torch.nn.GRU
for i in range(elayers):
if i == 0:
inputdim = inp_dim
else:
inputdim = hdim
rnn = RNN(
inputdim, cdim, num_layers=1, bidirectional=bidir
)
# if torch.cuda.is_available():
# for m in rnn.parameters():
# m = m.cuda()
setattr(self, "%s%d" % ("birnn" if bidir else "rnn", i), rnn)
# bottleneck layer to merge
if bidir:
proj = torch.nn.Linear(2 * cdim, hdim)
# if torch.cuda.is_available():
# for m in proj.parameters():
# m = m.cuda()
setattr(self, "bt%d" % i, proj)
else:
proj = torch.nn.Linear(cdim, hdim)
# if torch.cuda.is_available():
# for m in proj.parameters():
# m = m.cuda()
setattr(self, "bt%d" % i, proj)
self.elayers = elayers
self.cdim = cdim
self.typ = typ
self.bidir = bidir
self.dropout = dropout
self.out_dim = hdim
def forward(self, x):
"""RNNP forward
"""
ys = x
for layer in range(self.elayers):
if self.bidir:
h0 = torch.zeros(2, x.shape[1], self.cdim)
c0 = torch.zeros(2, x.shape[1], self.cdim)
else:
h0 = torch.zeros(1, x.shape[1], self.cdim)
c0 = torch.zeros(1, x.shape[1], self.cdim)
h0 = h0.cuda()
c0 = c0.cuda()
rnn = getattr(
self, ("birnn" if self.bidir else "rnn") + str(layer))
rnn.flatten_parameters()
ys, states = rnn(ys, (h0, c0))
projection_layer = getattr(self, "bt%d" % layer)
projected = projection_layer(
ys.contiguous().view(-1, ys.size(2)))
projected = projected.view(ys.size(0), ys.size(1), -1)
ys = projected
if layer < self.elayers - 1:
ys = torch.tanh(F.dropout(ys, p=self.dropout))
return ys
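# A minimal usage sketch (illustrative values; note that this forward() moves
# the initial states to CUDA unconditionally, so a GPU is assumed):
#
#   options = {"rnnp_typ": "blstm", "rnnp_elayers": "2",
#              "rnnp_eunits": "512", "rnnp_eprojs": "320", "rnnp_drop": "0.2"}
#   rnnp = RNNP(options, inp_dim=40).cuda()
#   y = rnnp(torch.randn(500, 8, 40).cuda())   # -> (500, 8, rnnp.out_dim)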
|
the-stack_106_29523 | """Stores Research Object including provenance."""
import copy
import datetime
import hashlib
import logging
import os
import re
import shutil
import tempfile
import urllib
import uuid
from collections import OrderedDict
from getpass import getuser
from io import BytesIO, FileIO, TextIOWrapper, open
from socket import getfqdn
from types import ModuleType
from typing import (
IO,
Any,
Callable,
Dict,
Generator,
List,
MutableMapping,
Optional,
Set,
Tuple,
Union,
cast,
)
import prov.model as provM
from pathlib2 import Path, PurePath, PurePosixPath
from prov.identifier import Identifier, Namespace
from prov.model import PROV, ProvDocument, ProvEntity
from typing_extensions import TYPE_CHECKING
from ruamel import yaml
from schema_salad.sourceline import SourceLine
from schema_salad.utils import json_dumps
from .context import RuntimeContext
from .errors import WorkflowException
from .loghandler import _logger
from .pathmapper import get_listing
from .process import Process, shortname
from .stdfsaccess import StdFsAccess
from .utils import onWindows, versionstring
# move to a regular typing import when Python 3.3-3.6 is no longer supported
# imports needed for retrieving user data
if onWindows():
import ctypes # pylint: disable=unused-import
else:
try:
import pwd # pylint: disable=unused-import
except ImportError:
pass
if TYPE_CHECKING:
from .command_line_tool import (
CommandLineTool,
ExpressionTool,
) # pylint: disable=unused-import
from .workflow import Workflow # pylint: disable=unused-import
__citation__ = "https://doi.org/10.5281/zenodo.1208477"
# NOTE: Semantic versioning of the CWLProv Research Object
# **and** the cwlprov files
#
# Rough guide (major.minor.patch):
# 1. Bump major number if removing/"breaking" resources or PROV statements
# 2. Bump minor number if adding resources or PROV statements
# 3. Bump patch number for non-breaking non-adding changes,
# e.g. fixing broken relative paths
CWLPROV_VERSION = "https://w3id.org/cwl/prov/0.6.0"
# Research Object folders
METADATA = "metadata"
DATA = "data"
WORKFLOW = "workflow"
SNAPSHOT = "snapshot"
# sub-folders
MAIN = os.path.join(WORKFLOW, "main")
PROVENANCE = os.path.join(METADATA, "provenance")
LOGS = os.path.join(METADATA, "logs")
WFDESC = Namespace("wfdesc", "http://purl.org/wf4ever/wfdesc#")
WFPROV = Namespace("wfprov", "http://purl.org/wf4ever/wfprov#")
WF4EVER = Namespace("wf4ever", "http://purl.org/wf4ever/wf4ever#")
RO = Namespace("ro", "http://purl.org/wf4ever/ro#")
ORE = Namespace("ore", "http://www.openarchives.org/ore/terms/")
FOAF = Namespace("foaf", "http://xmlns.com/foaf/0.1/")
SCHEMA = Namespace("schema", "http://schema.org/")
CWLPROV = Namespace("cwlprov", "https://w3id.org/cwl/prov#")
ORCID = Namespace("orcid", "https://orcid.org/")
UUID = Namespace("id", "urn:uuid:")
# BagIt and YAML always use UTF-8
ENCODING = "UTF-8"
TEXT_PLAIN = 'text/plain; charset="%s"' % ENCODING
# sha1, compatible with the File type's "checksum" field
# e.g. "checksum" = "sha1$47a013e660d408619d894b20806b1d5086aab03b"
# See ./cwltool/schemas/v1.0/Process.yml
Hasher = hashlib.sha1
SHA1 = "sha1"
SHA256 = "sha256"
SHA512 = "sha512"
# TODO: Better identifiers for user, at least
# these should be preserved in ~/.config/cwl for every execution
# on this host
USER_UUID = uuid.uuid4().urn
ACCOUNT_UUID = uuid.uuid4().urn
def _posix_path(local_path):
# type: (str) -> str
return str(PurePosixPath(Path(local_path)))
def _local_path(posix_path):
# type: (str) -> str
return str(Path(posix_path))
def _whoami():
# type: () -> Tuple[str,str]
"""Return the current operating system account as (username, fullname)."""
username = getuser()
try:
if onWindows():
get_user_name = ctypes.windll.secur32.GetUserNameExW # type: ignore
size = ctypes.pointer(ctypes.c_ulong(0))
get_user_name(3, None, size)
name_buffer = ctypes.create_unicode_buffer(size.contents.value)
get_user_name(3, name_buffer, size)
fullname = str(name_buffer.value)
else:
fullname = pwd.getpwuid(os.getuid())[4].split(",")[0]
except (KeyError, IndexError):
fullname = username
return (username, fullname)
class WritableBagFile(FileIO):
"""Writes files in research object."""
def __init__(self, research_object, rel_path):
# type: (ResearchObject, str) -> None
"""Initialize an ROBagIt."""
self.research_object = research_object
if Path(rel_path).is_absolute():
raise ValueError("rel_path must be relative: %s" % rel_path)
self.rel_path = rel_path
self.hashes = {
SHA1: hashlib.sha1(), # nosec
SHA256: hashlib.sha256(),
SHA512: hashlib.sha512(),
}
# Open file in Research Object folder
path = os.path.abspath(
os.path.join(research_object.folder, _local_path(rel_path))
)
if not path.startswith(os.path.abspath(research_object.folder)):
raise ValueError("Path is outside Research Object: %s" % path)
super(WritableBagFile, self).__init__(str(path), mode="w")
def write(self, b):
# type: (Union[bytes, str]) -> int
if isinstance(b, bytes):
real_b = b
else:
real_b = b.encode("utf-8")
total = 0
length = len(real_b)
while total < length:
ret = super(WritableBagFile, self).write(real_b)
if ret:
total += ret
for _ in self.hashes.values():
_.update(real_b)
return total
def close(self): # type: () -> None
# FIXME: Convert below block to a ResearchObject method?
if self.rel_path.startswith("data/"):
self.research_object.bagged_size[self.rel_path] = self.tell()
else:
self.research_object.tagfiles.add(self.rel_path)
super(WritableBagFile, self).close()
# { "sha1": "f572d396fae9206628714fb2ce00f72e94f2258f" }
checksums = {}
for name in self.hashes:
checksums[name] = self.hashes[name].hexdigest().lower()
self.research_object.add_to_manifest(self.rel_path, checksums)
# To simplify our hash calculation we won't support
# seeking, reading or truncating, as we can't do
# similar seeks in the current hash.
# TODO: Support these? At the expense of invalidating
# the current hash, then having to recalculate at close()
def seekable(self): # type: () -> bool
return False
def readable(self): # type: () -> bool
return False
def truncate(self, size=None):
# type: (Optional[int]) -> int
# FIXME: This breaks contract IOBase,
# as it means we would have to recalculate the hash
if size is not None:
raise IOError("WritableBagFile can't truncate")
return self.tell()
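# A minimal usage sketch (assumes a ResearchObject and its StdFsAccess are
# already set up; the path below is illustrative): bytes written through the
# returned handle are hashed on the fly and registered in the bag manifest on close.
#
#   ro = ResearchObject(StdFsAccess(""))
#   with ro.write_bag_file("metadata/logs/example.txt") as handle:
#       handle.write("hello world\n")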
def _check_mod_11_2(numeric_string):
# type: (str) -> bool
"""
Validate numeric_string for its MOD-11-2 checksum.
Any "-" in the numeric_string are ignored.
The last digit of numeric_string is assumed to be the checksum, 0-9 or X.
See ISO/IEC 7064:2003 and
https://support.orcid.org/knowledgebase/articles/116780-structure-of-the-orcid-identifier
"""
# Strip -
nums = numeric_string.replace("-", "")
total = 0
# skip last (check)digit
for num in nums[:-1]:
digit = int(num)
total = (total + digit) * 2
remainder = total % 11
result = (12 - remainder) % 11
if result == 10:
checkdigit = "X"
else:
checkdigit = str(result)
# Compare against last digit or X
return nums[-1].upper() == checkdigit
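# Worked example: for 0000-0002-1825-0097 the running total over the first
# 15 digits is 1314, 1314 % 11 == 5 and (12 - 5) % 11 == 7, which matches the
# final digit:
#
#   _check_mod_11_2("0000-0002-1825-0097")   # True
#   _check_mod_11_2("0000-0002-1694-233X")   # True (check digit X means 10)
#   _check_mod_11_2("0000-0002-1825-0098")   # False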
def _valid_orcid(orcid): # type: (Optional[str]) -> str
"""
Ensure orcid is a valid ORCID identifier.
The string must be equivalent to one of these forms:
0000-0002-1825-0097
orcid.org/0000-0002-1825-0097
http://orcid.org/0000-0002-1825-0097
https://orcid.org/0000-0002-1825-0097
If the ORCID number or prefix is invalid, a ValueError is raised.
The returned ORCID string is always in the form of:
https://orcid.org/0000-0002-1825-0097
"""
if orcid is None or not orcid:
raise ValueError("ORCID cannot be unspecified")
# Liberal in what we consume, e.g. ORCID.org/0000-0002-1825-009x
orcid = orcid.lower()
match = re.match(
# Note: concatenated r"" r"" below so we can add comments to the pattern
# Optional hostname, with or without protocol
r"(http://orcid\.org/|https://orcid\.org/|orcid\.org/)?"
# alternative pattern, but probably messier
# r"^((https?://)?orcid.org/)?"
# ORCID number is always 4x4 numerical digits,
# but last digit (modulus 11 checksum)
# can also be X (but we made it lowercase above).
# e.g. 0000-0002-1825-0097
# or 0000-0002-1694-233x
r"(?P<orcid>(\d{4}-\d{4}-\d{4}-\d{3}[0-9x]))$",
orcid,
)
help_url = (
"https://support.orcid.org/knowledgebase/articles/"
"116780-structure-of-the-orcid-identifier"
)
if not match:
raise ValueError("Invalid ORCID: %s\n%s" % (orcid, help_url))
# Conservative in what we produce:
# a) Ensure any checksum digit is uppercase
orcid_num = match.group("orcid").upper()
# b) ..and correct
if not _check_mod_11_2(orcid_num):
raise ValueError("Invalid ORCID checksum: %s\n%s" % (orcid_num, help_url))
# c) Re-add the official prefix https://orcid.org/
return "https://orcid.org/%s" % orcid_num
class ProvenanceProfile:
"""
Provenance profile.
Populated as the workflow runs.
"""
def __init__(
self,
research_object, # type: ResearchObject
full_name, # type: str
host_provenance, # type: bool
user_provenance, # type: bool
orcid, # type: str
fsaccess: StdFsAccess,
run_uuid=None, # type: Optional[uuid.UUID]
): # type: (...) -> None
"""Initialize the provenance profile."""
self.fsaccess = fsaccess
self.orcid = orcid
self.research_object = research_object
self.folder = self.research_object.folder
self.document = ProvDocument()
self.host_provenance = host_provenance
self.user_provenance = user_provenance
self.engine_uuid = research_object.engine_uuid
self.add_to_manifest = self.research_object.add_to_manifest
if self.orcid:
_logger.debug("[provenance] Creator ORCID: %s", self.orcid)
self.full_name = full_name
if self.full_name:
_logger.debug("[provenance] Creator Full name: %s", self.full_name)
if run_uuid is None:
run_uuid = uuid.uuid4()
self.workflow_run_uuid = run_uuid
self.workflow_run_uri = run_uuid.urn
self.generate_prov_doc()
def __str__(self): # type: () -> str
"""Represent this Provenvance profile as a string."""
return "ProvenanceProfile <%s> in <%s>" % (
self.workflow_run_uri,
self.research_object,
)
def generate_prov_doc(self):
# type: () -> Tuple[str, ProvDocument]
"""Add basic namespaces."""
def host_provenance(document):
# type: (ProvDocument) -> None
"""Record host provenance."""
document.add_namespace(CWLPROV)
document.add_namespace(UUID)
document.add_namespace(FOAF)
hostname = getfqdn()
# won't have a foaf:accountServiceHomepage for unix hosts, but
# we can at least provide hostname
document.agent(
ACCOUNT_UUID,
{
provM.PROV_TYPE: FOAF["OnlineAccount"],
"prov:location": hostname,
CWLPROV["hostname"]: hostname,
},
)
self.cwltool_version = "cwltool %s" % versionstring().split()[-1]
self.document.add_namespace("wfprov", "http://purl.org/wf4ever/wfprov#")
# document.add_namespace('prov', 'http://www.w3.org/ns/prov#')
self.document.add_namespace("wfdesc", "http://purl.org/wf4ever/wfdesc#")
# TODO: Make this ontology. For now only has cwlprov:image
self.document.add_namespace("cwlprov", "https://w3id.org/cwl/prov#")
self.document.add_namespace("foaf", "http://xmlns.com/foaf/0.1/")
self.document.add_namespace("schema", "http://schema.org/")
self.document.add_namespace("orcid", "https://orcid.org/")
self.document.add_namespace("id", "urn:uuid:")
# NOTE: Internet draft expired 2004-03-04 (!)
# https://tools.ietf.org/html/draft-thiemann-hash-urn-01
# TODO: Change to nih:sha-256; hashes
# https://tools.ietf.org/html/rfc6920#section-7
self.document.add_namespace("data", "urn:hash::sha1:")
# Also needed for docker images
self.document.add_namespace(SHA256, "nih:sha-256;")
# info only, won't really be used by prov as sub-resources use /
self.document.add_namespace("researchobject", self.research_object.base_uri)
# annotations
self.metadata_ns = self.document.add_namespace(
"metadata", self.research_object.base_uri + METADATA + "/"
)
# Pre-register provenance directory so we can refer to its files
self.provenance_ns = self.document.add_namespace(
"provenance", self.research_object.base_uri + _posix_path(PROVENANCE) + "/"
)
ro_identifier_workflow = self.research_object.base_uri + "workflow/packed.cwl#"
self.wf_ns = self.document.add_namespace("wf", ro_identifier_workflow)
ro_identifier_input = (
self.research_object.base_uri + "workflow/primary-job.json#"
)
self.document.add_namespace("input", ro_identifier_input)
# More info about the account (e.g. username, fullname)
# may or may not have been previously logged by user_provenance()
# .. but we always know cwltool was launched (directly or indirectly)
# by a user account, as cwltool is a command line tool
account = self.document.agent(ACCOUNT_UUID)
if self.orcid or self.full_name:
person = {provM.PROV_TYPE: PROV["Person"], "prov:type": SCHEMA["Person"]}
if self.full_name:
person["prov:label"] = self.full_name
person["foaf:name"] = self.full_name
person["schema:name"] = self.full_name
else:
# TODO: Look up name from ORCID API?
pass
agent = self.document.agent(self.orcid or uuid.uuid4().urn, person)
self.document.actedOnBehalfOf(account, agent)
else:
if self.host_provenance:
host_provenance(self.document)
if self.user_provenance:
self.research_object.user_provenance(self.document)
# The execution of cwltool
wfengine = self.document.agent(
self.engine_uuid,
{
provM.PROV_TYPE: PROV["SoftwareAgent"],
"prov:type": WFPROV["WorkflowEngine"],
"prov:label": self.cwltool_version,
},
)
# FIXME: This datetime will be a bit too delayed, we should
# capture when cwltool.py earliest started?
self.document.wasStartedBy(wfengine, None, account, datetime.datetime.now())
# define workflow run level activity
self.document.activity(
self.workflow_run_uri,
datetime.datetime.now(),
None,
{
provM.PROV_TYPE: WFPROV["WorkflowRun"],
"prov:label": "Run of workflow/packed.cwl#main",
},
)
# association between SoftwareAgent and WorkflowRun
main_workflow = "wf:main"
self.document.wasAssociatedWith(
self.workflow_run_uri, self.engine_uuid, main_workflow
)
self.document.wasStartedBy(
self.workflow_run_uri, None, self.engine_uuid, datetime.datetime.now()
)
return (self.workflow_run_uri, self.document)
def evaluate(
self,
process: Process,
job, # type: Any
job_order_object, # type: Dict[str, str]
research_obj, # type: ResearchObject
): # type: (...) -> None
"""Evaluate the nature of job."""
if not hasattr(process, "steps"):
# record provenance of independent commandline tool executions
self.prospective_prov(job)
customised_job = copy_job_order(job, job_order_object)
self.used_artefacts(customised_job, self.workflow_run_uri)
research_obj.create_job(customised_job, job)
elif hasattr(job, "workflow"):
# record provenance of workflow executions
self.prospective_prov(job)
customised_job = copy_job_order(job, job_order_object)
self.used_artefacts(customised_job, self.workflow_run_uri)
def record_process_start(self, process, job, process_run_id=None):
# type: (Process, Any, Optional[str]) -> Optional[str]
if not hasattr(process, "steps"):
process_run_id = self.workflow_run_uri
elif not hasattr(job, "workflow"):
# commandline tool execution as part of workflow
name = str(job.name) if hasattr(job, "name") else ""
process_name = urllib.parse.quote(name, safe=":/,#")
process_run_id = self.start_process(process_name, datetime.datetime.now())
return process_run_id
def start_process(self, process_name, when, process_run_id=None):
# type: (str, datetime.datetime, Optional[str]) -> str
"""Record the start of each Process."""
if process_run_id is None:
process_run_id = uuid.uuid4().urn
prov_label = "Run of workflow/packed.cwl#main/" + process_name
self.document.activity(
process_run_id,
None,
None,
{provM.PROV_TYPE: WFPROV["ProcessRun"], provM.PROV_LABEL: prov_label},
)
self.document.wasAssociatedWith(
process_run_id, self.engine_uuid, str("wf:main/" + process_name)
)
self.document.wasStartedBy(
process_run_id, None, self.workflow_run_uri, when, None, None
)
return process_run_id
def record_process_end(
self,
process_name: str,
process_run_id: str,
outputs: Any,
when: datetime.datetime,
) -> None:
self.generate_output_prov(outputs, process_run_id, process_name)
self.document.wasEndedBy(process_run_id, None, self.workflow_run_uri, when)
def declare_file(self, value):
# type: (MutableMapping[str, Any]) -> Tuple[ProvEntity, ProvEntity, str]
if value["class"] != "File":
raise ValueError("Must have class:File: %s" % value)
# Need to determine file hash aka RO filename
entity = None # type: Optional[ProvEntity]
checksum = None
if "checksum" in value:
csum = value["checksum"]
(method, checksum) = csum.split("$", 1)
if method == SHA1 and self.research_object.has_data_file(checksum):
entity = self.document.entity("data:" + checksum)
if not entity and "location" in value:
location = str(value["location"])
# If we made it here, we'll have to add it to the RO
with self.fsaccess.open(location, "rb") as fhandle:
relative_path = self.research_object.add_data_file(fhandle)
# FIXME: This naively relies on add_data_file setting hash as filename
checksum = PurePath(relative_path).name
entity = self.document.entity(
"data:" + checksum, {provM.PROV_TYPE: WFPROV["Artifact"]}
)
if "checksum" not in value:
value["checksum"] = "%s$%s" % (SHA1, checksum)
if not entity and "contents" in value:
# Anonymous file, add content as string
entity, checksum = self.declare_string(value["contents"])
# By here one of them should have worked!
if not entity or not checksum:
raise ValueError(
"class:File but missing checksum/location/content: %r" % value
)
# Track filename and extension, this is generally useful only for
# secondaryFiles. Note that multiple uses of a file might thus record
# different names for the same entity, so we'll
# make/track a specialized entity by UUID
file_id = value.setdefault("@id", uuid.uuid4().urn)
# A specialized entity that has just these names
file_entity = self.document.entity(
file_id,
[(provM.PROV_TYPE, WFPROV["Artifact"]), (provM.PROV_TYPE, WF4EVER["File"])],
) # type: ProvEntity
if "basename" in value:
file_entity.add_attributes({CWLPROV["basename"]: value["basename"]})
if "nameroot" in value:
file_entity.add_attributes({CWLPROV["nameroot"]: value["nameroot"]})
if "nameext" in value:
file_entity.add_attributes({CWLPROV["nameext"]: value["nameext"]})
self.document.specializationOf(file_entity, entity)
# Check for secondaries
for sec in value.get("secondaryFiles", ()):
# TODO: Record these in a specializationOf entity with UUID?
if sec["class"] == "File":
(sec_entity, _, _) = self.declare_file(sec)
elif sec["class"] == "Directory":
sec_entity = self.declare_directory(sec)
else:
raise ValueError("Got unexpected secondaryFiles value: {}".format(sec))
# We don't know how/when/where the secondary file was generated,
# but CWL convention is a kind of summary/index derived
# from the original file. As its generally in a different format
# then prov:Quotation is not appropriate.
self.document.derivation(
sec_entity,
file_entity,
other_attributes={PROV["type"]: CWLPROV["SecondaryFile"]},
)
return file_entity, entity, checksum
def declare_directory(self, value: MutableMapping[str, Any]) -> ProvEntity:
"""Register any nested files/directories."""
# FIXME: Calculate a hash-like identifier for directory
# so we get same value if it's the same filenames/hashes
# in a different location.
# For now, mint a new UUID to identify this directory, but
# attempt to keep it inside the value dictionary
dir_id = value.setdefault("@id", uuid.uuid4().urn)
# New annotation file to keep the ORE Folder listing
ore_doc_fn = dir_id.replace("urn:uuid:", "directory-") + ".ttl"
dir_bundle = self.document.bundle(self.metadata_ns[ore_doc_fn])
coll = self.document.entity(
dir_id,
[
(provM.PROV_TYPE, WFPROV["Artifact"]),
(provM.PROV_TYPE, PROV["Collection"]),
(provM.PROV_TYPE, PROV["Dictionary"]),
(provM.PROV_TYPE, RO["Folder"]),
],
)
# ORE description of ro:Folder, saved separately
coll_b = dir_bundle.entity(
dir_id,
[(provM.PROV_TYPE, RO["Folder"]), (provM.PROV_TYPE, ORE["Aggregation"])],
)
self.document.mentionOf(dir_id + "#ore", dir_id, dir_bundle.identifier)
# dir_manifest = dir_bundle.entity(
# dir_bundle.identifier, {PROV["type"]: ORE["ResourceMap"],
# ORE["describes"]: coll_b.identifier})
coll_attribs = [(ORE["isDescribedBy"], dir_bundle.identifier)]
coll_b_attribs = [] # type: List[Tuple[Identifier, ProvEntity]]
# FIXME: .listing might not be populated yet - hopefully
# a later call to this method will sort that
is_empty = True
if "listing" not in value:
get_listing(self.fsaccess, value)
for entry in value.get("listing", []):
is_empty = False
# Declare child-artifacts
entity = self.declare_artefact(entry)
self.document.membership(coll, entity)
# Membership relation aka our ORE Proxy
m_id = uuid.uuid4().urn
m_entity = self.document.entity(m_id)
m_b = dir_bundle.entity(m_id)
# PROV-O style Dictionary
# https://www.w3.org/TR/prov-dictionary/#dictionary-ontological-definition
# ..as prov.py do not currently allow PROV-N extensions
# like hadDictionaryMember(..)
m_entity.add_asserted_type(PROV["KeyEntityPair"])
m_entity.add_attributes(
{PROV["pairKey"]: entry["basename"], PROV["pairEntity"]: entity,}
)
# As well as a being a
# http://wf4ever.github.io/ro/2016-01-28/ro/#FolderEntry
m_b.add_asserted_type(RO["FolderEntry"])
m_b.add_asserted_type(ORE["Proxy"])
m_b.add_attributes(
{
RO["entryName"]: entry["basename"],
ORE["proxyIn"]: coll,
ORE["proxyFor"]: entity,
}
)
coll_attribs.append((PROV["hadDictionaryMember"], m_entity))
coll_b_attribs.append((ORE["aggregates"], m_b))
coll.add_attributes(coll_attribs)
coll_b.add_attributes(coll_b_attribs)
# Also Save ORE Folder as annotation metadata
ore_doc = ProvDocument()
ore_doc.add_namespace(ORE)
ore_doc.add_namespace(RO)
ore_doc.add_namespace(UUID)
ore_doc.add_bundle(dir_bundle)
ore_doc = ore_doc.flattened()
ore_doc_path = str(PurePosixPath(METADATA, ore_doc_fn))
with self.research_object.write_bag_file(ore_doc_path) as provenance_file:
ore_doc.serialize(provenance_file, format="rdf", rdf_format="turtle")
self.research_object.add_annotation(
dir_id, [ore_doc_fn], ORE["isDescribedBy"].uri
)
if is_empty:
# Empty directory
coll.add_asserted_type(PROV["EmptyCollection"])
coll.add_asserted_type(PROV["EmptyDictionary"])
self.research_object.add_uri(coll.identifier.uri)
return coll
def declare_string(self, value):
# type: (str) -> Tuple[ProvEntity, str]
"""Save as string in UTF-8."""
byte_s = BytesIO(str(value).encode(ENCODING))
data_file = self.research_object.add_data_file(byte_s, content_type=TEXT_PLAIN)
checksum = PurePosixPath(data_file).name
# FIXME: Don't naively assume add_data_file uses hash in filename!
data_id = "data:%s" % PurePosixPath(data_file).stem
entity = self.document.entity(
data_id, {provM.PROV_TYPE: WFPROV["Artifact"], provM.PROV_VALUE: str(value)}
) # type: ProvEntity
return entity, checksum
def declare_artefact(self, value):
# type: (Any) -> ProvEntity
"""Create data artefact entities for all file objects."""
if value is None:
# FIXME: If this can happen in CWL, we'll
# need a better way to represent this in PROV
return self.document.entity(CWLPROV["None"], {provM.PROV_LABEL: "None"})
if isinstance(value, (bool, int, float)):
# Typically used in job documents for flags
# FIXME: Make consistent hash URIs for these
# that somehow include the type
# (so "1" != 1 != "1.0" != true)
entity = self.document.entity(uuid.uuid4().urn, {provM.PROV_VALUE: value})
self.research_object.add_uri(entity.identifier.uri)
return entity
if isinstance(value, str):
(entity, _) = self.declare_string(value)
return entity
if isinstance(value, bytes):
# If we got here then we must be in Python 3
byte_s = BytesIO(value)
data_file = self.research_object.add_data_file(byte_s)
# FIXME: Don't naively assume add_data_file uses hash in filename!
data_id = "data:%s" % PurePosixPath(data_file).stem
return self.document.entity(
data_id,
{provM.PROV_TYPE: WFPROV["Artifact"], provM.PROV_VALUE: str(value)},
)
if isinstance(value, MutableMapping):
if "@id" in value:
# Already processed this value, but it might not be in this PROV
entities = self.document.get_record(value["@id"])
if entities:
return entities[0]
# else, unknown in PROV, re-add below as if it's fresh
# Base case - we found a File we need to update
if value.get("class") == "File":
(entity, _, _) = self.declare_file(value)
value["@id"] = entity.identifier.uri
return entity
if value.get("class") == "Directory":
entity = self.declare_directory(value)
value["@id"] = entity.identifier.uri
return entity
coll_id = value.setdefault("@id", uuid.uuid4().urn)
# some other kind of dictionary?
# TODO: also Save as JSON
coll = self.document.entity(
coll_id,
[
(provM.PROV_TYPE, WFPROV["Artifact"]),
(provM.PROV_TYPE, PROV["Collection"]),
(provM.PROV_TYPE, PROV["Dictionary"]),
],
)
if value.get("class"):
_logger.warning("Unknown data class %s.", value["class"])
# FIXME: The class might be "http://example.com/somethingelse"
coll.add_asserted_type(CWLPROV[value["class"]])
# Let's iterate and recurse
coll_attribs = [] # type: List[Tuple[Identifier, ProvEntity]]
for (key, val) in value.items():
v_ent = self.declare_artefact(val)
self.document.membership(coll, v_ent)
m_entity = self.document.entity(uuid.uuid4().urn)
# Note: only support PROV-O style dictionary
# https://www.w3.org/TR/prov-dictionary/#dictionary-ontological-definition
# as prov.py do not easily allow PROV-N extensions
m_entity.add_asserted_type(PROV["KeyEntityPair"])
m_entity.add_attributes(
{PROV["pairKey"]: str(key), PROV["pairEntity"]: v_ent}
)
coll_attribs.append((PROV["hadDictionaryMember"], m_entity))
coll.add_attributes(coll_attribs)
self.research_object.add_uri(coll.identifier.uri)
return coll
# some other kind of Collection?
# TODO: also save as JSON
try:
members = []
for each_input_obj in iter(value):
# Recurse and register any nested objects
e = self.declare_artefact(each_input_obj)
members.append(e)
# If we reached this, then we were allowed to iterate
coll = self.document.entity(
uuid.uuid4().urn,
[
(provM.PROV_TYPE, WFPROV["Artifact"]),
(provM.PROV_TYPE, PROV["Collection"]),
],
)
if not members:
coll.add_asserted_type(PROV["EmptyCollection"])
else:
for member in members:
# FIXME: This won't preserve order, for that
# we would need to use PROV.Dictionary
# with numeric keys
self.document.membership(coll, member)
self.research_object.add_uri(coll.identifier.uri)
# FIXME: list value does not support adding "@id"
return coll
except TypeError:
_logger.warning("Unrecognized type %s of %r", type(value), value)
# Let's just fall back to Python repr()
entity = self.document.entity(
uuid.uuid4().urn, {provM.PROV_LABEL: repr(value)}
)
self.research_object.add_uri(entity.identifier.uri)
return entity
def used_artefacts(
self,
job_order: Union[Dict[Any, Any], List[Dict[Any, Any]]],
process_run_id: str,
name: Optional[str] = None,
) -> None:
"""Add used() for each data artefact."""
if isinstance(job_order, list):
for entry in job_order:
self.used_artefacts(entry, process_run_id, name)
else:
# FIXME: Use workflow name in packed.cwl, "main" is wrong for nested workflows
base = "main"
if name is not None:
base += "/" + name
for key, value in job_order.items():
prov_role = self.wf_ns["%s/%s" % (base, key)]
try:
entity = self.declare_artefact(value)
self.document.used(
process_run_id,
entity,
datetime.datetime.now(),
None,
{"prov:role": prov_role},
)
except OSError:
pass
def generate_output_prov(
self,
final_output: Union[Dict[str, Any], List[Dict[str, Any]]],
process_run_id: Optional[str],
name: Optional[str],
) -> None:
"""Call wasGeneratedBy() for each output,copy the files into the RO."""
if isinstance(final_output, list):
for entry in final_output:
self.generate_output_prov(entry, process_run_id, name)
else:
# Timestamp should be created at the earliest
timestamp = datetime.datetime.now()
# For each output, find/register the corresponding
# entity (UUID) and document it as generated in
# a role corresponding to the output
for output, value in final_output.items():
entity = self.declare_artefact(value)
if name is not None:
name = urllib.parse.quote(str(name), safe=":/,#")
# FIXME: Probably not "main" in nested workflows
role = self.wf_ns["main/%s/%s" % (name, output)]
else:
role = self.wf_ns["main/%s" % output]
if not process_run_id:
process_run_id = self.workflow_run_uri
self.document.wasGeneratedBy(
entity, process_run_id, timestamp, None, {"prov:role": role}
)
def prospective_prov(self, job):
# type: (Any) -> None
"""Create prospective prov recording as wfdesc prov:Plan."""
if not hasattr(job, "steps"):
# direct command line tool execution
self.document.entity(
"wf:main",
{
provM.PROV_TYPE: WFDESC["Process"],
"prov:type": PROV["Plan"],
"prov:label": "Prospective provenance",
},
)
return
self.document.entity(
"wf:main",
{
provM.PROV_TYPE: WFDESC["Workflow"],
"prov:type": PROV["Plan"],
"prov:label": "Prospective provenance",
},
)
for step in job.steps:
stepnametemp = "wf:main/" + str(step.name)[5:]
stepname = urllib.parse.quote(stepnametemp, safe=":/,#")
step = self.document.entity(
stepname,
{provM.PROV_TYPE: WFDESC["Process"], "prov:type": PROV["Plan"]},
)
self.document.entity(
"wf:main",
{"wfdesc:hasSubProcess": step, "prov:label": "Prospective provenance"},
)
# TODO: Declare roles/parameters as well
def activity_has_provenance(self, activity, prov_ids):
# type: (str, List[Identifier]) -> None
"""Add http://www.w3.org/TR/prov-aq/ relations to nested PROV files."""
# NOTE: The below will only work if the corresponding metadata/provenance arcp URI
# is a pre-registered namespace in the PROV Document
attribs = [(PROV["has_provenance"], prov_id) for prov_id in prov_ids]
self.document.activity(activity, other_attributes=attribs)
# Tip: we can't use https://www.w3.org/TR/prov-links/#term-mention
# as prov:mentionOf() is only for entities, not activities
uris = [i.uri for i in prov_ids]
self.research_object.add_annotation(activity, uris, PROV["has_provenance"].uri)
def finalize_prov_profile(self, name):
# type: (Optional[str]) -> List[Identifier]
"""Transfer the provenance related files to the RO."""
# NOTE: Relative posix path
if name is None:
# master workflow, fixed filenames
filename = "primary.cwlprov"
else:
# ASCII-friendly filename, avoiding % as we don't want %2520 in manifest.json
wf_name = urllib.parse.quote(str(name), safe="").replace("%", "_")
# Note that the above could cause overlaps for similarly named
# workflows, but that's OK as we'll also include run uuid
# which also covers the case of this step being run in
# multiple places or iterations
filename = "%s.%s.cwlprov" % (wf_name, self.workflow_run_uuid)
basename = str(PurePosixPath(PROVENANCE) / filename)
# TODO: Also support other profiles than CWLProv, e.g. ProvOne
# list of prov identifiers of provenance files
prov_ids = []
# https://www.w3.org/TR/prov-xml/
with self.research_object.write_bag_file(basename + ".xml") as provenance_file:
self.document.serialize(provenance_file, format="xml", indent=4)
prov_ids.append(self.provenance_ns[filename + ".xml"])
# https://www.w3.org/TR/prov-n/
with self.research_object.write_bag_file(
basename + ".provn"
) as provenance_file:
self.document.serialize(provenance_file, format="provn", indent=2)
prov_ids.append(self.provenance_ns[filename + ".provn"])
# https://www.w3.org/Submission/prov-json/
with self.research_object.write_bag_file(basename + ".json") as provenance_file:
self.document.serialize(provenance_file, format="json", indent=2)
prov_ids.append(self.provenance_ns[filename + ".json"])
# "rdf" aka https://www.w3.org/TR/prov-o/
# which can be serialized to ttl/nt/jsonld (and more!)
# https://www.w3.org/TR/turtle/
with self.research_object.write_bag_file(basename + ".ttl") as provenance_file:
self.document.serialize(provenance_file, format="rdf", rdf_format="turtle")
prov_ids.append(self.provenance_ns[filename + ".ttl"])
# https://www.w3.org/TR/n-triples/
with self.research_object.write_bag_file(basename + ".nt") as provenance_file:
self.document.serialize(
provenance_file, format="rdf", rdf_format="ntriples"
)
prov_ids.append(self.provenance_ns[filename + ".nt"])
# https://www.w3.org/TR/json-ld/
# TODO: Use a nice JSON-LD context
# see also https://eprints.soton.ac.uk/395985/
# 404 Not Found on https://provenance.ecs.soton.ac.uk/prov.jsonld :(
with self.research_object.write_bag_file(
basename + ".jsonld"
) as provenance_file:
self.document.serialize(provenance_file, format="rdf", rdf_format="json-ld")
prov_ids.append(self.provenance_ns[filename + ".jsonld"])
_logger.debug("[provenance] added provenance: %s", prov_ids)
return prov_ids
class ResearchObject:
"""CWLProv Research Object."""
def __init__(self, fsaccess, temp_prefix_ro="tmp", orcid="", full_name=""):
# type: (StdFsAccess, str, str, str) -> None
"""Initialize the ResearchObject."""
self.temp_prefix = temp_prefix_ro
self.orcid = "" if not orcid else _valid_orcid(orcid)
self.full_name = full_name
tmp_dir, tmp_prefix = os.path.split(temp_prefix_ro)
self.folder = os.path.abspath(
tempfile.mkdtemp(prefix=tmp_prefix, dir=tmp_dir)
) # type: str
self.closed = False
# map of filename "data/de/alsdklkas": 12398123 bytes
self.bagged_size = {} # type: Dict[str, int]
self.tagfiles = set() # type: Set[str]
self._file_provenance = {} # type: Dict[str, Dict[str, str]]
self._external_aggregates = [] # type: List[Dict[str, str]]
self.annotations = [] # type: List[Dict[str, Any]]
self._content_types = {} # type: Dict[str,str]
self.fsaccess = fsaccess
# These should be replaced by generate_prov_doc when workflow/run IDs are known:
self.engine_uuid = "urn:uuid:%s" % uuid.uuid4()
self.ro_uuid = uuid.uuid4()
self.base_uri = "arcp://uuid,%s/" % self.ro_uuid
self.cwltool_version = "cwltool %s" % versionstring().split()[-1]
##
self.relativised_input_object = {} # type: Dict[Any, Any]
self._initialize()
_logger.debug("[provenance] Temporary research object: %s", self.folder)
def self_check(self): # type: () -> None
"""Raise ValueError if this RO is closed."""
if self.closed:
raise ValueError(
"This ResearchObject has already been closed and is not "
"available for futher manipulation."
)
def __str__(self): # type: () -> str
"""Represent this RO as a string."""
return "ResearchObject <{}> in <{}>".format(self.ro_uuid, self.folder)
def _initialize(self): # type: () -> None
for research_obj_folder in (
METADATA,
DATA,
WORKFLOW,
SNAPSHOT,
PROVENANCE,
LOGS,
):
os.makedirs(os.path.join(self.folder, research_obj_folder))
self._initialize_bagit()
def _initialize_bagit(self): # type: () -> None
"""Write fixed bagit header."""
self.self_check()
bagit = os.path.join(self.folder, "bagit.txt")
# encoding: always UTF-8 (although ASCII would suffice here)
# newline: ensure LF also on Windows
with open(bagit, "w", encoding=ENCODING, newline="\n") as bag_it_file:
# TODO: \n or \r\n ?
bag_it_file.write("BagIt-Version: 0.97\n")
bag_it_file.write("Tag-File-Character-Encoding: %s\n" % ENCODING)
def open_log_file_for_activity(self, uuid_uri: str) -> WritableBagFile:
self.self_check()
# Ensure valid UUID for safe filenames
activity_uuid = uuid.UUID(uuid_uri)
if activity_uuid.urn == self.engine_uuid:
# It's the engine aka cwltool!
name = "engine"
else:
name = "activity"
p = os.path.join(LOGS, "{}.{}.txt".format(name, activity_uuid))
_logger.debug("[provenance] Opening log file for %s: %s" % (name, p))
self.add_annotation(activity_uuid.urn, [p], CWLPROV["log"].uri)
return self.write_bag_file(p)
def _finalize(self): # type: () -> None
self._write_ro_manifest()
self._write_bag_info()
def user_provenance(self, document): # type: (ProvDocument) -> None
"""Add the user provenance."""
self.self_check()
(username, fullname) = _whoami()
if not self.full_name:
self.full_name = fullname
document.add_namespace(UUID)
document.add_namespace(ORCID)
document.add_namespace(FOAF)
account = document.agent(
ACCOUNT_UUID,
{
provM.PROV_TYPE: FOAF["OnlineAccount"],
"prov:label": username,
FOAF["accountName"]: username,
},
)
user = document.agent(
self.orcid or USER_UUID,
{
provM.PROV_TYPE: PROV["Person"],
"prov:label": self.full_name,
FOAF["name"]: self.full_name,
FOAF["account"]: account,
},
)
# cwltool may be started on the shell (directly by user),
# by shell script (indirectly by user)
# or from a different program
# (which again is launched by any of the above)
#
# We can't tell in which way, but ultimately we're still
        # acting on behalf of that user (even if we might
# get their name wrong!)
document.actedOnBehalfOf(account, user)
def write_bag_file(self, path, encoding=ENCODING):
# type: (str, Optional[str]) -> WritableBagFile
"""Write the bag file into our research object."""
self.self_check()
# For some reason below throws BlockingIOError
# fp = BufferedWriter(WritableBagFile(self, path))
bag_file = WritableBagFile(self, path)
if encoding is not None:
# encoding: match Tag-File-Character-Encoding: UTF-8
# newline: ensure LF also on Windows
return cast(
WritableBagFile,
TextIOWrapper(
cast(IO[bytes], bag_file), encoding=encoding, newline="\n"
),
)
return bag_file
def add_tagfile(self, path, timestamp=None):
# type: (str, Optional[datetime.datetime]) -> None
"""Add tag files to our research object."""
self.self_check()
checksums = {}
# Read file to calculate its checksum
if os.path.isdir(path):
return
# FIXME: do the right thing for directories
with open(path, "rb") as tag_file:
# FIXME: Should have more efficient open_tagfile() that
# does all checksums in one go while writing through,
# adding checksums after closing.
# Below probably OK for now as metadata files
# are not too large..?
checksums[SHA1] = checksum_copy(tag_file, hasher=hashlib.sha1)
tag_file.seek(0)
checksums[SHA256] = checksum_copy(tag_file, hasher=hashlib.sha256)
tag_file.seek(0)
checksums[SHA512] = checksum_copy(tag_file, hasher=hashlib.sha512)
rel_path = _posix_path(os.path.relpath(path, self.folder))
self.tagfiles.add(rel_path)
self.add_to_manifest(rel_path, checksums)
if timestamp is not None:
self._file_provenance[rel_path] = {"createdOn": timestamp.isoformat()}
def _ro_aggregates(self):
# type: () -> List[Dict[str, Any]]
"""Gather dictionary of files to be added to the manifest."""
def guess_mediatype(rel_path):
# type: (str) -> Dict[str, Any]
"""Return the mediatypes."""
media_types = {
# Adapted from
# https://w3id.org/bundle/2014-11-05/#media-types
"txt": TEXT_PLAIN,
"ttl": 'text/turtle; charset="UTF-8"',
"rdf": "application/rdf+xml",
"json": "application/json",
"jsonld": "application/ld+json",
"xml": "application/xml",
##
"cwl": 'text/x+yaml; charset="UTF-8"',
"provn": 'text/provenance-notation; charset="UTF-8"',
"nt": "application/n-triples",
} # type: Dict[str, str]
conforms_to = {
"provn": "http://www.w3.org/TR/2013/REC-prov-n-20130430/",
"cwl": "https://w3id.org/cwl/",
} # type: Dict[str, str]
prov_conforms_to = {
"provn": "http://www.w3.org/TR/2013/REC-prov-n-20130430/",
"rdf": "http://www.w3.org/TR/2013/REC-prov-o-20130430/",
"ttl": "http://www.w3.org/TR/2013/REC-prov-o-20130430/",
"nt": "http://www.w3.org/TR/2013/REC-prov-o-20130430/",
"jsonld": "http://www.w3.org/TR/2013/REC-prov-o-20130430/",
"xml": "http://www.w3.org/TR/2013/NOTE-prov-xml-20130430/",
"json": "http://www.w3.org/Submission/2013/SUBM-prov-json-20130424/",
} # type: Dict[str, str]
extension = rel_path.rsplit(".", 1)[-1].lower() # type: Optional[str]
if extension == rel_path:
# No ".", no extension
extension = None
local_aggregate = {} # type: Dict[str, Any]
if extension in media_types:
local_aggregate["mediatype"] = media_types[extension]
if extension in conforms_to:
# TODO: Open CWL file to read its declared "cwlVersion", e.g.
# cwlVersion = "v1.0"
local_aggregate["conformsTo"] = conforms_to[extension]
if (
rel_path.startswith(_posix_path(PROVENANCE))
and extension in prov_conforms_to
):
if ".cwlprov" in rel_path:
# Our own!
local_aggregate["conformsTo"] = [
prov_conforms_to[extension],
CWLPROV_VERSION,
]
else:
# Some other PROV
# TODO: Recognize ProvOne etc.
local_aggregate["conformsTo"] = prov_conforms_to[extension]
return local_aggregate
aggregates = [] # type: List[Dict[str, Any]]
for path in self.bagged_size.keys():
aggregate_dict = {} # type: Dict[str, Any]
temp_path = PurePosixPath(path)
folder = temp_path.parent
filename = temp_path.name
# NOTE: Here we end up aggregating the abstract
# data items by their sha1 hash, so that it matches
# the entity() in the prov files.
# TODO: Change to nih:sha-256; hashes
# https://tools.ietf.org/html/rfc6920#section-7
aggregate_dict["uri"] = "urn:hash::sha1:" + filename
aggregate_dict["bundledAs"] = {
# The arcp URI is suitable ORE proxy; local to this Research Object.
# (as long as we don't also aggregate it by relative path!)
"uri": self.base_uri + path,
# relate it to the data/ path
"folder": "/%s/" % folder,
"filename": filename,
}
if path in self._file_provenance:
# Made by workflow run, merge captured provenance
aggregate_dict["bundledAs"].update(self._file_provenance[path])
else:
# Probably made outside wf run, part of job object?
pass
if path in self._content_types:
aggregate_dict["mediatype"] = self._content_types[path]
aggregates.append(aggregate_dict)
for path in self.tagfiles:
if not (
path.startswith(METADATA)
or path.startswith(WORKFLOW)
or path.startswith(SNAPSHOT)
):
# probably a bagit file
continue
if path == PurePosixPath(METADATA) / "manifest.json":
# Should not really be there yet! But anyway, we won't
# aggregate it.
continue
rel_aggregates = {} # type: Dict[str, Any]
            # These are local paths like metadata/provenance - but
            # we need to relativize them to our current directory,
            # as we are saved in metadata/manifest.json
uri = str(Path(os.pardir) / path)
rel_aggregates["uri"] = uri
rel_aggregates.update(guess_mediatype(path))
if path in self._file_provenance:
# Propagate file provenance (e.g. timestamp)
rel_aggregates.update(self._file_provenance[path])
elif not path.startswith(SNAPSHOT):
# make new timestamp?
rel_aggregates.update(self._self_made())
aggregates.append(rel_aggregates)
aggregates.extend(self._external_aggregates)
return aggregates
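    # Illustrative sketch (editor's note, not part of the original module): a data
    # payload entry produced by _ro_aggregates() above would look roughly like the
    # following, assuming a hypothetical sha1 checksum "deadbeef..." bagged under
    # data/de/:
    #
    #   {"uri": "urn:hash::sha1:deadbeef...",
    #    "bundledAs": {"uri": "arcp://uuid,<ro_uuid>/data/de/deadbeef...",
    #                  "folder": "/data/de/",
    #                  "filename": "deadbeef...",
    #                  "createdOn": "..."}}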
def add_uri(self, uri, timestamp=None):
# type: (str, Optional[datetime.datetime]) -> Dict[str, Any]
self.self_check()
aggr = self._self_made(timestamp=timestamp)
aggr["uri"] = uri
self._external_aggregates.append(aggr)
return aggr
def add_annotation(self, about, content, motivated_by="oa:describing"):
# type: (str, List[str], str) -> str
"""Cheap URI relativize for current directory and /."""
self.self_check()
curr = self.base_uri + METADATA + "/"
content = [c.replace(curr, "").replace(self.base_uri, "../") for c in content]
uri = uuid.uuid4().urn
ann = {
"uri": uri,
"about": about,
"content": content,
"oa:motivatedBy": {"@id": motivated_by},
}
self.annotations.append(ann)
return uri
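    # Illustrative sketch (editor's note, assuming METADATA == "metadata" and
    # WORKFLOW == "workflow"): the relativization above turns a content URI like
    # "<base_uri>metadata/provenance/primary.cwlprov.ttl" into
    # "provenance/primary.cwlprov.ttl" (relative to metadata/), and any other URI
    # under the RO base, e.g. "<base_uri>workflow/packed.cwl", into
    # "../workflow/packed.cwl"; external URIs are left untouched.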
def _ro_annotations(self):
# type: () -> List[Dict[str, Any]]
annotations = [] # type: List[Dict[str, Any]]
annotations.append(
{
"uri": uuid.uuid4().urn,
"about": self.ro_uuid.urn,
"content": "/",
# https://www.w3.org/TR/annotation-vocab/#named-individuals
"oa:motivatedBy": {"@id": "oa:describing"},
}
)
# How was it run?
# FIXME: Only primary*
prov_files = [
str(PurePosixPath(p).relative_to(METADATA))
for p in self.tagfiles
if p.startswith(_posix_path(PROVENANCE)) and "/primary." in p
]
annotations.append(
{
"uri": uuid.uuid4().urn,
"about": self.ro_uuid.urn,
"content": prov_files,
# Modulation of https://www.w3.org/TR/prov-aq/
"oa:motivatedBy": {"@id": "http://www.w3.org/ns/prov#has_provenance"},
}
)
# Where is the main workflow?
annotations.append(
{
"uri": uuid.uuid4().urn,
"about": str(PurePosixPath("..") / WORKFLOW / "packed.cwl"),
"oa:motivatedBy": {"@id": "oa:highlighting"},
}
)
annotations.append(
{
"uri": uuid.uuid4().urn,
"about": self.ro_uuid.urn,
"content": [
str(PurePosixPath("..") / WORKFLOW / "packed.cwl"),
str(PurePosixPath("..") / WORKFLOW / "primary-job.json"),
],
"oa:motivatedBy": {"@id": "oa:linking"},
}
)
# Add user-added annotations at end
annotations.extend(self.annotations)
return annotations
def _authored_by(self):
# type: () -> Dict[str, Any]
authored_by = {}
if self.orcid:
authored_by["orcid"] = self.orcid
if self.full_name:
authored_by["name"] = self.full_name
if not self.orcid:
authored_by["uri"] = USER_UUID
if authored_by:
return {"authoredBy": authored_by}
return {}
def _write_ro_manifest(self):
# type: () -> None
# Does not have to be this order, but it's nice to be consistent
manifest = OrderedDict() # type: Dict[str, Any]
manifest["@context"] = [
{"@base": "%s%s/" % (self.base_uri, _posix_path(METADATA))},
"https://w3id.org/bundle/context",
]
manifest["id"] = "/"
manifest["conformsTo"] = CWLPROV_VERSION
filename = "manifest.json"
manifest["manifest"] = filename
manifest.update(self._self_made())
manifest.update(self._authored_by())
manifest["aggregates"] = self._ro_aggregates()
manifest["annotations"] = self._ro_annotations()
json_manifest = json_dumps(manifest, indent=4, ensure_ascii=False)
rel_path = str(PurePosixPath(METADATA) / filename)
json_manifest += "\n"
with self.write_bag_file(rel_path) as manifest_file:
manifest_file.write(json_manifest)
def _write_bag_info(self):
# type: () -> None
with self.write_bag_file("bag-info.txt") as info_file:
info_file.write("Bag-Software-Agent: %s\n" % self.cwltool_version)
# FIXME: require sha-512 of payload to comply with profile?
# FIXME: Update profile
info_file.write(
"BagIt-Profile-Identifier: https://w3id.org/ro/bagit/profile\n"
)
info_file.write("Bagging-Date: %s\n" % datetime.date.today().isoformat())
info_file.write(
"External-Description: Research Object of CWL workflow run\n"
)
if self.full_name:
info_file.write("Contact-Name: %s\n" % self.full_name)
# NOTE: We can't use the urn:uuid:{UUID} of the workflow run (a prov:Activity)
# as identifier for the RO/bagit (a prov:Entity). However the arcp base URI is good.
info_file.write("External-Identifier: %s\n" % self.base_uri)
# Calculate size of data/ (assuming no external fetch.txt files)
total_size = sum(self.bagged_size.values())
num_files = len(self.bagged_size)
info_file.write("Payload-Oxum: %d.%d\n" % (total_size, num_files))
_logger.debug("[provenance] Generated bagit metadata: %s", self.folder)
def generate_snapshot(self, prov_dep):
# type: (MutableMapping[str, Any]) -> None
"""Copy all of the CWL files to the snapshot/ directory."""
self.self_check()
for key, value in prov_dep.items():
if key == "location" and value.split("/")[-1]:
filename = value.split("/")[-1]
path = os.path.join(self.folder, SNAPSHOT, filename)
filepath = ""
if "file://" in value:
filepath = value[7:]
else:
filepath = value
# FIXME: What if destination path already exists?
if os.path.exists(filepath):
try:
if os.path.isdir(filepath):
shutil.copytree(filepath, path)
else:
shutil.copy(filepath, path)
timestamp = datetime.datetime.fromtimestamp(
os.path.getmtime(filepath)
)
self.add_tagfile(path, timestamp)
except PermissionError:
pass # FIXME: avoids duplicate snapshotting; need better solution
elif key in ("secondaryFiles", "listing"):
for files in value:
if isinstance(files, MutableMapping):
self.generate_snapshot(files)
else:
pass
def packed_workflow(self, packed): # type: (str) -> None
"""Pack CWL description to generate re-runnable CWL object in RO."""
self.self_check()
rel_path = str(PurePosixPath(WORKFLOW) / "packed.cwl")
# Write as binary
with self.write_bag_file(rel_path, encoding=None) as write_pack:
# YAML is always UTF8, but json.dumps gives us str in py2
write_pack.write(packed.encode(ENCODING))
_logger.debug("[provenance] Added packed workflow: %s", rel_path)
def has_data_file(self, sha1hash): # type: (str) -> bool
"""Confirm the presence of the given file in the RO."""
folder = os.path.join(self.folder, DATA, sha1hash[0:2])
hash_path = os.path.join(folder, sha1hash)
return os.path.isfile(hash_path)
def add_data_file(self, from_fp, timestamp=None, content_type=None):
# type: (IO[Any], Optional[datetime.datetime], Optional[str]) -> str
"""Copy inputs to data/ folder."""
self.self_check()
tmp_dir, tmp_prefix = os.path.split(self.temp_prefix)
with tempfile.NamedTemporaryFile(
prefix=tmp_prefix, dir=tmp_dir, delete=False
) as tmp:
checksum = checksum_copy(from_fp, tmp)
# Calculate hash-based file path
folder = os.path.join(self.folder, DATA, checksum[0:2])
path = os.path.join(folder, checksum)
# os.rename assumed safe, as our temp file should
# be in same file system as our temp folder
if not os.path.isdir(folder):
os.makedirs(folder)
os.rename(tmp.name, path)
# Relative posix path
# (to avoid \ on Windows)
rel_path = _posix_path(os.path.relpath(path, self.folder))
# Register in bagit checksum
if Hasher == hashlib.sha1:
self._add_to_bagit(rel_path, sha1=checksum)
else:
_logger.warning(
"[provenance] Unknown hash method %s for bagit manifest", Hasher
)
# Inefficient, bagit support need to checksum again
self._add_to_bagit(rel_path)
_logger.debug("[provenance] Added data file %s", path)
if timestamp is not None:
self._file_provenance[rel_path] = self._self_made(timestamp)
_logger.debug("[provenance] Relative path for data file %s", rel_path)
if content_type is not None:
self._content_types[rel_path] = content_type
return rel_path
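    # Illustrative sketch (editor's note): data files are stored content-addressed
    # by checksum, so for a hypothetical sha1 "1a2b3c..." the payload ends up at
    # data/1a/1a2b3c..., which is exactly the path has_data_file("1a2b3c...")
    # checks later; the returned rel_path is the POSIX form of that location.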
def _self_made(self, timestamp=None):
# type: (Optional[datetime.datetime]) -> Dict[str, Any]
if timestamp is None:
timestamp = datetime.datetime.now()
return {
"createdOn": timestamp.isoformat(),
"createdBy": {"uri": self.engine_uuid, "name": self.cwltool_version},
}
def add_to_manifest(self, rel_path, checksums):
# type: (str, Dict[str,str]) -> None
"""Add files to the research object manifest."""
self.self_check()
if PurePosixPath(rel_path).is_absolute():
raise ValueError("rel_path must be relative: %s" % rel_path)
if os.path.commonprefix(["data/", rel_path]) == "data/":
# payload file, go to manifest
manifest = "manifest"
else:
# metadata file, go to tag manifest
manifest = "tagmanifest"
# Add checksums to corresponding manifest files
for (method, hash_value) in checksums.items():
# File not in manifest because we bailed out on
# existence in bagged_size above
manifestpath = os.path.join(
self.folder, "%s-%s.txt" % (manifest, method.lower())
)
# encoding: match Tag-File-Character-Encoding: UTF-8
# newline: ensure LF also on Windows
with open(
manifestpath, "a", encoding=ENCODING, newline="\n"
) as checksum_file:
line = "%s %s\n" % (hash_value, rel_path)
_logger.debug("[provenance] Added to %s: %s", manifestpath, line)
checksum_file.write(line)
def _add_to_bagit(self, rel_path, **checksums):
# type: (str, Any) -> None
if PurePosixPath(rel_path).is_absolute():
raise ValueError("rel_path must be relative: %s" % rel_path)
local_path = os.path.join(self.folder, _local_path(rel_path))
if not os.path.exists(local_path):
raise IOError(
"File %s does not exist within RO: %s" % (rel_path, local_path)
)
if rel_path in self.bagged_size:
# Already added, assume checksum OK
return
self.bagged_size[rel_path] = os.path.getsize(local_path)
if SHA1 not in checksums:
# ensure we always have sha1
checksums = dict(checksums)
with open(local_path, "rb") as file_path:
# FIXME: Need sha-256 / sha-512 as well for Research Object BagIt profile?
checksums[SHA1] = checksum_copy(file_path, hasher=hashlib.sha1)
self.add_to_manifest(rel_path, checksums)
def create_job(
self,
builder_job, # type: Dict[str, Any]
wf_job: Optional[
Callable[
[Dict[str, str], Callable[[Any, Any], Any], RuntimeContext],
Generator[Any, None, None],
]
] = None,
is_output=False, # type: bool
): # type: (...) -> Dict[str, str]
# TODO customise the file
"""Generate the new job object with RO specific relative paths."""
copied = copy.deepcopy(builder_job)
relativised_input_objecttemp = {} # type: Dict[str, Any]
self._relativise_files(copied)
def jdefault(o): # type: (Any) -> Dict[Any, Any]
return dict(o)
if is_output:
rel_path = PurePosixPath(WORKFLOW) / "primary-output.json"
else:
rel_path = PurePosixPath(WORKFLOW) / "primary-job.json"
j = json_dumps(copied, indent=4, ensure_ascii=False, default=jdefault)
with self.write_bag_file(str(rel_path)) as file_path:
file_path.write(j + "\n")
_logger.debug("[provenance] Generated customised job file: %s", rel_path)
# Generate dictionary with keys as workflow level input IDs and values
# as
# 1) for files the relativised location containing hash
# 2) for other attributes, the actual value.
relativised_input_objecttemp = {}
for key, value in copied.items():
if isinstance(value, MutableMapping):
if value.get("class") in ("File", "Directory"):
relativised_input_objecttemp[key] = value
else:
relativised_input_objecttemp[key] = value
self.relativised_input_object.update(
{k: v for k, v in relativised_input_objecttemp.items() if v}
)
return self.relativised_input_object
def _relativise_files(self, structure):
# type: (Dict[Any, Any]) -> None
"""Save any file objects into the RO and update the local paths."""
# Base case - we found a File we need to update
_logger.debug("[provenance] Relativising: %s", structure)
if isinstance(structure, MutableMapping):
if structure.get("class") == "File":
relative_path = None
if "checksum" in structure:
alg, checksum = structure["checksum"].split("$")
if alg != SHA1:
raise TypeError(
"Only SHA1 CWL checksums are currently supported: "
"{}".format(structure)
)
if self.has_data_file(checksum):
prefix = checksum[0:2]
relative_path = PurePosixPath("data") / prefix / checksum
                if relative_path is None and "location" in structure:
# Register in RO; but why was this not picked
# up by used_artefacts?
_logger.info("[provenance] Adding to RO %s", structure["location"])
with self.fsaccess.open(structure["location"], "rb") as fp:
relative_path = self.add_data_file(fp)
checksum = PurePosixPath(relative_path).name
structure["checksum"] = "%s$%s" % (SHA1, checksum)
if relative_path is not None:
# RO-relative path as new location
structure["location"] = str(PurePosixPath("..") / relative_path)
else:
_logger.warning(
"Could not determine RO path for file %s", structure
)
if "path" in structure:
del structure["path"]
if structure.get("class") == "Directory":
                # TODO: Generate anonymous Directory with a "listing"
# pointing to the hashed files
del structure["location"]
for val in structure.values():
try:
self._relativise_files(val)
except OSError:
pass
return
        if isinstance(structure, str):
# Just a string value, no need to iterate further
return
try:
for obj in iter(structure):
# Recurse and rewrite any nested File objects
self._relativise_files(obj)
except TypeError:
pass
def close(self, save_to=None):
# type: (Optional[str]) -> None
"""Close the Research Object, optionally saving to specified folder.
Closing will remove any temporary files used by this research object.
After calling this method, this ResearchObject instance can no longer
be used, except for no-op calls to .close().
        The 'save_to' folder should not exist - if it does, it will be deleted.
It is safe to call this function multiple times without the
        'save_to' argument, e.g. within a try..finally block to
ensure the temporary files of this Research Object are removed.
"""
if save_to is None:
if not self.closed:
_logger.debug("[provenance] Deleting temporary %s", self.folder)
shutil.rmtree(self.folder, ignore_errors=True)
else:
save_to = os.path.abspath(save_to)
_logger.info("[provenance] Finalizing Research Object")
self._finalize() # write manifest etc.
# TODO: Write as archive (.zip or .tar) based on extension?
if os.path.isdir(save_to):
_logger.info("[provenance] Deleting existing %s", save_to)
shutil.rmtree(save_to)
shutil.move(self.folder, save_to)
_logger.info("[provenance] Research Object saved to %s", save_to)
self.folder = save_to
self.closed = True
def checksum_copy(
src_file, # type: IO[Any]
dst_file=None, # type: Optional[IO[Any]]
hasher=Hasher, # type: Callable[[], hashlib._Hash]
buffersize=1024 * 1024, # type: int
): # type: (...) -> str
"""Compute checksums while copying a file."""
# TODO: Use hashlib.new(Hasher_str) instead?
checksum = hasher()
contents = src_file.read(buffersize)
if dst_file and hasattr(dst_file, "name") and hasattr(src_file, "name"):
temp_location = os.path.join(os.path.dirname(dst_file.name), str(uuid.uuid4()))
try:
os.rename(dst_file.name, temp_location)
os.link(src_file.name, dst_file.name)
dst_file = None
os.unlink(temp_location)
except OSError:
pass
if os.path.exists(temp_location):
os.rename(temp_location, dst_file.name) # type: ignore
while contents != b"":
if dst_file is not None:
dst_file.write(contents)
checksum.update(contents)
contents = src_file.read(buffersize)
if dst_file is not None:
dst_file.flush()
return checksum.hexdigest().lower()
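# Illustrative usage sketch (editor's note, not part of the original module):
# checksum_copy() streams src into dst in buffersize chunks while hashing, e.g.
#
#   with open("input.dat", "rb") as src, open("copy.dat", "wb") as dst:
#       sha1 = checksum_copy(src, dst, hasher=hashlib.sha1)
#
# where "input.dat"/"copy.dat" are hypothetical paths; passing dst_file=None
# computes the digest without copying anything.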
def copy_job_order(job, job_order_object):
# type: (Any, Any) -> Any
"""Create copy of job object for provenance."""
if not hasattr(job, "tool"):
# direct command line tool execution
return job_order_object
customised_job = {} # new job object for RO
for each, i in enumerate(job.tool["inputs"]):
with SourceLine(
job.tool["inputs"],
each,
WorkflowException,
_logger.isEnabledFor(logging.DEBUG),
):
iid = shortname(i["id"])
if iid in job_order_object:
customised_job[iid] = copy.deepcopy(job_order_object[iid])
# add the input element in dictionary for provenance
elif "default" in i:
customised_job[iid] = copy.deepcopy(i["default"])
# add the default elements in the dictionary for provenance
else:
pass
return customised_job
|
the-stack_106_29525 | #######################################################################
# Copyright (C) 2017 Shangtong Zhang([email protected]) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import numpy as np
import os
import re
class Plotter:
COLORS = ['blue', 'green', 'red', 'black', 'cyan', 'magenta', 'yellow', 'brown', 'purple', 'pink',
'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'lightpurple', 'darkred', 'darkblue']
RETURN_TRAIN = 'episodic_return_train'
RETURN_TEST = 'episodic_return_test'
def __init__(self):
pass
def _rolling_window(self, a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def _window_func(self, x, y, window, func):
yw = self._rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window - 1:], yw_func
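    # Illustrative sketch (editor's note): _window_func() applies func over a
    # sliding window and trims x so the two stay aligned. With hypothetical arrays
    # x = np.arange(5) and y = np.array([1, 2, 3, 4, 5]), a call like
    # self._window_func(x, y, 3, np.mean) returns
    # (array([2, 3, 4]), array([2., 3., 4.])) - a window-3 running mean.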
def load_results(self, dirs, **kwargs):
kwargs.setdefault('tag', self.RETURN_TRAIN)
kwargs.setdefault('right_align', False)
kwargs.setdefault('window', 0)
kwargs.setdefault('top_k', 0)
kwargs.setdefault('top_k_measure', None)
kwargs.setdefault('interpolation', 100)
xy_list = self.load_log_dirs(dirs, **kwargs)
if kwargs['top_k']:
perf = [kwargs['top_k_measure'](y) for _, y in xy_list]
top_k_runs = np.argsort(perf)[-kwargs['top_k']:]
new_xy_list = []
for r, (x, y) in enumerate(xy_list):
if r in top_k_runs:
new_xy_list.append((x, y))
xy_list = new_xy_list
if kwargs['interpolation']:
x_right = float('inf')
for x, y in xy_list:
x_right = min(x_right, x[-1])
x = np.arange(0, x_right, kwargs['interpolation'])
y = []
for x_, y_ in xy_list:
y.append(np.interp(x, x_, y_))
y = np.asarray(y)
else:
x = xy_list[0][0]
y = [y for _, y in xy_list]
x = np.asarray(x)
y = np.asarray(y)
return x, y
def filter_log_dirs(self, pattern, negative_pattern=' ', root='./log', **kwargs):
dirs = [item[0] for item in os.walk(root)]
leaf_dirs = []
for i in range(len(dirs)):
if i + 1 < len(dirs) and dirs[i + 1].startswith(dirs[i]):
continue
leaf_dirs.append(dirs[i])
names = []
p = re.compile(pattern)
        neg_p = re.compile(negative_pattern)
        for dir in leaf_dirs:
            if p.match(dir) and not neg_p.match(dir):
names.append(dir)
print(dir)
print('')
return sorted(names)
def load_log_dirs(self, dirs, **kwargs):
kwargs.setdefault('right_align', False)
kwargs.setdefault('window', 0)
xy_list = []
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
for dir in dirs:
event_acc = EventAccumulator(dir)
event_acc.Reload()
_, x, y = zip(*event_acc.Scalars(kwargs['tag']))
xy_list.append([x, y])
if kwargs['right_align']:
x_max = float('inf')
for x, y in xy_list:
x_max = min(x_max, len(y))
xy_list = [[x[:x_max], y[:x_max]] for x, y in xy_list]
        x_max = kwargs.get('right_most', None)
if x_max:
xy_list = [[x[:x_max], y[:x_max]] for x, y in xy_list]
if kwargs['window']:
xy_list = [self._window_func(np.asarray(x), np.asarray(y), kwargs['window'], np.mean) for x, y in xy_list]
return xy_list
def plot_mean(self, data, x=None, **kwargs):
import matplotlib.pyplot as plt
if x is None:
x = np.arange(data.shape[1])
if kwargs['error'] == 'se':
e_x = np.std(data, axis=0) / np.sqrt(data.shape[0])
elif kwargs['error'] == 'std':
e_x = np.std(data, axis=0)
else:
raise NotImplementedError
m_x = np.mean(data, axis=0)
del kwargs['error']
plt.plot(x, m_x, **kwargs)
del kwargs['label']
plt.fill_between(x, m_x + e_x, m_x - e_x, alpha=0.3, **kwargs)
def plot_median_std(self, data, x=None, **kwargs):
import matplotlib.pyplot as plt
if x is None:
x = np.arange(data.shape[1])
e_x = np.std(data, axis=0)
m_x = np.median(data, axis=0)
plt.plot(x, m_x, **kwargs)
del kwargs['label']
plt.fill_between(x, m_x + e_x, m_x - e_x, alpha=0.3, **kwargs)
def plot_games(self, games, **kwargs):
kwargs.setdefault('agg', 'mean')
import matplotlib.pyplot as plt
l = len(games)
plt.figure(figsize=(l * 5, 5))
for i, game in enumerate(games):
plt.subplot(1, l, i + 1)
for j, p in enumerate(kwargs['patterns']):
label = kwargs['labels'][j]
color = self.COLORS[j]
log_dirs = self.filter_log_dirs(pattern='.*%s.*%s' % (game, p), **kwargs)
x, y = self.load_results(log_dirs, **kwargs)
if kwargs['downsample']:
                    indices = np.linspace(0, len(x) - 1, kwargs['downsample']).astype(int)
x = x[indices]
y = y[:, indices]
if kwargs['agg'] == 'mean':
self.plot_mean(y, x, label=label, color=color, error='se')
elif kwargs['agg'] == 'mean_std':
self.plot_mean(y, x, label=label, color=color, error='std')
elif kwargs['agg'] == 'median':
self.plot_median_std(y, x, label=label, color=color)
else:
for k in range(y.shape[0]):
                        plt.plot(x, y[k], label=label, color=color)
label = None
plt.xlabel('steps')
if not i:
plt.ylabel(kwargs['tag'])
plt.title(game)
plt.legend()
def select_best_parameters(self, patterns, **kwargs):
scores = []
for pattern in patterns:
log_dirs = self.filter_log_dirs(pattern, **kwargs)
xy_list = self.load_log_dirs(log_dirs, **kwargs)
y = np.asarray([xy[1] for xy in xy_list])
scores.append(kwargs['score'](y))
indices = np.argsort(-np.asarray(scores))
return indices
|
the-stack_106_29526 | import re
import json
import itertools
import urlparse
from datetime import datetime
from .feed import Feed
from ..timeline import Timeline, Tag, PullRequest
class GithubFeed(Feed):
API_ROOT_URL = 'https://api.github.com'
UI_ROOT_URL = 'https://github.com'
def __init__(self, project, token=None):
self._project = project
self._token = token
self._timeline = Timeline(self)
@property
def _headers(self):
headers = {}
if self._token is not None:
headers['Authorization'] = 'token {}'.format(self._token)
return headers
@property
def _api_url(self):
return '{}/repos/{}'.format(self.API_ROOT_URL, self._project)
@property
def project_url(self):
return '{}/{}'.format(self.UI_ROOT_URL, self._project)
def pull_request_url(self, number):
return '{}/pull/{}'.format(self.project_url, number)
def user_url(self, user):
return '{}/{}'.format(self.UI_ROOT_URL, user)
def release_url(self, tag):
return '{}/releases/tag/{}'.format(self.project_url, tag)
def compare_url(self, base, compare):
return '{}/compare/{}...{}'.format(
self.project_url, base, compare
)
def _github_request(self, url):
response = self._request(url, self._headers)
response_json = json.loads(response.read())
link = response.info().getheader('Link')
if link:
rel = self._read_link(link)
urls = [
rel['next'].replace('page=2', 'page={}'.format(page))
for page in range(2, rel['pages'] + 1)
]
responses = self._github_request_in_pull(urls)
return itertools.chain(response_json, *responses)
return response_json
def _github_request_in_pull(self, urls):
responses = self._request_in_pool(urls, self._headers)
return map(lambda response: json.loads(response.read()), responses)
def _parse_datetime(self, date):
return datetime.strptime(date, '%Y-%m-%dT%H:%M:%SZ')
def _read_link(self, link):
match = re.match(
r'<(?P<url1>[^>]+)>; rel=\"(?P<rel1>\w+)\", '
r'<(?P<url2>[^>]+)>; rel=\"(?P<rel2>\w+)\"',
link
).groupdict()
rels = {
match['rel1']: match['url1'],
match['rel2']: match['url2']
}
if 'last' in rels:
            rels['pages'] = int(urlparse.parse_qs(urlparse.urlparse(rels['last']).query)['page'][0])
return rels
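    # Illustrative sketch (editor's note): given a hypothetical GitHub Link header
    #   <https://api.github.com/repos/x/y/pulls?page=2>; rel="next",
    #   <https://api.github.com/repos/x/y/pulls?page=7>; rel="last"
    # _read_link() returns {'next': ..., 'last': ..., 'pages': 7}, which
    # _github_request() uses to fetch pages 2..7 in parallel.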
def _fetch_all_tags(self, start_at=None):
tags_url = '{}/{}'.format(self._api_url, 'git/refs/tags')
tags_sha = [t['object']['sha'] for t in self._github_request(tags_url)]
tag_info_urls = [
'{}/{}'.format(self._api_url, 'git/tags/{}'.format(sha))
for sha in tags_sha
]
for tag in self._github_request_in_pull(tag_info_urls):
at = self._parse_datetime(tag['tagger']['date'])
if at < start_at:
continue
self._timeline.add(
Tag(
name=tag['tag'],
url=self.release_url(tag['tag']),
at=at
)
)
def _fetch_all_pull_requests(self, start_at=None):
pull_requests_url = '{}/{}'.format(
self._api_url, 'pulls?state=closed&per_page=10')
for pr in self._github_request(pull_requests_url):
if not pr.get('merged_at'):
# closed non-merged PR
continue
at = self._parse_datetime(pr['merged_at'])
if at < start_at:
continue
self._timeline.add(
PullRequest(
number=pr['number'],
author=pr['user']['login'],
title=pr['title'],
url=self.pull_request_url(pr['number']),
at=at
)
)
def fetch(self, start_at=None):
self._fetch_all_tags(start_at)
self._fetch_all_pull_requests(start_at)
return self._timeline
|
the-stack_106_29531 | """
This script uses OpenCV's DNN module to run a pre-trained YOLO model
for object detection; OpenCV 3.4 or later is recommended.
2019/1/24_Zjh, Academic Building 2
"""
import numpy as np
import cv2
import os
import time
#加载已经训练好的模型
weightsPath="yolov3.weights"
configPath="yolov3.cfg"
labelsPath = "coco.names"
# Initialize parameters
LABELS = open(labelsPath).read().strip().split("\n") # object class names
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),dtype="uint8") # one random color per class
boxes = []
confidences = []
classIDs = []
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
# Read the input image to run detection on
image=cv2.imread("./vedio/videos/37.jpg")
(H, W) = image.shape[:2]
# Get the names of the output layers YOLO needs
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# Build a blob from the input image, then run it through the network to get bounding boxes and class probabilities
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),swapRB=True, crop=False)
net.setInput(blob)
layerOutputs = net.forward(ln)
#在每层输出上循环
for output in layerOutputs:
# 对每个检测进行循环
for detection in output:
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
        # Filter out detections with low confidence
if confidence > 0.5:
            # Scale the box center, width and height back to the original image size
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
            # Top-left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
            # Record the detected box, its confidence and class ID
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
classIDs.append(classID)
# Non-maximum suppression to remove overlapping boxes (score threshold 0.2, IoU threshold 0.3)
idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.2,0.3)
if len(idxs) > 0:
for i in idxs.flatten():
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
        # Draw the bounding box and class label on the original image
color = [int(c) for c in COLORS[classIDs[i]]]
cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,0.5, color, 2)
cv2.imshow("Image", image)
cv2.waitKey(0) |
the-stack_106_29532 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Vincent<[email protected]>
# http://blog.vincentzhong.cn
# Created on 2017/3/28 13:29
import os
import aiofiles
from pyquery import PyQuery
from catty.parser import BaseParser
from catty.spider import BaseSpider
from catty.libs.utils import md5string
default_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,zh;q=0.8',
}
async def write_response(root_path, text):
async with aiofiles.open(os.path.join(root_path, md5string(text)), mode='wb') as f:
await f.write(text)
class MyParser(BaseParser):
# you can handle the response when it is not normal
handle_status_code = [502]
def retry(self, task):
if task['response']['status'] != 302:
return task
"""
        You can also return a list of tasks instead:
return tasks_list
"""
async def parser_content_page(self, response, task, loop):
pq = PyQuery(response.body)
        urls = [a.attr.href for a in pq('a').items() if a.attr.href and a.attr.href.startswith('http')]
print(pq('title').text() + '\t' + str(task['meta']['deep']))
await write_response('/mnt2/test', response.body)
return {'urls': urls}
def return_a_list(self, response):
tasks = []
"to make some task.only return a list can be treated as task"
return tasks
class Spider(BaseSpider, MyParser):
name = 'test_spider'
urls = ['https://web.sogou.com/', 'http://www.999.com/xinwen/', 'http://www.haoqq.com/', 'http://news.hao123.com/']
def start(self):
callbacks = [{'parser': self.parser_content_page, 'fetcher': self.get_content}]
return [self.request(
url=url,
callback=callbacks,
headers=default_headers,
meta={'deep': 100, 'dupe_filter': True}
) for url in self.urls]
def get_content(self, task):
callbacks = [{'parser': self.parser_content_page, 'fetcher': self.get_content}]
return [
self.request(
url=url,
callback=callbacks,
headers=default_headers,
meta={'deep': task['meta']['deep'] - 1, 'dupe_filter': True},
priority=task['meta']['deep'] - 1,
)
for url in task['parser']['item']['urls']]
|
the-stack_106_29533 | import inspect
import PREFS
from types import ModuleType
def prefs(func: callable):
"""This decorator will pass the result of the given func to PREFS.convert_to_prefs,
to print a dictionary using PREFS format.
Example:
# Without prefs decorator
def dictionary():
return {'keybindings': {'Ctrl+C': 'Copy', 'Ctrl+V': 'Paste'}}
print(dictionary())
>>> {'keybindings': {'Ctrl+C': 'Copy', 'Ctrl+V': 'Paste'}}
# With prefs decorator
@prefs # This is called a decorator
def dictionary():
return {'keybindings': {"Ctrl+C": "Copy", "Ctrl+V": "Paste"}}
print(dictionary())
>>> keybindings=>
Ctrl+C='Copy'
Ctrl+C='Paste'
Notes:
Only works with dictionaries.
"""
def wrapper_function(*args, **kwargs):
result = PREFS.convert_to_prefs(func(*args, **kwargs)) # Call given func and pass it's result to PREFS.convert_to_prefs
return result
return wrapper_function # Return function to call
def inspect_object(object_: object):
"""Find all members of Python object.
Example:
def say_hi(name: str) -> str:
print(f"hi {name}")
print(inspect_object(say_hi))
>>> say_hi=>
type=<class 'function'>
parameters=>
name=>
annotation=<class 'str'>
default=None
kind=POSITIONAL_OR_KEYWORD
return_annotation=<class 'str'>
Errors:
        Takes too much time; needs some optimization.
"""
object_name = object_.__name__
result = {object_name: get_object_properties(object_)}
return result
def get_object_members(object_: object, exclude_types: tuple=(ModuleType)):
def filter_member(member_name: str, member: object):
if isinstance(member, exclude_types):
return False
# If the object it's a module
if inspect.ismodule(object_):
# Get the module of the member (where it was defined or it belongs to)
# And check if the object name is the same as the member one.
# This will exclude all members that do not belong to the given module.
member_module = inspect.getmodule(member)
if member_module is not None:
return member_module.__name__ == object_.__name__
dunder_methods = PREFS.read_prefs_file("dunder_methods.prefs")
if type(object_).__name__ in dunder_methods:
dunder_methods_to_include = dunder_methods[type(object_).__name__]
else:
dunder_methods_to_include = ()
if (member_name.startswith("__") and member_name.endswith("__")
and member_name not in dunder_methods_to_include):
return False
return True
# Instead of using inspect.getmembers to keep the members in the order they were defined
# We are going to use vars(object_)
# result = inspect.getmembers(object_)
result = tuple(vars(object_).items())
# filter_member(*x) is equivalent to filter_member(x[0], x[1])
result = filter(lambda x: filter_member(*x), result)
return result
def get_object_content(object_: object):
"""Given an object get attributes of all of it's members.
"""
result = {}
for member_name, member in get_object_members(object_):
result[member_name] = get_object_properties(member)
return result
def get_object_properties(object_: object):
"""Given an object return it's type and content.
Example:
class Test2(Test1):
def __init__(self):
pass
def test_fun(self):
pass
>>>
type=class
inherits=>
Test1
content=>
__init__=>
type=function
parameters ...
test_func=>
type=function
parameters ...
callable (function, lambda, methods) -> parameters (see get_callable_parameters), return_annotation
"""
object_type = type(object_).__name__
if object_type == "type":
object_type = "class"
result = {"type": object_type, "docstring": object_.__doc__}
if inspect.isclass(object_) or inspect.ismodule(object_):
if inspect.isclass(object_):
result["inherits"] = [i.__name__ for i in inspect.getmro(object_)[1:-1]]
result["content"] = get_object_content(object_)
elif inspect.isfunction(object_) or inspect.ismethod(object_):
result["parameters"] = get_callable_parameters(object_)
if "return" in object_.__annotations__:
result["return_annotation"] = []
if isinstance(object_.__annotations__["return"], (tuple, list)):
for annotation in object_.__annotations__["return"]:
result["return_annotation"].append(str(annotation.__name__) if not annotation == inspect._empty else None)
else:
result["return_annotation"] = str(object_.__annotations__["return"].__name__) if object_.__annotations__["return"] is not None else None
else:
result["value"] = str(object_)
return result
def get_callable_parameters(callable_: callable):
"""Given a callable object (functions, lambda or methods) get all it's parameters,
each parameter annotation (a: str, b: int), default value (a=1, b=2) and kind (positional, keyword, etc).
If no annotation or default value None.
Example:
def say_hi(name: str, last_name: str, age: int=20):
print(f"hi {name} {last_name}, you are {age} years old.")
print(get_callable_parameters(say_hi))
>>>
name=>
annotation=<class 'str'>
default=None
kind=POSITIONAL_OR_KEYWORD
last_name=>
annotation=<class 'str'>
default=None
kind=POSITIONAL_OR_KEYWORD
age=>
annotation=<class 'int'>
default=20
kind=POSITIONAL_OR_KEYWORD
"""
result = {}
for parameter in inspect.signature(callable_).parameters.values():
if parameter.default == inspect._empty:
default_parameter = None
else:
try:
default_parameter = parameter.default.__name__
except AttributeError:
default_parameter = str(parameter.default)
result[parameter.name] = {
"annotation": [],
"default": default_parameter,
"kind": parameter.kind.description
}
if isinstance(parameter.annotation, (tuple, list)):
for annotation in parameter.annotation:
result[parameter.name]["annotation"].append(str(annotation.__name__) if not annotation == inspect._empty else None)
continue
result[parameter.name]["annotation"] = str(parameter.annotation.__name__) if not parameter.annotation == inspect._empty else None
return result
|
the-stack_106_29534 | # -*- coding: utf-8 -*-
# Copyright (c) 2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on Tue Sep 12 14:38:54 2017
@author: a001985
"""
try:
import pandas as pd
import numpy as np
except:
pass
import os
import shutil
import codecs
"""
===============================================================================
===============================================================================
===============================================================================
"""
class ColumnFile:
"""
    Hold a column-based file using pandas.
"""
def __init__(self, file_path, **kwargs):
self.file_path = file_path
self.kwarguments = {'sep': '\t',
'encoding': 'cp1252'}
self.kwarguments.update(kwargs)
        self.df = pd.read_csv(file_path, **self.kwarguments)
#==========================================================================
def get_mapping(self, par, **kwargs):
"""
Returns a list of values from parameter=par that matches the criteria
in kwargs. Only one value in the kwargs argument are allowed.
"""
boolean = np.array(np.ones(len(self.df)), dtype=bool)
        for key, value in kwargs.items():
boolean = boolean & (self.df[key]==value)
values = self.df.loc[boolean, par].values
return list(values)
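    # Illustrative usage sketch (editor's note): with hypothetical columns
    # "station" and "year", cf = ColumnFile("data.txt") followed by
    # cf.get_mapping("station", year=2017) would return every "station" value on
    # rows where the "year" column equals 2017.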
class DirectoryContent(object):
def __init__(self, dir_1, dir_2=None):
self.dir_1 = dir_1
self.dir_2 = dir_2
def _list_files(self):
self.dir_1_file_names = os.listdir(self.dir_1)
self.dir_1_file_paths = [os.path.join(self.dir_1, file_name) for file_name in self.dir_1_file_names]
if self.dir_2 is not None:
self.dir_2_file_names = os.listdir(self.dir_2)
self.dir_2_file_paths = [os.path.join(self.dir_2, file_name) for file_name in self.dir_2_file_names]
def show_comparison(self, **kwargs):
if self.dir_2 is None:
return
self._list_files()
if kwargs.get('ignore_file_ending'):
self.dir_1_file_names = [item.split('.')[0] for item in self.dir_1_file_names]
self.dir_2_file_names = [item.split('.')[0] for item in self.dir_2_file_names]
not_in_dir_2 = [f for f in self.dir_1_file_names if f not in self.dir_2_file_names]
not_in_dir_1 = [f for f in self.dir_2_file_names if f not in self.dir_1_file_names]
# print()
print('='*50)
print('Nr files in {}: {}'.format(self.dir_1, len(self.dir_1_file_names)))
print('Nr files in {}: {}'.format(self.dir_2, len(self.dir_2_file_names)))
print('Nr {} files not in {}: {}'.format(self.dir_1, self.dir_2, len(not_in_dir_2)))
print('Nr {} files not in {}: {}'.format(self.dir_2, self.dir_1, len(not_in_dir_1)))
if kwargs.get('list_files'):
print()
print('Files not in {}:\n'.format(self.dir_2))
print('\n'.join(not_in_dir_2))
print()
print('Files not in {}:\n'.format(self.dir_1))
print('\n'.join(not_in_dir_1))
class Explorer(object):
def __init__(self, directory):
self.directory = os.path.abspath(directory)
self.file_mapping = {}
def load_file_location(self):
for k, (root, dirs, files) in enumerate(os.walk(self.directory, topdown=True)):
for name in files:
path = os.path.join(root, name)
self.file_mapping[name] = path
def copy_files_in_list(self, file_list=[], to_directory=None, **kwargs):
if not all([file_list, to_directory]):
raise IOError
if not os.path.exists(to_directory):
os.mkdir(to_directory)
copied = []
not_copied = []
for file_name in file_list:
file_path = self.file_mapping.get(file_name, None)
if file_path is None:
                not_copied.append(file_name)
else:
copied.append(file_name)
to_file_path = os.path.join(to_directory, file_name)
print(file_path)
print(to_file_path)
shutil.copy(file_path, to_file_path)
print('{} files copied to directory {}'.format(len(copied), to_directory))
        print('{} files were NOT found!'.format(len(not_copied)))
print()
def move_files_in_list(file_paths, to_directory, **kwargs):
if not to_directory:
raise ValueError
if not os.path.exists(to_directory):
os.mkdir(to_directory)
if type(file_paths) == str:
file_paths = [file_paths]
nr_files_moved = 0
for file_path in file_paths:
if os.path.exists(file_path):
file_name = os.path.basename(file_path)
dest = os.path.join(to_directory, file_name)
shutil.move(file_path, dest)
nr_files_moved += 1
print('{} files moved'.format(nr_files_moved))
|
the-stack_106_29537 | from django.conf.urls import patterns, include, url
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib.gis import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^dashboard/', include('dashboard.urls')), # remove after 1.2.2013
url(r'^', include('dashboard.urls')),
url(r'^api/client/', include('geonition_client.urls')),
url(r'^api/geojson/', include('geojson_rest.urls')),
url(r'^api/auth/', include('gntauth.urls')),
url(r'^base_page/', include('base_page.urls')),
url(r'^planning/', include('plan_proposals.urls')),
url(r'^images/', include('gntimages.urls')),
url(r'^auth_page/', include('auth_page.urls')),
url(r'^geoforms/', include('geoforms.urls')),
url(r'^maps/', include('maps.urls')),
url(r'^questionnaire_admin/', include('questionnaire_admin.urls')), # specific to Mapita service
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^user_media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
}),
)
if 'rosetta' in settings.INSTALLED_APPS:
urlpatterns += patterns('',
url(r'^rosetta/', include('rosetta.urls')),
)
|
the-stack_106_29544 | """
MPEG audio file parser.
Creation: 12 decembre 2005
Author: Victor Stinner
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
MissingField, ParserError, createOrphanField,
Bit, Bits, Enum,
PaddingBits, PaddingBytes,
RawBytes)
from hachoir_parser.audio.id3 import ID3v1, ID3v2
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.tools import humanFrequency, humanBitSize
from hachoir_core.bits import long2raw
from hachoir_core.error import HACHOIR_ERRORS
from hachoir_core.stream import InputStreamError
# Max MP3 filesize: 200 MB
MAX_FILESIZE = 200*1024*1024*8
class Frame(FieldSet):
VERSION_NAME = { 0: "2.5", 2: "2", 3: "1" }
MPEG_I = 3
MPEG_II = 2
MPEG_II_5 = 0
LAYER_NAME = { 1: "III", 2: "II", 3: "I" }
LAYER_I = 3
LAYER_II = 2
LAYER_III = 1
# Bit rates (bit_rate * 1000 = bits/sec)
# key 15 is always invalid
BIT_RATES = {
1: ( # MPEG1
( 0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448 ), # layer I
( 0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384 ), # layer II
( 0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320 ), # layer III
# - 1 2 3 4 5 6 7 8 9 10 11 12 13 14 -
),
2: ( # MPEG2 / MPEG2.5
( 0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256 ), # layer I
( 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160 ), # layer II
( 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160 ), # layer III
# - 1 2 3 4 5 6 7 8 9 10 11 12 13 14 -
)
}
SAMPLING_RATES = {
3: {0: 44100, 1: 48000, 2: 32000}, # MPEG1
2: {0: 22050, 1: 24000, 2: 16000}, # MPEG2
0: {0: 11025, 1: 12000, 2: 8000} # MPEG2.5
}
EMPHASIS_NAME = {0: "none", 1: "50/15 ms", 3: "CCIT J.17"}
CHANNEL_MODE_NAME = {
0: "Stereo",
1: "Joint stereo",
2: "Dual channel",
3: "Single channel"
}
# Channel mode => number of channels
NB_CHANNEL = {
0: 2,
1: 2,
2: 2,
3: 1,
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
if not self._size:
frame_size = self.getFrameSize()
if not frame_size:
raise ParserError("MPEG audio: Invalid frame %s" % self.path)
self._size = min(frame_size * 8, self.parent.size - self.address)
def createFields(self):
# Header
yield PaddingBits(self, "sync", 11, "Synchronize bits (set to 1)", pattern=1)
yield Enum(Bits(self, "version", 2, "MPEG audio version"), self.VERSION_NAME)
yield Enum(Bits(self, "layer", 2, "MPEG audio layer"), self.LAYER_NAME)
yield Bit(self, "crc16", "No CRC16 protection?")
# Rates and padding
yield Bits(self, "bit_rate", 4, "Bit rate")
yield Bits(self, "sampling_rate", 2, "Sampling rate")
yield Bit(self, "use_padding", "Stream field use padding?")
yield Bit(self, "extension", "Extension")
# Channel mode, mode extension, copyright, ...
yield Enum(Bits(self, "channel_mode", 2, "Channel mode"), self.CHANNEL_MODE_NAME)
yield Bits(self, "mode_ext", 2, "Mode extension")
yield Bit(self, "copyright", "Is copyrighted?")
yield Bit(self, "original", "Is original?")
yield Enum(Bits(self, "emphasis", 2, "Emphasis"), self.EMPHASIS_NAME)
size = (self.size - self.current_size) / 8
if size:
yield RawBytes(self, "data", size)
def isValid(self):
return (self["layer"].value != 0
and self["sync"].value == 2047
and self["version"].value != 1
and self["sampling_rate"].value != 3
and self["bit_rate"].value not in (0, 15)
and self["emphasis"].value != 2)
def getSampleRate(self):
"""
Read sampling rate. Returns None on error.
"""
version = self["version"].value
rate = self["sampling_rate"].value
try:
return self.SAMPLING_RATES[version][rate]
except (KeyError, IndexError):
return None
def getBitRate(self):
"""
Read bit rate in bit/sec. Returns None on error.
"""
layer = 3 - self["layer"].value
bit_rate = self["bit_rate"].value
if bit_rate in (0, 15):
return None
if self["version"].value == 3:
dataset = self.BIT_RATES[1] # MPEG1
else:
dataset = self.BIT_RATES[2] # MPEG2 / MPEG2.5
try:
return dataset[layer][bit_rate] * 1000
except (KeyError, IndexError):
return None
def getFrameSize(self):
"""
Read frame size in bytes. Returns None on error.
"""
frame_size = self.getBitRate()
if not frame_size:
return None
sample_rate = self.getSampleRate()
if not sample_rate:
return None
padding = int(self["use_padding"].value)
if self["layer"].value == self.LAYER_III:
if self["version"].value == self.MPEG_I:
return (frame_size * 144) // sample_rate + padding
else:
return (frame_size * 72) // sample_rate + padding
elif self["layer"].value == self.LAYER_II:
return (frame_size * 144) / sample_rate + padding
else: # self.LAYER_I:
frame_size = (frame_size * 12) / sample_rate
return (frame_size + padding) * 4
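    # Editor's note - worked example of the formula above: for an MPEG-1 Layer III
    # frame at 128 kbit/s and 44100 Hz, (128000 * 144) // 44100 = 417 bytes
    # (418 with the padding bit set); Layer I instead counts in 4-byte slots.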
def getNbChannel(self):
return self.NB_CHANNEL[ self["channel_mode"].value ]
def createDescription(self):
info = ["layer %s" % self["layer"].display]
bit_rate = self.getBitRate()
if bit_rate:
info.append("%s/sec" % humanBitSize(bit_rate))
sampling_rate = self.getSampleRate()
if sampling_rate:
info.append(humanFrequency(sampling_rate))
return "MPEG-%s %s" % (self["version"].display, ", ".join(info))
def findSynchronizeBits(parser, start, max_size):
"""
Find synchronisation bits (11 bits set to 1)
Returns None on error, or number of bytes before the synchronization.
"""
address0 = parser.absolute_address
end = start + max_size
size = 0
while start < end:
# Fast search: search 0xFF (first byte of sync frame field)
length = parser.stream.searchBytesLength("\xff", False, start, end)
if length is None:
return None
size += length
start += length * 8
# Strong validation of frame: create the frame
# and call method isValid()
try:
frame = createOrphanField(parser, start-address0, Frame, "frame")
valid = frame.isValid()
except HACHOIR_ERRORS:
valid = False
if valid:
return size
# Invalid frame: continue
start += 8
size += 1
return None
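# Editor's note: the search above relies on the fact that the 11 sync bits force the
# first byte of a frame header to be 0xFF, so scanning for "\xff" is a cheap
# prefilter before the full Frame/isValid() check.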
class Frames(FieldSet):
# Padding bytes allowed before a frame
MAX_PADDING = 256
def synchronize(self):
addr = self.absolute_address
start = addr + self.current_size
end = min(start + self.MAX_PADDING*8, addr + self.size)
padding = findSynchronizeBits(self, start, end)
if padding is None:
raise ParserError("MPEG audio: Unable to find synchronization bits")
if padding:
return PaddingBytes(self, "padding[]", padding, "Padding before synchronization")
else:
return None
def looksConstantBitRate(self, count=10):
"""
Guess if frames are constant bit rate. If it returns False, you can
be sure that frames are variable bit rate. Otherwise, it looks like
constant bit rate (on first count fields).
"""
check_keys = ("version", "layer", "bit_rate")
last_field = None
for index, field in enumerate(self.array("frame")):
if last_field:
for key in check_keys:
if field[key].value != last_field[key].value:
return False
last_field = field
if index == count:
break
return True
def createFields(self):
# Find synchronisation bytes
padding = self.synchronize()
if padding:
yield padding
while self.current_size < self.size:
yield Frame(self, "frame[]")
# padding = self.synchronize()
# if padding:
# yield padding
# Read raw bytes at the end (if any)
        size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "raw", size)
def createDescription(self):
if self.looksConstantBitRate():
text = "(looks like) Constant bit rate (CBR)"
else:
text = "Variable bit rate (VBR)"
return "Frames: %s" % text
def createMpegAudioMagic():
# ID3v1 magic
magics = [("TAG", 0)]
# ID3v2 magics
for ver_major in ID3v2.VALID_MAJOR_VERSIONS:
magic = "ID3%c\x00" % ver_major
magics.append( (magic,0) )
# MPEG frame magic
# TODO: Use longer magic: 32 bits instead of 16 bits
SYNC_BITS = 2047
for version in Frame.VERSION_NAME.iterkeys():
for layer in Frame.LAYER_NAME.iterkeys():
for crc16 in (0, 1):
magic = (SYNC_BITS << 5) | (version << 3) | (layer << 1) | crc16
magic = long2raw(magic, BIG_ENDIAN, 2)
magics.append( (magic, 0) )
return magics
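# Illustrative note (editor-added sketch): with version=3 (MPEG-1, as used in getBitRate
# above), layer=1 and crc16=1, the packed magic is
# (2047 << 5) | (3 << 3) | (1 << 1) | 1 = 0xFFFB, i.e. the familiar first two bytes of
# many MP3 frame headers.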
class MpegAudioFile(Parser):
PARSER_TAGS = {
"id": "mpeg_audio",
"category": "audio",
"file_ext": ("mpa", "mp1", "mp2", "mp3"),
"mime": (u"audio/mpeg",),
"min_size": 4*8,
# "magic": createMpegAudioMagic(),
"description": "MPEG audio version 1, 2, 2.5",
"subfile": "skip",
}
endian = BIG_ENDIAN
def validate(self):
if self[0].name in ("id3v2", "id3v1"):
return True
if not self.stream.checked: # TODO: is it possible to handle piped input?
return False
# Validate first 5 frames
for index in xrange(5):
try:
frame = self["frames/frame[%u]" % index]
except MissingField:
                # Require at least one valid frame
if (1 <= index) \
and self["frames"].done:
return True
return "Unable to get frame #%u" % index
except (InputStreamError, ParserError):
return "Unable to create frame #%u" % index
# Check first frame values
if not frame.isValid():
return "Frame #%u is invalid" % index
# Check that all frames are similar
if not index:
frame0 = frame
else:
if frame0["channel_mode"].value != frame["channel_mode"].value:
return "Frame #%u channel mode is different" % index
return True
def createFields(self):
# Read ID3v2 (if any)
if self.stream.readBytes(0, 3) == "ID3":
yield ID3v2(self, "id3v2")
if self._size is None: # TODO: is it possible to handle piped input?
raise NotImplementedError
# Check if file is ending with ID3v1 or not and compute frames size
frames_size = self.size - self.current_size
addr = self.size - 128*8
if 0 <= addr:
has_id3 = (self.stream.readBytes(addr, 3) == "TAG")
if has_id3:
frames_size -= 128*8
else:
has_id3 = False
# Read frames (if any)
if frames_size:
yield Frames(self, "frames", size=frames_size)
# Read ID3v1 (if any)
if has_id3:
yield ID3v1(self, "id3v1")
def createDescription(self):
if "frames" in self:
frame = self["frames/frame[0]"]
return "%s, %s" % (frame.description, frame["channel_mode"].display)
elif "id3v2" in self:
return self["id3v2"].description
elif "id3v1" in self:
return self["id3v1"].description
else:
return "MPEG audio"
def createContentSize(self):
# Get "frames" field
field = self[0]
if field.name != "frames":
try:
field = self[1]
except MissingField:
# File only contains ID3v1 or ID3v2
return field.size
            # Error: second field is not "frames"?
if field.name != "frames":
return None
# Go to last frame
frames = field
frame = frames["frame[0]"]
address0 = field.absolute_address
size = address0 + frame.size
while True:
try:
# Parse one MPEG audio frame
frame = createOrphanField(frames, size - address0, Frame, "frame")
# Check frame 32 bits header
if not frame.isValid():
break
except HACHOIR_ERRORS:
break
if MAX_FILESIZE < (size + frame.size):
break
size += frame.size
# ID3v1 at the end?
try:
if self.stream.readBytes(size, 3) == "TAG":
size += ID3v1.static_size
except InputStreamError:
pass
return size
|
the-stack_106_29545 | import sqlite3
from flask import g
DATABASE = 'ahoy.db'
def row_to_dictionary(cursor, row):
return dict((cursor.description[idx][0], value)
for idx, value in enumerate(row)
)
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
db.row_factory = row_to_dictionary # sqlite3.Row
return db
def query_db(query, args=(), one=False):
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
def modify_db(query, args=()):
cur = get_db().execute(query, args)
row_id = None
    if 'insert' in query.lower():
row_id = cur.lastrowid
cur.close()
get_db().commit()
return row_id
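# Illustrative usage sketch (editor-added; assumes an active Flask application context and
# a 'users' table, neither of which is defined in this module):
#     user = query_db('SELECT * FROM users WHERE id = ?', (1,), one=True)
#     new_id = modify_db('INSERT INTO users (name) VALUES (?)', ('ahoy',))
#     get_db().close()  # typically done in an @app.teardown_appcontext handler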
|
the-stack_106_29546 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 6 00:55:03 2019
Equipment from the Humbird 2011 Report.
Humbird, D., Davis, R., Tao, L., Kinchin, C., Hsu, D., Aden, A., Dudgeon, D. (2011). Process Design and Economics for Biochemical Conversion of Lignocellulosic Biomass to Ethanol: Dilute-Acid Pretreatment and Enzymatic Hydrolysis of Corn Stover (No. NREL/TP-5100-47764, 1013269). https://doi.org/10.2172/1013269
@author: yoelr
"""
import os
import sys
import flexsolve as flx
from thermosteam import MultiStream
from biosteam import Unit
from biosteam.units.decorators import cost, design
from biosteam.units.design_tools import size_batch
from biosteam.units.design_tools.specification_factors import material_densities_lb_per_in3
from biosteam.units.design_tools import column_design
import thermosteam as tmo
import biosteam as bst
Rxn = tmo.reaction.Reaction
ParallelRxn = tmo.reaction.ParallelReaction
# %% Add excel unit operations
from biosteam.units.factories import xl2mod
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '_humbird2011.xlsx')
xl2mod(path, sys.modules[__name__])
del sys, xl2mod, os, path
# %% Constants
_gal2m3 = 0.003785
_gpm2m3hr = 0.227124
# _m3hr2gpm = 4.40287
_hp2kW = 0.7457
_Gcal2kJ = 4184e3
# %% Pre-washing
@cost('Flow rate', 'Drum filter',
cost=30000, CE=525.4, S=10, n=1, kW=1.63, BM=1, N='N_filters')
class DrumFilter(bst.Splitter):
_units = {'Flow rate': 'm3/hr', 'Cost': 'USD'}
_N_ins = 1
_N_outs= 2
#: Number of parallel filters
N_filters = 190 #total
N_spare = 4
def __init__(self, ID='', ins=None, outs=(), *, order=None, flux=1220.6, split, moisture_content):
bst.Splitter.__init__(self, ID, ins, outs, order=order, split=split)
#: Moisture content of retentate
self.moisture_content = moisture_content
self.flux = flux #kg/hr/m2
assert self.isplit['Water'] == 0, 'cannot define water split, only moisture content'
def run_split_with_solids(self,mc):
"""Splitter mass and energy balance function with mixing all input streams."""
top, bot = self.outs
feed = self.ins[0]
top.copy_like(feed)
bot.copy_like(top)
top_mass = top.mass
F_mass_solids = sum(top_mass*self.split)
TS=1-mc
F_mass_tot = F_mass_solids/TS
F_mass_wat = F_mass_tot - F_mass_solids
top_mass[:] *= self.split
top.imass['Water']=F_mass_wat
bot.mass[:] -= top_mass
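        # Worked example (editor-added sketch): with moisture_content = 0.5 and 10 kg/hr of
        # retained solids, TS = 0.5, so the retentate totals 10/0.5 = 20 kg/hr, of which
        # 10 kg/hr is water.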
def _run(self):
self.run_split_with_solids(self.moisture_content)
retentate, permeate = self.outs
if permeate.imass['Water'] < 0:
import warnings
warnings.warn(f'not enough water for {repr(self)}')
def _design(self):
self.design_results['Flow rate'] = self.ins[0].F_vol/(self.N_filters-self.N_spare)
@cost('Tank volume', 'Tanks', cost=3840e3/8, S=250e3*_gal2m3,
CE=522, n=0.7, BM=2.0, N='N_tanks')
@cost('Tank volume', 'Agitators', CE=522, cost=52500,
S=1e6*_gal2m3, n=0.5, kW=90, BM=1.5, N='N_tanks')
@cost('Flow rate', 'Transfer pumps', kW=58, S=352*_gpm2m3hr,
cost=47200/5, CE=522, n=0.8, BM=2.3, N='N_tanks')
class WashingTank(bst.Unit):
"""Washing tank system where part of the manure organics and inorganics
dissolve
**Parameters**
**reactions:** [ReactionSet] Washing reactions.
**ins**
[0] Feed
[1] Process water
**outs**
[0] Washed feed
"""
# purchase_cost = installation_cost = 0
_N_ins = 2
_N_outs = 1
N_tanks = 1
tau_tank = 5/60/N_tanks #Residence time in each tank
V_wf = 0.9
_units = {'Flow rate': 'm3/hr',
'Tank volume': 'm3'}
def __init__(self, ID='', ins=None, outs=(), *, reactions):
Unit.__init__(self, ID, ins, outs)
self.reactions = reactions
def _run(self):
feed, process_water = self.ins
washed, = self.outs
washed.mix_from([feed,process_water])
self.reactions(washed.mass)
def _design(self):
effluent = self.outs[0]
v_0 = effluent.F_vol
Design = self.design_results
Design['Tank volume'] = v_0*self.tau_tank/self.V_wf
Design['Flow rate'] = v_0
# %% Pretreatment
class SteamMixer(Unit):
"""
**ins**
[0] Feed
[1] Steam
**outs**
[0] Mixed
"""
_N_outs = 1
_N_ins = 2
_N_heat_utilities = 1
def __init__(self, ID='', ins=None, outs=(), *, P):
super().__init__(ID, ins, outs)
self.P = P
@staticmethod
def _P_at_flow(mol_water, P, steam, mixed, feed):
steam.imol['7732-18-5'] = mol_water
mixed.mol[:] = steam.mol + feed.mol
mixed.H = feed.H + mol_water * 40798
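            # Editor's note: 40798 is presumably the molar heat of vaporization of water in
            # kJ/kmol (about 40.8 kJ/mol), i.e. the enthalpy released by the condensing steam.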
P_new = mixed.chemicals.Water.Psat(mixed.T)
return P - P_new
def _run(self):
feed, steam = self._ins
steam_mol = steam.F_mol
mixed = self.outs[0]
steam_mol = flx.aitken_secant(self._P_at_flow,
steam_mol, steam_mol+0.1,
1e-4, 1e-4,
args=(self.P, steam, mixed, feed))
mixed.P = self.P
hu = self.heat_utilities[0]
hu(steam.Hvap, mixed.T)
@property
def installation_cost(self): return 0
@property
def purchase_cost(self): return 0
def _design(self): pass
def _cost(self): pass
@cost('Dry flow rate', units='kg/hr', S=83333, CE=522,
cost=19812400, n=0.6, kW=4578, BM=1.5)
class PretreatmentReactorSystem(Unit):
_N_ins = 1
_N_outs = 2
_graphics = bst.Flash._graphics
def __init__(self, ID='', ins=None, outs=()):
Unit.__init__(self, ID, ins, outs)
self._multistream = MultiStream(None)
self.reactions = ParallelRxn([
# Reaction definition Reactant Conversion
Rxn('Glucan + H2O -> Glucose', 'Glucan', 0.0510),#
Rxn('Glucan + H2O -> GlucoseOligomer', 'Glucan', 0.0750),#
Rxn('Glucan -> HMF + 2 H2O', 'Glucan', 0.0043),#
Rxn('Xylan + H2O -> Xylose', 'Xylan', 0.5190),#
Rxn('Xylan + H2O -> XyloseOligomer', 'Xylan', 0.2610),#
Rxn('Xylan -> Furfural + 2 H2O', 'Xylan', 0.0860),#
Rxn('Arabinan + H2O -> Arabinose', 'Arabinan', 1.0000),#
Rxn('Arabinan + H2O -> ArabinoseOligomer', 'Arabinan', 0.0000),#
Rxn('Arabinan -> Furfural + 2 H2O', 'Arabinan', 0.0000),#
Rxn('Acetate -> AceticAcid', 'Acetate', 1.0000),
Rxn('Lignin -> SolubleLignin', 'Lignin', 0.0470),
Rxn('Extract -> ExtractVol', 'Extract', 0.7000),
Rxn('Extract -> ExtractNonVol', 'Extract', 0.3000)])
vapor, liquid = self.outs
vapor.phase = 'g'
def _run(self):
ms = self._multistream
feed = self.ins[0]
vapor, liquid = self.outs
liquid.copy_like(feed)
self.reactions(liquid) #self.reactions.adiabatic_reaction(liquid)
ms.copy_like(liquid)
H = ms.H + liquid.Hf - feed.Hf
ms.vle(T=190+273.15, H=H)
vapor.mol[:] = ms.imol['g']
liquid.mol[:] = ms.imol['l']
vapor.T = liquid.T = ms.T
vapor.P = liquid.P = ms.P
@cost('Flow rate', 'Sieve filter',
cost=14800, CE=551, S=0.2273, n=0.64, BM=1)
class SieveFilter(bst.Splitter):
_units = {'Flow rate': 'm3/hr'}
_N_ins = 1
_N_outs= 2
def __init__(self, ID='', ins=None, outs=(), *, order=None, WIS=False, split, moisture_content):
bst.Splitter.__init__(self, ID, ins, outs, order=order, split=split)
#: Moisture content of retentate
#If WIS=True, the moisture content is 1-WIS content. Otherwise is 1- totals solid content
self.WIS=WIS
self.moisture_content = moisture_content
assert self.isplit['Water'] == 0, 'cannot define water split, only moisture content'
def run_split_with_solids(self,mc):
"""Splitter mass and energy balance function with mixing all input streams."""
top, bot = self.outs
feed = self.ins[0]
top.copy_like(feed)
bot.copy_like(top)
top_mass = top.mass
F_mass_ins = sum(top_mass*self.split)
F_mass_sol = top.F_mass - F_mass_ins
F_mass_wat = top.imass['Water']
x_sol = mc*F_mass_ins/(F_mass_wat-mc*F_mass_sol)
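        # Editor's note: x_sol is chosen so the retentate hits the target moisture content,
        # i.e. it solves mc = x_sol*F_mass_wat / (F_mass_ins + x_sol*F_mass_sol) for x_sol.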
self.split[self.split==0] = x_sol
top_mass[:] *= self.split
bot.mass[:] -= top_mass
def run_split_with_solidsWIS(self,mc):
"""Splitter mass and energy balance function with mixing all input streams."""
top, bot = self.outs
feed = self.ins[0]
top.copy_like(feed)
bot.copy_like(top)
top_mass = top.mass
WIS=1-mc
F_mass_ins = sum(top_mass*self.split)
F_mass_tot_out = F_mass_ins/WIS
F_mass_sol_out = F_mass_tot_out - F_mass_ins
F_mass_sol_in = feed.F_mass - F_mass_ins
x_sol = F_mass_sol_out/F_mass_sol_in
self.split[self.split==0] = x_sol
top_mass[:] *= self.split
bot.mass[:] -= top_mass
def _run(self):
if self.WIS:
self.run_split_with_solidsWIS(self.moisture_content)
else:
self.run_split_with_solids(self.moisture_content)
retentate, permeate = self.outs
if permeate.imass['Water'] < 0:
import warnings
warnings.warn(f'not enough water for {repr(self)}')
def _design(self):
self.design_results['Flow rate'] = self.ins[0].F_vol
@cost('Area', 'Pressure filter',
cost=1220, CE=551, S=0.092903, n=0.71, BM=1.7)
class PressureFilter(bst.Splitter):
_units = {'Flow rate': 'kg/hr','Area': 'm2','Power': 'kW'}
_N_ins = 1
_N_outs= 2
_pressure = 13e5
_efficiency = 0.95
def __init__(self, ID='', ins=None, outs=(), *, order=None, WIS=False, flux=1220.6, split, moisture_content):
bst.Splitter.__init__(self, ID, ins, outs, order=order, split=split)
#: Moisture content of retentate
#If WIS=True, the moisture content is 1-WIS content. Otherwise is 1- totals solid content
self.WIS=WIS
self.moisture_content = moisture_content
self.flux = flux #kg/hr/m2
assert self.isplit['Water'] == 0, 'cannot define water split, only moisture content'
def run_split_with_solids(self,mc):
"""Splitter mass and energy balance function with mixing all input streams."""
top, bot = self.outs
feed = self.ins[0]
top.copy_like(feed)
bot.copy_like(top)
top_mass = top.mass
F_mass_ins = sum(top_mass*self.split)
F_mass_sol = top.F_mass - F_mass_ins
F_mass_wat = top.imass['Water']
x_sol = mc*F_mass_ins/(F_mass_wat-mc*F_mass_sol)
self.split[self.split==0] = x_sol
top_mass[:] *= self.split
bot.mass[:] -= top_mass
def run_split_with_solidsWIS(self,mc):
"""Splitter mass and energy balance function with mixing all input streams."""
top, bot = self.outs
feed = self.ins[0]
top.copy_like(feed)
bot.copy_like(top)
top_mass = top.mass
WIS=1-mc
F_mass_ins = sum(top_mass*self.split)
F_mass_tot_out = F_mass_ins/WIS
F_mass_sol_out = F_mass_tot_out - F_mass_ins
F_mass_sol_in = feed.F_mass - F_mass_ins
x_sol = F_mass_sol_out/F_mass_sol_in
self.split[self.split==0] = x_sol
top_mass[:] *= self.split
bot.mass[:] -= top_mass
def _run(self):
if self.WIS:
self.run_split_with_solidsWIS(self.moisture_content)
else:
self.run_split_with_solids(self.moisture_content)
retentate, permeate = self.outs
if permeate.imass['Water'] < 0:
import warnings
warnings.warn(f'not enough water for {repr(self)}')
def _design(self):
self.design_results['Flow rate'] = abs(self.outs[1].F_mass)
self.design_results['Area'] = abs(self.outs[1].F_mass)/self.flux #m2
feed = self.ins[0]
light_ind = feed.chemicals._light_indices
l = [a for a in feed.vol[light_ind] if not a==0]
v_0 = feed.F_vol - sum(l)
self.design_results['Power'] = power_rate = (v_0/3600)*self._pressure/self._efficiency/1000 #kW
pu = self.power_utility
pu(rate=power_rate)
# %% Saccharification and fermentation
@cost('Flow rate', 'Pumps',
S=43149, CE=522, cost=24800, n=0.8, kW=40, BM=2.3)
@cost('Stage #1 reactor volume', 'Stage #1 reactors',
cost=37700, S=20*_gal2m3, CE=522, n=0.7, BM=1.8)
@cost('Stage #2 reactor volume', 'Stage #2 reactors',
cost=58300, S=200*_gal2m3, CE=522, n=0.7, BM=1.8)
@cost('Stage #3 reactor volume', 'Stage #3 reactors',
cost=78800, S=2e3*_gal2m3, CE=522, n=0.7, BM=1.8)
@cost('Stage #4 reactor volume', 'Stage #4 reactors',
cost=176e3, S=20e3*_gal2m3, CE=522, n=0.7, BM=1.8)
@cost('Stage #4 reactor volume', 'Stage #4 agitators',
cost=26e3/2, S=20e3*_gal2m3, kW=7.5, CE=522, n=0.5, BM=1.5)
@cost('Stage #5 reactor volume', 'Stage #5 reactors',
cost=590e3, S=200e3*_gal2m3, CE=522, n=0.7, BM=1.8)
@cost('Stage #5 reactor volume', 'Stage #5 agitators',
cost=43e3/2, S=200e3*_gal2m3, kW=10, CE=522, n=0.5, BM=1.5)
class SeedTrain(Unit):
_N_ins = 1
_N_outs= 2
_N_heat_utilities = 1
_units= {'Flow rate': 'kg/hr',
'Stage #1 reactor volume': 'm3',
'Stage #2 reactor volume': 'm3',
'Stage #3 reactor volume': 'm3',
'Stage #4 reactor volume': 'm3',
'Stage #5 reactor volume': 'm3'}
@property
def N_stages(self):
"""Number of stages in series."""
return 5
#: Number of parallel seed trains
N_trains = 2
#: Cycle time for each batch (hr)
tau_batch = 24
@property
def tau_turnover(self):
"""Turnover time (hr) calculated by batch time divided by number of trains."""
return self.tau_batch/self.N_trains
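        # With the class defaults above (tau_batch = 24 hr, N_trains = 2) this gives 12 hr.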
#: Operating temperature (K)
T = 32+273.15
# #: wt % media (e.g. corn steep liquor) in each stage
# media_loading = 0.50
# #: Diammonium phosphate loading in g/L of fermentation broth
# DAP = 0.67
def __init__(self, ID='', ins=None, outs=()):
Unit.__init__(self, ID, ins, outs)
self.reactions = ParallelRxn([
# Reaction definition Reactant Conversion
Rxn('Glucose -> 2 Ethanol + 2 CO2', 'Glucose', 0.0400),##0.04
Rxn('Glucose + 0.62 NH3 + 2.17 O2 -> 3.65 S_cerevisiae + 3.71 H2O + 2.34 CO2', 'Glucose', 0.2400),##0.90
Rxn('Glucose + 2 H2O -> 2 Glycerol + O2', 'Glucose', 0.0040),
Rxn('2 Glucose + 1.5 O2 -> 3 SuccinicAcid + 3 H2O', 'Glucose', 0.0060)
])
        # 20.65 g/mol S_cerevisiae
def _run(self):
feed, = self.ins
vent, effluent= self.outs
effluent.copy_flow(feed)
self.reactions.force_reaction(effluent.mol) # TODO: Ignore negative O2; probably bug in _system.py
effluent.T = self.T
vent.phase = 'g'
vent.copy_flow(effluent, ('CO2', 'NH3', 'O2', 'N2'), remove=True)
def _design(self):
maxvol = self.outs[1].F_vol*self.tau_turnover
vol = maxvol*10**-self.N_stages
Design = self.design_results
for i in range(1, self.N_stages+1):
Design[f'Stage #{i} reactor volume'] = vol
vol *= 10
Design['Flow rate'] = sum([i.F_mass for i in self.outs])
self.heat_utilities[0](self.Hnet, self.T)
def _cost(self):
N = self.N_trains
D = self.design_results
C = self.purchase_costs
kW = 0
for i, x in self.cost_items.items():
S = D[x._basis]
q = S/x.S
C[i] = N*bst.CE/x.CE*x.cost*q**x.n
kW += N*x.kW*q
self.power_utility(kW)
@cost('Flow rate', 'Recirculation pumps', kW=30, S=340*_gpm2m3hr,
cost=47200, n=0.8, BM=2.3, CE=522, N='N_recirculation_pumps')
@cost('Reactor duty', 'Heat exchangers', CE=522, cost=23900,
S=5*_Gcal2kJ, n=0.7, BM=2.2, N='N_reactors') # Based on a similar heat exchanger
@cost('Reactor volume', 'Agitators', CE=522, cost=52500,
S=1e6*_gal2m3, n=0.5, kW=90, BM=1.5, N='N_reactors')
@cost('Reactor volume', 'Reactors', CE=522, cost=844000,
S=1e6*_gal2m3, n=0.5, BM=1.5, N='N_reactors')
@cost('Flow rate', 'Transfer pumps', kW=58, S=352*_gpm2m3hr,
cost=47200/5, CE=522, n=0.8, BM=2.3, N='N_transfer_pumps')
@cost('Tank volume', 'Tanks', cost=3840e3/8, S=250e3*_gal2m3,
CE=522, n=0.7, BM=2.0, N='N_tanks')
class SaccharificationAndCoFermentation(Unit):
_N_ins = 3
_N_outs = 3
_N_heat_utilities = 2
#: Saccharification temperature (K)
T_saccharification = 48+273.15
#: Fermentation temperature (K)
T_fermentation = 32+273.15
#: Residence time of countinuous saccharification tanks (hr)
tau_tank = 24
#: Saccharification time (hr)
tau_saccharification = 60
#: Co-Fermentation time (hr)
tau_cofermentation = 36
#: Unload and clean up time (hr)
tau_0 = 4
#: Working volume fraction (filled tank to total tank volume)
V_wf = 0.9
#: Number of reactors
N_reactors = 12
#: Number of continuous saccharification tanks
N_tanks = 8
#: Number of transfer pumps
N_transfer_pumps = 5
#: Number of recirculation pumps
N_recirculation_pumps = 5
_units = {'Flow rate': 'm3/hr',
'Tank volume': 'm3',
'Reactor volume': 'm3',
'Reactor duty': 'kJ/hr'}
def __init__(self, ID='', ins=None, outs=(), saccharified_slurry_split = 0.1, P=101325):
Unit.__init__(self, ID, ins, outs)
self.P = P
# Split to outs[2]
self.saccharified_slurry_split = saccharified_slurry_split
#: [ParallelReaction] Enzymatic hydrolysis reactions including from downstream batch tank in co-fermentation.
self.saccharification = ParallelRxn([
# Reaction definition Reactant Conversion
Rxn('Glucan -> GlucoseOligomer', 'Glucan', 0.0400),
Rxn('Glucan + 0.5 H2O -> 0.5 Cellobiose', 'Glucan', 0.0120),
Rxn('Glucan + H2O -> Glucose', 'Glucan', 0.7800),#changed
Rxn('GlucoseOligomer + H2O -> Glucose', 'GlucoseOligomer', 1.0000),
Rxn('Cellobiose + H2O -> Glucose', 'Cellobiose', 1.0000),
Rxn('Xylan -> XyloseOligomer', 'Xylan', 0.0400),
Rxn('Xylan + H2O -> Xylose', 'Xylan', 0.9000)
])
self.loss = ParallelRxn([
# Reaction definition Reactant Conversion
Rxn('Glucose -> 2 LacticAcid', 'Glucose', 0.0300),
Rxn('3 Xylose -> 5 LacticAcid', 'Xylose', 0.0300),
Rxn('3 Arabinose -> 5 LacticAcid', 'Arabinose', 0.0300),
Rxn('Galactose -> 2 LacticAcid', 'Galactose', 0.0300),
Rxn('Mannose -> 2 LacticAcid', 'Mannose', 0.0300),])
self.cofermentation = ParallelRxn([
# Reaction definition Reactant Conversion
Rxn('Glucose -> 2 Ethanol + 2 CO2', 'Glucose', 0.8400),#changed
Rxn('Glucose + 0.62 NH3 + 2.17 O2 -> 3.65 S_cerevisiae + 3.71 H2O + 2.34 CO2', 'Glucose', 0.0130),
Rxn('Glucose + 2 H2O -> 2 Glycerol + O2', 'Glucose', 0.0040),
Rxn('2 Glucose + 1.5 O2 -> 3 SuccinicAcid + 3 H2O', 'Glucose', 0.0060),
])
self.saccharified_stream = tmo.Stream(None)
def _run(self):
feed, DAP, air = self.ins
vent, effluent, sidedraw = self.outs
vent.P = effluent.P = sidedraw.P = self.P
ss = self.saccharified_stream
ss.T = sidedraw.T = self.T_saccharification
vent.T = effluent.T = self.T_fermentation
vent.phase = 'g'
ss.copy_flow(feed)
self.saccharification(ss.mol)
sidedraw.mol[:] = ss.mol * self.saccharified_slurry_split
effluent.mol[:] = ss.mol - sidedraw.mol + DAP.mol + air.mol
self.loss(effluent.mol)
self.cofermentation.force_reaction(effluent.mol)
vent.receive_vent(effluent)
def _design(self):
effluent = self.outs[1]
v_0 = effluent.F_vol
Design = self.design_results
Design['Tank volume'] = v_0*self.tau_tank/self.V_wf/self.N_tanks
Design['Flow rate'] = v_0/self.N_transfer_pumps
tau = self.tau_saccharification + self.tau_cofermentation
Design.update(size_batch(v_0, tau, self.tau_0, self.N_reactors, self.V_wf))
hu_cooling, hu_fermentation = self.heat_utilities
mixture = self.thermo.mixture
ss = self.saccharified_stream
mol = ss.mol
hu_cooling(mixture.H('l', mol, self.T_fermentation, 101325.)
- mixture.H('l', mol, self.T_saccharification, 101325.),
self.T_fermentation)
ei = effluent.chemicals.index('Ethanol')
ethanol = (sum([i.mol[ei] for i in self.outs])
- sum([i.mol[ei] for i in self.ins]))
duty = ethanol*-5568
hu_fermentation(duty, effluent.T)
Design['Reactor duty'] = -duty
# %% Ethanol purification
class DistillationColumn(bst.BinaryDistillation):
def __init__(self, ID='', ins=None, outs=(),*,P=101325, energy_integration=False, LHK, k, y_top,x_bot):
bst.BinaryDistillation.__init__(self, ID=ID, ins=ins,
P=P, y_top=y_top, x_bot=x_bot,
k=k, LHK=LHK)
self.energy_integration=energy_integration
def _run(self):
bst.BinaryDistillation._run(self)
def _design(self):
bst.BinaryDistillation._design(self)
H=self.get_design_result('Height','ft')
Di=self.get_design_result('Diameter','ft')
Po = self.P * 0.000145078 #to psi
Pgauge = Po - 14.69
if Pgauge<0.0: Po=-Pgauge+14.69
Design = self.design_results
Design['Wall thickness'] = tv = column_design.compute_tower_wall_thickness(Po, Di, H)
rho_M = material_densities_lb_per_in3[self.vessel_material]
Design['Weight'] = column_design.compute_tower_weight(Di, H, tv, rho_M)
W = Design['Weight'] # in lb
L = Design['Height']*3.28 # in ft
Cost = self.purchase_costs
F_VM = self._F_VM
Cost['Tower'] = column_design.compute_purchase_cost_of_tower(Di, L, W, F_VM)
if self.energy_integration:
self.boiler.heat_utilities[0].flow=0
self.boiler.heat_utilities[0].cost=0
self._simulate_components()
# %% Biogas production
@cost('Reactor cooling', 'Heat exchangers', CE=522, cost=23900,
S=5*_Gcal2kJ, n=0.7, BM=2.2, N='N_reactors') # Based on a similar heat exchanger
@cost('Reactor volume', 'Agitators', CE=522, cost=52500,
S=1e6*_gal2m3, n=0.5, kW=30, BM=1.5, N='N_reactors')
@cost('Reactor volume', 'Reactors', CE=522, cost=12.8e6/4,
S=5450, n=0.5, BM=1, N='N_reactors')
@cost('Flow rate', 'Transfer pumps', kW=4.4, S=352*_gpm2m3hr,
cost=47200/5, CE=522, n=0.8, BM=2.3, N='N_transfer_pumps')
@cost('Raw biogas', 'Biogas purification', kW=0.25, S=0.0446,
cost=1430, CE=522, n=1, BM=1)
class AnaerobicDigestion(bst.Unit):
"""Anaerobic digestion system as modeled by Humbird 2011
**Parameters**
**reactions:** [ReactionSet] Anaerobic digestion reactions.
**sludge_split:** [Array] Split between waste water and sludge
    **ins**
    [0] Waste water
    **outs**
    [0] Biogas
    [1] Waste water
    [2] Sludge
    """
_N_ins = 1
_N_outs = 3
_N_heat_utilities = 1
#: Residence time of countinuous anaerobic digesters (hr)
tau = 30*24
#: Working volume fraction (filled tank to total tank volume)
V_wf = 0.8
#: Number of reactors
N_reactors = 4
#: Number of transfer pumps
N_transfer_pumps = 4
#: Number of recirculation pumps
N_recirculation_pumps = 4
_units = {'Flow rate': 'm3/hr',
'Reactor volume': 'm3',
'Reactor cooling': 'kJ/hr',
'Raw biogas': 'kmol/hr',
'Pure biogas': 'kmol/hr'}
def __init__(self, ID='', ins=None, outs=(), *, reactions, sludge_split,T):
Unit.__init__(self, ID, ins, outs)
self.reactions = reactions
self.sludge_split = sludge_split
self.multi_stream = tmo.MultiStream()
self.T=T
def _run(self):
feed, = self.ins
biogas, waste, sludge = self.outs
biogas.phase = 'g'
biogas.T = waste.T = sludge.T = self.T
sludge.copy_flow(feed)
self.reactions(sludge.mol)
self.multi_stream.copy_flow(sludge)
self.multi_stream.vle(P=101325, H=self.multi_stream.H)
biogas.mol[:] = self.multi_stream.imol['g']
liquid_mol = self.multi_stream.imol['l']
sludge.mol[:] = liquid_mol * self.sludge_split
waste.mol[:] = liquid_mol - sludge.mol
biogas.receive_vent(waste, accumulate=True)
def _design(self):
feed = self.ins[0]
biogas = self.outs[0]
light_ind = feed.chemicals._light_indices
l = [a for a in feed.vol[light_ind] if not a==0]
v_0 = feed.F_vol - sum(l)
Design = self.design_results
Design['Reactor volume'] = v_0*self.tau/self.V_wf/self.N_reactors
Design['Flow rate'] = v_0/self.N_transfer_pumps
Design['Raw biogas'] = biogas.F_mol - biogas.imol['Water']
Design['Pure biogas'] = biogas.imol['CH4']
hu_cooling, = self.heat_utilities
H_at_35C = feed.thermo.mixture.H(mol=feed.mol, phase='l', T=self.T, P=101325)
duty = H_at_35C - feed.H
hu_cooling(duty,self.T)
Design['Reactor cooling'] = abs(duty)
# %% Waste water treatment
@cost('Flow rate', 'Waste water system', units='kg/hr', CE=551,
cost=50280080., n=0.6, BM=1, kW=7139/1.05, S=393100)
class WasteWaterSystemCost(bst.Unit): pass
class AnaerobicDigestionWWT(bst.Unit):
"""Anaerobic digestion system as modeled by Humbird 2011
**Parameters**
**reactions:** [ReactionSet] Anaerobic digestion reactions.
**sludge_split:** [Array] Split between waste water and sludge
**ins**
[0] Waste water
[1] Cool well water
**outs**
[0] Biogas
[1] Waste water
[2] Sludge
[3] Hot well water
"""
purchase_cost = installation_cost = 0
_N_ins = 2
_N_outs = 4
def __init__(self, ID='', ins=None, outs=(), *, reactions, sludge_split,T):
Unit.__init__(self, ID, ins, outs)
self.reactions = reactions
self.sludge_split = sludge_split
self.multi_stream = tmo.MultiStream()
self.T=T
def _run(self):
feed, cool_water = self.ins
biogas, waste, sludge, hot_water = self.outs
hot_water.link_with(cool_water, TP=False)
hot_water.T = feed.T - 5
H_at_35C = feed.thermo.mixture.H(mol=feed.mol, phase='l', T=self.T, P=101325)
cool_water.mol[:] *= (feed.H - H_at_35C)/(hot_water.H - cool_water.H)
biogas.phase = 'g'
biogas.T = waste.T = sludge.T = self.T
sludge.copy_flow(feed)
self.reactions(sludge.mol)
self.multi_stream.copy_flow(sludge)
self.multi_stream.vle(P=101325, H=self.multi_stream.H)
biogas.mol[:] = self.multi_stream.imol['g']
liquid_mol = self.multi_stream.imol['l']
sludge.mol[:] = liquid_mol * self.sludge_split
waste.mol[:] = liquid_mol - sludge.mol
biogas.receive_vent(waste, accumulate=True)
class AerobicDigestionWWT(bst.Unit):
"""Anaerobic digestion system as modeled by Humbird 2011
**Parameters**
**reactions:** [ReactionSet] Anaerobic digestion reactions.
**sludge_split:** [Array] Split between waste water and sludge
**ins**
[0] Waste water
[1] Air
[2] Caustic
**outs**
[0] Vent
[1] Treated waste water
"""
_N_ins = 3
_N_outs = 2
purchase_cost = installation_cost = 0
evaporation = 4/355
def __init__(self, ID='', ins=None, outs=(), *, reactions):
Unit.__init__(self, ID, ins, outs)
self.reactions = reactions
def _run(self):
waste, air, caustic = self._ins
vent, water = self.outs
vent.phase = 'g'
water.copy_like(waste)
water.mol[:] += air.mol
water.mol[:] += caustic.mol
self.reactions(water.mol)
vent.copy_flow(water, ('CO2', 'O2', 'N2'))
vent.imol['7732-18-5'] = water.imol['7732-18-5'] * self.evaporation
water.mol[:] -= vent.mol
@cost('Flow rate', units='kg/hr',
S=63, cost=421e3, CE=522, BM=1.8, n=0.6)
class CIPpackage(bst.Facility):
line = 'CIP Package'
network_priority = 0
_N_ins = 1
_N_outs = 1
|
the-stack_106_29548 | import numpy as np
from .grid import Grid, CachedData
class UnstructuredGrid(Grid):
"""
Class for an unstructured model grid
Parameters
----------
vertices
list of vertices that make up the grid
cell2d
list of cells and their vertices
Properties
----------
vertices
returns list of vertices that make up the grid
cell2d
returns list of cells and their vertices
Methods
----------
get_cell_vertices(cellid)
returns vertices for a single cell at cellid.
"""
def __init__(self, vertices=None, iverts=None, xcenters=None, ycenters=None,
top=None, botm=None, idomain=None, lenuni=None,
ncpl=None, epsg=None, proj4=None, prj=None,
xoff=0., yoff=0., angrot=0., layered=True, nodes=None):
super(UnstructuredGrid, self).__init__('unstructured', top, botm,
idomain, lenuni, epsg, proj4,
prj, xoff, yoff, angrot)
self._vertices = vertices
self._iverts = iverts
self._top = top
self._botm = botm
self._ncpl = ncpl
self._layered = layered
self._xc = xcenters
self._yc = ycenters
self._nodes = nodes
if iverts is not None:
if self.layered:
assert np.all([n == len(iverts) for n in ncpl])
assert np.array(self.xcellcenters).shape[0] == self.ncpl[0]
assert np.array(self.ycellcenters).shape[0] == self.ncpl[0]
else:
msg = ('Length of iverts must equal ncpl.sum '
'({} {})'.format(len(iverts), ncpl))
assert len(iverts) == np.sum(ncpl), msg
assert np.array(self.xcellcenters).shape[0] == self.ncpl
assert np.array(self.ycellcenters).shape[0] == self.ncpl
@property
def is_valid(self):
if self._nodes is not None:
return True
return False
@property
def is_complete(self):
if self._nodes is not None and \
super(UnstructuredGrid, self).is_complete:
return True
return False
@property
def nlay(self):
if self.layered:
try:
return len(self.ncpl)
except TypeError:
return 1
else:
return 1
@property
def layered(self):
return self._layered
@property
def nnodes(self):
if self._nodes is not None:
return self._nodes
else:
return self.nlay * self.ncpl
@property
def ncpl(self):
if self._ncpl is None:
if self._iverts is None:
return None
else:
return len(self._iverts)
return self._ncpl
@property
def shape(self):
if self.ncpl is None:
return self.nnodes
if isinstance(self.ncpl, (list, np.ndarray)):
return self.nlay, self.ncpl[0]
else:
return self.nlay, self.ncpl
@property
def extent(self):
self._copy_cache = False
xvertices = np.hstack(self.xvertices)
yvertices = np.hstack(self.yvertices)
self._copy_cache = True
return (np.min(xvertices),
np.max(xvertices),
np.min(yvertices),
np.max(yvertices))
@property
def grid_lines(self):
"""
Creates a series of grid line vertices for drawing
a model grid line collection
Returns:
list: grid line vertices
"""
self._copy_cache = False
xgrid = self.xvertices
ygrid = self.yvertices
lines = []
for ncell, verts in enumerate(xgrid):
for ix, vert in enumerate(verts):
lines.append([(xgrid[ncell][ix - 1], ygrid[ncell][ix - 1]),
(xgrid[ncell][ix], ygrid[ncell][ix])])
self._copy_cache = True
return lines
@property
def xyzcellcenters(self):
"""
        Method to get cell centers and set them on the grid
"""
cache_index = 'cellcenters'
if cache_index not in self._cache_dict or \
self._cache_dict[cache_index].out_of_date:
self._build_grid_geometry_info()
if self._copy_cache:
return self._cache_dict[cache_index].data
else:
return self._cache_dict[cache_index].data_nocopy
@property
def xyzvertices(self):
"""
        Method to get model grid vertices
Returns:
list of dimension ncpl by nvertices
"""
cache_index = 'xyzgrid'
if cache_index not in self._cache_dict or \
self._cache_dict[cache_index].out_of_date:
self._build_grid_geometry_info()
if self._copy_cache:
return self._cache_dict[cache_index].data
else:
return self._cache_dict[cache_index].data_nocopy
def intersect(self, x, y, local=False, forgive=False):
x, y = super(UnstructuredGrid, self).intersect(x, y, local, forgive)
raise Exception('Not implemented yet')
def get_cell_vertices(self, cellid):
"""
Method to get a set of cell vertices for a single cell
used in the Shapefile export utilities
:param cellid: (int) cellid number
:return: list of x,y cell vertices
"""
self._copy_cache = False
cell_vert = list(zip(self.xvertices[cellid],
self.yvertices[cellid]))
self._copy_cache = True
return cell_vert
def _build_grid_geometry_info(self):
cache_index_cc = 'cellcenters'
cache_index_vert = 'xyzgrid'
vertexdict = {ix: list(v[-2:])
for ix, v in enumerate(self._vertices)}
xcenters = self._xc
ycenters = self._yc
xvertices = []
yvertices = []
# build xy vertex and cell center info
for iverts in self._iverts:
xcellvert = []
ycellvert = []
for ix in iverts:
xcellvert.append(vertexdict[ix][0])
ycellvert.append(vertexdict[ix][1])
xvertices.append(xcellvert)
yvertices.append(ycellvert)
zvertices, zcenters = self._zcoords()
if self._has_ref_coordinates:
# transform x and y
xcenters, ycenters = self.get_coords(xcenters, ycenters)
xvertxform = []
yvertxform = []
# vertices are a list within a list
for xcellvertices, ycellvertices in zip(xvertices, yvertices):
xcellvertices, \
ycellvertices = self.get_coords(xcellvertices, ycellvertices)
xvertxform.append(xcellvertices)
yvertxform.append(ycellvertices)
xvertices = xvertxform
yvertices = yvertxform
self._cache_dict[cache_index_cc] = CachedData([xcenters,
ycenters,
zcenters])
self._cache_dict[cache_index_vert] = CachedData([xvertices,
yvertices,
zvertices])
@classmethod
def from_argus_export(cls, fname, nlay=1):
"""
        Create a new UnstructuredGrid from an Argus One
        Trimesh file
Parameters
----------
fname : string
File name
nlay : int
Number of layers to create
Returns
-------
        grid : UnstructuredGrid
"""
from ..utils.geometry import get_polygon_centroid
f = open(fname, 'r')
line = f.readline()
ll = line.split()
ncells, nverts = ll[0:2]
ncells = int(ncells)
nverts = int(nverts)
        verts = np.empty((nverts, 2), dtype=float)
        xc = np.empty((ncells), dtype=float)
        yc = np.empty((ncells), dtype=float)
# read the vertices
f.readline()
for ivert in range(nverts):
line = f.readline()
ll = line.split()
c, iv, x, y = ll[0:4]
verts[ivert, 0] = x
verts[ivert, 1] = y
# read the cell information and create iverts, xc, and yc
iverts = []
for icell in range(ncells):
line = f.readline()
ll = line.split()
ivlist = []
for ic in ll[2:5]:
ivlist.append(int(ic) - 1)
if ivlist[0] != ivlist[-1]:
ivlist.append(ivlist[0])
iverts.append(ivlist)
xc[icell], yc[icell] = get_polygon_centroid(verts[ivlist, :])
# close file and return spatial reference
f.close()
return cls(verts, iverts, xc, yc, ncpl=np.array(nlay * [len(iverts)]))
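    # Illustrative construction sketch (editor-added; a minimal two-triangle, single-layer
    # grid, with values chosen purely for demonstration):
    #     verts = [[0., 0.], [1., 0.], [1., 1.], [0., 1.]]
    #     iverts = [[0, 1, 2], [0, 2, 3]]
    #     xc, yc = [2. / 3., 1. / 3.], [1. / 3., 2. / 3.]
    #     grid = UnstructuredGrid(verts, iverts, xc, yc, ncpl=np.array([2]))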
|
the-stack_106_29550 | # Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo.config import cfg
from nova.i18n import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
driver_opts = [
cfg.StrOpt('network_driver',
default='nova.network.linux_net',
help='Driver to use for network creation'),
]
CONF = cfg.CONF
CONF.register_opts(driver_opts)
LOG = logging.getLogger(__name__)
def load_network_driver(network_driver=None):
if not network_driver:
network_driver = CONF.network_driver
if not network_driver:
LOG.error(_("Network driver option required, but not specified"))
sys.exit(1)
LOG.info(_("Loading network driver '%s'") % network_driver)
return importutils.import_module(network_driver)
|
the-stack_106_29551 | #######################################################################################
#
# ObjectID network (based on FaceId)
# https://github.com/albertogaspar/keras-face-id/blob/master/models/facenet.py
#
#######################################################################################
from tensorflow.python.keras.models import Sequential, Model
from tensorflow.python.keras.layers import Dense, Activation, Flatten, Dropout, Lambda, ELU, concatenate, GlobalAveragePooling2D, Input, BatchNormalization, SeparableConv2D, Subtract, Concatenate, Conv2D
from tensorflow.keras.activations import relu, softmax
from tensorflow.python.keras.layers.convolutional import Convolution2D
from tensorflow.python.keras.layers.pooling import MaxPooling2D, AveragePooling2D
from tensorflow.keras.optimizers import Adam, RMSprop, SGD
from tensorflow.keras.regularizers import l2
from tensorflow.keras import backend as K
def euclidean_distance(inputs):
assert len(inputs) == 2, \
'Euclidean distance needs 2 inputs, %d given' % len(inputs)
u, v = inputs
return K.sqrt(K.sum((K.square(u - v)), axis=1, keepdims=True))
def Inception1x1(input, conv_1x1=64, strides_1x1=(1,1)):
x = Conv2D(conv_1x1, 1, strides=strides_1x1, padding='same')(input)
x = BatchNormalization()(x)
x = Activation('relu')(x)
return x
def Inception3x3(input, conv_1x1=96, conv_3x3=128, strides_1x1 =(1,1), strides_3x3 =(1,1)):
x = Inception1x1(input, conv_1x1, strides_1x1=strides_1x1)
x = Conv2D(conv_3x3, 3, strides=strides_3x3, padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
return x
def Inception5x5(input, conv_1x1=16, conv_5x5=32, strides_1x1 =(1,1), strides_5x5 =(1,1)):
x = Inception1x1(input, conv_1x1, strides_1x1=strides_1x1)
x = Conv2D(conv_5x5, 5, strides=strides_5x5, padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
return x
def InceptionPooling(input, conv_1x1=32, strides=(1,1), pool_type='max'):
if pool_type == 'max':
x = MaxPooling2D(pool_size=3, strides=strides, padding='same')(input)
elif pool_type == 'l2':
x = AveragePooling2D(pool_size=3, strides=strides, padding='same')(input)
else:
raise NotImplementedError('pool_type = {0}. '
'This type of pooling is not available.'.format(pool_type))
if conv_1x1:
x = Inception1x1(x, conv_1x1=conv_1x1, strides_1x1=strides)
return x
def InceptionLayer(input, conv_1x1, conv3x3_reduce, conv_3x3, conv_5x5_reduce, conv_5x5, pool_proj):
to_concatenate = []
if conv_1x1:
inception_1x1 = Inception1x1(input, conv_1x1=conv_1x1[0], strides_1x1= conv_1x1[1])
to_concatenate.append(inception_1x1)
if conv_3x3:
inception_3x3 = Inception3x3(input, conv_1x1=conv3x3_reduce[0], conv_3x3=conv_3x3[0],
strides_1x1 =conv3x3_reduce[1], strides_3x3 =conv_3x3[1])
to_concatenate.append(inception_3x3)
if conv_5x5:
inception_5x5 = Inception5x5(input, conv_1x1=conv_5x5_reduce[0], conv_5x5=conv_5x5[0],
strides_1x1 =conv_5x5_reduce[1], strides_5x5 =conv_5x5[1])
to_concatenate.append(inception_5x5)
if pool_proj:
inception_pool = InceptionPooling(input, conv_1x1=pool_proj[1], strides=pool_proj[2], pool_type=pool_proj[0])
to_concatenate.append(inception_pool)
inception = Concatenate()(to_concatenate)
return inception
def objectid_model():
input = Input(shape=(96,96,4))
x = Conv2D(64, 7, strides=(2,2), padding='same')(input)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=3, strides=(2,2), padding='same')(x)
x = Inception3x3(x, conv_1x1=64, conv_3x3=192)
x = MaxPooling2D(pool_size = 3, strides = 2, padding='same')(x)
inception_3a = InceptionLayer(x, conv_1x1=(64,(1,1)), conv3x3_reduce=(96,(1,1)), conv_3x3=(128,(1,1)),
conv_5x5_reduce=(16,(1,1)), conv_5x5=(32,(1,1)), pool_proj=('max',32,1))
inception_3b = InceptionLayer(inception_3a, conv_1x1=(64,(1,1)), conv3x3_reduce=(96,(1,1)), conv_3x3=(128,(1,1)),
conv_5x5_reduce=(32,(1,1)), conv_5x5=(64,(1,1)), pool_proj=('l2',64,1))
inception_3c = InceptionLayer(inception_3b, conv_1x1=None, conv3x3_reduce=(128,(1,1)), conv_3x3=(256,(2,2)),
conv_5x5_reduce=(32,(1,1)), conv_5x5=(64,(2,2)), pool_proj=('max',None,2))
inception_4a = InceptionLayer(inception_3c, conv_1x1=(256,(1,1)), conv3x3_reduce=(96,(1,1)), conv_3x3=(192,(1,1)),
conv_5x5_reduce=(32,(1,1)), conv_5x5=(64,(1,1)), pool_proj=('l2',128,1))
inception_4b = InceptionLayer(inception_4a, conv_1x1=(224,(1,1)), conv3x3_reduce=(112,(1,1)), conv_3x3=(224,(1,1)),
conv_5x5_reduce=(32,(1,1)), conv_5x5=(64,(1,1)), pool_proj=('l2',128,1))
inception_4c = InceptionLayer(inception_4b, conv_1x1=(192,(1,1)), conv3x3_reduce=(128,(1,1)), conv_3x3=(256,(1,1)),
conv_5x5_reduce=(32,(1,1)), conv_5x5=(64,(1,1)), pool_proj=('l2',128,1))
inception_4d = InceptionLayer(inception_4c, conv_1x1=(160,(1,1)), conv3x3_reduce=(144,(1,1)), conv_3x3=(288,(1,1)),
conv_5x5_reduce=(32,(1,1)), conv_5x5=(64,(1,1)), pool_proj=('l2',128,1))
inception_4e = InceptionLayer(inception_4d, conv_1x1=None, conv3x3_reduce=(160,(1,1)), conv_3x3=(256,(2,2)),
conv_5x5_reduce=(64,(1,1)), conv_5x5=(128,(2,2)), pool_proj=('max',None,2))
inception_5a = InceptionLayer(inception_4e, conv_1x1=(384,(1,1)), conv3x3_reduce=(192,(1,1)), conv_3x3=(384,(1,1)),
conv_5x5_reduce=(48,(1,1)), conv_5x5=(128,(1,1)), pool_proj=('l2',128,1))
inception_5b = InceptionLayer(inception_5a, conv_1x1=(384,(1,1)), conv3x3_reduce=(192,(1,1)), conv_3x3=(384,(1,1)),
conv_5x5_reduce=(48,(1,1)), conv_5x5=(128,(1,1)), pool_proj=('max',128,1))
x = GlobalAveragePooling2D()(inception_5b)
x = Dense(128)(x)
return Model(inputs = [input], outputs = x)
def objectid_train_model(objectid_model):
im_in1 = Input(shape=(96,96,4))
im_in2 = Input(shape=(96,96,4))
feat_x1 = objectid_model(im_in1)
feat_x2 = objectid_model(im_in2)
    # L2 normalization in the final layer; tfjs does not support Lambda layers wrapping
    # Python code, so the normalization has to be reimplemented in pure JS when exporting.
feat_x1 = Lambda(lambda x: K.l2_normalize(x,axis=1))(feat_x1)
feat_x2 = Lambda(lambda x: K.l2_normalize(x,axis=1))(feat_x2)
lambda_merge = Lambda(euclidean_distance)([feat_x1, feat_x2])
return Model(inputs = [im_in1, im_in2], outputs = lambda_merge)
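# Illustrative training sketch (editor-added; 'contrastive_loss', the optimizer settings and
# the paired data arrays are assumptions, not defined in this module):
#     base = objectid_model()
#     siamese = objectid_train_model(base)
#     siamese.compile(optimizer=Adam(1e-4), loss=contrastive_loss)
#     siamese.fit([pairs_left, pairs_right], pair_labels, batch_size=32, epochs=10)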
|
the-stack_106_29552 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Converters for ONNX-ML SV models.
"""
import numpy as np
from onnxconverter_common.registration import register_converter
from .._sv_implementations import SVC
def convert_onnx_svm_classifier_model(operator, device, extra_config):
"""
Converter for `ai.onnx.ml.SVMClassifier`
Args:
operator: An operator wrapping a `ai.onnx.ml.SVMClassifier` model
device: String defining the type of device the converted operator should be run on
extra_config: Extra configuration used to select the best conversion strategy
Returns:
A PyTorch model
"""
# These are passed as params to SVC()
kernel = degree = sv = nv = a = b = gamma = coef0 = classes = None
# These are stored for reshaping after parsing is done
sv_vals = coeffis = None
for attr in operator.raw_operator.origin.attribute:
if attr.name == "kernel_type":
# ex: Convert b'RBF' to 'rbf' for consistency
kernel = attr.s.lower().decode("UTF-8")
if kernel not in ["linear", "poly", "rbf"]: # from svc.py ln 58
raise RuntimeError("Unsupported kernel for SVC: {}".format(kernel))
elif attr.name == "coefficients":
coeffis = np.array(attr.floats)
elif attr.name == "vectors_per_class":
nv = np.array(attr.ints).astype("int32")
elif attr.name == "support_vectors":
sv_vals = np.array(attr.floats)
elif attr.name == "rho":
b = np.array(attr.floats)
elif attr.name == "kernel_params":
# See
# https://github.com/onnx/sklearn-onnx/blob/master/skl2onnx/operator_converters/support_vector_machines.py
# for details on [op._gamma, op.coef0, op.degree]
kp_arr = np.array(attr.floats)
gamma = kp_arr[0]
coef0 = kp_arr[1]
degree = int(kp_arr[2])
elif attr.name == "classlabels_ints":
classes = np.array(attr.ints)
if any(v is None for v in [sv_vals, coeffis]):
raise RuntimeError("Error parsing SVC arrays, found unexpected None")
# Now that we have parsed the degree and lengths, reshape 'a' and 'sv'
# For 'a', these are in 'dual' shape, so resize into 2:
# https://github.com/onnx/sklearn-onnx/blob/master/skl2onnx/operator_converters/support_vector_machines.py#L41
#
# Except for when they're not...
# https://stackoverflow.com/questions/22816646/the-dimension-of-dual-coef-in-sklearn-svc
if len(classes) > 2:
a = coeffis.reshape(2, len(coeffis) // 2)
    else:  # binary (two-class) case: coefficients are not in "dual" form, and the signs of 'a' and 'b' are flipped
a = np.negative([coeffis])
b = np.negative(b)
sv = sv_vals.reshape(len(a[0]), len(sv_vals) // len(a[0]))
if any(v is None for v in [kernel, degree, sv, nv, a, b, gamma, coef0, classes]):
raise RuntimeError(
"Error parsing SVC, found unexpected None. kernel{} degree{} sv{} nv{} a{} b{} gamma{} coef0{} classes{}".format(
kernel, degree, sv, nv, a, b, gamma, coef0, classes
)
)
return SVC(operator, kernel, degree, sv, nv, a, b, gamma, coef0, classes, device)
register_converter("ONNXMLSVMClassifier", convert_onnx_svm_classifier_model)
|
the-stack_106_29554 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from typing import Any, List, Optional
from azure.batch import models as batch_models
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.azure_batch import AzureBatchHook
from airflow.utils.decorators import apply_defaults
# pylint: disable=too-many-instance-attributes
class AzureBatchOperator(BaseOperator):
"""
Executes a job on Azure Batch Service
:param batch_pool_id: A string that uniquely identifies the Pool within the Account.
:type batch_pool_id: str
:param batch_pool_vm_size: The size of virtual machines in the Pool
:type batch_pool_vm_size: str
:param batch_job_id: A string that uniquely identifies the Job within the Account.
:type batch_job_id: str
:param batch_task_command_line: The command line of the Task
:type batch_task_command_line: str
:param batch_task_id: A string that uniquely identifies the task within the Job.
:type batch_task_id: str
:param batch_pool_display_name: The display name for the Pool.
The display name need not be unique
:type batch_pool_display_name: Optional[str]
:param batch_job_display_name: The display name for the Job.
The display name need not be unique
:type batch_job_display_name: Optional[str]
:param batch_job_manager_task: Details of a Job Manager Task to be launched when the Job is started.
:type batch_job_manager_task: Optional[batch_models.JobManagerTask]
:param batch_job_preparation_task: The Job Preparation Task. If set, the Batch service will
run the Job Preparation Task on a Node before starting any Tasks of that
Job on that Compute Node. Required if batch_job_release_task is set.
:type batch_job_preparation_task: Optional[batch_models.JobPreparationTask]
:param batch_job_release_task: The Job Release Task. Use to undo changes to Compute Nodes
made by the Job Preparation Task
:type batch_job_release_task: Optional[batch_models.JobReleaseTask]
:param batch_task_display_name: The display name for the task.
The display name need not be unique
:type batch_task_display_name: Optional[str]
:param batch_task_container_settings: The settings for the container under which the Task runs
:type batch_task_container_settings: Optional[batch_models.TaskContainerSettings]
:param batch_start_task: A Task specified to run on each Compute Node as it joins the Pool.
The Task runs when the Compute Node is added to the Pool or
when the Compute Node is restarted.
:type batch_start_task: Optional[batch_models.StartTask]
:param batch_max_retries: The number of times to retry this batch operation before it's
considered a failed operation. Default is 3
:type batch_max_retries: int
:param batch_task_resource_files: A list of files that the Batch service will
download to the Compute Node before running the command line.
:type batch_task_resource_files: Optional[List[batch_models.ResourceFile]]
:param batch_task_output_files: A list of files that the Batch service will upload
from the Compute Node after running the command line.
:type batch_task_output_files: Optional[List[batch_models.OutputFile]]
:param batch_task_user_identity: The user identity under which the Task runs.
If omitted, the Task runs as a non-administrative user unique to the Task.
:type batch_task_user_identity: Optional[batch_models.UserIdentity]
:param target_low_priority_nodes: The desired number of low-priority Compute Nodes in the Pool.
This property must not be specified if enable_auto_scale is set to true.
:type target_low_priority_nodes: Optional[int]
:param target_dedicated_nodes: The desired number of dedicated Compute Nodes in the Pool.
This property must not be specified if enable_auto_scale is set to true.
:type target_dedicated_nodes: Optional[int]
:param enable_auto_scale: Whether the Pool size should automatically adjust over time. Default is false
:type enable_auto_scale: bool
:param auto_scale_formula: A formula for the desired number of Compute Nodes in the Pool.
This property must not be specified if enableAutoScale is set to false.
It is required if enableAutoScale is set to true.
:type auto_scale_formula: Optional[str]
:param azure_batch_conn_id: The connection id of Azure batch service
:type azure_batch_conn_id: str
:param use_latest_verified_vm_image_and_sku: Whether to use the latest verified virtual
machine image and sku in the batch account. Default is false.
:type use_latest_verified_vm_image_and_sku: bool
:param vm_publisher: The publisher of the Azure Virtual Machines Marketplace Image.
For example, Canonical or MicrosoftWindowsServer. Required if
use_latest_image_and_sku is set to True
:type vm_publisher: Optional[str]
:param vm_offer: The offer type of the Azure Virtual Machines Marketplace Image.
For example, UbuntuServer or WindowsServer. Required if
use_latest_image_and_sku is set to True
:type vm_offer: Optional[str]
:param sku_starts_with: The starting string of the Virtual Machine SKU. Required if
use_latest_image_and_sku is set to True
:type sku_starts_with: Optional[str]
:param vm_sku: The name of the virtual machine sku to use
:type vm_sku: Optional[str]
:param vm_version: The version of the virtual machine
    :type vm_version: Optional[str]
:param vm_node_agent_sku_id: The node agent sku id of the virtual machine
:type vm_node_agent_sku_id: Optional[str]
:param os_family: The Azure Guest OS family to be installed on the virtual machines in the Pool.
:type os_family: Optional[str]
:param os_version: The OS family version
:type os_version: Optional[str]
:param timeout: The amount of time to wait for the job to complete in minutes. Default is 25
:type timeout: int
:param should_delete_job: Whether to delete job after execution. Default is False
:type should_delete_job: bool
:param should_delete_pool: Whether to delete pool after execution of jobs. Default is False
:type should_delete_pool: bool
"""
template_fields = (
'batch_pool_id',
'batch_pool_vm_size',
'batch_job_id',
'batch_task_id',
'batch_task_command_line',
)
ui_color = '#f0f0e4'
@apply_defaults
def __init__(
self,
*, # pylint: disable=too-many-arguments,too-many-locals
batch_pool_id: str,
batch_pool_vm_size: str,
batch_job_id: str,
batch_task_command_line: str,
batch_task_id: str,
vm_publisher: Optional[str] = None,
vm_offer: Optional[str] = None,
sku_starts_with: Optional[str] = None,
vm_sku: Optional[str] = None,
vm_version: Optional[str] = None,
vm_node_agent_sku_id: Optional[str] = None,
os_family: Optional[str] = None,
os_version: Optional[str] = None,
batch_pool_display_name: Optional[str] = None,
batch_job_display_name: Optional[str] = None,
batch_job_manager_task: Optional[batch_models.JobManagerTask] = None,
batch_job_preparation_task: Optional[batch_models.JobPreparationTask] = None,
batch_job_release_task: Optional[batch_models.JobReleaseTask] = None,
batch_task_display_name: Optional[str] = None,
batch_task_container_settings: Optional[batch_models.TaskContainerSettings] = None,
batch_start_task: Optional[batch_models.StartTask] = None,
batch_max_retries: int = 3,
batch_task_resource_files: Optional[List[batch_models.ResourceFile]] = None,
batch_task_output_files: Optional[List[batch_models.OutputFile]] = None,
batch_task_user_identity: Optional[batch_models.UserIdentity] = None,
target_low_priority_nodes: Optional[int] = None,
target_dedicated_nodes: Optional[int] = None,
enable_auto_scale: bool = False,
auto_scale_formula: Optional[str] = None,
azure_batch_conn_id='azure_batch_default',
use_latest_verified_vm_image_and_sku: bool = False,
timeout: int = 25,
should_delete_job: bool = False,
should_delete_pool: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.batch_pool_id = batch_pool_id
self.batch_pool_vm_size = batch_pool_vm_size
self.batch_job_id = batch_job_id
self.batch_task_id = batch_task_id
self.batch_task_command_line = batch_task_command_line
self.batch_pool_display_name = batch_pool_display_name
self.batch_job_display_name = batch_job_display_name
self.batch_job_manager_task = batch_job_manager_task
self.batch_job_preparation_task = batch_job_preparation_task
self.batch_job_release_task = batch_job_release_task
self.batch_task_display_name = batch_task_display_name
self.batch_task_container_settings = batch_task_container_settings
self.batch_start_task = batch_start_task
self.batch_max_retries = batch_max_retries
self.batch_task_resource_files = batch_task_resource_files
self.batch_task_output_files = batch_task_output_files
self.batch_task_user_identity = batch_task_user_identity
self.target_low_priority_nodes = target_low_priority_nodes
self.target_dedicated_nodes = target_dedicated_nodes
self.enable_auto_scale = enable_auto_scale
self.auto_scale_formula = auto_scale_formula
self.azure_batch_conn_id = azure_batch_conn_id
self.use_latest_image = use_latest_verified_vm_image_and_sku
self.vm_publisher = vm_publisher
self.vm_offer = vm_offer
self.sku_starts_with = sku_starts_with
self.vm_sku = vm_sku
self.vm_version = vm_version
self.vm_node_agent_sku_id = vm_node_agent_sku_id
self.os_family = os_family
self.os_version = os_version
self.timeout = timeout
self.should_delete_job = should_delete_job
self.should_delete_pool = should_delete_pool
self.hook = self.get_hook()
def _check_inputs(self) -> Any:
if not self.os_family and not self.vm_publisher:
raise AirflowException("You must specify either vm_publisher or os_family")
if self.os_family and self.vm_publisher:
raise AirflowException(
"Cloud service configuration and virtual machine configuration "
"are mutually exclusive. You must specify either of os_family and"
" vm_publisher"
)
if self.use_latest_image:
if not all(elem for elem in [self.vm_publisher, self.vm_offer]):
raise AirflowException(
"If use_latest_image_and_sku is"
" set to True then the parameters vm_publisher, vm_offer, "
"must all be set. Found "
"vm_publisher={}, vm_offer={}".format(self.vm_publisher, self.vm_offer)
)
if self.vm_publisher:
if not all([self.vm_sku, self.vm_offer, self.vm_node_agent_sku_id]):
raise AirflowException(
"If vm_publisher is set, then the parameters vm_sku, vm_offer,"
"vm_node_agent_sku_id must be set. Found "
f"vm_publisher={self.vm_publisher}, vm_offer={self.vm_offer} "
f"vm_node_agent_sku_id={self.vm_node_agent_sku_id}, "
f"vm_version={self.vm_version}"
)
if not self.target_dedicated_nodes and not self.enable_auto_scale:
raise AirflowException(
"Either target_dedicated_nodes or enable_auto_scale must be set. None was set"
)
if self.enable_auto_scale:
if self.target_dedicated_nodes or self.target_low_priority_nodes:
raise AirflowException(
"If enable_auto_scale is set, then the parameters "
"target_dedicated_nodes and target_low_priority_nodes must not "
"be set. Found target_dedicated_nodes={},"
" target_low_priority_nodes={}".format(
self.target_dedicated_nodes, self.target_low_priority_nodes
)
)
if not self.auto_scale_formula:
raise AirflowException("The auto_scale_formula is required when enable_auto_scale is set")
if self.batch_job_release_task and not self.batch_job_preparation_task:
raise AirflowException(
"A batch_job_release_task cannot be specified without also "
" specifying a batch_job_preparation_task for the Job."
)
if not all(
[
self.batch_pool_id,
self.batch_job_id,
self.batch_pool_vm_size,
self.batch_task_id,
self.batch_task_command_line,
]
):
raise AirflowException(
"Some required parameters are missing.Please you must set all the required parameters. "
)
def execute(self, context: dict) -> None:
self._check_inputs()
self.hook.connection.config.retry_policy = self.batch_max_retries
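        # Build the pool specification; based on the inputs validated above, the hook uses either a
        # cloud service (os_family) or a virtual machine (vm_publisher) configuration.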
pool = self.hook.configure_pool(
pool_id=self.batch_pool_id,
vm_size=self.batch_pool_vm_size,
display_name=self.batch_pool_display_name,
target_dedicated_nodes=self.target_dedicated_nodes,
use_latest_image_and_sku=self.use_latest_image,
vm_publisher=self.vm_publisher,
vm_offer=self.vm_offer,
sku_starts_with=self.sku_starts_with,
vm_sku=self.vm_sku,
vm_version=self.vm_version,
vm_node_agent_sku_id=self.vm_node_agent_sku_id,
os_family=self.os_family,
os_version=self.os_version,
target_low_priority_nodes=self.target_low_priority_nodes,
enable_auto_scale=self.enable_auto_scale,
auto_scale_formula=self.auto_scale_formula,
start_task=self.batch_start_task,
)
self.hook.create_pool(pool)
# Wait for nodes to reach complete state
self.hook.wait_for_all_node_state(
self.batch_pool_id,
{
batch_models.ComputeNodeState.start_task_failed,
batch_models.ComputeNodeState.unusable,
batch_models.ComputeNodeState.idle,
},
)
        # Create the job if it does not already exist
job = self.hook.configure_job(
job_id=self.batch_job_id,
pool_id=self.batch_pool_id,
display_name=self.batch_job_display_name,
job_manager_task=self.batch_job_manager_task,
job_preparation_task=self.batch_job_preparation_task,
job_release_task=self.batch_job_release_task,
)
self.hook.create_job(job)
# Create task
task = self.hook.configure_task(
task_id=self.batch_task_id,
command_line=self.batch_task_command_line,
display_name=self.batch_task_display_name,
container_settings=self.batch_task_container_settings,
resource_files=self.batch_task_resource_files,
output_files=self.batch_task_output_files,
user_identity=self.batch_task_user_identity,
)
# Add task to job
self.hook.add_single_task_to_job(job_id=self.batch_job_id, task=task)
# Wait for tasks to complete
self.hook.wait_for_job_tasks_to_complete(job_id=self.batch_job_id, timeout=self.timeout)
# Clean up
if self.should_delete_job:
# delete job first
self.clean_up(job_id=self.batch_job_id)
if self.should_delete_pool:
self.clean_up(self.batch_pool_id)
def on_kill(self) -> None:
response = self.hook.connection.job.terminate(
job_id=self.batch_job_id, terminate_reason='Job killed by user'
)
self.log.info("Azure Batch job (%s) terminated: %s", self.batch_job_id, response)
def get_hook(self) -> AzureBatchHook:
"""Create and return an AzureBatchHook."""
return AzureBatchHook(azure_batch_conn_id=self.azure_batch_conn_id)
def clean_up(self, pool_id: Optional[str] = None, job_id: Optional[str] = None) -> None:
"""
        Delete the given pool and job in the batch account

        :param pool_id: The id of the pool to delete
:type pool_id: str
:param job_id: The id of the job to delete
:type job_id: str
"""
if job_id:
self.log.info("Deleting job: %s", job_id)
self.hook.connection.job.delete(job_id)
if pool_id:
self.log.info("Deleting pool: %s", pool_id)
self.hook.connection.pool.delete(pool_id)
|
the-stack_106_29556 | # -*- coding: utf-8 -*-
__author__ = 'gzp'
from typing import Optional
from jinja2 import Environment, PackageLoader
from urllib.parse import urlunparse
from rimuru.utils.jinja2 import filters
from rimuru.core.doc_generator import (
MarkdownGenerator
)
from .base import APIDocWorkshop
template_env = Environment(loader=PackageLoader('rimuru', 'templates'))
template_env.cache = None
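# Setting cache to None disables Jinja2's compiled-template cache, so templates are reloaded on every use.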
template_env.filters['success_responses_filter'] = filters.success_responses_filter
template_env.filters['error_responses_filter'] = filters.error_responses_filter
class MarkdownWorkShop(APIDocWorkshop):
generator_class = MarkdownGenerator
template = 'zh_hans_doc.md'
template_env = template_env
def __init__(self, *args, template: Optional[str] = None, template_env: Optional[Environment] = None, **kwargs):
super().__init__(*args, **kwargs)
if template:
self.template = template
if template_env:
self.template_env = template_env
def set_generator(self, generator_key, method, url_components, name=None):
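        # One generator per key; every generator shares this workshop's template and Jinja2 environment.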
self.generators[generator_key] = self.generator_class(
method,
urlunparse(url_components),
self.template,
self.template_env,
name=name
)
def save(self, file_path='.'):
for doc_generator in self.generators.values():
if doc_generator.exist_response:
doc_generator.save(file_path)
def delete(self):
for doc_generator in self.generators.values():
doc_generator.delete()
|
the-stack_106_29557 | import tensorflow as tf
import glob
import numpy as np
from tqdm import tqdm
from utils.utils import SentenceEmbedding
class BirdDataset(tf.keras.utils.Sequence):
def __init__(self,
path: str = 'dataset/Bird_dataset_text2image/images_crop',
                 size: tuple = (64, 64),
batch_size: int = 32,
shuffle: bool = True):
# Scan files
self.img_files = dict()
self.text_embeddings = dict()
self.embedding_model = SentenceEmbedding()
image_paths = glob.glob(path+'/*/*.jpg')
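        # Each image has a companion .txt caption file; every non-empty caption line
        # becomes its own (image, sentence-embedding) training example.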
        num_example = 0
        for image_path in tqdm(image_paths):
            text_path = image_path.replace('.jpg', '.txt')
            with open(text_path, 'r') as f:
                data = f.read()
            for text in data.split('\n'):
                if len(text) > 0:
                    self.img_files[num_example] = image_path
                    self.text_embeddings[num_example] = self.embedding_model.encode(text)
                    num_example += 1
# Initialize
self.indexes = None
self.size = size
self.shuffle = shuffle
self.batch_size = batch_size
self.on_epoch_end()
def __len__(self):
"""
Denotes the number of batches per epoch
"""
return int(np.floor(len(self.text_embeddings) / self.batch_size))
def on_epoch_end(self):
"""
Updates indexes after each epoch
:return:
"""
self.indexes = np.arange(len(self.text_embeddings))
if self.shuffle:
np.random.shuffle(self.indexes)
def __getitem__(self, index):
"""
Generate one batch of data
"""
# Generate indexes of the batch
indexes = self.indexes[index * self.batch_size:(index+1) * self.batch_size]
# Find list of IDs
ids = [k for k in indexes]
# Generate data
x, y = self.__data_generation(ids)
return x, y
def __data_generation(self, ids):
"""
Generates data containing batch_size samples.
"""
images, labels = [], []
# Begin load data
for i in ids:
# Load image
image = tf.io.read_file(self.img_files[i])
# convert the compressed string to a 3D uint8 tensor
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.random_flip_left_right(image)
image = tf.image.resize(image,self.size,
method='nearest', antialias=True)
image = image/255
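            # Normalizing with mean 0.5 and std 0.5 maps pixel values from [0, 1] to roughly [-1, 1].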
image = tf.keras.layers.Normalization(mean=[0.5, 0.5, 0.5],
variance=[tf.square(0.5),
tf.square(0.5),
tf.square(0.5)])(image)
images.append(image)
# Load label json file
labels.append(self.text_embeddings[i])
images, labels = np.asarray(images).astype(np.float32), np.asarray(labels).astype(np.float32)
return images, labels |
the-stack_106_29558 | import glob
import os
import pickle
from math import floor
from random import shuffle
from urllib.request import urlopen
from zipfile import ZipFile
from data_utils import read_off_file_into_nparray
def download_datasets(args):
model_net10_url = 'http://vision.princeton.edu/projects/2014/3DShapeNets/ModelNet10.zip'
print('[*] Downloading and unzipping datasets.')
unzip_files(model_net10_url, args.data_dir)
os.remove(os.path.join(args.Net10_data_dir, '.DS_Store'))
os.remove(os.path.join(args.Net10_data_dir, 'README.txt'))
def unzip_files(url, destination):
zip_resp = urlopen(url)
temp_zip = open('/tmp/tempfile.zip', 'wb')
temp_zip.write(zip_resp.read())
temp_zip.close()
zf = ZipFile('/tmp/tempfile.zip')
zf.extractall(path=destination)
zf.close()
def prepare_datasets(args):
data = dict()
data['class_dict'] = generate_class_str_to_num_dict(args.Net10_data_dir)
master_list = get_filenames_and_class(args.Net10_data_dir)
master_list = remove_small_point_clouds(master_list, args.small_sample_threshold)
shuffle(master_list)
n_samples = len(master_list)
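    # 80/10/10 split into train/eval/test.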
data['train_list'] = master_list[:floor(0.8*n_samples)]
data['eval_list'] = master_list[floor(0.8*n_samples):floor(0.9*n_samples)]
data['test_list'] = master_list[floor(0.9*n_samples):]
pickle.dump(data, open(os.path.join(args.data_dir, 'data.pickle'), "wb"))
def get_filenames_and_class(data_dir):
master_list = list()
classes = os.listdir(data_dir)
for point_class in classes:
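        # Each class directory holds train/ and test/ subdirectories of .off meshes;
        # both are pooled here and re-split later in prepare_datasets.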
train_dir = os.path.join(data_dir, point_class + '/train')
test_dir = os.path.join(data_dir, point_class + '/test')
for file in glob.glob(os.path.join(train_dir, '*.off')):
master_list.append({point_class: file})
for file in glob.glob(os.path.join(test_dir, '*.off')):
master_list.append({point_class: file})
return master_list
def generate_class_str_to_num_dict(data_dir):
classes = sorted(os.listdir(data_dir))
class_dict = {}
    for i, pt_class in enumerate(classes):
        class_dict[pt_class] = i
return class_dict
def remove_small_point_clouds(train_list, threshold):
new_list = list()
for file_dict in train_list:
point_cloud = read_off_file_into_nparray(list(file_dict.items())[0][1], n_points_to_read=None)
if point_cloud.shape[0] >= threshold:
new_list.append(file_dict)
return new_list |
the-stack_106_29562 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: Daniel E. Cook
Script/Tools for working with a VCF in python.
Used for generating the interval summary.
"""
import json
import re
import pandas as pd
import numpy as np
import itertools
from collections import defaultdict, Counter
from cyvcf2 import VCF
from pandas import DataFrame, Series
from logzero import logger
from functools import reduce
def infinite_dict():
return defaultdict(infinite_dict)
def flatten_cols(df):
"""
Flattens hierarchical columns
Stack Overflow: 14507794
"""
df.columns = [
'_'.join(tuple(map(str, t))).rstrip('_')
for t in df.columns.values
]
return df
ANN_FIELDS = ["allele",
"effect",
"impact",
"gene_name",
"gene_id",
"feature_type",
"feature_id",
"transcript_biotype",
"exon_intron_rank",
"nt_change",
"aa_change",
"cdna_pos",
"protein_position",
"distance_to_feature",
"error"]
def grouper(n, iterable):
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
class AnnotationItem(Series):
@property
def _constructor(self):
return AnnotationItem
@property
def _constructor_expanddim(self):
return VCF_DataFrame
def __eq__(self, other):
return AnnotationItem(self.apply(lambda row: other in row if type(row) == list else False))
@property
def length(self):
result = self.apply(lambda row: len(row) if type(row) == list else 0)
return AnnotationItem(data=result)
class AnnotationSeries(Series):
# https://stackoverflow.com/q/48435082/2615190
our_column_names = ('ANN',)
def __new__(cls, *args, **kwargs):
if kwargs.get('name', '') in cls.our_column_names:
obj = object.__new__(cls)
obj.__init__(*args, **kwargs)
return obj
return pd.Series(*args, **kwargs)
def __eq__(self, other):
return self.apply(lambda row: other in row if type(row) == list else False)
@property
def _constructor(self):
return AnnotationSeries
@property
def _constructor_expanddim(self):
return VCF_DataFrame
def _fetch_field(self, field):
"""
        Highly redundant - but I couldn't figure out a way to dynamically specify properties.
"""
ann_column_index = ANN_FIELDS.index(field)
result = self.apply(lambda row: [x[ann_column_index] for x in row] if type(row) == list else np.nan)
return AnnotationSeries(data=result, name='ANN')
@property
def allele(self):
result = self._fetch_field('allele')
return AnnotationItem(data=result, name='ANN')
@property
def effect(self):
result = self._fetch_field('effect')
return AnnotationItem(data=result, name='ANN')
@property
def impact(self):
result = self._fetch_field('impact')
return AnnotationItem(data=result, name='ANN')
@property
def gene_name(self):
result = self._fetch_field('gene_name')
return AnnotationItem(data=result, name='ANN')
@property
def gene_id(self):
result = self._fetch_field('gene_id')
return AnnotationItem(data=result, name='ANN')
@property
def feature_type(self):
result = self._fetch_field('feature_type')
return AnnotationItem(data=result, name='ANN')
@property
def feature_id(self):
result = self._fetch_field('feature_id')
return AnnotationItem(data=result, name='ANN')
@property
def transcript_biotype(self):
result = self._fetch_field('transcript_biotype')
return AnnotationItem(data=result, name='ANN')
@property
def exon_intron_rank(self):
result = self._fetch_field('exon_intron_rank')
return AnnotationItem(data=result, name='ANN')
@property
def nt_change(self):
result = self._fetch_field('nt_change')
return AnnotationItem(data=result, name='ANN')
@property
def aa_change(self):
result = self._fetch_field('aa_change')
return AnnotationItem(data=result, name='ANN')
@property
    def cdna_pos(self):
        result = self._fetch_field('cdna_pos')
return AnnotationItem(data=result, name='ANN')
@property
def protein_pos(self):
        result = self._fetch_field('protein_position')
return AnnotationItem(data=result, name='ANN')
@property
def distance_to_feature(self):
result = self._fetch_field('distance_to_feature')
return AnnotationItem(data=result, name='ANN')
@property
def error(self):
result = self._fetch_field('error')
return AnnotationItem(data=result, name='ANN')
class VCF_DataFrame(DataFrame):
_metadata = ['samples', 'interval', 'chrom', 'start', 'end']
attrs = ['CHROM',
'POS',
'ID',
'REF',
'ALT',
'QUAL',
'FILTER',
'start',
'end',
'aaf',
'nucl_diversity',
'is_snp',
'is_indel',
'call_rate',
'num_called',
'num_het',
'num_hom_ref',
'num_hom_alt',
'ploidy',
'is_transition']
def __init__(self, *args, **kwargs):
super(VCF_DataFrame, self).__init__(*args, **kwargs)
@property
def _constructor(self):
return VCF_DataFrame
@property
def _constructor_sliced(self):
return AnnotationSeries
@classmethod
def from_vcf(cls, filename, interval=None):
"""
Create a numpy-array VCF object.
filename:
Name of the VCF
interval:
An interval of the VCF to use (chrom:start-end)
"""
vcf = VCF(filename, gts012=True)
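        # gts012=True makes cyvcf2 encode genotypes as 0=HOM_REF, 1=HET, 2=HOM_ALT, 3=UNKNOWN.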
rows = []
for i, line in enumerate(vcf(interval)):
var_line = {}
var_line = {attr: getattr(line, attr) for attr in cls.attrs if hasattr(line, attr)}
# Currently string lists must be encoded using python.
var_line['FT'] = line.format("FT")
var_line['TGT'] = line.gt_bases
var_line['DP'] = line.format("DP").flatten().astype(np.int64)
var_line['GT'] = line.gt_types.astype(np.int64)
ANN = line.INFO.get("ANN")
if ANN:
var_line['ANN'] = [x.split("|") for x in ANN.split(",")]
rows.append(var_line)
dataset = DataFrame.from_dict(rows)
# Convert to categorical
dataset.REF = pd.Categorical(dataset.REF)
dataset.FILTER = pd.Categorical(dataset.FILTER)
# Add num missing column
dataset['num_missing'] = dataset.GT.apply(lambda row: np.sum(np.isin(row, ['./.', '.|.'])))
# Use ordered CHROM
dataset.CHROM = pd.Categorical(dataset.CHROM,
ordered=True,
categories=vcf.seqnames)
dataset.REF = pd.Categorical(dataset.REF)
dataset.FILTER = pd.Categorical(dataset.FILTER)
# Add samples
dataset = VCF_DataFrame(dataset)
dataset.samples = np.array(vcf.samples)
if interval:
dataset.interval = interval
            chrom, start, end = re.split(r"[:\-]", interval)
dataset.chrom = chrom
dataset.start = int(start)
dataset.end = int(end)
dataset['allele_set'] = dataset.TGT.apply(lambda x: set([a for a in sum([re.split("\||\/", i) for i in x], []) if a != '.']))
return dataset
def _prune_non_snps(self):
"""
Remove snps not present in the VCF (monomorphic sites)
Also will remove sites that are all missing.
"""
non_snps = self.GT.apply(lambda x: len(set(x[~np.isnan(x)])) > 1)
return self[non_snps]
def _prune_alleles(self):
"""
Remove ANN that are not present in the set of subset samples
"""
        self['allele_set'] = self.TGT.apply(lambda x: set([a for a in sum([re.split(r"[|/]", i) for i in x], []) if a != '.']))
self[~self.ANN.isna()].ANN = self[~self.ANN.isna()].apply(lambda row: [i for i in row['ANN'] if i[0] in row.allele_set], axis=1)
return self
def subset_samples(self, samples, prune_non_snps=True, inplace=False):
"""
Subset samples
"""
sample_bool_keep = np.isin(self.samples, samples)
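        # Boolean mask aligned with the VCF sample order, used to slice every per-sample array (GT, TGT, DP, FT).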
df = self.copy()
# Subset GT
df.GT = df.GT.apply(lambda row: row[sample_bool_keep])
df.TGT = df.TGT.apply(lambda row: row[sample_bool_keep])
df.DP = df.DP.apply(lambda row: row[sample_bool_keep])
df.FT = df.FT.apply(lambda row: row[sample_bool_keep])
# Update variables
df.num_hom_ref = df.GT.apply(lambda row: np.sum(row == 0))
df.num_het = df.GT.apply(lambda row: np.sum(row == 1))
df.num_hom_alt = df.GT.apply(lambda row: np.sum(row == 2))
df.num_missing = df.TGT.apply(lambda row: np.sum(np.isin(row, ['./.', '.|.'])))
df.missing_rate = df.num_missing
        # Keep the explicit '== False': numpy needs an elementwise comparison here, so 'is' won't work.
df.num_called = df.TGT.apply(lambda row: np.sum(np.isin(row, ['./.', '.|.']) == False))
df.call_rate = df.GT.apply(lambda row: np.sum(row != 3)/row.size)
        if prune_non_snps and len(samples) > 1:
            original_size = df.size
            df = df._prune_non_snps()
            pruned_snps = original_size - df.size
            self.messages.append(f"Pruned SNPs: {pruned_snps}")
        elif prune_non_snps:
            self.messages.append("Subsetting on one sample - not pruning monomorphic SNPs.")
# Update samples
df.samples = self.samples[np.isin(self.samples, samples)]
if inplace:
self.samples = df.samples
self = df
else:
return df
    @staticmethod
    def _parse_interval(interval):
        """
        Parses an interval
        """
        chrom, *pos = re.split(r"[:\-]", interval)
if len(pos) not in [0, 2]:
raise Exception("Invalid interval")
elif len(pos) == 2:
pos = list(map(int, pos))
return chrom, pos[0], pos[1]
return chrom, None, None
#def interval(self, interval):
# """
# Filters a VCF on an interval
# """
# chrom, start, end = self._parse_interval(interval)
# if chrom and start and end:
# query_string = f"CHROM == '{chrom}' & POS > {start} & POS < {end}"
# elif chrom:
# query_string = f"CHROM == '{chrom}'"
# return self.query(query_string)
def interval_summary(self, interval=None, deep=False):
"""
Generates a comprehensive interval summary
Args:
interval - Act on an interval
deep - add extra info
"""
if interval:
df = self.interval(interval)
else:
df = self
results = infinite_dict()
# Impact
impact = results['variants']['impact']
impact['total'] = Counter(sum(df.ANN.impact.dropna(), []))
impact['unique'] = Counter(sum(df.ANN.impact.dropna().apply(lambda x: list(set(x))), []))
# FILTER summary
impact = results['variants']['impact']
impact['total'] = Counter(sum(df.ANN.impact.dropna(), []))
impact['unique'] = Counter(sum(df.ANN.impact.dropna().apply(lambda x: list(set(x))), []))
# Summary
variants = results['variants']
variants['filters']['FILTER'] = Counter(df.FILTER.dropna())
FT_vals = np.concatenate(df.FT.values)
if deep:
# These operations take too long.
variants['filters']['FT']['combined'] = Counter(FT_vals)
variants['filters']['FT']['separate'] = Counter(np.concatenate(Series(FT_vals).apply(lambda x: x.split(";")).values))
# snp
variants['snp']['records'] = sum(df.is_snp)
variants['snp']['num_missing'] = sum(df[df.is_snp].num_missing)
variants['snp']['avg_call_rate'] = np.average(df[df.is_snp].call_rate)
variants['snp']['transition'] = sum(df[df.is_snp].is_transition)
variants['snp']['transversion'] = sum(df[df.is_snp].is_transition == False)
variants['snp']['num_hom_ref'] = sum(df[df.is_snp].num_hom_ref)
variants['snp']['num_het'] = sum(df[df.is_snp].num_het)
variants['snp']['num_hom_alt'] = sum(df[df.is_snp].num_hom_alt)
# indel
variants['indel']['records'] = sum(df.is_indel)
variants['indel']['num_missing'] = sum(df[df.is_indel].num_missing)
variants['indel']['avg_call_rate'] = np.average(df[df.is_indel].call_rate)
variants['indel']['transition'] = sum(df[df.is_indel].is_transition)
variants['indel']['transversion'] = sum(df[df.is_indel].is_transition == False)
variants['indel']['num_hom_ref'] = sum(df[df.is_indel].num_hom_ref)
variants['indel']['num_het'] = sum(df[df.is_indel].num_het)
variants['indel']['num_hom_alt'] = sum(df[df.is_indel].num_hom_alt)
# biotype summary
variants['biotype'] = Counter(sum(df.ANN.transcript_biotype.dropna().apply(lambda x: list(set(x))), []))
# By Gene
gene = results['gene']
# Gene count
        gene['genes_w_variants'] = len(set(sum(df.ANN.gene_id.dropna().values, [])))
for impact in set(sum(df.ANN.impact.dropna().values, [])):
gene['impact'][impact] = list(set(sum(df[df.ANN.impact == impact].ANN.gene_id.dropna().values, [])))
for transcript_biotype in set(sum(df.ANN.transcript_biotype.dropna().values, [])):
gene['transcript_biotype'][transcript_biotype] = list(set(sum(df[df.ANN.transcript_biotype == transcript_biotype].ANN.gene_id.dropna().values, [])))
# Biotype+Impact counts
for impact in set(sum(df.ANN.impact.dropna().values, [])):
for transcript_biotype in set(sum(df.ANN.transcript_biotype.dropna().values, [])):
filter_crit = (df.ANN.impact == impact) & (df.ANN.transcript_biotype == transcript_biotype)
gene['impact-biotype'][impact][transcript_biotype] = list(set(sum(df[filter_crit].ANN.gene_id.dropna().values, [])))
# Genes
return json.dumps(results)
def interval_summary_table(self):
df = self
genes = pd.read_csv("genes.tsv.gz")
interval_genes = genes[(genes.chrom == df.chrom) & (genes.start > df.start) & (genes.end < df.end) ]
biotypes_set = list(set(sum(df.ANN.transcript_biotype.dropna().values, [])))
for biotype in biotypes_set:
df[biotype] = df.ANN.transcript_biotype == biotype
df['gene_id'] = df.ANN.gene_id.dropna().apply(lambda x: list(set(x))[0])
ALL_gene_count = interval_genes[['biotype', 'gene_id']].groupby(['biotype'], as_index=False) \
.agg(['count'])
ALL_gene_count = flatten_cols(ALL_gene_count).rename(index=str, columns={"gene_id_count": "gene_count"}) \
.reset_index()
GENE_count = df[biotypes_set + ['gene_id']].groupby(['gene_id']) \
.agg(['max']) \
.agg(['sum']) \
.transpose() \
.reset_index() \
.rename(index=str, columns={"sum": "genes_w_variants", "level_0": "biotype"}) \
.drop("level_1", axis=1)
LMH_set = []
for x in ["MODIFIER", "LOW", "MODERATE", "HIGH"]:
lmh_df = df[biotypes_set + ['gene_id']][df.ANN.impact == x].groupby(['gene_id']) \
.agg(['max']) \
.agg(['sum']) \
.transpose() \
.reset_index() \
.rename(index=str, columns={"sum": f"genes_w_{x}_variants", "level_0": "biotype"}) \
.drop("level_1", axis=1)
LMH_set.append(lmh_df)
VARIANT_count = df[biotypes_set].agg(['sum']) \
.transpose() \
.reset_index() \
.rename(index=str, columns={"sum": "variants", "index": "biotype"})
dfs = [ALL_gene_count, GENE_count] + LMH_set + [VARIANT_count]
merged = reduce(lambda left, right: pd.merge(left, right, how='outer', on='biotype'), dfs)
merged.iloc[:,1:] = merged.iloc[:,1:].fillna(0).astype(int)
merged['interval'] = df.interval
return merged.sort_values('variants', ascending=False)
@staticmethod
def _sub_values(row, find, replace):
"""
Substitute values in an array
"""
np.place(row, row == find, replace)
return row
def concordance(self):
"""
Calculate the concordance of genotypes across all samples.
Currently functions with ploidy == 1 or 2
A homozygous REF (e.g. AA) and heterozygous (AG) call
are treated as dicordant.
"""
df = self
# Convert GT to float so nan values can be
# added.
df.GT = df.GT.apply(lambda row: row.astype(float)) \
.apply(lambda row: self._sub_values(row, 3.0, np.nan))
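        # Per-sample count of non-missing genotype calls across all sites (UNKNOWN genotypes were turned into NaN above).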
called_gtypes = sum(df.GT.apply(lambda row: np.isnan(row) == False))
        # Pairwise concordant-genotype counts: row[:, None] == row yields a samples x samples equality matrix per site, summed over all sites.
cf = sum(df.GT.apply(lambda row: (row[:, None] == row)))
cf = DataFrame(cf, columns=df.samples, index=df.samples)
cf.index.name = "sample_a"
cf.columns.name = "sample_b"
cf = cf.stack()
cf = DataFrame(cf, columns=['concordant_gt']).reset_index()
n_called_a = pd.DataFrame(called_gtypes, columns=['gt_called_a'], index=df.samples)
n_called_b = pd.DataFrame(called_gtypes, columns=['gt_called_b'], index=df.samples)
n_called_a.index.name = 'sample_a'
n_called_b.index.name = 'sample_b'
cf = cf.join(n_called_a, on='sample_a').join(n_called_b, on='sample_b')
cf['minimum_gt'] = cf.apply(lambda row: min(row.gt_called_a, row.gt_called_b), axis=1)
cf['concordance'] = cf['concordant_gt'] / cf['minimum_gt']
return cf
def hard_filter(self):
"""
The hard filter method does two things:
(1) Removes all columns where
FILTER != PASS (which is represented as None in pandas-vcf)
(2) Sets FT (genotype-level) variants to NaN.
"""
df = self
        df.GT = df.GT.apply(lambda row: row.astype(float)) \
            .apply(lambda row: self._sub_values(row, 3.0, np.nan))
        # Format genotypes and filters.
GT_filter = np.vstack(df.FT.apply(lambda row: row != "PASS").values)
GT_vals = np.vstack(df.GT.apply(lambda row: row.astype(float)).values)
# Apply nan filter to FT != PASS
GT_vals[GT_filter] = np.nan
# Re-integrate genotypes
df.GT = Series(list(GT_vals))
# FILTER columns
df = df[df.FILTER.isnull()]
return df
def to_fasta(self, filename=None):
"""
Generates a FASTA file
"""
df = self
for sample, row in zip(df.samples, np.vstack(df.TGT.values).T):
print(f">{sample}")
            seq = Series(row).apply(lambda gt: gt.replace("|", "/")) \
                .apply(lambda gt: gt.split("/")) \
                .apply(lambda alleles: alleles[0] if len(set(alleles)) == 1 else "N")
print(''.join(seq.values).replace(".", "N"))
|
the-stack_106_29566 | """
A class for creating a satellite object, describing the characteristics of it.
"""
from math import pi
from .utils import heavenly_body_radius
import warnings
class Satellite(object):
def __init__(self, name, altitude, eccentricity, inclination, right_ascension, perigee, ta, beam,
focus="earth", rads=True):
self._name = name
self._altitude = altitude
self._focus = focus
self._true_alt = self.altitude + self.__get_radius()
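        # true_alt is the orbital radius measured from the centre of the focus body: surface altitude plus the body's radius.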
self._eccentricity = eccentricity
self._beam = beam
if not rads:
self.inclination = inclination
self.right_ascension = right_ascension
self.perigee = perigee
self.ta = ta
self.inclination_r, self.right_ascension_r, self.perigee_r, self.ta_r = self.__convert_to_rads()
else:
self.inclination_r = inclination
self.right_ascension_r = right_ascension
self.perigee_r = perigee
self.ta_r = ta
self.inclination, self.right_ascension, self.perigee, self.ta = self.__convert_to_degs()
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
self._name = new_name
@property
def altitude(self):
return self._altitude
@altitude.setter
def altitude(self, new_alt):
if new_alt < 100:
return ValueError("Satellite's orbital altitude must be over the Karman line.")
else:
self._altitude = new_alt
self._true_alt = new_alt + self.__get_radius()
@property
def true_alt(self):
return self._true_alt
@property
def eccentricity(self):
return self._eccentricity
@eccentricity.setter
def eccentricity(self, new_e):
if new_e < 0:
return ValueError("Eccentricity can't be set below a perfect circle.")
else:
self._eccentricity = new_e
@property
def beam(self):
return self._beam
@beam.setter
def beam(self, new_beam):
        if new_beam < 0 or new_beam > 180:
            raise ValueError("Beam width must be between 0 and 180 degrees")
        else:
            self._beam = new_beam
def __convert_to_rads(self, value=None):
to_rad = pi / 180
if value:
return value * to_rad
else:
return self.inclination * to_rad, self.right_ascension * to_rad, self.perigee * to_rad, self.ta * to_rad
def __convert_to_degs(self, value=None):
to_deg = 180 / pi
if value:
return value * to_deg
else:
return self.inclination_r * to_deg, self.right_ascension_r * to_deg, self.perigee_r * to_deg, \
self.ta_r * to_deg
def __get_radius(self):
return heavenly_body_radius[self._focus.lower()]
def __repr__(self):
return "{0}, {1}, {2}, {3}, {4}, {5}, {6}".format(self.name, self.altitude, self.eccentricity,
self.inclination, self.right_ascension, self.perigee, self.ta)
def __str__(self):
return "Satellite Name: {0}, Alt: {1}, e: {2}, " \
"Inclination: {3}, RA: {4}, Periapsis: {5}, Anomaly: {6}".format(self.name, self.altitude,
self.eccentricity, self.inclination,
self.right_ascension, self.perigee,
self.ta)
def as_dict(self, rads=True):
if rads:
sat = {"Name": self.name,
"Orbital Elements": {
"Eccentricity": self.eccentricity,
"Right Ascension": self.right_ascension_r,
"Semi-major Axis": self.true_alt,
"Arg. Periapsis": self.perigee_r,
"Mean Anomaly": self.ta_r,
"Inclination": self.inclination_r
},
"Beam Width": self.beam}
else:
sat = {"Name": self.name,
"Orbital Elements": {
"Eccentricity": self.eccentricity,
"Right Ascension": self.right_ascension,
"Semi-major Axis": self.true_alt,
"Arg. Periapsis": self.perigee,
"Mean Anomaly": self.ta,
"Inclination": self.inclination
},
"Beam Width": self.beam}
sat['Focus'] = self._focus
sat['Type'] = 'satellite'
return sat
def as_xml(self, epoch_date='2017-Jan-18 00:00:00', fov=1):
warnings.warn("XML support is depreciated and not supported from PIGI 0.8.5 onward", DeprecationWarning)
return '\t\t< Entity Type = "Satellite" Name = "{0}" >\n' \
'\t\t\t<PropertySection Name="UserProperties">\n' \
'\t\t\t\t<StringPropertyValue name="PlanetName" value="Earth"/>\n' \
'\t\t\t\t<StringPropertyValue name="CatalogName" value="{0}"/>\n' \
'\t\t\t\t<StringPropertyValue name="MeshName" value="SaberBox.mesh"/>\n' \
'\t\t\t\t<StringPropertyValue name="BindingsFile" value=""/>\n' \
'\t\t\t\t<IntPropertyValue name="ManualOrbitalElements" value="0"/>\n' \
'\t\t\t\t<StringPropertyValue name="AssemblyFile" value=""/>\n' \
'\t\t\t\t<StringPropertyValue name="SystemMapSourceId" value=""/>\n' \
'\t\t\t\t<StringPropertyValue name="ResourceGroup" value="Autodetect"/>\n' \
'\t\t\t\t<StringPropertyValue name="MetricSourceIds" value=""/>\n' \
'\t\t\t\t<FloatPropertyValue name="BeamWidth" value="{1}"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="SGP4 Parameters">\n' \
'\t\t\t\t<FloatPropertyValue name="B Star" value="-1.1606e-005"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Eccentricity" value="{2}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="RAAN" value="{3}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Semi-major axis" value="{4}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Arg. Perigee" value="{5}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Mean Anomaly" value="{6}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Inclination" value="{7}"/>\n' \
'\t\t\t\t<TimestampPropertyValue name="Epoch" value="{8}"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Fov">\n' \
'\t\t\t\t<EnumPropertyValue name="Enabled" value="{9}"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="TargetSceneNode">\n' \
'\t\t\t\t<ArrayPropertyValue name="Position" value="[0, 0, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Orientation" value="[1, 0, 0, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Scale" value="[200, 200, 200]"/>\n' \
'\t\t\t\t<EnumPropertyValue name="Debug" value="0"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Billboard">\n' \
'\t\t\t\t<ArrayPropertyValue name="Position" value="[0, 0, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Orientation" value="[1, 0, 0, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Scale" value="[200, 200, 200]"/>\n' \
'\t\t\t\t<EnumPropertyValue name="Debug" value="0"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Favourite">\n' \
'\t\t\t\t<EnumPropertyValue name="favourite" value="0"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t</Entity>\n'.format(self.name, self.beam, self.eccentricity, self.right_ascension_r,
self.true_alt, self.perigee_r, self.ta_r, self.inclination_r,
epoch_date, fov)
|
the-stack_106_29569 | # coding: utf-8
import os
import re
import time
import signal
import shutil
import logging
import tempfile
import subprocess
import errno
import distutils.version
import six
try:
# yatest.common should try to be hermetic, otherwise, PYTEST_SCRIPT (aka USE_ARCADIA_PYTHON=no) won't work.
import library.python.cores as cores
except ImportError:
cores = None
from . import runtime
from . import path
from . import environment
MAX_OUT_LEN = 1000 * 1000 # 1 mb
MAX_MESSAGE_LEN = 1500
SANITIZER_ERROR_PATTERN = br": ([A-Z][\w]+Sanitizer)"
GLIBC_PATTERN = re.compile(r"\S+@GLIBC_([0-9.]+)")
yatest_logger = logging.getLogger("ya.test")
def truncate(s, size):
if s is None:
return None
elif len(s) <= size:
return s
else:
return (b'...' if isinstance(s, bytes) else '...') + s[-(size - 3):]
def get_command_name(command):
return os.path.basename(command.split()[0] if isinstance(command, six.string_types) else command[0])
class ExecutionError(Exception):
def __init__(self, execution_result):
if not isinstance(execution_result.command, six.string_types):
command = " ".join(str(arg) for arg in execution_result.command)
else:
command = execution_result.command
message = "Command '{command}' has failed with code {code}.\nErrors:\n{err}\n".format(
command=command,
code=execution_result.exit_code,
err=_format_error(execution_result.std_err))
if cores:
if execution_result.backtrace:
message += "Backtrace:\n[[rst]]{}[[bad]]\n".format(cores.colorize_backtrace(execution_result._backtrace))
else:
message += "Backtrace is not available: module cores isn't available"
super(ExecutionError, self).__init__(message)
self.execution_result = execution_result
class TimeoutError(Exception):
pass
class ExecutionTimeoutError(TimeoutError):
def __init__(self, execution_result, *args, **kwargs):
super(ExecutionTimeoutError, self).__init__(args, kwargs)
self.execution_result = execution_result
class InvalidExecutionStateError(Exception):
pass
class SignalInterruptionError(Exception):
def __init__(self, message=None):
super(SignalInterruptionError, self).__init__(message)
self.res = None
class _Execution(object):
def __init__(self, command, process, out_file, err_file, process_progress_listener=None, cwd=None, collect_cores=True, check_sanitizer=True, started=0, user_stdout=False, user_stderr=False):
self._command = command
self._process = process
self._out_file = out_file
self._err_file = err_file
self._std_out = None
self._std_err = None
self._elapsed = None
self._start = time.time()
self._process_progress_listener = process_progress_listener
self._cwd = cwd or os.getcwd()
self._collect_cores = collect_cores
self._backtrace = ''
self._check_sanitizer = check_sanitizer
self._metrics = {}
self._started = started
self._user_stdout = bool(user_stdout)
self._user_stderr = bool(user_stderr)
self._exit_code = None
if process_progress_listener:
process_progress_listener.open(command, process, out_file, err_file)
@property
def running(self):
return self._process.poll() is None
def kill(self):
if self.running:
self._save_outputs(False)
_kill_process_tree(self._process.pid)
self._clean_files()
# DEVTOOLS-2347
yatest_logger.debug("Process status before wait_for: %s", self.running)
try:
wait_for(lambda: not self.running, timeout=5, fail_message="Could not kill process {}".format(self._process.pid), sleep_time=.1)
except TimeoutError:
yatest_logger.debug("Process status after wait_for: %s", self.running)
yatest_logger.debug("Process %d info: %s", self._process.pid, _get_proc_tree_info([self._process.pid]))
raise
else:
raise InvalidExecutionStateError("Cannot kill a stopped process")
def terminate(self):
if self.running:
self._process.terminate()
@property
def process(self):
return self._process
@property
def command(self):
return self._command
@property
def returncode(self):
return self.exit_code
@property
def exit_code(self):
"""
Deprecated, use returncode
"""
if self._exit_code is None:
self._exit_code = self._process.returncode
return self._exit_code
@property
def stdout(self):
return self.std_out
@property
def std_out(self):
"""
Deprecated, use stdout
"""
if self._std_out is not None:
return self._std_out
if self._process.stdout and not self._user_stdout:
self._std_out = self._process.stdout.read()
return self._std_out
@property
def stderr(self):
return self.std_err
@property
def std_err(self):
"""
Deprecated, use stderr
"""
if self._std_err is not None:
return self._std_err
if self._process.stderr and not self._user_stderr:
self._std_err = self._process.stderr.read()
return self._std_err
@property
def elapsed(self):
return self._elapsed
@property
def backtrace(self):
return self._backtrace
@property
def metrics(self):
return self._metrics
def _save_outputs(self, clean_files=True):
if self._process_progress_listener:
self._process_progress_listener()
self._process_progress_listener.close()
if not self._user_stdout:
if self._out_file is None:
pass
elif self._out_file != subprocess.PIPE:
self._out_file.flush()
self._out_file.seek(0, os.SEEK_SET)
self._std_out = self._out_file.read()
else:
self._std_out = self._process.stdout.read()
if not self._user_stderr:
if self._err_file is None:
pass
elif self._err_file != subprocess.PIPE:
self._err_file.flush()
self._err_file.seek(0, os.SEEK_SET)
self._std_err = self._err_file.read()
else:
self._std_err = self._process.stderr.read()
if clean_files:
self._clean_files()
yatest_logger.debug("Command (pid %s) rc: %s", self._process.pid, self.exit_code)
yatest_logger.debug("Command (pid %s) elapsed time (sec): %s", self._process.pid, self.elapsed)
if self._metrics:
for key, value in six.iteritems(self._metrics):
yatest_logger.debug("Command (pid %s) %s: %s", self._process.pid, key, value)
        # Since this code is Python2/3 compatible, we don't know whether _std_out/_std_err is real bytes or a str.
printable_std_out, err = _try_convert_bytes_to_string(self._std_out)
if err:
yatest_logger.debug("Got error during parse process stdout: %s", err)
yatest_logger.debug("stdout will be displayed as raw bytes.")
printable_std_err, err = _try_convert_bytes_to_string(self._std_err)
if err:
yatest_logger.debug("Got error during parse process stderr: %s", err)
yatest_logger.debug("stderr will be displayed as raw bytes.")
yatest_logger.debug("Command (pid %s) output:\n%s", self._process.pid, truncate(printable_std_out, MAX_OUT_LEN))
yatest_logger.debug("Command (pid %s) errors:\n%s", self._process.pid, truncate(printable_std_err, MAX_OUT_LEN))
def _clean_files(self):
if self._err_file and not self._user_stderr and self._err_file != subprocess.PIPE:
self._err_file.close()
self._err_file = None
if self._out_file and not self._user_stdout and self._out_file != subprocess.PIPE:
self._out_file.close()
self._out_file = None
def _recover_core(self):
core_path = cores.recover_core_dump_file(self.command[0], self._cwd, self.process.pid)
if core_path:
# Core dump file recovering may be disabled (for distbuild for example) - produce only bt
store_cores = runtime._get_ya_config().collect_cores
if store_cores:
new_core_path = path.get_unique_file_path(runtime.output_path(), "{}.{}.core".format(os.path.basename(self.command[0]), self._process.pid))
# Copy core dump file, because it may be overwritten
yatest_logger.debug("Coping core dump file from '%s' to the '%s'", core_path, new_core_path)
shutil.copyfile(core_path, new_core_path)
core_path = new_core_path
bt_filename = None
pbt_filename = None
if os.path.exists(runtime.gdb_path()):
self._backtrace = cores.get_gdb_full_backtrace(self.command[0], core_path, runtime.gdb_path())
bt_filename = path.get_unique_file_path(runtime.output_path(), "{}.{}.backtrace".format(os.path.basename(self.command[0]), self._process.pid))
with open(bt_filename, "wb") as afile:
afile.write(self._backtrace)
# generate pretty html version of backtrace aka Tri Korochki
pbt_filename = bt_filename + ".html"
backtrace_to_html(bt_filename, pbt_filename)
if store_cores:
runtime._register_core(os.path.basename(self.command[0]), self.command[0], core_path, bt_filename, pbt_filename)
else:
runtime._register_core(os.path.basename(self.command[0]), None, None, bt_filename, pbt_filename)
def wait(self, check_exit_code=True, timeout=None, on_timeout=None):
def _wait():
finished = None
interrupted = False
try:
if hasattr(os, "wait4"):
try:
if hasattr(subprocess, "_eintr_retry_call"):
pid, sts, rusage = subprocess._eintr_retry_call(os.wait4, self._process.pid, 0)
else:
# PEP 475
pid, sts, rusage = os.wait4(self._process.pid, 0)
finished = time.time()
self._process._handle_exitstatus(sts)
for field in [
"ru_idrss",
"ru_inblock",
"ru_isrss",
"ru_ixrss",
"ru_majflt",
"ru_maxrss",
"ru_minflt",
"ru_msgrcv",
"ru_msgsnd",
"ru_nivcsw",
"ru_nsignals",
"ru_nswap",
"ru_nvcsw",
"ru_oublock",
"ru_stime",
"ru_utime",
]:
if hasattr(rusage, field):
self._metrics[field.replace("ru_", "")] = getattr(rusage, field)
except OSError as exc:
if exc.errno == errno.ECHILD:
yatest_logger.debug("Process resource usage is not available as process finished before wait4 was called")
else:
raise
except SignalInterruptionError:
interrupted = True
raise
finally:
if not interrupted:
self._process.wait() # this has to be here unconditionally, so that all process properties are set
if not finished:
finished = time.time()
self._metrics["wtime"] = round(finished - self._started, 3)
try:
if timeout:
process_is_finished = lambda: not self.running
fail_message = "Command '%s' stopped by %d seconds timeout" % (self._command, timeout)
try:
wait_for(process_is_finished, timeout, fail_message, sleep_time=0.1, on_check_condition=self._process_progress_listener)
except TimeoutError as e:
if on_timeout:
yatest_logger.debug("Calling user specified on_timeout function")
try:
on_timeout(self, timeout)
except Exception:
yatest_logger.exception("Exception while calling on_timeout")
raise ExecutionTimeoutError(self, str(e))
# Wait should be always called here, it finalizes internal states of its process and sets up return code
_wait()
except BaseException as e:
_kill_process_tree(self._process.pid)
_wait()
yatest_logger.debug("Command exception: %s", e)
raise
finally:
self._elapsed = time.time() - self._start
self._save_outputs()
self.verify_no_coredumps()
self._finalise(check_exit_code)
def _finalise(self, check_exit_code):
# Set the signal (negative number) which caused the process to exit
if check_exit_code and self.exit_code != 0:
yatest_logger.error("Execution failed with exit code: %s\n\t,std_out:%s\n\tstd_err:%s\n",
self.exit_code, truncate(self.std_out, MAX_OUT_LEN), truncate(self.std_err, MAX_OUT_LEN))
raise ExecutionError(self)
# Don't search for sanitize errors if stderr was redirected
self.verify_sanitize_errors()
def verify_no_coredumps(self):
"""
Verify there is no coredump from this binary. If there is then report backtrace.
"""
if self.exit_code < 0 and self._collect_cores:
if cores:
try:
self._recover_core()
except Exception:
yatest_logger.exception("Exception while recovering core")
else:
yatest_logger.warning("Core dump file recovering is skipped: module cores isn't available")
def verify_sanitize_errors(self):
"""
Verify there are no sanitizer (ASAN, MSAN, TSAN, etc) errors for this binary. If there are any report them.
"""
if self._std_err and self._check_sanitizer and runtime._get_ya_config().sanitizer_extra_checks:
build_path = runtime.build_path()
if self.command[0].startswith(build_path):
match = re.search(SANITIZER_ERROR_PATTERN, self._std_err)
if match:
yatest_logger.error("%s sanitizer found errors:\n\tstd_err:%s\n", match.group(1), truncate(self.std_err, MAX_OUT_LEN))
raise ExecutionError(self)
else:
yatest_logger.debug("No sanitizer errors found")
else:
yatest_logger.debug("'%s' doesn't belong to '%s' - no check for sanitize errors", self.command[0], build_path)
# Don't forget to sync changes in the interface and defaults with yatest.yt.process.execute
def execute(
command, check_exit_code=True,
shell=False, timeout=None,
cwd=None, env=None,
stdin=None, stdout=None, stderr=None,
creationflags=0, wait=True,
process_progress_listener=None, close_fds=False,
collect_cores=True, check_sanitizer=True, preexec_fn=None, on_timeout=None,
executor=_Execution,
):
"""
Executes a command
:param command: command: can be a list of arguments or a string
:param check_exit_code: will raise ExecutionError if the command exits with non zero code
:param shell: use shell to run the command
:param timeout: execution timeout
:param cwd: working directory
:param env: command environment
:param stdin: command stdin
:param stdout: command stdout
:param stderr: command stderr
:param creationflags: command creation flags
:param wait: should wait until the command finishes
    :param process_progress_listener: object that is polled while execution is in progress
    :param close_fds: subprocess.Popen close_fds arg
    :param collect_cores: recover core dump files if shell == False
    :param check_sanitizer: raise ExecutionError if stderr contains sanitize errors
    :param preexec_fn: subprocess.Popen preexec_fn arg
:param on_timeout: on_timeout(<execution object>, <timeout value>) callback
:return: Execution object
"""
if env is None:
env = os.environ.copy()
else:
# Certain environment variables must be present for programs to work properly.
# For more info see DEVTOOLSSUPPORT-4907
mandatory_env_name = 'YA_MANDATORY_ENV_VARS'
if mandatory_env_name in os.environ:
env[mandatory_env_name] = os.environ[mandatory_env_name]
mandatory_system_vars = filter(None, os.environ.get('YA_MANDATORY_ENV_VARS', '').split(':'))
else:
mandatory_system_vars = ['TMPDIR']
for var in mandatory_system_vars:
if var not in env and var in os.environ:
env[var] = os.environ[var]
if not wait and timeout is not None:
raise ValueError("Incompatible arguments 'timeout' and wait=False")
# if subprocess.PIPE in [stdout, stderr]:
# raise ValueError("Don't use pipe to obtain stream data - it may leads to the deadlock")
def get_out_stream(stream, default_name):
if stream is None:
# No stream is supplied: open new temp file
return _get_command_output_file(command, default_name), False
if isinstance(stream, six.string_types):
# User filename is supplied: open file for writing
return open(stream, 'wb+'), stream.startswith('/dev/')
# Open file or PIPE sentinel is supplied
is_pipe = stream == subprocess.PIPE
return stream, not is_pipe
# to be able to have stdout/stderr and track the process time execution, we don't use subprocess.PIPE,
# as it can cause processes hangs, but use tempfiles instead
out_file, user_stdout = get_out_stream(stdout, 'out')
err_file, user_stderr = get_out_stream(stderr, 'err')
in_file = stdin
if shell and type(command) == list:
command = " ".join(command)
if shell:
collect_cores = False
check_sanitizer = False
if check_sanitizer:
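        # Make LeakSanitizer exit with a distinctive code so leaks surface through the process exit status.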
env["LSAN_OPTIONS"] = environment.extend_env_var(os.environ, "LSAN_OPTIONS", "exitcode=100")
if stdin:
name = "PIPE" if stdin == subprocess.PIPE else stdin.name
yatest_logger.debug("Executing '%s' with input '%s' in '%s'", command, name, cwd)
else:
yatest_logger.debug("Executing '%s' in '%s'", command, cwd)
# XXX
started = time.time()
process = subprocess.Popen(
command, shell=shell, universal_newlines=True,
stdout=out_file, stderr=err_file, stdin=in_file,
cwd=cwd, env=env, creationflags=creationflags, close_fds=close_fds, preexec_fn=preexec_fn,
)
yatest_logger.debug("Command pid: %s", process.pid)
res = executor(command, process, out_file, err_file, process_progress_listener, cwd, collect_cores, check_sanitizer, started, user_stdout=user_stdout, user_stderr=user_stderr)
if wait:
res.wait(check_exit_code, timeout, on_timeout)
return res
def _get_command_output_file(cmd, ext):
parts = [get_command_name(cmd)]
if 'YA_RETRY_INDEX' in os.environ:
parts.append('retry{}'.format(os.environ.get('YA_RETRY_INDEX')))
if int(os.environ.get('YA_SPLIT_COUNT', '0')) > 1:
parts.append('chunk{}'.format(os.environ.get('YA_SPLIT_INDEX', '0')))
filename = '.'.join(parts + [ext])
try:
# if execution is performed from test, save out / err to the test logs dir
import yatest.common
import pytest
if not hasattr(pytest, 'config'):
raise ImportError("not in test")
filename = path.get_unique_file_path(yatest.common.output_path(), filename)
yatest_logger.debug("Command %s will be placed to %s", ext, os.path.basename(filename))
return open(filename, "wb+")
except ImportError:
return tempfile.NamedTemporaryFile(delete=False, suffix=filename)
def _get_proc_tree_info(pids):
if os.name == 'nt':
return 'Not supported'
else:
stdout, _ = subprocess.Popen(["/bin/ps", "-wufp"] + [str(p) for p in pids], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
return stdout
def py_execute(
command, check_exit_code=True,
shell=False, timeout=None,
cwd=None, env=None,
stdin=None, stdout=None, stderr=None,
creationflags=0, wait=True,
process_progress_listener=None, close_fds=False
):
"""
Executes a command with the arcadia python
:param command: command to pass to python
:param check_exit_code: will raise ExecutionError if the command exits with non zero code
:param shell: use shell to run the command
:param timeout: execution timeout
:param cwd: working directory
:param env: command environment
:param stdin: command stdin
:param stdout: command stdout
:param stderr: command stderr
:param creationflags: command creation flags
:param wait: should wait until the command finishes
    :param process_progress_listener: object that is polled while execution is in progress
:return: Execution object
"""
if isinstance(command, six.string_types):
command = [command]
command = [runtime.python_path()] + command
if shell:
command = " ".join(command)
return execute(**locals())
def _format_error(error):
return truncate(error, MAX_MESSAGE_LEN)
def wait_for(check_function, timeout, fail_message="", sleep_time=1.0, on_check_condition=None):
"""
    Tries to execute `check_function` for `timeout` seconds.
    Continues until the function returns a truthy value.
    If the function doesn't return a truthy value within `timeout` seconds,
    TimeoutError is raised.
    Returns the first truthy result returned by `check_function`.
"""
if sleep_time <= 0:
raise ValueError("Incorrect sleep time value {}".format(sleep_time))
if timeout < 0:
raise ValueError("Incorrect timeout value {}".format(timeout))
start = time.time()
while start + timeout > time.time():
if on_check_condition:
on_check_condition()
res = check_function()
if res:
return res
time.sleep(sleep_time)
message = "{} second(s) wait timeout has expired".format(timeout)
if fail_message:
message += ": {}".format(fail_message)
raise TimeoutError(truncate(message, MAX_MESSAGE_LEN))
def _kill_process_tree(process_pid, target_pid_signal=None):
"""
    Kills the given process together with all of its child processes (the whole process tree).
@param process_pid: parent id to search for descendants
"""
yatest_logger.debug("Killing process %s", process_pid)
if os.name == 'nt':
_win_kill_process_tree(process_pid)
else:
_nix_kill_process_tree(process_pid, target_pid_signal)
def _nix_get_proc_children(pid):
try:
cmd = ["pgrep", "-P", str(pid)]
return [int(p) for p in subprocess.check_output(cmd).split()]
except Exception:
return []
def _get_binname(pid):
try:
return os.path.basename(os.readlink('/proc/{}/exe'.format(pid)))
except Exception as e:
return "error({})".format(e)
def _nix_kill_process_tree(pid, target_pid_signal=None):
"""
Kills the process tree.
"""
yatest_logger.debug("Killing process tree for pid {} (bin:'{}')".format(pid, _get_binname(pid)))
def try_to_send_signal(pid, sig):
try:
os.kill(pid, sig)
yatest_logger.debug("Sent signal %d to the pid %d", sig, pid)
except Exception as exc:
yatest_logger.debug("Error while sending signal {sig} to pid {pid}: {error}".format(sig=sig, pid=pid, error=str(exc)))
try_to_send_signal(pid, signal.SIGSTOP) # Stop the process to prevent it from starting any child processes.
# Get the child process PID list.
child_pids = _nix_get_proc_children(pid)
# Stop the child processes.
for child_pid in child_pids:
try:
# Kill the child recursively.
_kill_process_tree(int(child_pid))
except Exception as e:
# Skip the error and continue killing.
yatest_logger.debug("Killing child pid {pid} failed: {error}".format(pid=child_pid, error=e))
continue
try_to_send_signal(pid, target_pid_signal or signal.SIGKILL) # Kill the root process.
# sometimes on freebsd sigkill cannot kill the process and either sigkill or sigcont should be sent
# https://www.mail-archive.com/[email protected]/msg159646.html
try_to_send_signal(pid, signal.SIGCONT)
def _win_kill_process_tree(pid):
subprocess.call(['taskkill', '/F', '/T', '/PID', str(pid)])
def _run_readelf(binary_path):
return str(subprocess.check_output([runtime.binary_path('contrib/python/pyelftools/readelf/readelf'), '-s', runtime.binary_path(binary_path)]))
def check_glibc_version(binary_path):
lucid_glibc_version = distutils.version.LooseVersion("2.11")
for l in _run_readelf(binary_path).split('\n'):
match = GLIBC_PATTERN.search(l)
if not match:
continue
assert distutils.version.LooseVersion(match.group(1)) <= lucid_glibc_version, match.group(0)
def backtrace_to_html(bt_filename, output):
try:
from library.python.coredump_filter import core_proc
with open(output, "wb") as afile:
core_proc.filter_stackdump(bt_filename, stream=afile)
except ImportError as e:
yatest_logger.debug("Failed to import coredump_filter: %s", e)
with open(output, "wb") as afile:
afile.write("<html>Failed to import coredump_filter in USE_ARCADIA_PYTHON=no mode</html>")
def _try_convert_bytes_to_string(source):
""" Function is necessary while this code Python2/3 compatible, because bytes in Python3 is a real bytes and in Python2 is not """
# Bit ugly typecheck, because in Python2 isinstance(str(), bytes) and "type(str()) is bytes" working as True as well
if 'bytes' not in str(type(source)):
# We already got not bytes. Nothing to do here.
return source, False
result = source
error = False
try:
result = source.decode(encoding='utf-8')
except ValueError as e:
error = e
return result, error
|
the-stack_106_29571 |
from ..utils import docval, getargs
from ..spec.spec import DtypeHelper
from numpy import dtype
__all__ = [
"Error",
"DtypeError",
"MissingError",
"ExpectedArrayError",
"ShapeError",
"MissingDataType",
"IllegalLinkError",
"IncorrectDataType"
]
class Error(object):
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'reason', 'type': str, 'doc': 'the reason for the error'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
self.__name = getargs('name', kwargs)
self.__reason = getargs('reason', kwargs)
self.__location = getargs('location', kwargs)
if self.__location is not None:
self.__str = "%s (%s): %s" % (self.__name, self.__location, self.__reason)
else:
self.__str = "%s: %s" % (self.name, self.reason)
@property
def name(self):
return self.__name
@property
def reason(self):
return self.__reason
@property
def location(self):
return self.__location
@location.setter
def location(self, loc):
self.__location = loc
self.__str = "%s (%s): %s" % (self.__name, self.__location, self.__reason)
def __str__(self):
return self.__str
def __repr__(self):
return self.__str__()
class DtypeError(Error):
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'expected', 'type': (dtype, type, str, list), 'doc': 'the expected dtype'},
{'name': 'received', 'type': (dtype, type, str, list), 'doc': 'the received dtype'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
name = getargs('name', kwargs)
expected = getargs('expected', kwargs)
received = getargs('received', kwargs)
if isinstance(expected, list):
expected = DtypeHelper.simplify_cpd_type(expected)
reason = "incorrect type - expected '%s', got '%s'" % (expected, received)
loc = getargs('location', kwargs)
super(DtypeError, self).__init__(name, reason, location=loc)
class MissingError(Error):
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
name = getargs('name', kwargs)
reason = "argument missing"
loc = getargs('location', kwargs)
super(MissingError, self).__init__(name, reason, location=loc)
class MissingDataType(Error):
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'data_type', 'type': str, 'doc': 'the missing data type'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
name, data_type = getargs('name', 'data_type', kwargs)
self.__data_type = data_type
reason = "missing data type %s" % self.__data_type
loc = getargs('location', kwargs)
super(MissingDataType, self).__init__(name, reason, location=loc)
@property
def data_type(self):
return self.__data_type
class ExpectedArrayError(Error):
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'expected', 'type': (tuple, list), 'doc': 'the expected shape'},
{'name': 'received', 'type': str, 'doc': 'the received data'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
name = getargs('name', kwargs)
expected = getargs('expected', kwargs)
received = getargs('received', kwargs)
reason = "incorrect shape - expected an array of shape '%s', got non-array data '%s'" % (expected, received)
loc = getargs('location', kwargs)
super(ExpectedArrayError, self).__init__(name, reason, location=loc)
class ShapeError(Error):
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'expected', 'type': (tuple, list), 'doc': 'the expected shape'},
{'name': 'received', 'type': (tuple, list), 'doc': 'the received shape'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
name = getargs('name', kwargs)
expected = getargs('expected', kwargs)
received = getargs('received', kwargs)
reason = "incorrect shape - expected '%s', got '%s'" % (expected, received)
loc = getargs('location', kwargs)
super(ShapeError, self).__init__(name, reason, location=loc)
class IllegalLinkError(Error):
"""
A validation error for indicating that a link was used where an actual object
(i.e. a dataset or a group) must be used
"""
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
name = getargs('name', kwargs)
reason = "illegal use of link"
loc = getargs('location', kwargs)
super(IllegalLinkError, self).__init__(name, reason, location=loc)
class IncorrectDataType(Error):
"""
A validation error for indicating that the incorrect data_type (not dtype) was used.
"""
@docval({'name': 'name', 'type': str, 'doc': 'the name of the component that is erroneous'},
{'name': 'expected', 'type': str, 'doc': 'the expected data_type'},
{'name': 'received', 'type': str, 'doc': 'the received data_type'},
{'name': 'location', 'type': str, 'doc': 'the location of the error', 'default': None})
def __init__(self, **kwargs):
name = getargs('name', kwargs)
expected = getargs('expected', kwargs)
received = getargs('received', kwargs)
reason = "incorrect data_type - expected '%s', got '%s'" % (expected, received)
loc = getargs('location', kwargs)
        super(IncorrectDataType, self).__init__(name, reason, location=loc)
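# Illustrative usage of the error classes above (a sketch, not part of the module):
#   err = DtypeError('my_dataset', expected='int', received='float', location='/root/my_dataset')
#   str(err) -> "my_dataset (/root/my_dataset): incorrect type - expected 'int', got 'float'"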
|
the-stack_106_29579 | from model.group import Group
import random
def test_delete_some_group(app, db, check_ui):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="NewTest000001"))
old_groups = db.get_group_list()
group = random.choice(old_groups)
app.group.delete_group_by_id(group.id)
new_groups = db.get_group_list()
assert len(old_groups) - 1 == len(new_groups)
    old_groups.remove(group)  # slice from 0 up to but not including 1
assert old_groups == new_groups
if check_ui:
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max) |
the-stack_106_29582 | # python3
# Copyright 2021 InstaDeep Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAPPO system implementation."""
import functools
from typing import Any, Callable, Dict, Optional, Type, Union
import acme
import dm_env
import launchpad as lp
import reverb
import sonnet as snt
from acme import specs as acme_specs
from acme.utils import counting, loggers
import mava
from mava import core
from mava import specs as mava_specs
from mava.components.tf.architectures import DecentralisedValueActorCritic
from mava.environment_loop import ParallelEnvironmentLoop
from mava.systems.tf import savers as tf2_savers
from mava.systems.tf.mappo import builder, execution, training
from mava.utils import lp_utils
from mava.utils.loggers import MavaLogger, logger_utils
from mava.wrappers import DetailedPerAgentStatistics
class MAPPO:
"""MAPPO system."""
def __init__(
self,
environment_factory: Callable[[bool], dm_env.Environment],
network_factory: Callable[[acme_specs.BoundedArray], Dict[str, snt.Module]],
logger_factory: Callable[[str], MavaLogger] = None,
architecture: Type[
DecentralisedValueActorCritic
] = DecentralisedValueActorCritic,
trainer_fn: Type[training.MAPPOTrainer] = training.MAPPOTrainer,
executor_fn: Type[core.Executor] = execution.MAPPOFeedForwardExecutor,
num_executors: int = 1,
num_caches: int = 0,
environment_spec: mava_specs.MAEnvironmentSpec = None,
shared_weights: bool = True,
agent_net_keys: Dict[str, str] = {},
executor_variable_update_period: int = 100,
policy_optimizer: Union[
snt.Optimizer, Dict[str, snt.Optimizer]
] = snt.optimizers.Adam(learning_rate=5e-4),
critic_optimizer: snt.Optimizer = snt.optimizers.Adam(learning_rate=1e-5),
discount: float = 0.99,
lambda_gae: float = 0.99,
clipping_epsilon: float = 0.2,
entropy_cost: float = 0.01,
baseline_cost: float = 0.5,
max_gradient_norm: Optional[float] = None,
max_queue_size: int = 100000,
batch_size: int = 256,
sequence_length: int = 10,
sequence_period: int = 5,
max_executor_steps: int = None,
checkpoint: bool = True,
checkpoint_subpath: str = "~/mava/",
checkpoint_minute_interval: int = 5,
logger_config: Dict = {},
train_loop_fn: Callable = ParallelEnvironmentLoop,
eval_loop_fn: Callable = ParallelEnvironmentLoop,
train_loop_fn_kwargs: Dict = {},
eval_loop_fn_kwargs: Dict = {},
evaluator_interval: Optional[dict] = None,
learning_rate_scheduler_fn: Optional[Dict[str, Callable[[int], None]]] = None,
):
"""Initialise the system
Args:
environment_factory (Callable[[bool], dm_env.Environment]): function to
instantiate an environment.
network_factory (Callable[[acme_specs.BoundedArray],
Dict[str, snt.Module]]): function to instantiate system networks.
logger_factory (Callable[[str], MavaLogger], optional): function to
instantiate a system logger. Defaults to None.
architecture (Type[ DecentralisedValueActorCritic ], optional): system
architecture, e.g. decentralised or centralised. Defaults to
DecentralisedValueActorCritic.
trainer_fn (Type[training.MAPPOTrainer], optional): training type
associated with executor and architecture, e.g. centralised training.
Defaults to training.MAPPOTrainer.
executor_fn (Type[core.Executor], optional): executor type, e.g. feedforward
or recurrent. Defaults to execution.MAPPOFeedForwardExecutor.
num_executors (int, optional): number of executor processes to run in
parallel. Defaults to 1.
num_caches (int, optional): number of trainer node caches. Defaults to 0.
environment_spec (mava_specs.MAEnvironmentSpec, optional): description of
the action, observation spaces etc. for each agent in the system.
Defaults to None.
shared_weights (bool, optional): whether agents should share weights or not.
When agent_net_keys are provided the value of shared_weights is ignored.
Defaults to True.
agent_net_keys: (dict, optional): specifies what network each agent uses.
Defaults to {}.
executor_variable_update_period (int, optional): number of steps before
updating executor variables from the variable source. Defaults to 100.
policy_optimizer (Union[ snt.Optimizer, Dict[str, snt.Optimizer] ],
optional): optimizer(s) for updating policy networks.
Defaults to snt.optimizers.Adam(learning_rate=5e-4).
critic_optimizer (snt.Optimizer, optional): optimizer for updating critic
networks. Defaults to snt.optimizers.Adam(learning_rate=1e-5).
discount (float, optional): discount factor to use for TD updates. Defaults
to 0.99.
lambda_gae (float, optional): scalar determining the mix of bootstrapping
vs further accumulation of multi-step returns at each timestep.
Defaults to 0.99.
clipping_epsilon (float, optional): Hyper-parameter for clipping in the
policy objective. Defaults to 0.2.
entropy_cost (float, optional): contribution of entropy regularization to
the total loss. Defaults to 0.01.
baseline_cost (float, optional): contribution of the value loss to the
total loss. Defaults to 0.5.
max_gradient_norm: value to specify the maximum clipping value for the
gradient norm during optimization.
max_queue_size (int, optional): maximum number of items in the queue.
Defaults to 100000.
batch_size (int, optional): sample batch size for updates. Defaults to 256.
sequence_length (int, optional): recurrent sequence rollout length. Defaults
to 10.
sequence_period (int, optional): consecutive starting points for
overlapping rollouts across a sequence. Defaults to 5.
            max_executor_steps (int, optional): maximum number of steps an
                executor can take in an episode. Defaults to None.
            checkpoint (bool, optional): whether to checkpoint models. Defaults to
                True.
checkpoint_subpath (str, optional): subdirectory specifying where to store
checkpoints. Defaults to "~/mava/".
checkpoint_minute_interval (int): The number of minutes to wait between
checkpoints.
logger_config (Dict, optional): additional configuration settings for the
logger factory. Defaults to {}.
train_loop_fn (Callable, optional): function to instantiate a train loop.
Defaults to ParallelEnvironmentLoop.
eval_loop_fn (Callable, optional): function to instantiate an evaluation
loop. Defaults to ParallelEnvironmentLoop.
train_loop_fn_kwargs (Dict, optional): possible keyword arguments to send
to the training loop. Defaults to {}.
eval_loop_fn_kwargs (Dict, optional): possible keyword arguments to send to
the evaluation loop. Defaults to {}.
learning_rate_scheduler_fn: dict with two functions/classes (one for the
policy and one for the critic optimizer), that takes in a trainer
step t and returns the current learning rate,
e.g. {"policy": policy_lr_schedule ,"critic": critic_lr_schedule}.
See
examples/debugging/simple_spread/feedforward/decentralised/run_maddpg_lr_schedule.py
for an example.
evaluator_interval: An optional condition that is used to
evaluate/test system performance after [evaluator_interval]
condition has been met. If None, evaluation will
happen at every timestep.
E.g. to evaluate a system after every 100 executor episodes,
evaluator_interval = {"executor_episodes": 100}.
"""
if not environment_spec:
environment_spec = mava_specs.MAEnvironmentSpec(
environment_factory(evaluation=False) # type: ignore
)
# set default logger if no logger provided
if not logger_factory:
logger_factory = functools.partial(
logger_utils.make_logger,
directory="~/mava",
to_terminal=True,
time_delta=10,
)
self._architecture = architecture
self._environment_factory = environment_factory
self._network_factory = network_factory
self._logger_factory = logger_factory
self._environment_spec = environment_spec
# Setup agent networks
self._agent_net_keys = agent_net_keys
if not agent_net_keys:
agents = environment_spec.get_agent_ids()
self._agent_net_keys = {
agent: agent.split("_")[0] if shared_weights else agent
for agent in agents
}
self._num_exectors = num_executors
self._num_caches = num_caches
self._max_executor_steps = max_executor_steps
self._checkpoint_subpath = checkpoint_subpath
self._checkpoint = checkpoint
self._logger_config = logger_config
self._train_loop_fn = train_loop_fn
self._train_loop_fn_kwargs = train_loop_fn_kwargs
self._eval_loop_fn = eval_loop_fn
self._eval_loop_fn_kwargs = eval_loop_fn_kwargs
self._checkpoint_minute_interval = checkpoint_minute_interval
self._evaluator_interval = evaluator_interval
self._builder = builder.MAPPOBuilder(
config=builder.MAPPOConfig(
environment_spec=environment_spec,
agent_net_keys=self._agent_net_keys,
executor_variable_update_period=executor_variable_update_period,
discount=discount,
lambda_gae=lambda_gae,
clipping_epsilon=clipping_epsilon,
entropy_cost=entropy_cost,
baseline_cost=baseline_cost,
max_gradient_norm=max_gradient_norm,
max_queue_size=max_queue_size,
batch_size=batch_size,
sequence_length=sequence_length,
sequence_period=sequence_period,
checkpoint=checkpoint,
policy_optimizer=policy_optimizer,
critic_optimizer=critic_optimizer,
checkpoint_subpath=checkpoint_subpath,
checkpoint_minute_interval=checkpoint_minute_interval,
evaluator_interval=evaluator_interval,
learning_rate_scheduler_fn=learning_rate_scheduler_fn,
),
trainer_fn=trainer_fn,
executor_fn=executor_fn,
)
def replay(self) -> Any:
"""Replay data storage.
Returns:
Any: replay data table built according the environment specification.
"""
return self._builder.make_replay_tables(self._environment_spec)
def counter(self, checkpoint: bool) -> Any:
"""Step counter
Args:
checkpoint (bool): whether to checkpoint the counter.
Returns:
Any: step counter object.
"""
if checkpoint:
return tf2_savers.CheckpointingRunner(
counting.Counter(),
time_delta_minutes=self._checkpoint_minute_interval,
directory=self._checkpoint_subpath,
subdirectory="counter",
)
else:
return counting.Counter()
def coordinator(self, counter: counting.Counter) -> Any:
"""Coordination helper for a distributed program
Args:
counter (counting.Counter): step counter object.
Returns:
Any: step limiter object.
"""
return lp_utils.StepsLimiter(counter, self._max_executor_steps) # type: ignore
def trainer(
self,
replay: reverb.Client,
counter: counting.Counter,
) -> mava.core.Trainer:
"""System trainer
Args:
replay (reverb.Client): replay data table to pull data from.
counter (counting.Counter): step counter object.
Returns:
mava.core.Trainer: system trainer.
"""
# Create the networks to optimize (online)
networks = self._network_factory( # type: ignore
environment_spec=self._environment_spec,
agent_net_keys=self._agent_net_keys,
)
# Create system architecture with target networks.
system_networks = self._architecture(
environment_spec=self._environment_spec,
observation_networks=networks["observations"],
policy_networks=networks["policies"],
critic_networks=networks["critics"],
agent_net_keys=self._agent_net_keys,
).create_system()
# create logger
trainer_logger_config = {}
if self._logger_config:
if "trainer" in self._logger_config:
trainer_logger_config = self._logger_config["trainer"]
trainer_logger = self._logger_factory( # type: ignore
"trainer", **trainer_logger_config
)
dataset = self._builder.make_dataset_iterator(replay)
counter = counting.Counter(counter, "trainer")
return self._builder.make_trainer(
networks=system_networks,
dataset=dataset,
counter=counter,
logger=trainer_logger,
)
def executor(
self,
executor_id: str,
replay: reverb.Client,
variable_source: acme.VariableSource,
counter: counting.Counter,
) -> mava.ParallelEnvironmentLoop:
"""System executor
Args:
executor_id (str): id to identify the executor process for logging purposes.
replay (reverb.Client): replay data table to push data to.
variable_source (acme.VariableSource): variable server for updating
network variables.
counter (counting.Counter): step counter object.
Returns:
mava.ParallelEnvironmentLoop: environment-executor loop instance.
"""
# Create the behavior policy.
networks = self._network_factory( # type: ignore
environment_spec=self._environment_spec,
agent_net_keys=self._agent_net_keys,
)
# Create system architecture with target networks.
system = self._architecture(
environment_spec=self._environment_spec,
observation_networks=networks["observations"],
policy_networks=networks["policies"],
critic_networks=networks["critics"],
agent_net_keys=self._agent_net_keys,
)
# create variables
_ = system.create_system()
# behaviour policy networks (obs net + policy head)
behaviour_policy_networks = system.create_behaviour_policy()
# Create the executor.
executor = self._builder.make_executor(
policy_networks=behaviour_policy_networks,
adder=self._builder.make_adder(replay),
variable_source=variable_source,
evaluator=False,
)
        # TODO (Arnu): figure out why factory functions are giving type errors
# Create the environment.
environment = self._environment_factory(evaluation=False) # type: ignore
# Create logger and counter; actors will not spam bigtable.
counter = counting.Counter(counter, "executor")
# Create executor logger
executor_logger_config = {}
if self._logger_config:
if "executor" in self._logger_config:
executor_logger_config = self._logger_config["executor"]
exec_logger = self._logger_factory( # type: ignore
f"executor_{executor_id}", **executor_logger_config
)
# Create the loop to connect environment and executor.
train_loop = self._train_loop_fn(
environment,
executor,
counter=counter,
logger=exec_logger,
**self._train_loop_fn_kwargs,
)
train_loop = DetailedPerAgentStatistics(train_loop)
return train_loop
def evaluator(
self,
variable_source: acme.VariableSource,
counter: counting.Counter,
logger: loggers.Logger = None,
) -> Any:
"""System evaluator (an executor process not connected to a dataset)
Args:
variable_source (acme.VariableSource): variable server for updating
network variables.
counter (counting.Counter): step counter object.
logger (loggers.Logger, optional): logger object. Defaults to None.
Returns:
Any: environment-executor evaluation loop instance for evaluating the
performance of a system.
"""
# Create the behavior policy.
networks = self._network_factory( # type: ignore
environment_spec=self._environment_spec,
agent_net_keys=self._agent_net_keys,
)
# Create system architecture with target networks.
system = self._architecture(
environment_spec=self._environment_spec,
observation_networks=networks["observations"],
policy_networks=networks["policies"],
critic_networks=networks["critics"],
agent_net_keys=self._agent_net_keys,
)
# create variables
_ = system.create_system()
# behaviour policy networks (obs net + policy head)
behaviour_policy_networks = system.create_behaviour_policy()
# Create the agent.
executor = self._builder.make_executor(
policy_networks=behaviour_policy_networks,
variable_source=variable_source,
evaluator=True,
)
# Make the environment.
environment = self._environment_factory(evaluation=True) # type: ignore
# Create logger and counter.
counter = counting.Counter(counter, "evaluator")
evaluator_logger_config = {}
if self._logger_config:
if "evaluator" in self._logger_config:
evaluator_logger_config = self._logger_config["evaluator"]
eval_logger = self._logger_factory( # type: ignore
"evaluator", **evaluator_logger_config
)
# Create the run loop and return it.
# Create the loop to connect environment and executor.
eval_loop = self._eval_loop_fn(
environment,
executor,
counter=counter,
logger=eval_logger,
**self._eval_loop_fn_kwargs,
)
eval_loop = DetailedPerAgentStatistics(eval_loop)
return eval_loop
def build(self, name: str = "mappo") -> Any:
"""Build the distributed system as a graph program.
Args:
name (str, optional): system name. Defaults to "mappo".
Returns:
Any: graph program for distributed system training.
"""
program = lp.Program(name=name)
with program.group("replay"):
replay = program.add_node(lp.ReverbNode(self.replay))
with program.group("counter"):
counter = program.add_node(lp.CourierNode(self.counter, self._checkpoint))
if self._max_executor_steps:
with program.group("coordinator"):
_ = program.add_node(lp.CourierNode(self.coordinator, counter))
with program.group("trainer"):
trainer = program.add_node(lp.CourierNode(self.trainer, replay, counter))
with program.group("evaluator"):
program.add_node(lp.CourierNode(self.evaluator, trainer, counter))
if not self._num_caches:
# Use the trainer as a single variable source.
sources = [trainer]
else:
with program.group("cacher"):
# Create a set of trainer caches.
sources = []
for _ in range(self._num_caches):
cacher = program.add_node(
lp.CacherNode(
trainer, refresh_interval_ms=2000, stale_after_ms=4000
)
)
sources.append(cacher)
with program.group("executor"):
# Add executors which pull round-robin from our variable sources.
for executor_id in range(self._num_exectors):
source = sources[executor_id % len(sources)]
program.add_node(
lp.CourierNode(self.executor, executor_id, replay, source, counter)
)
return program
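# Illustrative launch sketch (assumes suitable `environment_factory` and
# `network_factory` callables, which are not defined in this module):
#   program = MAPPO(
#       environment_factory=environment_factory,
#       network_factory=network_factory,
#   ).build()
#   lp.launch(program)  # hand the graph program to launchpad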
|
the-stack_106_29583 | # global
import mxnet as mx
import math
import numpy as np
from typing import Union, Tuple, Optional, List
from ivy.functional.backends.mxnet import _flat_array_to_1_dim_array, _handle_flat_arrays_in_out
def flip(x: mx.ndarray.ndarray.NDArray,
axis: Optional[Union[int, Tuple[int], List[int]]] = None)\
-> mx.ndarray.ndarray.NDArray:
num_dims = len(x.shape)
if not num_dims:
return x
if axis is None:
new_axis = list(range(num_dims))
else:
new_axis = axis
    if type(new_axis) is int:
        new_axis = [new_axis]
new_axis = [item + num_dims if item < 0 else item for item in new_axis]
return mx.nd.flip(x, new_axis)
def expand_dims(x: mx.ndarray.ndarray.NDArray,
axis: Optional[Union[int, Tuple[int], List[int]]] = None) \
-> mx.ndarray.ndarray.NDArray:
if x.shape == ():
return _flat_array_to_1_dim_array(x)
return mx.nd.expand_dims(x, axis)
# Extra #
# ------#
def split(x, num_or_size_splits=None, axis=0, with_remainder=False):
if x.shape == ():
if num_or_size_splits is not None and num_or_size_splits != 1:
raise Exception('input array had no shape, but num_sections specified was {}'.format(num_or_size_splits))
return [x]
if num_or_size_splits == 1:
return [x]
elif with_remainder and isinstance(num_or_size_splits, int):
num_or_size_splits = x.shape[axis] if not num_or_size_splits else num_or_size_splits
num_chunks = x.shape[axis] / num_or_size_splits
num_chunks_int = math.floor(num_chunks)
remainder_size = int((num_chunks - num_chunks_int) * num_or_size_splits)
num_or_size_splits = [num_or_size_splits]*num_chunks_int + [remainder_size]
if isinstance(num_or_size_splits, (list, tuple)):
csum = [0] + np.cumsum(num_or_size_splits).tolist()
starts = csum[:-1]
ends = csum[1:]
if axis < 0:
slices = [tuple([Ellipsis, slice(s, e, 1)] + [slice(None, None, None)]*int(abs(axis)-1))
for s, e in zip(starts, ends)]
else:
slices = [tuple([slice(None, None, None)]*axis + [slice(s, e, 1)])
for s, e in zip(starts, ends)]
return [x[so] for so in slices]
return mx.nd.split(x, x.shape[axis] if not num_or_size_splits else num_or_size_splits, axis)
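# Usage sketch for split() (illustrative values):
#   x = mx.nd.array([0, 1, 2, 3, 4])
#   split(x, [2, 3])   # -> two NDArrays holding x[0:2] and x[2:5]
#   split(x, 5)        # -> five equal parts along axis 0 via mx.nd.split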
@_handle_flat_arrays_in_out
def repeat(x, repeats, axis=None):
return mx.nd.repeat(x, repeats, axis)
def tile(x, reps):
if isinstance(reps, mx.nd.ndarray.NDArray):
reps = reps.asnumpy().tolist()
return mx.nd.tile(_flat_array_to_1_dim_array(x), reps) |
the-stack_106_29585 | # Copyright 2018 Gaëtan Cassiers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilitary functions.
"""
import re
import networkx as nx
import matplotlib.pyplot as plt
def flatten(x):
return (z for y in x for z in y)
def draw_graph(g, cut=tuple()):
mapping = {node: node+g.nodes[node].get('op', '') for node in g}
cut = [(mapping[src], mapping[end], e_id) for src, end, e_id in cut]
g = nx.relabel_nodes(g, mapping)
pos=nx.nx_agraph.graphviz_layout(g, prog='dot')
n_colors = [n_color(g, n) for n in g]
el = set(g.edges)
nx.draw(
g,
pos,
with_labels=True,
arrows=True,
edgelist=list(el - set(cut)),
node_color=n_colors,
node_size=600)
if cut:
nx.draw_networkx_edges(
g,
pos,
edgelist=list(el & set(cut)),
edge_color='r',
arrows=True)
def is_split_node(n):
return bool(re.search('s[0-9]?$', n))
def n_color(g, n):
if g.nodes[n].get('IN'):
return 'xkcd:pale green'
elif g.nodes[n].get('OUT'):
return 'xkcd:beige'
    elif is_split_node(n):
return 'xkcd:light gray'
else:
return 'xkcd:light blue'
def draw_graph_cut(g, cut):
draw_graph(g, cut)
plt.show()
|
the-stack_106_29586 | #! /usr/local/bin/python
# script.py -- Make typescript of terminal session.
# Usage:
# -a Append to typescript.
# -p Use Python as shell.
# Author: Steen Lumholt.
import os, time, sys
import pty
def read(fd):
data = os.read(fd, 1024)
file.write(data)
return data
shell = 'sh'
filename = 'typescript'
mode = 'w'
if 'SHELL' in os.environ:
shell = os.environ['SHELL']
if '-a' in sys.argv:
mode = 'a'
if '-p' in sys.argv:
shell = 'python'
file = open(filename, mode)
sys.stdout.write('Script started, file is %s\n' % filename)
file.write('Script started on %s\n' % time.ctime(time.time()))
pty.spawn(shell, read)
file.write('Script done on %s\n' % time.ctime(time.time()))
sys.stdout.write('Script done, file is %s\n' % filename)
|
the-stack_106_29587 | """
In Bookmark Archiver, a Link represents a single entry that we track in the
json index. All links pass through all archiver functions and the latest,
most up-to-date canonical output for each is stored in "latest".
Link {
timestamp: str, (how we uniquely id links) _ _ _ _ ___
url: str, | \ / \ |\| ' |
base_url: str, |_/ \_/ | | |
domain: str, _ _ _ _ _ _
tags: str, |_) /| |\| | / `
type: str, | /"| | | | \_,
title: str, ,-'"`-.
sources: [str], /// / @ @ \ \\\\
latest: { \ :=| ,._,. |=: /
..., || ,\ \_../ /. ||
pdf: 'output.pdf', ||','`-._))'`.`||
wget: 'example.com/1234/index.html' `-' (/ `-'
},
history: {
...
pdf: [
{timestamp: 15444234325, status: 'skipped', result='output.pdf'},
...
],
wget: [
            {timestamp: 11534435345, status: 'succeeded', result='donuts.com/eat/them.html'}
]
},
}
"""
import datetime
from html import unescape
from collections import OrderedDict
from util import (
domain,
base_url,
str_between,
get_link_type,
merge_links,
wget_output_path,
)
from config import ANSI
def validate_links(links):
links = archivable_links(links) # remove chrome://, about:, mailto: etc.
links = uniquefied_links(links) # merge/dedupe duplicate timestamps & urls
    links = sorted_links(links)             # deterministically sort the links based on timestamp, url
if not links:
print('[X] No links found :(')
raise SystemExit(1)
for link in links:
link['title'] = unescape(link['title'])
link['latest'] = link.get('latest') or {}
if not link['latest'].get('wget'):
link['latest']['wget'] = wget_output_path(link)
if not link['latest'].get('pdf'):
link['latest']['pdf'] = None
if not link['latest'].get('screenshot'):
link['latest']['screenshot'] = None
if not link['latest'].get('dom'):
link['latest']['dom'] = None
return list(links)
def archivable_links(links):
"""remove chrome://, about:// or other schemed links that cant be archived"""
return (
link
for link in links
if any(link['url'].startswith(s) for s in ('http://', 'https://', 'ftp://'))
)
def uniquefied_links(sorted_links):
"""
ensures that all non-duplicate links have monotonically increasing timestamps
"""
unique_urls = OrderedDict()
lower = lambda url: url.lower().strip()
without_www = lambda url: url.replace('://www.', '://', 1)
without_trailing_slash = lambda url: url[:-1] if url[-1] == '/' else url.replace('/?', '?')
for link in sorted_links:
fuzzy_url = without_www(without_trailing_slash(lower(link['url'])))
if fuzzy_url in unique_urls:
# merge with any other links that share the same url
link = merge_links(unique_urls[fuzzy_url], link)
unique_urls[fuzzy_url] = link
unique_timestamps = OrderedDict()
for link in unique_urls.values():
link['timestamp'] = lowest_uniq_timestamp(unique_timestamps, link['timestamp'])
unique_timestamps[link['timestamp']] = link
return unique_timestamps.values()
def sorted_links(links):
sort_func = lambda link: (link['timestamp'].split('.', 1)[0], link['url'])
return sorted(links, key=sort_func, reverse=True)
def links_after_timestamp(links, timestamp=None):
if not timestamp:
yield from links
return
for link in links:
try:
if float(link['timestamp']) <= float(timestamp):
yield link
except (ValueError, TypeError):
print('Resume value and all timestamp values must be valid numbers.')
def lowest_uniq_timestamp(used_timestamps, timestamp):
"""resolve duplicate timestamps by appending a decimal 1234, 1234 -> 1234.1, 1234.2"""
timestamp = timestamp.split('.')[0]
nonce = 0
# first try 152323423 before 152323423.0
if timestamp not in used_timestamps:
return timestamp
new_timestamp = '{}.{}'.format(timestamp, nonce)
while new_timestamp in used_timestamps:
nonce += 1
new_timestamp = '{}.{}'.format(timestamp, nonce)
return new_timestamp
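# Example: with used_timestamps containing '1234' and '1234.0',
# lowest_uniq_timestamp(used_timestamps, '1234') returns '1234.1'.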
|
the-stack_106_29588 | import time
from collections import OrderedDict
from ignite.engine import Engine, Events
from ignite._utils import _to_hours_mins_secs
class RunningState(object):
"""An object that is used to pass internal and user-defined state between event handlers"""
def __init__(self, **kwargs):
# state, add epoch variable specifically
self._reset()
def _reset(self, **kwargs):
self.epoch = 0
self.iteration = 0
# volatiles
self.max_epochs = 0
self.output = None
self.metrics = OrderedDict()
for k, v in kwargs.items():
setattr(self, k, v)
def state_dict(self):
return {
'epoch': self.epoch,
'iteration': self.iteration
}
def load_state_dict(self, old_state):
epoch = old_state.get('epoch', 0)
iteration = old_state.get('iteration', 0)
self._reset(epoch=epoch, iteration=iteration)
class TeaEngine(Engine):
"""
    TeaEngine mainly fixes the issue where the original Engine doesn't support
    resuming a run from a previous epoch/iteration
"""
def __init__(self, process_function):
super(TeaEngine, self).__init__(process_function)
self.state = RunningState()
def _run_once_on_dataset(self, dataloader):
start_time = time.time()
try:
for batch in dataloader:
self.state.iteration += 1
self._fire_event(Events.ITERATION_STARTED)
self.state.output = self._process_function(self, batch)
self._fire_event(Events.ITERATION_COMPLETED)
if self.should_terminate or self.should_terminate_single_epoch:
self.should_terminate_single_epoch = False
break
except BaseException as e:
self._logger.error("Current run is terminating due to exception: %s", str(e))
self._handle_exception(e)
time_taken = time.time() - start_time
hours, mins, secs = _to_hours_mins_secs(time_taken)
return hours, mins, secs
def run(self, dataloader, max_epochs=1, start_epoch=0, iteration=0):
"""
        Runs the process_function, supporting resumption from a given start_epoch and iteration
:param dataloader:
:param start_epoch: which epoch to start with
:param iteration: which iteration to start with
:param max_epochs:
:return: RunningState
"""
self.state = RunningState(epoch=start_epoch, iteration=iteration, max_epochs=max_epochs)
try:
self._logger.info("Engine run starting with max_epochs={}".format(max_epochs))
start_time = time.time()
self._fire_event(Events.STARTED)
while self.state.epoch < max_epochs and not self.should_terminate:
self.state.epoch += 1
self._fire_event(Events.EPOCH_STARTED)
hours, mins, secs = self._run_once_on_dataset(dataloader)
self._logger.info("Epoch[%s] Complete. Time taken: %02d:%02d:%02d", self.state.epoch, hours, mins, secs)
if self.should_terminate:
break
self._fire_event(Events.EPOCH_COMPLETED)
self._fire_event(Events.COMPLETED)
time_taken = time.time() - start_time
hours, mins, secs = _to_hours_mins_secs(time_taken)
self._logger.info("Engine run complete. Time taken %02d:%02d:%02d" % (hours, mins, secs))
except BaseException as e:
self._logger.error("Engine run is terminating due to exception: %s", str(e))
self._handle_exception(e)
return self.state
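# Usage sketch (illustrative): resuming a previous run from epoch 3.
#   engine = TeaEngine(process_function)
#   state = engine.run(train_loader, max_epochs=10, start_epoch=3,
#                      iteration=3 * len(train_loader))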
|
the-stack_106_29589 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from wolframclient.serializers import export
from wolframclient.utils.api import PIL, numpy
from wolframclient.utils.tests import TestCase as BaseTestCase
from wolframclient.utils.tests import path_to_file_in_data_dir
class TestCase(BaseTestCase):
def test_png_mode_I(self):
with PIL.open(path_to_file_in_data_dir('5x2.png')) as image:
self.assertEqual(
export(image, target_format='wl'),
b'ImportByteArray[ByteArray["iVBORw0KGgoAAAANSUhEUgAAAAUAAAACEAAAAADlkZXCAAAAH0lEQVR4nGP0+P39rf6+ky9/R7Aoen2+9shDWSRCHwCO7ws73c3PRQAAAABJRU5ErkJggg=="], "PNG"]'
)
def test_mode_L(self):
a = numpy.arange(10).reshape((2, 5))
img = PIL.fromarray(a, mode='L')
out = export(img, target_format='wl')
self.assertTrue(
out ==
b'Image[BinaryDeserialize[ByteArray["ODrCEAICBQAAAAAAAAAAAQA="]], "Byte", Rule[ColorSpace, "Grayscale"], Rule[Interleaving, True]]'
or out ==
b'Image[BinaryDeserialize[ByteArray["ODrCEAICBQAAAAAAAAAAAQA="]], "Byte", Rule[Interleaving, True], Rule[ColorSpace, "Grayscale"]]'
)
def test_bool_img(self):
a = numpy.array([[1, 0], [0, 1]], dtype='bool')
img = PIL.fromarray(a)
out = export(img, target_format='wl')
self.assertTrue(
out ==
b'Image[BinaryDeserialize[ByteArray["ODrCEAICAgEAAAA="]], "Bit", Rule[ColorSpace, Automatic], Rule[Interleaving, True]]'
or out ==
b'Image[BinaryDeserialize[ByteArray["ODrCEAICAgEAAAA="]], "Bit", Rule[Interleaving, True], Rule[ColorSpace, Automatic]]'
)
|
the-stack_106_29591 | #!/usr/bin/env python
# coding:utf-8
"""
House price prediction
author: prucehuang
email: [email protected]
date: 2018/12/15
"""
import numpy as np
import os
import sys
import pandas as pd
import matplotlib.pyplot as plot
from pandas.plotting import scatter_matrix
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.impute import SimpleImputer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from scipy.stats import randint
# file path
PROJECT_ROOT_DIR = sys.path[0] + '/../'
HOUSING_PATH = os.path.join(PROJECT_ROOT_DIR, 'datasets', 'housing')
def load_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path, "housing.csv")
return pd.read_csv(csv_path)
def quick_look_data():
print('--------------------------------------------------------------')
print('housing data head')
print('----------------------------')
print(housing.head())
print('--------------------------------------------------------------')
print('housing data describe')
print('----------------------------')
print(housing.describe())
print('--------------------------------------------------------------')
print('housing data info')
print('----------------------------')
print(housing.info())
# 0 longitude 20640 non-null float64
# 1 latitude 20640 non-null float64
    # 2 housing_median_age 20640 non-null float64 has abnormally high values
    # 3 total_rooms 20640 non-null float64
    # 4 total_bedrooms 20433 non-null float64 has null values
    # 5 population 20640 non-null float64
    # 6 households 20640 non-null float64
    # 7 median_income 20640 non-null float64 can be bucketed; useful for stratified train/test splitting
    # 8 median_house_value 20640 non-null float64 has abnormally high values
    # 9 ocean_proximity 20640 non-null object string-typed category
    # nine features in total, plus the target: house price
print('--------------------------------------------------------------')
print('housing data hist')
print('----------------------------')
    # plot a histogram for every feature
housing.hist(bins=50, figsize=(20, 15))
plot.show()
print('--------------------------------------------------------------')
print('ocean proximity value counts')
print('----------------------------')
print(housing["ocean_proximity"].value_counts())
# <1H OCEAN 9136
# INLAND 6551
# NEAR OCEAN 2658
# NEAR BAY 2290
# ISLAND 5
# Name: ocean_proximity, dtype: int64
def discover_visualize_data():
print('--------------------------------------------------------------')
    print('discover the longitude/latitude combination to see how the houses are distributed geographically')
print('----------------------------')
    # longitude and latitude determine the location of the housing block
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.6)
plot.show()
print('--------------------------------------------------------------')
    print('discover the correlation of each feature with house value; plot the highly correlated ones separately')
print('----------------------------')
    # look at the correlation coefficients between the features and house value
housing_corr_matrix = housing.corr()
print(housing_corr_matrix['median_house_value'].sort_values(ascending=False))
# median_house_value 1.000000
    # median_income 0.690647 highly correlated
    # total_rooms 0.133989 somewhat correlated
    # housing_median_age 0.103706 somewhat correlated
    # households 0.063714
    # total_bedrooms 0.047980
    # population -0.026032
    # longitude -0.046349
    # latitude -0.142983 slightly negatively correlated
    # further explore the pairwise relationships between the more highly correlated features
attributes = ["median_house_value", "median_income", "total_rooms", "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12, 8))
plot.show()
    # house value is clearly positively correlated with median income
housing.plot(kind="scatter", x="median_income", y="median_house_value", alpha=0.1)
plot.axis([0, 16, 0, 550000])
plot.show()
print('--------------------------------------------------------------')
print('尝试一些组合特征是不是会更好')
print('----------------------------')
housing["rooms_per_household"] = housing["total_rooms"] / housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"] / housing["total_rooms"]
housing["population_per_household"] = housing["population"] / housing["households"]
housing["longitude_latitude"] = (housing["longitude"] + housing["latitude"])
housing_corr_matrix = housing.corr()
print(housing_corr_matrix['median_house_value'].sort_values(ascending=False))
# median_house_value 1.000000
    # median_income 0.690647 strong positive correlation
    # rooms_per_household 0.158485 promising new combined feature
    # total_rooms 0.133989
    # housing_median_age 0.103706
    # households 0.063714 can be dropped
    # total_bedrooms 0.047980 can be dropped
    # population_per_household -0.022030 can be dropped
    # population -0.026032 can be dropped
    # longitude -0.046349 can be dropped
    # latitude -0.142983 can be dropped
    # bedrooms_per_room -0.257419 promising new combined feature
    # longitude_latitude -0.488857 promising new combined feature
housing.plot(kind="scatter", x="longitude_latitude", y="median_house_value", alpha=0.1)
plot.show()
def get_no_null_data(df):
'''
    axis=1 reduces over the columns and returns a Series whose index is the original index, i.e. the rows that contain a null value in any column
    its counterpart axis=0 reduces over the index and returns a Series whose index is the original column labels, i.e. the columns that contain a null value in any row
    the counterpart of any() is all(): any() returns True if any element is True, all() returns False if any element is False
'''
return df[df.isnull().any(axis=1)]
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
def __init__(self, add_bedrooms_per_room = True): # no *args or **kargs
self.add_bedrooms_per_room = add_bedrooms_per_room
def fit(self, X, y=None):
return self # nothing else to do
def transform(self, X, y=None):
# column index
longitude_ix, latitude_ix, rooms_ix, bedrooms_ix, population_ix, household_ix = 0, 1, 3, 4, 5, 6
rooms_per_household = X[:, rooms_ix] / X[:, household_ix]
population_per_household = X[:, population_ix] / X[:, household_ix]
longitude_latitude = X[:, longitude_ix] + X[:, latitude_ix]
if self.add_bedrooms_per_room:
bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
return np.c_[X, rooms_per_household, population_per_household, longitude_latitude,
bedrooms_per_room]
else:
return np.c_[X, rooms_per_household, population_per_household, longitude_latitude]
class CustomLabelBinarizer(BaseEstimator, TransformerMixin):
def __init__(self, sparse_output = False):
self.sparse_output = sparse_output
def fit(self, X, y = None):
return self
def transform(self, X, y = None):
enc = LabelBinarizer(sparse_output = self.sparse_output)
return enc.fit_transform(X)
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names].values
def feature_clear_prepare():
    ## handle NULL values: fill them with the median
    print(get_no_null_data(housing).head())  # inspect the rows that contain null feature values
    imputer = SimpleImputer(strategy="median")  # replace null values with the median
housing_num = housing.select_dtypes(include=[np.number])
imputer.fit(housing_num)
print(imputer.statistics_) # equal print(housing_num.median().values)
X = imputer.transform(housing_num)
housing_tr = pd.DataFrame(X, columns=housing_num.columns, index=list(housing.index.values))
print(housing_tr.head())
print(housing_labels.head())
    ## handle the string-valued categorical feature
housing_cat = housing[['ocean_proximity']]
print(housing_cat.head(10))
housing_cat_1hot = LabelBinarizer().fit_transform(housing_cat)
print(housing_cat_1hot)
    ## define combined features
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing.values)
housing_extra_attribs = pd.DataFrame(
housing_extra_attribs,
columns=list(housing.columns) + ['rooms_per_household', 'population_per_household', 'longitude_latitude'])
print(housing_extra_attribs.head())
def feature_clear(housing):
housing_num = housing.select_dtypes(include=[np.number])
num_attribs = list(housing_num)
cat_attribs = ['ocean_proximity']
num_pipeline = Pipeline([
('selector', DataFrameSelector(num_attribs)),
('imputer', SimpleImputer(strategy="median")),
('attribs_adder', CombinedAttributesAdder()),
('std_scaler', StandardScaler()),
])
cat_pipeline = Pipeline([
('selector', DataFrameSelector(cat_attribs)),
('one_hot', OneHotEncoder()),
])
full_pipeline = ColumnTransformer([
("num_pipeline", num_pipeline, num_attribs),
("cat_pipeline", cat_pipeline, cat_attribs),
])
return full_pipeline.fit(housing)
def display_score(model, housing_prepared, housing_labels):
housing_predictions = model.predict(housing_prepared)
    # squared error
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
print('mean_squared_error', tree_rmse)
    # absolute error
tree_mae = mean_absolute_error(housing_labels, housing_predictions)
print('mean_absolute_error', tree_mae)
    # cross-validation error
scores = cross_val_score(model, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
scores = np.sqrt(-scores)
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard deviation:", scores.std())
print(pd.Series(scores).describe())
if __name__ == "__main__":
    pd.set_option('display.width', 1000)  # set the character display width
    pd.set_option('display.max_columns', None)  # print all columns; max_rows similarly controls printing all rows
'''
    Load the data and split it into training and test sets
'''
housing = load_data()
# quick_look_data()
    # if you want stratified sampling by income, use StratifiedShuffleSplit
# # Divide by 1.5 to limit the number of income categories
# housing["income_cat"] = np.ceil(housing["median_income"] / 1.5)
# # Label those above 5 as 5
# housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True)
# split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
# for train_index, test_index in split.split(housing, housing["income_cat"]):
# print(train_index, test_index)
# strat_train_set = housing.loc[train_index]
# strat_test_set = housing.loc[test_index]
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=13)
train_set_size = len(train_set)
test_set_size = len(test_set)
print('train set count', train_set_size, ', percent', train_set_size*1.0/(train_set_size+test_set_size),
'\ntest set count', test_set_size, ', percent', test_set_size*1.0/(train_set_size+test_set_size), '\n')
housing = train_set.copy()
housing_labels = housing["median_house_value"].copy()
housing = housing.drop("median_house_value", axis=1) # drop labels for training set
'''
    Explore the correlations between the features
'''
# discover_visualize_data()
'''
    Feature processing
'''
# feature_clear_prepare()
full_pipeline = feature_clear(housing)
housing_prepared = full_pipeline.transform(housing)
'''
    Models
'''
    # linear regression model
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
print('------------------LinearRegression-----------------------')
display_score(lin_reg, housing_prepared, housing_labels)
    # decision tree model
tree_reg = DecisionTreeRegressor(random_state=42)
tree_reg.fit(housing_prepared, housing_labels)
print('------------------DecisionTreeRegressor-----------------------')
display_score(tree_reg, housing_prepared, housing_labels)
    # random forest
forest_reg = RandomForestRegressor(random_state=42, n_estimators=10)
forest_reg.fit(housing_prepared, housing_labels)
print('------------------RandomForestRegressor-----------------------')
display_score(forest_reg, housing_prepared, housing_labels)
svm_reg = SVR(kernel="linear")
svm_reg.fit(housing_prepared, housing_labels)
print('------------------SVR Linear-----------------------')
display_score(svm_reg, housing_prepared, housing_labels)
'''
    Automatic hyperparameter selection -- GridSearchCV
'''
param_grid = [
# try 12 (3×4) combinations of hyperparameters
{'n_estimators': [3, 10, 30, 40, 50], 'max_features': [2, 4, 6, 8]},
# then try 6 (2×3) combinations with bootstrap set as False
{'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]},
]
# n_estimators: The number of trees in the forest
# max_features: The number of features to consider when looking for the best split:
# bootstrap: Whether bootstrap samples are used when building trees.
forest_reg = RandomForestRegressor(random_state=42)
# train across 5 folds, that's a total of (12+6)*5=90 rounds of training
grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error',
return_train_score=True)
grid_search.fit(housing_prepared, housing_labels)
print(grid_search.best_params_)
print(grid_search.best_estimator_)
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
print(np.sqrt(-mean_score), params)
print(pd.DataFrame(grid_search.cv_results_))
'''
    Automatic hyperparameter selection -- RandomizedSearchCV
'''
param_distribs = {
'n_estimators': randint(low=1, high=200),
'max_features': randint(low=1, high=8),
}
forest_reg = RandomForestRegressor(random_state=42)
rnd_search = RandomizedSearchCV(forest_reg, param_distributions=param_distribs,
n_iter=10, cv=5, scoring='neg_mean_squared_error', random_state=42)
rnd_search.fit(housing_prepared, housing_labels)
print(rnd_search.best_params_)
print(rnd_search.best_estimator_)
cvres = rnd_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
print(np.sqrt(-mean_score), params)
'''
    Analyze feature importances from the model
'''
feature_importances = rnd_search.best_estimator_.feature_importances_
extra_attribs = ['rooms_per_household', 'population_per_household', 'longitude_latitude', 'bedrooms_per_room']
num_attribs = list(housing.select_dtypes(include=[np.number]))
cat_one_hot_attribs = list(full_pipeline.named_transformers_["cat_pipeline"]
.named_steps['one_hot'].categories_[0])
attributes = num_attribs + extra_attribs + cat_one_hot_attribs
print(sorted(zip(feature_importances, attributes), reverse=True))
# [(0.310678845742785, 'median_income'),
# (0.19356258849160035, 'longitude_latitude'),
# (0.10248760759120326, 'population_per_household'),
# (0.07228343188881887, 'INLAND'),
# (0.061960647537243355, 'bedrooms_per_room'),
# (0.058436650736405964, 'rooms_per_household'),
# (0.05251662618136168, 'latitude'),
# (0.05164879674447034, 'longitude'),
# (0.03418313966433936, 'housing_median_age'),
# (0.0138798046040645, 'population'),
# (0.013783289597624714, 'total_rooms'),
# (0.012858213758831696, 'total_bedrooms'),
# (0.012420175910613226, 'households'),
# (0.004607470514336529, '<1H OCEAN'),
# (0.00260023527307223, 'NEAR OCEAN'),
# (0.0019737702779430103, 'NEAR BAY'),
# (0.00011870548528599022, 'ISLAND')]
'''
    Predict on the test set
'''
final_model = rnd_search.best_estimator_
y_test = test_set["median_house_value"].copy()
X_test = test_set.drop("median_house_value", axis=1)
X_test_prepared = full_pipeline.transform(X_test)
print('------------------final model -----------------------')
display_score(final_model, X_test_prepared, y_test)
|
the-stack_106_29594 | import ci.util
import container.registry
import container.util
def filter_image_file(
in_file:str,
out_file:str,
remove_files:[str]=[],
):
'''
processes an OCI container image [0] (from a local tar file) and writes a
modified copy to the specified `out_file`.
All files (specified as absolute paths w/o loading slash (/)) are removed from
all layer archives. Contained metadata is updated accordingly.
[0] https://github.com/opencontainers/image-spec
'''
if not remove_files:
ci.util.warning('no files to remove were specified - the output will remain unaltered')
container.util.filter_container_image(
image_file=in_file,
out_file=out_file,
remove_entries=remove_files,
)
def filter_image(
source_ref:str,
target_ref:str,
remove_files:[str]=[],
):
container.util.filter_image(
source_ref=source_ref,
target_ref=target_ref,
remove_files=remove_files,
)
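# Illustrative invocation (file names and paths below are placeholders):
#   filter_image_file(
#       in_file='image.tar',
#       out_file='image.filtered.tar',
#       remove_files=['etc/ssl/private/server.key'],
#   )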
|
the-stack_106_29597 | '''
Created on 02.05.2016
@author: lemmerfn
'''
import itertools
from functools import partial
from heapq import heappush, heappop
from collections.abc import Iterable
import numpy as np
import pandas as pd
import pysubgroup as ps
def add_if_required(result, sg, quality, task, check_for_duplicates=False, statistics=None):
if quality > task.min_quality:
if not ps.constraints_satisfied(task.constraints, sg, statistics, task.data):
return
if check_for_duplicates and (quality, sg, statistics) in result:
return
if len(result) < task.result_set_size:
heappush(result, (quality, sg, statistics))
elif quality > result[0][0]:
heappop(result)
heappush(result, (quality, sg, statistics))
def minimum_required_quality(result, task):
if len(result) < task.result_set_size:
return task.min_quality
else:
return result[0][0]
# Returns the cutpoints for discretization
def equal_frequency_discretization(data, attribute_name, nbins=5, weighting_attribute=None):
cutpoints = []
if weighting_attribute is None:
cleaned_data = data[attribute_name]
cleaned_data = cleaned_data[~np.isnan(cleaned_data)]
sorted_data = sorted(cleaned_data)
number_instances = len(sorted_data)
for i in range(1, nbins):
position = i * number_instances // nbins
while True:
if position >= number_instances:
break
val = sorted_data[position]
if val not in cutpoints:
break
position += 1
# print (sorted_data [position])
if val not in cutpoints:
cutpoints.append(val)
else:
cleaned_data = data[[attribute_name, weighting_attribute]]
cleaned_data = cleaned_data[~np.isnan(cleaned_data[attribute_name])]
cleaned_data.sort(order=attribute_name)
overall_weights = cleaned_data[weighting_attribute].sum()
remaining_weights = overall_weights
bin_size = overall_weights / nbins
sum_of_weights = 0
for row in cleaned_data:
sum_of_weights += row[weighting_attribute]
if sum_of_weights > bin_size:
if not row[attribute_name] in cutpoints:
cutpoints.append(row[attribute_name])
remaining_weights = remaining_weights - sum_of_weights
if remaining_weights < 1.5 * (bin_size):
break
sum_of_weights = 0
return cutpoints
def conditional_invert(val, invert):
return - 2 * (invert - 0.5) * val
def results_df_autoround(df):
return df.round({
'quality': 3,
'size_sg': 0,
'size_dataset': 0,
'positives_sg': 0,
'positives_dataset': 0,
'size_complement': 0,
'relative_size_sg': 3,
'relative_size_complement': 3,
'coverage_sg': 3,
'coverage_complement': 3,
'target_share_sg': 3,
'target_share_complement': 3,
'target_share_dataset': 3,
'lift': 3,
'size_sg_weighted': 1,
'size_dataset_weighted': 1,
'positives_sg_weighted': 1,
'positives_dataset_weighted': 1,
'size_complement_weighted': 1,
'relative_size_sg_weighted': 3,
'relative_size_complement_weighted': 3,
'coverage_sg_weighted': 3,
'coverage_complement_weighted': 3,
'target_share_sg_weighted': 3,
'target_share_complement_weighted': 3,
'target_share_dataset_weighted': 3,
'lift_weighted': 3})
def perc_formatter(x):
return "{0:.1f}%".format(x * 100)
def float_formatter(x, digits=2):
return ("{0:." + str(digits) + "f}").format(x)
def is_categorical_attribute(data, attribute_name):
return attribute_name in data.select_dtypes(exclude=['number']).columns.values
def is_numerical_attribute(data, attribute_name):
return attribute_name in data.select_dtypes(include=['number']).columns.values
def remove_selectors_with_attributes(selector_list, attribute_list):
return [x for x in selector_list if x.attributeName not in attribute_list]
def effective_sample_size(weights):
return sum(weights) ** 2 / sum(weights ** 2)
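# Example: effective_sample_size(np.array([1.0, 1.0, 1.0, 1.0])) == 4.0,
# while unequal weights shrink it, e.g. effective_sample_size(np.array([3.0, 1.0])) == 1.6.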
# from https://docs.python.org/3/library/itertools.html#recipes
def powerset(iterable, max_length=None):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
if max_length is None:
max_length = len(s)
if max_length < len(s):
max_length = len(s)
return itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(max_length))
def overlap(sg, another_sg, data):
cover_sg = sg.covers(data)
cover_another_sg = another_sg.covers(data)
union = np.logical_or(cover_sg, cover_another_sg)
intercept = np.logical_and(cover_sg, cover_another_sg)
sim = np.sum(intercept) / np.sum(union)
return sim
#####
# bitset operations
#####
def to_bits(list_of_ints):
v = 0
for x in list_of_ints:
v += 1 << x
return v
def count_bits(bitset_as_int):
c = 0
while bitset_as_int > 0:
c += 1
bitset_as_int &= bitset_as_int - 1
return c
def find_set_bits(bitset_as_int):
while bitset_as_int > 0:
x = bitset_as_int.bit_length() - 1
yield x
bitset_as_int = bitset_as_int - (1 << x)
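# Example: to_bits([0, 2]) == 5, count_bits(5) == 2,
# and list(find_set_bits(5)) == [2, 0] (highest set bit first).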
#####
# TID-list operations
#####
def intersect_of_ordered_list(list_1, list_2):
result = []
i = 0
j = 0
while i < len(list_1) and j < len(list_2):
if list_1[i] < list_2[j]:
i += 1
elif list_2[j] < list_1[i]:
j += 1
else:
result.append(list_1[i])
j += 1
i += 1
return result
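# Example: intersect_of_ordered_list([1, 3, 5, 7], [3, 4, 5]) == [3, 5]
# (both inputs must already be sorted in ascending order).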
class SubgroupDiscoveryResult:
def __init__(self, results, task):
self.task = task
self.results = results
assert isinstance(results, Iterable)
def to_descriptions(self):
return [(qual, sgd) for qual, sgd, stats in self.results]
def to_table(self, statistics_to_show=None, print_header=True, include_target=False):
if statistics_to_show is None:
statistics_to_show = type(self.task.target).statistic_types
table = []
if print_header:
row = ["quality", "subgroup"]
for stat in statistics_to_show:
row.append(stat)
table.append(row)
for (q, sg, stats) in self.results:
stats = self.task.target.calculate_statistics(sg, self.task.data, stats)
row = [str(q), str(sg)]
if include_target:
row.append(str(self.task.target))
for stat in statistics_to_show:
row.append(str(stats[stat]))
table.append(row)
return table
def to_dataframe(self, statistics_to_show=None, autoround=False, include_target=False):
if statistics_to_show is None:
statistics_to_show = type(self.task.target).statistic_types
res = self.to_table(statistics_to_show, True, include_target)
headers = res.pop(0)
df = pd.DataFrame(res, columns=headers, dtype=np.float64)
if autoround:
df = results_df_autoround(df)
return df
def to_latex(self, statistics_to_show=None):
if statistics_to_show is None:
statistics_to_show = type(self.task.target).statistic_types
df = self.to_dataframe(statistics_to_show)
latex = df.to_latex(index=False, col_space=10, formatters={
'quality': partial(float_formatter, digits=3),
'size_sg': partial(float_formatter, digits=0),
'size_dataset': partial(float_formatter, digits=0),
'positives_sg': partial(float_formatter, digits=0),
'positives_dataset': partial(float_formatter, digits=0),
'size_complement': partial(float_formatter, digits=0),
'relative_size_sg': perc_formatter,
'relative_size_complement': perc_formatter,
'coverage_sg': perc_formatter,
'coverage_complement': perc_formatter,
'target_share_sg': perc_formatter,
'target_share_complement': perc_formatter,
'target_share_dataset': perc_formatter,
'lift': partial(float_formatter, digits=1)})
latex = latex.replace(' AND ', r' $\wedge$ ')
return latex
|
the-stack_106_29598 | import os
import requests
import json
from flask import Flask, render_template, request
from twilio.rest import Client
app = Flask(__name__)
account_sid = os.environ.get('account_sid')
auth_token = os.environ.get('auth_token')
my_number = os.environ.get('my_number')
client = Client(account_sid, auth_token)
@app.route('/', methods=['GET', 'POST'])
def home():
if request.method == 'GET':
return render_template('index.html')
else:
message = request.form['message']
client.messages.create(
to=my_number,
from_="+14159407338",
body=message)
return render_template('success.html', message=message)
if __name__ == '__main__':
port = int(os.environ.get('PORT', 8000))
app.run(host='0.0.0.0', port=port, debug=True)
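# Note: the Twilio credentials and destination number are read from the
# environment and must be set before starting the app, e.g. (placeholder values):
#   export account_sid=ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#   export auth_token=your_auth_token
#   export my_number=+15551234567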
|
the-stack_106_29599 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM Corp. 2017 and later.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import logging
import warnings
import numpy as np
from qiskit.aqua.algorithms.many_sample.qsvm._qsvm_abc import _QSVM_ABC
from qiskit.aqua.utils import map_label_to_class_name, optimize_svm
logger = logging.getLogger(__name__)
class _QSVM_Binary(_QSVM_ABC):
"""The binary classifier."""
def construct_circuit(self, x1, x2, measurement=False):
warnings.warn("Please use the 'construct_circuit' in the qsvm class directly.",
DeprecationWarning)
return self._qalgo.construct_circuit(x1, x2, measurement)
def construct_kernel_matrix(self, x1_vec, x2_vec=None):
warnings.warn("Please use the 'construct_kernel_matrix' in the qsvm "
"class directly.", DeprecationWarning)
return self._qalgo.construct_kernel_matrix(x1_vec, x2_vec, self._qalgo.quantum_instance)
def get_predicted_confidence(self, data, return_kernel_matrix=False):
"""Get predicted confidence.
Args:
data (numpy.ndarray): NxD array, where N is the number of data,
D is the feature dimension.
Returns:
numpy.ndarray: Nx1 array, predicted confidence
numpy.ndarray (optional): the kernel matrix, NxN1, where N1 is
the number of support vectors.
"""
alphas = self._ret['svm']['alphas']
bias = self._ret['svm']['bias']
svms = self._ret['svm']['support_vectors']
yin = self._ret['svm']['yin']
kernel_matrix = self._qalgo.construct_kernel_matrix(data, svms)
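        # Kernel SVM decision function: f(x_i) = sum_j alpha_j * y_j * K(x_i, sv_j) + b.
        # The sign of f gives the predicted class; its magnitude is the confidence.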
confidence = np.sum(yin * alphas * kernel_matrix, axis=1) + bias
if return_kernel_matrix:
return confidence, kernel_matrix
else:
return confidence
def train(self, data, labels):
"""
Train the svm.
Args:
data (numpy.ndarray): NxD array, where N is the number of data,
D is the feature dimension.
labels (numpy.ndarray): Nx1 array, where N is the number of data
"""
scaling = 1.0 if self._qalgo.quantum_instance.is_statevector else None
kernel_matrix = self._qalgo.construct_kernel_matrix(data)
labels = labels * 2 - 1 # map label from 0 --> -1 and 1 --> 1
labels = labels.astype(np.float)
[alpha, b, support] = optimize_svm(kernel_matrix, labels, scaling=scaling)
support_index = np.where(support)
alphas = alpha[support_index]
svms = data[support_index]
yin = labels[support_index]
self._ret['kernel_matrix_training'] = kernel_matrix
self._ret['svm'] = {}
self._ret['svm']['alphas'] = alphas
self._ret['svm']['bias'] = b
self._ret['svm']['support_vectors'] = svms
self._ret['svm']['yin'] = yin
def test(self, data, labels):
"""
Test the svm.
Args:
data (numpy.ndarray): NxD array, where N is the number of data,
D is the feature dimension.
labels (numpy.ndarray): Nx1 array, where N is the number of data
Returns:
float: accuracy
"""
predicted_confidence, kernel_matrix = self.get_predicted_confidence(data, True)
binarized_predictions = (np.sign(predicted_confidence) + 1) / 2 # remap -1 --> 0, 1 --> 1
predicted_labels = binarized_predictions.astype(int)
accuracy = np.sum(predicted_labels == labels.astype(int)) / labels.shape[0]
logger.debug("Classification success for this set is {:.2f}% \n".format(accuracy * 100.0))
self._ret['kernel_matrix_testing'] = kernel_matrix
self._ret['testing_accuracy'] = accuracy
# test_success_ratio is deprecated
self._ret['test_success_ratio'] = accuracy
return accuracy
def predict(self, data):
"""
Predict using the svm.
Args:
data (numpy.ndarray): NxD array, where N is the number of data,
D is the feature dimension.
Returns:
numpy.ndarray: predicted labels, Nx1 array
"""
predicted_confidence = self.get_predicted_confidence(data)
binarized_predictions = (np.sign(predicted_confidence) + 1) / 2 # remap -1 --> 0, 1 --> 1
predicted_labels = binarized_predictions.astype(int)
return predicted_labels
def run(self):
"""Put the train, test, predict together."""
self.train(self._qalgo.training_dataset[0], self._qalgo.training_dataset[1])
if self._qalgo.test_dataset is not None:
self.test(self._qalgo.test_dataset[0], self._qalgo.test_dataset[1])
if self._qalgo.datapoints is not None:
predicted_labels = self.predict(self._qalgo.datapoints)
predicted_classes = map_label_to_class_name(predicted_labels,
self._qalgo.label_to_class)
self._ret['predicted_labels'] = predicted_labels
self._ret['predicted_classes'] = predicted_classes
return self._ret
def load_model(self, file_path):
model_npz = np.load(file_path)
model = {'alphas': model_npz['alphas'],
'bias': model_npz['bias'],
'support_vectors': model_npz['support_vectors'],
'yin': model_npz['yin']}
self._ret['svm'] = model
def save_model(self, file_path):
model = {'alphas': self._ret['svm']['alphas'],
'bias': self._ret['svm']['bias'],
'support_vectors': self._ret['svm']['support_vectors'],
'yin': self._ret['svm']['yin']}
np.savez(file_path, **model)
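# Flow summary (sketch): run() trains on training_dataset, reports testing_accuracy on
# test_dataset when present, and maps predicted labels to class names for any extra
# datapoints; save_model()/load_model() round-trip the trained alphas, bias, support
# vectors and yin through a NumPy .npz file.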
|
the-stack_106_29603 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright 2020 Ross Wightman
# Modified Model definition
import torch
import torch.nn as nn
from functools import partial
import math
import warnings
import torch.nn.functional as F
import numpy as np
import torch.utils
import torch.utils.checkpoint
from src.modeling.timesformer.vit_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from src.modeling.timesformer.helpers import load_pretrained, load_pretrained_kinetics, load_pretrained_imagenet, load_pretrained_CLIP_ViT
from src.modeling.timesformer.vit_utils import DropPath, to_2tuple, trunc_normal_
from src.modeling.xbert import BertAttention
# from .build import MODEL_REGISTRY
from torch import einsum
from einops import rearrange, reduce, repeat
import src.utils.grad_ckpt as grad_ckpt
from src.utils.logger import LOGGER, TB_LOGGER, add_log_to_file, RunningMeter
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
'vit_base_patch16_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
),
}
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., with_qkv=True):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.with_qkv = with_qkv
if self.with_qkv:
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_drop = nn.Dropout(attn_drop)
def forward(self, x):
B, N, C = x.shape
if self.with_qkv:
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads,
C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
else:
qkv = x.reshape(B, N, self.num_heads, C //
self.num_heads).permute(0, 2, 1, 3)
q, k, v = qkv, qkv, qkv
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
if self.with_qkv:
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, layer_num, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0.1, act_layer=nn.GELU, norm_layer=nn.LayerNorm, attention_type='divided_space_time',
use_grad_checkpointing=False):
super().__init__()
self.attention_type = attention_type
assert(attention_type in ['divided_space_time',
'space_only', 'joint_space_time'])
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
# Temporal Attention Parameters
if self.attention_type == 'divided_space_time':
self.temporal_norm1 = norm_layer(dim)
self.temporal_attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.temporal_fc = nn.Linear(dim, dim)
# drop path
self.drop_path = DropPath(
drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
# [dxli]
self.layer_num = layer_num
self.use_grad_checkpointing = use_grad_checkpointing
def forward(self, x, B, T, W):
num_spatial_tokens = (x.size(1) - 1) // T
H = num_spatial_tokens // W
if self.attention_type in ['space_only', 'joint_space_time']:
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
elif self.attention_type == 'divided_space_time':
# Temporal
xt = x[:, 1:, :]
xt = rearrange(xt, 'b (h w t) m -> (b h w) t m',
b=B, h=H, w=W, t=T)
if self.use_grad_checkpointing:
# temporal_attn_out = torch.utils.checkpoint.checkpoint(self.temporal_attn, self.temporal_norm1(xt))
temporal_attn_out = grad_ckpt.CheckpointFunction.apply(self.temporal_attn, 1, self.temporal_norm1(xt))
else:
temporal_attn_out = self.temporal_attn(self.temporal_norm1(xt))
# res_temporal = self.drop_path(
# self.temporal_attn(self.temporal_norm1(xt)))
res_temporal = self.drop_path(temporal_attn_out)
res_temporal = rearrange(
res_temporal, '(b h w) t m -> b (h w t) m', b=B, h=H, w=W, t=T)
res_temporal = self.temporal_fc(res_temporal)
xt = x[:, 1:, :] + res_temporal
# Spatial
init_cls_token = x[:, 0, :].unsqueeze(1)
cls_token = init_cls_token.repeat(1, T, 1)
cls_token = rearrange(
cls_token, 'b t m -> (b t) m', b=B, t=T).unsqueeze(1)
xs = xt
xs = rearrange(xs, 'b (h w t) m -> (b t) (h w) m',
b=B, h=H, w=W, t=T)
xs = torch.cat((cls_token, xs), 1)
# [origial]
# res_spatial = self.drop_path(self.attn(self.norm1(xs)))
if self.use_grad_checkpointing:
spatial_attn_out = grad_ckpt.CheckpointFunction.apply(self.attn, 1, self.norm1(xs))
else:
# spatial_attn_out = torch.utils.checkpoint.checkpoint(self.attn, self.norm1(xs))
spatial_attn_out = self.attn(self.norm1(xs))
res_spatial = self.drop_path(spatial_attn_out)
# Taking care of CLS token
cls_token = res_spatial[:, 0, :]
cls_token = rearrange(cls_token, '(b t) m -> b t m', b=B, t=T)
# averaging for every frame
cls_token = torch.mean(cls_token, 1, True)
res_spatial = res_spatial[:, 1:, :]
res_spatial = rearrange(
res_spatial, '(b t) (h w) m -> b (h w t) m', b=B, h=H, w=W, t=T)
res = res_spatial
x = xt
# Mlp
x = torch.cat((init_cls_token, x), 1) + \
torch.cat((cls_token, res), 1)
x_res = x
x = self.norm2(x)
# x = x + self.drop_path(self.mlp(self.norm2(x)))
# MLP
# [origial]
# x = x_res + self.drop_path(self.mlp(x))
if self.use_grad_checkpointing:
# mlp_out = torch.utils.checkpoint.checkpoint(self.mlp, x)
mlp_out = grad_ckpt.CheckpointFunction.apply(self.mlp, 1, x)
else:
mlp_out = self.mlp(x)
x = x_res + self.drop_path(mlp_out)
return x
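# Note on the divided space-time block above: temporal attention first mixes the T frame
# tokens at each spatial location ('(b h w) t m'), then spatial attention mixes the H*W (+1
# CLS) tokens within each frame ('(b t) (h w) m'). Per sample this costs roughly
# O(HW*T^2 + T*(HW)^2) attention instead of the O((T*HW)^2) of joint space-time attention.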
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * \
(img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim,
kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, T, H, W = x.shape
x = rearrange(x, 'b c t h w -> (b t) c h w')
x = self.proj(x)
W = x.size(-1)
x = x.flatten(2).transpose(1, 2)
return x, T, W
class VisionTransformer(nn.Module):
""" Vision Transformere
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0.1, hybrid_backbone=None, norm_layer=nn.LayerNorm, num_frames=8, attention_type='divided_space_time', dropout=0.,
cross_attention_config=None, use_grad_checkpointing=False):
super().__init__()
self.attention_type = attention_type
self.depth = depth
self.dropout = nn.Dropout(dropout)
self.num_classes = num_classes
# num_features for consistency with other models
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
# Positional Embeddings
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches+1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
if self.attention_type != 'space_only':
self.time_embed = nn.Parameter(
torch.zeros(1, num_frames, embed_dim))
self.time_drop = nn.Dropout(p=drop_rate)
# Attention Blocks
dpr = [x.item() for x in torch.linspace(0, drop_path_rate,
self.depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(layer_num=i, use_grad_checkpointing=use_grad_checkpointing,
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, attention_type=self.attention_type)
for i in range(self.depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(
embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
# initialization of temporal attention weights
if self.attention_type == 'divided_space_time':
i = 0
for m in self.blocks.modules():
m_str = str(m)
if 'Block' in m_str:
if i > 0:
nn.init.constant_(m.temporal_fc.weight, 0)
nn.init.constant_(m.temporal_fc.bias, 0)
i += 1
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token', 'time_embed'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(
self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x, return_all_tokens=False):
B = x.shape[0]
x, T, W = self.patch_embed(x)
cls_tokens = self.cls_token.expand(x.size(0), -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# resizing the positional embeddings in case they don't match the input at inference
if x.size(1) != self.pos_embed.size(1):
pos_embed = self.pos_embed
cls_pos_embed = pos_embed[0, 0, :].unsqueeze(0).unsqueeze(1)
other_pos_embed = pos_embed[0, 1:, :].unsqueeze(0).transpose(1, 2)
P = int(other_pos_embed.size(2) ** 0.5)
H = x.size(1) // W
other_pos_embed = other_pos_embed.reshape(1, x.size(2), P, P)
new_pos_embed = F.interpolate(
other_pos_embed, size=(H, W), mode='nearest')
new_pos_embed = new_pos_embed.flatten(2)
new_pos_embed = new_pos_embed.transpose(1, 2)
new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)
x = x + new_pos_embed
else:
x = x + self.pos_embed
x = self.pos_drop(x)
# Time Embeddings
if self.attention_type != 'space_only':
cls_tokens = x[:B, 0, :].unsqueeze(1)
x = x[:, 1:]
x = rearrange(x, '(b t) n m -> (b n) t m', b=B, t=T)
# Resizing time embeddings in case they don't match
if T != self.time_embed.size(1):
time_embed = self.time_embed.transpose(1, 2)
new_time_embed = F.interpolate(
time_embed, size=(T), mode='nearest')
new_time_embed = new_time_embed.transpose(1, 2)
x = x + new_time_embed
else:
x = x + self.time_embed
x = self.time_drop(x)
x = rearrange(x, '(b n) t m -> b (n t) m', b=B, t=T)
x = torch.cat((cls_tokens, x), dim=1)
# Attention blocks
for blk in self.blocks:
x = blk(x, B, T, W)
# Predictions for space-only baseline
if self.attention_type == 'space_only':
x = rearrange(x, '(b t) n m -> b t n m', b=B, t=T)
x = torch.mean(x, 1) # averaging predictions for every frame
x = self.norm(x)
if return_all_tokens:
return x
else:
return x[:, 0]
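    # forward_features returns the CLS embedding of shape (B, D) by default, or the full
    # token sequence (B, 1 + N*T, D) when return_all_tokens=True in the divided
    # space-time setting, where N is the number of spatial patches per frame.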
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def _conv_filter(state_dict, patch_size=16):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
if v.shape[-1] != patch_size:
patch_size = v.shape[-1]
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
class vit_base_patch16_224(nn.Module):
def __init__(self, cfg, **kwargs):
super(vit_base_patch16_224, self).__init__()
self.pretrained = True
patch_size = 16
self.model = VisionTransformer(img_size=cfg.DATA.TRAIN_CROP_SIZE, num_classes=cfg.MODEL.NUM_CLASSES, patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, norm_layer=partial(
nn.LayerNorm, eps=1e-6), drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, num_frames=cfg.DATA.NUM_FRAMES, attention_type=cfg.TIMESFORMER.ATTENTION_TYPE, **kwargs)
self.attention_type = cfg.TIMESFORMER.ATTENTION_TYPE
self.model.default_cfg = default_cfgs['vit_base_patch16_224']
self.num_patches = (cfg.DATA.TRAIN_CROP_SIZE // patch_size) * \
(cfg.DATA.TRAIN_CROP_SIZE // patch_size)
pretrained_model = cfg.TIMESFORMER.PRETRAINED_MODEL
if self.pretrained:
load_pretrained(self.model, num_classes=self.model.num_classes, in_chans=kwargs.get('in_chans', 3), filter_fn=_conv_filter,
img_size=cfg.DATA.TRAIN_CROP_SIZE, num_patches=self.num_patches, attention_type=self.attention_type, pretrained_model=pretrained_model)
def forward(self, x):
x = self.model(x)
return x
class TimeSformer(nn.Module):
def __init__(self, model_cfg, input_format='BGR', cross_attention_config=None, **kwargs):
super(TimeSformer, self).__init__()
self.config_file = str(model_cfg)
# model-specific configurations
self.img_size = model_cfg['img_size']
self.patch_size = model_cfg['patch_size']
self.num_frames = model_cfg['num_frm']
self.attn_drop_rate = model_cfg['attn_drop_rate']
self.drop_path_rate = model_cfg['drop_path_rate']
self.drop_rate = model_cfg['drop_rate']
self.use_pooling = model_cfg['use_maxpooling']
self.use_grad_ckpt = model_cfg['gradient_checkpointing']
self.attention_type = 'divided_space_time'
LOGGER.info(f'Initializing TimeSformer with img_size={self.img_size}, patch_size={self.patch_size}, num_frames={self.num_frames}')
# will be ignored when loading official pretrained ckpt
self.num_classes = 400
self.input_format = input_format
assert input_format == "RGB", "Official TimeSformer uses RGB input."
self.model = VisionTransformer(img_size=self.img_size,
num_classes=self.num_classes,
patch_size=self.patch_size,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
drop_rate=self.drop_rate,
attn_drop_rate=self.attn_drop_rate,
drop_path_rate=self.drop_path_rate,
num_frames=self.num_frames,
attention_type=self.attention_type,
cross_attention_config=cross_attention_config,
use_grad_checkpointing=self.use_grad_ckpt,
**kwargs
)
if self.use_pooling:
self.maxpool_kernel_size = model_cfg['maxpool_kernel_size']
self.maxpooling = torch.nn.MaxPool2d(kernel_size=self.maxpool_kernel_size)
self.model.default_cfg = default_cfgs['vit_base_patch' + str(self.patch_size)+'_224']
self.num_patches = (self.img_size // self.patch_size) * (self.img_size // self.patch_size)
def forward(self, x):
x = self.model(x)
return x
def forward_features(self, x, return_all_tokens=True, pooling='temporal'):
b, c, t, h, w = x.shape
x = self.model.forward_features(x, return_all_tokens=return_all_tokens)
## apply pooling
W = H = self.img_size // self.patch_size
T = self.num_frames
cls_tokens = x[:, 0, :].unsqueeze(1)
other_tokens = x[:, 1:, :]
x = rearrange(other_tokens, 'b (h w t) m -> b t (h w) m', h=H, w=W, t=T)
assert pooling in ['temporal', 'spatial', 'none'], 'Invalid pooling type {}'.format(pooling)
if pooling == 'temporal':
x = torch.mean(x, dim=1)
x = torch.cat((cls_tokens, x), dim=1)
elif pooling == 'spatial': # spatial pooling
# x = torch.max(x, dim=2)[0]
x = torch.mean(x, dim=2)
x = torch.cat((cls_tokens, x), dim=1)
elif pooling == 'none':
cls_tokens_repeat = cls_tokens.unsqueeze(1).repeat(1, T, 1, 1)
x = torch.cat((cls_tokens_repeat, x), dim=2)
else:
raise NotImplementedError('Unsupported pooling type {}'.format(pooling))
return x
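    # Output shapes from forward_features above (with N spatial tokens per frame and
    # T = num_frames): pooling='temporal' -> (B, 1 + N, D); pooling='spatial' -> (B, 1 + T, D);
    # pooling='none' -> (B, T, 1 + N, D) with the CLS token repeated per frame.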
def _get_pooled_features(self, x):
b, t, h, w, c = x.shape
# x = rarrange(x.transpose(2, 4).transpose(3, 4), 'b t h w c -> (b t c) h w')
x = rearrange(x, 'b t h w c -> (b t c) h w')
x = self.maxpooling(x)
x = rearrange(x, '(b t c) h w -> b (t h w) c', b=b, t=t)
return x
def load_state_dict(self, pretrained_ckpt_path):
LOGGER.info('Loading TimeSformer checkpoints from {}'.format(pretrained_ckpt_path))
if pretrained_ckpt_path == "vit_base_patch16_224":
load_ckpt_func = load_pretrained_imagenet
elif "CLIP_ViT" in pretrained_ckpt_path:
load_ckpt_func = load_pretrained_CLIP_ViT
else:
load_ckpt_func = load_pretrained_kinetics
load_ckpt_func(self.model,
num_classes=self.model.num_classes,
in_chans=3,
filter_fn=_conv_filter,
img_size=self.img_size,
num_frames=self.num_frames,
num_patches=self.num_patches,
attention_type=self.attention_type,
pretrained_model=pretrained_ckpt_path
) |
the-stack_106_29605 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Clifford operator class.
"""
# pylint: disable=invalid-name, abstract-method
import re
import numpy as np
from qiskit.exceptions import QiskitError
from qiskit.circuit import QuantumCircuit, Instruction
from qiskit.circuit.library.standard_gates import IGate, XGate, YGate, ZGate, HGate, SGate
from qiskit.quantum_info.operators.base_operator import BaseOperator
from qiskit.quantum_info.operators.operator import Operator
from qiskit.quantum_info.operators.scalar_op import ScalarOp
from qiskit.quantum_info.synthesis.clifford_decompose import decompose_clifford
from .stabilizer_table import StabilizerTable
from .clifford_circuits import _append_circuit
class Clifford(BaseOperator):
"""An N-qubit unitary operator from the Clifford group.
**Representation**
An *N*-qubit Clifford operator is stored as a length *2N*
:class:`~qiskit.quantum_info.StabilizerTable` using the convention
from reference [1].
* Rows 0 to *N-1* are the *destabilizer* group generators
    * Rows *N* to *2N-1* are the *stabilizer* group generators.
The internal :class:`~qiskit.quantum_info.StabilizerTable` for the Clifford
can be accessed using the :attr:`table` attribute. The destabilizer or
stabilizer rows can each be accessed as a length-N Stabilizer table using
:attr:`destabilizer` and :attr:`stabilizer` attributes.
    A more easily human-readable representation of the Clifford operator can
    be obtained by calling the :meth:`to_dict` method. This representation is
    also used if a Clifford object is printed, as in the following example:
.. jupyter-execute::
from qiskit import QuantumCircuit
from qiskit.quantum_info import Clifford
# Bell state generation circuit
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
cliff = Clifford(qc)
# Print the Clifford
print(cliff)
# Print the Clifford destabilizer rows
print(cliff.destabilizer)
# Print the Clifford stabilizer rows
print(cliff.stabilizer)
**Circuit Conversion**
Clifford operators can be initialized from circuits containing *only* the
following Clifford gates: :class:`~qiskit.circuit.library.IGate`,
:class:`~qiskit.circuit.library.XGate`, :class:`~qiskit.circuit.library.YGate`,
:class:`~qiskit.circuit.library.ZGate`, :class:`~qiskit.circuit.library.HGate`,
:class:`~qiskit.circuit.library.SGate`, :class:`~qiskit.circuit.library.SdgGate`,
:class:`~qiskit.circuit.library.CXGate`, :class:`~qiskit.circuit.library.CZGate`,
:class:`~qiskit.circuit.library.SwapGate`.
They can be converted back into a :class:`~qiskit.circuit.QuantumCircuit`,
or :class:`~qiskit.circuit.Gate` object using the :meth:`~Clifford.to_circuit`
or :meth:`~Clifford.to_instruction` methods respectively. Note that this
decomposition is not necessarily optimal in terms of number of gates.
.. note::
A minimally generating set of gates for Clifford circuits is
the :class:`~qiskit.circuit.library.HGate` and
:class:`~qiskit.circuit.library.SGate` gate and *either* the
:class:`~qiskit.circuit.library.CXGate` or
:class:`~qiskit.circuit.library.CZGate` two-qubit gate.
Clifford operators can also be converted to
:class:`~qiskit.quantum_info.Operator` objects using the
:meth:`to_operator` method. This is done via decomposing to a circuit, and then
simulating the circuit as a unitary operator.
References:
1. S. Aaronson, D. Gottesman, *Improved Simulation of Stabilizer Circuits*,
Phys. Rev. A 70, 052328 (2004).
`arXiv:quant-ph/0406196 <https://arxiv.org/abs/quant-ph/0406196>`_
"""
def __init__(self, data, validate=True):
"""Initialize an operator object."""
# Initialize from another Clifford by sharing the underlying
# StabilizerTable
if isinstance(data, Clifford):
self._table = data._table
# Initialize from ScalarOp as N-qubit identity discarding any global phase
elif isinstance(data, ScalarOp):
if not data.is_unitary() or set(data._input_dims) != set([2]):
raise QiskitError("Can only initalize from N-qubit identity ScalarOp.")
self._table = StabilizerTable(
np.eye(2 * len(data._input_dims), dtype=np.bool))
# Initialize from a QuantumCircuit or Instruction object
elif isinstance(data, (QuantumCircuit, Instruction)):
self._table = Clifford.from_circuit(data)._table
# Initialize StabilizerTable directly from the data
else:
self._table = StabilizerTable(data)
# Validate table is a symplectic matrix
if validate and not Clifford._is_symplectic(self._table.array):
raise QiskitError(
'Invalid Clifford. Input StabilizerTable is not a valid'
' symplectic matrix.')
# Initialize BaseOperator
dims = self._table.num_qubits * (2,)
super().__init__(dims, dims)
def __repr__(self):
return 'Clifford({})'.format(repr(self.table))
def __str__(self):
return 'Clifford: Stabilizer = {}, Destabilizer = {}'.format(
str(self.stabilizer.to_labels()),
str(self.destabilizer.to_labels()))
def __eq__(self, other):
"""Check if two Clifford tables are equal"""
return super().__eq__(other) and self._table == other._table
# ---------------------------------------------------------------------
# Attributes
# ---------------------------------------------------------------------
def __getitem__(self, key):
"""Return a stabilizer Pauli row"""
return self._table.__getitem__(key)
def __setitem__(self, key, value):
"""Set a stabilizer Pauli row"""
self._table.__setitem__(key, value)
@property
def table(self):
"""Return StabilizerTable"""
return self._table
@table.setter
def table(self, value):
"""Set the stabilizer table"""
# Note this setter cannot change the size of the Clifford
# It can only replace the contents of the StabilizerTable with
# another StabilizerTable of the same size.
if not isinstance(value, StabilizerTable):
value = StabilizerTable(value)
self._table._array[:, :] = value._table._array
self._table._phase[:] = value._table._phase
@property
def stabilizer(self):
"""Return the stabilizer block of the StabilizerTable."""
return StabilizerTable(self._table[self.num_qubits:2*self.num_qubits])
@stabilizer.setter
def stabilizer(self, value):
"""Set the value of stabilizer block of the StabilizerTable"""
inds = slice(self.num_qubits, 2*self.num_qubits)
self._table.__setitem__(inds, value)
@property
def destabilizer(self):
"""Return the destabilizer block of the StabilizerTable."""
return StabilizerTable(self._table[0:self.num_qubits])
@destabilizer.setter
def destabilizer(self, value):
"""Set the value of destabilizer block of the StabilizerTable"""
inds = slice(0, self.num_qubits)
self._table.__setitem__(inds, value)
# ---------------------------------------------------------------------
# Utility Operator methods
# ---------------------------------------------------------------------
def is_unitary(self):
"""Return True if the Clifford table is valid."""
# A valid Clifford is always unitary, so this function is really
# checking that the underlying Stabilizer table array is a valid
# Clifford array.
return Clifford._is_symplectic(self.table.array)
# ---------------------------------------------------------------------
# BaseOperator Abstract Methods
# ---------------------------------------------------------------------
def conjugate(self):
"""Return the conjugate of the Clifford."""
return Clifford._conjugate_transpose(self, 'C')
def adjoint(self):
"""Return the conjugate transpose of the Clifford"""
return Clifford._conjugate_transpose(self, 'A')
def transpose(self):
"""Return the transpose of the Clifford."""
return Clifford._conjugate_transpose(self, 'T')
def compose(self, other, qargs=None, front=False):
"""Return the composed operator.
Args:
other (Clifford): an operator object.
qargs (list or None): a list of subsystem positions to apply
other on. If None apply on all
subsystems [default: None].
front (bool): If True compose using right operator multiplication,
instead of left multiplication [default: False].
Returns:
Clifford: The operator self @ other.
Raise:
QiskitError: if operators have incompatible dimensions for
composition.
Additional Information:
Composition (``@``) is defined as `left` matrix multiplication for
matrix operators. That is that ``A @ B`` is equal to ``B * A``.
Setting ``front=True`` returns `right` matrix multiplication
``A * B`` and is equivalent to the :meth:`dot` method.
"""
if qargs is None:
qargs = getattr(other, 'qargs', None)
# If other is a QuantumCircuit we can more efficiently compose
# using the _append_circuit method to update each gate recursively
# to the current Clifford, rather than converting to a Clifford first
# and then doing the composition of tables.
if not front and isinstance(other, (QuantumCircuit, Instruction)):
ret = self.copy()
_append_circuit(ret, other, qargs=qargs)
return ret
if not isinstance(other, Clifford):
other = Clifford(other)
# Validate dimensions. Note we don't need to get updated input or
# output dimensions from `_get_compose_dims` as the dimensions of the
# Clifford object can't be changed by composition
self._get_compose_dims(other, qargs, front)
        # Pad other with identities if composing on a subsystem
other = self._pad_with_identity(other, qargs)
return self._compose_clifford(other, front=front)
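    # Ordering note: for equal-sized Cliffords a and b, a.compose(b) follows circuit order
    # (a applied first, then b), while a.dot(b) follows matrix order (b applied first);
    # the two differ exactly by the `front` flag handled above.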
def dot(self, other, qargs=None):
"""Return the right multiplied operator self * other.
Args:
other (Clifford): an operator object.
qargs (list or None): a list of subsystem positions to apply
other on. If None apply on all
subsystems [default: None].
Returns:
Clifford: The operator self * other.
Raises:
QiskitError: if operators have incompatible dimensions for
composition.
"""
return super().dot(other, qargs=qargs)
def tensor(self, other):
"""Return the tensor product operator self ⊗ other.
Args:
other (Clifford): a operator subclass object.
Returns:
Clifford: the tensor product operator self ⊗ other.
"""
return self._tensor_product(other, reverse=False)
def expand(self, other):
"""Return the tensor product operator other ⊗ self.
Args:
other (Clifford): an operator object.
Returns:
Clifford: the tensor product operator other ⊗ self.
"""
return self._tensor_product(other, reverse=True)
# ---------------------------------------------------------------------
# Representation conversions
# ---------------------------------------------------------------------
def to_dict(self):
"""Return dictionary represenation of Clifford object."""
return {
"stabilizer": self.stabilizer.to_labels(),
"destabilizer": self.destabilizer.to_labels()
}
@staticmethod
def from_dict(obj):
"""Load a Clifford from a dictionary"""
destabilizer = StabilizerTable.from_labels(obj.get('destabilizer'))
stabilizer = StabilizerTable.from_labels(obj.get('stabilizer'))
return Clifford(destabilizer + stabilizer)
def to_matrix(self):
"""Convert operator to Numpy matrix."""
return self.to_operator().data
def to_operator(self):
"""Convert to an Operator object."""
return Operator(self.to_instruction())
def to_circuit(self):
"""Return a QuantumCircuit implementing the Clifford.
For N <= 3 qubits this is based on optimal CX cost decomposition
from reference [1]. For N > 3 qubits this is done using the general
non-optimal compilation routine from reference [2].
Return:
QuantumCircuit: a circuit implementation of the Clifford.
References:
1. S. Bravyi, D. Maslov, *Hadamard-free circuits expose the
structure of the Clifford group*,
`arXiv:2003.09412 [quant-ph] <https://arxiv.org/abs/2003.09412>`_
2. S. Aaronson, D. Gottesman, *Improved Simulation of Stabilizer Circuits*,
Phys. Rev. A 70, 052328 (2004).
`arXiv:quant-ph/0406196 <https://arxiv.org/abs/quant-ph/0406196>`_
"""
return decompose_clifford(self)
def to_instruction(self):
"""Return a Gate instruction implementing the Clifford."""
return self.to_circuit().to_gate()
@staticmethod
def from_circuit(circuit):
"""Initialize from a QuantumCircuit or Instruction.
Args:
circuit (QuantumCircuit or ~qiskit.circuit.Instruction):
instruction to initialize.
Returns:
Clifford: the Clifford object for the instruction.
Raises:
QiskitError: if the input instruction is non-Clifford or contains
classical register instruction.
"""
if not isinstance(circuit, (QuantumCircuit, Instruction)):
raise QiskitError("Input must be a QuantumCircuit or Instruction")
# Convert circuit to an instruction
if isinstance(circuit, QuantumCircuit):
circuit = circuit.to_instruction()
# Initialize an identity Clifford
clifford = Clifford(np.eye(2 * circuit.num_qubits), validate=False)
_append_circuit(clifford, circuit)
return clifford
@staticmethod
def from_label(label):
"""Return a tensor product of single-qubit Clifford gates.
Args:
label (string): single-qubit operator string.
Returns:
Clifford: The N-qubit Clifford operator.
Raises:
QiskitError: if the label contains invalid characters.
Additional Information:
            The labels correspond to the following single-qubit Cliffords:
* - Label
- Stabilizer
- Destabilizer
* - ``"I"``
- +Z
- +X
* - ``"X"``
- -Z
- +X
* - ``"Y"``
- -Z
- -X
* - ``"Z"``
- +Z
- -X
* - ``"H"``
- +X
- +Z
* - ``"S"``
- +Z
- +Y
"""
# Check label is valid
label_gates = {
'I': IGate(), 'X': XGate(), 'Y': YGate(),
'Z': ZGate(), 'H': HGate(), 'S': SGate()
}
if re.match(r'^[IXYZHS\-+]+$', label) is None:
raise QiskitError('Label contains invalid characters.')
# Initialize an identity matrix and apply each gate
num_qubits = len(label)
op = Clifford(np.eye(2 * num_qubits, dtype=np.bool))
for qubit, char in enumerate(reversed(label)):
_append_circuit(op, label_gates[char], qargs=[qubit])
return op
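    # Example: Clifford.from_label('XI') applies X to qubit 1 and the identity to qubit 0;
    # the rightmost character acts on qubit 0, per the reversed(label) loop above.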
# ---------------------------------------------------------------------
# Internal helper functions
# ---------------------------------------------------------------------
@staticmethod
def _is_symplectic(mat):
"""Return True if input is symplectic matrix."""
# Condition is
# table.T * [[0, 1], [1, 0]] * table = [[0, 1], [1, 0]]
# where we are block matrix multiplying using symplectic product
dim = len(mat) // 2
if mat.shape != (2 * dim, 2 * dim):
return False
one = np.eye(dim, dtype=np.int)
zero = np.zeros((dim, dim), dtype=np.int)
seye = np.block([[zero, one], [one, zero]])
arr = mat.astype(np.int)
return np.array_equal(np.mod(arr.T.dot(seye).dot(arr), 2), seye)
@staticmethod
def _conjugate_transpose(clifford, method):
"""Return the adjoint, conjugate, or transpose of the Clifford.
Args:
clifford (Clifford): a clifford object.
method (str): what function to apply 'A', 'C', or 'T'.
Returns:
Clifford: the modified clifford.
"""
ret = clifford.copy()
if method in ['A', 'T']:
# Apply inverse
# Update table
tmp = ret.destabilizer.X.copy()
ret.destabilizer.X = ret.stabilizer.Z.T
ret.destabilizer.Z = ret.destabilizer.Z.T
ret.stabilizer.X = ret.stabilizer.X.T
ret.stabilizer.Z = tmp.T
# Update phase
ret.table.phase ^= clifford.dot(ret).table.phase
if method in ['C', 'T']:
# Apply conjugate
ret.table.phase ^= np.mod(np.sum(
ret.table.X & ret.table.Z, axis=1), 2).astype(np.bool)
return ret
def _tensor_product(self, other, reverse=False):
"""Return the tensor product operator.
Args:
other (Clifford): another Clifford operator.
            reverse (bool): If False return self ⊗ other; if True return
                            other ⊗ self instead [Default: False].
Returns:
Clifford: the tensor product operator.
Raises:
QiskitError: if other cannot be converted into an Clifford.
"""
if not isinstance(other, Clifford):
other = Clifford(other)
if reverse:
cliff0 = self
cliff1 = other
else:
cliff0 = other
cliff1 = self
# Pad stabilizers and destabilizers
destab = (cliff0.destabilizer.expand(cliff1.num_qubits * 'I') +
cliff1.destabilizer.tensor(cliff0.num_qubits * 'I'))
stab = (cliff0.stabilizer.expand(cliff1.num_qubits * 'I') +
cliff1.stabilizer.tensor(cliff0.num_qubits * 'I'))
# Add the padded table
return Clifford(destab + stab, validate=False)
def _pad_with_identity(self, clifford, qargs):
"""Pad Clifford with identities on other subsystems."""
if qargs is None:
return clifford
padded = Clifford(StabilizerTable(
np.eye(2 * self.num_qubits, dtype=np.bool)), validate=False)
inds = list(qargs) + [self.num_qubits + i for i in qargs]
# Pad Pauli array
pauli = clifford.table.array
for i, pos in enumerate(qargs):
padded.table.array[inds, pos] = pauli[:, i]
padded.table.array[inds, self.num_qubits + pos] = pauli[:, clifford.num_qubits + i]
# Pad phase
padded.table.phase[inds] = clifford.table.phase
return padded
def _compose_clifford(self, other, front=False):
"""Return the composition channel assume other is Clifford of same size as self."""
if front:
table1 = self.table
table2 = other.table
else:
table1 = other.table
table2 = self.table
num_qubits = self.num_qubits
array1 = table1.array.astype(int)
phase1 = table1.phase.astype(int)
array2 = table2.array.astype(int)
phase2 = table2.phase.astype(int)
# Update Pauli table
pauli = StabilizerTable(array2.dot(array1) % 2)
# Add phases
phase = np.mod(array2.dot(phase1) + phase2, 2)
# Correcting for phase due to Pauli multiplication
ifacts = np.zeros(2 * num_qubits, dtype=np.int)
for k in range(2 * num_qubits):
row2 = array2[k]
x2 = table2.X[k]
z2 = table2.Z[k]
# Adding a factor of i for each Y in the image of an operator under the
# first operation, since Y=iXZ
ifacts[k] += np.sum(x2 & z2)
# Adding factors of i due to qubit-wise Pauli multiplication
for j in range(num_qubits):
x = 0
z = 0
for i in range(2 * num_qubits):
if row2[i]:
x1 = array1[i, j]
z1 = array1[i, j + num_qubits]
if (x | z) & (x1 | z1):
val = np.mod(np.abs(3 * z1 - x1) - np.abs(3 * z - x) - 1, 3)
if val == 0:
ifacts[k] += 1
elif val == 1:
ifacts[k] -= 1
x = np.mod(x + x1, 2)
z = np.mod(z + z1, 2)
p = np.mod(ifacts, 4) // 2
phase = np.mod(phase + p, 2)
return Clifford(StabilizerTable(pauli, phase), validate=False)
|
the-stack_106_29606 | ########
#### setup.py: script to build and support development of the Vulnerability Catalog.
#### Date: 2018-02-18
#### Version: 1.0
#### Author: Daniel Avelino https://daavelino.github.io
########
import platform
import sys
import shutil
import os
import subprocess
# import secrets # Will be imported after check if python version is > 3.6.
import getpass
import re
import json
import pprint
from pathlib import Path
global MINIMAL_PYTHON_VERSION
global PROJECT_NAME
global APP_NAME
global HOME_DIR
MINIMAL_PYTHON_VERSION = (3, 6, 0)
PROJECT_NAME = 'base'
APP_NAME = 'catalog'
HOME_DIR = Path(os.getcwd())
BASE_DIR = Path(os.path.join(HOME_DIR, PROJECT_NAME,))
######### DRY functions:
def tuple_comparison(a:tuple, b:tuple):
'''
    Compare tuples element-wise as ordered sequences of numbers
    and return True if a >= b.
'''
control = False
for i in range(0, len(b)):
if int(a[i]) > int(b[i]):
control = True
break
elif int(a[i]) < int(b[i]):
control = False
break
else:
control = True
return(control)
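# Worked examples for the comparison above:
#   tuple_comparison((3, 8, 1), (3, 6, 0)) -> True
#   tuple_comparison((3, 5, 0), (3, 6, 0)) -> False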
def get_req_version(resource, filename):
'''
return a list representing the resource version
as indicated in filename, ['resource', 'version'],
or None otherwise. Filename should be the pip file
requirements.txt.
'''
version = None
if not os.path.isfile(filename):
print('No such file or directory.')
return(version)
else:
f = open(filename,'r')
line = f.readline()
while line != '':
if re.search(resource, line):
regex = r'\W+' # \W+ matches any non-word character.
splitter = re.search(regex, line, re.DOTALL | re.IGNORECASE)
if splitter: # https://docs.python.org/3/library/re.html#re.search
splitter = splitter.group()
version = line.split(splitter)
for i in range(0, len(version)):
tmp = version[i]
version[i] = tmp.rstrip(os.linesep)
break
else:
line = f.readline()
f.close()
if version:
tmp = version[1].split('.')
version = tuple(tmp)
return(version)
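# Example: with a line "django==2.0.2" in requirements.txt,
# get_req_version('django', 'requirements.txt') returns the tuple ('2', '0', '2').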
######### End DRY functions
######## System properties:
def get_environment():
'''Returns a dictionary with relevant information about OS environment'''
env = dict()
env['system'] = platform.system()
if env['system'] == 'Linux':
env['system version'] = platform.linux_distribution()
if env['system'] == 'Windows':
        env['system version'] = platform.win32_ver()
return(env)
def check_parameters():
allowed_params = {
'build': "setup and builds the project from the scratch (with test data).",
'build-only': "Just build project, without load data or create users.",
'build-novenv': "same as 'build', but do not uses Python venv.",
'clean': "remove all project related files.",
'createstartscript': "creates the platform specific Catalog start script",
'createsuperuser': "creates an admin user to the catalog.",
'createvenv': "creates a functional Python's Virtual Environment at current directory.",
'database': "Setup an alternative database instead of default sqlite.",
'deep-clean': "remove all project related files and venv structure.",
'loadtestdata': "Add some test data into database.",
'templates': "updates project's Templates, forms and static files only.",
'urlsviews': "updates project's Urls and Views only."
}
# one and only one allowed parameter has to be passed.
if (len(sys.argv) == 2) and (sys.argv[1] in allowed_params.keys()):
param = sys.argv[1] # Because argv[0] is the file name.
return param
else:
print('\nUsage:', sys.argv[0], '<options>, where <options> are:\n')
for i in allowed_params.items():
print(' ' + i[0] + ': ' + i[1])
print('\nExiting.')
sys.exit(1)
def check_venv():
venv_dir = Path('venv',)
env = get_environment()
if not venv_dir.is_dir():
print('Missing venv structure. Creating...')
os.makedirs("venv")
os.system('python -m venv venv')
        print('\n[Warning] Virtual Environment not loaded yet. '
              'Please load the venv by running:')
if env['system'] == 'Linux':
print('\n source venv/bin/activate\n')
if env['system'] == 'Windows':
print('\n venv\\Scripts\\activate.bat\n')
param = check_parameters()
print('and run\n\n python setup.py', param,'\n\nscript again.')
sys.exit(0)
else:
        if 'VIRTUAL_ENV' not in os.environ:  # venv is not loaded:
            print('\n[Warning] Virtual Environment not loaded yet. '
                  'Please load the venv by running:')
if env['system'] == 'Linux':
print('\n source venv/bin/activate\n')
if env['system'] == 'Windows':
print('\n venv\\Scripts\\activate.bat\n')
sys.exit(0)
else:
pass
def check_python_version():
'''
    Ensure that the Python version is at least MINIMAL_PYTHON_VERSION.
    It also loads the secrets module, new in Python 3.6.
https://docs.python.org/3/library/secrets.html
'''
#### Checking installed Python version:
python_version = (sys.version_info.major, sys.version_info.minor, \
sys.version_info.micro)
control = tuple_comparison(python_version, MINIMAL_PYTHON_VERSION)
if not control:
print('\n[Warning] Missing Python ', end='')
for i in range(0, len(MINIMAL_PYTHON_VERSION)):
print(str(MINIMAL_PYTHON_VERSION[i]) \
+ '.', end='' )
print(' (or greater).\n')
print('Please, get it at (https://www.python.org/downloads/).\nExiting.\n')
sys.exit(1)
else:
print('Importing secrets module')
global secrets
import secrets # secrets is New in Python 3.6.
def check_pip_version():
    'Check that pip is installed; exit with instructions if it is missing.'
try:
import pip
except ImportError: # Exit, since it is a required dependency.
print('\n[Warning] Missing pip.\n')
print('Please, install it first (https://pip.pypa.io).\nExiting.\n')
sys.exit(1)
def check_django_version():
'Check if Django is installed. If not, install it properly.'
try:
from django.core.management import execute_from_command_line
except ImportError: # so, install it properly:
print('\n[Warning] Missing Django.\n')
django_min_version = ''
for i in range(0, len(MINIMAL_DJANGO_VERSION)):
django_min_version = django_min_version \
+ str(MINIMAL_DJANGO_VERSION[i]) \
+ '.'
django_min_version = django_min_version[:-1]
print('Using\n\n pip install django==' \
+ django_min_version +'\n\nto fix this dependency...\n')
os.system('pip install django==' + django_min_version)
print('Done.')
#### Check Django version:
#### We opt not to update Django version here to avoid unecessary
#### complications at development environment. Since this script
#### install Django using its correct version from the scratch, if
#### it find Django in an older version, things should be tested...
try:
import django
except ImportError:
pass
# It must already be present since we installed it previously.
django_version = django.VERSION[0:3]
django_min_version = ''
for i in range(0, len(MINIMAL_DJANGO_VERSION)):
django_min_version = django_min_version \
+ str(MINIMAL_DJANGO_VERSION[i]) \
+ '.'
django_min_version = django_min_version[:-1]
if not tuple_comparison(django_version, MINIMAL_DJANGO_VERSION):
print('\n[Warning] Django', django.__version__,
'(actual version) requires an update.\n')
print('Please, upgrade Django to', django_min_version, \
'version by running:\n')
print(' pip install django==' \
+ django_min_version + '\n\nExiting.\n')
sys.exit(1)
def check_whitenoise_version():
try:
import whitenoise # http://whitenoise.evans.io
except ImportError:
print('\n[Warning] Missing WhiteNoise.\n')
print('Using\n\n pip install whitenoise\n'
'\nto fix this dependency...\n')
os.system('pip install whitenoise')
print('Done.')
def check_system_reqs():
check_python_version()
check_pip_version()
check_django_version()
check_whitenoise_version()
def cleaning_old_stuff(control):
    '''Clean up created project files.'''
env = get_environment()
print('Cleaning old project structure...')
target = Path(PROJECT_NAME)
if target.is_dir():
shutil.rmtree(target)
if control == 'deep-clean':
if 'venv' in sys.prefix: # venv is loaded:
if env['system'] == 'Linux':
#os.system('deactivate')
# it will not work since deactivate is defined at python's parent process.
pass
if env['system'] == 'Windows':
                os.system(r'venv\Scripts\deactivate.bat')
target = Path(os.path.join(os.path.curdir, 'venv'))
if target.is_dir():
shutil.rmtree(target)
print('Done.')
######## End of System properties.
######## Django properties:
def start_django_project():
'''Builds the project.'''
print('Starting creating Django structure...')
#### Starting project creation:
os.system('django-admin startproject' + ' ' + PROJECT_NAME)
BASEDIR = os.path.abspath(os.path.curdir)
os.chdir(PROJECT_NAME)
#### Introducing APP_NAME into the project:
os.system('python manage.py startapp' + ' ' + APP_NAME)
print('Done.')
os.chdir(BASEDIR)
def importing_settings():
'''Get settings from metadata/settings/settings.py'''
print('Copy settings.py from metadata...')
src_path = os.path.join(os.path.curdir, \
'metadata', \
'settings', \
'settings.py')
dst_path = os.path.join(os.path.curdir, 'base', 'base', 'settings.py')
shutil.copy(src_path, dst_path)
print('Done.')
def set_datamodel():
'''Enabling catalog data models into Django structure.'''
env = get_environment()
print('Copy data models...')
src_path = os.path.join(os.path.curdir, \
'metadata', \
'models', \
'catalog', \
'models.py')
dst_path = os.path.join(os.path.curdir, 'base', 'catalog', 'models.py')
shutil.copy(src_path, dst_path)
print('Done.')
#### Applying data models:
print('Applying data models...')
BASEDIR = os.path.abspath(os.path.curdir)
os.chdir(PROJECT_NAME)
os.system('python manage.py makemigrations' + ' ' + APP_NAME)
os.system('python manage.py sqlmigrate' \
+ ' ' \
+ APP_NAME \
+ ' ' \
+ '0001')
os.system('python manage.py migrate')
os.chdir(BASEDIR)
print('Done.')
def set_urls():
print('Setting Urls...')
# PROJECT_URLS:
src_path = os.path.join(os.path.curdir, \
'metadata', \
'urls', \
'admin', \
'urls.py')
dst_path = os.path.join(os.path.curdir, 'base', 'base', 'urls.py')
shutil.copy(src_path, dst_path)
# APP_URLS:
src_path = os.path.join(os.path.curdir,\
'metadata', \
'urls', \
'catalog', \
'urls.py')
dst_path = os.path.join(os.path.curdir, 'base', 'catalog', 'urls.py')
shutil.copy(src_path, dst_path)
print('Done.')
def set_views():
print('Setting Views...')
src_path = os.path.join(os.path.curdir,'metadata', 'views', 'catalog', )
dst_path = os.path.join(os.path.curdir, 'base', 'catalog', )
for i in os.listdir(src_path):
fsrc = os.path.join(src_path, i)
shutil.copy(fsrc, dst_path)
print('Done.')
def set_templates():
print('Setting templates...')
files = ['index.html',
'home.html',
'detail.html',
'add.html',
'update.html',
'delete.html',
'panorama.html',
'fastupdate.html',
'search.html',
'risk.html',
'cvss.html',
'upload.html'
]
tmpl_srcdir = os.path.join(os.path.curdir, \
'metadata', \
'templates', \
'catalog',)
tmpl_dstdir = os.path.join(os.path.curdir, \
'base', \
'catalog', \
'templates', \
'catalog',)
tmpl_main_path = os.path.join(tmpl_srcdir, 'tmpl_main.html')
# ensuring tmpl_dstdir exists:
if not Path(tmpl_dstdir).is_dir():
os.makedirs(tmpl_dstdir)
# reading tmpl_main.html data content:
fd = open(tmpl_main_path, 'rb') # 'rb' to avoid encoding problems.
tmpl_main_data = fd.read()
fd.close()
for i in files:
tmp = ''
tmp = i.split('.')
context = tmp[0]
# the template files with custom content are <context_custom_content.html>:
content_file = context + '_custom_content.html'
content_file = os.path.join(tmpl_srcdir, content_file)
fd = open(content_file, 'rb') # 'rb' to avoid encoding problems.
custom_data = fd.read()
fd.close()
# all these templates are wrapped to tmpl_main.html (tmpl_main_data)
tmpl_final_file = os.path.join(tmpl_dstdir, i)
fd = open(tmpl_final_file, 'wb') # 'wb' to avoid encoding problems.
data = tmpl_main_data.replace(b'__INSERT_CUSTOM_CONTENT__', \
bytes(custom_data))
fd.write(data)
fd.close()
print('Done.')
def set_login_template():
print('Setting login template...')
tmpl_srcdir = os.path.join(os.path.curdir, \
'metadata', \
'templates', \
'catalog', \
'login.html')
tmpl_dstdir = os.path.join(os.path.curdir, \
'base', \
'catalog', \
'templates', \
'catalog',)
# ensuring tmpl_dstdir exists:
if not Path(tmpl_dstdir).is_dir():
os.makedirs(tmpl_dstdir)
shutil.copy(tmpl_srcdir, tmpl_dstdir)
print('Done.')
def set_admin_template():
# we just need to put static files in the right place.
print('Setting admin template confs:')
CURR_DIR = os.path.abspath(os.path.curdir)
os.chdir(PROJECT_NAME)
os.system('python manage.py collectstatic --noinput')
os.chdir(CURR_DIR)
print('Done.')
def set_forms():
print('Setting Forms...')
CURR_DIR = Path(os.getcwd())
os.chdir(HOME_DIR)
src_path = os.path.join(os.path.curdir,'metadata', 'forms', 'catalog', )
dst_path = os.path.join(os.path.curdir, 'base', 'catalog', )
for i in os.listdir(src_path):
fsrc = os.path.join(src_path,i)
shutil.copy(fsrc, dst_path)
os.chdir(CURR_DIR)
print('Done.')
def set_static_files():
print('Setting Static Files...')
    CURR_DIR = os.getcwd()
os.chdir(HOME_DIR)
src_path = os.path.join(os.path.curdir,'metadata', 'static', 'catalog', )
dst_path = os.path.join(os.path.curdir, 'base', 'catalog', 'static', )
if Path(dst_path).is_dir():
shutil.rmtree(dst_path)
shutil.copytree(src_path, dst_path)
os.chdir(CURR_DIR)
print('Done.')
def deployment_checklist():
print('Deployment checklist...')
# https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
key = secrets.token_hex(64)
src_path = os.path.join(os.path.curdir, \
'metadata', \
'settings', \
'settings.py')
dst_path = os.path.join(os.path.curdir, 'base', 'base', 'settings.py')
# Open template file and copy its content to avoid data appending:
fd = open(src_path, 'r')
data = fd.read()
fd.close()
# https://goo.gl/PtCXNN
fd = open(dst_path, 'w')
data = data.replace('__SECRET_KEY__', key)
fd.write(data)
fd.close()
print('Done.')
def create_test_environment():
print('Creating test environment...')
env = get_environment()
os.chdir(HOME_DIR)
BASEDIR = os.path.abspath(os.path.join(os.path.curdir, PROJECT_NAME,))
src_path = os.path.join(os.path.curdir, \
'test', \
'data', \
'initialcatalogdata.json')
fixturename_path = os.path.join(HOME_DIR, \
'base', \
'catalog', \
'fixturename',)
if not Path(fixturename_path).is_dir():
os.mkdir(fixturename_path)
shutil.copy(src_path, fixturename_path)
os.chdir(PROJECT_NAME)
fixturename_file = os.path.join(os.path.pardir, \
fixturename_path, \
'initialcatalogdata.json')
os.system('python manage.py loaddata' + ' ' + fixturename_file)
os.chdir(BASEDIR)
print('Done.')
def create_superuser():
env = get_environment()
BASEDIR = os.path.abspath(os.path.curdir)
os.chdir(PROJECT_NAME)
os.system('python manage.py createsuperuser')
os.chdir(BASEDIR)
def run_new_project():
env = get_environment()
print('Please access http://127.0.0.1:8000/admin to start app.')
BASEDIR = os.path.abspath(os.path.curdir)
os.chdir(PROJECT_NAME)
os.system('python manage.py runserver')
os.chdir(BASEDIR)
def set_database():
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
env = get_environment()
BASEDIR = os.path.abspath(os.path.curdir)
default_database = {
'default': {
'ENGINE': '',
'NAME': '',
'USER': '',
'PASSWORD':'',
'HOST': '',
'PORT':''
}
}
available_databases = {
        # 'key': ['DB friendly name', 'ENGINE', 'NAME', 'USER', 'PASS', 'HOST', 'PORT', 'db binding']
'1': ['PostgreSQL', 'django.db.backends.postgresql', '', '', '', '127.0.0.1', '5432', 'psycopg2'],
'2': ['MySQL', 'django.db.backends.mysql', '', '', '', '127.0.0.1', '3306', 'mysqlclient'],
'3': ['Oracle', 'django.db.backends.oracle', '', '', '', '127.0.0.1', '1521', 'cx_Oracle'],
'4': ['SQLite3', 'django.db.backends.sqlite3', "os.path.join(BASE_DIR, 'db.sqlite3')", '', '', '', '', None]
}
print('\nAvailable databases:\n')
for i in available_databases.keys():
print(i, '-', available_databases[i][0])
chosen_db = input('\nWhich one would you like to use? ')
while chosen_db not in available_databases.keys():
chosen_db = input('Choose one of the numbers above: ')
print('\nLet us set', available_databases[chosen_db][0], 'database:')
default_database['default']['ENGINE'] = available_databases[chosen_db][1]
default_database['default']['NAME'] = input('Database name: ' \
+ available_databases[chosen_db][2]) \
or available_databases[chosen_db][2]
default_database['default']['USER'] = input('Database user name: ' \
+ available_databases[chosen_db][3]) \
or available_databases[chosen_db][3]
default_database['default']['PASSWORD'] = getpass.getpass('User password:')
pwd_verify = getpass.getpass('User password (again):')
while default_database['default']['PASSWORD'] != pwd_verify:
print('Password mismatch.')
default_database['default']['PASSWORD'] = getpass.getpass('User password:')
pwd_verify = getpass.getpass('User password (again):')
default_database['default']['HOST'] = input('Database Host address (' \
+ available_databases[chosen_db][5] \
+ '):') \
or available_databases[chosen_db][5]
default_database['default']['PORT'] = input('Database Port (' \
+ available_databases[chosen_db][6] \
+ '):') \
or available_databases[chosen_db][6]
#### Altering settings.py DATABASE entry:
settings_path = os.path.join(os.curdir, 'base', 'base', 'settings.py')
f = open(settings_path, 'r')
data = f.read()
f.close()
regex = r"DATABASES = (.*)}\n" # Thanks to https://regex101.com/r/lH0jK9/1
subst = json.dumps(default_database, indent=4)
subst = subst.replace('"', '\'')
subst = 'DATABASES = ' + subst + '\n'
result = re.sub(regex, subst, data, 0, re.DOTALL)
### Since 'NAME': value is a path, it could not be treated as a string.
if available_databases[chosen_db][0] == 'SQLite3':
result = result.replace("'NAME': 'os.path.join(BASE_DIR, 'db.sqlite3')',", \
"'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),")
f = open(settings_path, 'w')
f.write(result)
f.close()
    #### Check if the database binding is installed:
    db_binding = available_databases[chosen_db][7]
    if db_binding:  # SQLite needs no extra binding
        try:
            # the stored name is used both as the import name and the pip package name
            __import__(db_binding)
        except ImportError:
            os.system('pip install ' + db_binding)
set_datamodel()
# create_superuser()
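# The regex substitution in set_database() rewrites the DATABASES assignment in
# base/base/settings.py to something like (sketch; values depend on the chosen backend):
#
#     DATABASES = {
#         'default': {
#             'ENGINE': 'django.db.backends.postgresql',
#             'NAME': 'catalog',
#             'USER': 'catalog_user',
#             'PASSWORD': '...',
#             'HOST': '127.0.0.1',
#             'PORT': '5432'
#         }
#     }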
def create_startscripts():
env = get_environment()
if env['system'] == 'Linux':
pass
if env['system'] == 'Windows':
filename = 'run.bat'
f = open(filename, 'w')
data = '@echo off\n\ncall venv\\Scripts\\activate.bat\npython run.py\npause'
f.write(data)
f.close()
def run():
param = check_parameters()
global MINIMAL_DJANGO_VERSION
MINIMAL_DJANGO_VERSION = get_req_version('django', 'requirements.txt')
if param == 'build':
check_parameters()
get_environment()
check_venv()
check_system_reqs()
cleaning_old_stuff('none')
start_django_project()
importing_settings()
set_datamodel()
set_urls()
set_views()
set_templates()
set_login_template()
set_forms()
set_static_files()
set_admin_template()
deployment_checklist()
create_superuser()
create_startscripts()
if param == 'build-only':
check_parameters()
get_environment()
check_venv()
check_system_reqs()
cleaning_old_stuff('none')
start_django_project()
importing_settings()
set_datamodel()
set_urls()
set_views()
set_templates()
set_login_template()
set_forms()
set_static_files()
set_admin_template()
deployment_checklist()
create_startscripts()
if param == 'build-novenv':
check_parameters()
get_environment()
#check_venv()
check_system_reqs()
cleaning_old_stuff('none')
start_django_project()
importing_settings()
set_datamodel()
set_urls()
set_views()
set_templates()
set_login_template()
set_forms()
set_static_files()
set_admin_template()
deployment_checklist()
create_superuser()
if param == 'clean':
cleaning_old_stuff('none')
if param == 'createsuperuser':
create_superuser()
if param == 'database':
set_database()
if param == 'deep-clean':
cleaning_old_stuff('deep-clean')
# Cleaning also venv dir.
if param == 'createvenv':
check_venv()
if param == 'templates':
set_templates()
set_login_template()
set_forms()
set_static_files()
set_admin_template()
if param == 'urlsviews':
set_urls()
set_views()
if param == 'loadtestdata':
create_test_environment()
if param == 'createstartscript':
create_startscripts()
run()
|
the-stack_106_29607 | from argparse import ArgumentParser
from copy import deepcopy
from typing import Any
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from pytorch_lightning import seed_everything
from torch.optim import Adam
from pl_bolts.callbacks.self_supervised import BYOLMAWeightUpdate
from pl_bolts.models.self_supervised.byol.models import SiameseArm
from pl_bolts.optimizers.lars_scheduling import LARSWrapper
from pl_bolts.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR
class BYOL(pl.LightningModule):
def __init__(self,
num_classes,
learning_rate: float = 0.2,
weight_decay: float = 1.5e-6,
input_height: int = 32,
batch_size: int = 32,
num_workers: int = 0,
warmup_epochs: int = 10,
max_epochs: int = 1000,
**kwargs):
"""
PyTorch Lightning implementation of `Bootstrap Your Own Latent (BYOL)
<https://arxiv.org/pdf/2006.07733.pdf>`_
Paper authors: Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre H. Richemond, \
Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Daniel Guo, Mohammad Gheshlaghi Azar, \
Bilal Piot, Koray Kavukcuoglu, Rémi Munos, Michal Valko.
Model implemented by:
- `Annika Brundyn <https://github.com/annikabrundyn>`_
.. warning:: Work in progress. This implementation is still being verified.
TODOs:
- verify on CIFAR-10
- verify on STL-10
- pre-train on imagenet
Example::
import pytorch_lightning as pl
from pl_bolts.models.self_supervised import BYOL
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.models.self_supervised.simclr.transforms import (
SimCLREvalDataTransform, SimCLRTrainDataTransform)
# model
model = BYOL(num_classes=10)
# data
dm = CIFAR10DataModule(num_workers=0)
dm.train_transforms = SimCLRTrainDataTransform(32)
dm.val_transforms = SimCLREvalDataTransform(32)
trainer = pl.Trainer()
trainer.fit(model, dm)
Train::
trainer = Trainer()
trainer.fit(model)
CLI command::
# cifar10
python byol_module.py --gpus 1
# imagenet
python byol_module.py
--gpus 8
--dataset imagenet2012
--data_dir /path/to/imagenet/
--meta_dir /path/to/folder/with/meta.bin/
--batch_size 32
Args:
datamodule: The datamodule
learning_rate: the learning rate
weight_decay: optimizer weight decay
input_height: image input height
batch_size: the batch size
num_workers: number of workers
warmup_epochs: num of epochs for scheduler warm up
max_epochs: max epochs for scheduler
"""
super().__init__()
self.save_hyperparameters()
self.online_network = SiameseArm()
self.target_network = deepcopy(self.online_network)
self.weight_callback = BYOLMAWeightUpdate()
def on_train_batch_end(self, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
# Add callback for user automatically since it's key to BYOL weight update
self.weight_callback.on_train_batch_end(self.trainer, self, batch, batch_idx, dataloader_idx)
def forward(self, x):
y, _, _ = self.online_network(x)
return y
def cosine_similarity(self, a, b):
a = F.normalize(a, dim=-1)
b = F.normalize(b, dim=-1)
sim = (a * b).sum(-1).mean()
return sim
def shared_step(self, batch, batch_idx):
(img_1, img_2), y = batch
# Image 1 to image 2 loss
y1, z1, h1 = self.online_network(img_1)
with torch.no_grad():
y2, z2, h2 = self.target_network(img_2)
loss_a = - 2 * self.cosine_similarity(h1, z2)
# Image 2 to image 1 loss
y1, z1, h1 = self.online_network(img_2)
with torch.no_grad():
y2, z2, h2 = self.target_network(img_1)
# L2 normalize
loss_b = - 2 * self.cosine_similarity(h1, z2)
# Final loss
total_loss = loss_a + loss_b
return loss_a, loss_b, total_loss
def training_step(self, batch, batch_idx):
loss_a, loss_b, total_loss = self.shared_step(batch, batch_idx)
# log results
result = pl.TrainResult(minimize=total_loss)
result.log_dict({'1_2_loss': loss_a, '2_1_loss': loss_b, 'train_loss': total_loss})
return result
def validation_step(self, batch, batch_idx):
loss_a, loss_b, total_loss = self.shared_step(batch, batch_idx)
# log results
result = pl.EvalResult(early_stop_on=total_loss, checkpoint_on=total_loss)
result.log_dict({'1_2_loss': loss_a, '2_1_loss': loss_b, 'train_loss': total_loss})
return result
def configure_optimizers(self):
optimizer = Adam(self.parameters(), lr=self.hparams.learning_rate, weight_decay=self.hparams.weight_decay)
optimizer = LARSWrapper(optimizer)
scheduler = LinearWarmupCosineAnnealingLR(
optimizer,
warmup_epochs=self.hparams.warmup_epochs,
max_epochs=self.hparams.max_epochs
)
return [optimizer], [scheduler]
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--online_ft', action='store_true', help='run online finetuner')
parser.add_argument('--dataset', type=str, default='cifar10', help='cifar10, imagenet2012, stl10')
(args, _) = parser.parse_known_args()
# Data
parser.add_argument('--data_dir', type=str, default='.')
parser.add_argument('--num_workers', default=0, type=int)
# optim
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--weight_decay', type=float, default=1.5e-6)
parser.add_argument('--warmup_epochs', type=float, default=10)
# Model
parser.add_argument('--meta_dir', default='.', type=str, help='path to meta.bin for imagenet')
return parser
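# A small self-contained sanity check (not part of the original module) for the
# symmetric negative-cosine objective used in shared_step() above; the tensors are
# arbitrary and only illustrate that perfectly aligned representations reach -2.
def _byol_loss_sanity_check():
    torch.manual_seed(0)
    h1, z2 = torch.randn(4, 8), torch.randn(4, 8)
    a, b = F.normalize(h1, dim=-1), F.normalize(z2, dim=-1)
    loss = -2 * (a * b).sum(-1).mean()
    # a representation compared with itself attains the minimum possible value of -2
    assert torch.isclose(-2 * (a * a).sum(-1).mean(), torch.tensor(-2.0))
    return loss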
def cli_main():
from pl_bolts.callbacks.self_supervised import SSLOnlineEvaluator
from pl_bolts.datamodules import CIFAR10DataModule, STL10DataModule, ImagenetDataModule
from pl_bolts.models.self_supervised.simclr import SimCLRTrainDataTransform, SimCLREvalDataTransform
seed_everything(1234)
parser = ArgumentParser()
# trainer args
parser = pl.Trainer.add_argparse_args(parser)
# model args
parser = BYOL.add_model_specific_args(parser)
args = parser.parse_args()
# pick data
dm = None
# init default datamodule
if args.dataset == 'cifar10':
dm = CIFAR10DataModule.from_argparse_args(args)
dm.train_transforms = SimCLRTrainDataTransform(32)
dm.val_transforms = SimCLREvalDataTransform(32)
args.num_classes = dm.num_classes
elif args.dataset == 'stl10':
dm = STL10DataModule.from_argparse_args(args)
dm.train_dataloader = dm.train_dataloader_mixed
dm.val_dataloader = dm.val_dataloader_mixed
(c, h, w) = dm.size()
dm.train_transforms = SimCLRTrainDataTransform(h)
dm.val_transforms = SimCLREvalDataTransform(h)
args.num_classes = dm.num_classes
elif args.dataset == 'imagenet2012':
dm = ImagenetDataModule.from_argparse_args(args, image_size=196)
(c, h, w) = dm.size()
dm.train_transforms = SimCLRTrainDataTransform(h)
dm.val_transforms = SimCLREvalDataTransform(h)
args.num_classes = dm.num_classes
model = BYOL(**args.__dict__)
def to_device(batch, device):
(x1, x2), y = batch
x1 = x1.to(device)
y = y.to(device)
return x1, y
# finetune in real-time
online_eval = SSLOnlineEvaluator(z_dim=2048, num_classes=dm.num_classes)
online_eval.to_device = to_device
trainer = pl.Trainer.from_argparse_args(args, max_steps=300000, callbacks=[online_eval])
trainer.fit(model, dm)
if __name__ == '__main__':
cli_main()
|
the-stack_106_29609 | SrvGroup = libstarpy._GetSrvGroup()
Service = SrvGroup._GetService("","")
#Create objects
Obj=Service._New("TestClass");
#Define functions
def Obj_PythonAdd(self,x,y) :
print("Call python function...");
return x+y;
Obj.PythonAdd = Obj_PythonAdd;
#Call java functions
def Obj_PythonPrint(self,x,y) :
print( "Value defined in java is ",self.JavaValue );
print( "Function result from java ",self.JavaAdd(x,y) );
Obj.PythonPrint = Obj_PythonPrint;
#define Attributes
Obj.PythonValue = 200; |
the-stack_106_29611 | # Copyright 2020. ThingsBoard
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bacpypes.apdu import APDU, ReadPropertyACK
from bacpypes.primitivedata import Tag
from bacpypes.constructeddata import ArrayOf
from thingsboard_gateway.connectors.bacnet.bacnet_converter import BACnetConverter, log
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
class BACnetUplinkConverter(BACnetConverter):
def __init__(self, config):
self.__config = config
def convert(self, config, data):
value = None
if isinstance(data, ReadPropertyACK):
value = self.__property_value_from_apdu(data)
if config is not None:
datatypes = {"attributes": "attributes",
"timeseries": "telemetry",
"telemetry": "telemetry"}
dict_result = {"deviceName": None, "deviceType": None, "attributes": [], "telemetry": []}
dict_result["deviceName"] = self.__config.get("deviceName", config[1].get("name", "BACnet device"))
dict_result["deviceType"] = self.__config.get("deviceType", "default")
dict_result[datatypes[config[0]]].append({config[1]["key"]: value})
else:
dict_result = value
log.debug("%r %r", self, dict_result)
return dict_result
@staticmethod
def __property_value_from_apdu(apdu: APDU):
tag_list = apdu.propertyValue.tagList
non_app_tags = [tag for tag in tag_list if tag.tagClass != Tag.applicationTagClass]
if non_app_tags:
raise RuntimeError("Value has some non-application tags")
first_tag = tag_list[0]
other_type_tags = [tag for tag in tag_list[1:] if tag.tagNumber != first_tag.tagNumber]
if other_type_tags:
raise RuntimeError("All tags must be the same type")
datatype = Tag._app_tag_class[first_tag.tagNumber]
if not datatype:
raise RuntimeError("unknown datatype")
if len(tag_list) > 1:
datatype = ArrayOf(datatype)
value = apdu.propertyValue.cast_out(datatype)
return value
|
the-stack_106_29612 | from enum import Enum
from itertools import permutations
from collections import defaultdict
with open("input1.txt","r") as f:
data = f.readlines()
dataDict = {}
for line in data:
    inputs, output = line.strip().split("=>")
    out_qty, out_name = output.strip().split(" ")
    # map each output chemical to its quantity and the list of (qty, chemical) inputs
    dataDict[out_name] = (int(out_qty),
                          [tuple(value.strip().split(" ")) for value in inputs.strip().split(",")])
for line in data:
print(line) |
the-stack_106_29613 | # https://deeplearningcourses.com/c/support-vector-machines-in-python
# https://www.udemy.com/support-vector-machines-in-python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
def getKaggleMNIST():
# MNIST data:
# column 0 is labels
# column 1-785 is data, with values 0 .. 255
# total size of CSV: (42000, 784)
train = pd.read_csv('../large_files/train.csv').values.astype(np.float32)
train = shuffle(train)
Xtrain = train[:-1000,1:]
Ytrain = train[:-1000,0].astype(np.int32)
Xtest = train[-1000:,1:]
Ytest = train[-1000:,0].astype(np.int32)
# scale the data
Xtrain /= 255.
Xtest /= 255.
# scaler = StandardScaler()
# Xtrain = scaler.fit_transform(Xtrain)
# Xtest = scaler.transform(Xtest)
return Xtrain, Ytrain, Xtest, Ytest
def get_spiral():
# Idea: radius -> low...high
# (don't start at 0, otherwise points will be "mushed" at origin)
# angle = low...high proportional to radius
# [0, 2pi/6, 4pi/6, ..., 10pi/6] --> [pi/2, pi/3 + pi/2, ..., ]
# x = rcos(theta), y = rsin(theta) as usual
radius = np.linspace(1, 10, 100)
thetas = np.empty((6, 100))
for i in range(6):
start_angle = np.pi*i / 3.0
end_angle = start_angle + np.pi / 2
points = np.linspace(start_angle, end_angle, 100)
thetas[i] = points
# convert into cartesian coordinates
x1 = np.empty((6, 100))
x2 = np.empty((6, 100))
for i in range(6):
x1[i] = radius * np.cos(thetas[i])
x2[i] = radius * np.sin(thetas[i])
# inputs
X = np.empty((600, 2))
X[:,0] = x1.flatten()
X[:,1] = x2.flatten()
# add noise
X += np.random.randn(600, 2)*0.5
# targets
Y = np.array([0]*100 + [1]*100 + [0]*100 + [1]*100 + [0]*100 + [1]*100)
return X, Y
def get_xor():
X = np.zeros((200, 2))
X[:50] = np.random.random((50, 2)) / 2 + 0.5 # (0.5-1, 0.5-1)
X[50:100] = np.random.random((50, 2)) / 2 # (0-0.5, 0-0.5)
X[100:150] = np.random.random((50, 2)) / 2 + np.array([[0, 0.5]]) # (0-0.5, 0.5-1)
X[150:] = np.random.random((50, 2)) / 2 + np.array([[0.5, 0]]) # (0.5-1, 0-0.5)
Y = np.array([0]*100 + [1]*100)
return X, Y
def get_donut():
N = 200
R_inner = 5
R_outer = 10
# distance from origin is radius + random normal
# angle theta is uniformly distributed between (0, 2pi)
R1 = np.random.randn(N//2) + R_inner
theta = 2*np.pi*np.random.random(N//2)
X_inner = np.concatenate([[R1 * np.cos(theta)], [R1 * np.sin(theta)]]).T
R2 = np.random.randn(N//2) + R_outer
theta = 2*np.pi*np.random.random(N//2)
X_outer = np.concatenate([[R2 * np.cos(theta)], [R2 * np.sin(theta)]]).T
X = np.concatenate([ X_inner, X_outer ])
Y = np.array([0]*(N//2) + [1]*(N//2))
return X, Y
def get_clouds():
N = 1000
c1 = np.array([2, 2])
c2 = np.array([-2, -2])
# c1 = np.array([0, 3])
# c2 = np.array([0, 0])
X1 = np.random.randn(N, 2) + c1
X2 = np.random.randn(N, 2) + c2
X = np.vstack((X1, X2))
Y = np.array([-1]*N + [1]*N)
return X, Y
def plot_decision_boundary(model, resolution=100, colors=('b', 'k', 'r')):
np.warnings.filterwarnings('ignore')
fig, ax = plt.subplots()
# Generate coordinate grid of shape [resolution x resolution]
# and evaluate the model over the entire space
x_range = np.linspace(model.Xtrain[:,0].min(), model.Xtrain[:,0].max(), resolution)
y_range = np.linspace(model.Xtrain[:,1].min(), model.Xtrain[:,1].max(), resolution)
grid = [[model._decision_function(np.array([[xr, yr]])) for yr in y_range] for xr in x_range]
grid = np.array(grid).reshape(len(x_range), len(y_range))
# Plot decision contours using grid and
# make a scatter plot of training data
ax.contour(x_range, y_range, grid.T, (-1, 0, 1), linewidths=(1, 1, 1),
linestyles=('--', '-', '--'), colors=colors)
ax.scatter(model.Xtrain[:,0], model.Xtrain[:,1],
c=model.Ytrain, lw=0, alpha=0.3, cmap='seismic')
# Plot support vectors (non-zero alphas)
# as circled points (linewidth > 0)
mask = model.alphas > 0.
ax.scatter(model.Xtrain[:,0][mask], model.Xtrain[:,1][mask],
c=model.Ytrain[mask], cmap='seismic')
# debug
ax.scatter([0], [0], c='black', marker='x')
plt.show()
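# Example usage sketch (not part of the original utilities): visualise one of the toy
# datasets defined above. Only the dataset helpers from this file are assumed.
def _show_xor_example():
    X, Y = get_xor()
    plt.scatter(X[:, 0], X[:, 1], c=Y, cmap='seismic', alpha=0.5)
    plt.title('XOR toy dataset')
    plt.show()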
|
the-stack_106_29615 | import re
import codecs
import nltk
import pymorphy2
s = '''
BEGIN:VCARD
VERSION:2.1
N;CHARSET=UTF-8;ENCODING=QUOTED-PRINTABLE:=D0=A1=D0=B0=D0=BD=D1=82=D0=B5=D1=85=D0=BD=D0=B8=D0=BA;=D0=96=D0=B5=D0=BD=D1=8F;;;
FN;CHARSET=UTF-8;ENCODING=QUOTED-PRINTABLE:=D0=96=D0=B5=D0=BD=D1=8F=20=D0=A1=D0=B0=D0=BD=D1=82=D0=B5=D1=85=D0=BD=D0=B8=D0=BA
TEL;PREF:+791123123
END:VCARD
N is split into fields on ';' and written into the contact name: first name;last name;patronymic
N is written into the structured name fields of the contact
FN is split into fields on the space character (0x20) and becomes the contact details (the display string):
last name ("Сантехник") =D0=A1=D0=B0=D0=BD=D1=82=D0=B5=D1=85=D0=BD=D0=B8=D0=BA
space =20
first name ("Женя") =D0=96=D0=B5=D0=BD=D1=8F
It is displayed as
"First name - Женя, Last name - Сантехник"
BEGIN:VCARD
VERSION:3.0
N:FirstName;LastName;;;
FN:First Last
TEL;TYPE=CELL;TYPE=PREF:9999999999
END:VCARD
displayed name "мих вик шепиль" comes from N;CHARSET=UTF-8;ENCODING=QUOTED-PRINTABLE:=D0=92=D0=B8=D0=BA;=D0=9C=D0=B8=D1=85;=D0=A8=D0=B5=D0=BF=D0=B8=D0=BB=D1=8C;;
detail string "тестовый контакт" comes from FN;CHARSET=UTF-8;ENCODING=QUOTED-PRINTABLE:=D0=A2=D0=B5=D1=81=D1=82=D0=BE=D0=B2=D1=8B=D0=B9=20=D0=BA=D0=BE=D0=BD=D1=82=D0=B0=D0=BA=D1=82
On a Xiaomi phone such a contact is displayed as follows:
In the contact list - "мих вик шепиль"
In the contact details - "тестовый контакт"
With the details expanded - "мих вик шепиль"
a = 'ф'
b = a.encode('utf-8')
c = f'={str(b)[4:6].upper()}={str(b)[8:10]}'
'''
'''
d0,d1,d2,d3 - Cyrillic (leading bytes of UTF-8-encoded Cyrillic characters)
'''
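# Small illustration (not used by the classes below) of the =XX hex notation described
# in the header above: '=D0=96=D0=B5=D0=BD=D1=8F' is the UTF-8 byte sequence for 'Женя'.
def _decode_vcf_hex_example():
    encoded = '=D0=96=D0=B5=D0=BD=D1=8F'
    raw = bytes(int(part, 16) for part in encoded.strip('=').split('='))
    return raw.decode('utf-8')  # -> 'Женя'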
class WordChecker:
def __init__(self, in_str):
self.in_str = in_str
@staticmethod
def last_name_checker(in_str):
in_str = in_str.strip()
last_names = ['ов', 'ова', 'ев', 'ева', 'ский', 'ская', 'ин', 'ина', 'цко', 'шко']
black_list = ['Ирина', 'Марина']
if in_str in black_list:
return False
elif in_str[-2:] in last_names or in_str[-3:] in last_names or in_str[-4:] in last_names:
return True
@staticmethod
def par_name_checker(in_str):
in_str = in_str.strip()
last_names = ['евич', 'овна', 'ович', 'евна']
black_list = []
if in_str in black_list:
return False
        elif in_str[-4:] in last_names:
            return True
class VCFFile:
cyrillic = ['D0', 'D1', 'D2', 'D3', ';=D0', ';=D1', ';=D2', ';=D3']
order = {0: 'Имя', 1: 'Фамилия', 2: 'Отчество', 3: 'Обращение', 4: 'Прочее'}
fn_order = {0: 'Фамилия', 1: 'Имя', 2: 'Отчество'}
n_header = 'N;CHARSET=UTF-8;ENCODING=QUOTED-PRINTABLE:'
fn_header = 'FN;CHARSET=UTF-8;ENCODING=QUOTED-PRINTABLE:'
def __init__(self, in_file, out_file):
self.in_f = open(in_file, 'r')
self.out_f = open(out_file, 'w')
self.card = []
self.new_card = []
self.analyzer = dict()
self.flush_analyzer()
def flush_analyzer(self):
self.analyzer = {
'Original': {
'Имя': None,
'Фамилия': None,
'Отчество': None,
'Обращение': None,
'Прочее': None
},
'Found': {
'Имя': None,
'Фамилия': None,
'Отчество': None,
'Обращение': None,
'Прочее': None,
'name_field': None,
'surn_field': None,
'parn_field': None,
'mr_field': None,
'other_field': None
},
'Final': {
'Имя': None,
'Фамилия': None,
'Отчество': None,
'Обращение': None,
'Прочее': None
},
'Modified': False
}
def debug(self, in_str):
        # note: guarding on `self.debug` was always truthy (it resolved to this bound
        # method), so the message is simply printed unconditionally
        print(f'DEBUG: {in_str}')
def pretty_print(self):
# '''
if self.analyzer['Modified']:
self.debug(f'MOD: {self.analyzer["Original"]} {self.analyzer["Final"]}')
else:
self.debug(f'UNMOD: {self.analyzer["Original"]} {self.analyzer["Final"]}')
# '''
# print(self.card)
def card_fixer(self):
"""
[
'BEGIN:VCARD',
'VERSION:2.1',
'N;CHARSET=UTF-8;ENCODING=QUOTED-PRINTABLE:;=D0=AA=D1=8A=D1=A7=D0=B5=D0=B2;;;',
'FN;CHARSET=UTF-8;ENCODING=QUOTED-PRINTABLE:=D0=AA=D1=8A=D1=A7=D0=B5=D0=B2',
'TEL;CELL:+734592700',
'END:VCARD'
]
        Positions 2 and 3 in this list need to be rewritten
"""
for line in self.card:
if re.match(r'^N;CHARSET', line):
                # Contact display fields (the N record)
self.string_decoder(line[42:])
self.analyzer_function()
self.pretty_print()
self.new_card = self.card[:]
self.new_card[2], self.new_card[3] = self.vcf_encoder()
self.new_card.append('\n')
self.debug(f'New card: {self.new_card}')
def analyzer_function(self):
for key in self.analyzer['Original'].keys():
if self.analyzer['Original'][key]:
# Если какое-то поле заполнено в оригинале
if WordChecker.last_name_checker(self.analyzer['Original'][key]):
                    # Looks like this is a last name
if key == 'Фамилия':
pass
else:
                        # Looks like a last name stored in the wrong field
self.analyzer['Found']['Фамилия'] = self.analyzer['Original'][key]
self.analyzer['Found']['surn_field'] = key
self.analyzer['Modified'] = True
if self.analyzer['Found'].get('Фамилия'):
            # The analyzer found a last name in the data
if self.analyzer['Original'].get('Фамилия'):
                # It was stored under another field, and the "Фамилия" (last name) field held something else
self.analyzer['Final']['Фамилия'] = self.analyzer['Found']['Фамилия']
self.analyzer['Final'][self.analyzer['Found']['surn_field']] = self.analyzer['Original']['Фамилия']
else:
                # It was stored under another field, which was the only field filled in
self.analyzer['Final']['Фамилия'] = self.analyzer['Found']['Фамилия']
self.analyzer['Final'][self.analyzer['Found']['surn_field']] = None
def vcf_encoder(self):
"""
{'Имя': None, 'Фамилия': 'Петров', 'Отчество': None, 'Обращение': None, 'Прочее': None}
"""
n_result = []
fn_result = f'{self.fn_header}'
for key in self.analyzer['Final']:
# self.debug(n_result)
if self.analyzer['Final'][key]:
# self.debug(f'{key, self.analyzer["Final"][key]}')
vcf_encoded_value = self.string_encoder(self.analyzer['Final'][key])
n_result.append(vcf_encoded_value)
fn_result += f'{vcf_encoded_value}=20'
else:
# self.debug(f'{key} None')
n_result.append('')
        # drop the trailing separator (str.rstrip would strip characters, not the substring)
        if fn_result.endswith('=20'):
            fn_result = fn_result[:-3]
n_result = ';'.join(n_result)
n_result = f'{self.n_header}{n_result}'
return n_result, fn_result
def string_encoder(self, in_str):
result = ''
for letter in in_str:
if letter == ' ':
result += '=20'
            elif letter.isdigit():
result += f'={int(letter) + 30}'
else:
encoded_str = letter.encode('utf-8')
vcf_string = f'={str(encoded_str)[4:6].upper()}={str(encoded_str)[8:10]}'
result += vcf_string.upper()
self.debug(f'{in_str}: {result}')
return result
def string_decoder(self, in_str):
elements = in_str.split(';')
for i in range(len(elements)):
if elements[i]:
try:
vcard_data = codecs.decode(''.join(elements[i].strip('=').strip().split('=')), 'hex').decode('utf-8')
self.analyzer['Original'][VCFFile.order[i]] = vcard_data
self.analyzer['Final'][VCFFile.order[i]] = vcard_data
except Exception as e:
self.debug(f'ERROR: {e} while processing {in_str}')
def run(self):
try:
lines = self.in_f.read().split('\n')
for line in lines:
if line == '':
continue
if re.match(r'^;+$', line):
continue
if line.startswith('='):
line = line.lstrip('=')
self.card[-1] += line
continue
if 1 < len(line) < 6:
self.card[-1] += line
continue
self.card.append(line)
if 'END:VCARD' in line:
self.card_fixer()
self.out_f.write('\n'.join(self.new_card))
self.card = []
self.new_card = []
self.flush_analyzer()
finally:
self.in_f.close()
self.out_f.close()
if __name__ == '__main__':
a = '/media/storage/egk/Pile/Contacts/Copy_Original_Контакты.vcf'
# a = '/media/storage/egk/Pile/Contacts/test2.vcf'
b = '/media/storage/egk/Pile/Contacts/Copy_Original_Контакты-decoded3.vcf'
v = VCFFile(a, b)
v.run()
|
the-stack_106_29616 |
from PyQt5.QtWidgets import QDialog, QTabWidget, QVBoxLayout, QPushButton, QHBoxLayout, QWidget
from PyQt5.QtCore import Qt
from pyqt_transparent_timer.settingsDialog.timerSettingsWidget.timerSettingsWidget import TimerSettingsWidget
class SettingsDialog(QDialog):
def __init__(self):
super().__init__()
self.__initUi()
def __initUi(self):
self.setWindowTitle('Settings')
self.setWindowFlags(Qt.WindowMinMaxButtonsHint | Qt.WindowCloseButtonHint)
self.__timerSettingsWidget = TimerSettingsWidget()
topWidget = QTabWidget()
topWidget.addTab(self.__timerSettingsWidget, 'Timer')
self.__okBtn = QPushButton()
self.__okBtn.clicked.connect(self.accept)
self.__okBtn.setText('OK')
closeBtn = QPushButton()
closeBtn.clicked.connect(self.close)
closeBtn.setText('Cancel')
lay = QHBoxLayout()
lay.addWidget(self.__okBtn)
lay.addWidget(closeBtn)
lay.setContentsMargins(0, 0, 0, 0)
bottomWidget = QWidget()
bottomWidget.setLayout(lay)
lay = QVBoxLayout()
lay.addWidget(topWidget)
lay.addWidget(bottomWidget)
self.setLayout(lay)
def __ok(self):
self.accept()
def get_time(self):
return self.__timerSettingsWidget.get_time() |
the-stack_106_29618 | import json
import uuid
import pytest
from app.api.mines.mine.models.mine import Mine
from app.api.permits.permit.models.permit import Permit
from app.api.permits.permit_amendment.models.permit_amendment import PermitAmendment
from app.extensions import db
from tests.constants import DUMMY_USER_KWARGS
@pytest.fixture(scope="function")
def setup_info(test_client):
test_permit_no = 'mx-test-231'
mine_1 = Mine.create_mine('1234567', 'TestMine', True, 'SW', DUMMY_USER_KWARGS)
mine_2 = Mine.create_mine('7654321', 'TestingMine', True, 'NW', DUMMY_USER_KWARGS)
mine_1.save()
mine_2.save()
permit = Permit.create(mine_2.mine_guid, test_permit_no, 'O', DUMMY_USER_KWARGS)
permit.save()
MINE_1_GUID = str(mine_1.mine_guid)
MINE_2_GUID = str(mine_2.mine_guid)
MINE_2_PERMIT_GUID = str(permit.permit_guid)
NON_EXISTENT_GUID = '8ef23184-02c4-4472-a912-380b5a0d9cae'
yield dict(
mine_1_guid=MINE_1_GUID,
mine_2_guid=MINE_2_GUID,
mine_2_permit_guid=MINE_2_PERMIT_GUID,
bad_guid=NON_EXISTENT_GUID,
in_use_permit_no=test_permit_no)
db.session.query(PermitAmendment).delete()
db.session.commit()
db.session.query(Permit).delete()
db.session.commit()
db.session.delete(mine_1)
db.session.delete(mine_2)
db.session.commit()
# GET
def test_get_permit_not_found(test_client, setup_info, auth_headers):
get_resp = test_client.get(
'/permits/' + setup_info.get('bad_guid'), headers=auth_headers['full_auth_header'])
get_data = json.loads(get_resp.data.decode())
assert get_data == {'error': {'status': 404, 'message': 'Permit not found'}}
assert get_resp.status_code == 404
def test_get_permit(test_client, setup_info, auth_headers):
get_resp = test_client.get(
'/permits/' + setup_info.get('mine_2_permit_guid'),
headers=auth_headers['full_auth_header'])
get_data = json.loads(get_resp.data.decode())
assert get_data['permit_guid'] == setup_info.get('mine_2_permit_guid')
assert get_resp.status_code == 200
def test_get_with_permit_no(test_client, setup_info, auth_headers):
get_resp = test_client.get(
'/permits?permit_no=' + setup_info.get('in_use_permit_no'),
headers=auth_headers['full_auth_header'])
get_data = json.loads(get_resp.data.decode())
assert get_resp.status_code == 200
assert get_data.get('permit_no') == setup_info.get('in_use_permit_no')
#Create
def test_post_permit(test_client, setup_info, auth_headers):
PERMIT_NO = 'mx-test-999'
data = {
'mine_guid': setup_info.get('mine_1_guid'),
'permit_no': PERMIT_NO,
'permit_status_code': 'O',
'received_date': '1999-12-12',
'issue_date': '1999-12-21',
'authorization_end_date': '2012-12-02'
}
post_resp = test_client.post('/permits', headers=auth_headers['full_auth_header'], json=data)
post_data = json.loads(post_resp.data.decode())
assert post_resp.status_code == 200
assert post_data.get('mine_guid') == setup_info.get('mine_1_guid')
assert post_data.get('permit_no') == PERMIT_NO
assert len(post_data.get('amendments')) == 1
def test_post_permit_bad_mine_guid(test_client, setup_info, auth_headers):
data = {'mine_guid': setup_info.get('bad_guid')}
post_resp = test_client.post('/permits', headers=auth_headers['full_auth_header'], json=data)
assert post_resp.status_code == 404
def test_post_permit_with_duplicate_permit_no(test_client, setup_info, auth_headers):
permit = Permit.find_by_permit_guid(setup_info.get('mine_2_permit_guid'))
data = {'mine_guid': setup_info.get('mine_1_guid'), 'permit_no': permit.permit_no}
post_resp = test_client.post('/permits', headers=auth_headers['full_auth_header'], json=data)
assert post_resp.status_code == 400
def test_post_with_permit_guid(test_client, setup_info, auth_headers):
PERMIT_NO = 'mx-test-999'
data = {
'mine_guid': setup_info.get('mine_1_guid'),
'permit_no': PERMIT_NO,
'permit_status_code': 'O',
'received_date': '1999-12-12',
'issue_date': '1999-12-21',
'authorization_end_date': '2012-12-02'
}
post_resp = test_client.post(
'/permits/' + setup_info.get('mine_2_permit_guid'),
headers=auth_headers['full_auth_header'],
data=data)
assert post_resp.status_code == 400
#Put
def test_put_permit(test_client, setup_info, auth_headers):
permit_guid = setup_info.get('mine_2_permit_guid')
permit = Permit.find_by_permit_guid(permit_guid)
old_permit_status = permit.permit_status_code
data = {'permit_status_code': 'C'}
put_resp = test_client.put(
'/permits/' + permit_guid, headers=auth_headers['full_auth_header'], json=data)
put_data = json.loads(put_resp.data.decode())
assert put_resp.status_code == 200
assert put_data.get('permit_status_code') == 'C'
assert put_data.get('permit_status_code') != old_permit_status
def test_put_permit_bad_permit_guid(test_client, setup_info, auth_headers):
permit_guid = setup_info.get('bad_guid')
data = {'permit_status_code': 'C'}
put_resp = test_client.put(
'/permits/' + permit_guid, headers=auth_headers['full_auth_header'], json=data)
assert put_resp.status_code == 404
|
the-stack_106_29620 | import os
import sys
from pathlib import Path
from typing import Optional, Union, Tuple
import sumo_rl
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("Please declare the environment variable 'SUMO_HOME'")
import traci
import sumolib
import gym
from gym.envs.registration import EnvSpec
import numpy as np
import pandas as pd
from .traffic_signal import TrafficSignal
from gym.utils import EzPickle, seeding
from pettingzoo import AECEnv
from pettingzoo.utils import agent_selector, wrappers
from pettingzoo.utils.conversions import parallel_wrapper_fn
LIBSUMO = 'LIBSUMO_AS_TRACI' in os.environ
def env(**kwargs):
env = SumoEnvironmentPZ(**kwargs)
env = wrappers.AssertOutOfBoundsWrapper(env)
env = wrappers.OrderEnforcingWrapper(env)
return env
parallel_env = parallel_wrapper_fn(env)
class SumoEnvironment(gym.Env):
"""
SUMO Environment for Traffic Signal Control
:param net_file: (str) SUMO .net.xml file
:param route_file: (str) SUMO .rou.xml file
:param out_csv_name: (Optional[str]) name of the .csv output with simulation results. If None no output is generated
:param use_gui: (bool) Wheter to run SUMO simulation with GUI visualisation
:param virtual_display: (Optional[Tuple[int,int]]) Resolution of a virtual display for rendering
:param begin_time: (int) The time step (in seconds) the simulation starts
:param num_seconds: (int) Number of simulated seconds on SUMO. The time in seconds the simulation must end.
:param max_depart_delay: (int) Vehicles are discarded if they could not be inserted after max_depart_delay seconds
:param delta_time: (int) Simulation seconds between actions
:param min_green: (int) Minimum green time in a phase
:param max_green: (int) Max green time in a phase
:single_agent: (bool) If true, it behaves like a regular gym.Env. Else, it behaves like a MultiagentEnv (https://github.com/ray-project/ray/blob/master/python/ray/rllib/env/multi_agent_env.py)
:sumo_seed: (int/string) Random seed for sumo. If 'random' it uses a randomly chosen seed.
:fixed_ts: (bool) If true, it will follow the phase configuration in the route_file and ignore the actions.
:sumo_warnings: (bool) If False, remove SUMO warnings in the terminal
"""
CONNECTION_LABEL = 0 # For traci multi-client support
def __init__(
self,
net_file: str,
route_file: str,
out_csv_name: Optional[str] = None,
use_gui: bool = False,
virtual_display: Optional[Tuple[int,int]] = None,
begin_time: int = 0,
num_seconds: int = 20000,
max_depart_delay: int = 100000,
time_to_teleport: int = -1,
delta_time: int = 5,
yellow_time: int = 2,
min_green: int = 5,
max_green: int = 50,
single_agent: bool = False,
sumo_seed: Union[str,int] = 'random',
fixed_ts: bool = False,
sumo_warnings: bool = True,
):
self._net = net_file
self._route = route_file
self.use_gui = use_gui
if self.use_gui:
self._sumo_binary = sumolib.checkBinary('sumo-gui')
else:
self._sumo_binary = sumolib.checkBinary('sumo')
self.virtual_display = virtual_display
assert delta_time > yellow_time, "Time between actions must be at least greater than yellow time."
self.begin_time = begin_time
self.sim_max_time = num_seconds
self.delta_time = delta_time # seconds on sumo at each step
self.max_depart_delay = max_depart_delay # Max wait time to insert a vehicle
self.time_to_teleport = time_to_teleport
self.min_green = min_green
self.max_green = max_green
self.yellow_time = yellow_time
self.single_agent = single_agent
self.sumo_seed = sumo_seed
self.fixed_ts = fixed_ts
self.sumo_warnings = sumo_warnings
self.label = str(SumoEnvironment.CONNECTION_LABEL)
SumoEnvironment.CONNECTION_LABEL += 1
self.sumo = None
if LIBSUMO:
traci.start([sumolib.checkBinary('sumo'), '-n', self._net]) # Start only to retrieve traffic light information
conn = traci
else:
traci.start([sumolib.checkBinary('sumo'), '-n', self._net], label='init_connection'+self.label)
conn = traci.getConnection('init_connection'+self.label)
self.ts_ids = list(conn.trafficlight.getIDList())
self.traffic_signals = {ts: TrafficSignal(self,
ts,
self.delta_time,
self.yellow_time,
self.min_green,
self.max_green,
self.begin_time,
conn) for ts in self.ts_ids}
conn.close()
self.vehicles = dict()
self.reward_range = (-float('inf'), float('inf'))
self.metadata = {}
self.spec = EnvSpec('SUMORL-v0')
self.run = 0
self.metrics = []
self.out_csv_name = out_csv_name
self.observations = {ts: None for ts in self.ts_ids}
self.rewards = {ts: None for ts in self.ts_ids}
def _start_simulation(self):
sumo_cmd = [self._sumo_binary,
'-n', self._net,
'-r', self._route,
'--max-depart-delay', str(self.max_depart_delay),
'--waiting-time-memory', '10000',
'--time-to-teleport', str(self.time_to_teleport)]
if self.begin_time > 0:
sumo_cmd.append('-b {}'.format(self.begin_time))
if self.sumo_seed == 'random':
sumo_cmd.append('--random')
else:
sumo_cmd.extend(['--seed', str(self.sumo_seed)])
if not self.sumo_warnings:
sumo_cmd.append('--no-warnings')
if self.use_gui:
sumo_cmd.extend(['--start', '--quit-on-end'])
if self.virtual_display is not None:
sumo_cmd.extend(['--window-size', f'{self.virtual_display[0]},{self.virtual_display[1]}'])
from pyvirtualdisplay.smartdisplay import SmartDisplay
print("Creating a virtual display.")
self.disp = SmartDisplay(size=self.virtual_display)
self.disp.start()
print("Virtual display started.")
if LIBSUMO:
traci.start(sumo_cmd)
self.sumo = traci
else:
traci.start(sumo_cmd, label=self.label)
self.sumo = traci.getConnection(self.label)
if self.use_gui:
self.sumo.gui.setSchema(traci.gui.DEFAULT_VIEW, "real world")
def reset(self):
if self.run != 0:
self.close()
self.save_csv(self.out_csv_name, self.run)
self.run += 1
self.metrics = []
self._start_simulation()
self.traffic_signals = {ts: TrafficSignal(self,
ts,
self.delta_time,
self.yellow_time,
self.min_green,
self.max_green,
self.begin_time,
self.sumo) for ts in self.ts_ids}
self.vehicles = dict()
if self.single_agent:
return self._compute_observations()[self.ts_ids[0]]
else:
return self._compute_observations()
@property
def sim_step(self):
"""
Return current simulation second on SUMO
"""
return self.sumo.simulation.getTime()
def step(self, action):
# No action, follow fixed TL defined in self.phases
if action is None or action == {}:
for _ in range(self.delta_time):
self._sumo_step()
else:
self._apply_actions(action)
self._run_steps()
observations = self._compute_observations()
rewards = self._compute_rewards()
dones = self._compute_dones()
self._compute_info()
if self.single_agent:
return observations[self.ts_ids[0]], rewards[self.ts_ids[0]], dones['__all__'], {}
else:
return observations, rewards, dones, {}
def _run_steps(self):
time_to_act = False
while not time_to_act:
self._sumo_step()
for ts in self.ts_ids:
self.traffic_signals[ts].update()
if self.traffic_signals[ts].time_to_act:
time_to_act = True
def _apply_actions(self, actions):
"""
Set the next green phase for the traffic signals
:param actions: If single-agent, actions is an int between 0 and self.num_green_phases (next green phase)
If multiagent, actions is a dict {ts_id : greenPhase}
"""
if self.single_agent:
if self.traffic_signals[self.ts_ids[0]].time_to_act:
self.traffic_signals[self.ts_ids[0]].set_next_phase(actions)
else:
for ts, action in actions.items():
if self.traffic_signals[ts].time_to_act:
self.traffic_signals[ts].set_next_phase(action)
def _compute_dones(self):
dones = {ts_id: False for ts_id in self.ts_ids}
dones['__all__'] = self.sim_step > self.sim_max_time
return dones
def _compute_info(self):
info = self._compute_step_info()
self.metrics.append(info)
def _compute_observations(self):
self.observations.update({ts: self.traffic_signals[ts].compute_observation() for ts in self.ts_ids if self.traffic_signals[ts].time_to_act})
return {ts: self.observations[ts].copy() for ts in self.observations.keys() if self.traffic_signals[ts].time_to_act}
def _compute_rewards(self):
self.rewards.update({ts: self.traffic_signals[ts].compute_reward() for ts in self.ts_ids if self.traffic_signals[ts].time_to_act})
return {ts: self.rewards[ts] for ts in self.rewards.keys() if self.traffic_signals[ts].time_to_act}
@property
def observation_space(self):
return self.traffic_signals[self.ts_ids[0]].observation_space
@property
def action_space(self):
return self.traffic_signals[self.ts_ids[0]].action_space
def observation_spaces(self, ts_id):
return self.traffic_signals[ts_id].observation_space
def action_spaces(self, ts_id):
return self.traffic_signals[ts_id].action_space
def _sumo_step(self):
self.sumo.simulationStep()
def _compute_step_info(self):
return {
'step_time': self.sim_step,
'reward': self.traffic_signals[self.ts_ids[0]].last_reward,
'total_stopped': sum(self.traffic_signals[ts].get_total_queued() for ts in self.ts_ids),
'total_wait_time': sum(sum(self.traffic_signals[ts].get_waiting_time_per_lane()) for ts in self.ts_ids)
}
def close(self):
if self.sumo is None:
return
if not LIBSUMO:
traci.switch(self.label)
traci.close()
self.sumo = None
def __del__(self):
self.close()
def render(self, mode='human'):
if self.virtual_display:
#img = self.sumo.gui.screenshot(traci.gui.DEFAULT_VIEW,
# f"temp/img{self.sim_step}.jpg",
# width=self.virtual_display[0],
# height=self.virtual_display[1])
img = self.disp.grab()
if mode == 'rgb_array':
return np.array(img)
return img
def save_csv(self, out_csv_name, run):
if out_csv_name is not None:
df = pd.DataFrame(self.metrics)
Path(Path(out_csv_name).parent).mkdir(parents=True, exist_ok=True)
df.to_csv(out_csv_name + '_conn{}_run{}'.format(self.label, run) + '.csv', index=False)
# Below functions are for discrete state space
def encode(self, state, ts_id):
phase = int(np.where(state[:self.traffic_signals[ts_id].num_green_phases] == 1)[0])
min_green = state[self.traffic_signals[ts_id].num_green_phases]
density_queue = [self._discretize_density(d) for d in state[self.traffic_signals[ts_id].num_green_phases + 1:]]
# tuples are hashable and can be used as key in python dictionary
return tuple([phase, min_green] + density_queue)
def _discretize_density(self, density):
return min(int(density*10), 9)
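# Quick reference (kept outside the class, purely illustrative) for the density
# discretisation used by SumoEnvironment.encode(): values in [0, 1] map onto bins 0..9.
def _discretisation_example():
    discretise = lambda density: min(int(density * 10), 9)
    return [discretise(d) for d in (0.0, 0.14, 0.5, 0.99, 1.0)]  # -> [0, 1, 5, 9, 9]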
class SumoEnvironmentPZ(AECEnv, EzPickle):
metadata = {'render.modes': ['human', 'rgb_array'], 'name': "sumo_rl_v0"}
def __init__(self, **kwargs):
EzPickle.__init__(self, **kwargs)
self._kwargs = kwargs
self.seed()
self.env = SumoEnvironment(**self._kwargs)
self.agents = self.env.ts_ids
self.possible_agents = self.env.ts_ids
self._agent_selector = agent_selector(self.agents)
self.agent_selection = self._agent_selector.reset()
# spaces
self.action_spaces = {a: self.env.action_spaces(a) for a in self.agents}
self.observation_spaces = {a: self.env.observation_spaces(a) for a in self.agents}
# dicts
self.rewards = {a: 0 for a in self.agents}
self.dones = {a: False for a in self.agents}
self.infos = {a: {} for a in self.agents}
def seed(self, seed=None):
self.randomizer, seed = seeding.np_random(seed)
def reset(self):
self.env.reset()
self.agents = self.possible_agents[:]
self.agent_selection = self._agent_selector.reset()
self.rewards = {agent: 0 for agent in self.agents}
self._cumulative_rewards = {agent: 0 for agent in self.agents}
self.dones = {agent: False for agent in self.agents}
self.infos = {agent: {} for agent in self.agents}
def observe(self, agent):
obs = self.env.observations[agent].copy()
return obs
def state(self):
raise NotImplementedError('Method state() currently not implemented.')
def close(self):
self.env.close()
def render(self, mode='human'):
return self.env.render(mode)
def save_csv(self, out_csv_name, run):
self.env.save_csv(out_csv_name, run)
def step(self, action):
if self.dones[self.agent_selection]:
return self._was_done_step(action)
agent = self.agent_selection
if not self.action_spaces[agent].contains(action):
raise Exception('Action for agent {} must be in Discrete({}).'
'It is currently {}'.format(agent, self.action_spaces[agent].n, action))
self.env._apply_actions({agent: action})
if self._agent_selector.is_last():
self.env._run_steps()
self.env._compute_observations()
self.rewards = self.env._compute_rewards()
self.env._compute_info()
else:
self._clear_rewards()
done = self.env._compute_dones()['__all__']
self.dones = {a : done for a in self.agents}
self.agent_selection = self._agent_selector.next()
self._cumulative_rewards[agent] = 0
self._accumulate_rewards()
|
the-stack_106_29621 | from django.shortcuts import render,redirect
from django.http import HttpResponse
from .models import Signup1
from .models import Intern
from django.contrib import messages
from django.contrib.auth.decorators import login_required
import tablib
# Create your views here.
@login_required
def home(request):
if request.user.username == "hod":
context=Signup1.objects.all()
return render(request,'web_app/home.html',{'context':context})
else:
user=request.user.get_full_name()
context=Signup1.objects.all().filter(full_name=user)
return render(request,'web_app/home.html',{'context':context})
@login_required
def homepage(request):
context=Intern.objects.all()
return render(request,'web_app/homepage.html',{'context':context})
@login_required
def application(request):
return render(request,'web_app/application.html')
def apphome(request):
return render(request,'web_app/apphome.html')
@login_required
def createpost(request):
full_name = request.POST["full_name"]
if full_name==request.user.get_full_name():
gender = request.POST["gender"]
phone_no = request.POST["phone_no"]
email_id= request.POST["email_id"]
internship_id= request.POST["internship_id"]
resume_file=request.FILES["resume_file"]
post = Signup1(full_name=full_name,gender=gender,phone_no=phone_no,email=email_id,internship_id=internship_id,resume_file=resume_file)
messages.success(request, f'Applied for the internship succesfully')
post.save()
return render(request,'web_app/applied.html')
else:
messages.warning(request, f'Full name does not match')
return render(request,'web_app/application.html')
@login_required
def profile(request):
return render(request, 'users/profile.html')
@login_required
def export_excel(request):
headers = ('Student Name', 'Gender','Email Id','Phone No','Company Name','Job Profile')
data = []
data = tablib.Dataset(*data, headers=headers)
entries = Signup1.objects.all().filter(status="Accepted")
for entry in entries:
data.append((entry.full_name,entry.gender,entry.email,entry.phone_no,entry.internship.comp_name,entry.internship.job))
response = HttpResponse(data.xls, content_type='application/vnd.ms-excel;charset=utf-8')
response['Content-Disposition'] = "attachment; filename=export.xls"
return response |
the-stack_106_29623 | # Copyright (c) 2012-2013, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
service_name = 'AWS XRay'
prefix = 'xray'
class Action(BaseAction):
def __init__(self, action=None):
sup = super(Action, self)
sup.__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource='', region='', account=''):
sup = super(ARN, self)
sup.__init__(service=prefix, resource=resource, region=region,
account=account)
BatchGetTraces = Action('BatchGetTraces')
CreateGroup = Action('CreateGroup')
CreateSamplingRule = Action('CreateSamplingRule')
DeleteGroup = Action('DeleteGroup')
DeleteSamplingRule = Action('DeleteSamplingRule')
GetEncryptionConfig = Action('GetEncryptionConfig')
GetGroup = Action('GetGroup')
GetGroups = Action('GetGroups')
GetSamplingRules = Action('GetSamplingRules')
GetSamplingStatisticSummaries = Action('GetSamplingStatisticSummaries')
GetSamplingTargets = Action('GetSamplingTargets')
GetServiceGraph = Action('GetServiceGraph')
GetTimeSeriesServiceStatistics = Action('GetTimeSeriesServiceStatistics')
GetTraceGraph = Action('GetTraceGraph')
GetTraceSummaries = Action('GetTraceSummaries')
PutEncryptionConfig = Action('PutEncryptionConfig')
PutTelemetryRecords = Action('PutTelemetryRecords')
PutTraceSegments = Action('PutTraceSegments')
UpdateGroup = Action('UpdateGroup')
UpdateSamplingRule = Action('UpdateSamplingRule')
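# Example usage (illustrative only): these Action objects are normally placed inside an
# IAM policy statement. The Statement/Policy/Allow helpers are assumed to live in the
# same 'aws' module imported above; adjust the import if the package layout differs.
#
#   from aws import Allow, Policy, Statement
#
#   policy = Policy(Statement=[
#       Statement(Effect=Allow,
#                 Action=[PutTraceSegments, PutTelemetryRecords],
#                 Resource=['*'])])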
|
the-stack_106_29626 | #! /usr/bin/env python
# Copyright (C) 2016, Michael F. Plass
from __future__ import print_function
A = 10
B = 11
C = 12
D = 13
E = 14
F = 15
class Assembly:
inst = None
cur = 0
passno = 0
changed = False
symtab = None
commenton = 0
comments = None
def __init__(self):
self.inst = []
self.symtab = dict()
self.comments = []
def v(self, sym):
'''Get the current value of a symbol
sym is the symbolic name
'''
ans = self.symtab.get(sym, None)
if ans == None:
ans = self.cur + 3
self.changed = True
self.symtab[sym] = ans
return ans
def equate(self, sym, val):
'''Define the value of a symbol
sym is the symbolic name
val is the value
'''
if val != self.symtab.get(sym, None):
self.symtab[sym] = (val << 0)
self.changed = True
def L(self, sym):
'''Define a symbolic label
sym is the symbolic name
'''
self.equate(sym, self.cur)
def C(self, commentary):
'''Comment about an instruction
'''
while len(self.comments) <= self.commenton:
self.comments.append('')
self.comments[self.commenton] = commentary
def dc(self, x):
'''Define numeric constant x
>>> import lasm
>>> a = lasm.Assembly()
>>> a.dc(0x1234)
>>> a.dc(0x5678)
>>> print(a.inst)
[4660, 22136]
'''
assert(x == int(x) and -32768 <= x <= 0xFFFF)
x = x & 0xFFFF
if self.cur == len(self.inst):
self.inst.append(x)
            self.changed = True
elif self.inst[self.cur] != x:
self.inst[self.cur] = x
            self.changed = True
self.commenton = self.cur
self.cur = self.cur + 1;
if (len(self.comments) > self.commenton):
self.comments[self.commenton] = ''
def addi(self, val, s, d):
'''Add immediate instruction
val is the immediate value (signed, 4 bits)
s is the source register
d is the destination register
'''
self.genop(val, 0, s, d)
def mov(self, s, d):
'''Move register instruction
s is the source register
d is the destination register
'''
self.genop(0, 0, s, d)
def addA(self, s, d):
'''Add A and specified register
s is the addend register
d is the destination register
'''
self.genop(0, 6, s, d)
def subB(self, s, d):
'''Subtract B from the specified register
s is the source register
d is the destination register
'''
self.genop(1, 5, s, d)
def clr(self, d):
'''Clear the destination register d
'''
self.genop(1, 5, B, d)
def AnotB(self, d):
'''Bitwise A & ~B operation
This assumes that register 0 contains 0.
d is the destination register
'''
self.genop(0, 7, 0, d)
def orAB(self, d):
'''Bitwise OR operation
Computes (A | B)
d is the destination register
'''
self.genop(0, 7, B, d)
def jmp(self, loc):
'''Jump
Register C is altered if a short jump cannot be used
loc is an absolute location, or a symbolic name
'''
if type(loc) == str:
loc = self.v(loc)
delta = loc - (self.cur + 1)
if (-8 <= delta < 8):
self.addi(delta, F, F)
else:
self.addi(1, F, C)
self.addi(0, D, F)
self.dc(loc)
def call(self, loc):
'''Call a subroutine
Register E is used to hold the link address.
Register C may be altered.
loc is a numeric value or a symbolic name
'''
# Not clear the short form of this is that useful.
# A variant that used C+1 as the return address would
# save an instruction.
if type(loc) == str:
loc = self.v(loc)
delta = loc - (self.cur + 2)
if (-8 <= delta < 8):
self.addi(1, F, E)
self.addi(delta, F, F)
else:
self.addi(3, F, E)
self.addi(1, F, C)
self.addi(0, D, F)
self.dc(loc)
def genop(self, val, f, s, d):
'''Generic loopy instruction
Any loopy instruction may be specified this way,
but for the common cases the convenience operators
are more easily understood.
val is the immediate value (signed, 4 bits)
f contions the function bits for deriving the first addend
s is the second addend register
d is the destination register
'''
assert(val == int(val))
if val > 7:
val = val - 0x10000
assert(-8 <= val < 8)
assert(0 <= f < 16)
assert(0 <= s < 16)
assert(0 <= d < 16)
self.dc((val << 12) + (f << 8) + (s << 4) + (d << 0))
def assemble(f):
'''
Assembler for loopy code
The code to be assembled should be packaged into a function
that takes an assembly object. During the assembly process
it will be called multiple times, until the values of all
the symbols have a known value.
Example:
>>> import lasm
>>> def simple(a):
... from lasm import B, C, F
... a.L('Start')
... a.addi(1, F, C)
... a.jmp('run')
... a.equate('data', a.cur);
... a.dc(0x1234); a.C('Data')
... a.dc(0x5678)
... a.dc(0xABCD)
... a.L('run')
... a.clr(B)
... a.genop(1, 5, B, B)
... a.jmp(a.cur)
... a.jmp('Start')
...
>>> x = lasm.assemble(simple)
>>> x.inst
[4348, 12543, 4660, 22136, 43981, 5563, 5563, 61695, 4348, 223, 0]
>>> x.comments
['', '', 'Data', '', '', '', '', '', '', '', '']
'''
a = Assembly()
a.passno = 1
f(a)
a.passno = 2
tries = 0
while a.changed:
assert(tries < 10)
a.cur = 0
a.changed = False
f(a)
while len(a.comments) < len(a.inst):
a.comments.append('')
return a
if __name__ == "__main__":
import doctest
doctest.testmod()
|
the-stack_106_29627 | import json
from pathlib import Path
lines = Path('input').open().readlines()
max_id = 0
seats = dict()
for line in lines:
row = int(line[:7].replace('B', '1').replace('F', '0'), 2)
col = int(line[7:].replace('R', '1').replace('L', '0'), 2)
seat_id = row * 8 + col
if row not in seats:
seats[row] = '........'
seats[row] = seats[row][:col] + 'X' + seats[row][col + 1:]
max_id = max(max_id, seat_id)
part2 = ''
for row in sorted(seats):
if '.' in seats[row]:
free_col = seats[row].find('.')
seat_id = row * 8 + free_col
part2 += f'{row} {free_col} {seats[row]} {seat_id}' + '\n'
print('part 1', max_id)
print('part 2\n', part2)
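# Worked example of the decoding above, using the sample pass from the puzzle statement:
#   'FBFBBFFRLR' -> row bits 'FBFBBFF' -> '0101100' -> 44
#                   col bits 'RLR'     -> '101'     -> 5
#                   seat id = 44 * 8 + 5 = 357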
|
the-stack_106_29628 | from __future__ import print_function
from six.moves import range
import sys
import shutil
import numpy as np
import os
import random
import time
from PIL import Image
from copy import deepcopy
import torch.backends.cudnn as cudnn
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torchvision.utils as vutils
from itertools import repeat, cycle
from torch.nn.functional import softmax, log_softmax
from torch.nn.functional import cosine_similarity
from tensorboardX import summary
from tensorboardX import FileWriter
from tensorboardX import SummaryWriter
import torch.nn.functional as F
from miscc.config import cfg
from miscc.utils import mkdir_p
from torch.optim import lr_scheduler
from models.model_sscgan import *
from trainers import tri_loss
dir = './log'
writer = SummaryWriter(dir)
# ################## Shared functions ###################
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.orthogonal(m.weight.data, 1.0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
nn.init.orthogonal(m.weight.data, 1.0)
if m.bias is not None:
m.bias.data.fill_(0.0)
def load_params(model, new_param):
for p, new_p in zip(model.parameters(), new_param):
p.data.copy_(new_p)
def copy_G_params(model):
flatten = deepcopy(list(p.data for p in model.parameters()))
return flatten
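# Minimal illustration (not used elsewhere in this file) of the copy/load pair above:
# snapshot a module's parameters, keep training, then restore the snapshot. The same
# pattern is used below in save_model() to swap in the averaged generator weights.
def _param_snapshot_example(module):
    backup = copy_G_params(module)
    # ... the parameters of `module` may be updated here ...
    load_params(module, backup)
    return module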
def load_network():
netM = M_NET()
netM.apply(weights_init)
netM_dec = M_NET_dec()
netM_dec.apply(weights_init)
netG = G_NET()
netG.apply(weights_init)
netBD = Bi_Dis()
# netBD.apply(weights_init)
netE = Encoder()
netE.apply(weights_init)
netC = RESNET_C()
netsD = []
for i in range(3): # 3 discriminators for background, parent and child stage
netsD.append(D_NET(i))
for i in range(len(netsD)):
netsD[i].apply(weights_init)
count = 0
if cfg.TRAIN.NET_G != '':
state_dict = torch.load(cfg.TRAIN.NET_G)
netG.load_state_dict(state_dict)
print('Load ', cfg.TRAIN.NET_G)
istart = cfg.TRAIN.NET_G.rfind('_') + 1
iend = cfg.TRAIN.NET_G.rfind('.')
count = cfg.TRAIN.NET_G[istart:iend]
count = int(count) + 1
if cfg.TRAIN.NET_D != '':
for i in range(len(netsD)):
print('Load %s_%d.pth' % (cfg.TRAIN.NET_D, i))
state_dict = torch.load('%s_%d.pth' % (cfg.TRAIN.NET_D, i))
netsD[i].load_state_dict(state_dict)
if cfg.CUDA:
netG.cuda()
netC.cuda()
netM.cuda()
netM_dec.cuda()
netE.cuda()
netBD.cuda()
for i in range(len(netsD)):
netsD[i].cuda()
return netM, netM_dec, netE, netBD, netG, netC, netsD, len(netsD), count
def define_optimizers(netBD, netE, netM, netM_dec, netG, netC, netsD):
optimizersD = []
num_Ds = len(netsD)
for i in range(num_Ds):
opt = optim.Adam(netsD[i].parameters(),
lr=cfg.TRAIN.DISCRIMINATOR_LR,
betas=(0.5, 0.999))
optimizersD.append(opt)
optimizerBD = optim.Adam(netBD.parameters(), lr=2e-4, betas=(0.5, 0.999))
optimizerM = []
optimizerM.append(optim.Adam(netM.parameters(),
lr=cfg.TRAIN.GENERATOR_LR,
betas=(0.5, 0.999)))
optimizerM.append(optim.Adam(netM_dec.parameters(),
lr=cfg.TRAIN.GENERATOR_LR,
betas=(0.5, 0.999)))
optimizerG = []
optimizerG.append(optim.Adam(netG.parameters(),
lr=cfg.TRAIN.GENERATOR_LR,
betas=(0.5, 0.999)))
optimizerG.append(optim.Adam(netE.parameters(),
lr=cfg.TRAIN.GENERATOR_LR,
betas=(0.5, 0.999)))
optimizerG.append(optim.Adam([{'params': netsD[0].jointConv.parameters()}, {'params': netsD[0].logits.parameters()}],
lr=cfg.TRAIN.GENERATOR_LR,
betas=(0.5, 0.999)))
optimizerG_mask = optim.Adam(netG.h_net3.parameters(),
lr=cfg.TRAIN.GENERATOR_LR,
betas=(0.5, 0.999))
optimizerC = []
ignored_params = list(map(id, netC.classifier.parameters()))
print('the num of new layers:', len(ignored_params), flush=True)
base_params = filter(lambda p: id(p) not in ignored_params, netC.parameters())
opt = optim.SGD(
[{'params': base_params}, {'params': netC.classifier.parameters(), 'lr': cfg.TRAIN.CLASSIFIER_LR}], \
lr=cfg.TRAIN.CLASSIFIER_LR,
momentum=0.9)
optimizerC.append(opt)
return optimizerBD, optimizerM, optimizerG, optimizerC, optimizerG_mask, optimizersD
def save_model(netM, netG, netE, avg_param_G, netC, netsD, epoch, model_dir):
load_params(netG, avg_param_G)
torch.save(
netM.state_dict(),
'%s/netM_%d.pth' % (model_dir, epoch))
torch.save(
netE.state_dict(),
'%s/netE_%d.pth' % (model_dir, epoch))
torch.save(
netG.state_dict(),
'%s/netG_%d.pth' % (model_dir, epoch))
torch.save(
netC.state_dict(),
'%s/netC_%d.pth' % (model_dir, epoch))
for i in range(len(netsD)):
netD = netsD[i]
torch.save(
netD.state_dict(),
'%s/netD%d.pth' % (model_dir, i))
print('Save G/Ds models.')
def save_img_results(imgs_tcpu, fake_imgs, num_imgs, count, image_dir, summary_writer):
num = cfg.TRAIN.VIS_COUNT
real_img = imgs_tcpu[-1][0:num]
vutils.save_image(
real_img, '%s/real_samples%09d.png' % (image_dir, count),
normalize=True)
real_img_set = vutils.make_grid(real_img).numpy()
real_img_set = np.transpose(real_img_set, (1, 2, 0))
real_img_set = real_img_set * 255
real_img_set = real_img_set.astype(np.uint8)
for i in range(len(fake_imgs)):
fake_img = fake_imgs[i]
vutils.save_image(
fake_img.data, '%s/count_%09d_fake_samples%d.png' %
(image_dir, count, i), normalize=True)
fake_img_set = vutils.make_grid(fake_img.data).cpu().numpy()
fake_img_set = np.transpose(fake_img_set, (1, 2, 0))
fake_img_set = (fake_img_set + 1) * 255 / 2
fake_img_set = fake_img_set.astype(np.uint8)
summary_writer.flush()
class SSCGAN_train(object):
def __init__(self, output_dir, label, unlabel, test, imsize):
# report.export_sources(os.path.join(output_dir, 'Src'))
if cfg.TRAIN.FLAG:
self.model_dir = os.path.join(output_dir, 'Model')
self.image_dir = os.path.join(output_dir, 'Image')
self.log_dir = os.path.join(output_dir, 'Log')
self.tsne_dir = os.path.join(output_dir, 'Tsne')
mkdir_p(self.model_dir)
mkdir_p(self.image_dir)
mkdir_p(self.log_dir)
mkdir_p(self.tsne_dir)
self.summary_writer = FileWriter(self.log_dir)
s_gpus = cfg.GPU_ID.split(',')
self.gpus = [int(ix) for ix in s_gpus]
self.num_gpus = len(self.gpus)
torch.cuda.set_device(self.gpus[0])
self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus
self.max_epoch = cfg.TRAIN.MAX_EPOCH
self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL
self.num_classes = cfg.CLASSES
# self.alpha_cm = cfg.TRAIN.ALPHA
self.label_data = label
self.unlabel_data = unlabel
self.test_data = test
self.num_batches = len(self.unlabel_data)
def prepare_data(self, data):
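        # Unpacks a dataloader batch and wraps images, c_code and warped bounding boxes
        # in Variables, moving them to the GPU when cfg.CUDA is set.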
fimgs, cimgs, c_code, _, warped_bbox, digit_label = data
real_vfimgs, real_vcimgs = [], []
if cfg.CUDA:
vc_code = Variable(c_code).cuda()
for i in range(len(warped_bbox)):
warped_bbox[i] = Variable(warped_bbox[i]).float().cuda()
else:
vc_code = Variable(c_code)
for i in range(len(warped_bbox)):
warped_bbox[i] = Variable(warped_bbox[i])
if cfg.CUDA:
real_vfimgs.append(Variable(fimgs[0]).cuda())
real_vcimgs.append(Variable(cimgs[0]).cuda())
else:
real_vfimgs.append(Variable(fimgs[0]))
real_vcimgs.append(Variable(cimgs[0]))
return fimgs, real_vfimgs, real_vcimgs, vc_code, warped_bbox, digit_label
def train_Dnet(self, idx):
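        # Updates one of three discriminators: idx 0 works on images (real/fake plus class
        # logits), idx 1 works on images and also returns features used for a triplet loss,
        # and idx 2 discriminates latent codes (prior noise vs. encoder outputs).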
if idx == 0 or idx == 1 or idx == 2:
criterion, criterion_one, criterion_class = self.criterion, self.criterion_one, self.criterion_class
netD, optD = self.netsD[idx], self.optimizersD[idx]
real_imgs = self.real_cimgs[0]
real_imgs_unlabel = self.real_cimgs_unlabel[0]
fake_imgs = self.fake_img
# random y + fake z
fake_imgs_fake_z = self.fake_img_fake_z
# forward
if idx == 0:
netD.zero_grad()
real_logits = netD(real_imgs, self.label_digit)
real_logits_unlabel = netD(real_imgs_unlabel, self.label_digit)
fake_logits = netD(fake_imgs.detach(), self.label_digit)
fake_logits_enc_real = netD(fake_imgs_fake_z.detach(), self.u2_label_digit)
real_labels = torch.ones_like(real_logits[1])
fake_labels = torch.zeros_like(real_logits[1])
if idx == 1:
netD.zero_grad()
real_logits, fea_label = netD(real_imgs, self.label_digit)
fake_logits, fea_fake = netD(fake_imgs.detach(), self.label_digit)
fake_logits_enc_real, fea_fake = netD(fake_imgs_fake_z.detach(), self.u2_label_digit)
real_labels = torch.ones_like(real_logits)
fake_labels = torch.zeros_like(real_logits)
if idx == 2:
# forward + loss
netD.zero_grad()
real_logits = netD(self.noise, self.label_digit)
fake_logits = netD(self.fake_z, self.label_digit)
real_labels = torch.ones_like(real_logits)
fake_labels = torch.zeros_like(real_logits)
errD_real = criterion_one(real_logits, real_labels)
errD_fake = criterion_one(fake_logits, fake_labels)
errD = errD_real + errD_fake
# loss
if idx == 0:
errD_real = criterion_one(real_logits[1], real_labels)
errD_fake = criterion_one(fake_logits[1], fake_labels)
errD_fake_enc_real = criterion_one(fake_logits_enc_real[1], fake_labels)
errD_real_unlabel = criterion_one(real_logits_unlabel[1], real_labels)
Y_c, _ = self.netC(real_imgs_unlabel)
Ypersudo = torch.argmax(F.softmax(Y_c.detach(), dim=1), dim=1).detach()
persudo_y_logits = netD(real_imgs_unlabel, Ypersudo)
errD_class = criterion_class(real_logits[2], self.label_digit) + criterion_class(fake_logits[2], self.label_digit) + \
criterion_class(persudo_y_logits[2], Ypersudo)
errD = errD_real + errD_fake + errD_real_unlabel + errD_fake_enc_real + errD_class
if idx == 1:
errD_real = criterion_one(real_logits, real_labels) # Real/Fake loss for the real image
errD_fake = criterion_one(fake_logits, fake_labels) # Real/Fake loss for the fake image
errD_fake_enc_real = criterion_one(fake_logits_enc_real, fake_labels)
Y_c, _ = self.netC(real_imgs_unlabel)
Ypersudo = torch.argmax(F.softmax(Y_c.detach(), dim=1), dim=1).detach()
persudo_y_logits, fea_unlabel = netD(real_imgs_unlabel, Ypersudo)
real_labels = torch.ones_like(persudo_y_logits)
errD_persudo = criterion_one(persudo_y_logits, real_labels)
''' triplet part '''
fea_real = torch.cat([fea_label, fea_unlabel], dim=0)
y_concat_real = torch.cat([self.label_digit, Ypersudo], dim=0)
errD_triplet = 1.0 * tri_loss.triplet_loss(fea_real, y_concat_real, 0.5, 'r', 0)
# errD_triplet = 1.0*tri_loss.triplet_loss_fake(fea_real, fea_fake, y_concat_real, 0.5, 'r', 0)
errD = errD_real + errD_fake + errD_persudo + errD_fake_enc_real + errD_triplet
errD.backward()
optD.step()
return errD
def train_Gnet_cycle(self):
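        # Latent cycle-consistency step: encode the real image, regenerate it with a random
        # label, re-encode the result, and minimize the L1 distance between the two codes.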
self.netE.zero_grad()
self.netG.zero_grad()
# Encoder
self.zx = self.netE(self.input_var, self.label_digit)
self.xzx = self.netG(self.one_hot_label_random, self.zx, self.c_code)
self.zxzx = self.netE(self.xzx, self.label_digit)
# Cycle loss
errG_cycle_real = self.criterion_l1loss(self.zx, self.zxzx)
errG_cycle_real.backward()
self.optimizerG[0].step()
self.optimizerG[1].step()
return errG_cycle_real
def train_Gnet(self):
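        # Joint generator/encoder update: classifier cross-entropy on generated images,
        # adversarial terms against netsD[0], netsD[1] and the latent discriminator netsD[2],
        # an info loss on the c_code head of netsD[0], and a term that fools netBD.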
self.netE.zero_grad()
self.netM.zero_grad()
self.netM_dec.zero_grad()
self.netG.zero_grad()
self.netC.zero_grad()
for myit in range(len(self.netsD)):
self.netsD[myit].zero_grad()
criterion_one = self.criterion_one
criterion_class = self.criterion_class
# Encoder
self.fake_z = self.netE(self.input_var, self.label_digit)
self.fake_img_fake_z = self.netG(self.one_hot_label_random, self.fake_z, self.c_code)
# MineGAN
self.fake_img = self.netG(self.one_hot_label, self.noise, self.c_code)
# fool BD loss
pred_enc_z = self.netBD(self.input_var, self.fake_z)
pred_gen_z = self.netBD(self.fake_img, self.noise)
fool_BD_loss = (pred_enc_z.mean()) - (pred_gen_z.mean())
# semantic and feature matching loss
fake_pred, feat_xz = self.netC(self.fake_img)
fake_pred_2, feat_xz_2 = self.netC(self.fake_img_fake_z)
errG_ce = criterion_class(fake_pred, self.label_digit)
errG_ce_2 = criterion_class(fake_pred_2, self.u2_label_digit)
errG_semantic = errG_ce + errG_ce_2
# D_overall loss
outputs = self.netsD[0](self.fake_img, self.label_digit)
real_labels = torch.ones_like(outputs[1])
errG_Dmagn_fake = criterion_one(outputs[1], real_labels)
outputs = self.netsD[0](self.fake_img_fake_z, self.u2_label_digit)
errG_Dmagn_fake_z_rep = criterion_one(outputs[1], real_labels)
errG_D_magn = errG_Dmagn_fake + errG_Dmagn_fake_z_rep
# Dz
output = self.netsD[2](self.fake_z, self.label_digit)
errG_Dz_fake = criterion_one(output, real_labels)
errG_Dz = errG_Dz_fake
# D_y loss
outputs, _ = self.netsD[1](self.fake_img, self.label_digit)
real_labels = torch.ones_like(outputs)
errG_Dy_fake = criterion_one(outputs, real_labels)
outputs, _ = self.netsD[1](self.fake_img_fake_z, self.u2_label_digit)
errG_Dy_fake_z_rep = criterion_one(outputs, real_labels)
errG_Dy = errG_Dy_fake + errG_Dy_fake_z_rep
# D_overall info loss
pred_c = self.netsD[0](self.fake_img, self.label_digit)
errG_info_dis = criterion_class(pred_c[0], torch.nonzero(self.c_code.long())[:, 1])
# Cycle loss
errG_total = errG_semantic * 3 + errG_D_magn + errG_Dy + errG_Dz + errG_info_dis + fool_BD_loss
errG_total.backward()
self.optimizerG[0].step()
self.optimizerG[1].step()
self.optimizerG[2].step()
return errG_total, errG_ce, errG_ce_2, errG_Dmagn_fake, errG_Dmagn_fake_z_rep, \
errG_Dy_fake, errG_Dy_fake_z_rep, errG_Dz
def train_Cnet(self):
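        # Classifier update: CutMix between labeled images and pseudo-labeled unlabeled
        # images, a supervised loss on the labeled batch, and a temporal-ensembling
        # consistency (MSE) term weighted by self.w.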
self.netC.zero_grad()
criterion_class, criterion_one, criterion_mse = self.criterion_class, self.criterion_one, self.criterion_mse
unlabel_prediction, _ = self.netC(self.real_cimgs_unlabel[0])
unlabel_prediction_digit = torch.argmax(F.softmax(unlabel_prediction, dim=1), dim=1)
x_mix_unlabel, y_mix, self.lam = self.cutmix_data_between(self.real_cimgs[0], self.label_digit,
self.real_cimgs_unlabel[0],
unlabel_prediction_digit,
alpha=0.2)
unlabel_mix_pred, _ = self.netC(x_mix_unlabel)
loss_unlabel = criterion_class(unlabel_mix_pred, self.label_digit) * self.lam + \
criterion_class(unlabel_mix_pred, unlabel_prediction_digit) * (1. - self.lam)
# real loss
pred_real, _ = self.netC(self.real_cimgs[0])
loss_real = criterion_class(pred_real, self.label_digit.cuda())
# temporal-ensemble loss
self.outputs[self.j * self.batch_size: (self.j + 1) * self.batch_size] = pred_real.data.clone()
te_loss = criterion_mse(self.zcomp, unlabel_prediction)
errC = loss_real + loss_unlabel + self.w * te_loss
errC.backward()
self.optimizerC[0].step()
return errC
def update_ema_variables(self, netC, netC_ema, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(netC_ema.parameters(), netC.parameters()):
ema_param.data.mul_(alpha).add_(1 - alpha, param.data)
def calc_metrics_C(self, modelC, modelD, loader):
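        # Reports test accuracy of the classifier netC and of the class head of the image
        # discriminator (modelD) on real test images.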
total_C = 0
correct_C = 0
correct_fake = 0
noise = Variable(torch.FloatTensor(self.batch_size, cfg.GAN.Z_DIM)).cuda()
for i, data in enumerate(loader):
noise.data.normal_(0, 1)
u1, u2, real_cimgs, c_code, u4, label_digit = self.prepare_data(data)
label_digit = label_digit.cuda()
label_one_hot = self.get_float_one_hot(label_digit)
# fakeimg = self.netG(label_one_hot, noise, c_code)
_, _, output_fake_d = modelD(real_cimgs[0], label_digit)
# output_fake, _ = modelC(fakeimg)
_, predicted_fake = torch.max(output_fake_d.data, 1)
correct_fake += (predicted_fake == label_digit.data.view_as(predicted_fake)).sum()
output_C, _ = modelC(real_cimgs[0])
_, predicted_C = torch.max(output_C.data, 1)
total_C += label_digit.size(0)
correct_C += (predicted_C == label_digit.data.view_as(predicted_C)).sum()
acc = 100 * float(correct_C) / total_C
acc_fake = 100 * float(correct_fake) / total_C
return acc, acc_fake
def get_float_one_hot(self, label):
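        # Converts integer class labels into a float one-hot tensor of shape
        # (batch_size, cfg.CLASSES) on the GPU.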
digit_2_onehot = torch.zeros([self.batch_size, cfg.CLASSES])
for i in range(self.batch_size):
digit_2_onehot[i][label[i]] = 1
digit_2_onehot = digit_2_onehot.float()
digit_2_onehot = digit_2_onehot.cuda()
return digit_2_onehot
def rand_bbox(self, size, lam):
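        # Samples a CutMix bounding box covering roughly a (1 - lam) fraction of the image
        # area; the try/except guards against degenerate box sizes for extreme lam values.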
W = size[2]
H = size[3]
cut_rat = np.sqrt(1. - lam)
        cut_w = int(W * cut_rat)
        cut_h = int(H * cut_rat)
# uniform
# cx = np.random.randint(W)
# cy = np.random.randint(H)
try:
cx = np.random.randint(low=cut_w // 2,
high=W - (cut_w // 2) + 1) # or low=(cut_w//2) - 1, high=W - (cut_w//2)
cy = np.random.randint(low=cut_h // 2, high=H - (cut_h // 2) + 1)
except:
print('lam:', lam)
print('W:', W, 'cut_w:', cut_w)
print('H:', H, 'cut_h:', cut_h)
print('low:', cut_w // 2, 'high:', W - cut_w // 2)
print('low:', cut_h // 2, 'high:', H - cut_h // 2)
exit(0)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
def cutmix_data_between(self, x1, y1, x2, y2, alpha=1.0):
'''Compute the mixup data. Return mixed inputs, mixed target, and lambda'''
if alpha > 0.:
lam = np.random.beta(alpha, alpha)
else:
lam = 1.
bbx1, bby1, bbx2, bby2 = self.rand_bbox(x1.size(), lam)
x = x1.clone()
x[:, :, bbx1:bbx2, bby1:bby2] = x2[:, :, bbx1:bbx2, bby1:bby2].data
y = lam * y1 + (1 - lam) * y2
mixed_x = Variable(x.cuda())
mixed_y = Variable(y.cuda())
return mixed_x, mixed_y, lam
def cutmix_criterion(self, y_a, y_b, lam):
return lambda criterion, pred: lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
def ramp_up(self, epoch, max_epochs, max_val, mult):
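        # Consistency-weight ramp-up (temporal-ensembling style): 0 at epoch 0, max_val at
        # or beyond max_epochs, and an exponential ramp in between.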
if epoch == 0:
return 0.
elif epoch >= max_epochs:
return max_val
return max_val * np.exp(mult * (1. - float(epoch) / max_epochs) ** 2)
def weight_schedule(self, epoch, max_epochs, max_val, mult, n_labeled, n_samples):
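        # Scales the ramp-up target by the labeled fraction n_labeled / n_samples before
        # delegating to ramp_up.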
max_val = max_val * (float(n_labeled) / n_samples)
return self.ramp_up(epoch, max_epochs, max_val, mult)
def cal_gradient_penalty(self, netD, real_data, fake_data, type='mixed', constant=1.0):
# adapted from cyclegan
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
            constant (float)            -- the constant used in formula (||gradient||_2 - constant)^2
Returns the gradient penalty loss
"""
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
interpolatesv = []
for i in range(len(real_data)):
alpha = torch.rand(real_data[i].shape[0], 1)
alpha = alpha.expand(real_data[i].shape[0],
real_data[i].nelement() // real_data[i].shape[0]).contiguous().view(
*real_data[i].shape)
alpha = alpha.cuda()
interpolatesv.append(alpha * real_data[i] + ((1 - alpha) * fake_data[i]))
else:
raise NotImplementedError('{} not implemented'.format(type))
# require grad
for i in range(len(interpolatesv)):
interpolatesv[i].requires_grad_(True)
# feed into D
disc_interpolates = netD(*interpolatesv)
# cal penalty
gradient_penalty = 0
for i in range(len(disc_interpolates)):
for j in range(len(interpolatesv)):
gradients = torch.autograd.grad(outputs=disc_interpolates[i], inputs=interpolatesv[j],
grad_outputs=torch.ones(disc_interpolates[i].size()).cuda(),
create_graph=True, retain_graph=True, only_inputs=True,
allow_unused=True)
if gradients[0] is not None: # it will return None if input is not used in this output (allow unused)
gradients = gradients[0].view(real_data[j].size(0), -1) # flat the data
gradient_penalty += (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() # added eps
return gradient_penalty
def train_BD(self):
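        # Wasserstein-style update of the bi-discriminator netBD on (image, latent) pairs:
        # (real image, encoded z) vs. (generated image, prior noise), plus a gradient penalty.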
self.optimizerBD.zero_grad()
# make prediction on pairs
# print (self.fake_img_fake_z_rep.shape, self.fake_z.shape)
pred_enc_z = self.netBD(self.input_var, self.fake_z.detach())
pred_gen_z = self.netBD(self.fake_img.detach(), self.noise)
real_data = [self.input_var, self.fake_z.detach()]
fake_data = [self.fake_img.detach(), self.noise]
penalty = self.cal_gradient_penalty(self.netBD, real_data, fake_data, type='mixed', constant=1.0)
D_loss = -(pred_enc_z.mean()) + (pred_gen_z.mean()) + penalty * 10
D_loss.backward()
self.optimizerBD.step()
def train(self):
self.mtype = 'z_repa'
self.netM, self.netM_dec, self.netE, self.netBD, self.netG, self.netC, self.netsD, self.num_Ds, start_count = load_network()
avg_param_G = copy_G_params(self.netG)
self.optimizerBD, self.optimizerM, self.optimizerG, self.optimizerC, self.opt_mask, self.optimizersD = \
define_optimizers(self.netBD, self.netE, self.netM, self.netM_dec, self.netG, self.netC, self.netsD)
self.criterion = nn.BCELoss(reduce=False)
self.criterion_one = nn.BCELoss()
self.criterion_class = nn.CrossEntropyLoss()
self.criterion_mse = nn.MSELoss()
self.criterion_l1loss = nn.L1Loss()
self.real_labels = \
Variable(torch.FloatTensor(self.batch_size).fill_(1))
self.fake_labels = \
Variable(torch.FloatTensor(self.batch_size).fill_(0))
nz = cfg.GAN.Z_DIM
self.noise = Variable(torch.FloatTensor(self.batch_size, nz))
self.noise_new = Variable(torch.FloatTensor(self.batch_size, nz))
fixed_noise = \
Variable(torch.FloatTensor(self.batch_size, nz).normal_(0, 1))
if cfg.CUDA:
self.criterion.cuda()
self.criterion_one.cuda()
self.criterion_class.cuda()
self.criterion_mse.cuda()
self.criterion_l1loss.cuda()
self.real_labels = self.real_labels.cuda()
self.fake_labels = self.fake_labels.cuda()
self.noise, fixed_noise, self.noise_new = self.noise.cuda(), fixed_noise.cuda(), self.noise_new.cuda()
print("Starting normal SSC-GAN training..")
count = start_count
start_epoch = start_count // (self.num_batches)
self.global_step = 0
exp_lr_scheduler = lr_scheduler.StepLR(self.optimizerC[0], step_size=20, gamma=0.5)
n_classes = cfg.CLASSES
n_samples = len(self.unlabel_data) * self.batch_size
Z = torch.zeros(n_samples, n_classes).float().cuda()
z = torch.zeros(n_samples, n_classes).float().cuda()
self.outputs = torch.zeros(n_samples, n_classes).float().cuda()
for epoch in range(start_epoch, self.max_epoch):
start_t = time.time()
exp_lr_scheduler.step(epoch)
w = self.weight_schedule(epoch, self.max_epoch, max_val=30., mult=-5., n_labeled=3000, n_samples=n_samples)
self.w = torch.autograd.Variable(torch.FloatTensor([w]).cuda(), requires_grad=False)
self.j = 0
for (data_label), (data_unlabel) in zip(cycle(self.label_data), self.unlabel_data):
compare = []
self.imgs_tcpu, self.real_fimgs, self.real_cimgs, \
self.c_code, self.warped_bbox, self.label_digit = self.prepare_data(data_label)
self.imgs_tcpu_unlabel, self.real_fimgs_unlabel, self.real_cimgs_unlabel, \
self.c_code_unlabel, self.warped_bbox_unlabel, u2 = self.prepare_data(data_unlabel)
self.input_var = torch.autograd.Variable(self.real_cimgs[0].cuda())
self.u2_label_digit = u2.cuda()
self.one_hot_label = self.get_float_one_hot(self.label_digit)
self.one_hot_label_random = self.get_float_one_hot(u2)
self.label_digit = self.label_digit.cuda()
self.noise.data.normal_(0, 1)
self.zcomp = Variable(z[self.j * self.batch_size: (self.j + 1) * self.batch_size], requires_grad=False)
# Encoder
self.fake_z = self.netE(self.input_var, self.label_digit)
# random y + fake_z
self.fake_img_fake_z = self.netG(self.one_hot_label_random, self.fake_z, self.c_code)
# MineGAN
self.fake_img = self.netG(self.one_hot_label, self.noise, self.c_code)
# Update Discriminator networks
errD_total = 0
for i in range(self.num_Ds):
errD = self.train_Dnet(i)
errD_total += errD
self.train_BD()
# Update the Generator networks
errG_total, errG_ce, errG_ce_2, errG_Dmagn_fake, errG_Dmagn_fake_z_rep, \
errG_Dy_fake, errG_Dy_fake_z_rep, errG_Dz = self.train_Gnet()
errG_cycle_real = self.train_Gnet_cycle()
for p, avg_p in zip(self.netG.parameters(), avg_param_G):
avg_p.mul_(0.999).add_(0.001, p.data)
# Update the Generator networks
errC_total = self.train_Cnet()
self.j += 1
self.global_step += 1
count = count + 1
if count % cfg.TRAIN.SNAPSHOT_INTERVAL == 0:
backup_para = copy_G_params(self.netG)
save_model(self.netM, self.netG, self.netE, avg_param_G, self.netC, self.netsD, count, self.model_dir)
# Save images
load_params(self.netG, avg_param_G)
self.netG.eval()
self.netC.eval()
self.netE.eval()
self.netM.eval()
self.netsD[0].eval()
with torch.set_grad_enabled(False):
fake_z = self.netE(self.input_var, self.u2_label_digit)
self.fake_imgfix_fake_z = self.netG(self.one_hot_label_random, fake_z, self.c_code)
self.fake_imgfix_fake_z_random = self.netG(self.one_hot_label, fake_z, self.c_code)
self.fake_imgfix_fake_z_mine = self.netG(self.one_hot_label, fixed_noise, self.c_code)
fixed_noise_zcode_cycle = self.netE(self.fake_imgfix_fake_z_mine, self.label_digit)
self.fake_imgfix_fake_z_mine_cycle = self.netG(self.one_hot_label, fixed_noise_zcode_cycle, self.c_code)
compare.append(self.fake_imgfix_fake_z)
compare.append(self.fake_imgfix_fake_z_random)
compare.append(self.fake_imgfix_fake_z_mine)
compare.append(self.fake_imgfix_fake_z_mine_cycle)
acc, acc2 = self.calc_metrics_C(self.netC, self.netsD[0], self.test_data)
print(count)
print('Accuracy of the C on the %d test images: %.2f %% D_clas images: %.2f %%' % (
len(self.test_data) * cfg.TRAIN.BATCH_SIZE, acc, acc2))
save_img_results(self.imgs_tcpu, (compare),
self.num_Ds, count, self.image_dir, self.summary_writer)
self.netC.train()
self.netG.train()
self.netE.train()
self.netM.train()
self.netsD[0].train()
load_params(self.netG, backup_para)
alpha = 0.6
Z = alpha * Z + (1. - alpha) * self.outputs
z = Z * (1. / (1. - alpha ** (epoch + 1)))
end_t = time.time()
print('''[%d/%d][%d]
Loss_C: %.2f Loss_G: %.2f Loss_D: %.2f
errG_ce: %.2f, errG_ce_2: %.2f, errG_Dmagn_fake: %.2f,
errG_Dmagn_fake_z_rep: %.2f,
errG_Dy_fake: %.2f, errG_Dy_fake_z_rep: %.2f,
errG_cycle_real: %.6f
Time: %.2fs
'''
% (epoch, self.max_epoch, self.num_batches,
errC_total.item(), errG_total.item(), errD_total.item(),
errG_ce.item(), errG_ce_2.item(), errG_Dmagn_fake.item(), errG_Dmagn_fake_z_rep.item(), \
errG_Dy_fake.item(), errG_Dy_fake_z_rep.item(), errG_cycle_real.item(),
end_t - start_t))
save_model(self.netM, self.netG, self.netE, avg_param_G, self.netC, self.netsD, count, self.model_dir)
self.summary_writer.close()
class SSCGAN_test(object):
def __init__(self, dataloader, testloader):
self.save_dir = os.path.join(cfg.SAVE_DIR, 'images')
mkdir_p(self.save_dir)
s_gpus = cfg.GPU_ID.split(',')
self.gpus = [int(ix) for ix in s_gpus]
self.num_gpus = len(self.gpus)
torch.cuda.set_device(self.gpus[0])
self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus
self.dataloader = dataloader
self.testloader = testloader
def sample_pseudo_labels_onehot_1_label(self, num_classes, batch_size, choice):
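        # Draws batch_size labels from `choice` without replacement and returns them both as
        # a float one-hot tensor and as long integers on the GPU.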
labels = np.random.choice(a=choice, size=batch_size, replace=False, p=None)
pseudo_labels = torch.from_numpy(labels)
pseudo_labels = pseudo_labels.type(torch.long).cuda()
labels_onehot = np.eye(num_classes)[labels]
pseudo_labels_onehot = torch.from_numpy(labels_onehot)
pseudo_labels_onehot = pseudo_labels_onehot.type(torch.float).cuda()
return pseudo_labels_onehot, pseudo_labels
def sample_pseudo_labels_c_code(self, num_classes, batch_size, flag=False):
labels = np.random.randint(low=0, high=num_classes, size=(batch_size))
pseudo_labels = torch.from_numpy(labels)
pseudo_labels = pseudo_labels.type(torch.long).cuda()
labels_onehot = np.eye(num_classes)[labels]
pseudo_labels_onehot = torch.from_numpy(labels_onehot)
pseudo_labels_onehot = pseudo_labels_onehot.type(torch.float).cuda()
return pseudo_labels_onehot, pseudo_labels
def save_image(self, images, save_dir, iname):
img_name = '%s.png' % (iname)
full_path = os.path.join(save_dir, img_name)
if (iname.find('mask') == -1) or (iname.find('foreground') != -1):
img = images.add(1).div(2).mul(255).clamp(0, 255).byte()
ndarr = img.permute(1, 2, 0).data.cpu().numpy()
im = Image.fromarray(ndarr)
im.save(full_path)
else:
img = images.mul(255).clamp(0, 255).byte()
ndarr = img.data.cpu().numpy()
ndarr = np.reshape(ndarr, (ndarr.shape[-1], ndarr.shape[-1], 1))
ndarr = np.repeat(ndarr, 3, axis=2)
im = Image.fromarray(ndarr)
im.save(full_path)
def generate_definite_y_img(self):
from models.model_sscgan import G_NET
netG = G_NET().cuda().eval()
model_dict = torch.load(cfg.TRAIN.NET_G, map_location='cuda:0')
netG.load_state_dict(model_dict)
print('Load', cfg.TRAIN.NET_G)
nz = cfg.GAN.Z_DIM
noise = torch.FloatTensor(self.batch_size, nz).cuda()
cnt = 0
# given number
num = 123
class_label = [num]
for i in range(1000):
noise.data.normal_(0, 1)
y_code, y_code_digit = self.sample_pseudo_labels_onehot_1_label(cfg.CLASSES, 1, class_label)
c_code, c_code_digit = self.sample_pseudo_labels_c_code(50, 1)
with torch.set_grad_enabled(False):
fake_img = netG(y_code, noise, c_code)
self.save_image(fake_img[0], self.save_dir, 'fake_img_idx_' + str(cnt))
cnt += 1
|
the-stack_106_29630 | """TODO(fquad): Add a description here."""
import json
import os
import datasets
# TODO(fquad): BibTeX citation
_CITATION = """\
@ARTICLE{2020arXiv200206071,
author = {Martin, d'Hoffschmidt and Maxime, Vidal and
Wacim, Belblidia and Tom, Brendlé},
title = "{FQuAD: French Question Answering Dataset}",
journal = {arXiv e-prints},
keywords = {Computer Science - Computation and Language},
year = "2020",
month = "Feb",
eid = {arXiv:2002.06071},
pages = {arXiv:2002.06071},
archivePrefix = {arXiv},
eprint = {2002.06071},
primaryClass = {cs.CL}
}
"""
# TODO(fquad):
_DESCRIPTION = """\
FQuAD: French Question Answering Dataset
We introduce FQuAD, a native French Question Answering Dataset. FQuAD contains 25,000+ question and answer pairs.
Finetuning CamemBERT on FQuAD yields an F1 score of 88% and an exact match of 77.9%.
"""
_URL = "https://storage.googleapis.com/illuin/fquad/"
_URLS = {
"train": _URL + "train.json.zip",
"valid": _URL + "valid.json.zip",
}
class Fquad(datasets.GeneratorBasedBuilder):
"""TODO(fquad): Short description of my dataset."""
# TODO(fquad): Set up version.
VERSION = datasets.Version("0.1.0")
def _info(self):
# TODO(fquad): Specifies the datasets.DatasetInfo object
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# datasets.features.FeatureConnectors
features=datasets.Features(
{
"context": datasets.Value("string"),
"questions": datasets.features.Sequence(datasets.Value("string")),
"answers": datasets.features.Sequence(
{"texts": datasets.Value("string"), "answers_starts": datasets.Value("int32")}
),
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://fquad.illuin.tech/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(fquad): Downloads the data and defines the splits
# dl_manager is a datasets.download.DownloadManager that can be used to
# download and extract URLs
download_urls = _URLS
dl_dir = dl_manager.download_and_extract(download_urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir["train"], "train.json")},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir["valid"], "valid.json")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(fquad): Yields (key, example) tuples from the dataset
with open(filepath, encoding="utf-8") as f:
data = json.load(f)
for id1, examples in enumerate(data["data"]):
for id2, example in enumerate(examples["paragraphs"]):
questions = [question["question"] for question in example["qas"]]
answers = [answer["answers"] for answer in example["qas"]]
texts = [answer[0]["text"] for answer in answers]
answers_starts = [answer[0]["answer_start"] for answer in answers]
yield str(id1) + "_" + str(id2), {
"context": example["context"],
"questions": questions,
"answers": {"texts": texts, "answers_starts": answers_starts},
}
|
the-stack_106_29631 | #!/usr/bin/env python3
# Copyright (2021-) Shahruk Hossain <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Iterable
from tensorflow.keras.layers import Layer, Input, ReLU
from tensorflow.keras.models import Sequential
from kaldi_tflite.lib.layers import TDNN, BatchNorm, StatsPooling
from kaldi_tflite.lib.io import KaldiNnet3Reader
def cfg2layers(layerCfg: dict) -> Iterable[Layer]:
"""
Uses the given layer config to instantiate one or more
tensorflow layers.
Parameters
----------
layerCfg : dict
Layer config. May specify multiple layers by specifying
a list of layer types, e.g. ["affine", "relu", "batchnorm"].
See ${TOP}/data/kaldi_models/configs/ for examples.
Returns
-------
Iterable[Layer]
One or more layers instantiated from the layer config.
Raises
------
KeyError
If the layer config is missing necessary properties.
ValueError
If the type specified in the layer config is not supported.
"""
layerTypes = layerCfg.get("type", [])
if isinstance(layerTypes, str):
layerTypes = [layerTypes]
if len(layerTypes) == 0:
raise KeyError("layer config does not define layer 'type'")
name = layerCfg.get("name", None)
layers = []
for layerType in layerTypes:
t = layerType.lower()
cfg = layerCfg.get("cfg", {})
if t in ["affine", "tdnn"]:
cfg["name"] = f"{name}.affine"
layer = TDNN(**cfg)
elif t in ["relu"]:
layer = ReLU(name=f"{name}.relu")
elif t in ["batchnorm", "bn"]:
layer = BatchNorm(name=f"{name}.batchnorm")
elif t in ["stats", "stats_extraction", "stats_pooling"]:
cfg["name"] = name
layer = StatsPooling(**cfg)
else:
raise ValueError(f"unsupported layer type '{t}'")
layers.append(layer)
return layers
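# Hypothetical example (the config values below are assumptions, not from a shipped file):
# a layer config such as {"name": "tdnn1", "type": ["relu", "batchnorm"]} would make
# cfg2layers return [ReLU(name="tdnn1.relu"), BatchNorm(name="tdnn1.batchnorm")].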
def SequentialFromConfig(cfg: dict, nnet3Path: str = None, name: str = None) -> Sequential:
"""
Creates a tensorflow.keras.Sequential model using the given configuration.
Parameters
----------
cfg : dict
Model config. See ${TOP}/data/kaldi_models/configs/ for examples.
Returns
-------
Sequential
Model created using the config.
nnet3Path : str, optional
If path to nnet3 raw file provided, the created tensorflow model
will be initialized using weights from nnet3 model. By default None.
name : str, optional
Name to give to sequential model, by default None.
Raises
------
ValueError
If config is missing layer configs.
If first layer in layer config is not an input layer.
"""
layersConfig = cfg.get("layers", [])
if len(layersConfig) == 0:
raise ValueError("no layers defined in config")
layers = []
# First layer should be input.
inputCfg = layersConfig[0]
if inputCfg.get("type", "") != "input":
raise ValueError("first layer in sequential model needs to be of type 'input'")
batchSize, timesteps, featDim = inputCfg["shape"]
layers.append(Input(shape=(timesteps, featDim), batch_size=batchSize))
# Creating rest of the layers.
for lCfg in cfg["layers"][1:]:
layers.extend(cfg2layers(lCfg))
mdl = Sequential(layers, name=name)
# Initializing weights if path to nnet3 model given.
if nnet3Path is not None:
nnet3Mdl = KaldiNnet3Reader(nnet3Path, True)
for layer in mdl.layers:
try:
layer.set_weights(nnet3Mdl.getWeights(layer.name))
except KeyError:
print(f"component with name '{layer.name}' not found in nnet3 model, "
"skipping initialization")
return mdl
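# Minimal usage sketch (shapes and names below are illustrative assumptions):
#   cfg = {"layers": [{"type": "input", "shape": [None, 150, 30]},
#                     {"name": "tdnn1", "type": ["relu", "batchnorm"]}]}
#   mdl = SequentialFromConfig(cfg, name="sketch")
# Passing nnet3Path would additionally try to copy weights from a Kaldi nnet3 raw model.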
|
the-stack_106_29633 | # Copyright (c) 2017-2018 {Flair Inc.} WESLEY PENG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from taf.foundation.api.ui.support import WaitHandler
class BrowserWaitHandler(WaitHandler):
def __init__(
self,
handler=None,
timeout=None,
poll_frequency=1.0
):
super(BrowserWaitHandler, self).__init__(
handler, timeout
)
self.poll_frequency = poll_frequency or 1.0
def wait(self, timeout=None):
"""
Waits until the page is fully loaded
:param timeout: float in seconds
:return:
"""
try:
self.timeout = float(timeout or self.timeout)
self.poll_frequency = float(self.poll_frequency)
WebDriverWait(
self.handler,
self.timeout,
self.poll_frequency
).until(
lambda driver: driver.execute_script(
'return document.readyState=="complete";'
),
'Failed to fully load page in {} seconds'.format(
self.timeout
)
)
except TimeoutException:
raise
|
the-stack_106_29634 | from __future__ import absolute_import, print_function
from django.conf import settings
CLIENT_ID = getattr(settings, 'OAUTH2_APP_ID', None)
CLIENT_SECRET = getattr(settings, 'OAUTH2_API_SECRET', None)
REQUIRE_VERIFIED_EMAIL = getattr(settings, 'OAUTH2_REQUIRE_VERIFIED_EMAIL', False)
UNIQUE_USERID_FIELD = getattr(settings, 'OAUTH2_UNIQUE_USERID_FIELD', 'id')
ERR_NO_ORG_ACCESS = 'You do not have access to the required Generic organization.'
ERR_NO_PRIMARY_EMAIL = 'We were unable to find a primary email address associated with your Generic account.'
ERR_NO_SINGLE_PRIMARY_EMAIL = 'We were unable to find a single primary email address associated with your Generic account.'
ERR_NO_VERIFIED_PRIMARY_EMAIL = 'We were unable to find a verified, primary email address associated with your Generic account.'
ERR_NO_SINGLE_VERIFIED_PRIMARY_EMAIL = 'We were unable to find a single verified, primary email address associated with your Generic account.'
SCOPE = getattr(settings, 'OAUTH2_SCOPE', 'user:email')
# deprecated please use OAUTH2_API_DOMAIN and GITHUB_BASE_DOMAIN
DOMAIN = getattr(settings, 'OAUTH2_DOMAIN', 'api.oauth2.com')
BASE_DOMAIN = getattr(settings, 'OAUTH2_BASE_DOMAIN', 'oauth2.com')
API_DOMAIN = getattr(settings, 'OAUTH2_API_DOMAIN', DOMAIN)
ACCESS_TOKEN_URL = 'https://{0}/oauth/access_token'.format(BASE_DOMAIN)
AUTHORIZE_URL = 'https://{0}/oauth/authorize'.format(BASE_DOMAIN)
|
the-stack_106_29637 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
from sklearn.metrics import r2_score
from quant_benchmark.model_train.models import skols, enet_model, lasso, ridge
def _sklearn_train(X, y):
lr = skols()
ls = lasso()
rr = ridge()
en = enet_model(alpha=0.1, l1_ratio=0.7)
#
##------------------LinearRegression--------------------------------
print('--------lr----------')
lr.fit(X, y)
lr_predict = lr.predict(X)
lr_coef = lr.coef_
r2 = r2_score(y, lr_predict)
print("r2 score:", r2)
#
##------------------LassoRegression--------------------------------
print('--------lasso----------')
ls.fit(X, y)
ls_predict = ls.predict(X)
lasso_coef = ls.coef_
r2 = r2_score(y, ls_predict)
print("r2 score:", r2)
#
##------------------RidgeRegression--------------------------------
print('--------ridge----------')
rr.fit(X, y)
rr_predict = rr.predict(X)
rr_coef = rr.coef_
r2 = r2_score(y, rr_predict)
print("r2 score:", r2)
#
##------------------ElasticNet---------------------------------------
print('--------en----------')
en.fit(X, y)
en_predict = en.predict(X)
en_coef = en.coef_
r2 = r2_score(y, en_predict)
print("r2 score:", r2)
return lr_coef, lasso_coef, rr_coef, en_coef
def split_Xy(df):
assert "Label" in df.columns
y = df["Label"]
X = df[df.columns.drop("Label")]
return X, y
def sklearn_train(df):
X, y = split_Xy(df)
return _sklearn_train(X, y)
def training(df, label_name="Label"):
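    # Fits OLS, Lasso, Ridge and ElasticNet cross-sectionally for every date in the
    # "datetime" index level and collects the coefficients into per-model DataFrames
    # (index = dates, columns = feature names).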
idx = df.columns.drop(label_name)
lr_coef_container = {}
lasso_coef_container = {}
rr_coef_container = {}
en_coef_container = {}
for dt in df.index.levels[1]:
print(dt)
df_dt = df.xs(dt, level="datetime")
lr_coef, lasso_coef, rr_coef, en_coef = sklearn_train(df_dt)
lr_coef_container[dt] = lr_coef
lasso_coef_container[dt] = lasso_coef
rr_coef_container[dt] = rr_coef
en_coef_container[dt] = en_coef
lr_coef_df = pd.DataFrame(lr_coef_container, index=idx).transpose()
lasso_coef_df = pd.DataFrame(lasso_coef_container, index=idx).transpose()
rr_coef_df = pd.DataFrame(rr_coef_container, index=idx).transpose()
en_coef_df = pd.DataFrame(en_coef_container, index=idx).transpose()
return lr_coef_df, lasso_coef_df, rr_coef_df, en_coef_df
|
the-stack_106_29638 | import utils.data as du
import tensorflow as tf
from sequence_labeler import SequenceLabeler
import configparser
import numpy as np
import random
np.random.seed(1337)
random.seed(1337)
def test(test_path, data_save_path, conf_path, model_save_path, model_name, embedding_path, out_file_path):
ow = open(out_file_path, "w")
with tf.Graph().as_default():
np.random.seed(1337)
tf.set_random_seed(1337)
config = configparser.ConfigParser()
config.read(conf_path)
processors_num = int(config.get("Training", "processors"))
embedding_size = int(config.get("Network", "embedding_size"))
cell_rnn_size = int(config.get("Network", "cell_rnn_size"))
hidden_layer_size = int(config.get("Network", "hidden_layer_size"))
vocab, num_classes, max_length, cl, cl_inv, text_field, category_field, feature_field = du.load_params(
data_save_path)
test_data = du.load_data_with_maps(test_path, vocab=vocab, max_length=max_length,
text_field=text_field, category_field=category_field,
feature_field=feature_field,
cl_map=cl)
session_conf = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False, inter_op_parallelism_threads=processors_num,
intra_op_parallelism_threads=processors_num)
sess = tf.Session(config=session_conf)
with sess.as_default():
print("Initializing Embedding")
embedding = du.load_embeddings(embedding_path, vocab) if embedding_path != 'random' else du.initialize_random_embeddings(
len(vocab), embedding_size)
print("Building nn_model")
sequence_labeler = SequenceLabeler(sequence_length=max_length, embedding=embedding, cell_size=cell_rnn_size, num_classes=len(cl), hls=hidden_layer_size, verbose=False)
sequence_labeler.build_network()
tf.global_variables_initializer().run()
test_x, test_y = du.get_training_data(test_data, len(cl))
saver = tf.train.Saver(max_to_keep=1)
saver.restore(sess=sess, save_path=model_save_path+"/"+model_name)
loss, accuracy, predictions = sequence_labeler.predict(sess, test_x, test_y)
for i in range(len(test_data)):
l = test_data[i].original_length
tokens = test_data[i].original_tokens[0:l]
golds = test_data[i].labels[0:l]
predic = predictions[i]
pred_nums = predic[0:l]
pred_labels = [cl_inv[x] for x in pred_nums]
for a in zip(tokens, golds, pred_labels):
ow.write(" ".join(a)+"\n")
ow.write("\n")
ow.close()
if __name__ == "__main__":
test_path = "ner_data/english/ner_conll_03_testb.json"
data_save_path = "data_params.pkl"
conf_path = "conf/test.properties"
model_save_path = "ner_model"
model_name = "model"
summaries_dir = "ner_summaries"
embedding_path = "random"
out = "testb.tsv"
test(test_path, data_save_path, conf_path, model_save_path, model_name, embedding_path, out) |
the-stack_106_29641 | import os
import sys
print("Print running a script from a script", sys.argv[1])
import scribus
if len(sys.argv) < 2:
    print("Not enough arguments")
else:
if scribus.haveDoc():
filename = sys.argv[1]
pdf = scribus.PDFfile()
pdf.file = filename + ".pdf"
pdf.save()
else :
print("No file open") |
the-stack_106_29642 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# medallion documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 28 20:47:27 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx-prompt',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'medallion'
copyright = '2017, OASIS Open'
author = 'OASIS Open'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0.1'
# The full version, including alpha/beta/rc tags.
release = '2.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
],
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'medalliondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc, 'medallion.tex', 'medallion Documentation',
'OASIS Open', 'manual',
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc, 'medallion', 'medallion Documentation',
[author], 1,
),
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc, 'medallion', 'medallion Documentation',
author, 'medallion', 'One line description of project.',
'Miscellaneous',
),
]
|
the-stack_106_29644 | #!/usr/bin/env python3
"""RAK811 balena.io demo.
Minimalistic Balena / OTAA demo: send the CPU temperature every 5 minutes in
Cayenne LPP format.
Copyright 2019 Philippe Vanhaesendonck
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
from os import environ
from struct import pack
from sys import exit
from time import sleep
from rak811.rak811 import Mode, Rak811
from serial.serialutil import SerialException
# Get environment data
# Application EUI and Key are required parameters
app_eui = environ.get('APP_EUI')
app_key = environ.get('APP_KEY')
# LoRaWan band
band = environ.get('BAND', 'US915')
# Serial port may change depending on the RPi and its configuration:
# - mini UART: /dev/ttyS0
# - PL011: /dev/ttyAMA0
# The rak811 library uses by default /dev/serial0 symlink which is not present
# in the balena container
port = environ.get('SERIAL_PORT', '/dev/ttyAMA0')
# The /sys path exposing CPU temperature:
path_cpu_temp = environ.get('PATH_CPU_TEMP',
'/sys/class/thermal/thermal_zone0/temp')
print('*******************')
print('*** Configuration')
print('*******************')
print('Band : ', band)
print('App Key: ', app_key)
print('App EUI: ', app_eui)
print('Port : ', port)
print('Path : ', path_cpu_temp)
print()
try:
lora = Rak811(port=port)
except SerialException as e:
print('Cannot instantiate Rak811 class.')
print('This is most probably an issue with the serial port.')
print('Check your device configuration and the SERIAL_PORT variable')
print('Error:')
print(e)
sleep(600)
exit(0)
print('Initialise RAK811 module...')
lora.hard_reset()
lora.mode = Mode.LoRaWan
lora.band = band
print('Device EUI is:', lora.get_config('dev_eui'))
print('Setup RAK811 module...')
if app_key is None or app_eui is None:
print('App Key and EUI are required...')
print('Set APP_KEY and APP_EUI in balenaCloud console')
sleep(600)
exit(0)
lora.set_config(app_eui=app_eui,
app_key=app_key)
print('Joining TTN...')
lora.join_otaa()
lora.dr = 5
print('Entering application loop')
print('You can send downlinks from the TTN console')
try:
while True:
# Read temperature
with open(path_cpu_temp, 'rb') as f:
temp = f.read().strip()
temp = float(int(temp)) / 1000
print('Sending CPU temperature: {0:.1f}'.format(temp))
# Cayenne LPP temperature (Code 103) is stored as 0.1 °C Signed MSB
lora.send(pack('>BBh', 1, 103, int(temp * 10 + 0.5)))
while lora.nb_downlinks:
print('Received:', lora.get_downlink()['data'].hex())
sleep(300)
except: # noqa: E722
pass
print('Cleaning up')
lora.close()
exit(0)
|
the-stack_106_29645 | GRAINS_EXPECTATIONS = {
'ubuntu1604': {
'os': 'Ubuntu',
'oscodename': 'xenial',
'os_family': 'Debian',
'osfullname': 'Ubuntu',
'osarch': 'amd64',
'osrelease': '16.04',
'osrelease_info': [16, 4],
},
'ubuntu1804': {
'os': 'Ubuntu',
'oscodename': 'bionic',
'os_family': 'Debian',
'osfullname': 'Ubuntu',
'osarch': 'amd64',
'osrelease': '18.04',
'osrelease_info': [18, 4],
},
'rhel6': {
'os': 'RedHat',
'oscodename': 'Santiago',
'os_family': 'RedHat',
'osfullname': 'Red Hat Enterprise Linux Server',
'osrelease': '6.8',
'osrelease_info': [6, 8],
},
'rhel7': {
'os': 'RedHat',
'oscodename': 'Maipo',
'os_family': 'RedHat',
'osfullname': 'Red Hat Enterprise Linux Server',
'osrelease': '7.2',
'osrelease_info': [7, 2],
},
'rhel8': {
'os': 'RedHat',
'oscodename': 'Ootpa',
'os_family': 'RedHat',
'osfullname': 'Red Hat Enterprise Linux',
'osrelease': '8.1',
'osrelease_info': [8, 1],
},
'sles11sp3': {
'os': 'SUSE',
'oscodename': 'SUSE Linux Enterprise Server 11 SP3',
'os_family': 'Suse',
'osfullname': 'SLES',
'osrelease': '11.3',
'osrelease_info': [11, 3],
},
'sles11sp4': {
'os': 'SUSE',
'oscodename': 'SUSE Linux Enterprise Server 11 SP4',
'os_family': 'Suse',
'osfullname': 'SLES',
'osrelease': '11.4',
'osrelease_info': [11, 4],
},
'sles12': {
'os': 'SUSE',
'oscodename': 'SUSE Linux Enterprise Server 12',
'os_family': 'Suse',
'osfullname': 'SLES',
'osrelease': '12',
'osrelease_info': [12],
},
'sles12sp1': {
'os': 'SUSE',
'oscodename': 'SUSE Linux Enterprise Server 12 SP1',
'os_family': 'Suse',
'osfullname': 'SLES',
'osrelease': '12.1',
'osrelease_info': [12, 1],
},
'sles12sp2': {
'os': 'SUSE',
'oscodename': 'SUSE Linux Enterprise Server 12 SP2',
'os_family': 'Suse',
'osfullname': 'SLES',
'osrelease': '12.2',
'osrelease_info': [12, 2],
},
'sles12sp3': {
'os': 'SUSE',
'oscodename': 'SUSE Linux Enterprise Server 12 SP3',
'os_family': 'Suse',
'osfullname': 'SLES',
'osrelease': '12.3',
'osrelease_info': [12, 3],
},
'sles12sp4': {
'os': 'SUSE',
'oscodename': 'SUSE Linux Enterprise Server 12 SP4',
'os_family': 'Suse',
'osfullname': 'SLES',
'osrelease': '12.4',
'osrelease_info': [12, 4],
},
'sles15': {
'os': 'SUSE',
'oscodename': 'SUSE Linux Enterprise Server 15',
'os_family': 'Suse',
'osfullname': 'SLES',
'osrelease': '15',
'osrelease_info': [15],
},
'sles15sp1': {
'os': 'SUSE',
'oscodename': 'SUSE Linux Enterprise Server 15 SP1',
'os_family': 'Suse',
'osfullname': 'SLES',
'osrelease': '15.1',
'osrelease_info': [15, 1],
},
'sles15sp2': {
'os': 'SUSE',
'oscodename': 'SUSE Linux Enterprise Server 15 SP2',
'os_family': 'Suse',
'osfullname': 'SLES',
'osrelease': '15.2',
'osrelease_info': [15, 2],
},
'opensuse423': {
'os': 'SUSE',
'oscodename': 'openSUSE Leap 42.3',
'os_family': 'Suse',
'osfullname': 'Leap',
'osrelease': '42.3',
'osrelease_info': [42, 3],
},
'opensuse150': {
'os': 'SUSE',
'oscodename': 'openSUSE Leap 15.0',
'os_family': 'Suse',
'osfullname': 'Leap',
'osrelease': '15.0',
'osrelease_info': [15, 0],
},
'opensuse151': {
'os': 'SUSE',
'oscodename': 'openSUSE Leap 15.1',
'os_family': 'Suse',
'osfullname': 'Leap',
'osrelease': '15.1',
'osrelease_info': [15, 1],
},
'tumbleweed': {
'os': 'SUSE',
'oscodename': 'openSUSE Tumbleweed',
'os_family': 'Suse',
'osfullname': 'Tumbleweed',
'osrelease': None, # This grain changes on each snapshot
'osrelease_info': None, # This grain changes on each snapshot
},
'leap42sp1': {
'os': 'openSUSE Leap',
'oscodename': 'openSUSE Leap 42.1 (x86_64)',
'os_family': 'Suse',
'osfullname': 'openSUSE Leap',
'osrelease': '42.1',
'osrelease_info': [42, 1],
},
'centos7': {
'os': 'CentOS',
'oscodename': 'CentOS Linux 7 (Core)',
'os_family': 'RedHat',
'osfullname': 'CentOS Linux',
'osrelease': '7.6.1810',
'osrelease_info': [7, 6, 1810],
},
}
|
the-stack_106_29647 | from rest_framework import serializers
from core.models import Tag, Ingridient, Recipe
class TagSerializer(serializers.ModelSerializer):
"""Serializer for tag object"""
class Meta:
model = Tag
fields = ('id', 'name')
read_only_fields = ('id',)
class IngridientSerializer(serializers.ModelSerializer):
"""Serializer for ingridient object"""
class Meta:
model = Ingridient
fields = ('id', 'name')
read_only_fields = ('id',)
class RecipeSerializer(serializers.ModelSerializer):
"""Serializer for Recipe Object"""
ingridients = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Ingridient.objects.all(),
)
tags = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Tag.objects.all()
)
class Meta:
model = Recipe
fields = ('id', 'title', 'ingridients', 'tags', 'time_minutes',
'price', 'link',)
read_only_fields = ('id',)
class RecipeDetailSerializer(RecipeSerializer):
"""Serialize a Recipe details"""
ingridients = IngridientSerializer(many=True, read_only=True)
tags = TagSerializer(many=True, read_only=True)
|
the-stack_106_29648 | # Copyright (C) 2013 Yahoo! Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
from io import StringIO
import logging
import os
import re
import sys
from unittest import mock
import fixtures
from keystoneauth1 import session
import testtools
from testtools import matchers
from neutronclient.common import clientmanager
from neutronclient.neutron.v2_0 import network
from neutronclient import shell as openstack_shell
DEFAULT_USERNAME = 'username'
DEFAULT_PASSWORD = 'password'
DEFAULT_TENANT_ID = 'tenant_id'
DEFAULT_TENANT_NAME = 'tenant_name'
DEFAULT_AUTH_URL = 'http://127.0.0.1:5000/v2.0/'
DEFAULT_TOKEN = '3bcc3d3a03f44e3d8377f9247b0ad155'
DEFAULT_URL = 'http://quantum.example.org:9696/'
DEFAULT_REGION = 'regionOne'
DEFAULT_ENDPOINT_TYPE = 'public'
DEFAULT_API_VERSION = '2.0'
DEFAULT_SERVICE_TYPE = 'network'
DEFAULT_SERVICE_NAME = 'neutron'
DEFAULT_RETRIES = 3
DEFAULT_TIMEOUT = 3.0
class ShellTest(testtools.TestCase):
FAKE_ENV = {
'OS_USERNAME': DEFAULT_USERNAME,
'OS_PASSWORD': DEFAULT_PASSWORD,
'OS_TENANT_ID': DEFAULT_TENANT_ID,
'OS_TENANT_NAME': DEFAULT_TENANT_NAME,
'OS_AUTH_URL': DEFAULT_AUTH_URL,
'OS_REGION_NAME': None,
'HTTP_PROXY': None,
'http_proxy': None,
}
# Patch os.environ to avoid required auth info.
def setUp(self):
super(ShellTest, self).setUp()
for var in self.FAKE_ENV:
self.useFixture(
fixtures.EnvironmentVariable(
var, self.FAKE_ENV[var]))
def shell(self, argstr, check=False, expected_val=0):
# expected_val is the expected return value after executing
# the command in NeutronShell
orig = (sys.stdout, sys.stderr)
clean_env = {}
_old_env, os.environ = os.environ, clean_env.copy()
try:
sys.stdout = StringIO()
sys.stderr = StringIO()
_shell = openstack_shell.NeutronShell('2.0')
_shell.run(argstr.split())
except SystemExit:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertEqual(expected_val, exc_value.code)
finally:
stdout = sys.stdout.getvalue()
stderr = sys.stderr.getvalue()
sys.stdout.close()
sys.stderr.close()
sys.stdout, sys.stderr = orig
os.environ = _old_env
return stdout, stderr
def test_run_unknown_command(self):
self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
stdout, stderr = self.shell('fake', check=True)
self.assertFalse(stdout)
self.assertIn("Unknown command ['fake']", stderr.strip())
def test_help(self):
required = 'usage:'
help_text, stderr = self.shell('help')
self.assertThat(
help_text,
matchers.MatchesRegex(required))
def test_bash_completion(self):
required = '.*os_user_domain_id.*'
bash_completion, stderr = self.shell('bash-completion')
self.assertThat(
bash_completion,
matchers.MatchesRegex(required))
def test_help_on_subcommand(self):
required = [
'.*?^usage: .* quota-list']
stdout, stderr = self.shell('help quota-list')
for r in required:
self.assertThat(
stdout,
matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
def test_help_command(self):
required = 'usage:'
help_text, stderr = self.shell('help network-create')
self.assertThat(
help_text,
matchers.MatchesRegex(required))
def test_bash_completion_in_outputs_of_help_command(self):
help_text, stderr = self.shell('help')
completion_cmd = "bash-completion"
completion_help_str = ("Prints all of the commands and options "
"for bash-completion.")
self.assertIn(completion_cmd, help_text)
self.assertIn(completion_help_str, help_text)
def test_bash_completion_command(self):
# just check we have some output
required = [
'.*--tenant_id',
'.*help',
'.*--dns-nameserver']
help_text, stderr = self.shell('neutron bash-completion')
for r in required:
self.assertThat(help_text,
matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
def test_build_option_parser(self):
neutron_shell = openstack_shell.NeutronShell('2.0')
result = neutron_shell.build_option_parser('descr', '2.0')
self.assertIsInstance(result, argparse.ArgumentParser)
@mock.patch.object(openstack_shell.NeutronShell, 'run')
def test_main_with_unicode(self, fake_shell):
unicode_text = u'\u7f51\u7edc'
argv = ['net-list', unicode_text, unicode_text]
fake_shell.return_value = 0
ret = openstack_shell.main(argv=argv)
fake_shell.assert_called_once_with([u'net-list', unicode_text,
unicode_text])
self.assertEqual(0, ret)
def test_endpoint_option(self):
shell = openstack_shell.NeutronShell('2.0')
parser = shell.build_option_parser('descr', '2.0')
# Neither $OS_ENDPOINT_TYPE nor --os-endpoint-type
namespace = parser.parse_args([])
self.assertEqual('public', namespace.os_endpoint_type)
# --endpoint-type but not $OS_ENDPOINT_TYPE
namespace = parser.parse_args(['--os-endpoint-type=admin'])
self.assertEqual('admin', namespace.os_endpoint_type)
def test_endpoint_environment_variable(self):
fixture = fixtures.EnvironmentVariable("OS_ENDPOINT_TYPE",
"public")
self.useFixture(fixture)
shell = openstack_shell.NeutronShell('2.0')
parser = shell.build_option_parser('descr', '2.0')
# $OS_ENDPOINT_TYPE but not --endpoint-type
namespace = parser.parse_args([])
self.assertEqual("public", namespace.os_endpoint_type)
# --endpoint-type and $OS_ENDPOINT_TYPE
namespace = parser.parse_args(['--endpoint-type=admin'])
self.assertEqual('admin', namespace.endpoint_type)
def test_timeout_option(self):
shell = openstack_shell.NeutronShell('2.0')
parser = shell.build_option_parser('descr', '2.0')
# Neither $OS_ENDPOINT_TYPE nor --endpoint-type
namespace = parser.parse_args([])
self.assertIsNone(namespace.http_timeout)
# --endpoint-type but not $OS_ENDPOINT_TYPE
namespace = parser.parse_args(['--http-timeout=50'])
self.assertEqual(50, namespace.http_timeout)
def test_timeout_environment_variable(self):
fixture = fixtures.EnvironmentVariable("OS_NETWORK_TIMEOUT",
"50")
self.useFixture(fixture)
shell = openstack_shell.NeutronShell('2.0')
parser = shell.build_option_parser('descr', '2.0')
namespace = parser.parse_args([])
self.assertEqual(50, namespace.http_timeout)
def test_run_incomplete_command(self):
self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
cmd = (
'--os-username test --os-password test --os-project-id test '
'--os-auth-strategy keystone --os-auth-url '
'%s port-create' %
DEFAULT_AUTH_URL)
stdout, stderr = self.shell(cmd, check=True, expected_val=2)
search_str = "Try 'neutron help port-create' for more information"
self.assertTrue(any(search_str in string for string
in stderr.split('\n')))
def _test_authenticate_user(self, expect_verify, expect_insecure,
**options):
base_options = {'os_cloud': None,
'http_timeout': DEFAULT_TIMEOUT,
'region_name': DEFAULT_REGION,
'network_service_name': DEFAULT_SERVICE_NAME,
'neutron_service_type': DEFAULT_SERVICE_TYPE}
options.update(base_options)
if options.get('os_token'):
options.update({'auth_type': 'token'})
options.update({'os_token': 'token', 'os_url': 'url'})
else:
options.update({'os_token': None, 'os_url': None})
with mock.patch.object(openstack_shell.NeutronShell,
'run_subcommand'), \
mock.patch.object(session, 'Session') as session_mock, \
mock.patch.object(clientmanager, 'ClientManager') as cmgr_mock:
shell = openstack_shell.NeutronShell(DEFAULT_API_VERSION)
shell.options = mock.Mock(spec=options.keys())
for k, v in options.items():
setattr(shell.options, k, v)
shell.options.os_endpoint_type = DEFAULT_ENDPOINT_TYPE
shell.options.retries = DEFAULT_RETRIES
if not (options.get('os_token') and options.get('os_url')):
auth = mock.ANY
auth_session = mock.sentinel.session
session_mock.return_value = auth_session
else:
auth = None
auth_session = None
shell.authenticate_user()
if not (options.get('os_token') and options.get('os_url')):
session_mock.assert_called_once_with(
auth=mock.ANY, verify=expect_verify,
cert=options.get('cert'),
timeout=DEFAULT_TIMEOUT)
else:
self.assertFalse(session_mock.called)
cmgr_mock.assert_called_once_with(
retries=DEFAULT_RETRIES,
raise_errors=False,
session=auth_session,
url=options.get('os_url'),
token=options.get('os_token'),
region_name=DEFAULT_REGION,
api_version=DEFAULT_API_VERSION,
service_type=DEFAULT_SERVICE_TYPE,
service_name=DEFAULT_SERVICE_NAME,
endpoint_type=DEFAULT_ENDPOINT_TYPE,
auth=auth,
insecure=expect_insecure,
log_credentials=True)
def test_authenticate_secure_with_cacert_with_cert(self):
self._test_authenticate_user(
insecure=False, cacert='cacert', cert='cert',
expect_verify='cacert', expect_insecure=False)
def test_authenticate_secure_with_cacert_with_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=False, cacert='cacert', cert='cert',
expect_verify='cacert', expect_insecure=False)
def test_authenticate_insecure_with_cacert_with_cert(self):
self._test_authenticate_user(
insecure=True, cacert='cacert', cert='cert',
expect_verify=False, expect_insecure=True)
def test_authenticate_insecure_with_cacert_with_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=True, cacert='cacert', cert='cert',
expect_verify=False, expect_insecure=True)
def test_authenticate_secure_without_cacert_with_cert(self):
self._test_authenticate_user(
insecure=False, cert='cert',
expect_verify=True, expect_insecure=False)
def test_authenticate_secure_without_cacert_with_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=False, cert='cert',
expect_verify=True, expect_insecure=False)
def test_authenticate_insecure_without_cacert_with_cert(self):
self._test_authenticate_user(
insecure=True, cert='cert',
expect_verify=False, expect_insecure=True)
def test_authenticate_insecure_without_cacert_with_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=True, cert='cert',
expect_verify=False, expect_insecure=True)
def test_authenticate_secure_with_cacert_without_cert(self):
self._test_authenticate_user(
insecure=False, cacert='cacert',
expect_verify='cacert', expect_insecure=False)
def test_authenticate_secure_with_cacert_without_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=False, cacert='cacert',
expect_verify='cacert', expect_insecure=False)
def test_authenticate_insecure_with_cacert_without_cert(self):
self._test_authenticate_user(
insecure=True, cacert='cacert',
expect_verify=False, expect_insecure=True)
def test_authenticate_insecure_with_cacert_without_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=True, cacert='cacert',
expect_verify=False, expect_insecure=True)
def test_commands_dict_populated(self):
# neutron.shell.COMMANDS is populated once NeutronShell is initialized.
# To check COMMANDS during NeutronShell initialization,
# reset COMMANDS to some dummy value before calling NeutronShell().
self.useFixture(fixtures.MockPatchObject(openstack_shell,
'COMMANDS', None))
openstack_shell.NeutronShell('2.0')
self.assertLessEqual(
{'net-create': network.CreateNetwork,
'net-delete': network.DeleteNetwork,
'net-list': network.ListNetwork,
'net-show': network.ShowNetwork,
'net-update': network.UpdateNetwork}.items(),
openstack_shell.COMMANDS['2.0'].items())
|
the-stack_106_29649 | """
A manager for multiple workers.
-- [email protected]
"""
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name
# pylint: disable=abstract-class-not-used
# pylint: disable=abstract-class-little-used
from argparse import Namespace
from multiprocessing import Process
import numpy as np
import os
import pickle
import shutil
import time
try:
from sets import Set
except ImportError:
Set = set
# Local
from .exd_utils import EVAL_ERROR_CODE
_TIME_TOL = 1e-5
class WorkerManager(object):
""" A Base class for a worker manager. """
def __init__(self, worker_ids):
""" Constructor. """
if hasattr(worker_ids, '__iter__'):
self.worker_ids = worker_ids
else:
self.worker_ids = list(range(worker_ids))
self.num_workers = len(self.worker_ids)
# These will be set in reset
self.experiment_designer = None
self.latest_results = None
# Reset
self.reset()
def reset(self):
""" Resets everything. """
self.experiment_designer = None
self.latest_results = [] # A list of namespaces
self._child_reset()
def _child_reset(self):
""" Child reset. """
raise NotImplementedError('Implement in a child class.')
def fetch_latest_results(self):
""" Returns the latest results. """
ret_idxs = []
for i in range(len(self.latest_results)):
if (self.latest_results[i].receive_time <=
self.experiment_designer.get_curr_spent_capital() + _TIME_TOL):
ret_idxs.append(i)
keep_idxs = [i for i in range(len(self.latest_results)) if i not in ret_idxs]
ret = [self.latest_results[i] for i in ret_idxs]
self.latest_results = [self.latest_results[i] for i in keep_idxs]
return ret
def close_all_queries(self):
""" Closes all queries. """
raise NotImplementedError('Implement in a child class.')
def set_experiment_designer(self, experiment_designer):
""" Set the experiment designer. """
self.experiment_designer = experiment_designer
def a_worker_is_free(self):
""" Returns true if a worker is free. """
raise NotImplementedError('Implement in a child class.')
def all_workers_are_free(self):
""" Returns true if all workers are free. """
raise NotImplementedError('Implement in a child class.')
def _dispatch_experiment(self, func_caller, qinfo, **kwargs):
""" Dispatches job. """
raise NotImplementedError('Implement in a child class.')
def dispatch_single_experiment(self, func_caller, qinfo, **kwargs):
""" Dispatches job. """
raise NotImplementedError('Implement in a child class.')
def dispatch_batch_of_experiments(self, func_caller, qinfos, **kwargs):
""" Dispatches an entire batch of experiments. """
raise NotImplementedError('Implement in a child class.')
def get_time_distro_info(self):
""" Returns information on the time distribution. """
#pylint: disable=no-self-use
return ''
def get_poll_time_real(self):
""" Returns the poll time. """
raise NotImplementedError('Implement in a child class.')
# A synthetic worker manager - for simulating multiple workers ---------------------------
class SyntheticWorkerManager(WorkerManager):
""" A Worker manager for synthetic functions. Mostly to be used in simulations. """
def __init__(self, num_workers, time_distro='caller_eval_cost',
time_distro_params=None):
""" Constructor. """
self.worker_pipe = None
super(SyntheticWorkerManager, self).__init__(num_workers)
# Set up the time sampler
self.time_distro = time_distro
self.time_distro_params = time_distro_params
self.time_sampler = None
self._set_up_time_sampler()
def _set_up_time_sampler(self):
""" Set up the sampler for the time random variable. """
self.time_distro_params = Namespace() if self.time_distro_params is None else \
self.time_distro_params
if self.time_distro == 'caller_eval_cost':
pass
elif self.time_distro == 'const':
if not hasattr(self.time_distro_params, 'const_val'):
self.time_distro_params.const_val = 1
self.time_sampler = lambda num_samples: (np.ones((num_samples,)) *
self.time_distro_params.const_val)
elif self.time_distro == 'uniform':
if not hasattr(self.time_distro_params, 'ub'):
self.time_distro_params.ub = 2.0
self.time_distro_params.lb = 0.0
ub = self.time_distro_params.ub
lb = self.time_distro_params.lb
self.time_sampler = lambda num_samples: (np.random.random((num_samples,)) *
(ub - lb) + lb)
elif self.time_distro == 'halfnormal':
      if not hasattr(self.time_distro_params, 'sigma'):
self.time_distro_params.sigma = np.sqrt(np.pi/2)
self.time_sampler = lambda num_samples: np.abs(np.random.normal(
scale=self.time_distro_params.sigma, size=(num_samples,)))
else:
raise NotImplementedError('Not implemented time_distro = %s yet.'%(
self.time_distro))
def _child_reset(self):
""" Child reset. """
self.worker_pipe = [[wid, 0.0] for wid in self.worker_ids]
def sort_worker_pipe(self):
""" Sorts worker pipe by finish time. """
self.worker_pipe.sort(key=lambda x: x[-1])
def a_worker_is_free(self):
""" Returns true if a worker is free. """
    return self.worker_pipe[0][-1] # Earliest finish time; synthetic workers are always available.
def all_workers_are_free(self):
""" Returns true if all workers are free. """
return self.worker_pipe[-1][-1]
def close_all_queries(self):
""" Close all queries. """
pass
def _dispatch_experiment(self, func_caller, qinfo, worker_id, **kwargs):
""" Dispatch experiment. """
# Set worker id and whether or not eval_time should be returned
qinfo.worker_id = worker_id # indicate which worker
qinfo = func_caller.eval_from_qinfo(qinfo, **kwargs)
if self.time_distro == 'caller_eval_cost':
if hasattr(qinfo, 'caller_eval_cost') and qinfo.caller_eval_cost is not None:
qinfo.eval_time = qinfo.caller_eval_cost
else:
qinfo.eval_time = 1.0
else:
qinfo.eval_time = float(self.time_sampler(1))
qinfo.receive_time = qinfo.send_time + qinfo.eval_time
# Store the result in latest_results
self.latest_results.append(qinfo)
return qinfo
def dispatch_single_experiment(self, func_caller, qinfo, **kwargs):
""" Dispatch a single experiment. """
worker_id = self.worker_pipe[0][0]
qinfo = self._dispatch_experiment(func_caller, qinfo, worker_id, **kwargs)
# Sort the pipe
self.worker_pipe[0][-1] = qinfo.receive_time
self.sort_worker_pipe()
def dispatch_batch_of_experiments(self, func_caller, qinfos, **kwargs):
""" Dispatches an entire batch of experiments. """
assert len(qinfos) == self.num_workers
for idx in range(self.num_workers):
qinfo = self._dispatch_experiment(func_caller, qinfos[idx],
self.worker_pipe[idx][0], **kwargs)
self.worker_pipe[idx][-1] = qinfo.receive_time
self.sort_worker_pipe()
def get_time_distro_info(self):
""" Returns information on the time distribution. """
return self.time_distro
def get_poll_time_real(self):
""" Return 0.0 as the poll time. """
return 0.0
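# Illustrative usage (not part of the original module): a minimal sketch of
# how SyntheticWorkerManager is driven. FakeDesigner and FakeCaller are
# hypothetical stand-ins for the real experiment designer and function caller
# interfaces; only the methods this class actually calls are provided.
def _demo_synthetic_worker_manager():
  """ Dispatches one synthetic experiment and fetches its result. """
  class FakeDesigner(object):
    """ Exposes the single method the manager queries on the designer. """
    def get_curr_spent_capital(self):
      return 10.0
  class FakeCaller(object):
    """ Echoes the qinfo back, as eval_from_qinfo is expected to do. """
    def eval_from_qinfo(self, qinfo, **kwargs):
      qinfo.val = 0.0
      return qinfo
  manager = SyntheticWorkerManager(num_workers=2, time_distro='const')
  manager.set_experiment_designer(FakeDesigner())
  qinfo = Namespace(send_time=0.0)
  manager.dispatch_single_experiment(FakeCaller(), qinfo)
  return manager.fetch_latest_results()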
# Real worker manager - for simulating multiple workers --------------------------------
class RealWorkerManager(WorkerManager):
""" A worker manager for resnet. """
# pylint: disable=attribute-defined-outside-init
def __init__(self, worker_ids, tmp_dir,
poll_time=0.5, sleep_time_after_new_process=0.5):
""" Constructor. """
super(RealWorkerManager, self).__init__(worker_ids)
self.poll_time = poll_time
self.sleep_time_after_new_process = sleep_time_after_new_process
self.tmp_dir = tmp_dir
self._rwm_set_up()
self._child_reset()
def _rwm_set_up(self):
""" Sets things up for the child. """
    # Create the result directories.
self.result_dir_names = {wid:'%s/result_%s'%(self.tmp_dir, str(wid)) for wid in
self.worker_ids}
# Create the working directories
self.working_dir_names = {wid:'%s/working_%s/tmp'%(self.tmp_dir,
str(wid)) for wid in self.worker_ids}
# Create the last receive times
self.last_receive_times = {wid:0.0 for wid in self.worker_ids}
# Create file names
self._result_file_name = 'result.p'
self._num_file_read_attempts = 10
# self._file_read_poll_time = 0.5 # wait for 0.5 seconds
@classmethod
def _delete_dirs(cls, list_of_dir_names):
""" Deletes a list of directories. """
for dir_name in list_of_dir_names:
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
@classmethod
def _delete_and_create_dirs(cls, list_of_dir_names):
""" Deletes a list of directories and creates new ones. """
for dir_name in list_of_dir_names:
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
os.makedirs(dir_name)
def _child_reset(self):
""" Resets child. """
# Delete/create the result and working directories.
if not hasattr(self, 'result_dir_names'): # Just for the super constructor.
return
self._delete_and_create_dirs(list(self.result_dir_names.values()))
self._delete_dirs(list(self.working_dir_names.values()))
self.free_workers = Set(self.worker_ids)
self.func_callers_for_each_worker = {wid:None for wid in self.worker_ids}
self.qinfos_in_progress = {wid:None for wid in self.worker_ids}
self.worker_processes = {wid:None for wid in self.worker_ids}
def _get_result_file_name_for_worker(self, worker_id):
""" Computes the result file name for the worker. """
return os.path.join(self.result_dir_names[worker_id], self._result_file_name)
# def _read_result_from_file(self, result_file_name):
# """ Reads the result from the file name. """
# #pylint: disable=bare-except
# num_attempts = 0
# while num_attempts < self._num_file_read_attempts:
# try:
# file_reader = open(result_file_name, 'r')
# read_in = file_reader.read().strip()
# try:
# # try converting to float. If not successful, it is likely an error string.
# read_in = float(read_in)
# except:
# pass
# file_reader.close()
# result = read_in
# break
# except:
# print('Encountered error when reading %s. Trying again.'%(result_file_name))
# time.sleep(self.poll_time)
# file_reader.close()
# result = EVAL_ERROR_CODE
# return result
  def _read_result_from_file(self, result_file_name):
    """ Reads the pickled result from the file name, retrying on failure. """
    #pylint: disable=bare-except
    result = EVAL_ERROR_CODE
    num_attempts = 0
    while num_attempts < self._num_file_read_attempts:
      num_attempts += 1
      try:
        # Use a context manager so the file is closed on both success and failure.
        with open(result_file_name, 'rb') as file_reader:
          result = pickle.load(file_reader)
        break
      except:
        print('Encountered error when reading %s. Trying again.'%(result_file_name))
        time.sleep(self.poll_time)
    return result
def _read_result_from_worker_and_update(self, worker_id):
""" Reads the result from the worker. """
# pylint: disable=maybe-no-member
# Read the file
result_file_name = self._get_result_file_name_for_worker(worker_id)
result_qinfo = self._read_result_from_file(result_file_name)
saved_qinfo = self.qinfos_in_progress[worker_id]
# Now update the relevant qinfo and put it to latest_results
if isinstance(result_qinfo, Namespace):
assert self.func_callers_for_each_worker[worker_id].domain.members_are_equal(
result_qinfo.point, saved_qinfo.point)
qinfo = result_qinfo
elif result_qinfo == EVAL_ERROR_CODE:
qinfo = saved_qinfo
qinfo.val = EVAL_ERROR_CODE
else:
      raise ValueError('Could not read qinfo object: %s.'%(str(result_qinfo)))
qinfo.receive_time = self.experiment_designer.get_curr_spent_capital()
qinfo.eval_time = qinfo.receive_time - qinfo.send_time
if not hasattr(qinfo, 'true_val'):
qinfo.true_val = qinfo.val
self.latest_results.append(qinfo)
# Update receive time
self.last_receive_times[worker_id] = qinfo.receive_time
# Delete the file.
os.remove(result_file_name)
# Delete content in a working directory.
shutil.rmtree(self.working_dir_names[worker_id])
# Add the worker to the list of free workers and clear qinfos in progress.
self.worker_processes[worker_id].terminate()
self.worker_processes[worker_id] = None
self.qinfos_in_progress[worker_id] = None
self.func_callers_for_each_worker[worker_id] = None
self.free_workers.add(worker_id)
def _worker_is_free(self, worker_id):
""" Checks if worker with worker_id is free. """
if worker_id in self.free_workers:
return True
worker_result_file_name = self._get_result_file_name_for_worker(worker_id)
    if os.path.exists(worker_result_file_name):
      self._read_result_from_worker_and_update(worker_id)
      return True
    else:
      return False
def _get_last_receive_time(self):
""" Returns the last time we received a job. """
all_receive_times = list(self.last_receive_times.values())
return max(all_receive_times)
def a_worker_is_free(self):
""" Returns true if a worker is free. """
for wid in self.worker_ids:
if self._worker_is_free(wid):
return self._get_last_receive_time()
return None
def all_workers_are_free(self):
""" Returns true if all workers are free. """
all_are_free = True
for wid in self.worker_ids:
all_are_free = self._worker_is_free(wid) and all_are_free
if all_are_free:
return self._get_last_receive_time()
else:
return None
def _dispatch_experiment(self, func_caller, qinfo, worker_id, **kwargs):
""" Dispatches experiment to worker_id. """
#pylint: disable=star-args
if self.qinfos_in_progress[worker_id] is not None:
err_msg = 'qinfos_in_progress: %s,\nfree_workers: %s.'%(
str(self.qinfos_in_progress), str(self.free_workers))
print(err_msg)
raise ValueError('Check if worker is free before sending experiment.')
# First add all the data to qinfo
qinfo.worker_id = worker_id
qinfo.working_dir = self.working_dir_names[worker_id]
qinfo.result_file = self._get_result_file_name_for_worker(worker_id)
# Create the working directory
os.makedirs(qinfo.working_dir)
# Dispatch the experiment in a new process
target_func = lambda: func_caller.eval_from_qinfo(qinfo, **kwargs)
self.worker_processes[worker_id] = Process(target=target_func)
self.worker_processes[worker_id].start()
time.sleep(self.sleep_time_after_new_process)
# Add the qinfo to the in progress bar and remove from free_workers
self.qinfos_in_progress[worker_id] = qinfo
self.func_callers_for_each_worker[worker_id] = func_caller
self.free_workers.discard(worker_id)
def dispatch_single_experiment(self, func_caller, qinfo, **kwargs):
""" Dispatches a single experiment to a free worker. """
worker_id = self.free_workers.pop()
self._dispatch_experiment(func_caller, qinfo, worker_id, **kwargs)
def dispatch_batch_of_experiments(self, func_caller, qinfos, **kwargs):
""" Dispatches a batch of experiments. """
assert len(qinfos) == self.num_workers
for idx in range(self.num_workers):
self._dispatch_experiment(func_caller, qinfos[idx], self.worker_ids[idx], **kwargs)
def close_all_queries(self):
""" Closes all queries. """
pass
def get_time_distro_info(self):
""" Returns information on the time distribution. """
return 'realtime'
def get_poll_time_real(self):
""" Return 0.0 as the poll time. """
return self.poll_time
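# Illustrative note (not part of the original module): RealWorkerManager
# expects func_caller.eval_from_qinfo, running in its own Process, to
# eventually write the completed qinfo Namespace (pickled) to
# qinfo.result_file, where _read_result_from_worker_and_update picks it up.
# A minimal caller honouring that contract could end with something like:
def _demo_write_real_worker_result(qinfo):
  """ Stores a placeholder objective value and pickles the qinfo. """
  qinfo.val = 0.0  # placeholder value -- an assumption, not a real objective
  with open(qinfo.result_file, 'wb') as result_file:
    pickle.dump(qinfo, result_file)
  return qinfo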
|
the-stack_106_29651 | # boudoir - decadence de la marotte
# https://www.youtube.com/watch?v=EnVdmTRvllw
# https://gist.github.com/jf-parent/17abfe962599eca3bdb2982fbef047c5
Scale.default = Scale.major
Root.default = 0
Clock.bpm = 120
chords = [[0, 1], [(1, 2), (2, 3)]]
Clock.stop()
def main():
print('main')
~p1 >> sawbass(chords, oct=[4,[4,5],7], dur=1, formant=[1,2,3], lpf=1200, sus=var([1,.5],[16,8])).after(420, 'stop')
Clock.future(0, main)
def main1():
print('main1')
~p2 >> gong(chords, amp=linvar([.2,1.2],16)).after(360, 'stop')
Clock.future(30, main1)
def main2():
print('main2')
~b1 >> play('-*', amp=linvar([.8,0],16)).after(240, 'stop')
~b2 >> play('~', dur=var([.5,1,2],16), amp=.8).after(240, 'stop')
~b3 >> play('(gg)', dur=1, amp=linvar([.8,0],16)).after(240, 'stop')
Clock.future(60, main2)
def main3():
print('main3')
~p6 >> blip([(0,1),(2,3),(4,5),(1,2),(2,3),(4,5)], oct=[4,5], amp=linvar([.5,0],16), chop=linvar([0,2,4],8), formant=linvar([0,2,6],32), vib=10, vibdepth=linvar([0,.2,1],[32,8,2]), tremolo=2, echo=2, sus=2).after(120, 'stop')
Clock.future(120, main3)
def main4():
print('main4')
~p7 >> donk([0,1], dur=var([.25,.5],8)).after(320, 'stop')
Clock.future(160, main4)
def main5():
print('main5')
~p8 >> pluck(chords, vib=1, amp=1.2).after(120, 'stop')
Clock.future(240, main5)
def main6():
print('main6')
~p3 >> bug([5]).every(12, 'offadd', 12).after(120, 'stop')
Clock.future(320, main6)
def main7():
print('main7')
~p4 >> squish(P[:4], dur=2, amp=.4).after(60, 'stop')
Clock.future(360, main7)
Clock.future(400, Clock.clear)
|
the-stack_106_29652 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class BigdftPsolver(AutotoolsPackage, CudaPackage):
"""BigDFT-Psolver: a flexible real-space Poisson Solver based on
Interpolating Scaling Functions. It constitutes a fundamental building block
of BigDFT code, and it can also be used separately and linked to other codes."""
homepage = "https://bigdft.org/"
url = "https://gitlab.com/l_sim/bigdft-suite/-/archive/1.9.2/bigdft-suite-1.9.2.tar.gz"
git = "https://gitlab.com/l_sim/bigdft-suite.git"
version('develop', branch='devel')
version('1.9.2', sha256='dc9e49b68f122a9886fa0ef09970f62e7ba21bb9ab1b86be9b7d7e22ed8fbe0f')
version('1.9.1', sha256='3c334da26d2a201b572579fc1a7f8caad1cbf971e848a3e10d83bc4dc8c82e41')
version('1.9.0', sha256='4500e505f5a29d213f678a91d00a10fef9dc00860ea4b3edf9280f33ed0d1ac8')
version('1.8.3', sha256='f112bb08833da4d11dd0f14f7ab10d740b62bc924806d77c985eb04ae0629909')
version('1.8.2', sha256='042e5a3b478b1a4c050c450a9b1be7bcf8e13eacbce4759b7f2d79268b298d61')
version('1.8.1', sha256='e09ff0ba381f6ffbe6a3c0cb71db5b73117874beb41f22a982a7e5ba32d018b3')
variant('mpi', default=True, description='Enable MPI support')
variant('openmp', default=True, description='Enable OpenMP support')
variant('scalapack', default=True, description='Enable SCALAPACK support')
depends_on('python@:2.8', type=('build', 'run'), when="@:1.8.3")
depends_on('[email protected]:', type=('build', 'run'), when="@1.9.0:")
depends_on('[email protected]:', type=('build', 'run'), when="@develop")
depends_on('blas')
depends_on('lapack')
depends_on('py-pyyaml')
depends_on('mpi', when='+mpi')
depends_on('scalapack', when='+scalapack')
for vers in ['1.8.1', '1.8.2', '1.8.3', '1.9.0', '1.9.1', '1.9.2', 'develop']:
depends_on('bigdft-futile@{0}'.format(vers), when='@{0}'.format(vers))
for vers in ['1.8.3', '1.9.0', '1.9.1', '1.9.2', 'develop']:
depends_on('bigdft-atlab@{0}'.format(vers), when='@{0}'.format(vers))
build_directory = "psolver"
def autoreconf(self, spec, prefix):
autoreconf = which('autoreconf')
with working_dir(self.build_directory):
autoreconf('-fi')
def configure_args(self):
spec = self.spec
prefix = self.prefix
python_version = spec['python'].version.up_to(2)
pyyaml = join_path(spec['py-pyyaml'].prefix.lib,
'python{0}'.format(python_version))
openmp_flag = []
if '+openmp' in spec:
openmp_flag.append(self.compiler.openmp_flag)
linalg = []
if '+scalapack' in spec:
linalg.append(spec['scalapack'].libs.ld_flags)
linalg.append(spec['lapack'].libs.ld_flags)
linalg.append(spec['blas'].libs.ld_flags)
args = [
"FCFLAGS=%s" % " ".join(openmp_flag),
"--with-ext-linalg=%s" % " ".join(linalg),
"--with-pyyaml-path=%s" % pyyaml,
"--with-futile-libs=%s" % spec['bigdft-futile'].prefix.lib,
"--with-futile-incs=%s" % spec['bigdft-futile'].headers.include_flags,
"--with-moduledir=%s" % prefix.include,
"--prefix=%s" % prefix,
"--without-etsf-io",
]
if '+mpi' in spec:
args.append("CC=%s" % spec['mpi'].mpicc)
args.append("CXX=%s" % spec['mpi'].mpicxx)
args.append("FC=%s" % spec['mpi'].mpifc)
args.append("F90=%s" % spec['mpi'].mpifc)
args.append("F77=%s" % spec['mpi'].mpif77)
else:
args.append("--disable-mpi")
if '+openmp' in spec:
args.append("--with-openmp")
else:
args.append("--without-openmp")
if spec.satisfies('@1.8.3:') or spec.satisfies('@develop'):
args.append("--with-atlab-libs=%s" % spec['bigdft-atlab'].prefix.lib)
if '+cuda' in spec:
args.append("--enable-cuda-gpu")
args.append("--with-cuda-path=%s" % spec['cuda'].prefix)
args.append("--with-cuda-libs=%s" % spec['cuda'].libs.link_flags)
return args
@property
def libs(self):
shared = "+shared" in self.spec
return find_libraries(
'libPSolver-*', root=self.prefix, shared=shared, recursive=True
)
|
the-stack_106_29653 | #!/usr/bin/env python
import os
import warnings
import open3d as o3
import matplotlib.pyplot as plt
import numpy as np
import progressbar
import pyquaternion as pq
import transforms3d as t3
import util
import time
T_w_o = np.identity(4)
T_w_o[:3, :3] = [[0, 1, 0], [1, 0, 0], [0, 0, -1]]
T_o_w = util.invert_ht(T_w_o)
eulerdef = 'sxyz'
# TODO: Modify these for your workspace
csvdelimiter = ','
datadir = '/app/dataset/data/'
resultdir = '/app/dataset/nclt'
snapshotfile = 'snapshot.npz'
sessionfile = 'sessiondata.npz'
# TODO: Comment out the sessions you are not using
sessions = [
'2012-01-08',
'2012-01-15',
'2012-01-22',
'2012-02-02',
'2012-02-04',
'2012-02-05',
'2012-02-12',
'2012-02-18',
'2012-02-19',
'2012-03-17',
'2012-03-25',
'2012-03-31',
'2012-04-29',
'2012-05-11',
'2012-05-26',
'2012-06-15',
'2012-08-04',
'2012-08-20',
'2012-09-28',
'2012-10-28',
'2012-11-04',
'2012-11-16',
'2012-11-17',
'2012-12-01',
'2013-01-10',
'2013-02-23',
'2013-04-05'
]
lat0 = np.radians(42.293227)
lon0 = np.radians(-83.709657)
re = 6378135.0
rp = 6356750
rns = (re * rp)**2.0 \
/ ((re * np.cos(lat0))**2.0 + (rp * np.sin(lat0))**2.0)**1.5
rew = re**2.0 / np.sqrt((re * np.cos(lat0))**2.0 + (rp * np.sin(lat0))**2.0)
veloheadertype = np.dtype({
'magic': ('<u8', 0),
'count': ('<u4', 8),
'utime': ('<u8', 12),
'pad': ('V4', 20)})
veloheadersize = 24
velodatatype = np.dtype({
'x': ('<u2', 0),
'y': ('<u2', 2),
'z': ('<u2', 4),
'i': ('u1', 6),
'l': ('u1', 7)})
velodatasize = 8
def load_snapshot(sessionname):
cloud = o3.PointCloud()
trajectory = o3.LineSet()
with np.load(os.path.join(resultdir, sessionname, snapshotfile)) as data:
cloud.points = o3.Vector3dVector(data['points'])
cloud.colors = o3.Vector3dVector(
util.intensity2color(data['intensities'] / 255.0))
trajectory.points = o3.Vector3dVector(data['trajectory'])
lines = np.reshape(range(data['trajectory'].shape[0] - 1), [-1, 1]) \
+ [0, 1]
trajectory.lines = o3.Vector2iVector(lines)
trajectory.colors = o3.Vector3dVector(
np.tile([0.0, 0.5, 0.0], [lines.shape[0], 1]))
return cloud, trajectory
def view_snapshot(sessionname):
cloud, trajectory = load_snapshot(sessionname)
o3.draw_geometries([cloud, trajectory])
def pose2ht(pose):
r, p, y = pose[3:]
return t3.affines.compose(
pose[:3], t3.euler.euler2mat(r, p, y, eulerdef), np.ones(3))
def latlon2xy(latlon):
lat = latlon[:, [0]]
lon = latlon[:, [1]]
return np.hstack([np.sin(lat - lat0) * rns,
np.sin(lon - lon0) * rew * np.cos(lat0)])
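# Illustrative helper (not part of the original script): a thin wrapper that
# makes the unit convention explicit -- latlon2xy and the lat0/lon0 constants
# above work in radians, so degree inputs must be converted first.
def latlon_deg2xy(latlon_deg):
    """Projects latitude/longitude given in degrees to the local metric frame."""
    return latlon2xy(np.radians(latlon_deg))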
def data2xyzi(data):
xyzil = data.view(velodatatype)
xyz = np.hstack(
[xyzil[axis].reshape([-1, 1]) for axis in ['x', 'y', 'z']])
xyz = xyz * 0.005 - 100.0
return xyz, xyzil['i']
def save_trajectories():
trajectorydir = os.path.join(resultdir, 'trajectories_gt')
util.makedirs(trajectorydir)
trajectories = [session(s).T_w_r_gt[::20, :2, 3] for s in sessions]
for i in range(len(trajectories)):
plt.clf()
[plt.plot(t[:, 0], t[:, 1], color=(0.5, 0.5, 0.5)) \
for t in trajectories]
plt.plot(trajectories[i][:, 0], trajectories[i][:, 1], color='y')
plt.savefig(os.path.join(trajectorydir, sessions[i] + '.svg'))
class session:
def __init__(self, session):
self.session = session
self.dir = os.path.join(resultdir, self.session)
try:
data = np.load(os.path.join(self.dir, sessionfile))
self.velofiles = data['velofiles']
self.t_velo = data['t_velo']
self.velorawfile = data['velorawfile']
self.t_rawvelo = data['t_rawvelo']
self.i_rawvelo = data['i_rawvelo']
self.t_gt = data['t_gt']
self.T_w_r_gt = data['T_w_r_gt']
self.T_w_r_gt_velo = data['T_w_r_gt_velo']
self.t_cov_gt = data['t_cov_gt']
self.cov_gt = data['cov_gt']
self.t_odo = data['t_odo']
self.T_w_r_odo = data['T_w_r_odo']
self.T_w_r_odo_velo = data['T_w_r_odo_velo']
self.t_relodo = data['t_relodo']
self.relodo = data['relodo']
self.relodocov = data['relodocov']
self.t_gps = data['t_gps']
self.gps = data['gps']
except:
velodir = os.path.join(datadir, 'velodyne_data',
self.session, 'velodyne_sync')
#self.session + '_vel', 'velodyne_sync')
self.velofiles = [os.path.join(velodir, file) \
for file in os.listdir(velodir) \
if os.path.splitext(file)[1] == '.bin']
self.velofiles.sort()
self.t_velo = np.array([
int(os.path.splitext(os.path.basename(velofile))[0]) \
for velofile in self.velofiles])
self.velorawfile = os.path.join(datadir, 'velodyne_data',
self.session, 'velodyne_hits.bin')
#self.session + '_vel', 'velodyne_hits.bin')
self.t_rawvelo = []
self.i_rawvelo = []
with open(self.velorawfile, 'rb') as file:
data = np.array(file.read(veloheadersize))
while data:
header = data.view(veloheadertype)
if header['magic'] != 0xad9cad9cad9cad9c:
break
self.t_rawvelo.append(header['utime'])
self.i_rawvelo.append(file.tell() - veloheadersize)
file.seek(header['count'] * velodatasize, os.SEEK_CUR)
data = np.array(file.read(veloheadersize))
print(time.time())
self.t_rawvelo = np.array(self.t_rawvelo)
self.i_rawvelo = np.array(self.i_rawvelo)
posefile = os.path.join(
datadir, 'ground_truth', 'groundtruth_' + self.session + '.csv')
posedata = np.genfromtxt(posefile, delimiter=csvdelimiter)
posedata = posedata[np.logical_not(np.any(np.isnan(posedata), 1))]
self.t_gt = posedata[:, 0]
self.T_w_r_gt = np.stack([T_w_o.dot(pose2ht(pose_o_r)) \
for pose_o_r in posedata[:, 1:]])
self.T_w_r_gt_velo = np.stack(
[self.get_T_w_r_gt(t) for t in self.t_velo])
cov_gtfile = os.path.join(
datadir, 'ground_truth_cov', 'cov_' + self.session + '.csv')
cov_gt = np.genfromtxt(cov_gtfile, delimiter=csvdelimiter)
self.t_cov_gt = cov_gt[:, 0]
self.cov_gt = np.stack(
[np.reshape(roc[[
1, 2, 3, 4, 5, 6,
2, 7, 8, 9, 10, 11,
3, 8, 12, 13, 14, 15,
4, 9, 13, 16, 17, 18,
5, 10, 14, 17, 19, 20,
6, 11, 15, 18, 20, 21
]], [6, 6]) for roc in cov_gt])
sensordir = os.path.join(
datadir, 'sensor_data', self.session) # + '_sen')
odofile = os.path.join(sensordir, 'odometry_mu_100hz.csv')
ododata = np.genfromtxt(odofile, delimiter=csvdelimiter)
self.t_odo = ododata[:, 0]
self.T_w_r_odo = np.stack([T_w_o.dot(pose2ht(pose_o_r)) \
for pose_o_r in ododata[:, 1:]])
self.T_w_r_odo_velo = np.stack(
[self.get_T_w_r_odo(t) for t in self.t_velo])
relodofile = os.path.join(sensordir, 'odometry_mu.csv')
relodo = np.genfromtxt(relodofile, delimiter=csvdelimiter)
self.t_relodo = relodo[:, 0]
self.relodo = relodo[:, [1, 2, 6]]
relodocovfile = os.path.join(sensordir, 'odometry_cov.csv')
relodocov = np.genfromtxt(relodocovfile, delimiter=csvdelimiter)
self.relodocov = np.stack(
[np.reshape(roc[[
1, 2, 3, 4, 5, 6,
2, 7, 8, 9, 10, 11,
3, 8, 12, 13, 14, 15,
4, 9, 13, 16, 17, 18,
5, 10, 14, 17, 19, 20,
6, 11, 15, 18, 20, 21
]], [6, 6]) for roc in relodocov])
gpsfile = os.path.join(sensordir, 'gps.csv')
gps = np.genfromtxt(gpsfile, delimiter=csvdelimiter)[:, [0, 3, 4]]
self.t_gps = gps[:, 0]
self.gps = latlon2xy(gps[:, 1:])
util.makedirs(self.dir)
np.savez(os.path.join(self.dir, sessionfile),
velofiles=self.velofiles,
t_velo=self.t_velo,
velorawfile=self.velorawfile,
t_rawvelo=self.t_rawvelo,
i_rawvelo=self.i_rawvelo,
t_gt=self.t_gt,
T_w_r_gt=self.T_w_r_gt,
T_w_r_gt_velo=self.T_w_r_gt_velo,
t_cov_gt=self.t_cov_gt,
cov_gt=self.cov_gt,
t_odo=self.t_odo,
T_w_r_odo=self.T_w_r_odo,
T_w_r_odo_velo=self.T_w_r_odo_velo,
t_relodo=self.t_relodo,
relodo=self.relodo,
relodocov=self.relodocov,
t_gps=self.t_gps,
gps=self.gps)
def get_velo(self, i):
return data2xyzi(np.fromfile(self.velofiles[i]))
def get_velo_raw(self, i):
with open(self.velorawfile, 'rb') as file:
data = np.array(file.read(veloheadersize))
header = data.view(veloheadertype)
data = np.fromfile(file, count=header['count']).view(velodatatype)
xyz = np.empty([data.shape[0], 3])
intensities = np.empty([data.shape[0], 1])
for i in range(data.shape[0]):
xyz[i], intensities[i] = data2xyzi(data[i])
return xyz, intensities
def get_T_w_r_gt(self, t):
i = np.clip(np.searchsorted(self.t_gt, t), 1, self.t_gt.size - 1) \
+ np.array([-1, 0])
return util.interpolate_ht(self.T_w_r_gt[i], self.t_gt[i], t)
def get_T_w_r_odo(self, t):
i = np.clip(np.searchsorted(self.t_odo, t), 1, self.t_odo.size - 1) \
+ np.array([-1, 0])
return util.interpolate_ht(self.T_w_r_odo[i], self.t_odo[i], t)
def save_snapshot(self):
print(self.session)
naccupoints = int(3e7)
nscans = len(self.velofiles)
        nmaxpoints = int(naccupoints / nscans)  # integer count so it can size arrays below
accupoints = np.full([naccupoints, 3], np.nan)
accuintensities = np.empty([naccupoints, 1])
ipstart = 0
with progressbar.ProgressBar(max_value=nscans) as bar:
for i in range(nscans):
points, intensities = self.get_velo(i)
npoints = min(points.shape[0], nmaxpoints)
ip = np.random.choice(points.shape[0], npoints, replace=False)
points = np.hstack([points[ip], np.ones([npoints, 1])]).T
points = self.T_w_r_gt_velo[i].dot(points)[:3].T
accupoints[ipstart:ipstart+npoints] = points
intensities = intensities[ip].reshape([-1, 1])
accuintensities[ipstart:ipstart+npoints] = intensities
ipstart += npoints
bar.update(i)
trajectory = self.T_w_r_gt[:, :3, 3]
util.makedirs(self.dir)
np.savez(os.path.join(self.dir, snapshotfile),
points=accupoints, intensities=accuintensities,
trajectory=trajectory)
if __name__ == '__main__':
for s in sessions:
session(s)
|
the-stack_106_29654 | import uuid
import requests
import time
import json
import pandas as pd
from lxml import etree
import re
import random
positionName_list, salary_list, city_list,district_list, companyShortName_list, education_list,workYear_list ,industryField_list,financeStage_list,companySize_list,job_desc_list= [], [], [],[], [], [],[],[],[],[],[]
def get_uuid():
return str(uuid.uuid4())
def get_lagou(page,city,kd):
url = "https://www.lagou.com/jobs/positionAjax.json"
querystring = {"px": "new", "city": city, "needAddtionalResult": "false", "isSchoolJob": "0"}
payload = "first=false&pn=" + str(page) + "&kd="+str(kd)
cookie = "JSESSIONID=" + get_uuid() + ";"\
"user_trace_token=" + get_uuid() + "; LGUID=" + get_uuid() + "; index_location_city=%E6%88%90%E9%83%BD; " \
"SEARCH_ID=" + get_uuid() + '; _gid=GA1.2.717841549.1514043316; ' \
'_ga=GA1.2.952298646.1514043316; ' \
'LGSID=' + get_uuid() + "; " \
"LGRID=" + get_uuid() + "; "
headers = {'cookie': cookie,'origin': "https://www.lagou.com",'x-anit-forge-code': "0",'accept-encoding': "gzip, deflate, br",'accept-language': "zh-CN,zh;q=0.8,en;q=0.6",'user-agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",'content-type': "application/x-www-form-urlencoded; charset=UTF-8",'accept': "application/json, text/javascript, */*; q=0.01",'referer': "https://www.lagou.com/jobs/list_Java?px=new&city=%E6%88%90%E9%83%BD",'x-requested-with': "XMLHttpRequest",'connection': "keep-alive",'x-anit-forge-token': "None",'cache-control': "no-cache",'postman-token': "91beb456-8dd9-0390-a3a5-64ff3936fa63"}
response = requests.request("POST", url, data=payload.encode('utf-8'), headers=headers, params=querystring)
# print(response.text)
hjson = json.loads(response.text)
    for i in range(len(hjson['content']['positionResult']['result'])):  # a page may return fewer than 15 results
positionName=hjson['content']['positionResult']['result'][i]['positionName']
companyId = hjson['content']['positionResult']['result'][i]['companyId']
positionId= hjson['content']['positionResult']['result'][i]['positionId']
salary = hjson['content']['positionResult']['result'][i]['salary']
city= hjson['content']['positionResult']['result'][i]['city']
district= hjson['content']['positionResult']['result'][i]['district']
companyShortName= hjson['content']['positionResult']['result'][i]['companyShortName']
education= hjson['content']['positionResult']['result'][i]['education']
workYear= hjson['content']['positionResult']['result'][i]['workYear']
industryField= hjson['content']['positionResult']['result'][i]['industryField']
financeStage= hjson['content']['positionResult']['result'][i]['financeStage']
companySize= hjson['content']['positionResult']['result'][i]['companySize']
job_desc = get_job_desc(positionId)
positionName_list.append(positionName)
salary_list.append(salary)
city_list.append(city)
district_list.append(district)
companyShortName_list.append(companyShortName)
education_list.append(education)
workYear_list.append(workYear)
industryField_list.append(industryField)
financeStage_list.append(financeStage)
companySize_list.append(companySize)
#job_desc_list.append(job_desc)
job_desc_list.append('')
print("positionName:%s,companyId:%s,salary:%s,district:%s,companyShortName:%s,education:%s,workYear:%s"%(positionName,companyId,salary,district,companyShortName,education,workYear))
#print("position:%s"%(job_desc))
def get_job_desc(id):
url = "https://www.lagou.com/jobs/"+str(id)+".html"
cookie = "JSESSIONID=" + get_uuid() + ";"\
"user_trace_token=" + get_uuid() + "; LGUID=" + get_uuid() + "; index_location_city=%E6%88%90%E9%83%BD; " \
"SEARCH_ID=" + get_uuid() + '; _gid=GA1.2.717841549.1514043316; ' \
'_ga=GA1.2.952298646.1514043316; ' \
'LGSID=' + get_uuid() + "; " \
"LGRID=" + get_uuid() + "; "
headers = {'cookie': cookie,'origin': "https://www.lagou.com",'x-anit-forge-code': "0",'accept-encoding': "gzip, deflate, br",'accept-language': "zh-CN,zh;q=0.8,en;q=0.6",'user-agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",'content-type': "application/x-www-form-urlencoded; charset=UTF-8",'accept': "application/json, text/javascript, */*; q=0.01",'referer': "https://www.lagou.com/jobs/list_Java?px=new&city=%E6%88%90%E9%83%BD",'x-requested-with': "XMLHttpRequest",'connection': "keep-alive",'x-anit-forge-token': "None",'cache-control': "no-cache",'postman-token': "91beb456-8dd9-0390-a3a5-64ff3936fa63"}
response = requests.request("GET", url, headers=headers)
x = etree.HTML(response.text)
data = x.xpath('//*[@id="job_detail"]/dd[2]/div/*/text()')
return ''.join(data)
def write_to_csv(city,job):
infos = {'positionName': positionName_list, 'salary': salary_list, 'city': city_list, 'district': district_list, 'companyShortName': companyShortName_list, 'education': education_list,'workYear':workYear_list,'industryField':industryField_list,'financeStage':financeStage_list,'companySize':companySize_list,'job_desc':job_desc_list}
data = pd.DataFrame(infos, columns=['positionName', 'salary', 'city', 'district', 'companyShortName', 'education','workYear','industryField','financeStage','companySize','job_desc'])
data.to_csv("lagou-"+city+"-"+job+".csv")
def main(pages,city,job):
for n in range(1, pages+1):
get_lagou(n,city,job)
time.sleep(round(random.uniform(3, 5), 2))
write_to_csv(city,job)
if __name__ == "__main__":
main(30,'','Python')
#main(30,"广州",'Python')
#main(1, "广州", '测试') |
the-stack_106_29655 | from direct.showbase.DirectObject import DirectObject
from direct.showbase.TkGlobal import *
from .Tree import *
import Pmw
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
DEFAULT_BT_WIDTH = 50.0
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
class MemoryExplorer(Pmw.MegaWidget, DirectObject):
#--------------------------------------------------------------------------
# Init
#--------------------------------------------------------------------------
def __init__(self, parent = None, nodePath = None, **kw):
if nodePath is None:
nodePath = render
optiondefs = (('menuItems', [], Pmw.INITOPT),)
self.defineoptions(kw, optiondefs)
Pmw.MegaWidget.__init__(self, parent)
self.nodePath = nodePath
self.renderItem = None
self.render2dItem = None
self.buttons = []
self.labels = []
self.rootItem = None
self.btWidth = DEFAULT_BT_WIDTH
self.createScrolledFrame()
self.createScale()
self.createRefreshBT()
self.balloon = Pmw.Balloon(self.interior())
def createScrolledFrame(self):
self.frame = Pmw.ScrolledFrame(self.interior(),
labelpos = 'n',
label_text = 'ScrolledFrame',
usehullsize = 1,
hull_width = 200,
hull_height = 220,)
self.frame.pack(padx = 3, pady = 3, fill = BOTH, expand = 1)
def createScale(self):
self.scaleCtrl = Scale(self.interior(),
label = "Graph Scale",
from_= 0.0,
to = 20.0,
resolution = 0.1,
orient = HORIZONTAL,
command = self.onScaleUpdate)
self.scaleCtrl.pack(side = LEFT, fill = BOTH, expand = 1)
self.scaleCtrl.set(0.0)
def createRefreshBT(self):
self.refreshBT = Button(self.interior(), text = 'Refresh', command = self.refresh)
self.refreshBT.pack(side = LEFT, fill = BOTH, expand = 1)
#--------------------------------------------------------------------------
# Item Ctrls
#--------------------------------------------------------------------------
def createDefaultCtrls(self):
if self.renderItem == None or self.render2dItem == None:
return
totalBytes = self.renderItem.getVertexBytes()+self.render2dItem.getVertexBytes()
self.addChildCtrl(self.renderItem, totalBytes)
self.addChildCtrl(self.render2dItem, totalBytes)
self.setTitle("ALL", totalBytes)
def setTitle(self, parent, bytes):
self.frame["label_text"] = "[%s] - %s bytes" % (parent, bytes)
def resetCtrls(self):
for button in self.buttons:
self.balloon.unbind(button)
button.destroy()
self.buttons = []
for label in self.labels:
label.destroy()
self.labels = []
def getNewButton(self, width, ratio):
newBT = Button(self.frame.interior(),
anchor = W,
width = width)
if ratio == 0.0:
newBT['bg'] = "grey"
newBT['text'] = "."
else:
newBT['bg'] = Pmw.Color.hue2name(0.0, 1.0-ratio)
newBT['text'] = "%0.2f%%" % (ratio*100.0)
return newBT
def addSelfCtrl(self, item, totalBytes):
self.addLabel("[self] : %s bytes" % item.getSelfVertexBytes())
bt = self.addButton(item.getSelfVertexBytes(),
totalBytes,
self.onSelfButtonLClick,
self.onSelfButtonRClick,
item)
def addChildCtrl(self, item, totalBytes):
self.addLabel("%s [+%s] : %s bytes" % (item.getName(),
item.getNumChildren(),
item.getVertexBytes()))
bt = self.addButton(item.getVertexBytes(),
totalBytes,
self.onChildButtonLClick,
self.onChildButtonRClick,
item)
def addButton(self, vertexBytes, totalBytes, funcLClick, funcRClick, item):
width = self.getBTWidth(vertexBytes, totalBytes)
if totalBytes == 0:
ratio = 0.0
else:
ratio = vertexBytes/float(totalBytes)
bt = self.getNewButton(width, ratio)
def callbackL(event):
funcLClick(item)
def callbackR(event):
funcRClick(item)
bt.bind("<Button-1>", callbackL)
bt.bind("<Button-3>", callbackR)
bt.pack(side = TOP, anchor = NW)
self.buttons.append(bt)
self.balloon.bind(bt, item.getPathName())
return bt
def addLabel(self, label):
label = Label(self.frame.interior(), text = label)
label.pack(side = TOP, anchor = NW, expand = 0)
self.labels.append(label)
def getBTWidth(self, vertexBytes, totalBytes):
if totalBytes == 0:
return 1
width = int(self.btWidth * vertexBytes / totalBytes)
if width == 0:
width = 1
return width
#--------------------------------------------------------------------------
# Callback
#--------------------------------------------------------------------------
def onScaleUpdate(self, arg):
self.btWidth = DEFAULT_BT_WIDTH + DEFAULT_BT_WIDTH * float(arg)
if self.rootItem:
self.updateBTWidth()
else:
self.updateDefaultBTWidth()
def updateBTWidth(self):
self.buttons[0]['width'] = self.getBTWidth(self.rootItem.getSelfVertexBytes(),
self.rootItem.getVertexBytes())
btIndex = 1
for item in self.rootItem.getChildren():
self.buttons[btIndex]['width'] = self.getBTWidth(item.getVertexBytes(),
self.rootItem.getVertexBytes())
btIndex += 1
def updateDefaultBTWidth(self):
if self.renderItem == None or self.render2dItem == None:
return
totalBytes = self.renderItem.getVertexBytes() + self.render2dItem.getVertexBytes()
self.buttons[0]['width'] = self.getBTWidth(self.renderItem.getVertexBytes(), totalBytes)
self.buttons[1]['width'] = self.getBTWidth(self.render2dItem.getVertexBytes(), totalBytes)
def onSelfButtonLClick(self, item):
pass
def onSelfButtonRClick(self, item):
parentItem = item.getParent()
self.resetCtrls()
self.addItemCtrls(parentItem)
def onChildButtonLClick(self, item):
if item.getNumChildren() == 0:
return
self.resetCtrls()
self.addItemCtrls(item)
def onChildButtonRClick(self, item):
parentItem = item.getParent()
if parentItem:
self.resetCtrls()
self.addItemCtrls(parentItem.getParent())
def addItemCtrls(self, item):
self.rootItem = item
if item == None:
self.createDefaultCtrls()
else:
self.addSelfCtrl(item, item.getVertexBytes())
for child in item.getChildren():
self.addChildCtrl(child, item.getVertexBytes())
self.setTitle(item.getPathName(), item.getVertexBytes())
#--------------------------------------------------------------------------
# List & Analyze
#--------------------------------------------------------------------------
def makeList(self):
self.renderItem = MemoryExplorerItem(None, render)
self.buildList(self.renderItem)
self.render2dItem = MemoryExplorerItem(None, render2d)
self.buildList(self.render2dItem)
def buildList(self, parentItem):
for nodePath in parentItem.nodePath.getChildren():
item = MemoryExplorerItem(parentItem, nodePath)
parentItem.addChild(item)
self.buildList(item)
def analyze(self):
self.renderItem.analyze()
self.render2dItem.analyze()
def refresh(self):
self.makeList()
self.analyze()
self.resetCtrls()
self.createDefaultCtrls()
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
class MemoryExplorerItem:
def __init__(self, parent, nodePath):
self.parent = parent
self.nodePath = nodePath
self.children = []
self.selfVertexBytes = 0
self.childrenVertexBytes = 0
self.numFaces = 0
self.textureBytes = 0
if parent:
self.pathName = parent.pathName + "/" + nodePath.getName()
else:
self.pathName = nodePath.getName()
def getParent(self):
return self.parent
def addChild(self, child):
self.children.append(child)
def getNumChildren(self):
return len(self.children)
def getChildren(self):
return self.children
def getName(self):
return self.nodePath.getName()
def getPathName(self):
return self.pathName
def getVertexBytes(self):
return self.selfVertexBytes + self.childrenVertexBytes
def getSelfVertexBytes(self):
return self.selfVertexBytes
def analyze(self):
self.selfVertexBytes = 0
self.childrenVertexBytes = 0
self.numFaces = 0
self.textureBytes = 0
self.calcTextureBytes()
if self.nodePath.node().isGeomNode():
geomNode = self.nodePath.node()
for i in range(geomNode.getNumGeoms()):
geom = geomNode.getGeom(i)
self.calcVertexBytes(geom)
self.calcNumFaces(geom)
self.analyzeChildren()
def calcVertexBytes(self, geom):
vData = geom.getVertexData()
for j in range(vData.getNumArrays()):
array = vData.getArray(j)
self.selfVertexBytes += array.getDataSizeBytes()
def calcTextureBytes(self):
texCol = self.nodePath.findAllTextures()
for i in range(texCol.getNumTextures()):
tex = texCol.getTexture(i)
self.textureBytes += tex.estimateTextureMemory()
# what about shared textures by multiple nodes ?
def calcNumFaces(self, geom):
for k in range(geom.getNumPrimitives()):
primitive = geom.getPrimitive(k)
self.numFaces += primitive.getNumFaces()
def analyzeChildren(self):
for child in self.children:
child.analyze()
self.childrenVertexBytes += child.getVertexBytes()
self.numFaces += child.numFaces
def ls(self, indent = ""):
        print(indent + self.nodePath.getName() + " " + str(self.getVertexBytes()) + " " + str(self.numFaces) + " " + str(self.textureBytes))
indent = indent + " "
for child in self.children:
child.ls(indent)
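#--------------------------------------------------------------------------
# Illustrative launch helper (not part of the original file): shows how the
# explorer is typically created and populated. It assumes a Panda3D ShowBase
# with Tk support is already running, so the render/render2d globals and a
# Tk root exist; parent may be any Tk/Pmw container (or None for the root).
#--------------------------------------------------------------------------
def demoMemoryExplorer(parent = None):
    explorer = MemoryExplorer(parent)
    explorer.pack(fill = BOTH, expand = 1)
    explorer.refresh()
    return explorer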
|
the-stack_106_29659 | # dale
from default.sets import InitialSetting
from default.webdriver_utilities.pre_drivers import pgdas_driver
from default.webdriver_utilities.wbs import WDShorcuts
from default.interact import *
from default.interact import _contmatic_select_by_name
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoAlertPresentException, NoSuchElementException, ElementClickInterceptedException
from time import sleep
import pyautogui as pygui
import os
link = "ChromeDriver/chromedriver.exe"
possible = ['GIA']
class GIA(InitialSetting, WDShorcuts):
def __init__(self, *args, compt):
__r_social, __cnpj, login, senha = args
# __anexo, __valor_n_ret, __valor_ret, already_declared
# competencia declarada
self.compt_used = compt
self.client_path = self.files_pathit(
__r_social.strip(), self.compt_used)
# self.client_path = self.pathit(self.compt, main_path, __r_social)
# drivers declarados
menuX, menuY = 20, 27
def fecha_janela_contribuintes_gia():
sleep(1)
pygui.click(1322, 333, duration=.5)
pygui.hotkey('left', 'enter')
# self.GIA()
# if certificado...
if not self.certifs_exist('GiaScreenShoot', 1):
janelas_gias = pygui.getWindowsWithTitle('GIA')
for win in janelas_gias:
if win.title == 'GIA':
win.maximize()
win.activate()
break
else:
# there is no break...
self.abre_programa(self.get_env_for_path(
'\\Desktop\\GIA.exe'), path=True)
IE = __cnpj
my_print = login
print(my_print)
# pygui.hotkey('alt', 'tab')
print(IE)
#
try:
fecha_janela_contribuintes_gia()
except IndexError:
print('Não precisei fechar')
self.pt1_gia_software(IE, self.compt_used)
pygui.doubleClick(menuX+35, menuY)
# consistir
sleep(3)
pygui.click(menuX, menuY)
sleep(.5)
foritab(2, 'up')
pygui.hotkey('enter')
pygui.click(x=836, y=394)
foritab(7, 'tab')
pygui.hotkey('enter', 'enter', interval=.25)
pygui.hotkey('enter')
self.save_novagia_pdf()
self.driver = driver = pgdas_driver(self.client_path)
super().__init__(self.driver)
driver.get(
'https://www3.fazenda.sp.gov.br/CAWEB/Account/Login.aspx')
llg = driver.find_element(By.ID,
'ConteudoPagina_txtUsuario')
llg.clear()
llg.send_keys(login)
ssn = driver.find_element(By.XPATH,
"//input[@type='password']")
ssn.clear()
ssn.send_keys(senha)
self.send_keys_anywhere(Keys.TAB)
self.send_keys_anywhere(Keys.ENTER)
print('pressione f7 p/ continuar após captcha')
press_key_b4('f8')
# self.find_submit_form()
        # press enter to log in
sleep(5)
driver.find_element(By.LINK_TEXT, 'Nova GIA').click()
sleep(3)
driver.find_element(By.PARTIAL_LINK_TEXT,
'Documentos Fiscais (Normal, Substit. e Coligida)').click()
sleep(2)
driver_clicks = driver.find_elements(By.XPATH,
"//input[@type='file']")
driver_clicks[0].send_keys(self.clieninput_filepath())
driver.find_elements(By.XPATH,
"//input[@type='button']")[0].click()
try:
driver.switch_to.alert.accept()
except NoAlertPresentException:
print('Sem alerta')
sleep(5)
"""
bt_imprime = driver.find_element(By.CSS_SELECTOR, '[alt="Imprimir"]')
self.exec_list(click=bt_imprime, enter=pygui)
print('Glória a Deus f7 p continuar')
press_key_b4('f7')
"""
self.save_save_img2pdf()
driver.close()
sleep(5)
# pygui.hotkey('enter')
    # ############################################ stopped here (parei daqui)
def save_save_img2pdf(self):
from PIL import Image
path1 = f'{self.client_path}/GiaScreenShoot.png'
path2 = f'{self.client_path}/Recibo_{self.compt_used}.pdf'
self.driver.save_screenshot(path1)
image1 = Image.open(path1)
try:
im1 = image1.convert('RGB')
except ValueError:
im1 = image1
im1.save(path2)
def save_novagia_pdf(self):
from shutil import copy
pathinit = r'C:\Users\user\Documents\SEFAZ\GIA\TNormal'
pathinit += f'\\{os.listdir(pathinit)[0]}'
# copy(r"C:\Users\User\Documents\SEFAZ\GIA\TNormal\{}".format(os.listdir(r"C:\Users\User\Documents\SEFAZ\GIA\TNormal")[0]), r"C:\Users\user\OneDrive\_FISCAL-2021\2021\01-2021\GIA_Tharles Marli")
copy(pathinit, self.client_path)
def pt1_gia_software(self, ie, cpt_write):
cpt_write = "".join(cpt_write.split('-'))
print(cpt_write
)
menuX, menuY = 20, 27
[pygui.click(menuX, menuY, duration=2.5) for i in range(1)]
sleep(2)
pygui.hotkey('tab', 'enter', interval=.25)
pygui.hotkey('tab', 'tab')
pygui.write(ie, interval=.1)
foritab(2, 'tab', 'enter')
pygui.hotkey('tab', 'tab', 'enter')
sleep(.2)
pygui.write(cpt_write)
sleep(.5)
pygui.hotkey('tab', 'enter')
sleep(.2)
pygui.hotkey('left', 'enter', 'enter', 'tab', 'enter', interval=.25)
def clieninput_filepath(self, filetype='sfz'):
dir_searched_now = self.client_path
file = [os.path.join(dir_searched_now, fname) for fname in os.listdir(
dir_searched_now) if fname.lower().endswith(filetype)]
return file[0] if len(file) == 1 else False
def exec_list(self, **args):
"""
        :param args: dictionaries only
:return:
"""
from time import sleep
import pyautogui as pygui
from concurrent.futures import ThreadPoolExecutor
executors_list = []
with ThreadPoolExecutor(max_workers=5) as executor:
for key, vs in args.items():
if key == 'click':
executors_list.append(executor.submit(vs.click))
else:
executors_list.append(
executor.submit(pygui.hotkey, str(key)))
print('else')
sleep(2)
print('sleeping')
def abre_programa(self, name, path=False):
"""
:param name: path/to/nameProgram
:param path: False => contmatic, True => any path
:return: winleft+r open
"""
if path is False:
programa = _contmatic_select_by_name(name)
else:
programa = name
senha = '240588140217'
sleep(1)
pygui.hotkey('winleft', 'r')
# pesquisador
sleep(1)
pygui.write(programa)
sleep(1)
pygui.hotkey('enter')
sleep(10)
# p.write(senha)
# p.hotkey('tab', 'enter', interval=.5)
pygui.sleep(5)
        # pygui.click(x=1508, y=195) # closes the initial window in G5
def get_env_for_path(self, path='\\Desktop\\GIA.exe'):
p1path = os.getenv('APPDATA')
p1path = os.path.abspath(os.path.join(os.path.dirname(p1path), '..'))
p1path += path
return p1path
# CONTMATIC_PATH = p1path + r'\Microsoft\Windows\Start Menu\Programs\Contmatic Phoenix'
# def gerar_cert(self, arq):
# import os
# save = os.path.join(self.client_path, arq)
# self.driver.save_screenshot(save)
def certifs_exist(self, startswith, at_least=2):
arqs_search = self.files_get_anexos_v4(self.client_path, 'png')
arqs_search = [
self.path_leaf(f, True) for f in arqs_search]
arqs_search = [f for f in arqs_search if f.startswith(startswith)]
if len(arqs_search) >= at_least:
return True
return False
|
the-stack_106_29661 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing task families."""
import json
from absl import logging
from absl.testing import parameterized
from task_set import datasets
import tensorflow.compat.v1 as tf
class SingleTaskTestCase(parameterized.TestCase, tf.test.TestCase):
def task_test(self, task):
"""Smoke test tasks to ensure they can produce gradients."""
params = task.initial_params()
loss = task.call_split(params, datasets.Split.TRAIN)
grads = task.gradients(loss, params)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(grads)
class TaskFamilyTestCase(parameterized.TestCase, tf.test.TestCase):
"""Base TestCase used for testing sampled task families."""
def __init__(self, sampler, getter, *args, **kwargs):
self.sampler = sampler
self.getter = getter
super(TaskFamilyTestCase, self).__init__(*args, **kwargs)
@parameterized.parameters(range(20))
def test_configs_are_unique_and_consistent(self, seed):
"""Test that samplers produce the same configs for the same seeds."""
cfg1 = self.sampler(seed)
cfg2 = self.sampler(seed)
self.assertEqual(cfg1, cfg2)
cfg3 = self.sampler(seed + 10)
self.assertNotEqual(cfg1, cfg3)
@parameterized.parameters(range(20))
def test_serialize_configs(self, seed):
"""Test that configs are serializable."""
cfg = self.sampler(seed)
try:
_ = json.dumps(cfg)
    except (TypeError, ValueError):
self.fail("Failed to serialize config to json!")
@parameterized.parameters(range(2))
def test_run_task_graph(self, seed):
"""Test that a graph can be constructed, and gradients can be computed."""
cfg = self.sampler(seed)
logging.info("Checking cfg: %s", cfg)
task = self.getter(cfg)
params = task.initial_params()
loss = task.call_split(params, datasets.Split.TRAIN)
grads = task.gradients(loss, params)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(grads)
@parameterized.parameters(range(2, 10))
def test_build_task_graph(self, seed):
"""Test that a graph can be constructed.
This is faster than constructing and running, thus we can run more seeds.
Args:
seed: seed to call the sampler with.
"""
cfg = self.sampler(seed)
logging.info("Checking cfg: %s", cfg)
tf.reset_default_graph()
self.getter(cfg)
|
the-stack_106_29662 |
from Modelo import sistema
from Vista import Ventanappal
from PyQt5.QtWidgets import QApplication
import sys
class Controlador():
def __init__(self, vista, modelo):
self.__mivista= vista
self.__mimodelo=modelo
def mostrarimg(self,nimagen):
return self.__mimodelo.mostrarimg(nimagen)
def main():
    app = QApplication(sys.argv)
    modelo = sistema()
    vista = Ventanappal()
    coord = Controlador(vista, modelo)
vista.setcoord(coord)
vista.show()
sys.exit(app.exec_())
if __name__ == "__main__":
    main()
|
the-stack_106_29664 | from decimal import Decimal
from django.contrib.gis.db.models.fields import BaseSpatialField, GeometryField
from django.contrib.gis.db.models.sql import AreaField
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import (
Area as AreaMeasure, Distance as DistanceMeasure,
)
from django.core.exceptions import FieldError
from django.db.models import (
BooleanField, FloatField, IntegerField, TextField, Transform,
)
from django.db.models.expressions import Func, Value
from django.db.models.functions import Cast
NUMERIC_TYPES = (int, float, Decimal)
class GeoFuncMixin:
function = None
output_field_class = None
geom_param_pos = (0,)
def __init__(self, *expressions, **extra):
if 'output_field' not in extra and self.output_field_class:
extra['output_field'] = self.output_field_class()
super().__init__(*expressions, **extra)
# Ensure that value expressions are geometric.
for pos in self.geom_param_pos:
expr = self.source_expressions[pos]
if not isinstance(expr, Value):
continue
try:
output_field = expr.output_field
except FieldError:
output_field = None
geom = expr.value
if not isinstance(geom, Geometry) or output_field and not isinstance(output_field, GeometryField):
raise TypeError("%s function requires a geometric argument in position %d." % (self.name, pos + 1))
if not geom.srid and not output_field:
raise ValueError("SRID is required for all geometries.")
if not output_field:
self.source_expressions[pos] = Value(geom, output_field=GeometryField(srid=geom.srid))
@property
def name(self):
return self.__class__.__name__
@property
def srid(self):
return self.source_expressions[self.geom_param_pos[0]].field.srid
@property
def geo_field(self):
return GeometryField(srid=self.srid) if self.srid else None
def as_sql(self, compiler, connection, function=None, **extra_context):
if not self.function and not function:
function = connection.ops.spatial_function_name(self.name)
return super().as_sql(compiler, connection, function=function, **extra_context)
def resolve_expression(self, *args, **kwargs):
res = super().resolve_expression(*args, **kwargs)
# Ensure that expressions are geometric.
source_fields = res.get_source_fields()
for pos in self.geom_param_pos:
field = source_fields[pos]
if not isinstance(field, GeometryField):
raise TypeError(
"%s function requires a GeometryField in position %s, got %s." % (
self.name, pos + 1, type(field).__name__,
)
)
base_srid = res.srid
for pos in self.geom_param_pos[1:]:
expr = res.source_expressions[pos]
expr_srid = expr.output_field.srid
if expr_srid != base_srid:
# Automatic SRID conversion so objects are comparable.
res.source_expressions[pos] = Transform(expr, base_srid).resolve_expression(*args, **kwargs)
return res
def _handle_param(self, value, param_name='', check_types=None):
if not hasattr(value, 'resolve_expression'):
if check_types and not isinstance(value, check_types):
raise TypeError(
"The %s parameter has the wrong type: should be %s." % (
param_name, check_types)
)
return value
class GeoFunc(GeoFuncMixin, Func):
pass
class GeomOutputGeoFunc(GeoFunc):
def __init__(self, *expressions, **extra):
if 'output_field' not in extra:
extra['output_field'] = GeometryField()
        super().__init__(*expressions, **extra)
def resolve_expression(self, *args, **kwargs):
res = super().resolve_expression(*args, **kwargs)
res.output_field.srid = res.srid
return res
class SQLiteDecimalToFloatMixin:
"""
    By default, Decimal values are converted to str by the SQLite backend, which
    is not accepted by the GIS functions that expect numeric values.
"""
def as_sqlite(self, compiler, connection):
for expr in self.get_source_expressions():
if hasattr(expr, 'value') and isinstance(expr.value, Decimal):
expr.value = float(expr.value)
return super().as_sql(compiler, connection)
class OracleToleranceMixin:
tolerance = 0.05
def as_oracle(self, compiler, connection):
tol = self.extra.get('tolerance', self.tolerance)
return super().as_sql(compiler, connection, template="%%(function)s(%%(expressions)s, %s)" % tol)
class Area(OracleToleranceMixin, GeoFunc):
output_field_class = AreaField
arity = 1
def as_sql(self, compiler, connection, **extra_context):
if connection.ops.geography:
self.output_field.area_att = 'sq_m'
else:
# Getting the area units of the geographic field.
geo_field = self.geo_field
if geo_field.geodetic(connection):
if connection.features.supports_area_geodetic:
self.output_field.area_att = 'sq_m'
else:
# TODO: Do we want to support raw number areas for geodetic fields?
raise NotImplementedError('Area on geodetic coordinate systems not supported.')
else:
units_name = geo_field.units_name(connection)
if units_name:
self.output_field.area_att = AreaMeasure.unit_attname(units_name)
return super().as_sql(compiler, connection, **extra_context)
def as_oracle(self, compiler, connection):
self.output_field = AreaField('sq_m') # Oracle returns area in units of meters.
return super().as_oracle(compiler, connection)
def as_sqlite(self, compiler, connection, **extra_context):
if self.geo_field.geodetic(connection):
extra_context['template'] = '%(function)s(%(expressions)s, %(spheroid)d)'
extra_context['spheroid'] = True
return self.as_sql(compiler, connection, **extra_context)
class Azimuth(GeoFunc):
output_field_class = FloatField
arity = 2
geom_param_pos = (0, 1)
class AsGeoJSON(GeoFunc):
output_field_class = TextField
def __init__(self, expression, bbox=False, crs=False, precision=8, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', int))
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
if options:
expressions.append(options)
super().__init__(*expressions, **extra)
class AsGML(GeoFunc):
geom_param_pos = (1,)
output_field_class = TextField
def __init__(self, expression, version=2, precision=8, **extra):
expressions = [version, expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', int))
super().__init__(*expressions, **extra)
def as_oracle(self, compiler, connection, **extra_context):
source_expressions = self.get_source_expressions()
version = source_expressions[0]
clone = self.copy()
clone.set_source_expressions([source_expressions[1]])
extra_context['function'] = 'SDO_UTIL.TO_GML311GEOMETRY' if version.value == 3 else 'SDO_UTIL.TO_GMLGEOMETRY'
return super(AsGML, clone).as_sql(compiler, connection, **extra_context)
class AsKML(AsGML):
def as_sqlite(self, compiler, connection):
# No version parameter
clone = self.copy()
clone.set_source_expressions(self.get_source_expressions()[1:])
return clone.as_sql(compiler, connection)
class AsSVG(GeoFunc):
output_field_class = TextField
def __init__(self, expression, relative=False, precision=8, **extra):
relative = relative if hasattr(relative, 'resolve_expression') else int(relative)
expressions = [
expression,
relative,
self._handle_param(precision, 'precision', int),
]
super().__init__(*expressions, **extra)
class BoundingCircle(OracleToleranceMixin, GeoFunc):
def __init__(self, expression, num_seg=48, **extra):
super().__init__(*[expression, num_seg], **extra)
def as_oracle(self, compiler, connection):
clone = self.copy()
clone.set_source_expressions([self.get_source_expressions()[0]])
return super(BoundingCircle, clone).as_oracle(compiler, connection)
class Centroid(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 1
class Difference(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 2
geom_param_pos = (0, 1)
class DistanceResultMixin:
def source_is_geography(self):
return self.get_source_fields()[0].geography and self.srid == 4326
def convert_value(self, value, expression, connection, context):
if value is None:
return None
dist_att = None
geo_field = self.geo_field
if geo_field.geodetic(connection):
if connection.features.supports_distance_geodetic:
dist_att = 'm'
else:
dist_att = geo_field.units_name(connection)
if dist_att:
return DistanceMeasure(**{dist_att: value})
return value
class Distance(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
geom_param_pos = (0, 1)
output_field_class = FloatField
spheroid = None
def __init__(self, expr1, expr2, spheroid=None, **extra):
expressions = [expr1, expr2]
if spheroid is not None:
self.spheroid = spheroid
expressions += (self._handle_param(spheroid, 'spheroid', bool),)
super().__init__(*expressions, **extra)
def as_postgresql(self, compiler, connection):
function = None
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
expr2 = self.source_expressions[1]
geography = self.source_is_geography()
if expr2.output_field.geography != geography:
if isinstance(expr2, Value):
expr2.output_field.geography = geography
else:
self.source_expressions[1] = Cast(
expr2,
GeometryField(srid=expr2.output_field.srid, geography=geography),
)
if not geography and geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need special distance functions
if self.spheroid:
# DistanceSpheroid is more accurate and resource intensive than DistanceSphere
function = connection.ops.spatial_function_name('DistanceSpheroid')
# Replace boolean param by the real spheroid of the base field
self.source_expressions[2] = Value(geo_field._spheroid)
else:
function = connection.ops.spatial_function_name('DistanceSphere')
return super().as_sql(compiler, connection, function=function)
def as_oracle(self, compiler, connection):
if self.spheroid:
self.source_expressions.pop(2)
return super().as_oracle(compiler, connection)
def as_sqlite(self, compiler, connection, **extra_context):
if self.spheroid:
self.source_expressions.pop(2)
if self.geo_field.geodetic(connection):
# SpatiaLite returns NULL instead of zero on geodetic coordinates
extra_context['template'] = 'COALESCE(%(function)s(%(expressions)s, %(spheroid)s), 0)'
extra_context['spheroid'] = int(bool(self.spheroid))
return super().as_sql(compiler, connection, **extra_context)
class Envelope(GeomOutputGeoFunc):
arity = 1
class ForceRHR(GeomOutputGeoFunc):
arity = 1
class GeoHash(GeoFunc):
output_field_class = TextField
def __init__(self, expression, precision=None, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', int))
super().__init__(*expressions, **extra)
def as_mysql(self, compiler, connection):
clone = self.copy()
# If no precision is provided, set it to the maximum.
if len(clone.source_expressions) < 2:
clone.source_expressions.append(Value(100))
return clone.as_sql(compiler, connection)
class Intersection(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 2
geom_param_pos = (0, 1)
@BaseSpatialField.register_lookup
class IsValid(OracleToleranceMixin, GeoFuncMixin, Transform):
lookup_name = 'isvalid'
output_field = BooleanField()
def as_oracle(self, compiler, connection, **extra_context):
sql, params = super().as_oracle(compiler, connection, **extra_context)
return "CASE %s WHEN 'TRUE' THEN 1 ELSE 0 END" % sql, params
class Length(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
output_field_class = FloatField
def __init__(self, expr1, spheroid=True, **extra):
self.spheroid = spheroid
super().__init__(expr1, **extra)
def as_sql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection) and not connection.features.supports_length_geodetic:
raise NotImplementedError("This backend doesn't support Length on geodetic fields")
return super().as_sql(compiler, connection)
def as_postgresql(self, compiler, connection):
function = None
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if self.source_is_geography():
self.source_expressions.append(Value(self.spheroid))
elif geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need length_spheroid
function = connection.ops.spatial_function_name('LengthSpheroid')
self.source_expressions.append(Value(geo_field._spheroid))
else:
dim = min(f.dim for f in self.get_source_fields() if f)
if dim > 2:
function = connection.ops.length3d
return super().as_sql(compiler, connection, function=function)
def as_sqlite(self, compiler, connection):
function = None
geo_field = GeometryField(srid=self.srid)
if geo_field.geodetic(connection):
function = 'GeodesicLength' if self.spheroid else 'GreatCircleLength'
return super().as_sql(compiler, connection, function=function)
class LineLocatePoint(GeoFunc):
output_field_class = FloatField
arity = 2
geom_param_pos = (0, 1)
class MakeValid(GeoFunc):
pass
class MemSize(GeoFunc):
output_field_class = IntegerField
arity = 1
class NumGeometries(GeoFunc):
output_field_class = IntegerField
arity = 1
class NumPoints(GeoFunc):
output_field_class = IntegerField
arity = 1
def as_sql(self, compiler, connection):
if self.source_expressions[self.geom_param_pos[0]].output_field.geom_type != 'LINESTRING':
if not connection.features.supports_num_points_poly:
raise TypeError('NumPoints can only operate on LineString content on this database.')
return super().as_sql(compiler, connection)
class Perimeter(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
output_field_class = FloatField
arity = 1
def as_postgresql(self, compiler, connection):
function = None
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection) and not self.source_is_geography():
raise NotImplementedError("ST_Perimeter cannot use a non-projected non-geography field.")
dim = min(f.dim for f in self.get_source_fields())
if dim > 2:
function = connection.ops.perimeter3d
return super().as_sql(compiler, connection, function=function)
def as_sqlite(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection):
raise NotImplementedError("Perimeter cannot use a non-projected field.")
return super().as_sql(compiler, connection)
class PointOnSurface(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 1
class Reverse(GeoFunc):
arity = 1
class Scale(SQLiteDecimalToFloatMixin, GeomOutputGeoFunc):
def __init__(self, expression, x, y, z=0.0, **extra):
expressions = [
expression,
self._handle_param(x, 'x', NUMERIC_TYPES),
self._handle_param(y, 'y', NUMERIC_TYPES),
]
if z != 0.0:
expressions.append(self._handle_param(z, 'z', NUMERIC_TYPES))
super().__init__(*expressions, **extra)
class SnapToGrid(SQLiteDecimalToFloatMixin, GeomOutputGeoFunc):
def __init__(self, expression, *args, **extra):
nargs = len(args)
expressions = [expression]
if nargs in (1, 2):
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args]
)
elif nargs == 4:
# Reverse origin and size param ordering
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[2:]]
)
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[0:2]]
)
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `SnapToGrid`.')
super().__init__(*expressions, **extra)
class SymDifference(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 2
geom_param_pos = (0, 1)
class Transform(GeomOutputGeoFunc):
def __init__(self, expression, srid, **extra):
expressions = [
expression,
self._handle_param(srid, 'srid', int),
]
if 'output_field' not in extra:
extra['output_field'] = GeometryField(srid=srid)
super().__init__(*expressions, **extra)
@property
def srid(self):
# Make srid the resulting srid of the transformation
return self.source_expressions[1].value
class Translate(Scale):
def as_sqlite(self, compiler, connection):
if len(self.source_expressions) < 4:
# Always provide the z parameter for ST_Translate
self.source_expressions.append(Value(0))
return super().as_sqlite(compiler, connection)
class Union(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 2
geom_param_pos = (0, 1)
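# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original module): a rough sketch of how
# these database functions are applied in queryset annotations. It assumes a
# configured GeoDjango project with a hypothetical `City` model exposing a
# PointField named `point`; the model and field names are assumptions.
#
# from django.contrib.gis.db.models.functions import Distance, Transform
# from django.contrib.gis.geos import Point
#
# nearby = City.objects.annotate(
#     distance=Distance('point', Point(0.0, 51.5, srid=4326)),
#     web_mercator=Transform('point', 3857),
# ).order_by('distance')
# ---------------------------------------------------------------------------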
|
the-stack_106_29666 | import logging
import os.path
import sys
import datetime
def initialize_logger(output_dir, stream_loglevel=logging.INFO, all_loglevel=logging.DEBUG):
    try:
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
    except OSError:
        sys.exit("Error happened while creating log folder: %s" % output_dir)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# create console handler and set level to info
handler = logging.StreamHandler()
handler.setLevel(stream_loglevel)
formatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
now = datetime.datetime.now()
error_log_filename = "error-%4d-%02d-%02d.log" % (now.year, now.month, now.day)
all_log_filename = "all-%4d-%02d-%02d.log" % (now.year, now.month, now.day)
# create error file handler and set level to error
    handler = logging.FileHandler(os.path.join(output_dir, error_log_filename), "a+", encoding=None, delay=True)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
# create debug file handler and set level to debug
handler = logging.FileHandler(os.path.join(output_dir, all_log_filename), "a+")
handler.setLevel(all_loglevel)
formatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logging.info("Start to log...")
|
the-stack_106_29668 | # coding: utf-8
# Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Original Work: Copyright (c) 2018 Character Encoding Detector contributors. https://github.com/chardet
from .enums import ProbingState, MachineState
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJP_SM_MODEL
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
super(EUCJPProber, self).__init__()
self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL)
self.distribution_analyzer = EUCJPDistributionAnalysis()
self.context_analyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
super(EUCJPProber, self).reset()
self.context_analyzer.reset()
@property
def charset_name(self):
return "EUC-JP"
@property
def language(self):
return "Japanese"
def feed(self, byte_str):
for i in range(len(byte_str)):
# PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte
coding_state = self.coding_sm.next_state(byte_str[i])
if coding_state == MachineState.ERROR:
self.logger.debug('%s %s prober hit error at byte %s',
self.charset_name, self.language, i)
self._state = ProbingState.NOT_ME
break
elif coding_state == MachineState.ITS_ME:
self._state = ProbingState.FOUND_IT
break
elif coding_state == MachineState.START:
char_len = self.coding_sm.get_current_charlen()
if i == 0:
self._last_char[1] = byte_str[0]
self.context_analyzer.feed(self._last_char, char_len)
self.distribution_analyzer.feed(self._last_char, char_len)
else:
self.context_analyzer.feed(byte_str[i - 1:i + 1],
char_len)
self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
char_len)
self._last_char[0] = byte_str[-1]
if self.state == ProbingState.DETECTING:
if (self.context_analyzer.got_enough_data() and
(self.get_confidence() > self.SHORTCUT_THRESHOLD)):
self._state = ProbingState.FOUND_IT
return self.state
def get_confidence(self):
context_conf = self.context_analyzer.get_confidence()
distrib_conf = self.distribution_analyzer.get_confidence()
return max(context_conf, distrib_conf)
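# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original module): this file relies on
# relative imports, so the sketch below is kept as a comment. It assumes the
# surrounding chardet-style package is importable; the 0.5 threshold is an
# arbitrary choice for illustration.
#
# prober = EUCJPProber()
# prober.feed("日本語のテキスト".encode("euc-jp"))
# if prober.state == ProbingState.FOUND_IT or prober.get_confidence() > 0.5:
#     print(prober.charset_name, prober.get_confidence())
# ---------------------------------------------------------------------------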
|
the-stack_106_29672 | """
sphinx.domains.std
~~~~~~~~~~~~~~~~~~
The standard domain.
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import unicodedata
import warnings
from copy import copy
from typing import cast
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.statemachine import StringList
from sphinx import addnodes
from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.errors import NoUri
from sphinx.locale import _, __
from sphinx.roles import XRefRole
from sphinx.util import ws_re, logging, docname_join
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import clean_astext, make_refnode
if False:
# For type annotation
from typing import Any, Callable, Dict, Iterable, Iterator, List, Tuple, Type, Union # NOQA
from docutils.parsers.rst import Directive # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import RoleFunction # NOQA
logger = logging.getLogger(__name__)
# RE for option descriptions
option_desc_re = re.compile(r'((?:/|--|-|\+)?[^\s=]+)(=?\s*.*)')
# RE for grammar tokens
token_re = re.compile(r'`(\w+)`', re.U)
class GenericObject(ObjectDescription):
"""
A generic x-ref directive registered with Sphinx.add_object_type().
"""
indextemplate = ''
parse_node = None # type: Callable[[GenericObject, BuildEnvironment, str, addnodes.desc_signature], str] # NOQA
def handle_signature(self, sig, signode):
# type: (str, addnodes.desc_signature) -> str
if self.parse_node:
name = self.parse_node(self.env, sig, signode)
else:
signode.clear()
signode += addnodes.desc_name(sig, sig)
# normalize whitespace like XRefRole does
name = ws_re.sub('', sig)
return name
def add_target_and_index(self, name, sig, signode):
# type: (str, str, addnodes.desc_signature) -> None
targetname = '%s-%s' % (self.objtype, name)
signode['ids'].append(targetname)
self.state.document.note_explicit_target(signode)
if self.indextemplate:
colon = self.indextemplate.find(':')
if colon != -1:
indextype = self.indextemplate[:colon].strip()
indexentry = self.indextemplate[colon + 1:].strip() % (name,)
else:
indextype = 'single'
indexentry = self.indextemplate % (name,)
self.indexnode['entries'].append((indextype, indexentry,
targetname, '', None))
std = cast(StandardDomain, self.env.get_domain('std'))
std.add_object(self.objtype, name, self.env.docname, targetname)
class EnvVar(GenericObject):
indextemplate = _('environment variable; %s')
class EnvVarXRefRole(XRefRole):
"""
Cross-referencing role for environment variables (adds an index entry).
"""
def result_nodes(self, document, env, node, is_ref):
# type: (nodes.document, BuildEnvironment, nodes.Element, bool) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
if not is_ref:
return [node], []
varname = node['reftarget']
tgtid = 'index-%s' % env.new_serialno('index')
indexnode = addnodes.index()
indexnode['entries'] = [
('single', varname, tgtid, '', None),
('single', _('environment variable; %s') % varname, tgtid, '', None)
]
targetnode = nodes.target('', '', ids=[tgtid])
document.note_explicit_target(targetnode)
return [indexnode, targetnode, node], []
class Target(SphinxDirective):
"""
Generic target for user-defined cross-reference types.
"""
indextemplate = ''
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {} # type: Dict
def run(self):
# type: () -> List[nodes.Node]
# normalize whitespace in fullname like XRefRole does
fullname = ws_re.sub(' ', self.arguments[0].strip())
targetname = '%s-%s' % (self.name, fullname)
node = nodes.target('', '', ids=[targetname])
self.state.document.note_explicit_target(node)
ret = [node] # type: List[nodes.Node]
if self.indextemplate:
indexentry = self.indextemplate % (fullname,)
indextype = 'single'
colon = indexentry.find(':')
if colon != -1:
indextype = indexentry[:colon].strip()
indexentry = indexentry[colon + 1:].strip()
inode = addnodes.index(entries=[(indextype, indexentry,
targetname, '', None)])
ret.insert(0, inode)
name = self.name
if ':' in self.name:
_, name = self.name.split(':', 1)
std = cast(StandardDomain, self.env.get_domain('std'))
std.add_object(name, fullname, self.env.docname, targetname)
return ret
class Cmdoption(ObjectDescription):
"""
Description of a command-line option (.. option).
"""
def handle_signature(self, sig, signode):
# type: (str, addnodes.desc_signature) -> str
"""Transform an option description into RST nodes."""
count = 0
firstname = ''
for potential_option in sig.split(', '):
potential_option = potential_option.strip()
m = option_desc_re.match(potential_option)
if not m:
logger.warning(__('Malformed option description %r, should '
'look like "opt", "-opt args", "--opt args", '
'"/opt args" or "+opt args"'), potential_option,
location=(self.env.docname, self.lineno))
continue
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
signode['allnames'] = [optname]
else:
signode['allnames'].append(optname)
count += 1
if not firstname:
raise ValueError
return firstname
def add_target_and_index(self, firstname, sig, signode):
# type: (str, str, addnodes.desc_signature) -> None
currprogram = self.env.ref_context.get('std:program')
for optname in signode.get('allnames', []):
targetname = optname.replace('/', '-')
if not targetname.startswith('-'):
targetname = '-arg-' + targetname
if currprogram:
targetname = '-' + currprogram + targetname
targetname = 'cmdoption' + targetname
signode['names'].append(targetname)
domain = cast(StandardDomain, self.env.get_domain('std'))
self.state.document.note_explicit_target(signode)
for optname in signode.get('allnames', []):
domain.add_program_option(currprogram, optname,
self.env.docname, signode['ids'][0])
# create only one index entry for the whole option
if optname == firstname:
self.indexnode['entries'].append(
('pair', _('%scommand line option; %s') %
((currprogram and currprogram + ' ' or ''), sig),
signode['ids'][0], '', None))
class Program(SphinxDirective):
"""
Directive to name the program for which options are documented.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {} # type: Dict
def run(self):
# type: () -> List[nodes.Node]
program = ws_re.sub('-', self.arguments[0].strip())
if program == 'None':
self.env.ref_context.pop('std:program', None)
else:
self.env.ref_context['std:program'] = program
return []
class OptionXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
# type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
refnode['std:program'] = env.ref_context.get('std:program')
return title, target
def split_term_classifiers(line):
# type: (str) -> List[Union[str, None]]
    # split line into a term and classifiers; if no classifier, None is used.
parts = re.split(' +: +', line) + [None]
return parts
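# Example (illustrative): split_term_classifiers('apple : fruit') returns
# ['apple', 'fruit', None]; a line without a classifier, such as 'apple',
# returns ['apple', None].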
def make_glossary_term(env, textnodes, index_key, source, lineno, new_id=None):
# type: (BuildEnvironment, Iterable[nodes.Node], str, str, int, str) -> nodes.term
# get a text-only representation of the term and register it
# as a cross-reference target
term = nodes.term('', '', *textnodes)
term.source = source
term.line = lineno
gloss_entries = env.temp_data.setdefault('gloss_entries', set())
termtext = term.astext()
if new_id is None:
new_id = nodes.make_id('term-' + termtext)
if new_id in gloss_entries:
new_id = 'term-' + str(len(gloss_entries))
gloss_entries.add(new_id)
std = cast(StandardDomain, env.get_domain('std'))
std.add_object('term', termtext.lower(), env.docname, new_id)
# add an index entry too
indexnode = addnodes.index()
indexnode['entries'] = [('single', termtext, new_id, 'main', index_key)]
indexnode.source, indexnode.line = term.source, term.line
term.append(indexnode)
term['ids'].append(new_id)
term['names'].append(new_id)
return term
class Glossary(SphinxDirective):
"""
Directive to create a glossary with cross-reference targets for :term:
roles.
"""
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'sorted': directives.flag,
}
def run(self):
# type: () -> List[nodes.Node]
node = addnodes.glossary()
node.document = self.state.document
# This directive implements a custom format of the reST definition list
# that allows multiple lines of terms before the definition. This is
# easy to parse since we know that the contents of the glossary *must
# be* a definition list.
# first, collect single entries
entries = [] # type: List[Tuple[List[Tuple[str, str, int]], StringList]]
in_definition = True
was_empty = True
messages = [] # type: List[nodes.Node]
for line, (source, lineno) in zip(self.content, self.content.items):
# empty line -> add to last definition
if not line:
if in_definition and entries:
entries[-1][1].append('', source, lineno)
was_empty = True
continue
# unindented line -> a term
if line and not line[0].isspace():
# enable comments
if line.startswith('.. '):
continue
# first term of definition
if in_definition:
if not was_empty:
messages.append(self.state.reporter.system_message(
2, 'glossary term must be preceded by empty line',
source=source, line=lineno))
entries.append(([(line, source, lineno)], StringList()))
in_definition = False
# second term and following
else:
if was_empty:
messages.append(self.state.reporter.system_message(
2, 'glossary terms must not be separated by empty '
'lines', source=source, line=lineno))
if entries:
entries[-1][0].append((line, source, lineno))
else:
messages.append(self.state.reporter.system_message(
2, 'glossary seems to be misformatted, check '
'indentation', source=source, line=lineno))
else:
if not in_definition:
# first line of definition, determines indentation
in_definition = True
indent_len = len(line) - len(line.lstrip())
if entries:
entries[-1][1].append(line[indent_len:], source, lineno)
else:
messages.append(self.state.reporter.system_message(
2, 'glossary seems to be misformatted, check '
'indentation', source=source, line=lineno))
was_empty = False
# now, parse all the entries into a big definition list
items = []
for terms, definition in entries:
termtexts = [] # type: List[str]
termnodes = [] # type: List[nodes.Node]
system_messages = [] # type: List[nodes.Node]
for line, source, lineno in terms:
parts = split_term_classifiers(line)
# parse the term with inline markup
# classifiers (parts[1:]) will not be shown on doctree
textnodes, sysmsg = self.state.inline_text(parts[0], lineno)
# use first classifier as a index key
term = make_glossary_term(self.env, textnodes, parts[1], source, lineno)
term.rawsource = line
system_messages.extend(sysmsg)
termtexts.append(term.astext())
termnodes.append(term)
termnodes.extend(system_messages)
defnode = nodes.definition()
if definition:
self.state.nested_parse(definition, definition.items[0][1],
defnode)
termnodes.append(defnode)
items.append((termtexts,
nodes.definition_list_item('', *termnodes)))
if 'sorted' in self.options:
items.sort(key=lambda x:
unicodedata.normalize('NFD', x[0][0].lower()))
dlist = nodes.definition_list()
dlist['classes'].append('glossary')
dlist.extend(item[1] for item in items)
node += dlist
return messages + [node]
def token_xrefs(text):
# type: (str) -> List[nodes.Node]
retnodes = [] # type: List[nodes.Node]
pos = 0
for m in token_re.finditer(text):
if m.start() > pos:
txt = text[pos:m.start()]
retnodes.append(nodes.Text(txt, txt))
refnode = addnodes.pending_xref(
m.group(1), reftype='token', refdomain='std', reftarget=m.group(1))
refnode += nodes.literal(m.group(1), m.group(1), classes=['xref'])
retnodes.append(refnode)
pos = m.end()
if pos < len(text):
retnodes.append(nodes.Text(text[pos:], text[pos:]))
return retnodes
class ProductionList(SphinxDirective):
"""
Directive to list grammar productions.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {} # type: Dict
def run(self):
# type: () -> List[nodes.Node]
domain = cast(StandardDomain, self.env.get_domain('std'))
node = addnodes.productionlist() # type: nodes.Element
i = 0
for rule in self.arguments[0].split('\n'):
if i == 0 and ':' not in rule:
# production group
continue
i += 1
try:
name, tokens = rule.split(':', 1)
except ValueError:
break
subnode = addnodes.production()
subnode['tokenname'] = name.strip()
if subnode['tokenname']:
idname = nodes.make_id('grammar-token-%s' % subnode['tokenname'])
if idname not in self.state.document.ids:
subnode['ids'].append(idname)
self.state.document.note_implicit_target(subnode, subnode)
domain.add_object('token', subnode['tokenname'], self.env.docname, idname)
subnode.extend(token_xrefs(tokens))
node.append(subnode)
return [node]
class StandardDomain(Domain):
"""
Domain for all objects that don't fit into another domain or are added
via the application interface.
"""
name = 'std'
label = 'Default'
object_types = {
'term': ObjType(_('glossary term'), 'term', searchprio=-1),
'token': ObjType(_('grammar token'), 'token', searchprio=-1),
'label': ObjType(_('reference label'), 'ref', 'keyword',
searchprio=-1),
'envvar': ObjType(_('environment variable'), 'envvar'),
'cmdoption': ObjType(_('program option'), 'option'),
'doc': ObjType(_('document'), 'doc', searchprio=-1)
} # type: Dict[str, ObjType]
directives = {
'program': Program,
'cmdoption': Cmdoption, # old name for backwards compatibility
'option': Cmdoption,
'envvar': EnvVar,
'glossary': Glossary,
'productionlist': ProductionList,
} # type: Dict[str, Type[Directive]]
roles = {
'option': OptionXRefRole(warn_dangling=True),
'envvar': EnvVarXRefRole(),
# links to tokens in grammar productions
'token': XRefRole(),
# links to terms in glossary
'term': XRefRole(lowercase=True, innernodeclass=nodes.inline,
warn_dangling=True),
# links to headings or arbitrary labels
'ref': XRefRole(lowercase=True, innernodeclass=nodes.inline,
warn_dangling=True),
# links to labels of numbered figures, tables and code-blocks
'numref': XRefRole(lowercase=True,
warn_dangling=True),
# links to labels, without a different title
'keyword': XRefRole(warn_dangling=True),
# links to documents
'doc': XRefRole(warn_dangling=True, innernodeclass=nodes.inline),
} # type: Dict[str, Union[RoleFunction, XRefRole]]
initial_data = {
'progoptions': {}, # (program, name) -> docname, labelid
'objects': {}, # (type, name) -> docname, labelid
'citations': {}, # citation_name -> docname, labelid, lineno
'citation_refs': {}, # citation_name -> list of docnames
'labels': { # labelname -> docname, labelid, sectionname
'genindex': ('genindex', '', _('Index')),
'modindex': ('py-modindex', '', _('Module Index')),
'search': ('search', '', _('Search Page')),
},
'anonlabels': { # labelname -> docname, labelid
'genindex': ('genindex', ''),
'modindex': ('py-modindex', ''),
'search': ('search', ''),
},
}
dangling_warnings = {
'term': 'term not in glossary: %(target)s',
'ref': 'undefined label: %(target)s (if the link has no caption '
'the label must precede a section header)',
'numref': 'undefined label: %(target)s',
'keyword': 'unknown keyword: %(target)s',
'doc': 'unknown document: %(target)s',
'option': 'unknown option: %(target)s',
'citation': 'citation not found: %(target)s',
}
enumerable_nodes = { # node_class -> (figtype, title_getter)
nodes.figure: ('figure', None),
nodes.table: ('table', None),
nodes.container: ('code-block', None),
} # type: Dict[Type[nodes.Node], Tuple[str, Callable]]
def __init__(self, env):
# type: (BuildEnvironment) -> None
super().__init__(env)
# set up enumerable nodes
self.enumerable_nodes = copy(self.enumerable_nodes) # create a copy for this instance
for node, settings in env.app.registry.enumerable_nodes.items():
self.enumerable_nodes[node] = settings
def clear_doc(self, docname):
# type: (str) -> None
for key, (fn, _l) in list(self.data['progoptions'].items()):
if fn == docname:
del self.data['progoptions'][key]
for key, (fn, _l) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][key]
for key, (fn, _l, lineno) in list(self.data['citations'].items()):
if fn == docname:
del self.data['citations'][key]
for key, docnames in list(self.data['citation_refs'].items()):
if docnames == [docname]:
del self.data['citation_refs'][key]
elif docname in docnames:
docnames.remove(docname)
for key, (fn, _l, _l) in list(self.data['labels'].items()):
if fn == docname:
del self.data['labels'][key]
for key, (fn, _l) in list(self.data['anonlabels'].items()):
if fn == docname:
del self.data['anonlabels'][key]
def merge_domaindata(self, docnames, otherdata):
# type: (List[str], Dict) -> None
# XXX duplicates?
for key, data in otherdata['progoptions'].items():
if data[0] in docnames:
self.data['progoptions'][key] = data
for key, data in otherdata['objects'].items():
if data[0] in docnames:
self.data['objects'][key] = data
for key, data in otherdata['citations'].items():
if data[0] in docnames:
self.data['citations'][key] = data
for key, data in otherdata['citation_refs'].items():
citation_refs = self.data['citation_refs'].setdefault(key, [])
for docname in data:
if docname in docnames:
citation_refs.append(docname)
for key, data in otherdata['labels'].items():
if data[0] in docnames:
self.data['labels'][key] = data
for key, data in otherdata['anonlabels'].items():
if data[0] in docnames:
self.data['anonlabels'][key] = data
def process_doc(self, env, docname, document):
# type: (BuildEnvironment, str, nodes.document) -> None
self.note_citations(env, docname, document)
self.note_citation_refs(env, docname, document)
self.note_labels(env, docname, document)
def note_citations(self, env, docname, document):
# type: (BuildEnvironment, str, nodes.document) -> None
for node in document.traverse(nodes.citation):
node['docname'] = docname
label = cast(nodes.label, node[0]).astext()
if label in self.data['citations']:
path = env.doc2path(self.data['citations'][label][0])
logger.warning(__('duplicate citation %s, other instance in %s'), label, path,
location=node, type='ref', subtype='citation')
self.data['citations'][label] = (docname, node['ids'][0], node.line)
def note_citation_refs(self, env, docname, document):
# type: (BuildEnvironment, str, nodes.document) -> None
for node in document.traverse(addnodes.pending_xref):
if node['refdomain'] == 'std' and node['reftype'] == 'citation':
label = node['reftarget']
citation_refs = self.data['citation_refs'].setdefault(label, [])
citation_refs.append(docname)
def note_labels(self, env, docname, document):
# type: (BuildEnvironment, str, nodes.document) -> None
labels, anonlabels = self.data['labels'], self.data['anonlabels']
for name, explicit in document.nametypes.items():
if not explicit:
continue
labelid = document.nameids[name]
if labelid is None:
continue
node = document.ids[labelid]
if isinstance(node, nodes.target) and 'refid' in node:
# indirect hyperlink targets
node = document.ids.get(node['refid'])
labelid = node['names'][0]
if (node.tagname == 'footnote' or
'refuri' in node or
node.tagname.startswith('desc_')):
# ignore footnote labels, labels automatically generated from a
# link and object descriptions
continue
if name in labels:
logger.warning(__('duplicate label %s, other instance in %s'),
name, env.doc2path(labels[name][0]),
location=node)
anonlabels[name] = docname, labelid
if node.tagname in ('section', 'rubric'):
title = cast(nodes.title, node[0])
sectname = clean_astext(title)
elif self.is_enumerable_node(node):
sectname = self.get_numfig_title(node)
if not sectname:
continue
elif node.traverse(addnodes.toctree):
n = node.traverse(addnodes.toctree)[0]
if n.get('caption'):
sectname = n['caption']
else:
continue
else:
# anonymous-only labels
continue
labels[name] = docname, labelid, sectname
def add_object(self, objtype, name, docname, labelid):
# type: (str, str, str, str) -> None
self.data['objects'][objtype, name] = (docname, labelid)
def add_program_option(self, program, name, docname, labelid):
# type: (str, str, str, str) -> None
self.data['progoptions'][program, name] = (docname, labelid)
def check_consistency(self):
# type: () -> None
for name, (docname, labelid, lineno) in self.data['citations'].items():
if name not in self.data['citation_refs']:
logger.warning(__('Citation [%s] is not referenced.'), name,
type='ref', subtype='citation',
location=(docname, lineno))
def build_reference_node(self, fromdocname, builder, docname, labelid,
sectname, rolename, **options):
# type: (str, Builder, str, str, str, str, Any) -> nodes.Element
nodeclass = options.pop('nodeclass', nodes.reference)
newnode = nodeclass('', '', internal=True, **options)
innernode = nodes.inline(sectname, sectname)
if innernode.get('classes') is not None:
innernode['classes'].append('std')
innernode['classes'].append('std-' + rolename)
if docname == fromdocname:
newnode['refid'] = labelid
else:
# set more info in contnode; in case the
# get_relative_uri call raises NoUri,
# the builder will then have to resolve these
contnode = addnodes.pending_xref('')
contnode['refdocname'] = docname
contnode['refsectname'] = sectname
newnode['refuri'] = builder.get_relative_uri(
fromdocname, docname)
if labelid:
newnode['refuri'] += '#' + labelid
newnode.append(innernode)
return newnode
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
if typ == 'ref':
resolver = self._resolve_ref_xref
elif typ == 'numref':
resolver = self._resolve_numref_xref
elif typ == 'keyword':
resolver = self._resolve_keyword_xref
elif typ == 'doc':
resolver = self._resolve_doc_xref
elif typ == 'option':
resolver = self._resolve_option_xref
elif typ == 'citation':
resolver = self._resolve_citation_xref
else:
resolver = self._resolve_obj_xref
return resolver(env, fromdocname, builder, typ, target, node, contnode)
def _resolve_ref_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
if node['refexplicit']:
# reference to anonymous label; the reference uses
# the supplied link caption
docname, labelid = self.data['anonlabels'].get(target, ('', ''))
sectname = node.astext()
else:
# reference to named label; the final node will
# contain the section name after the label
docname, labelid, sectname = self.data['labels'].get(target,
('', '', ''))
if not docname:
return None
return self.build_reference_node(fromdocname, builder,
docname, labelid, sectname, 'ref')
def _resolve_numref_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
if target in self.data['labels']:
docname, labelid, figname = self.data['labels'].get(target, ('', '', ''))
else:
docname, labelid = self.data['anonlabels'].get(target, ('', ''))
figname = None
if not docname:
return None
target_node = env.get_doctree(docname).ids.get(labelid)
figtype = self.get_enumerable_node_type(target_node)
if figtype is None:
return None
if figtype != 'section' and env.config.numfig is False:
logger.warning(__('numfig is disabled. :numref: is ignored.'), location=node)
return contnode
try:
fignumber = self.get_fignumber(env, builder, figtype, docname, target_node)
if fignumber is None:
return contnode
except ValueError:
logger.warning(__("no number is assigned for %s: %s"), figtype, labelid,
location=node)
return contnode
try:
if node['refexplicit']:
title = contnode.astext()
else:
title = env.config.numfig_format.get(figtype, '')
if figname is None and '{name}' in title:
logger.warning(__('the link has no caption: %s'), title, location=node)
return contnode
else:
fignum = '.'.join(map(str, fignumber))
if '{name}' in title or 'number' in title:
# new style format (cf. "Fig.{number}")
if figname:
newtitle = title.format(name=figname, number=fignum)
else:
newtitle = title.format(number=fignum)
else:
# old style format (cf. "Fig.%s")
newtitle = title % fignum
except KeyError as exc:
logger.warning(__('invalid numfig_format: %s (%r)'), title, exc, location=node)
return contnode
except TypeError:
logger.warning(__('invalid numfig_format: %s'), title, location=node)
return contnode
return self.build_reference_node(fromdocname, builder,
docname, labelid, newtitle, 'numref',
nodeclass=addnodes.number_reference,
title=title)
def _resolve_keyword_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# keywords are oddballs: they are referenced by named labels
docname, labelid, _ = self.data['labels'].get(target, ('', '', ''))
if not docname:
return None
return make_refnode(builder, fromdocname, docname,
labelid, contnode)
def _resolve_doc_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# directly reference to document by source name; can be absolute or relative
refdoc = node.get('refdoc', fromdocname)
docname = docname_join(refdoc, node['reftarget'])
if docname not in env.all_docs:
return None
else:
if node['refexplicit']:
# reference with explicit title
caption = node.astext()
else:
caption = clean_astext(env.titles[docname])
innernode = nodes.inline(caption, caption, classes=['doc'])
return make_refnode(builder, fromdocname, docname, None, innernode)
def _resolve_option_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
progname = node.get('std:program')
target = target.strip()
docname, labelid = self.data['progoptions'].get((progname, target), ('', ''))
if not docname:
commands = []
while ws_re.search(target):
subcommand, target = ws_re.split(target, 1)
commands.append(subcommand)
progname = "-".join(commands)
docname, labelid = self.data['progoptions'].get((progname, target),
('', ''))
if docname:
break
else:
return None
return make_refnode(builder, fromdocname, docname,
labelid, contnode)
def _resolve_citation_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
docname, labelid, lineno = self.data['citations'].get(target, ('', '', 0))
if not docname:
if 'ids' in node:
# remove ids attribute that annotated at
# transforms.CitationReference.apply.
del node['ids'][:]
return None
try:
return make_refnode(builder, fromdocname, docname,
labelid, contnode)
except NoUri:
# remove the ids we added in the CitationReferences
            # transform since they can't be transferred to
# the contnode (if it's a Text node)
if not isinstance(contnode, nodes.Element):
del node['ids'][:]
raise
def _resolve_obj_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
objtypes = self.objtypes_for_role(typ) or []
for objtype in objtypes:
if (objtype, target) in self.data['objects']:
docname, labelid = self.data['objects'][objtype, target]
break
else:
docname, labelid = '', ''
if not docname:
return None
return make_refnode(builder, fromdocname, docname,
labelid, contnode)
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
# type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
results = [] # type: List[Tuple[str, nodes.Element]]
ltarget = target.lower() # :ref: lowercases its target automatically
for role in ('ref', 'option'): # do not try "keyword"
res = self.resolve_xref(env, fromdocname, builder, role,
ltarget if role == 'ref' else target,
node, contnode)
if res:
results.append(('std:' + role, res))
# all others
for objtype in self.object_types:
key = (objtype, target)
if objtype == 'term':
key = (objtype, ltarget)
if key in self.data['objects']:
docname, labelid = self.data['objects'][key]
results.append(('std:' + self.role_for_objtype(objtype),
make_refnode(builder, fromdocname, docname,
labelid, contnode)))
return results
def get_objects(self):
# type: () -> Iterator[Tuple[str, str, str, str, str, int]]
# handle the special 'doc' reference here
for doc in self.env.all_docs:
yield (doc, clean_astext(self.env.titles[doc]), 'doc', doc, '', -1)
for (prog, option), info in self.data['progoptions'].items():
if prog:
fullname = ".".join([prog, option])
yield (fullname, fullname, 'cmdoption', info[0], info[1], 1)
else:
yield (option, option, 'cmdoption', info[0], info[1], 1)
for (type, name), info in self.data['objects'].items():
yield (name, name, type, info[0], info[1],
self.object_types[type].attrs['searchprio'])
for name, info in self.data['labels'].items():
yield (name, info[2], 'label', info[0], info[1], -1)
# add anonymous-only labels as well
non_anon_labels = set(self.data['labels'])
for name, info in self.data['anonlabels'].items():
if name not in non_anon_labels:
yield (name, name, 'label', info[0], info[1], -1)
def get_type_name(self, type, primary=False):
# type: (ObjType, bool) -> str
# never prepend "Default"
return type.lname
def is_enumerable_node(self, node):
# type: (nodes.Node) -> bool
return node.__class__ in self.enumerable_nodes
def get_numfig_title(self, node):
# type: (nodes.Node) -> str
"""Get the title of enumerable nodes to refer them using its title"""
if self.is_enumerable_node(node):
_, title_getter = self.enumerable_nodes.get(node.__class__, (None, None))
if title_getter:
return title_getter(node)
else:
for subnode in node:
if subnode.tagname in ('caption', 'title'):
return clean_astext(subnode)
return None
def get_enumerable_node_type(self, node):
# type: (nodes.Node) -> str
"""Get type of enumerable nodes."""
def has_child(node, cls):
# type: (nodes.Element, Type) -> bool
return any(isinstance(child, cls) for child in node)
if isinstance(node, nodes.section):
return 'section'
elif isinstance(node, nodes.container):
if node.get('literal_block') and has_child(node, nodes.literal_block):
return 'code-block'
else:
return None
else:
figtype, _ = self.enumerable_nodes.get(node.__class__, (None, None))
return figtype
def get_figtype(self, node):
# type: (nodes.Node) -> str
"""Get figure type of nodes.
.. deprecated:: 1.8
"""
warnings.warn('StandardDomain.get_figtype() is deprecated. '
'Please use get_enumerable_node_type() instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.get_enumerable_node_type(node)
def get_fignumber(self, env, builder, figtype, docname, target_node):
# type: (BuildEnvironment, Builder, str, str, nodes.Element) -> Tuple[int, ...]
if figtype == 'section':
if builder.name == 'latex':
return tuple()
elif docname not in env.toc_secnumbers:
raise ValueError # no number assigned
else:
anchorname = '#' + target_node['ids'][0]
if anchorname not in env.toc_secnumbers[docname]:
# try first heading which has no anchor
return env.toc_secnumbers[docname].get('')
else:
return env.toc_secnumbers[docname].get(anchorname)
else:
try:
figure_id = target_node['ids'][0]
return env.toc_fignumbers[docname][figtype][figure_id]
except (KeyError, IndexError):
# target_node is found, but fignumber is not assigned.
# Maybe it is defined in orphaned document.
raise ValueError
def get_full_qualified_name(self, node):
# type: (nodes.Element) -> str
if node.get('reftype') == 'option':
progname = node.get('std:program')
command = ws_re.split(node.get('reftarget'))
if progname:
command.insert(0, progname)
option = command.pop()
if command:
return '.'.join(['-'.join(command), option])
else:
return None
else:
return None
def setup(app):
# type: (Sphinx) -> Dict[str, Any]
app.add_domain(StandardDomain)
return {
'version': 'builtin',
'env_version': 1,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
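# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original module): GenericObject above is
# the directive that backs Sphinx.add_object_type(). A conf.py sketch creating
# a ``.. confval::`` directive plus a ``:confval:`` role could look like this;
# the directive name and index text are illustrative choices.
#
# def setup(app):
#     app.add_object_type('confval', 'confval',
#                         objname='configuration value',
#                         indextemplate='pair: %s; configuration value')
# ---------------------------------------------------------------------------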
|
the-stack_106_29673 | """
Test cases for the metadata reading/writing of pyslim.
"""
from __future__ import print_function
from __future__ import division
import pyslim
import msprime
import tests
import unittest
import random
import json
def get_msprime_examples():
demographic_events = [
msprime.MassMigration(
time=5, source=1, destination=0, proportion=1.0)
]
for n in [2, 10, 100]:
for mutrate in [0.0]:
for recrate in [0.0, 1.0]:
yield msprime.simulate(n, mutation_rate=mutrate,
recombination_rate=recrate)
population_configurations =[
msprime.PopulationConfiguration(
sample_size=n, initial_size=100),
msprime.PopulationConfiguration(
sample_size=n, initial_size=100)
]
yield msprime.simulate(
population_configurations=population_configurations,
demographic_events=demographic_events,
recombination_rate=recrate,
mutation_rate=mutrate)
class TestAnnotate(tests.PyslimTestCase):
'''
Tests for tools to annotate existing msprime-derived tree sequences.
'''
def verify_annotated_tables(self, ts1, ts2):
'''
Verify that the tables returned after annotation are equal, up to the
expected forgetting of metadata.
'''
tables1 = ts1.tables
tables2 = ts2.tables
# compare nodes
self.assertArrayEqual(tables1.nodes.flags, tables2.nodes.flags)
self.assertArrayAlmostEqual(tables1.nodes.time, tables2.nodes.time)
self.assertArrayEqual(tables1.nodes.population, tables2.nodes.population)
# compare edges
self.assertEqual(tables1.edges, tables2.edges)
# compare sites
self.assertArrayEqual(tables1.sites.position, tables2.sites.position)
self.assertArrayEqual(tables1.sites.ancestral_state, tables2.sites.ancestral_state)
self.assertArrayEqual(tables1.sites.ancestral_state_offset,
tables2.sites.ancestral_state_offset)
# compare mutations
self.assertArrayEqual(tables1.mutations.site, tables2.mutations.site)
self.assertArrayEqual(tables1.mutations.node, tables2.mutations.node)
self.assertArrayEqual(tables1.mutations.derived_state, tables2.mutations.derived_state)
self.assertArrayEqual(tables1.mutations.derived_state_offset,
tables2.mutations.derived_state_offset)
def verify_annotated_trees(self, ts1, ts2):
'''
Verify the *trees* returned before and after annotation are equal.
'''
self.assertEqual(ts1.num_trees, ts2.num_trees)
for t1, t2 in zip(ts1.trees(), ts2.trees()):
self.assertEqual(t1.length, t2.length)
self.assertEqual(t1.get_parent_dict(), t2.get_parent_dict())
self.assertAlmostEqual(t1.total_branch_length, t2.total_branch_length)
def verify_consistency(self, ts):
'''
Check that individuals exist, and populations agree between nodes and individuals.
'''
def verify_defaults(self, ts):
'''
Verify the default values have been entered into metadata.
'''
mut_md = pyslim.extract_mutation_metadata(ts.tables)
for md in mut_md:
self.assertEqual(md.mutation_type, 1)
self.assertEqual(md.selection_coeff, 0.0)
self.assertEqual(md.population, msprime.NULL_POPULATION)
self.assertEqual(md.slim_time, 0)
node_md = pyslim.extract_node_metadata(ts.tables)
for md, node in zip(node_md, ts.nodes()):
if not node.is_sample():
self.assertEqual(md, None)
else:
self.assertEqual(md.is_null, False)
self.assertEqual(md.genome_type, pyslim.GENOME_TYPE_AUTOSOME)
for ind in ts.individuals():
self.assertArrayEqual(ind.location, [0, 0, 0])
self.assertEqual(ind.flags, pyslim.INDIVIDUAL_ALIVE)
ind_md = pyslim.extract_individual_metadata(ts.tables)
for md in ind_md:
self.assertEqual(md.sex, pyslim.INDIVIDUAL_TYPE_HERMAPHRODITE)
self.assertEqual(md.flags, 0)
pop_md = pyslim.extract_population_metadata(ts.tables)
for md in pop_md:
self.assertEqual(md.selfing_fraction, 0.0)
self.assertEqual(md.female_cloning_fraction, 0.0)
self.assertEqual(md.male_cloning_fraction, 0.0)
self.assertEqual(md.sex_ratio, 0.5)
self.assertEqual(md.bounds_x0, 0.0)
self.assertEqual(md.bounds_x1, 0.0)
self.assertEqual(md.bounds_y0, 0.0)
self.assertEqual(md.bounds_y1, 0.0)
self.assertEqual(md.bounds_z0, 0.0)
self.assertEqual(md.bounds_z1, 0.0)
self.assertEqual(len(md.migration_records), 0)
def verify_provenance(self, ts):
for u in ts.provenances():
msprime.validate_provenance(json.loads(u.record))
def test_basic_annotation(self):
for ts in get_msprime_examples():
slim_gen = 4
slim_ts = pyslim.annotate_defaults(ts, model_type="WF",
slim_generation=slim_gen)
self.verify_annotated_tables(ts, slim_ts)
self.verify_annotated_trees(ts, slim_ts)
self.verify_haplotype_equality(ts, slim_ts)
self.verify_defaults(slim_ts)
self.verify_provenance(slim_ts)
def test_annotate_individuals(self):
for ts in get_msprime_examples():
slim_ts = pyslim.annotate_defaults(ts, model_type="nonWF", slim_generation=1)
tables = slim_ts.tables
metadata = list(pyslim.extract_individual_metadata(tables))
self.assertEqual(len(metadata), slim_ts.num_individuals)
sexes = [random.choice([pyslim.INDIVIDUAL_TYPE_FEMALE, pyslim.INDIVIDUAL_TYPE_MALE])
for _ in metadata]
for j in range(len(metadata)):
metadata[j].sex = sexes[j]
pyslim.annotate_individual_metadata(tables, metadata)
new_ts = pyslim.load_tables(tables)
for j, ind in enumerate(new_ts.individuals()):
md = pyslim.decode_individual(ind.metadata)
self.assertEqual(md.sex, sexes[j])
def test_annotate_mutations(self):
for ts in get_msprime_examples():
slim_ts = pyslim.annotate_defaults(ts, model_type="nonWF", slim_generation=1)
tables = slim_ts.tables
metadata = list(pyslim.extract_mutation_metadata(tables))
self.assertEqual(len(metadata), slim_ts.num_mutations)
selcoefs = [random.uniform(0, 1) for _ in metadata]
for j in range(len(metadata)):
metadata[j].selection_coeff = selcoefs[j]
pyslim.annotate_mutation_metadata(tables, metadata)
new_ts = pyslim.load_tables(tables)
for j, x in enumerate(new_ts.mutations()):
md = pyslim.decode_mutation(x.metadata)
self.assertEqual(md.selection_coeff, selcoefs[j])
def test_annotate_nodes(self):
for ts in get_msprime_examples():
slim_ts = pyslim.annotate_defaults(ts, model_type="nonWF", slim_generation=1)
tables = slim_ts.tables
metadata = list(pyslim.extract_node_metadata(tables))
self.assertEqual(len(metadata), slim_ts.num_nodes)
gtypes = [random.choice([pyslim.GENOME_TYPE_X, pyslim.GENOME_TYPE_Y])
for _ in metadata]
for j in range(len(metadata)):
if metadata[j] is not None:
metadata[j].genome_type = gtypes[j]
pyslim.annotate_node_metadata(tables, metadata)
new_ts = pyslim.load_tables(tables)
for j, x in enumerate(new_ts.nodes()):
md = pyslim.decode_node(x.metadata)
if md is not None:
self.assertEqual(md.genome_type, gtypes[j])
|
the-stack_106_29674 | #!/bin/env python
# -*- coding: utf-8 -*-
##
# test_azure.py: Tests Azure Quantum functionality against a mock workspace.
##
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
## IMPORTS ##
import importlib
import os
import pytest
import qsharp
from qsharp.azure import AzureError, AzureJob, AzureTarget
import sys
## SETUP ##
@pytest.fixture(scope="session", autouse=True)
def set_environment_variables():
# Need to restart the IQ# kernel after setting the environment variable
os.environ["AZURE_QUANTUM_ENV"] = "mock"
importlib.reload(qsharp)
if "qsharp.chemistry" in sys.modules:
importlib.reload(qsharp.chemistry)
## TESTS ##
def test_empty_workspace():
"""
Tests behavior of a mock workspace with no providers.
"""
with pytest.raises(AzureError) as exception_info:
qsharp.azure.target()
assert exception_info.value.error_name == "NotConnected"
targets = qsharp.azure.connect(
storage="test",
subscription="test",
resourceGroup="test",
workspace="test"
)
assert targets == []
with pytest.raises(AzureError) as exception_info:
qsharp.azure.target("invalid.target")
assert exception_info.value.error_name == "InvalidTarget"
jobs = qsharp.azure.jobs()
assert jobs == []
def test_workspace_create_with_parameters():
"""
Tests behavior of a mock workspace with providers, using parameters to connect.
"""
targets = qsharp.azure.connect(
storage="test",
subscription="test",
resourceGroup="test",
workspace="WorkspaceNameWithMockProviders"
)
assert isinstance(targets, list)
assert len(targets) > 0
_test_workspace_with_providers_after_connection()
def test_workspace_create_with_resource_id():
"""
Tests behavior of a mock workspace with providers, using resource ID to connect.
Also verifies case-insensitivity of resource ID parsing.
"""
subscriptionId = "f846b2bd-d0e2-4a1d-8141-4c6944a9d387"
resourceGroupName = "test"
workspaceName = "WorkspaceNameWithMockProviders"
targets = qsharp.azure.connect(
resourceId=f"/subscriptions/{subscriptionId}/RESOurceGroups/{resourceGroupName}/providers/Microsoft.Quantum/Workspaces/{workspaceName}")
assert isinstance(targets, list)
assert len(targets) > 0
_test_workspace_with_providers_after_connection()
_test_workspace_job_execution()
def test_workspace_create_with_resource_id_and_storage():
"""
Tests behavior of a mock workspace with providers, using resource ID and storage connection string to connect.
"""
subscriptionId = "f846b2bd-d0e2-4a1d-8141-4c6944a9d387"
resourceGroupName = "test"
workspaceName = "WorkspaceNameWithMockProviders"
storageAccountConnectionString = "test"
targets = qsharp.azure.connect(
resourceId=f"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Quantum/Workspaces/{workspaceName}",
storage=storageAccountConnectionString)
assert isinstance(targets, list)
assert len(targets) > 0
_test_workspace_with_providers_after_connection()
def _test_workspace_with_providers_after_connection():
with pytest.raises(AzureError) as exception_info:
qsharp.azure.target()
assert exception_info.value.error_name == "NoTarget"
targets = qsharp.azure.connect()
for target in targets:
active_target = qsharp.azure.target(target.id)
assert isinstance(active_target, AzureTarget)
assert active_target == target
# Submit a snippet operation without parameters
op = qsharp.compile("""
operation HelloQ() : Result
{
Message($"Hello from quantum world!");
return Zero;
}
""")
job = qsharp.azure.submit(op)
assert isinstance(job, AzureJob)
retrieved_job = qsharp.azure.status(job.id)
assert isinstance(retrieved_job, AzureJob)
assert job.id == retrieved_job.id
def _test_workspace_job_execution():
# Execute a workspace operation with parameters
op = qsharp.QSharpCallable("Microsoft.Quantum.SanityTests.HelloAgain", None)
with pytest.raises(AzureError) as exception_info:
qsharp.azure.execute(op)
assert exception_info.value.error_name == "JobSubmissionFailed"
histogram = qsharp.azure.execute(op, count=3, name="test", timeout=3, poll=0.5)
assert isinstance(histogram, dict)
retrieved_histogram = qsharp.azure.output()
assert isinstance(retrieved_histogram, dict)
assert histogram == retrieved_histogram
# Check that the submitted job exists in the workspace
jobs = qsharp.azure.jobs()
assert isinstance(jobs, list)
assert len(jobs) == 1
# Check that job filtering works
jobs = qsharp.azure.jobs(jobs[0].id)
assert isinstance(jobs, list)
assert len(jobs) == 1
jobs = qsharp.azure.jobs("invalid")
assert isinstance(jobs, list)
assert len(jobs) == 0
|
the-stack_106_29676 | import datetime
import discord
from discord import TextChannel, Message
from discord.ext import commands
import utils.json_loader
class Modlog(commands.Cog):
"""Logging"""
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print(f"{self.__class__.__name__} Cog has been loaded\n-----")
@commands.has_permissions(administrator=True)
    @commands.command(
        name="modlog", description="Set up the mod-logs channel", usage="<channel>"
    )
    async def modlog(self, ctx, channel: discord.TextChannel):
        guild_ID = ctx.guild.id
        data = utils.json_loader.read_json("server_config")
        if str(guild_ID) not in data:
            data[str(guild_ID)] = {"mod-logID": channel.id,
                                   "name": ctx.guild.name, "guildID": guild_ID}
            utils.json_loader.write_json(data, "server_config")
            await ctx.send("Mod logs channel id stored successfully")
        else:
            await ctx.send("Mod logs were already set!")
            return
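    # For reference, each guild entry written to server_config.json is assumed to
    # look roughly like (illustrative values):
    #   {"<guild id>": {"mod-logID": 1234567890, "name": "Guild Name", "guildID": 1234567890}}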
@commands.Cog.listener()
async def on_member_join(self, member: discord.Member):
data = utils.json_loader.read_json("server_config")
guild_ID = member.guild.id
modlogs = self.bot.get_channel(data[str(guild_ID)]["mod-logID"])
embed = discord.Embed(title=f"Member {member} joined the the server.", color=member.color,
timestamp=datetime.datetime.utcnow(),
description=f"**Their account was created at:** {member.created_at}")
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(text=f"UUID: {member.id}")
await modlogs.send(embed=embed)
@commands.Cog.listener()
async def on_member_remove(self, member: discord.Member):
data = utils.json_loader.read_json("server_config")
guild_ID = member.guild.id
roles = [role for role in member.roles]
modlogs = self.bot.get_channel(data[str(guild_ID)]["mod-logID"])
embed = discord.Embed(title=f"Member {member} left from the server.", color=member.color,
timestamp=datetime.datetime.utcnow(),
description=f"**Their account was created at:** {member.created_at}")
embed.add_field(name="Their roles:", value=" ".join(
[role.mention for role in roles]))
embed.set_footer(text=f"UUID: {member.id}")
embed.set_thumbnail(url=member.avatar_url)
await modlogs.send(embed=embed)
@commands.Cog.listener()
    async def on_message_edit(self, before, after):
guild_ID = after.guild.id
data = utils.json_loader.read_json("server_config")
        modlogs = self.bot.get_channel(data[str(guild_ID)]["mod-logID"])
if not after.author.bot:
if before.content != after.content:
embed = discord.Embed(title=f"Message Edited by {after.author}", color=after.author.color,
description=f"Message edited in {after.channel.mention}",
timestamp=datetime.datetime.utcnow())
fields = [("Before", before.content, False)]
embed.set_thumbnail(url=f"{after.author.avatar_url}")
embed.set_footer(text=f"UUID: {after.id}")
embed.add_field(
name="After", value=after.content, inline=False)
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
await modlogs.send(embed=embed)
@commands.Cog.listener()
async def on_message_delete(self, message):
guild_ID = message.guild.id
data = utils.json_loader.read_json("server_config")
modlogs = self.bot.get_channel(data[str(guild_ID)]["mod-logID"])
if not message.author.bot:
embed = discord.Embed(title=f"Message deleted by {message.author}",
description=f"Message deleted in {message.channel.mention}", color=0xE74C3C,
timestamp=datetime.datetime.utcnow())
fields = [("Content", message.content, False)]
embed.set_thumbnail(url=message.author.avatar_url)
embed.set_footer(text=f"UUID: {message.author.id}")
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
await modlogs.send(embed=embed)
@commands.Cog.listener()
    async def on_member_update(self, before, after):
guild_ID = after.guild.id
data = utils.json_loader.read_json("server_config")
modlogs = self.bot.get_channel(data[str(guild_ID)]["mod-logID"])
if before.display_name != after.display_name:
embed = discord.Embed(title=f"Nickname change made by {after}", color=after.color,
timestamp=datetime.datetime.utcnow())
embed.set_thumbnail(url=f"{after.avatar_url}")
embed.set_footer(text=f"UUID: {after.id}")
fields = [("After", before.display_name, False),
("Before", after.display_name, False)]
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
await modlogs.send(embed=embed)
elif before.roles != after.roles:
embed = discord.Embed(title=f"Roles updated for {after}", color=after.color,
timestamp=datetime.datetime.utcnow())
embed.set_thumbnail(url=f"{after.avatar_url}")
embed.set_footer(text=f"UUID: {after.id}")
fields = [("After", " |\u200B".join([r.mention for r in before.roles]), False),
("Before", " |\u200B ".join([r.mention for r in after.roles]), False)]
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
await modlogs.send(embed=embed)
@commands.Cog.listener()
async def on_member_ban(self, guild, user: discord.Member):
        data = utils.json_loader.read_json("server_config")
        member = user
        guild_ID = guild.id
        modlogs = self.bot.get_channel(data[str(guild_ID)]["mod-logID"])
        embed = discord.Embed(title="**Member Banned**",
                              color=member.color, timestamp=datetime.datetime.utcnow())
        embed.set_thumbnail(url=f"{member.avatar_url}")
        # The ban event does not carry the responsible moderator; recovering it would
        # require an audit-log lookup, so only the banned member is logged here.
        embed.add_field(name=f"{member} was banned from the server",
                        value=f"**Account created at:** {member.created_at}")
embed.set_footer(text=f"UUID: {member.id}")
await modlogs.send(embed=embed)
@commands.Cog.listener()
async def on_member_unban(self, guild, user: discord.Member):
        data = utils.json_loader.read_json("server_config")
        member = user
        guild_ID = guild.id
        modlogs = self.bot.get_channel(data[str(guild_ID)]["mod-logID"])
        embed = discord.Embed(title="**Member Unbanned**",
                              color=member.color, timestamp=datetime.datetime.utcnow())
        embed.set_thumbnail(url=f"{member.avatar_url}")
        # As with bans, the unban event does not carry the responsible moderator,
        # so only the affected member is logged here.
        embed.add_field(name=f"{member} was unbanned from the server",
                        value=f"**Account created at:** {member.created_at}")
embed.set_footer(text=f"UUID: {member.id}")
await modlogs.send(embed=embed)
def setup(bot):
bot.add_cog(Modlog(bot))
|
the-stack_106_29679 | # -*- encoding: utf-8 -*-
"""
Build the query-parameter list, plus a helper for looking up a dict key by its value.
Author: zsyoung
Date: 2019/01/09 15:00
"""
from stationcrawl.Constants import STATION_DICT
import datetime
def build_all_list():
"""
构建参数列表
:return: [[出发日期,出发车站,到达车站]] 示例:[['2019-01-20','CUW','CQW'],['2019-01-20','CUW','CXW']]
"""
all_list = []
for i in from_station.values():
for j in to_station.values():
new_list = [train_date, i, j]
all_list.append(new_list)
print(all_list)
return all_list
def get_key(d, value):
    """
    Get the key in a dictionary for a given value.
    :param d: dictionary to search
    :param value: value to look up
    :return: the matching key, or None if the value is not present
    """
    for k, v in d.items():
        if v == value:
            return k
def get_tomorrow():
today = datetime.date.today()
oneday = datetime.timedelta(days=5)
tomorrow = today + oneday
return tomorrow
to_station = STATION_DICT
# from_station = {'重庆北': 'CUW', '重庆': 'CQW', '重庆南': 'CRW', '重庆西': 'CXW'}
from_station = {'上海': 'SHH', '上海南': 'SNH', '上海虹桥': 'AOH', '上海西': 'SXH'}
train_date = get_tomorrow()
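# Illustrative usage sketch (hypothetical entry point, not defined above):
#   queries = build_all_list()              # [[train_date, 'SHH', <station code>], ...]
#   print(get_key(from_station, 'SHH'))     # -> '上海'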
|
the-stack_106_29680 | from __future__ import absolute_import
from setuptools import setup, find_packages
from codecs import open
with open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(
name='pypsa',
version='0.18.0',
author='PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html',
author_email='[email protected]',
description='Python for Power Systems Analysis',
long_description=long_description,
long_description_content_type='text/x-rst',
url='https://github.com/PyPSA/PyPSA',
license='MIT',
packages=find_packages(exclude=['doc', 'test']),
include_package_data=True,
python_requires='>=3.6',
install_requires=[
'numpy',
'scipy',
'pandas>=0.24.0',
'xarray',
'netcdf4',
'tables',
'pyomo>=5.7,<6.1',
'matplotlib',
'networkx>=1.10',
'deprecation'
],
extras_require = {
"dev": ["pytest", "pypower", "pandapower"],
"cartopy": ['cartopy>=0.16'],
"docs": ["numpydoc", "sphinx", "sphinx_rtd_theme", "nbsphinx", "nbsphinx-link"],
'gurobipy':['gurobipy']
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
])
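# The optional dependency groups in extras_require can be installed as pip extras,
# e.g. (illustrative): pip install "pypsa[dev,cartopy]"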
|
the-stack_106_29681 | import ast
import os
import zipfile
import subprocess
import shutil
from django.http import JsonResponse
from .charts import newchart
from .app import Gldas as App
def getchart(request):
data = ast.literal_eval(request.body.decode('utf-8'))
data['instance_id'] = request.META['HTTP_COOKIE'].split('instance_id=')[1][0:9]
data['stats'] = True
return JsonResponse(newchart(data))
def uploadshapefile(request):
files = request.FILES.getlist('files')
instance_id = request.META['HTTP_COOKIE'].split('instance_id=')[1][0:9]
user_workspace = os.path.join(os.path.dirname(__file__), 'workspaces', 'user_workspaces', instance_id)
# delete old files in the directory then recreate
if os.path.exists(user_workspace):
shutil.rmtree(user_workspace)
os.mkdir(user_workspace)
# write the new files to the directory
for n, file in enumerate(files):
with open(os.path.join(user_workspace, file.name), 'wb') as dst:
for chunk in files[n].chunks():
dst.write(chunk)
# check that the user has provided geoserver settings
gs_eng = App.get_spatial_dataset_service(name='geoserver', as_engine=True)
gs_wfs = App.get_spatial_dataset_service(name='geoserver', as_wfs=True)
gs_store = 'user-uploads:' + instance_id
shp = [i for i in os.listdir(user_workspace) if i.endswith('.shp')][0].split('.')[0]
shppath = os.path.join(user_workspace, shp)
gs_eng.create_shapefile_resource(
store_id=gs_store,
shapefile_base=shppath,
overwrite=True
)
# rename the files and create a zip archive
files = os.listdir(user_workspace)
zippath = os.path.join(user_workspace, instance_id + '.zip')
archive = zipfile.ZipFile(zippath, mode='w')
for file in files:
archive.write(os.path.join(user_workspace, file), arcname=file)
archive.close()
# upload the archive to geoserver
shellpath = os.path.join(App.get_app_workspace().path, 'upload_shapefile.sh')
v1 = gs_eng.username
v2 = gs_eng.password
v3 = zippath
v4 = gs_eng.endpoint
v5 = App.package
v6 = shp
subprocess.call(['bash', shellpath, v1, v2, v3, v4, v5, v6])
return JsonResponse({'gsurl': gs_wfs, 'gsworksp': v5, 'shpname': v6})
|