import numpy as np
import pandas as pd
def sequences_to_one_hot(sequences, chars='ACGTN'):
    """One-hot encode a list of equal-length sequences.
    :param sequences: iterable of sequence strings drawn from `chars`
    :param chars: alphabet used for the encoding (default 'ACGTN')
    :return: numpy array of shape (n_sequences, sequence_length, len(chars))
    """
seqlen = len(sequences[0])
char_to_int = dict((c, i) for i, c in enumerate(chars))
one_hot_encoded = []
for seq in sequences:
onehot_seq = []
integer_encoded = [char_to_int[base] for base in seq]
for value in integer_encoded:
letter = [0 for _ in range(len(chars))]
letter[value] = 1
onehot_seq.append(letter)
one_hot_encoded.append(onehot_seq)
one_hot_encoded = np.array(one_hot_encoded)
return one_hot_encoded
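# Illustrative usage (the sequences are made up): two length-4 sequences over
# 'ACGTN' encode to an array of shape (2, 4, 5).
# one_hot = sequences_to_one_hot(['ACGT', 'ACGN'])
# print(one_hot.shape)  # -> (2, 4, 5)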
chars = 'ACGTN'
char_to_int = dict((c, i) for i, c in enumerate(chars))
int_to_char = dict((i, c) for i, c in enumerate(chars))
def char_to_int_multiple(string):
output = ''
for char in range(len(string)):
output += str(char_to_int[string[char]])
return output
def int_to_char_multiple(int_string):
output = ''
for char in range(len(int_string)):
output += str(int_to_char[int(int_string[char])])
return output
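# `max_count` is called in library_pipeline_v2 below but is not defined in this
# snippet; the sketch here is a hypothetical implementation inferred from how its
# result is later unpacked into (base, fraction, coverage) columns.
def max_count(bases):
    counts = bases.value_counts()
    return counts.index[0], counts.iloc[0] / len(bases), len(bases)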
def library_pipeline_v2(self, cutoff=5):
positions = []
bases = []
quality = []
umi_list = []
barcode_list = []
for barcode in list(self.barcode_dict.keys()):
print(barcode)
lib_barcodes_dict = self.barcode_extraction_dict(barcode)
umi_input_list = list(lib_barcodes_dict.keys())
for umi in umi_input_list:
sequences = lib_barcodes_dict[umi]
if len(sequences) >= cutoff:
for read_name in sequences:
for read in self.read_dict[read_name]:
# make sure the read has matching lengths for positions and sequence
if len(read.positions) == len(read.seq):
positions.extend(read.positions)
bases.extend(read.seq)
quality.extend(read.qual)
umi_list.extend([umi] * len(read.positions))
barcode_list.extend([barcode] * len(read.positions))
df = pd.DataFrame({
'position': positions,
'base': bases,
'coverage': quality,
'UMI': umi_list,
'barcode': barcode_list
})
group_df = df.groupby(['UMI', 'position', 'barcode'])['base'].apply(max_count)
group_df = group_df.reset_index()
group_df = group_df.fillna(0)
group_df[['base', 'fraction', 'coverage']] = pd.DataFrame(group_df['base'].tolist(), index=group_df.index)
group_df['UMI_pos'] = group_df['UMI'] + "_" + group_df['position'].astype(str)
self.umi_df = group_df
return self.umi_df |
# Generated by Django 3.2.9 on 2022-01-11 06:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('driver', '0003_driver_date'),
]
operations = [
migrations.RenameField(
model_name='driver',
old_name='contact',
new_name='email',
),
migrations.AddField(
model_name='driver',
name='phone_number',
field=models.IntegerField(blank=True, null=True),
),
]
|
# This file is part of Pynguin.
#
# Pynguin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pynguin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pynguin. If not, see <https://www.gnu.org/licenses/>.
"""Provides a base implementation of a statement representation."""
# pylint: disable=cyclic-import
from __future__ import annotations
import logging
from abc import ABCMeta, abstractmethod
from typing import Any, Optional, Set
import pynguin.testcase.statements.statementvisitor as sv
import pynguin.testcase.testcase as tc
import pynguin.testcase.variable.variablereference as vr
from pynguin.utils.generic.genericaccessibleobject import GenericAccessibleObject
class Statement(metaclass=ABCMeta):
"""An abstract base class of a statement representation."""
def __init__(
self, test_case: tc.TestCase, return_value: vr.VariableReference
) -> None:
self._test_case = test_case
self._return_value = return_value
self._logger = logging.getLogger(__name__)
@property
def return_value(self) -> vr.VariableReference:
"""Provides the return value of this statement.
:return: The return value of the statement execution
"""
return self._return_value
@return_value.setter
def return_value(self, reference: vr.VariableReference) -> None:
"""Updates the return value of this statement.
:param reference: The new return value
"""
self._return_value = reference
@property
def test_case(self) -> tc.TestCase:
"""Provides the test case in which this statement is used.
:return: The containing test case
"""
return self._test_case
@abstractmethod
def clone(self, test_case: tc.TestCase, offset: int = 0) -> Statement:
"""Provides a deep clone of this statement.
:param test_case: the new test case in which the clone will be used.
:param offset: Offset when cloning into a non empty test case.
:return: A deep clone of this statement
"""
@abstractmethod
def accept(self, visitor: sv.StatementVisitor) -> None:
"""Accepts a visitor to visit this statement."""
@abstractmethod
def accessible_object(self) -> Optional[GenericAccessibleObject]:
"""Provides the accessible which is used in this statement."""
@abstractmethod
def mutate(self) -> bool:
"""
Mutate this statement.
:return: True, if a mutation happened.
"""
@abstractmethod
def get_variable_references(self) -> Set[vr.VariableReference]:
"""Get all references that are used in this statement.
Including return values."""
def references(self, var: vr.VariableReference) -> bool:
"""Check if this statement makes use of the given variable."""
return var in self.get_variable_references()
@abstractmethod
def replace(self, old: vr.VariableReference, new: vr.VariableReference) -> None:
"""Replace the old variable with the new variable."""
def get_position(self):
"""Provides the position of this statement in the test case."""
return self._return_value.get_statement_position()
def __eq__(self, other: Any) -> bool:
raise NotImplementedError("You need to override __eq__ for your statement type")
def __hash__(self) -> int:
raise NotImplementedError(
"You need to override __hash__ for your statement type"
)
|
# copyright Caleb Michael Carlin (2018)
# Released under the GNU Lesser General Public License (LGPL)
# See LICENSE file for details.
import numpy as np
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
from itertools import cycle, count
from operator import add
import sys
class Hyperspiral(object):
"""
class for generating the positions of a 3D spiral so that we can
move through a crystal lattice for the purpose of filling the
orthorhombic representation of the crystal. Each call of tick returns
the next set of coordinates on the grid, spiraling through the values of
x and y bounded by max_x and max_y before repeating for different values
of z up to max_z.
"""
def __init__(self, max_values):
self.max_x = max_values[0]
self.max_y = max_values[1]
self.max_z = max_values[2]
self.z_range = range(-self.max_z, self.max_z + 1)
self.z_spot = 0
self.position = [0, 0, 0]
self.movement = self.spiral_movements()
def spiral_distances(self):
"""
track the distance that is traveled each loop.
"""
for distance in count(1):
for _ in (0, 1):
yield distance
def clockwise_directions(self):
"""
defines the movements that the step can take.
"""
left = (-1, 0)
right = (1, 0)
up = (0, -1)
down = (0, 1)
return cycle((right, down, left, up))
def spiral_movements(self):
"""
factory for moving.
"""
for distance, direction in zip(self.spiral_distances(),
self.clockwise_directions()):
for _ in range(distance):
yield direction
def tick(self):
"""
manages advancing the spiral by one step.
"""
# first check to see if we need to start a new layer or stop
if self.check_end_of_layer():
if len(self.z_range) != 1:
# if we have completed a spiral in a plane, then we
# need a new Z value and restarting the spiral
self.movement = self.spiral_movements()
if self.z_range[self.z_spot] != 0.0:
self.position = [0, 0, self.z_range[self.z_spot]]
self.z_spot += 1
else:
# skip the z = 0 spot since we already did that
self.position = [0, 0, self.z_range[self.z_spot + 1]]
self.z_spot += 2
else:
raise Exception('Completed spiral without filling cell')
# get the next displacement
dx, dy = next(self.movement)
self.position = list(map(add, self.position, [dx, dy, 0]))
while self.out_of_bounds():
dx, dy = next(self.movement)
self.position = list(map(add, self.position, [dx, dy, 0]))
if ((abs(self.position[0]) > self.max_x) and
(abs(self.position[1]) > self.max_y)):
break
return self.position
def out_of_bounds(self):
"""
Test to see if the next step is outside our defined box.
"""
if abs(self.position[0]) > self.max_x:
return True
if abs(self.position[1]) > self.max_y:
return True
if abs(self.position[2]) > self.max_z:
return True
return False
def check_end_of_layer(self):
"""
Checks to see if the spiral has completed all positions on a single
plane.
"""
if self.position[0] <= self.max_x:
return False
if self.position[1] <= self.max_y:
return False
return True
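# A minimal usage sketch (the bounds are illustrative): walk the first few grid
# positions of a 3x3x3 box centred on the origin.
#   spiral = Hyperspiral([1, 1, 1])
#   for _ in range(8):
#       print(spiral.tick())   # [1, 0, 0], [1, 1, 0], [0, 1, 0], ...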
|
# Generated by Django 2.1.3 on 2018-12-05 21:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("core", "0002_auto_20181205_2102"),
("pokemongo", "0004_community"),
]
operations = [
migrations.CreateModel(
name="CommunityMembershipDiscord",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("auto_import", models.BooleanField(default=True)),
],
options={
"verbose_name": "Community Discord Connection",
"verbose_name_plural": "Community Discord Connections",
},
),
migrations.AlterModelOptions(
name="community",
options={"verbose_name": "Community", "verbose_name_plural": "Communities"},
),
migrations.AddField(
model_name="communitymembershipdiscord",
name="community",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="pokemongo.Community"
),
),
migrations.AddField(
model_name="communitymembershipdiscord",
name="discord",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="core.DiscordGuild"
),
),
migrations.AddField(
model_name="community",
name="memberships_discord",
field=models.ManyToManyField(
through="pokemongo.CommunityMembershipDiscord", to="core.DiscordGuild"
),
),
]
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import uuid
from azure_devtools.perfstress_tests import PerfStressTest
from azure.identity import DefaultAzureCredential
from azure.identity.aio import DefaultAzureCredential as AsyncDefaultAzureCredential
from azure.keyvault.administration import (
KeyVaultAccessControlClient,
KeyVaultDataAction,
KeyVaultPermission,
KeyVaultRoleScope,
)
from azure.keyvault.administration.aio import KeyVaultAccessControlClient as AsyncKeyVaultAccessControlClient
class GetRoleDefinitionTest(PerfStressTest):
def __init__(self, arguments):
super().__init__(arguments)
# Auth configuration
self.credential = DefaultAzureCredential()
self.async_credential = AsyncDefaultAzureCredential()
# Create clients
vault_url = self.get_from_env("AZURE_MANAGEDHSM_URL")
self.client = KeyVaultAccessControlClient(vault_url, self.credential, **self._client_kwargs)
self.async_client = AsyncKeyVaultAccessControlClient(vault_url, self.async_credential, **self._client_kwargs)
self.role_name = uuid.uuid4()
self.scope = KeyVaultRoleScope.GLOBAL
self.permissions = [KeyVaultPermission(data_actions=[KeyVaultDataAction.CREATE_HSM_KEY])]
async def global_setup(self):
"""The global setup is run only once."""
await super().global_setup()
await self.async_client.set_role_definition(scope=self.scope, name=self.role_name, permissions=self.permissions)
async def global_cleanup(self):
"""The global cleanup is run only once."""
await self.async_client.delete_role_definition(scope=self.scope, name=self.role_name)
await super().global_cleanup()
async def close(self):
"""This is run after cleanup."""
await self.async_client.close()
await self.async_credential.close()
await super().close()
def run_sync(self):
"""The synchronous perf test."""
self.client.get_role_definition(self.scope, self.role_name)
async def run_async(self):
"""The asynchronous perf test."""
await self.async_client.get_role_definition(self.scope, self.role_name)
|
"""
Publisher node for the wheel speeds of a single mobile robot
"""
import rclpy
from rclpy.node import Node
from tf2_msgs.msg import TFMessage
from std_msgs.msg import Float32
class MinimalPublisher(Node):
def __init__(self):
super().__init__('minimal_publisher1')
self.publisher1 = self.create_publisher(Float32, '/leftMotorSpeedrobot1', 10)  # change to match the topic in the child script (String -> Float32)
self.publisher2 = self.create_publisher(Float32, '/rightMotorSpeedrobot1', 10)  # change to match the topic in the child script (String -> Float32)
self.subscription = self.create_subscription(
TFMessage,
'/tf',
self.listener_callback,
10)
# timer_period = 0.5 # seconds
# self.timer = self.create_timer(timer_period, self.timer_callback)
# self.i = 0
def listener_callback(self, msg):
if msg.transforms[0].child_frame_id == 'robot2' :
self.get_logger().info('Subscribing: "%f"' % msg.transforms[0].transform.translation.z)
msg = Float32()
msg.data = 9.0
self.publisher1.publish(msg)
self.publisher2.publish(msg)
self.get_logger().info('Publishing: "%s"' % msg.data)
# self.i += 1
def main(args=None):
rclpy.init(args=args)
minimal_publisher = MinimalPublisher()
rclpy.spin(minimal_publisher)
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
minimal_publisher.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
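# To try this node (package and executable names are assumptions for illustration):
#   ros2 run my_robot_pkg minimal_publisher1
# Whenever a /tf transform with child_frame_id 'robot2' arrives, the node publishes
# a constant 9.0 on both wheel-speed topics.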
|
# Exponential fish-spawning puzzle (memoised recursion): a fish with timer 0 resets
# to 6 and spawns a new fish with timer 8; count the total population after `days` days.
days = 256
def calculate(f, day, data):
if day == 0:
return 1
if (f,day) in data:
return data[(f,day)]
if f > 0:
result = calculate(f-1, day-1, data)
else:
result = calculate(8, day-1, data) + calculate(6, day-1, data)
data[(f,day)] = result
return result
with open('6.input') as inputFile:
fish = map(int,inputFile.readlines()[0].split(','))
data = {}
total = 0
for f in fish:
total += calculate(f, days, data)
print (total)
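# Worked check of the recurrence above (easy to verify by hand):
# calculate(0, 1, {}) == calculate(8, 0, {}) + calculate(6, 0, {}) == 2,
# i.e. a single fish with timer 0 becomes two fish after one day.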
|
import re
class EventError(Exception):
pass
class Event(object):
"""
Signal-like object for Socket.IO events that supports
filtering on channels. Registering event handlers is
performed by using the Event instance as a decorator::
@on_message
def message(request, socket, message):
...
Event handlers can also be registered for particular
channels using the channel keyword argument with a
regular expression pattern::
@on_message(channel="^room-")
def message(request, socket, message):
...
The ``on_connect`` event cannot be registered with a
channel pattern since channel subscription occurs
after a connection is established.
"""
def __init__(self, supports_channels=True):
self.supports_channels = supports_channels
self.handlers = []
def __call__(self, handler=None, channel=None):
"""
Decorates the given handler. The event may be called
with only a channel argument, in which case return a
decorator with the channel argument bound.
"""
if handler is None:
def handler_with_channel(handler):
return self.__call__(handler, channel)
return handler_with_channel
if channel:
if not self.supports_channels:
raise EventError("The %s event does not support channels so "
"the handler `%s` could not be registered" %
(self.name, handler.__name__))
channel = re.compile(channel)
self.handlers.append((handler, channel))
def send(self, request, socket, context, *args):
"""
When an event is sent, run all relevant handlers. Relevant
handlers are those without a channel pattern when the given
socket is not subscribed to any particular channel, or the
handlers with a channel pattern that matches any of the
channels that the given socket is subscribed to.
In the case of subscribe/unsubscribe, match the channel arg
being sent to the channel pattern.
"""
for handler, pattern in self.handlers:
no_channel = not pattern and not socket.channels
if self.name.endswith("subscribe") and pattern:
matches = [pattern.match(args[0])]
else:
matches = [pattern.match(c) for c in socket.channels if pattern]
if no_channel or any(matches):  # any() rather than filter(), so the truth test also works on Python 3
handler(request, socket, context, *args)
on_connect = Event(False) # request, socket, context
on_message = Event() # request, socket, context, message
on_subscribe = Event() # request, socket, context, channel
on_unsubscribe = Event() # request, socket, context, channel
on_error = Event() # request, socket, context, exception
on_disconnect = Event() # request, socket, context
on_finish = Event() # request, socket, context
# Give each event a name attribute.
for k, v in globals().items():
if isinstance(v, Event):
setattr(v, "name", k)
|
"""例子兼测试工具."""
import aiofiles
import base64
from sanic import Sanic
from sanic_jinja2 import SanicJinja2
from sanic.response import json
from sanic_mail import Sanic_Mail
app = Sanic(__name__)
jinja = SanicJinja2(app)
Sanic_Mail.SetConfig(
app,
MAIL_SENDER=<your sender email address>,
MAIL_SENDER_PASSWORD=<your password>,
MAIL_SEND_HOST=<mail server host>,
MAIL_SEND_PORT=<port>,
MAIL_TLS=<whether to use TLS>
)
sender = Sanic_Mail(app)
@app.get('/send')
async def send(request):
attachments = {}
async with aiofiles.open("source/README.md", "rb") as f:
attachments["README.md"] = await f.read()
async with aiofiles.open('source/猫.jpg', "rb") as f:
attachments['猫.jpg'] = await f.read()
await app.send_email(
targetlist="[email protected]",
subject="测试发送",
content="测试发送uu",
attachments=attachments
)
return json({"result": "ok"})
@app.get('/send_html')
async def send_html(request):
attachments = {}
msgimgs = {}
async with aiofiles.open("source/README.md", "rb") as f:
attachments["README.md"] = await f.read()
async with aiofiles.open('source/猫.jpg', "rb") as f:
attachments['猫.jpg'] = await f.read()
msgimgs['猫.jpg'] = attachments['猫.jpg']
content = jinja.env.get_template('default.html').render(
name='sanic!',pic1="猫"
)
await app.send_email(
targetlist="[email protected]",
subject="测试发送",
content=content,
html=True,
msgimgs = msgimgs,
attachments=attachments
)
return json({"result": "ok"})
if __name__ == "__main__":
app.run(host='127.0.0.1', port=5000, debug=True)
|
@bottle.get("/update/<db_name>/<doc_id>")
def update(db_name, doc_id):
db_path = os.path.join(bottle.data_store, db_name)
c = Connector(db_path)
new_data = request.get("row")
#data = c.format_data()
doc = c.data.update_one({"_id":doc_id}, {"$set":new_data})
if doc is not None:
return True
return False
@bottle.get("/delete/<db_name>/<doc_id>")
def delete(db_name, doc_id):
db_path = os.path.join(bottle.data_store, db_name)
c = Connector(db_path)
new_data = request.get("row")
#data = c.format_data()
doc = c.data.remove_one({"_id": doc_id})
if doc is not None:
return True
return False
@bottle.get("/add_row/<db_name>")
def add_row(db_name):
db_path = os.path.join(bottle.data_store, db_name)
c = Connector(db_path)
new_data = request.get("row")
#data = c.format_data()
doc = c.data.insert_one(new_data)
if doc is not None:
return True
return False
@bottle.get("/add_column/<db_name>")
def add_col(db_name):
db_path = os.path.join(bottle.data_store, db_name)
c = Connector(db_path)
new_data = request.get("row")
#data = c.format_data()
for item in c.data.find():
doc = c.data.update_one({"_id":item["_id"]}, {"$set":new_data})
doc = c.data.count(new_data)
if doc > 0:
return True
return False
|
#!/usr/bin/env python
#########################################################################################
#
# Motion correction of fMRI data.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Karun Raju, Tanguy Duval, Julien Cohen-Adad
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sys
import os
from spinalcordtoolbox.moco import ParamMoco, moco_wrapper
from spinalcordtoolbox.utils.sys import init_sct, set_global_loglevel
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, ActionCreateFolder, display_viewer_syntax, list_type
from spinalcordtoolbox.reports.qc import generate_qc
def get_parser():
# initialize parameters
# TODO: create a class ParamFmriMoco which inherits from ParamMoco
param_default = ParamMoco(group_size=1, metric='MeanSquares', smooth='0')
# parser initialisation
parser = SCTArgumentParser(
description="Motion correction of fMRI data. Some robust features include:\n"
" - group-wise (-g)\n"
" - slice-wise regularized along z using polynomial function (-p)\n"
" (For more info about the method, type: isct_antsSliceRegularizedRegistration)\n"
" - masking (-m)\n"
" - iterative averaging of target volume\n"
"\n"
"The outputs of the motion correction process are:\n"
" - the motion-corrected fMRI volumes\n"
" - the time average of the corrected fMRI volumes\n"
" - a time-series with 1 voxel in the XY plane, for the X and Y motion direction (two separate "
"files), as required for FSL analysis.\n"
" - a TSV file with the slice-wise average of the motion correction for XY (one file), that "
"can be used for Quality Control.\n"
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-i',
metavar=Metavar.file,
required=True,
help="Input data (4D). Example: fmri.nii.gz"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
'-g',
metavar=Metavar.int,
type=int,
help="Group nvols successive fMRI volumes for more robustness."
)
optional.add_argument(
'-m',
metavar=Metavar.file,
help="Binary mask to limit voxels considered by the registration metric."
)
optional.add_argument(
'-param',
metavar=Metavar.list,
type=list_type(',', str),
help=f"R|Advanced parameters. Assign value with \"=\"; Separate arguments with \",\".\n"
f" - poly [int]: Degree of polynomial function used for regularization along Z. For no regularization "
f"set to 0. Default={param_default.poly}.\n"
f" - smooth [mm]: Smoothing kernel. Default={param_default.smooth}.\n"
f" - iter [int]: Number of iterations. Default={param_default.iter}.\n"
f" - metric {{MI, MeanSquares, CC}}: Metric used for registration. Default={param_default.metric}.\n"
f" - gradStep [float]: Searching step used by registration algorithm. The higher the more deformation "
f"allowed. Default={param_default.gradStep}.\n"
f" - sampling [None or 0-1]: Sampling rate used for registration metric. "
f"Default={param_default.sampling}.\n"
f" - numTarget [int]: Target volume or group (starting with 0). Default={param_default.num_target}.\n"
f" - iterAvg [int]: Iterative averaging: Target volume is a weighted average of the "
f"previously-registered volumes. Default={param_default.iterAvg}.\n"
)
optional.add_argument(
'-ofolder',
metavar=Metavar.folder,
action=ActionCreateFolder,
default='./',
help="Output path."
)
optional.add_argument(
'-x',
choices=['nn', 'linear', 'spline'],
default='linear',
help="Final interpolation."
)
optional.add_argument(
'-r',
metavar=Metavar.int,
type=int,
choices=[0, 1],
default=1,
help="Remove temporary files. O = no, 1 = yes"
)
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"
)
optional.add_argument(
'-qc',
metavar=Metavar.folder,
action=ActionCreateFolder,
help="The path where the quality control generated content will be saved. (Note: "
"Both '-qc' and '-qc-seg' are required in order to generate a QC report.)"
)
optional.add_argument(
'-qc-seg',
metavar=Metavar.file,
help="Segmentation of spinal cord to improve cropping in qc report. (Note: "
"Both '-qc' and '-qc-seg' are required in order to generate a QC report.)"
)
optional.add_argument(
'-qc-fps',
metavar=Metavar.float,
type=float,
default=3,
help="This float number is the number of frames per second for the output gif images."
)
optional.add_argument(
'-qc-dataset',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
)
optional.add_argument(
'-qc-subject',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
)
return parser
def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_global_loglevel(verbose=verbose)
# initialization
param = ParamMoco(group_size=1, metric='MeanSquares', smooth='0')
# Fetch user arguments
param.fname_data = arguments.i
param.path_out = arguments.ofolder
param.remove_temp_files = arguments.r
param.interp = arguments.x
if arguments.g is not None:
param.group_size = arguments.g
if arguments.m is not None:
param.fname_mask = arguments.m
if arguments.param is not None:
param.update(arguments.param)
path_qc = arguments.qc
qc_fps = arguments.qc_fps
qc_dataset = arguments.qc_dataset
qc_subject = arguments.qc_subject
qc_seg = arguments.qc_seg
mutually_inclusive_args = (path_qc, qc_seg)
is_qc_none, is_seg_none = [arg is None for arg in mutually_inclusive_args]
if not (is_qc_none == is_seg_none):
raise parser.error("Both '-qc' and '-qc-seg' are required in order to generate a QC report.")
# run moco
fname_output_image = moco_wrapper(param)
set_global_loglevel(verbose) # moco_wrapper changes verbose to 0, see issue #3341
# QC report
if path_qc is not None:
generate_qc(fname_in1=fname_output_image, fname_in2=param.fname_data, fname_seg=qc_seg,
args=sys.argv[1:], path_qc=os.path.abspath(path_qc), fps=qc_fps, dataset=qc_dataset,
subject=qc_subject, process='sct_fmri_moco')
display_viewer_syntax([fname_output_image, param.fname_data], mode='ortho,ortho')
if __name__ == "__main__":
init_sct()
main(sys.argv[1:])
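# Example invocation (file names are illustrative), assuming the script is exposed
# as the sct_fmri_moco command:
#   sct_fmri_moco -i fmri.nii.gz -g 4 -m mask_fmri.nii.gz -param poly=2,smooth=1 -ofolder moco_out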
|
"""Input pipeline for the U-Seg-Net dataset.
The filenames have format "{id}.png".
"""
import os
import sys
sys.path.extend(['..'])
import numpy as np
import tensorflow as tf
from utils.utils import get_args
from utils.config import process_config
appendix = '.png'
class USegNetLoader:
"""
Class that provides dataloading for U-SegNet model.
The dataset will be [bs, im, im, 1].
Every image has its mask.
"""
def __init__(self, config):
self.config = config
data_dir = os.path.join('..', 'data', 'u_segnet', 'READY_U_SEGNET')
train_dir = os.path.join(data_dir, 'train')
eval_dir = os.path.join(data_dir, 'dev')
test_dir = os.path.join(data_dir, 'test')
# Get the file names from the train and dev sets
self.train_images = np.array([os.path.join(train_dir, 'images', f) for f in os.listdir(os.path.join(train_dir, 'images')) if f.endswith(appendix)])
self.eval_images = np.array([os.path.join(eval_dir, 'images', f) for f in os.listdir(os.path.join(eval_dir, 'images')) if f.endswith(appendix)])
self.test_images = np.array([os.path.join(test_dir, 'images', f) for f in os.listdir(os.path.join(test_dir, 'images')) if f.endswith(appendix)])
self.train_masks = np.array([os.path.join(train_dir, 'masks', f) for f in os.listdir(os.path.join(train_dir, 'masks')) if f.endswith(appendix)])
self.eval_masks = np.array([os.path.join(eval_dir, 'masks', f) for f in os.listdir(os.path.join(eval_dir, 'masks')) if f.endswith(appendix)])
self.test_masks = np.array([os.path.join(test_dir, 'masks', f) for f in os.listdir(os.path.join(test_dir, 'masks')) if f.endswith(appendix)])
assert self.train_images.shape[0] == self.train_masks.shape[0], "Training files must have the same length"
assert self.eval_images.shape[0] == self.eval_masks.shape[0], "Evaluation files must have the same length"
# Define datasets sizes
self.train_size = self.train_images.shape[0]
self.eval_size = self.eval_images.shape[0]
self.test_size = self.test_images.shape[0]
# Define number of iterations per epoch
self.num_iterations_train = (self.train_size + self.config.batch_size - 1) // self.config.batch_size
self.num_iterations_eval = (self.eval_size + self.config.batch_size - 1) // self.config.batch_size
self.num_iterations_test = (self.test_size + self.config.batch_size - 1) // self.config.batch_size
self.features_placeholder = None
self.labels_placeholder = None
self.dataset = None
self.iterator = None
self.iterator_init_op = None
self.next_batch = None
self._build_dataset_api()
@staticmethod
def _parse_function(filename, label, size):
"""Obtain the image and mask from the filename (for both training and validation).
The following operations are applied:
- Decode the image and mask from jpeg format
- Convert to float and to range [0, 1]
"""
image_string = tf.read_file(filename)
mask_string = tf.read_file(label)
# Don't use tf.image.decode_image, or the output shape will be undefined
image_decoded = tf.image.decode_jpeg(image_string, channels=1)
mask_decoded = tf.image.decode_jpeg(mask_string, channels=1)
# This will convert to float values in [0, 1]
image = tf.image.convert_image_dtype(image_decoded, tf.float32)
mask = tf.image.convert_image_dtype(mask_decoded, tf.float32)
image = tf.image.resize_images(image, [size, size])
mask = tf.image.resize_images(mask, [size, size])
return image, mask
@staticmethod
def _train_preprocess(image, mask, use_random_flip, use_random_crop, crop_factor, mode='train'):
"""Image preprocessing for training.
Apply the following operations:
- Horizontally flip the image with probability 1/2
- Apply random brightness and saturation
"""
if mode == 'train':
seed = np.random.randint(1234)
if use_random_flip:
image = tf.image.random_flip_left_right(image, seed)
mask = tf.image.random_flip_left_right(mask, seed)
if use_random_crop:
image_size = image.shape
crop_size = [crop_factor * image_size[0], crop_factor * image_size[1], 1]
image = tf.image.resize_images(tf.image.random_crop(image, crop_size, seed), image_size)
mask = tf.image.resize_images(tf.image.random_crop(mask, crop_size, seed), image_size)
image = tf.image.random_brightness(image, max_delta=32.0 / 255.0)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
# Make sure the image is still in [0, 1]
image = tf.clip_by_value(image, 0.0, 1.0)
return image, mask
def _build_dataset_api(self):
with tf.device('/cpu:0'):
self.features_placeholder = tf.placeholder(tf.string, [None, ])
self.labels_placeholder = tf.placeholder(tf.string, [None, ])
self.mode_placeholder = tf.placeholder(tf.string, shape=())
# Create a Dataset serving batches of images and labels
# We don't repeat for multiple epochs because we always train and evaluate for one epoch
parse_fn = lambda f, l: self._parse_function(
f,
l,
self.config.image_size
)
train_fn = lambda f, l: self._train_preprocess(
f,
l,
self.config.use_random_flip,
self.config.use_random_crop,
self.config.crop_factor,
self.mode_placeholder
)
self.dataset = (tf.data.Dataset.from_tensor_slices(
(self.features_placeholder, self.labels_placeholder))
.map(parse_fn, num_parallel_calls=self.config.num_parallel_calls)
.map(train_fn, num_parallel_calls=self.config.num_parallel_calls)
.batch(self.config.batch_size)
.prefetch(1) # make sure you always have one batch ready to serve
)
# Create reinitializable iterator from dataset
self.iterator = self.dataset.make_initializable_iterator()
self.iterator_init_op = self.iterator.initializer
self.next_batch = self.iterator.get_next()
def initialize(self, sess, mode='train'):
if mode == 'train':
idx = np.array(range(self.train_size))
np.random.shuffle(idx)
self.train_images = self.train_images[idx]
self.train_masks = self.train_masks[idx]
sess.run(self.iterator_init_op, feed_dict={
self.features_placeholder: self.train_images,
self.labels_placeholder: self.train_masks,
self.mode_placeholder: mode})
elif mode == 'eval':
sess.run(self.iterator_init_op, feed_dict={
self.features_placeholder: self.eval_images,
self.labels_placeholder: self.eval_masks,
self.mode_placeholder: mode})
else:
    sess.run(self.iterator_init_op, feed_dict={
        self.features_placeholder: self.test_images,
        self.labels_placeholder: self.test_masks,
        self.mode_placeholder: mode})
def get_inputs(self):
return self.next_batch
def main(config):
"""
Function to test from console
:param config:
:return:
"""
tf.reset_default_graph()
sess = tf.Session()
data_loader = USegNetLoader(config)
images, labels = data_loader.get_inputs()
print('Train')
data_loader.initialize(sess, mode='train')
out_im, out_l = sess.run([images, labels])
print(out_im.shape, out_im.dtype)
print(out_l.shape, out_l.dtype)
print('Eval')
data_loader.initialize(sess, mode='eval')
out_im, out_l = sess.run([images, labels])
print(out_im.shape, out_im.dtype)
print(out_l.shape, out_l.dtype)
print('Test')
data_loader.initialize(sess, mode='test')
out_im, out_l = sess.run([images, labels])
print(out_im.shape, out_im.dtype)
print(out_l.shape, out_l.dtype)
if __name__ == '__main__':
# capture the config path from the run arguments
# then process the json configuration file
try:
args = get_args()
config = process_config(args.config)
main(config)
except Exception as e:
print('Missing or invalid arguments %s' % e)
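# A hypothetical minimal config for this loader (the field names are the attributes
# read above; the values are illustrative assumptions):
# {
#   "batch_size": 8,
#   "image_size": 128,
#   "use_random_flip": true,
#   "use_random_crop": true,
#   "crop_factor": 0.9,
#   "num_parallel_calls": 4
# }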
|
#!/usr/bin/python
from os import walk
from os.path import join
import os
classCount = 0
labels = []
filesList = []
X0 = []
y = []
k = 0
root_path = "../images/"
#"""
for x in walk(join(root_path, "Train/")):
filescount = 0
fc = 0
if x[0].__contains__("missclassified"):
continue
if len(x[2]) > 0:
print "Processing class %i..."% (k)
k += 1
for filename in x[2]:
if filename.endswith(".jpg") or filename.endswith(".JPG") or filename.endswith(".JPEG") or filename.endswith(".jpeg"):
filesList.append(str(x[0]) + "/" + filename + " " + str(classCount))
#print join(x[0], filename)
filescount += 1
if filescount > 0:
labels.append(str(x[0]) + " " + str(classCount))
classCount += 1
else:
continue
print classCount-1, filescount
models_root = root_path
print 'Creating train labels'
f = open(join(models_root, "train.txt"), 'wb')
for lbl in filesList:
f.write(lbl + '\n')
f.close()
k = 0
X1 = []
y1 = []
classCount = 0
labels = []
filesList = []
for x in walk(join(root_path, "Val/")):
filescount = 0
fc = 0
if x[0].__contains__("missclassified"):
continue
if len(x[2]) > 0:
print "Processing class %i..."% (k)
k += 1
for filename in x[2]:
if filename.endswith(".jpg") or filename.endswith(".JPG") or filename.endswith(".JPEG") or filename.endswith(".jpeg"):
filesList.append(str(x[0]) + "/" + filename + " " + str(classCount))
#print join(x[0], filename)
filescount += 1
if filescount > 0:
labels.append(str(x[0]) + " " + str(classCount))
classCount += 1
else:
continue
print classCount-1, filescount
models_root = root_path
print 'Creating test labels'
f = open(join(models_root, "val.txt"), 'wb')
for lbl in filesList:
    f.write(lbl + '\n')
f.close()
|
# Make a plot of the "best fit" spectra to any object, of arbitrary
# template type. Only works for a single fiber.
#
# Tim Hutchinson, University of Utah, Oct 2014
# [email protected]
from time import gmtime, strftime
import numpy as n
from astropy.io import fits
import matplotlib.pyplot as p
p.interactive(True)
from astropy.convolution import convolve, Box1DKernel
from redmonster.datamgr import spec, io
from redmonster.physics import zfinder, zfitter, zpicker
from redmonster.sandbox import yanny as y
from redmonster.physics import misc
from redmonster.physics.misc import poly_array
plate = 3686 # Set plate, mjd, and fiberid here
mjd = 55268
fiberid = [89,100,102] #935, 937
specs = spec.Spec(plate=plate, mjd=mjd, fiberid=fiberid)
# Use Charlie's SSPs
ztemp = zfinder.ZFinder(fname='ndArch-ssp_em_galaxy-v000.fits', npoly=4,
zmin=-0.01, zmax=1.2)
# Use Carlos' stellar templates
#ztemp = zfinder.ZFinder(fname='ndArch-all-CAP-grids.fits', npoly=4,
#zmin=-.005, zmax=.005)
# Use spEigenstars from IDL pipeline
#ztemp = zfinder.ZFinder(fname='ndArch-spEigenStar-55734.fits', npoly=4,
#zmin=-.005, zmax=.005)
# Use Nao's quasars
#ztemp = zfinder.ZFinder(fname='ndArch-QSO-V003.fits', npoly=4, zmin=.4,
#zmax=3.5)
ztemp.zchi2(specs.flux, specs.loglambda, specs.ivar, npixstep=1)
zfit_temp = zfitter.ZFitter(ztemp.zchi2arr, ztemp.zbase)
zfit_temp.z_refine()
#temp_flags = misc.comb_flags(specs, ztemp, zfit_temp)
#zpick = zpicker.ZPicker(specs, ztemp, zfit_temp)
# Solve for parameters, create model
# import pdb; pdb.set_trace()  # debug breakpoint left disabled so the script runs through
minloc = n.unravel_index( ztemp.zchi2arr.argmin(), ztemp.zchi2arr.shape )
pmat = n.zeros( (specs.flux.shape[-1],ztemp.npoly+1) )
this_temp = ztemp.templates[minloc[1:-1]]
pmat[:,0] = this_temp[(minloc[-1]*ztemp.npixstep)+ztemp.pixoffset:
(minloc[-1]*ztemp.npixstep)+ztemp.pixoffset+
specs.flux.shape[-1]]
polyarr = poly_array(ztemp.npoly, specs.flux.shape[-1])
pmat[:,1:] = n.transpose(polyarr)
ninv = n.diag(specs.ivar[0])
f = n.linalg.solve( n.dot(n.dot(n.transpose(pmat),ninv),pmat),
n.dot( n.dot(n.transpose(pmat),ninv),specs.flux[0]) )
model = n.dot(pmat, f)
# Make plot
p.plot(10**specs.loglambda, specs.flux[0],'r', label='Data')
p.plot(10**specs.loglambda, model, 'k', label='Model', hold=True)
p.title('Plate %s Fiber %s, z=%.4f' % (plate, fiberid[0], zfit_temp.z[0][0]),
size=18)
p.xlabel(r'Wavelength ($\AA$)', size=16)
p.ylabel(r'Flux ($10^{-17}$ erg s$^{-1}$cm$^{-2}$$\AA^{-1}$)', size=16)
p.legend()
# Optionally, smooth data and then plot
'''
smoothed = convolve(specs.flux[0], Box1DKernel(5))
p.plot(10**specs.loglambda, smoothed,'r', label='Data')
p.plot(10**specs.loglambda, model, 'k', label='Model', hold=True)
p.title('Plate %s Fiber %s, z=%.4f' % (plate, fiberid[0], zfit_temp.z[0][0]),
size=18)
p.xlabel(r'Wavelength ($\AA$)', size=16)
p.ylabel(r'Flux ($10^{-17}$ erg s$^{-1}$cm$^{-2}$$\AA^{-1}$)', size=16)
p.legend()
'''
|
"""
Model Training
Author: Gerald M
Trains the model by using clean-up, pre-processing and augmentation modules.
Can be run from command line with following,
ipython -- trainmodel.py Adam 1e-4 BCE elu 1 GM_UNet 6
to dictate the following, in order:
- optimizer
- learning rate
- loss function
- activation function
- number of GPUs
- model name
- amount of augmentation
Model architecture, weights and training history are saved into a dated
directory under models.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import subprocess
import time
import datetime
import numpy as np
import pandas as pd
# Tensorflow imports
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, LambdaCallback, Callback
from tensorflow.keras.optimizers import Adam, SGD
from keras.utils import multi_gpu_model
from keras import backend as K
# import custom modules
import cleanup
import preprocessing
import losses
# Import different architectures
from architectures import nestedunetmodel
from architectures import multiresunetmodel
from architectures import unetmodel
from architectures import unetmodelv2
from architectures import unetmodelv3
# Enable auto mixed precision, which may speed up training
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)
# Alpha scheduler for modifying loss weights
# Added a delay to only start modifying alpha after set number of epochs
class AlphaScheduler(Callback):
def __init__(self, alpha, delay, function):
self.alpha = alpha
self.delay = delay
self.function = function
def on_epoch_end(self, epoch, logs=None):
updated_alpha = self.function(K.get_value(self.alpha), self.delay, epoch)
K.set_value(self.alpha, updated_alpha)
# print("End of epoch {}, alpha={}".format(epoch, self.alpha))
def reduce_alpha(alpha, delay, epoch):
if epoch < delay:
val = alpha
else:
val = np.clip(alpha - 0.01, 0.01, 1)
return val
alpha = K.variable(1., dtype='float32')
delay = 0
if __name__ == '__main__':
if len(sys.argv) > 7:  # expect the script name plus the seven positional arguments
opt_arg1 = str(sys.argv[1])
lr_arg2 = float(sys.argv[2])
loss_arg3 = str(sys.argv[3])
act_arg4 = str(sys.argv[4])
ngpu_arg5 = int(sys.argv[5])
model_arg6 = str(sys.argv[6])
aug_arg7 = int(sys.argv[7])
if opt_arg1 == 'Adam': optimizer = Adam(lr=lr_arg2)
if opt_arg1 == 'SGD': optimizer = SGD(lr=lr_arg2)
if loss_arg3 == 'BCE': loss = 'binary_crossentropy' # works best
if loss_arg3 == 'FTL': loss = losses.focal_tversky # works well but still not as good as BCE
if loss_arg3 == 'Combo': loss = losses.combo_loss
if loss_arg3 == 'MSE': loss = 'mean_squared_error'
if loss_arg3 == 'BL': loss = losses.surface_loss
if loss_arg3 == 'BCE+BL': loss = losses.bce_surface_loss(alpha)
if loss_arg3 == 'DSC+BL': loss = losses.dsc_surface_loss(alpha)
if act_arg4 == 'relu': act = 'relu'
if act_arg4 == 'elu': act = 'elu'
modelname = model_arg6
# Scale batch size to number of GPUs being used
batch = 8 * ngpu_arg5
# Choose which data to train on and how many images to augment
# 4 - works well without any noise augmentation
# 5 - perhaps will work well with noise augmentation? Does
# 6 - works best without using all memory (6 transforms so six sets of training data per transform)
n = aug_arg7
training_dir = 'data/GM_MG_combined_data_n217'
train_x, train_y, val_x, val_y = preprocessing.preprocess(training_dir, n)
# Get today's date for model saving
strdate = datetime.datetime.today().strftime('%Y_%m_%d')
savedirpath = os.path.join('models', strdate+'_'+opt_arg1+str(lr_arg2)+'_'+loss_arg3+'_'+act_arg4+'_GPUs'+str(ngpu_arg5)+'_Batch'+str(batch)+'_Aug'+str(aug_arg7)+'_'+model_arg6)
if not os.path.exists(savedirpath):
os.makedirs(savedirpath)
filepath = os.path.join(savedirpath, modelname+'_weights.best.hdf5')
# Log output to file
# log_stdout = sys.stdout
# logfile = open(os.path.join(savedirpath, 'logfile.txt'), 'w')
# sys.stdout = logfile
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
if model_arg6 == 'GM_UNet': model = unetmodelv2.unet(inputsize=(None, None, 1), optfn=optimizer, lossfn=loss, actfn=act) # Works best
if model_arg6 == 'GM_UNetv3': model = unetmodelv3.unet(inputsize=(None, None, 1), optfn=optimizer, lossfn=loss, actfn=act) # Works best
if model_arg6 == 'ZX_UNet': model = unetmodel.unet(inputsize=(None, None, 1), optfn=optimizer, lossfn=loss, actfn=act)
if model_arg6 == 'ZX_NestedUNet': model = nestedunetmodel.nestedunet(inputsize=(None, None, 1), optfn=optimizer, lossfn=loss, actfn=act)
if model_arg6 == 'ZX_MultiResUNet': model = multiresunetmodel.multiresunet(inputsize=(None, None, 1), optfn=optimizer, lossfn=loss, actfn=act) # Does not work
model.compile(optimizer=optimizer, loss=[loss], metrics=[losses.dice_loss, loss])
# Serialize model to JSON
modeljson = model.to_json()
with open(os.path.join(savedirpath, modelname+'_model.json'), 'w') as jsonfile:
jsonfile.write(modeljson)
checkpoint = ModelCheckpoint(filepath, monitor='val_dice_loss', verbose=1, save_best_only=True, mode='min')
early = EarlyStopping(monitor='val_dice_loss', mode='min', patience=30, verbose=1)
redonplat = ReduceLROnPlateau(monitor='val_dice_loss', mode='min', patience=10, verbose=1)
modalpha = AlphaScheduler(alpha, delay, reduce_alpha)
callbacks_list = [checkpoint, early, redonplat, modalpha]
# Capture the Git repo status being executed
gitstr = subprocess.check_output('git log -1'.split()).decode()
print('Training on following git commit...')
print(gitstr)
tstart = time.time()
history = model.fit(train_x, train_y,
validation_data=(val_x, val_y),
batch_size=batch,
epochs=250,
shuffle=True,
callbacks=callbacks_list)
tend = time.time()
# Write out the training history to file
pd.DataFrame(history.history).to_csv(os.path.join(savedirpath, 'trainhistory.csv'))
cleanup.clean()
# Plot out to see progress
# import matplotlib.pyplot as plt
# plt.plot(history.history['dice_loss'])
# plt.plot(history.history['val_dice_loss'])
# plt.title('Dice loss')
# plt.ylabel('Loss')
# plt.xlabel('Epoch')
# plt.legend(['Train', 'Test'], loc='upper right')
# plt.show()
minutes, seconds = divmod(tend-tstart, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
print('============================================')
print('')
print(' Model: {}'.format(model_arg6))
print(' Data set: {}'.format(training_dir))
print('Augmentation n: {} w/ training {}, val {}'.format(n, len(train_x), len(val_x)))
print(' Batch per GPU: {}'.format(batch))
print(' N GPUs: {}'.format(ngpu_arg5))
print(' Optimiser: {}'.format(opt_arg1))
print(' Loss: {}'.format(loss_arg3))
print(' Activation: {}'.format(act_arg4))
print(' Learning rate: {}'.format(lr_arg2))
print('Best val. loss: {:0.4f}'.format(np.min(history.history['val_dice_loss'])))
print('Execution time: {:0.0f} days {:0.0f} hrs {:0.0f} mins {:0.0f} secs'.format(days, hours, minutes, seconds))
print('')
print('============================================')
# Log output to file
log_stdout = sys.stdout
logfile = open(os.path.join(savedirpath, 'logfile.txt'), 'w')
sys.stdout = logfile
# Capture the Git repo status being executed
gitstr = subprocess.check_output('git log -1'.split()).decode()
print('Training on following git commit...')
print(gitstr)
print('')
print('============================================')
print('')
print(' Model: {}'.format(model_arg6))
print(' Data set: {}'.format(training_dir))
print('Augmentation n: {} w/ training {}, val {}'.format(n, len(train_x), len(val_x)))
print(' Batch per GPU: {}'.format(batch))
print(' N GPUs: {}'.format(ngpu_arg5))
print(' Optimiser: {}'.format(opt_arg1))
print(' Loss: {}'.format(loss_arg3))
print(' Activation: {}'.format(act_arg4))
print(' Learning rate: {}'.format(lr_arg2))
print('Best val. loss: {:0.4f}'.format(np.min(history.history['val_dice_loss'])))
print('Execution time: {:0.0f} days {:0.0f} hrs {:0.0f} mins {:0.0f} secs'.format(days, hours, minutes, seconds))
print('')
print('============================================')
sys.stdout = log_stdout
logfile.close()
|
##
# Copyright (c) 2009-2017 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##
"""
RFC 5397 (WebDAV Current Principal Extension) XML Elements
This module provides XML element definitions for use with the
DAV:current-user-principal property.
See RFC 5397: http://www.ietf.org/rfc/rfc5397.txt
"""
__all__ = []
from txdav.xml.base import WebDAVElement, dav_namespace
from txdav.xml.element import registerElement, registerElementClass
@registerElement
@registerElementClass
class CurrentUserPrincipal(WebDAVElement):
"""
Current principal information
"""
name = "current-user-principal"
allowed_children = {
(dav_namespace, "href"): (0, 1),
(dav_namespace, "unauthenticated"): (0, 1),
}
|
import os
import logging
import h5py
import numpy as np
from skimage.io import imread
from tqdm import tqdm
from constants import HEIGHT, WIDTH
logging.getLogger('tensorflow').setLevel(logging.WARNING)
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%H:%M:%S', )
logger = logging.getLogger(__name__)
MAIN_DIR = os.path.abspath("data")
TRAIN_DIR = os.path.join(MAIN_DIR, 'train')
TEST_DIR = os.path.join(MAIN_DIR, 'test')
MASK_DIR = os.path.join(MAIN_DIR, 'train_masks')
TRAIN_FILE = os.path.join(MAIN_DIR, "train.h5")
TEST_FILE = os.path.join(MAIN_DIR, "test.h5")
class Dataset:
def __init__(self):
pass
@staticmethod
def read_img(fname):
return imread(fname).astype(np.uint8)
def cache_train(self):
logger.info('Creating cache file for train')
train_files = sorted(os.listdir(TRAIN_DIR))
train_size = len(train_files)
file = h5py.File(TRAIN_FILE, 'w')
x_data = file.create_dataset('x_data', shape=(train_size, HEIGHT, WIDTH, 3), dtype=np.uint8)
y_data = file.create_dataset('y_data', shape=(train_size, HEIGHT, WIDTH, 1), dtype=np.uint8)
names = file.create_dataset('names', shape=(train_size,), dtype=h5py.special_dtype(vlen=str))
logger.info(f'There are {train_size} files in train')
for i, fn in tqdm(enumerate(train_files), total=train_size):
img = self.read_img(os.path.join(TRAIN_DIR, fn))
x_data[i, :, :, :] = img
y_data[i, :, :, :] = imread(os.path.join(MASK_DIR, fn.replace('.jpg', '_mask.gif'))).reshape(HEIGHT, WIDTH, 1)
names[i] = fn
file.close()
def cache_test(self):
logger.info('Creating cache file for test')
file = h5py.File(TEST_FILE, 'w')
test_files = sorted(os.listdir(TEST_DIR))
test_size = len(test_files)
x_data = file.create_dataset('x_data', shape=(test_size, HEIGHT, WIDTH, 3), dtype=np.uint8)
names = file.create_dataset('names', shape=(test_size,), dtype=h5py.special_dtype(vlen=str))
logger.info(f'There are {test_size} files in test')
for i, fn in tqdm(enumerate(test_files), total=test_size):
img = self.read_img(os.path.join(TEST_DIR, fn))
x_data[i, :, :, :] = img
names[i] = fn
file.close()
def cache(self):
self.cache_train()
self.cache_test()
if __name__ == '__main__':
Dataset().cache()
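# Illustrative read-back of the cached arrays (dataset names come from the file
# layout created above):
# with h5py.File(TRAIN_FILE, 'r') as f:
#     imgs = f['x_data'][:4]      # (4, HEIGHT, WIDTH, 3) uint8 images
#     masks = f['y_data'][:4]     # (4, HEIGHT, WIDTH, 1) uint8 masks
#     names = list(f['names'][:4])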
|
#!/opt/anaconda/bin/python
# The runSnap class reads the files contained in a single folder,
# sorts them in descending order by date, builds the pairs used to
# start SNAP, and finally creates the file name to associate with
# SNAP's output.
import subprocess
import os,sys
import cioppy
import string
ciop = cioppy.Cioppy()
# define the exit codes - need to be better assessed
SUCCESS = 0
ERR_FAILED = 134
# add a trap to exit gracefully
def clean_exit(exit_code):
log_level = 'INFO'
if exit_code != SUCCESS:
log_level = 'ERROR'
msg = { SUCCESS: 'Download successfully concluded',
ERR_FAILED: 'Unable to complete the download'}
ciop.log(log_level, msg[exit_code])
def main():
outdir=ciop.tmp_dir
input = sys.stdin.readlines()
print "il file di input e': ", input
try:
input_file = input[0][string.find(input[0], "'")+1:string.rfind(input[0],"'")]
print "input file pulito dall a capo: ",input_file
#print "sys.stdin ", input
#for input in sys.stdin:
#print "sys.stdin ", input
process=subprocess.Popen(['opensearch-client',input_file,'enclosure'], stdout=subprocess.PIPE)
out, err=process.communicate()
output_file=ciop.copy(out,outdir, extract=False)
print output_file
res = ciop.publish(output_file, metalink=False)
print 'result from publish to hdfs: ', res
#res = ciop.publish(output_file, mode='silent', metalink=True)
#print 'result from publish string: ', res
#subprocess.call(["ls","-l",res])
subprocess.call(["ls","-l",output_file])
except:
print "unexpected error...exiting"
try:
main()
except SystemExit as e:
if e.args[0]:
clean_exit(e.args[0])
raise
#else:
# atexit.register(clean_exit, 0)
#ciop.publish(outdef, metalink = true)
|
#!/usr/bin/env python3
import sys
import lzma
import re
import msgpack
def parse_args(args):
if not args:
return {}
d = {}
args = args.split(',')
for arg in args:
k, v = arg.split('=', 1)
d[k] = v
return d
def parse_scope(scope):
RE = re.compile(r"^(?P<scope>[a-zA-Z\.]+)(\[(?P<args>[a-zA-Z0-9,=&|\(\)]+)\])?$")
m = RE.match(scope)
if not m:
raise Exception("bad scope")
m = m.groupdict()
args = parse_args(m['args'])
return m['scope'], args
def match_meta(meta, args):
for k, v in args.items():
if k not in meta or meta[k] != v:
return False
return True
def main():
fpath = sys.argv[1]
scope = sys.argv[2]
scope = parse_scope(scope)
with lzma.open(fpath, 'rb') as fh:
it = msgpack.Unpacker(fh, raw=False)
for fpath, meta in it:
if meta['scope'] != scope[0]:
continue
if not match_meta(meta, scope[1]):
continue
print(fpath)
if __name__ == '__main__':
main()
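# Example invocation (file name, scope and metadata keys are illustrative): given
# an .xz-compressed msgpack stream of (path, meta) pairs, running
#   ./filter.py index.msgpack.xz "data.images[split=train,format=png]"
# prints every path whose meta has scope "data.images" with split=train and format=png.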
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 10:56:13 2019
@author: nico
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import cmath
#%% Plot clean-up
#os.system ("clear") # clears the python terminal
#plt.close("all") # closes all open figures
#%% WORKING WITH THE FFT
# This function receives the FFT of the signal and plots it, controlling several parameters:
# y1l: vertical label of the magnitude subplot
# y2l: vertical label of the phase subplot
# p1t: title of the magnitude subplot
# p2t: title of the phase subplot
# tp: title of the figure
# loc1: legend location in the magnitude subplot
# loc2: legend location in the phase subplot
# c: plot colour, red by default ['r', 'b', 'g', 'c', 'm', 'y', 'k']
# l: label name
# db: plot the magnitude in dB if 'ON', or in linear units if 'off'; enabled by default
# tipo: selects plot or stem; plot by default
# m: marker, '-' by default
# ls: linestyle, 'None' by default (i.e. the interpolation)
# col_ax: enables or disables the axis colour; on by default
def plotFFT (fftsignal, fs, norm=0, y1l='Amplitud Normlizada [db] ', y2l='Fase [rad] ', p1t=' ',
p2t=' ', tp="FFT de la señal", loc1='upper right', loc2='upper right', c=0,
l=' ', db='ON', tipo='plot', m='.',ls='None', col_ax = 'on') :
mod_signal, fase_signal = Mod_and_Angle_signal (fftsignal, db, norm)
N =len(fftsignal)
df= fs / N
col= ['r', 'b', 'g', 'c', 'm', 'y', 'k']
#%% FFT plotting
plt.figure(tp)
plt.subplot(2,1,1)
freq = np.linspace(0, (N-1)*df, N) / fs
if tipo == 'stem':
plt.stem(freq[0:int(N/2)], mod_signal[0:int(N/2)], col[c], label='modulo '+ l,
marker=m, linestyle=ls)
else:
plt.plot(freq[0:int(N/2)], mod_signal[0:int(N/2)], col[c], label='modulo '+ l,
marker=m, linestyle=ls)
plt.xlabel('frecuecnia normalizada f/fs [Hz]')
plt.ylabel(y1l)
if col_ax == 'ON'or col_ax == 'on' :
plt.axhline(0, color="black")
plt.axvline(0, color="black")
#plt.xlim((0.2,0.3))
plt.grid()
plt.title('Modulo de la señal '+p1t)
plt.legend(loc = loc1)
plt.subplot(2,1,2)
if tipo == 'stem':
plt.stem(freq[0:int(N/2)], fase_signal[0:int(N/2)], col[c], label='fase '+ l,
marker=m, linestyle=ls)
else:
plt.plot(freq[0:int(N/2)], fase_signal[0:int(N/2)], col[c], label='fase '+ l,
marker=m, linestyle=ls)
plt.xlabel('frecuecnia normalizada f/fs [Hz]')
plt.ylabel(y2l)
if col_ax == 'ON'or col_ax == 'on' :
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.grid()
plt.title('fase de la señal '+p2t)
plt.legend(loc = loc2)
plt.tight_layout() # adjust the layout, otherwise the titles overlap
plt.show()
return 0
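#%% Illustrative usage (the signal parameters are made up): plot the spectrum of a
# 10 Hz sine sampled at fs = 1000 Hz.
# fs = 1000
# t = np.arange(0, 1, 1/fs)
# x = np.sin(2 * np.pi * 10 * t)
# plotFFT(np.fft.fft(x), fs, l='10 Hz sine')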
#%% my DFT
# This function computes the DFT of a signal of length N;
# it only needs the signal to transform as its parameter.
def myDFT(signal):
    N = len(signal)
    W = np.zeros((N, N), dtype=complex)  # allocate and clear the coefficient matrix
    for k in range(N):
        for n in range(N):
            W[k][n] = cmath.exp(-1j * 2 * np.pi * k * n / N)  # compute the twiddle factors
    Signal = np.dot(W, signal)  # multiply the DFT matrix by the signal vector
    return Signal
#%% convert the signal to dB and normalise it
# Splits the transformed signal into magnitude and phase.
# By default the magnitude is returned in dB and normalised; pass db='off' to keep linear units.
def Mod_and_Angle_signal (fftsignal, db='ON', norm=0) :
if norm == 0 :
norm =len(fftsignal)
mod_signal = np.abs(fftsignal) *2 / norm
fase_signal = np.angle(fftsignal)
if db == 'ON'or db == 'on' :
mod_signal = 20 *np.log10(mod_signal)
return mod_signal, fase_signal |
"""Connector"""
from typing import Any, Dict, Optional
from .connector import Connector
from .generator import ConfigGenerator, ConfigGeneratorUI
__all__ = ["Connector", "ConfigGenerator", "ConfigGeneratorUI", "connect"]
def connect(
config_path: str,
*,
update: bool = False,
_auth: Optional[Dict[str, Any]] = None,
_concurrency: int = 1,
**kwargs: Any,
) -> Connector:
"""Connect to a website.
Parameters
----------
config_path
The path to the config. It can be hosted, e.g. "yelp", or from
local filesystem, e.g. "./yelp"
_auth: Optional[Dict[str, Any]] = None
The parameters for authentication, e.g. OAuth2
_concurrency: int = 1
    The concurrency setting. By default it is 1 req/sec.
update: bool = False
    Force update the config file even if the local version exists.
**kwargs
Parameters that shared by different queries.
Returns
-------
Connector
a Connector object.
Example
-------
>>> from dataprep.connector import connect
>>> dc = connect("yelp", _auth={"access_token": access_token}, _concurrency=3)
"""
return Connector(config_path, update=update, _auth=_auth, _concurrency=_concurrency, **kwargs)
def config_generator_ui(existing: Optional[Dict[str, Any]] = None) -> None:
"""Create a Config Generator UI.
Parameters
----------
existing: Optional[Dict[str, Any]] = None
Optionally pass in an existing configuration.
"""
ConfigGeneratorUI(existing).display()
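# Minimal usage sketch (assumes a Jupyter notebook where the widget can render):
# >>> from dataprep.connector import config_generator_ui
# >>> config_generator_ui()  # opens an empty generator; pass an existing config dict to edit it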
|
#
# PyNetlist is an open source framework
# for object-oriented electronic circuit synthesis,
# published under the MIT License (MIT).
#
# Copyright (c) 2015 Jonathan Binas
#
from base import File
import spice
|
from flask_restx import Namespace, fields
user_api = Namespace('user', description='access information about users')
user_dto = user_api.model('user', {
'id': fields.String,
'email': fields.String(required=True, description='the users email address'),
'email_verified': fields.Boolean(required=True),
'is_disabled': fields.Boolean(required=True)
}) |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def flatten(self, root):
"""
:type root: TreeNode
:rtype: void Do not return anything, modify root in-place instead.
"""
# if not root:
# return
# nodes = []
# self.preorder_traverse(nodes, root)
# for i in range(len(nodes)-1):
# nodes[i].left = None
# nodes[i].right = nodes[i+1]
# nodes[-1].left = None
# nodes[-1].right = None
# def preorder_traverse(self, nodes, node):
# if not node:
# return
# nodes.append(node)
# self.preorder_traverse(nodes, node.left)
# self.preorder_traverse(nodes, node.right)
self.helper(root, None)
def helper(self, root, tail):
if not root:
return tail
root.right = self.helper(root.left, self.helper(root.right, tail))
root.left = None
return root
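# Worked example (sketch): helper(node, tail) returns the subtree rooted at `node` flattened
# into a right-only list in preorder, with `tail` attached after its last node.
#
#       1                 1
#      / \                 \
#     2   5       ->        2 -> 3 -> 4 -> 5 -> 6
#    / \   \
#   3   4   6      (each arrow is a .right pointer; every .left becomes None)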
|
#!/usr/bin/python3
# model_generator.py
def generate_model():
    # Minimal placeholder: the layer sizes and input shape below are assumptions, not from the original source.
    from tensorflow import keras  # imported lazily so the module can be imported without TensorFlow
    model = keras.Sequential([keras.layers.Dense(32, activation="relu", input_shape=(10,)),
                              keras.layers.Dense(1)])
    return model
|
# (5A)###################################################
db.define_table(
'lab_tracking',
YES_NO_FIELD,
)
db.define_table(
'lab_tracking_chart',
Field('choose_file', 'upload', uploadfield='file_data'),
Field('file_data', 'blob'),
Field('file_description', requires=IS_NOT_EMPTY()),
)
db.define_table(
'image_tracking',
YES_NO_FIELD,
)
db.define_table(
'image_tracking_chart',
Field('choose_file', 'upload', uploadfield='file_data'),
Field('file_data', 'blob'),
Field('file_description', requires=IS_NOT_EMPTY()),
)
db.define_table(
'referral_tracking',
YES_NO_FIELD,
)
db.define_table(
'referral_tracking_chart',
Field('choose_file', 'upload', uploadfield='file_data'),
Field('file_data', 'blob'),
Field('file_description', requires=IS_NOT_EMPTY()),
)
db.define_table(
'lab_follow_up',
YES_NO_FIELD,
)
db.define_table(
'image_follow_up',
YES_NO_FIELD,
)
db.define_table(
'referral_follow_up',
YES_NO_FIELD,
)
db.define_table(
'developmental_screening',
Field("patient_name", requires=IS_NOT_EMPTY()),
Field("patient_dob", "date", label="Patient DOB", requires=DATE_VALIDATOR),
Field("service_date", "date", label="Service Date", requires=DATE_VALIDATOR),
*SCREENSHOT_FIELDS
)
db.define_table(
'lab_follow_up_normal_example',
Field("patient_name", requires=IS_NOT_EMPTY()),
Field("patient_dob", "date", label="Patient DOB", requires=DATE_VALIDATOR),
Field("service_date", "date", label="Service Date", requires=DATE_VALIDATOR),
*SCREENSHOT_FIELDS
)
db.define_table(
'lab_follow_up_abnormal_example',
Field("patient_name", requires=IS_NOT_EMPTY()),
Field("patient_dob", "date", label="Patient DOB", requires=DATE_VALIDATOR),
Field("service_date", "date", label="Service Date", requires=DATE_VALIDATOR),
*SCREENSHOT_FIELDS
)
db.define_table(
'image_follow_up_normal_example',
Field("patient_name", requires=IS_NOT_EMPTY()),
Field("patient_dob", "date", label="Patient DOB", requires=DATE_VALIDATOR),
Field("service_date", "date", label="Service Date", requires=DATE_VALIDATOR),
*SCREENSHOT_FIELDS
)
db.define_table(
'image_follow_up_abnormal_example',
Field("patient_name", requires=IS_NOT_EMPTY()),
Field("patient_dob", "date", label="Patient DOB", requires=DATE_VALIDATOR),
Field("service_date", "date", label="Service Date", requires=DATE_VALIDATOR),
*SCREENSHOT_FIELDS
)
db.define_table(
'referral_follow_up_normal_example',
Field("patient_name", requires=IS_NOT_EMPTY()),
Field("patient_dob", "date", label="Patient DOB", requires=DATE_VALIDATOR),
Field("service_date", "date", label="Service Date", requires=DATE_VALIDATOR),
*SCREENSHOT_FIELDS
)
db.define_table(
'referral_follow_up_abnormal_example',
Field("patient_name", requires=IS_NOT_EMPTY()),
Field("patient_dob", "date", label="Patient DOB", requires=DATE_VALIDATOR),
Field("service_date", "date", label="Service Date", requires=DATE_VALIDATOR),
*SCREENSHOT_FIELDS
)
# (5B)###################################################
db.define_table(
'referral_blurb',
YES_NO_FIELD,
)
db.define_table(
'specialist_order_example',
Field('choose_file', 'upload', uploadfield='file_data'),
Field('file_data', 'blob'),
Field('file_description', requires=IS_NOT_EMPTY()),
)
db.define_table(
'psych_order_example',
Field('choose_file', 'upload', uploadfield='file_data'),
Field('file_data', 'blob'),
Field('file_description', requires=IS_NOT_EMPTY()),
)
# (5C)###################################################
db.define_table(
'er_ip_log',
Field('choose_file', 'upload', uploadfield='file_data'),
Field('file_data', 'blob'),
Field('file_description', requires=IS_NOT_EMPTY()),
)
# (6A)###################################################
|
from pyramid.security import Allow, Everyone, Authenticated
class ScorecardRecordFactory(object):
__acl__ = [(Allow, Everyone, "view"), (Allow, Authenticated, "create")]
def __init__(self, request):
pass
|
from django.apps import AppConfig
class ShopManagementConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "shop_management"
    verbose_name = "مدیریت فروشگاهها"  # "Store management"
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
import uuid
class NetworkManagementClientOperationsMixin(object):
def check_dns_name_availability(
self, location, domain_name_label=None, custom_headers=None, raw=False, **operation_config):
"""Checks whether a domain name in the cloudapp.net zone is available for
use.
:param location: The location of the domain name.
:type location: str
:param domain_name_label: The domain name to be verified. It must
conform to the following regular expression:
^[a-z][a-z0-9-]{1,61}[a-z0-9]$.
:type domain_name_label: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DnsNameAvailabilityResult or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.network.v2017_03_01.models.DnsNameAvailabilityResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2017-03-01"
# Construct URL
url = self.check_dns_name_availability.metadata['url']
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if domain_name_label is not None:
query_parameters['domainNameLabel'] = self._serialize.query("domain_name_label", domain_name_label, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DnsNameAvailabilityResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
check_dns_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/CheckDnsNameAvailability'}
|
from website.models import WikumUser
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
users = WikumUser.objects.all()
for user in users:
user.comments_read = ''
user.save() |
# Generated by Django 2.1.2 on 2018-10-16 08:29
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Datapoint',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('index', models.IntegerField()),
('data', models.TextField()),
],
),
migrations.CreateModel(
name='Dataset',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('columns', models.TextField()),
('display_columns', models.TextField()),
('num_user_labels', models.IntegerField()),
],
),
migrations.CreateModel(
name='Label',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
                ('dataset', models.ForeignKey(on_delete=models.CASCADE, related_name='labels', to='dataset.Dataset')),
],
),
migrations.CreateModel(
name='UserLabel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('datapoint', models.ForeignKey(on_delete=models.CASCADE, related_name='user_labels', to='dataset.Datapoint')),
                ('label', models.ForeignKey(on_delete=models.CASCADE, to='dataset.Label')),
                ('user', models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='datapoint',
name='dataset',
            field=models.ForeignKey(on_delete=models.CASCADE, related_name='datapoints', to='dataset.Dataset'),
),
]
|
# -*- coding: utf-8 -*-
"""
Utilities for helping to plot cellpy-data.
"""
import os
from io import StringIO
import sys
import warnings
import importlib.util
import logging
import itertools
import collections
from pathlib import Path
from cellpy.parameters.internal_settings import (
get_headers_summary,
get_headers_normal,
get_headers_step_table,
get_headers_journal,
)
from cellpy.utils import helpers
try:
import matplotlib.pyplot as plt
plt_available = True
except ImportError:
plt_available = False
try:
from holoviews import opts
import holoviews as hv
from holoviews.plotting.links import RangeToolLink
hv_available = True
except ImportError:
hv_available = False
bokeh_available = importlib.util.find_spec("bokeh") is not None
# logger = logging.getLogger(__name__)
logging.captureWarnings(True)
SYMBOL_DICT = {
"all": [
"s",
"o",
"v",
"^",
"<",
">",
"D",
"p",
"*",
"1",
"2",
".",
",",
"3",
"4",
"8",
"p",
"d",
"h",
"H",
"+",
"x",
"X",
"|",
"_",
],
"simple": ["s", "o", "v", "^", "<", ">", "*", "d"],
}
COLOR_DICT = {
"classic": ["b", "g", "r", "c", "m", "y", "k"],
"grayscale": ["0.00", "0.40", "0.60", "0.70"],
"bmh": [
"#348ABD",
"#A60628",
"#7A68A6",
"#467821",
"#D55E00",
"#CC79A7",
"#56B4E9",
"#009E73",
"#F0E442",
"#0072B2",
],
"dark_background": [
"#8dd3c7",
"#feffb3",
"#bfbbd9",
"#fa8174",
"#81b1d2",
"#fdb462",
"#b3de69",
"#bc82bd",
"#ccebc4",
"#ffed6f",
],
"ggplot": [
"#E24A33",
"#348ABD",
"#988ED5",
"#777777",
"#FBC15E",
"#8EBA42",
"#FFB5B8",
],
"fivethirtyeight": ["#30a2da", "#fc4f30", "#e5ae38", "#6d904f", "#8b8b8b"],
"seaborn-colorblind": [
"#0072B2",
"#009E73",
"#D55E00",
"#CC79A7",
"#F0E442",
"#56B4E9",
],
"seaborn-deep": ["#4C72B0", "#55A868", "#C44E52", "#8172B2", "#CCB974", "#64B5CD"],
"seaborn-bright": [
"#003FFF",
"#03ED3A",
"#E8000B",
"#8A2BE2",
"#FFC400",
"#00D7FF",
],
"seaborn-muted": ["#4878CF", "#6ACC65", "#D65F5F", "#B47CC7", "#C4AD66", "#77BEDB"],
"seaborn-pastel": [
"#92C6FF",
"#97F0AA",
"#FF9F9A",
"#D0BBFF",
"#FFFEA3",
"#B0E0E6",
],
"seaborn-dark-palette": [
"#001C7F",
"#017517",
"#8C0900",
"#7600A1",
"#B8860B",
"#006374",
],
}
hdr_summary = get_headers_summary()
hdr_raw = get_headers_normal()
hdr_steps = get_headers_step_table()
hdr_journal = get_headers_journal()
def _hv_bokeh_available():
if not hv_available:
print("You need holoviews. But I cannot load it. Aborting...")
return False
if not bokeh_available:
print("You need Bokeh. But I cannot find it. Aborting...")
return False
return True
def find_column(columns, label=None, end="cycle_index"):
"""find columns based on how the column-name ends.
Args:
columns: pandas columns
label: if not provided, generate, if provided, return as is
end: the string to use for searching
Returns:
column header, label
"""
hdr = None
lab = None
for col in columns:
if col.endswith(end):
hdr = col
if label is None:
lab = col.replace("_", " ")
else:
lab = label
break
return hdr, lab
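# Small usage sketch (hypothetical column names):
# >>> find_column(["charge_capacity_mean", "cycle_index"], end="cycle_index")
# ('cycle_index', 'cycle index')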
def plot_concatenated(
dataframe,
x=None,
y=None,
err=None,
xlabel=None,
ylabel=None,
points=True,
line=True,
errors=True,
hover=True,
width=800,
height=300,
journal=None,
file_id_level=0,
hdr_level=None,
axis=1,
mean_end="_mean",
std_end="_std",
cycle_end="cycle_index",
legend_title="cell-type",
marker_size=None,
cmap="default_colors",
spread=False,
extension="bokeh",
edges=False,
keys=None,
simple=False,
**kwargs,
):
"""Create a holoviews plot of the concatenated summary.
This function is still under development. Feel free to contribute.
Args:
dataframe: the concatenated summary
        x: column-name for the x variable (not implemented yet)
        y: column-name for the y variable (not implemented yet)
        err: column-name for the std variable (not implemented yet)
xlabel: label for x-axis
ylabel: label for y-axis
points (bool): plot points if True
line (bool): plot line if True
errors (bool): plot errors if True
hover (bool): add hover tool if True
width: width of plot
height: height of plot
journal: batch.journal object
file_id_level: the level (multiindex-level) where the cell-names are.
hdr_level: the level (multiindex-level) where the parameter names are.
axis: what axis to use when looking in the data-frame (row-based or col-based).
mean_end: used for searching for y-column names
std_end: used for searching for e-column names
cycle_end: used for searching for x-column name
legend_title: title to put over the legends
marker_size: size of the markers used
cmap: color-map to use
spread (bool): plot error-bands instead of error-bars if True
        extension (str): "matplotlib" or "bokeh". Note, this uses hv.extension() and will affect the
state of your notebook
edges (bool): show all axes
keys (dict): columns to plot (not working yet)
simple (bool): making a simple hv.Overlay instead of an hv.NdOverlay if True
**kwargs: key-word arguments sent to hv.NdOverlay
Example:
>>> my_mpl_plot = plot_concatenated(
>>> cap_cycle_norm_fast_1000, journal=b.experiment.journal,
>>> height=500, marker_size=5,
>>> extension="matplotlib",
>>> edges=True,
>>> )
>>> my_bokeh_plot = plot_concatenated(
>>> cap_cycle_norm_fast_1000, journal=b.experiment.journal,
>>> height=500, marker_size=5,
>>> edges=True,
>>> )
Example:
>>> # Simple conversion from bokeh to matplotlib
>>> # NB! make sure you have only used matplotlib-bokeh convertable key-words (not marker_size)
>>> hv.extension("matplotlib")
>>> my_plot.opts(aspect="auto", fig_inches=(12,7), fig_size=90, legend_position="top_right",
>>> legend_cols = 2,
>>> show_frame=True)
"""
# TODO: add option for using labels from journal in the legend
if keys is None:
keys = dict()
if not hv_available:
print(
"This function uses holoviews. But could not import it."
"So I am aborting..."
)
return
if extension == "matplotlib":
hover = False
elif extension == "plotly":
print("The plotly backend might not work properly yet.")
print("Fingers crossed.")
print(
"(at least, make sure you are using the most recent versions of jupyter, holoviews and plotly)"
)
try:
current_extension = hv.Store.current_backend
if extension != current_extension:
hv.extension(
extension, logo=False,
)
except Exception as e:
hv.extension(
extension, logo=False,
)
if hdr_level is None:
hdr_level = 0 if file_id_level == 1 else 1
averaged = True
columns = list(set(dataframe.columns.get_level_values(hdr_level)))
hdr_x, lab_x = find_column(columns, label=xlabel, end=cycle_end)
hdr_y, lab_y = find_column(columns, label=ylabel, end=mean_end)
if hdr_y is None:
averaged = False
errors = False
if hdr_x is not None:
columns.remove(hdr_x)
hdr_y = columns[0]
if ylabel is None:
lab_y = hdr_y.replace("_", " ")
else:
lab_y = ylabel
if errors:
hdr_e, _ = find_column(columns, end=std_end)
grouped = dataframe.groupby(axis=axis, level=file_id_level)
curve_dict = dict()
if not averaged and journal is not None:
journal_pages = journal.pages[
[hdr_journal["group"], hdr_journal["sub_group"]]
].copy()
journal_pages["g"] = 0
journal_pages["sg"] = 0
markers = itertools.cycle(["s", "o", "<", "*", "+", "x"])
colors = itertools.cycle(hv.Cycle(cmap).values)
j = journal_pages.groupby(hdr_journal["group"])
for i, (jn, jg) in enumerate(j):
journal_pages.loc[journal_pages["group"] == jn, "g"] = i
journal_pages.loc[journal_pages["group"] == jn, "sg"] = list(range(len(jg)))
markers = [next(markers) for _ in range(journal_pages["sg"].max() + 1)]
colors = [next(colors) for _ in range(journal_pages["g"].max() + 1)]
journal_pages = journal_pages[["g", "sg"]]
for i, (name, group) in enumerate(grouped):
if name in keys:
label = keys[name]
else:
label = name
keys[name] = name
group.columns = group.columns.droplevel(file_id_level)
if hdr_x is None:
group = group.reset_index()
hdr_x = group.columns[0]
if lab_x is None:
lab_x = hdr_x.replace("_", " ")
if not averaged and journal is not None:
g = journal_pages.loc[name, "g"]
sg = journal_pages.loc[name, "sg"]
color = colors[g]
marker = markers[sg]
curve = hv.Curve(group, (hdr_x, lab_x), (hdr_y, lab_y), label=label).opts(
color=color
)
else:
curve = hv.Curve(group, (hdr_x, lab_x), (hdr_y, lab_y), label=label)
if points:
if not averaged and journal is not None:
scatter = hv.Scatter(curve).opts(color=color, marker=marker)
if edges and extension == "matplotlib":
scatter = scatter.opts(edgecolor="k")
if edges and extension == "bokeh":
scatter = scatter.opts(line_color="k", line_width=1)
if marker_size is not None and extension == "bokeh":
scatter = scatter.opts(size=marker_size)
else:
scatter = hv.Scatter(curve)
if marker_size is not None and extension == "bokeh":
scatter = scatter.opts(size=marker_size)
if points and line:
curve *= scatter
elif points:
curve = scatter
if errors:
if spread:
curve *= hv.Spread(group, hdr_x, [hdr_y, hdr_e])
else:
curve *= hv.ErrorBars(
group, hdr_x, [hdr_y, hdr_e]
) # should get the color from curve and set it here
curve_dict[label] = curve
if extension == "matplotlib":
overlay_opts = {
"aspect": "auto",
"fig_inches": (width * 0.016, height * 0.012),
"show_frame": True,
}
else:
overlay_opts = {
"width": width,
"height": height,
}
if simple:
if len(keys) == len(curve_dict):
new_curve_dict = {}
for k in keys:
new_curve_dict[k] = curve_dict[keys[k]]
curve_dict = new_curve_dict
final_plot = hv.Overlay(
[*curve_dict.values()], vdims=[*curve_dict.keys()]
).opts(**overlay_opts, **kwargs)
else:
final_plot = hv.NdOverlay(curve_dict, kdims=legend_title).opts(
**overlay_opts, **kwargs
)
if hover and not extension == "plotly":
if points:
final_plot.opts(opts.Scatter(tools=["hover"]))
else:
final_plot.opts(opts.Curve(tools=["hover"]))
return final_plot
def create_colormarkerlist_for_journal(
journal, symbol_label="all", color_style_label="seaborn-colorblind"
):
"""Fetch lists with color names and marker types of correct length for a journal.
Args:
journal: cellpy journal
symbol_label: sub-set of markers to use
color_style_label: cmap to use for colors
Returns:
colors (list), markers (list)
"""
logging.debug("symbol_label: " + symbol_label)
logging.debug("color_style_label: " + color_style_label)
groups = journal.pages[hdr_journal.group].unique()
sub_groups = journal.pages[hdr_journal.subgroup].unique()
return create_colormarkerlist(groups, sub_groups, symbol_label, color_style_label)
def create_colormarkerlist(
groups, sub_groups, symbol_label="all", color_style_label="seaborn-colorblind"
):
"""Fetch lists with color names and marker types of correct length.
Args:
groups: list of group numbers (used to generate the list of colors)
sub_groups: list of sub-group numbers (used to generate the list of markers).
symbol_label: sub-set of markers to use
color_style_label: cmap to use for colors
Returns:
colors (list), markers (list)
"""
symbol_list = SYMBOL_DICT[symbol_label]
color_list = COLOR_DICT[color_style_label]
# checking that we have enough colors and symbols (if not, then use cycler (e.g. reset))
color_cycler = itertools.cycle(color_list)
symbol_cycler = itertools.cycle(symbol_list)
_color_list = []
_symbol_list = []
for i in groups:
_color_list.append(next(color_cycler))
for i in sub_groups:
_symbol_list.append(next(symbol_cycler))
return _color_list, _symbol_list
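# Usage sketch: one color per group and one marker per sub-group (actual values depend on the chosen styles).
# >>> colors, markers = create_colormarkerlist(groups=[1, 2, 3], sub_groups=[1, 2])
# >>> len(colors), len(markers)
# (3, 2)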
def _raw_plot(raw_curve, title="Voltage versus time", **kwargs):
tgt = raw_curve.relabel(title).opts(
width=800,
height=300,
labelled=["y"],
# tools=["pan","box_zoom", "reset"],
active_tools=["pan"],
)
src = raw_curve.opts(width=800, height=100, yaxis=None, default_tools=[])
RangeToolLink(src, tgt)
layout = (tgt + src).cols(1)
layout.opts(opts.Layout(shared_axes=False, merge_tools=False))
return layout
def raw_plot(cell, y=("voltage", "Voltage (V vs Li/Li+)"), title=None, **kwargs):
# TODO: missing doc-string
if title is None:
if isinstance(y, (list, tuple)):
pre_title = str(y[0])
else:
pre_title = str(y)
title = " ".join([pre_title, "versus", "time"])
if not _hv_bokeh_available():
return
hv.extension("bokeh", logo=False)
raw = cell.cell.raw
raw["test_time_hrs"] = raw[hdr_raw["test_time_txt"]] / 3600
x = ("test_time_hrs", "Time (hours)")
raw_curve = hv.Curve(raw, x, y)
layout = _raw_plot(raw_curve, title=title, **kwargs)
return layout
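# Usage sketch (assumes `c` is a cellpy cell object with raw data loaded and that bokeh is available):
# >>> layout = raw_plot(c)   # voltage versus time with a range-tool overview underneath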
def cycle_info_plot(
cell,
cycle=None,
step=None,
title=None,
points=False,
x=None,
y=None,
info_level=1,
h_cycle=None,
h_step=None,
show_it=True,
label_cycles=True,
label_steps=False,
get_axes=False,
use_bokeh=True,
**kwargs,
):
"""Show raw data together with step and cycle information.
Args:
cell: cellpy object
cycle: cycles to select (optional, default is all)
        step: steps to select (optional, default is all)
title: a title to give the plot
points: overlay a scatter plot
x (str): column header for the x-value (defaults to "Test_Time")
y (str): column header for the y-value (defaults to "Voltage")
info_level (int): how much information to display (defaults to 1)
0 - almost nothing
1 - pretty much
2 - something else
3 - not implemented yet
h_cycle: column header for the cycle number (defaults to "Cycle_Index")
h_step: column header for the step number (defaults to "Step_Index")
show_it (bool): show the figure (defaults to True). If not, return the figure.
label_cycles (bool): add labels with cycle numbers.
label_steps (bool): add labels with step numbers
get_axes (bool): return axes (for matplotlib)
use_bokeh (bool): use bokeh to plot (defaults to True). If not, use matplotlib.
**kwargs: parameters specific to either matplotlib or bokeh.
Returns:
"""
# TODO: missing doc-string
if use_bokeh and not bokeh_available:
print("OBS! bokeh is not available - using matplotlib instead")
use_bokeh = False
if use_bokeh:
axes = _cycle_info_plot_bokeh(
cell,
cycle=cycle,
step=step,
title=title,
points=points,
x=x,
y=y,
info_level=info_level,
h_cycle=h_cycle,
h_step=h_step,
show_it=show_it,
label_cycles=label_cycles,
label_steps=label_steps,
**kwargs,
)
else:
if isinstance(cycle, (list, tuple)):
if len(cycle) > 1:
print("OBS! The matplotlib-plotter only accepts single cycles.")
print(f"Selecting first cycle ({cycle[0]})")
cycle = cycle[0]
axes = _cycle_info_plot_matplotlib(cell, cycle, get_axes)
if get_axes:
return axes
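# Usage sketch (assumes `c` is a cellpy cell object with raw data and a step table):
# >>> cycle_info_plot(c, cycle=[1, 2], info_level=1)                      # bokeh figure with cycle/step annotations
# >>> axes = cycle_info_plot(c, cycle=2, use_bokeh=False, get_axes=True)  # matplotlib version, single cycle only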
def _plot_step(ax, x, y, color):
ax.plot(x, y, color=color, linewidth=3)
def _get_info(table, cycle, step):
# obs! hard-coded col-names. Please fix me.
m_table = (table.cycle == cycle) & (table.step == step)
p1, p2 = table.loc[m_table, ["point_min", "point_max"]].values[0]
c1, c2 = table.loc[m_table, ["current_min", "current_max"]].abs().values[0]
d_voltage, d_current = table.loc[
m_table, ["voltage_delta", "current_delta"]
].values[0]
d_discharge, d_charge = table.loc[
m_table, ["discharge_delta", "charge_delta"]
].values[0]
current_max = (c1 + c2) / 2
rate = table.loc[m_table, "rate_avr"].values[0]
step_type = table.loc[m_table, "type"].values[0]
return [step_type, rate, current_max, d_voltage, d_current, d_discharge, d_charge]
def _add_step_info_cols(df, table, cycles=None, steps=None, h_cycle=None, h_step=None):
if h_cycle is None:
h_cycle = "cycle_index" # edit
if h_step is None:
h_step = "step_index" # edit
col_name_mapper = {"cycle": h_cycle, "step": h_step}
df = df.merge(
table.rename(columns=col_name_mapper),
on=("cycle_index", "step_index"),
how="left",
)
return df
def _cycle_info_plot_bokeh(
cell,
cycle=None,
step=None,
title=None,
points=False,
x=None,
y=None,
info_level=0,
h_cycle=None,
h_step=None,
show_it=False,
label_cycles=True,
label_steps=False,
**kwargs,
):
"""Plot raw data with annotations.
This function uses Bokeh for plotting and is intended for use in
Jupyter Notebooks.
"""
# TODO: check that correct new column-names are used
# TODO: fix bokeh import (use e.g. import bokeh.io)
try:
from bokeh.io import output_notebook, show
from bokeh.layouts import row, column
from bokeh.models import ColumnDataSource, LabelSet
from bokeh.models import HoverTool
from bokeh.models.annotations import Span
from bokeh.models.widgets import Slider, TextInput
from bokeh.plotting import figure
except ImportError:
warnings.warn("Could not import bokeh")
return
try:
output_notebook(hide_banner=True)
finally:
sys.stdout = sys.__stdout__
if points:
if cycle is None or (len(cycle) > 1):
print("Plotting points only allowed when plotting one single cycle.")
print("Turning points off.")
points = False
if h_cycle is None:
h_cycle = "cycle_index" # edit
if h_step is None:
h_step = "step_index" # edit
if x is None:
x = "test_time" # edit
if y is None:
y = "voltage" # edit
if isinstance(x, tuple):
x, x_label = x
else:
x_label = x
if isinstance(y, tuple):
y, y_label = y
else:
y_label = y
t_x = x # used in generating title - replace with a selector
t_y = y # used in generating title - replace with a selector
if title is None:
title = f"{t_y} vs. {t_x}"
cols = [x, y]
cols.extend([h_cycle, h_step])
df = cell.cell.raw.loc[:, cols]
if cycle is not None:
if not isinstance(cycle, (list, tuple)):
cycle = [cycle]
_df = df.loc[df[h_cycle].isin(cycle), :]
if len(cycle) < 5:
title += f" [c:{cycle}]"
else:
title += f" [c:{cycle[0]}..{cycle[-1]}]"
if _df.empty:
print(f"EMPTY (available cycles: {df[h_step].unique()})")
return
else:
df = _df
cycle = df[h_cycle].unique()
if step is not None:
if not isinstance(step, (list, tuple)):
step = [step]
_df = df.loc[df[h_step].isin(step), :]
if len(step) < 5:
title += f" (s:{step})"
else:
title += f" [s:{step[0]}..{step[-1]}]"
if _df.empty:
print(f"EMPTY (available steps: {df[h_step].unique()})")
return
else:
df = _df
x_min, x_max = df[x].min(), df[x].max()
y_min, y_max = df[y].min(), df[y].max()
if info_level > 0:
table = cell.cell.steps
df = _add_step_info_cols(df, table, cycle, step)
source = ColumnDataSource(df)
plot = figure(
title=title,
tools="pan,reset,save,wheel_zoom,box_zoom,undo,redo",
x_range=[x_min, x_max],
y_range=[y_min, y_max],
**kwargs,
)
plot.line(x, y, source=source, line_width=3, line_alpha=0.6)
# labelling cycles
if label_cycles:
cycle_line_positions = [df.loc[df[h_cycle] == c, x].min() for c in cycle]
cycle_line_positions.append(df.loc[df[h_cycle] == cycle[-1], x].max())
for m in cycle_line_positions:
_s = Span(
location=m,
dimension="height",
line_color="red",
line_width=3,
line_alpha=0.5,
)
plot.add_layout(_s)
s_y_pos = y_min + 0.9 * (y_max - y_min)
s_x = []
s_y = []
s_l = []
for s in cycle:
s_x_min = df.loc[df[h_cycle] == s, x].min()
s_x_max = df.loc[df[h_cycle] == s, x].max()
s_x_pos = (s_x_min + s_x_max) / 2
s_x.append(s_x_pos)
s_y.append(s_y_pos)
s_l.append(f"c{s}")
c_labels = ColumnDataSource(data={x: s_x, y: s_y, "names": s_l})
c_labels = LabelSet(
x=x,
y=y,
text="names",
level="glyph",
source=c_labels,
render_mode="canvas",
text_color="red",
text_alpha=0.7,
)
plot.add_layout(c_labels)
# labelling steps
if label_steps:
for c in cycle:
step = df.loc[df[h_cycle] == c, h_step].unique()
step_line_positions = [
df.loc[(df[h_step] == s) & (df[h_cycle] == c), x].min()
for s in step[0:]
]
for m in step_line_positions:
_s = Span(
location=m,
dimension="height",
line_color="olive",
line_width=3,
line_alpha=0.1,
)
plot.add_layout(_s)
# s_y_pos = y_min + 0.8 * (y_max - y_min)
s_x = []
s_y = []
s_l = []
for s in step:
s_x_min = df.loc[(df[h_step] == s) & (df[h_cycle] == c), x].min()
s_x_max = df.loc[(df[h_step] == s) & (df[h_cycle] == c), x].max()
s_x_pos = s_x_min
s_y_min = df.loc[(df[h_step] == s) & (df[h_cycle] == c), y].min()
s_y_max = df.loc[(df[h_step] == s) & (df[h_cycle] == c), y].max()
s_y_pos = (s_y_max + s_y_min) / 2
s_x.append(s_x_pos)
s_y.append(s_y_pos)
s_l.append(f"s{s}")
s_labels = ColumnDataSource(data={x: s_x, y: s_y, "names": s_l})
s_labels = LabelSet(
x=x,
y=y,
text="names",
level="glyph",
source=s_labels,
render_mode="canvas",
text_color="olive",
text_alpha=0.3,
)
plot.add_layout(s_labels)
hover = HoverTool()
if info_level == 0:
hover.tooltips = [
(x, "$x{0.2f}"),
(y, "$y"),
("cycle", f"@{h_cycle}"),
("step", f"@{h_step}"),
]
elif info_level == 1:
# insert C-rates etc here
hover.tooltips = [
(f"(x,y)", "($x{0.2f} $y"),
("cycle", f"@{h_cycle}"),
("step", f"@{h_step}"),
("step_type", "@type"),
("rate", "@rate_avr{0.2f}"),
]
elif info_level == 2:
hover.tooltips = [
(x, "$x{0.2f}"),
(y, "$y"),
("cycle", f"@{h_cycle}"),
("step", f"@{h_step}"),
("step_type", "@type"),
("rate (C)", "@rate_avr{0.2f}"),
("dv (%)", "@voltage_delta{0.2f}"),
("I-max (A)", "@current_max"),
("I-min (A)", "@current_min"),
("dCharge (%)", "@charge_delta{0.2f}"),
("dDischarge (%)", "@discharge_delta{0.2f}"),
]
hover.mode = "vline"
plot.add_tools(hover)
plot.xaxis.axis_label = x_label
plot.yaxis.axis_label = y_label
if points:
plot.scatter(x, y, source=source, alpha=0.3)
if show_it:
show(plot)
return plot
def _cycle_info_plot_matplotlib(cell, cycle, get_axes=False):
# obs! hard-coded col-names. Please fix me.
if not plt_available:
print(
"This function uses matplotlib. But I could not import it. "
"So I decided to abort..."
)
return
data = cell.cell.raw
table = cell.cell.steps
span_colors = ["#4682B4", "#FFA07A"]
voltage_color = "#008B8B"
current_color = "#CD5C5C"
m_cycle_data = data.cycle_index == cycle
all_steps = data[m_cycle_data]["step_index"].unique()
color = itertools.cycle(span_colors)
fig = plt.figure(figsize=(20, 8))
fig.suptitle(f"Cycle: {cycle}")
ax3 = plt.subplot2grid((8, 3), (0, 0), colspan=3, rowspan=1, fig=fig) # steps
ax4 = plt.subplot2grid((8, 3), (1, 0), colspan=3, rowspan=2, fig=fig) # rate
ax1 = plt.subplot2grid((8, 3), (3, 0), colspan=3, rowspan=5, fig=fig) # data
ax2 = ax1.twinx()
ax1.set_xlabel("time (minutes)")
ax1.set_ylabel("voltage (V vs. Li/Li+)", color=voltage_color)
ax2.set_ylabel("current (mA)", color=current_color)
annotations_1 = [] # step number (IR)
annotations_2 = [] # step number
annotations_4 = [] # rate
for i, s in enumerate(all_steps):
m = m_cycle_data & (data.step_index == s)
c = data.loc[m, "current"] * 1000
v = data.loc[m, "voltage"]
t = data.loc[m, "test_time"] / 60
step_type, rate, current_max, dv, dc, d_discharge, d_charge = _get_info(
table, cycle, s
)
if len(t) > 1:
fcolor = next(color)
info_txt = (
f"{step_type}\nc-rate = {rate}\ni = |{1000 * current_max:0.2f}| mA\n"
)
info_txt += f"delta V = {dv:0.2f} %\ndelta i = {dc:0.2f} %\n"
info_txt += f"delta C = {d_charge:0.2} %\ndelta DC = {d_discharge:0.2} %\n"
for ax in [ax2, ax3, ax4]:
ax.axvspan(t.iloc[0], t.iloc[-1], facecolor=fcolor, alpha=0.2)
_plot_step(ax1, t, v, voltage_color)
_plot_step(ax2, t, c, current_color)
annotations_1.append([f"{s}", t.mean()])
annotations_4.append([info_txt, t.mean()])
else:
info_txt = f"{s}({step_type})"
annotations_2.append([info_txt, t.mean()])
ax3.set_ylim(0, 1)
for s in annotations_1:
ax3.annotate(f"{s[0]}", (s[1], 0.2), ha="center")
for s in annotations_2:
ax3.annotate(f"{s[0]}", (s[1], 0.6), ha="center")
for s in annotations_4:
ax4.annotate(f"{s[0]}", (s[1], 0.0), ha="center")
for ax in [ax3, ax4]:
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
if get_axes:
        return ax1, ax2, ax3, ax4
def save_fig(figure, file_name=None, wide=False, size=None, dpi=300, **kwargs):
"""Save a figure, either a holoviews plot or a matplotlib figure.
This function should mainly be used when using the standard cellpy notebook
template (generated by '> cellpy new')
Args:
figure (obj): the figure or plot object
file_name (str): the file name
wide (bool): release the equal aspect lock (default on for holoviews)
size (int or tuple of ints): figure size in inches
dpi (int): resolution
**kwargs: sent to cellpy.utils.plotutils.hv_bokeh_to_mpl
"""
out_path = Path("out/")
extension = "png"
if size is None:
size = (6, 6)
# create file name:
if file_name is None:
counter = 1
while True:
_file_name = f"cellpy-plot-{str(counter).zfill(3)}.{extension}"
_file_name = out_path / _file_name
if not os.path.isfile(_file_name):
break
counter += 1
file_name = _file_name
type_str = str(type(figure))
is_hv = type_str.find("holoviews") >= 0
is_mpl = type_str.find("matplotlib") >= 0
if is_mpl:
is_mpl_figure = type_str.find("figure.Figure") >= 0
is_mpl_axes = type_str.find(".axes.") >= 0
if not is_mpl_figure:
if is_mpl_axes:
figure = figure.get_figure()
else:
print("this matplotlib object is not supported")
print(type_str)
return
figure.savefig(file_name, dpi=dpi)
elif is_hv:
is_hv_nd_overlay = isinstance(figure, hv.core.overlay.NdOverlay)
is_hv_overlay = isinstance(figure, hv.core.overlay.Overlay)
is_hv_layout = isinstance(figure, hv.core.layout.Layout)
figure = hv_bokeh_to_mpl(figure, wide=wide, size=size, **kwargs)
figure.savefig(file_name, dpi=dpi)
else:
print("this figure object is not supported")
print(type_str)
return
print(f"saved to {file_name}")
def hv_bokeh_to_mpl(figure, wide=False, size=(6, 4), **kwargs):
# I think this needs to be tackled differently. For example by setting hv.extension("matplotlib") and
# re-making the figure. Or making a custom renderer.
figure = hv.render(figure, backend="matplotlib")
axes = figure.axes
number_of_axes = len(axes)
if number_of_axes > 1:
for j, ax in enumerate(axes):
if j < number_of_axes - 1:
ax.set_xlabel("")
if j < number_of_axes - 1:
ax.get_legend().remove()
else:
handles, labels = ax.get_legend_handles_labels()
# This does not work:
# ax.legend(handles, labels, bbox_to_anchor=(1.05, 1), loc="upper_right")
# TODO: create new legend based on the ax data
if wide:
for axis in axes:
axis.set_aspect("auto")
figure.tight_layout()
figure.set_size_inches(size)
return figure
def oplot(
b,
cap_ylim=None,
ce_ylim=None,
ir_ylim=None,
simple=False,
group_it=False,
spread=True,
capacity_unit="gravimetric",
**kwargs,
):
"""create a holoviews-plot containing Coulombic Efficiency, Capacity, and IR.
Args:
b (cellpy.batch object): the batch with the cells.
cap_ylim (tuple of two floats): scaling of y-axis for capacity plots.
ce_ylim (tuple of two floats): scaling of y-axis for c.e. plots.
ir_ylim (tuple of two floats): scaling of y-axis for i.r. plots.
simple (bool): if True, use hv.Overlay instead of hv.NdOverlay.
        group_it (bool): if True, average per group.
spread (bool): if True, show spread instead of error-bars
capacity_unit (str): select "gravimetric", or "areal"
Returns:
hv.Overlay or hv.NdOverlay
"""
extension = kwargs.pop("extension", "bokeh")
cap_colum_dict = {
"gravimetric": {
"discharge": "discharge_capacity",
"charge": "charge_capacity",
"unit": "mAh/g(a.m.)",
"ylim": (0, 5000),
},
"areal": {
"discharge": "areal_discharge_capacity",
"charge": "areal_charge_capacity",
"unit": "mAh/cm2",
"ylim": (0, 3),
},
}
if cap_ylim is not None:
cap_colum_dict[capacity_unit]["ylim"] = cap_ylim
if ce_ylim is None:
ce_ylim = (80, 120)
if ir_ylim is None:
ir_ylim = (0, 200)
overlay_sensitive_opts = {
"ce": {},
"dcap": {},
"ccap": {},
"ird": {},
"irc": {},
}
layout_sensitive_opts = {
"ce": {},
"dcap": {},
"ccap": {},
"ird": {},
"irc": {},
}
if extension == "bokeh":
overlay_sensitive_opts["ce"] = {"height": 150}
overlay_sensitive_opts["dcap"] = {"height": 400}
overlay_sensitive_opts["ccap"] = {"height": 150}
overlay_sensitive_opts["ird"] = {"height": 150}
overlay_sensitive_opts["irc"] = {"height": 150}
elif extension == "matplotlib":
simple = True
overlay_sensitive_opts["ce"] = {"aspect": 2}
overlay_sensitive_opts["dcap"] = {"aspect": 2}
overlay_sensitive_opts["ccap"] = {"aspect": 2}
overlay_sensitive_opts["ird"] = {"aspect": 2}
overlay_sensitive_opts["irc"] = {"aspect": 2}
hspace = 2
layout_sensitive_opts["ce"] = {"hspace": hspace}
layout_sensitive_opts["dcap"] = {"hspace": hspace}
layout_sensitive_opts["ccap"] = {"hspace": hspace}
layout_sensitive_opts["ird"] = {"hspace": hspace}
layout_sensitive_opts["irc"] = {"hspace": hspace}
bplot_shared_opts = {
"group_it": group_it,
"simple": simple,
"spread": spread,
"extension": extension,
}
if simple:
overlay_opts = hv.opts.Overlay
layout_opts = hv.opts.Layout
else:
overlay_opts = hv.opts.NdOverlay
layout_opts = hv.opts.NdLayout
# print("creating interactive plots")
oplot_ce = bplot(b, columns=["coulombic_efficiency"], **bplot_shared_opts).opts(
hv.opts.Curve(ylim=ce_ylim),
overlay_opts(
title="",
show_legend=False,
xlabel="",
ylabel="C.E.",
**overlay_sensitive_opts["ce"],
),
layout_opts(title="Coulombic efficiency (%)", **layout_sensitive_opts["ce"]),
)
# print(" - created oplot_ce")
oplot_dcap = bplot(
b, columns=[cap_colum_dict[capacity_unit]["discharge"]], **bplot_shared_opts
).opts(
hv.opts.Curve(ylim=cap_colum_dict[capacity_unit]["ylim"]),
overlay_opts(
title="",
show_legend=True,
xlabel="",
ylabel="discharge",
**overlay_sensitive_opts["dcap"],
),
layout_opts(
title=f"Capacity ({cap_colum_dict[capacity_unit]['unit']})",
**layout_sensitive_opts["dcap"],
),
)
# print(" - created oplot_dcap")
oplot_ccap = bplot(
b, columns=[cap_colum_dict[capacity_unit]["charge"]], **bplot_shared_opts
).opts(
hv.opts.Curve(ylim=cap_colum_dict[capacity_unit]["ylim"]),
overlay_opts(
title="",
show_legend=False,
xlabel="",
ylabel="charge",
**overlay_sensitive_opts["ccap"],
),
layout_opts(title="", **layout_sensitive_opts["ccap"]),
)
# print(" - created oplot_ccap")
oplot_ird = bplot(b, columns=["ir_discharge"], **bplot_shared_opts).opts(
hv.opts.Curve(ylim=ir_ylim),
overlay_opts(
title="",
show_legend=False,
xlabel="",
ylabel="discharge",
**overlay_sensitive_opts["ird"],
),
layout_opts(title="Internal Resistance (Ohm)", **layout_sensitive_opts["ird"]),
)
oplot_irc = bplot(b, columns=["ir_charge"], **bplot_shared_opts).opts(
hv.opts.Curve(ylim=ir_ylim),
overlay_opts(
title="",
show_legend=False,
ylabel="charge",
**overlay_sensitive_opts["irc"],
),
layout_opts(title="", **layout_sensitive_opts["irc"]),
)
return (oplot_ce + oplot_dcap + oplot_ccap + oplot_ird + oplot_irc).cols(1)
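# Usage sketch (assumes `b` is a cellpy batch object with collected summaries):
# >>> layout = oplot(b, group_it=True, capacity_unit="gravimetric")
# >>> layout   # C.E., capacity and internal-resistance panels stacked in one column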
def bplot(b, individual=False, cols=1, **kwargs):
"""plot batch summaries.
This is wrapper around the two functions concatenate_summaries and plot_concatenated.
>>> p1 = bplot(b, columns=["charge_capacity"], journal=b.experiment.journal, group_it=True)
is equivalent to:
>>> cs = helpers.concatenate_summaries(b, columns=["charge_capacity"], group_it=True)
>>> p1 = plot_concatenated(cs, journal=journal)
Args:
b (cellpy.batch object): the batch with the cells.
        individual (bool): in case of multiple columns, return a list of plots instead of a hv.Layout
cols (int): number of columns.
Keyword Args sent to concatenator:
rate (float): filter on rate (C-rate)
on (str or list of str): only select cycles if based on the rate of this step-type (e.g. on="charge").
columns (list): selected column(s) (using cellpy name) [defaults to "charge_capacity"]
column_names (list): selected column(s) (using exact column name)
normalize_capacity_on (list): list of cycle numbers that will be used for setting the basis of the normalization
(typically the first few cycles after formation)
scale_by (float or str): scale the normalized data with nominal capacity if "nom_cap", or given value (defaults to one).
nom_cap (float): nominal capacity of the cell
normalize_cycles (bool): perform a normalisation of the cycle numbers (also called equivalent cycle index)
add_areal (bool): add areal capacity to the summary
        group_it (bool): if True, average per group.
rate_std (float): allow for this inaccuracy when selecting cycles based on rate
rate_column (str): name of the column containing the C-rates.
inverse (bool): select steps that does not have the given C-rate.
inverted (bool): select cycles that does not have the steps filtered by given C-rate.
journal (batch.journal object): the journal (will use the journal in b if not given).
Keyword Args sent to plotter:
width (int): width of plot.
spread (bool): use error-spread instead of error-bars.
simple (bool): use hv.Overlay instead of hv.NdOverlay.
Returns:
holoviews plot
"""
width = kwargs.pop("width", 800)
journal = kwargs.pop("journal", b.experiment.journal)
spread = kwargs.pop("spread", True)
simple = kwargs.pop("simple", False)
columns = kwargs.pop("columns", ["charge_capacity"])
extension = kwargs.pop("extension", "bokeh")
p = collections.OrderedDict()
i_width = width // cols
for i, col in enumerate(columns):
try:
cs = helpers.concatenate_summaries(b, columns=[col], **kwargs)
_p = plot_concatenated(
cs,
journal=journal,
spread=spread,
width=i_width,
extension=extension,
title=col,
simple=simple,
)
if i < len(columns) - 1:
_p.opts(show_legend=False)
if cols == 1:
_p.opts(xlabel="")
else:
_p.opts(show_legend=True, legend_position="right")
# if (len(columns) > 1) and cols > 1:
# _p.opts(frame_width=width)
if extension == "bokeh":
_p.opts(frame_width=width)
p[col] = _p
except KeyError as e:
print(f"Sorry - missing key: {col}")
logging.debug(e)
w = width / 180 * cols
h = 5 * len(p) / cols
if len(p) >= 1:
if not individual:
if simple:
out = hv.Layout(list(p.values())).cols(cols)
else:
out = hv.NdLayout(p, sort=False).cols(cols)
if extension == "matplotlib":
out.opts(fig_inches=(w, h))
else:
if extension == "matplotlib":
out = [o.opts(fig_inches=(w, h)) for o in p.values()]
else:
                out = list(p.values())
return out
if __name__ == "__main__":
pass
|
__author__ = 'upendrakumardevisetty'
import sys
accfile = sys.argv[1]
infile = sys.argv[2]
outfile = sys.argv[3]
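# Keeps only the FASTA records whose full header line (">...") appears in the accession file
# (one header per line): argv[1] = accession/header list, argv[2] = input FASTA, argv[3] = filtered output.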
AI_DICT = {}
with open(accfile, "r") as acc_in:
for line in acc_in:
AI_DICT[line[:-1]] = 1
skip = 0
with open(infile, "r") as fh_in:
with open(outfile, "w") as fh_out:
for line in fh_in:
if line.startswith('>'):
#line_split = line.split(' ')
gene = line.strip()
if gene in AI_DICT:
fh_out.write(line)
skip = 0
else:
skip = 1
else:
if not skip:
fh_out.write(line)
|
import sys
def magic(x, y):
return x + y * 2
x = int(sys.argv[1])
y = int(sys.argv[2])
answer = magic(x, y)
print('The answer is: {}'.format(answer))
|
from __future__ import print_function, division
from collections import namedtuple
import faulthandler
import yaml
import time
from src import *
if __name__ == "__main__":
print("Starting time: {}".format(time.asctime()))
# To have a more verbose output in case of an exception
faulthandler.enable()
with open('parameters.yaml', 'r') as params_file:
parameters = yaml.safe_load(params_file)
parameters = namedtuple('Parameters', (parameters.keys()))(*parameters.values())
if parameters.trainModel is True:
# Instantiating the trainer
trainer = AgentTrainer(parameters)
# Training the model
avg_losses, episode_durations = trainer.train_model()
# Plot losses
plot_losses(avg_losses, title='Average Loss per Episode', xlabel='Episode', ylabel='Average Loss')
# Plot durations
plot_losses(episode_durations, title='Episode Durations', xlabel='Episode', ylabel='Duration')
else:
# Instantiating the test
tester = AgentTester(parameters)
# Testing the policy
screens, scores = tester.test_model()
# Plot Scores
plot_scores(scores, xlabel='Score', ylabel='Number of Games', bins=8)
# Save animation
save_animation('static', screens, 10)
print("Finishing time: {}".format(time.asctime()))
|
# utils init file
import predictors.ArimaAutoregressor
import predictors.RealAutoRegressor |
#!/bin/python
# -*- coding: utf-8 -*-
import numpy as np
from scipy.linalg import sqrtm
from grgrlib.la import tinv, nearest_psd
from numba import njit
from .stats import logpdf
try:
import chaospy
if hasattr(chaospy.distributions.kernel.baseclass, 'Dist'):
def init_mv_normal(self, loc=[0, 0], scale=[[1, .5], [.5, 1]]):
loc = np.asfarray(loc)
scale = np.asfarray(scale)
assert len(loc) == len(scale)
self._repr = {"loc": loc.tolist(), "scale": scale.tolist()}
try:
C = np.linalg.cholesky(scale)
Ci = np.linalg.inv(C)
except np.linalg.LinAlgError as err:
C = np.real(sqrtm(scale))
Ci = np.linalg.pinv(C)
chaospy.baseclass.Dist.__init__(self, C=C, Ci=Ci, loc=loc)
# must be patched to allow for a covariance that is only PSD
chaospy.MvNormal.__init__ = init_mv_normal
else:
def init_mv_normal(
self,
dist,
mean=0,
covariance=1,
rotation=None,
repr_args=None,
):
mean = np.atleast_1d(mean)
length = max(len(dist), len(mean), len(covariance))
exclusion = dist._exclusion.copy()
dist = chaospy.Iid(dist, length)
covariance = np.asarray(covariance)
rotation = [key for key, _ in sorted(enumerate(dist._dependencies), key=lambda x: len(x[1]))]
accumulant = set()
dependencies = [deps.copy() for deps in dist._dependencies]
for idx in rotation:
accumulant.update(dist._dependencies[idx])
dependencies[idx] = accumulant.copy()
self._permute = np.eye(len(rotation), dtype=int)[rotation]
self._covariance = covariance
self._pcovariance = self._permute.dot(covariance).dot(self._permute.T)
try:
cholesky = np.linalg.cholesky(self._pcovariance)
self._fwd_transform = self._permute.T.dot(np.linalg.inv(cholesky))
except np.linalg.LinAlgError as err:
cholesky = np.real(sqrtm(self._pcovariance))
self._fwd_transform = self._permute.T.dot(np.linalg.pinv(cholesky))
self._inv_transform = self._permute.T.dot(cholesky)
self._dist = dist
super(chaospy.distributions.MeanCovarianceDistribution, self).__init__(
parameters=dict(mean=mean, covariance=covariance),
dependencies=dependencies,
rotation=rotation,
exclusion=exclusion,
repr_args=repr_args,
)
def get_parameters_patched(self, idx, cache, assert_numerical=True):
# avoids all functionality not used
parameters = super(chaospy.distributions.MeanCovarianceDistribution, self).get_parameters(
idx, cache, assert_numerical=assert_numerical)
mean = parameters["mean"]
mean = mean[self._rotation]
dim = self._rotation.index(idx)
return dict(idx=idx, mean=mean, sigma=None, dim=dim, mut=None, cache=cache)
# must be patched to allow for a covariance that is only PSD
chaospy.distributions.MeanCovarianceDistribution.__init__ = init_mv_normal
chaospy.distributions.MeanCovarianceDistribution.get_parameters = get_parameters_patched
def multivariate_dispatch(rule):
def multivariate(mean, cov, size):
# rule must be of 'L', 'M', 'H', 'K' or 'S'
res = chaospy.MvNormal(mean, cov).sample(size=size, rule=rule or 'L')
res = np.moveaxis(res, 0, res.ndim-1)
np.random.shuffle(res)
return res
return multivariate
except ModuleNotFoundError as e:
def multivariate_dispatch(rule):
def multivariate(mean, cov, size):
return np.random.multivariate_normal(mean=mean, cov=cov, size=size)
return multivariate
print(str(e)+". Low-discrepancy series will not be used. This might cause a loss in precision.")
class TEnKF(object):
name = 'TEnKF'
def __init__(self, N, dim_x=None, dim_z=None, fx=None, hx=None, rule=None, seed=None):
self.dim_x = dim_x
self.dim_z = dim_z
self.t_func = fx
self.o_func = hx
self.N = N
self.seed = seed
self.R = np.eye(self.dim_z)
self.Q = np.eye(self.dim_x)
self.P = np.eye(self.dim_x)
self.x = np.zeros(self.dim_x)
self.multivariate = multivariate_dispatch(rule)
def batch_filter(self, Z, init_states=None, seed=None, store=False, calc_ll=False, verbose=False):
"""Batch filter.
Runs the TEnKF on the full dataset.
"""
# store time series for later
self.Z = Z
dim_x = self.dim_x
dim_z = self.dim_z
N = self.N
I1 = np.ones(N)
I2 = np.eye(N) - np.outer(I1, I1)/N
# pre allocate
if store:
self.Xs = np.empty((Z.shape[0], dim_x, N))
self.X_priors = np.empty_like(self.Xs)
self.X_bars = np.empty_like(self.Xs)
self.X_bar_priors = np.empty_like(self.Xs)
ll = 0
if seed is not None:
np.random.seed(seed)
elif self.seed is not None:
np.random.seed(self.seed)
means = np.empty((Z.shape[0], dim_x))
covs = np.empty((Z.shape[0], dim_x, dim_x))
Y = np.empty((dim_z, N))
mus = self.multivariate(mean=np.zeros(
self.dim_z), cov=self.R, size=(len(Z), self.N))
        epss = self.multivariate(mean=np.zeros(
            self.dim_x), cov=self.Q, size=(len(Z), self.N))
X = init_states or self.multivariate(mean=self.x, cov=self.P, size=N).T
self.Xs = np.empty((Z.shape[0], dim_x, N))
for nz, z in enumerate(Z):
# predict
for i in range(N):
eps = epss[nz, i]
if self.o_func is None:
X[:, i], Y[:, i] = self.t_func(X[:, i], eps)[0]
else:
X[:, i] = self.t_func(X[:, i], eps)[0]
if self.o_func is not None:
Y = self.o_func(X.T).T
if store:
self.X_priors[nz, :, :] = X
# update
X_bar = X @ I2
Y_bar = Y @ I2
ZZ = np.outer(z, I1)
S = np.cov(Y) + self.R
X += X_bar @ Y_bar.T @ np.linalg.inv((N-1)*S) @ (ZZ - Y - mus[nz].T)
if store:
self.X_bar_priors[nz, :, :] = X_bar
self.X_bars[nz, :, :] = X @ I2
self.Xs[nz, :, :] = X
if calc_ll:
                    # accumulate ll
z_mean = np.mean(Y, axis=1)
y = z - z_mean
ll += logpdf(x=y, mean=np.zeros(dim_z), cov=S)
else:
self.Xs[nz, :, :] = X
if calc_ll:
self.ll = ll
return ll
else:
return np.rollaxis(self.Xs, 2)
def rts_smoother(self, means=None, covs=None, rcond=1e-14):
S = self.Xs[-1]
Ss = self.Xs.copy()
for i in reversed(range(self.Xs.shape[0] - 1)):
J = self.X_bars[i] @ tinv(self.X_bar_priors[i+1])
S = self.Xs[i] + J @ (S - self.X_priors[i+1])
Ss[i, :, :] = S
self.Ss = Ss
return np.rollaxis(Ss, 2)
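# Usage sketch (hypothetical transition/observation functions, not part of this module):
# >>> f = lambda x, eps: (0.9 * x + eps,)    # t_func must return a tuple; element [0] is the new state
# >>> h = lambda X: X                        # o_func maps the (N, dim_x) ensemble to (N, dim_z) observables
# >>> enkf = TEnKF(N=200, dim_x=1, dim_z=1, fx=f, hx=h, seed=0)
# >>> ll = enkf.batch_filter(Z, store=True, calc_ll=True)   # Z: (T, dim_z) array of observations
# >>> smoothed = enkf.rts_smoother()                        # needs store=True in batch_filter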
|
import States
class Context:
def __init__(self):
self.currentState = States.StateToDo()
def action(self):
self.currentState.action(self)
def actionBack(self):
self.currentState.actionBack(self) |
from typing import Tuple, Union
from scipy.stats import norm
from GPyOpt.util.general import get_quantiles
import numpy as np
from ...core.interfaces import IModel, IDifferentiable
from ...core.acquisition import Acquisition
class LogExpectedImprovement(Acquisition):
def __init__(self, model: Union[IModel, IDifferentiable], jitter: np.float64 = np.float64(0))-> None:
"""
:param model: model that is used to compute the improvement.
:param jitter: parameter to encourage extra exploration.
"""
self.model = model
self.jitter = jitter
def evaluate(self, x: np.ndarray) -> np.ndarray:
"""
        Computes the logarithm of the Expected Improvement.
:param x: points where the acquisition is evaluated.
"""
m, v = self.model.predict(x)
eta = np.min(self.model.Y, axis=0)
f_min = eta - self.jitter
s = np.sqrt(v)
z = (f_min - m) / s
log_ei = np.zeros([m.size, 1])
for i in range(0, m.size):
mu, sigma = m[i], s[i]
# par_s = self.par * sigma
# Degenerate case 1: first term vanishes
if np.any(abs(f_min - mu) == 0):
if sigma > 0:
log_ei[i] = np.log(sigma) + norm.logpdf(z[i])
else:
log_ei[i] = -np.Infinity
# Degenerate case 2: second term vanishes and first term
# has a special form.
elif sigma == 0:
if np.any(mu < f_min):
log_ei[i] = np.log(f_min - mu)
else:
log_ei[i] = -np.Infinity
# Normal case
else:
b = np.log(sigma) + norm.logpdf(z[i])
# log(y+z) is tricky, we distinguish two cases:
if np.any(f_min > mu):
# When y>0, z>0, we define a=ln(y), b=ln(z).
# Then y+z = exp[ max(a,b) + ln(1 + exp(-|b-a|)) ],
# and thus log(y+z) = max(a,b) + ln(1 + exp(-|b-a|))
a = np.log(f_min - mu) + norm.logcdf(z[i])
log_ei[i] = max(a, b) + np.log(1 + np.exp(-abs(b - a)))
else:
# When y<0, z>0, we define a=ln(-y), b=ln(z),
# and it has to be true that b >= a in
# order to satisfy y+z>=0.
# Then y+z = exp[ b + ln(exp(b-a) -1) ],
# and thus log(y+z) = a + ln(exp(b-a) -1)
a = np.log(mu - f_min) + norm.logcdf(z[i])
if a >= b:
# a>b can only happen due to numerical inaccuracies
# or approximation errors
log_ei[i] = -np.Infinity
else:
log_ei[i] = b + np.log(1 - np.exp(a - b))
return log_ei
def has_gradients(self) -> bool:
"""Returns that this acquisition has gradients"""
return False
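# Usage sketch (hypothetical wrapped GP model; X_test is an (n, d) array of candidate points):
# >>> acquisition = LogExpectedImprovement(model=wrapped_gp, jitter=np.float64(0.01))
# >>> log_ei = acquisition.evaluate(X_test)   # shape (n, 1), log of the expected improvement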
|
from __future__ import print_function
from cached_property import cached_property
import math
import numpy as np
from rllab import spaces
from rllab.misc import logger
from mme.envs import GridMap
BIG = 1e6
def get_state_block(state):
x = state[0].item()
y = state[1].item()
x_int = np.floor(x)
y_int = np.floor(y)
return x_int * 1000 + y_int
def get_two_random_indices(r, c):
"""Return a 2x2 NumPy array, containing two different index pair."""
res = np.zeros((2, 2), dtype=np.int)
while (res[0, 0] == res[1, 0] and \
res[0, 1] == res[1, 1]):
res[0, 0] = np.random.randint(0, r)
res[0, 1] = np.random.randint(0, c)
res[1, 0] = np.random.randint(0, r)
res[1, 1] = np.random.randint(0, c)
return res
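# Usage sketch: draw two distinct (row, col) index pairs inside a 10 x 20 grid.
# >>> idx = get_two_random_indices(10, 20)   # idx[0] and idx[1] are guaranteed to differ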
class GME_NP_pure(GridMap.GridMapEnv):
def __init__(self, name="", gridMap=None, workingDir="./"):
super(GME_NP_pure, self).__init__(name, gridMap, workingDir)
# # Create map.
# self.map = GridMap.GridMap2D( 10, 20 )
gm2d = GridMap.GridMap2D(100, 100, outOfBoundValue=0) # -200
gm2d.valueStartingBlock = 0 # -0.1
gm2d.valueEndingBlock = 0
gm2d.valueNormalBlock = 0 # -0.1
gm2d.valueObstacleBlock = 0 # -10
gm2d.initialize()
# Create a starting block and an ending block.
startingBlock = GridMap.StartingBlock()
# endingBlock = GridMap.EndingBlock()
# Create an obstacle block.
obstacle = GridMap.ObstacleBlock()
# Overwrite blocks.
gm2d.set_starting_block((0, 0))
# gm2d.set_ending_block((49, 49), endPoint=(49.1, 49.1))
for i in range(5):
for j in range(100):
if not ((j < 25 and j >= 15) or (j < 85 and j >= 75)):
gm2d.add_obstacle((48 + i, j))
gm2d.add_obstacle((j, 48 + i))
# gm2d.add_obstacle((i, 23+j))
# gm2d.add_obstacle((5, 10))
# gm2d.add_obstacle((6, 10))
# gm2d.add_obstacle((30, 10))
# indexEndingBlock = gm2d.get_index_ending_block()
# ebGm2d = gm2d.get_block(indexEndingBlock)
# print("ebGm2d.is_in_range(19.2, 9.2, 1) = {}".format(ebGm2d.is_in_range(19.2, 9.2, 1)))
self.map = gm2d
self.maxStuckCount = 0
self.stuckPenaltyFactor = 0 # -10
self.stuckCount = 0
self.stuckState = None
self.timestep = 0
# Member variables for compatibility.
# self.observation_space = np.array([0, 0]) # self.observation_spac.shape should be a tuple showing the shape of the state variable.
@cached_property
def observation_space(self):
shp = (2,)
# shp = (3,)
ub = BIG * np.ones(shp)
return spaces.Box(ub * -1, ub)
@cached_property
def action_space(self):
shp = (2,)
ub = 1.0 * np.ones(shp)
return spaces.Box(ub * -1, ub)
def enable_stuck_check(self, maxStuckCount, penaltyFactor):
if (maxStuckCount < 0):
raise GridMap.GridMapException("Max stuck count must be non-negative number.")
self.maxStuckCount = maxStuckCount
self.stuckPenaltyFactor = penaltyFactor
def disable_stuck_check(self):
self.maxStuckCount = 0
self.stuckPenaltyFactor = 1.0
def step(self, action):
"""
Override super class.
"""
act = GridMap.BlockCoorDelta(action[0], action[1])
coor, val, flagTerm, dummy = super(GME_NP_pure, self).step(act)
self.timestep += 1
state = np.array( [coor.x, coor.y], dtype=np.float32)
# state = np.array([coor.x, coor.y, self.timestep], dtype=np.float32)
# Check stuck states.
if (0 != self.maxStuckCount):
if (self.stuckState is None):
self.stuckState = state
else:
if (state[0] == self.stuckState[0] and \
state[1] == self.stuckState[1]):
self.stuckCount += 1
else:
self.stuckCount = 0
self.stuckState = None
if (self.maxStuckCount == self.stuckCount):
val = self.stuckPenaltyFactor * math.fabs(val)
flagTerm = True
if flagTerm:
self.timestep = 0
return state, val, flagTerm, dummy
def reset(self):
res = super(GME_NP_pure, self).reset()
# Clear the stuck states.
self.stuckCount = 0
self.stuckState = None
self.timestep = 0
return np.array([res.x, res.y])
# return np.array([res.x, res.y, self.timestep])
def set_trajectory(self, t):
"""
t is a numpy ndarray of shape (x, 2). t stores the position (state) history of the agent.
This function substitutes the self.agentLocs member variable with t. Converts numpy ndarray
into BlockCoor objects.
"""
n = t.shape[0]
temp = []
for i in range(n):
temp.append(self.make_a_coor(t[i, 0], t[i, 1]))
self.agentLocs = temp
self.nSteps = n
def random_map(self):
# There must be a map.
if (self.map is None):
raise GridMap.GridMapException("Map must not be None for randomizing.")
        # Get the randomized indices of the starting and ending blocks.
indices = get_two_random_indices(self.map.rows, self.map.cols)
        # Reset the starting block.
self.map.set_starting_block(GridMap.BlockIndex(indices[0, 0], indices[0, 1]))
# Reset the ending block.
self.map.set_ending_block(GridMap.BlockIndex(indices[1, 0], indices[1, 1]))
def log_diagnostics(self, paths):
if len(paths) > 0:
progs = [
path["observations"]
for path in paths
]
# logger.record_tabular('AverageForwardProgress', np.mean(progs))
# logger.record_tabular('MaxForwardProgress', np.max(progs))
# logger.record_tabular('MinForwardProgress', np.min(progs))
# logger.record_tabular('StdForwardProgress', np.std(progs))
else:
            pass
# logger.record_tabular('AverageForwardProgress', np.nan)
# logger.record_tabular('MaxForwardProgress', np.nan)
# logger.record_tabular('MinForwardProgress', np.nan)
# logger.record_tabular('StdForwardProgress', np.nan)
def terminate(self):
print("end")
class GME_NP_rew(GridMap.GridMapEnv):
def __init__(self, name="", gridMap=None, workingDir="./"):
super(GME_NP_rew, self).__init__(name, gridMap, workingDir)
# # Create map.
# self.map = GridMap.GridMap2D( 10, 20 )
# 50 x 50 maze
gm2d = GridMap.GridMap2D(50, 50, outOfBoundValue=0) # -200
gm2d.valueStartingBlock = 0 # -0.1
gm2d.valueEndingBlock = 1000
gm2d.valueNormalBlock = 0 # -0.1
gm2d.valueObstacleBlock = 0 # -10
gm2d.initialize()
# Create a starting block and an ending block.
startingBlock = GridMap.StartingBlock()
endingBlock = GridMap.EndingBlock()
# Create an obstacle block.
obstacle = GridMap.ObstacleBlock()
# Overwrite blocks.
gm2d.set_starting_block((0, 0))
gm2d.set_ending_block((49, 49), endPoint=(49.1, 49.1))
for i in range(5):
for j in range(50):
if not ((j < 15 and j >= 5) or (j < 45 and j >= 35)):
gm2d.add_obstacle((23 + i, j))
gm2d.add_obstacle((j, 23 + i))
# # 40 x 40 maze
# gm2d = GridMap.GridMap2D(40, 40, outOfBoundValue=0) # -200
# gm2d.valueStartingBlock = 0 # -0.1
# gm2d.valueEndingBlock = 1000
# gm2d.valueNormalBlock = 0 # -0.1
# gm2d.valueObstacleBlock = 0 # -10
# gm2d.initialize()
#
# # Create a starting block and an ending block.
# startingBlock = GridMap.StartingBlock()
# endingBlock = GridMap.EndingBlock()
#
# # Create an obstacle block.
# obstacle = GridMap.ObstacleBlock()
#
# # Overwrite blocks.
# gm2d.set_starting_block((0, 0))
# gm2d.set_ending_block((39, 39), endPoint=(39.1, 39.1))
# for i in range(4):
# for j in range(40):
# if not ((j < 12 and j >= 5) or (j < 35 and j >= 28)):
# gm2d.add_obstacle((18 + i, j))
# gm2d.add_obstacle((j, 18 + i))
indexEndingBlock = gm2d.get_index_ending_block()
ebGm2d = gm2d.get_block(indexEndingBlock)
print("ebGm2d.is_in_range(19.2, 9.2, 1) = {}".format(ebGm2d.is_in_range(19.2, 9.2, 1)))
self.map = gm2d
self.maxStuckCount = 0
self.stuckPenaltyFactor = 0 # -10
self.stuckCount = 0
self.stuckState = None
# Member variables for compatibility.
# self.observation_space = np.array([0, 0]) # self.observation_spac.shape should be a tuple showing the shape of the state variable.
@cached_property
def observation_space(self):
shp = (2,)
ub = BIG * np.ones(shp)
return spaces.Box(ub * -1, ub)
@cached_property
def action_space(self):
shp = (2,)
ub = 1.0 * np.ones(shp)
return spaces.Box(ub * -1, ub)
def enable_stuck_check(self, maxStuckCount, penaltyFactor):
if (maxStuckCount < 0):
raise GridMap.GridMapException("Max stuck count must be non-negative number.")
self.maxStuckCount = maxStuckCount
self.stuckPenaltyFactor = penaltyFactor
def disable_stuck_check(self):
self.maxStuckCount = 0
self.stuckPenaltyFactor = 1.0
def step(self, action):
"""
Override super class.
"""
act = GridMap.BlockCoorDelta(action[0], action[1])
coor, val, flagTerm, dummy = super(GME_NP_rew, self).step(act)
state = np.array([coor.x, coor.y], dtype=np.float32)
# Check stuck states.
if (0 != self.maxStuckCount):
if (self.stuckState is None):
self.stuckState = state
else:
if (state[0] == self.stuckState[0] and \
state[1] == self.stuckState[1]):
self.stuckCount += 1
else:
self.stuckCount = 0
self.stuckState = None
if (self.maxStuckCount == self.stuckCount):
val = self.stuckPenaltyFactor * math.fabs(val)
flagTerm = True
return state, val, flagTerm, dummy
def reset(self):
res = super(GME_NP_rew, self).reset()
# Clear the stuck states.
self.stuckCount = 0
self.stuckState = None
return np.array([res.x, res.y])
def set_trajectory(self, t):
"""
t is a numpy ndarray of shape (x, 2). t stores the position (state) history of the agent.
This function substitutes the self.agentLocs member variable with t. Converts numpy ndarray
into BlockCoor objects.
"""
n = t.shape[0]
temp = []
for i in range(n):
temp.append(self.make_a_coor(t[i, 0], t[i, 1]))
self.agentLocs = temp
self.nSteps = n
def random_map(self):
# There must be a map.
if (self.map is None):
raise GridMap.GridMapException("Map must not be None for randomizing.")
        # Get the randomized indices of the starting and ending blocks.
indices = get_two_random_indices(self.map.rows, self.map.cols)
        # Reset the starting block.
self.map.set_starting_block(GridMap.BlockIndex(indices[0, 0], indices[0, 1]))
# Reset the ending block.
self.map.set_ending_block(GridMap.BlockIndex(indices[1, 0], indices[1, 1]))
def log_diagnostics(self, paths):
if len(paths) > 0:
progs = [
path["observations"]
for path in paths
]
# logger.record_tabular('AverageForwardProgress', np.mean(progs))
# logger.record_tabular('MaxForwardProgress', np.max(progs))
# logger.record_tabular('MinForwardProgress', np.min(progs))
# logger.record_tabular('StdForwardProgress', np.std(progs))
else:
            pass
# logger.record_tabular('AverageForwardProgress', np.nan)
# logger.record_tabular('MaxForwardProgress', np.nan)
# logger.record_tabular('MinForwardProgress', np.nan)
# logger.record_tabular('StdForwardProgress', np.nan)
def terminate(self):
print("end") |
"""Promises, promises, promises."""
from __future__ import absolute_import, unicode_literals
import re
from collections import namedtuple
from .abstract import Thenable
from .promises import promise
from .synchronization import barrier
from .funtools import (
maybe_promise, ensure_promise,
ppartial, preplace, starpromise, transform, wrap,
)
__version__ = '1.3.0'
__author__ = 'Ask Solem'
__contact__ = '[email protected]'
__homepage__ = 'http://github.com/celery/vine'
__docformat__ = 'restructuredtext'
# -eof meta-
version_info_t = namedtuple('version_info_t', (
'major', 'minor', 'micro', 'releaselevel', 'serial',
))
# bump version can only search for {current_version}
# so we have to parse the version here.
_temp = re.match(
r'(\d+)\.(\d+).(\d+)(.+)?', __version__).groups()
VERSION = version_info = version_info_t(
int(_temp[0]), int(_temp[1]), int(_temp[2]), _temp[3] or '', '')
del(_temp)
del(re)
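# For example, with __version__ = '1.3.0' the regex above yields
# version_info_t(major=1, minor=3, micro=0, releaselevel='', serial='').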
__all__ = [
'Thenable', 'promise', 'barrier',
'maybe_promise', 'ensure_promise',
'ppartial', 'preplace', 'starpromise', 'transform', 'wrap',
]
|
description = 'position of Monitor: X in beam; Z may be motor'
group = 'lowlevel'
instrument_values = configdata('instrument.values')
tango_base = instrument_values['tango_base']
devices = dict(
prim_monitor_z = device('nicos.devices.generic.ManualMove',
description = 'Monitor axis motor',
abslimits = (-10, 300),
default = 0,
unit = 'mm',
),
prim_monitor_x = device('nicos.devices.generic.ManualMove',
description = 'pos of monitor in beam',
abslimits = (0, 500),
default = 0,
fmtstr = '%.1f',
unit = 'mm',
),
prim_monitor_typ = device('nicos.devices.generic.ManualSwitch',
description = 'which monitor is in use?',
states = ['None', '#1', '#2', '#3', '#4', '#5', '#6', '#7'],
fmtstr = 'Typ %d',
unit = '',
),
hv_mon1 = device('nicos.devices.entangle.PowerSupply',
description = 'HV monitor 1',
tangodevice = tango_base + 'monitor1/hv/voltage',
requires = {'level': 'admin'},
lowlevel = True,
),
hv_mon2 = device('nicos.devices.entangle.PowerSupply',
description = 'HV monitor 2',
tangodevice = tango_base + 'monitor2/hv/voltage',
requires = {'level': 'admin'},
lowlevel = True,
),
hv_mon3 = device('nicos.devices.entangle.PowerSupply',
        description = 'HV monitor 3',
tangodevice = tango_base + 'monitor3/hv/voltage',
requires = {'level': 'admin'},
lowlevel = True,
),
hv_mon4 = device('nicos.devices.entangle.PowerSupply',
description = 'HV monitor 4',
tangodevice = tango_base + 'monitor4/hv/voltage',
requires = {'level': 'admin'},
lowlevel = True,
),
hv_mon = device('nicos_mlz.refsans.devices.devicealias.HighlevelDeviceAlias'),
)
alias_config = {
'hv_mon': {
'hv_mon1': 100,
'hv_mon2': 100,
'hv_mon3': 100,
'hv_mon4': 100,
},
}
|
# -*- coding: utf-8 -*-
"""Enhanced E-Reader.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ZsmtAW6mao5qps5tBq_-gyxBQCEYDDoO
"""
#This code takes a Project Gutenberg book and uses Wikify to add in links to make an enhanced e-Reader.
#NOTE: THIS CODE CAN TAKE A WHILE TO RUN
#Made for DSCI 511 in December 2018
USER_KEY = 'vvtmxtinhohcxaxqgdrleivlgpqxok'
import requests
import re
from bs4 import BeautifulSoup
from IPython.display import IFrame
import os
import urllib.parse, urllib.request, json
from IPython.core.display import HTML
import re
def Gutentag(ID):
file_path = 'https://www.gutenberg.org/files/' + ID + '/' + ID + '-h/' + ID + '-h.htm'
    data = requests.get(file_path).content
    with open(ID + '.htm', 'wb') as outfile:
        outfile.write(data)  # data is already raw bytes, so write it directly
def wikify(text, lang="en", threshold=0.8):
# Prepare the URL.
data = urllib.parse.urlencode([
("text", text), ("lang", lang),
("userKey", USER_KEY),
("pageRankSqThreshold", "%g" % threshold), ("applyPageRankSqThreshold", "true"),
("support", "true"), ("includeCosines", "false"), ("ranges", "false")])
url = "http://www.wikifier.org/annotate-article"
req = urllib.request.Request(url, data=data.encode("utf8"))#, method="POST")
with urllib.request.urlopen(req, timeout = 60) as f:
response = f.read()
response = json.loads(response.decode("utf8"))
return response
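# Example call (kept commented out because it needs network access and a valid
# USER_KEY; the sentence is arbitrary):
# wiki = wikify("Ada Lovelace worked with Charles Babbage.")
# for ann in wiki.get("annotations", []):
#     print(ann["url"])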
def build_doc(wiki):
orig_text = ""
for i in range(len(wiki['spaces'])):
orig_text = orig_text + wiki['spaces'][i]
if i <= len(wiki['words'])-1:
orig_text = orig_text + wiki['words'][i]
else:
continue
return orig_text
def get_links(wiki):
links = []
for annot in wiki['annotations']:
for support_dict in annot['support']:
links.append((support_dict['chFrom'], support_dict['chTo'], annot['url']))
return links
def build_link(doc, start,end, dest):
link = doc[start:end+1]
format = "<a href="+dest+">"+link+"</a>"
return format
def embed_link(doc, link):
start = 0
n_doc = ""
for l in link:
n_doc = (n_doc + doc[start:l[0]]+build_link(doc,l[0],l[1],l[2]))
start = l[1]+1
nn_doc = n_doc+doc[start:]
return HTML(nn_doc)
def embed_links(wiki):
doc = build_doc(wiki)
links = get_links(wiki)
sorted_links = sorted(links, key=lambda x: x[1], reverse=True)
valid_links = []
    for i in range(len(sorted_links)-1):
        # keep the current link only if the next one (in descending end order) does not overlap it
        if sorted_links[i+1][1] < sorted_links[i][0]:
            valid_links.append(sorted_links[i])
valid_links.append(sorted_links[-1])
link = valid_links[::-1]
return embed_link(doc, link)
def get_out_path(path):
path_list = path.split('/')
out_dir = path_list[:-1]
file_name = path_list[-1]
file = re.split('.htm', file_name)[0]
out_name = file + '.json'
out_dir.append(out_name)
out_path = '/'.join(out_dir)
return out_path
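# For example, get_out_path('books/19033.htm') returns 'books/19033.json',
# i.e. the JSON cache is written next to the downloaded HTML file.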
def wikify_html(path):
out_path = get_out_path(path)
with open(path, 'r') as infile:
html_data = infile.read()
soup = BeautifulSoup(html_data, 'html.parser')
p_tags = soup.find_all('p')
wikified_doc = []
for p_tag in p_tags:
wikified_doc.append(wikify(p_tag.text))
with open(out_path, 'w') as outfile:
json.dump(wikified_doc, outfile)
def get_enhanced_html(path):
json_path = get_out_path(path)
if os.path.exists(json_path):
with open(json_path) as f:
Wikis = json.load(f)
else:
wikify_html(path)
with open(json_path) as f:
Wikis = json.load(f)
with open(path, "r") as infile:
html_file = infile.read()
newsoup = BeautifulSoup(html_file, 'html.parser')
p_tags = newsoup.find_all('p')
p_tags_index_tuples = [(p_tag, i) for i, p_tag in enumerate(p_tags)]
annotations_and_num = [(wiki, len(wiki['annotations'])) for i, wiki in enumerate(Wikis)]
annotated_paragraph_text = []
for annot in annotations_and_num:
        if annot[1] > 0:  # paragraph has at least one annotation
annotated_paragraph_text.append(embed_links(annot[0]).data)
else:
annotated_paragraph_text.append(HTML(build_doc(annot[0])).data)
enhanced_paragraph = []
for each_par in annotated_paragraph_text:
html_par = "<p>"+each_par+"</p>"
enhanced_paragraph.append(BeautifulSoup(html_par, 'html.parser'))
i = 0
for p in newsoup.find_all('p'):
n_para = enhanced_paragraph[i]
if p.string:
p.string.replace_with(n_para)
else:
continue
i += 1
if i == len(p_tags)-1:
break
else:
continue
with open(path[:-4]+"Enhanced.html", 'wb') as f:
f.write(newsoup.renderContents())
with open(path[:-4]+"Enhanced.html", "r") as f:
enhanced_html_file = f.read()
def enhanced_book(book_id):
path = book_id+".htm"
if os.path.exists(path):
pass
else:
Gutentag(book_id)
get_enhanced_html(path)
return IFrame(path[:-4]+"_Enhanced.html", width=800, height=200)
enhanced_book("19033") |
# -*- coding: utf-8 -*-
# @Date : 18-4-16 下午8:07
# @Author : hetao
# @Email : [email protected]
# Active environment: 0 = development, 1 = testing, 2 = production
ENVIRONMENT = 0
# ===========================================================
# Local debugging
# ===========================================================
# mongodb
DATABASE_HOST = '192.168.14.240'
# redis
REDIS_HOST = '192.168.14.240'
# ===========================================================
# Common configuration
# ===========================================================
# mongodb
DATABASE_ENGINE = 'mongodb'  # kept for future use
DATABASE_PORT = 27017
# redis
REDIS_NAME = 2
REDIS_NAME_PRO = 1
REDIS_PORT = 6379
# wallet_field
# dict-type fields that only interact with the database
DICT_DB_FIELD = ('master_private_keys', 'master_public_keys', 'addr_history',
'transactions', 'txi', 'txo', 'pruned_txo',
'claimtrie_transactions', 'verified_tx3',
'accounts',
)
# dict-type fields that interact with both the database and memory
DICT_BOTH_FIELD = ()
# list-type fields that only interact with the database
LIST_DB_FIELD = ('addresses',)
# list-type fields that interact with both the database and memory
LIST_BOTH_FIELD = ()
# scalar (single-value) fields
NOR_FIELD = ('seed', 'stored_height', 'use_changes')
WALLET_FIELD = DICT_BOTH_FIELD + DICT_DB_FIELD + LIST_DB_FIELD + LIST_BOTH_FIELD + NOR_FIELD |
# -*- coding: utf-8 -*-
'''
Custom theano class to query the search engine.
'''
import numpy as np
import theano
from theano import gof
from theano import tensor
import parameters as prm
import utils
import average_precision
import random
class Search(theano.Op):
__props__ = ()
def __init__(self,options):
self.options = options
self.options['reformulated_queries'] = {}
def make_node(self, x1, x2, x3, x4):
assert hasattr(self, '_props'), "Your version of theano is too old to support __props__."
x1 = tensor.as_tensor_variable(x1)
x2 = tensor.as_tensor_variable(x2)
x3 = tensor.as_tensor_variable(x3)
x4 = tensor.as_tensor_variable(x4)
out = [tensor.fmatrix().type(), tensor.itensor3().type(), tensor.imatrix().type(), tensor.fmatrix().type()]
return theano.Apply(self, [x1, x2, x3, x4], out)
def perform(self, node, inputs, output_storage):
q_m = inputs[0]
D_truth = inputs[1]
n_iter = int(inputs[2])
is_train = int(inputs[3])
#outputs
metrics = np.zeros((len(q_m), len(prm.metrics_map)), np.float32)
if is_train:
max_feedback_docs = prm.max_feedback_docs_train
else:
max_feedback_docs = prm.max_feedback_docs
D_i = -2 * np.ones((len(q_m), max_feedback_docs, prm.max_words_input), np.int32)
D_gt_m = np.zeros((len(q_m), prm.max_candidates), np.float32)
D_id = np.zeros((len(q_m), prm.max_candidates), np.int32)
# no need to retrieve extra terms in the last iteration
if n_iter == prm.n_iterations - 1:
extra_terms = False
else:
extra_terms = True
# allow the search engine to cache queries only in the first iteration.
if n_iter == 0:
save_cache = prm.use_cache
else:
save_cache = False
max_cand = prm.max_candidates
qs = []
for i, q_lst in enumerate(self.options['current_queries']):
q = []
for j, word in enumerate(q_lst):
if q_m[i,j] == 1:
q.append(str(word))
q = ' '.join(q)
if len(q) == 0:
q = 'dummy'
qs.append(q)
# only used to print the reformulated queries.
self.options['reformulated_queries'][n_iter] = qs
# always return one more candidate because one of them might be the input doc.
candss = self.options['engine'].get_candidates(qs, max_cand, prm.max_feedback_docs, save_cache, extra_terms)
for i, cands in enumerate(candss):
D_truth_dic = {}
for d_truth in D_truth[i]:
if d_truth > -1:
D_truth_dic[d_truth] = 0
            D_id[i,:len(cands)] = list(cands.keys())  # list() keeps this valid on Python 3
j = 0
m = 0
cand_ids = []
selected_docs = np.arange(prm.max_feedback_docs)
if is_train:
selected_docs = np.random.choice(selected_docs, size=prm.max_feedback_docs_train, replace=False)
for k, (cand_id, (words_idx, words)) in enumerate(cands.items()):
cand_ids.append(cand_id)
# no need to add candidate words in the last iteration.
if n_iter < prm.n_iterations - 1:
# only add docs selected by sampling (if training).
if k in selected_docs:
words = words[:prm.max_terms_per_doc]
words_idx = words_idx[:prm.max_terms_per_doc]
D_i[i,m,:len(words_idx)] = words_idx
# append empty strings, so the list size becomes <dim>.
words = words + max(0, prm.max_words_input - len(words)) * ['']
# append new words to the list of current queries.
self.options['current_queries'][i] += words
m += 1
if cand_id in D_truth_dic:
D_gt_m[i,j] = 1.
j += 1
cands_set = set(cands.keys())
if qs[i].lower() in self.options['engine'].title_id_map:
input_doc_id = self.options['engine'].title_id_map[qs[i].lower()]
# Remove input doc from returned docs.
# This operation does not raise an error if the element is not there.
cands_set.discard(input_doc_id)
intersec = len(set(D_truth_dic.keys()) & cands_set)
recall = intersec / max(1., float(len(D_truth_dic)))
precision = intersec / max(1., float(prm.max_candidates))
metrics[i,prm.metrics_map['RECALL']] = recall
metrics[i,prm.metrics_map['PRECISION']] = precision
metrics[i,prm.metrics_map['F1']] = 2 * recall * precision / max(0.01, recall + precision)
avg_precision = average_precision.compute(D_truth_dic.keys(), cand_ids)
metrics[i,prm.metrics_map['MAP']] = avg_precision
metrics[i,prm.metrics_map['LOG-GMAP']] = np.log(avg_precision + 1e-5)
output_storage[0][0] = metrics
output_storage[1][0] = D_i
output_storage[2][0] = D_id
output_storage[3][0] = D_gt_m
def grad(self, inputs, output_grads):
return [tensor.zeros_like(ii, dtype=theano.config.floatX) for ii in inputs]
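# Usage sketch (illustrative only; `opts` must carry the 'engine' and
# 'current_queries' entries that perform() reads, and prm must be configured):
#
#   q_m = tensor.fmatrix('q_m')            # query-term mask, one row per query
#   D_truth = tensor.imatrix('D_truth')    # ground-truth doc ids, padded with -1
#   n_iter = tensor.iscalar('n_iter')
#   is_train = tensor.iscalar('is_train')
#   search = Search(opts)
#   metrics, D_i, D_id, D_gt_m = search(q_m, D_truth, n_iter, is_train)
#   f = theano.function([q_m, D_truth, n_iter, is_train], metrics)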
|
from pyautofinance.common.engine.components_assembly import ComponentsAssembly
from pyautofinance.common.engine.engine import Engine
|
import pika
import json
import psycopg2
CREATE_TABLE = "CREATE TABLE IF NOT EXISTS person (id SERIAL, name VARCHAR(80), gender VARCHAR(6), age integer);"
INSERT_SQL = "INSERT INTO person (name, gender, age) VALUES (%s, %s, %s);"
def callback(ch, method, properties, body):
"""
Method used to consume the message on queue.
Each message consumed will be parsed into JSON and persisted.
"""
ch.basic_ack(delivery_tag=method.delivery_tag)
print("Message received.")
data = json.loads(body)
persist(data)
def persist(data):
"""This method persists the new person into the database."""
conn = psycopg2.connect(host="localhost", database="integration", user="postgres", password="postgres")
cursor = conn.cursor()
cursor.execute(INSERT_SQL, (data["name"], data["gender"], data["age"]))
    conn.commit()
    cursor.close()
    conn.close()  # release the connection so it is not leaked per message
def create_table():
"""
Method used to create the person table on database.
If the table already exists, this method will do nothing.
"""
conn = psycopg2.connect(host="localhost", database="integration", user="postgres", password="postgres")
cursor = conn.cursor()
cursor.execute(CREATE_TABLE)
conn.commit()
cursor.close()
connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
channel = connection.channel()
channel.queue_declare(queue="integration",
durable=True)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback,
queue="integration")
create_table()
print("The consumer application has been started.")
channel.start_consuming()
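# Producer-side sketch (for illustration only; run it separately from this
# consumer). It publishes one JSON person message to the same "integration"
# queue; the example payload values are arbitrary:
#
# import json
# import pika
#
# conn = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
# ch = conn.channel()
# ch.queue_declare(queue="integration", durable=True)
# ch.basic_publish(exchange="",
#                  routing_key="integration",
#                  body=json.dumps({"name": "Ana", "gender": "female", "age": 30}),
#                  properties=pika.BasicProperties(delivery_mode=2))  # persistent
# conn.close()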
|
import re
import sys
def move_ship(from_pos: tuple, from_ori: tuple, cmds: list) -> tuple:
turns = [(1, 0), (0, 1), (-1, 0), (0, -1)]
posX, posY = from_pos
oriX, oriY = from_ori
for cmd, n in cmds:
if cmd == "F":
posX += oriX * n
posY += oriY * n
if cmd == "N":
posY += n
if cmd == "S":
posY -= n
if cmd == "E":
posX += n
if cmd == "W":
posX -= n
# can only have {90, 180, 270}
n //= 90
i = turns.index((oriX, oriY))
if cmd == "L":
oriX, oriY = turns[(i + n) % 4]
if cmd == "R":
oriX, oriY = turns[(i + 4 - n) % 4]
return posX, posY
def move_ship_complex(cmds, pos, ori, use_waypoint: bool) -> complex:
dirs = {"N": +1j, "S": -1j, "E": +1, "W": -1}
for cmd, n in cmds:
if cmd == "F":
pos += n * ori
elif cmd == "L":
ori *= 1j ** (n // 90)
elif cmd == "R":
ori *= 1j ** (-n // 90) # or 4 - n//90 to make use of the L-rot
else:
if use_waypoint:
ori += n * dirs[cmd] # move waypoint
else:
pos += n * dirs[cmd] # move self
return pos
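# Worked example of the complex-number turns above (comment only): the
# orientation is a complex number, so a 90-degree left turn is a multiplication
# by 1j. Starting from ori = 1+0j (east), "L90" gives 1j (north), "L180" gives
# 1j**2 = -1 (west), and "R90" gives 1j**-1 = -1j (south). With
# use_waypoint=True the same rotation is applied to the waypoint vector instead.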
def mh(x, y):
return abs(x) + abs(y)
if __name__ == "__main__":
with open(sys.argv[1], "rt") as fp:
cmds = [l.strip() for l in fp.readlines()]
cmds = [re.match(r"([NSEWLRF])(\d+)", c).groups() for c in cmds]
cmds = [(d, int(n)) for (d, n) in cmds]
px, py = move_ship(from_pos=(0, 0), from_ori=(1, 0), cmds=cmds)
print(f"Part 1: {mh(px, py)}")
p = move_ship_complex(cmds, pos=0 + 0j, ori=1 + 0j, use_waypoint=False)
print(f"Part 1 (using complex numbers): {mh(p.real, p.imag)}")
p = move_ship_complex(cmds, pos=0 + 0j, ori=10 + 1j, use_waypoint=True)
print(f"Part 2 (using complex numbers): {mh(p.real, p.imag)}")
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to FeatureColumn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _maybe_reshape_input_tensor(tensor, column_name, output_rank):
"""Reshape the input tensor by the following rule.
1. If `output_rank > input_rank + 1`, raise a `ValueError`.
2. If `output_rank == input_rank + 1`, expand the tensor by one dimension.
3. If `output_rank == input_rank`, do nothing.
4. If `output_rank < input_rank`, flatten the inner dimensions of the tensor.
Args:
tensor: A Tensor or SparseTensor to be reshaped.
column_name: A string name of the feature column for the tensor.
output_rank: the desired rank of the tensor.
Returns:
A reshaped Tensor or SparseTensor.
Raises:
ValueError: if `output_rank > input_rank + 1` for the input tensor.
"""
input_rank = tensor.get_shape().ndims
if input_rank is None and isinstance(tensor, sparse_tensor_py.SparseTensor):
# Try to get the rank of a sparse tensor by its dense_shape's shape.
input_rank = tensor.dense_shape.get_shape().as_list()[0]
if input_rank is None:
raise ValueError('Error while processing column {}. Rank of input Tensor '
'can not be None.'.format(column_name))
if output_rank > input_rank + 1:
raise ValueError('Error while processing column {}. Rank of input Tensor '
'({}) should be the same as output_rank ({}). For '
'example, sequence data should typically be 3 '
'dimensional (rank 3) while non-sequence data is '
'typically 2 dimensional (rank 2).'.format(
column_name, input_rank, output_rank))
elif output_rank == input_rank + 1:
# Expand the tensor's shape by 1 dimension.
if isinstance(tensor, sparse_tensor_py.SparseTensor):
output_shape = array_ops.concat([tensor.dense_shape, [1]], 0)
return sparse_ops.sparse_reshape(tensor, output_shape)
else:
reshaped = array_ops.expand_dims(tensor, -1)
# Try to calculate the new shape.
static_shape = tensor.get_shape()
if static_shape is not None and static_shape.dims is not None:
reshaped.set_shape(static_shape.as_list() + [1])
return reshaped
elif output_rank < input_rank:
return layers._inner_flatten(tensor, output_rank) # pylint: disable=protected-access
else:
return tensor
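# Illustrative examples of the rules above (comment only): a dense Tensor of
# shape [batch, 10] with output_rank=3 becomes [batch, 10, 1] (rule 2), with
# output_rank=2 it is returned unchanged (rule 3), and a [batch, 5, 2] Tensor
# with output_rank=2 is flattened to [batch, 10] (rule 4).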
def _input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank,
default_name,
cols_to_outs=None):
"""Implementation of `input_from(_sequence)_feature_columns`."""
columns_to_tensors = columns_to_tensors.copy()
check_feature_columns(feature_columns)
if cols_to_outs is not None and not isinstance(cols_to_outs, dict):
raise ValueError('cols_to_outs must be a dict unless None')
with variable_scope.variable_scope(scope,
default_name=default_name,
values=columns_to_tensors.values()):
output_tensors = []
transformer = _Transformer(columns_to_tensors)
if weight_collections:
weight_collections = list(set(list(weight_collections) +
[ops.GraphKeys.GLOBAL_VARIABLES]))
for column in sorted(set(feature_columns), key=lambda x: x.key):
with variable_scope.variable_scope(None,
default_name=column.name,
values=columns_to_tensors.values()):
transformed_tensor = transformer.transform(column)
if output_rank == 3:
transformed_tensor = nest.map_structure(
functools.partial(
_maybe_reshape_input_tensor,
column_name=column.name,
output_rank=output_rank), transformed_tensor)
try:
# pylint: disable=protected-access
arguments = column._deep_embedding_lookup_arguments(
transformed_tensor)
output_tensors.append(
fc._embeddings_from_arguments( # pylint: disable=protected-access
column,
arguments,
weight_collections,
trainable,
output_rank=output_rank))
except NotImplementedError as ee:
try:
# pylint: disable=protected-access
output_tensors.append(column._to_dnn_input_layer(
transformed_tensor,
weight_collections,
trainable,
output_rank=output_rank))
except ValueError as e:
raise ValueError('Error creating input layer for column: {}.\n'
'{}, {}'.format(column.name, e, ee))
if cols_to_outs is not None:
cols_to_outs[column] = output_tensors[-1]
return array_ops.concat(output_tensors, output_rank - 1)
def input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections=None,
trainable=True,
scope=None,
cols_to_outs=None):
"""A tf.contrib.layers style input layer builder based on FeatureColumns.
Generally a single example in training data is described with feature columns.
At the first layer of the model, this column oriented data should be converted
to a single tensor. Each feature column needs a different kind of operation
during this conversion. For example sparse features need a totally different
handling than continuous features.
Example:
```python
# Building model for training
columns_to_tensor = tf.parse_example(...)
first_layer = input_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=feature_columns)
second_layer = fully_connected(inputs=first_layer, ...)
...
```
where feature_columns can be defined as follows:
```python
sparse_feature = sparse_column_with_hash_bucket(
column_name="sparse_col", ...)
sparse_feature_emb = embedding_column(sparse_id_column=sparse_feature, ...)
real_valued_feature = real_valued_column(...)
real_valued_buckets = bucketized_column(
source_column=real_valued_feature, ...)
feature_columns=[sparse_feature_emb, real_valued_buckets]
```
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived by FeatureColumn.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
cols_to_outs: Optional dict from feature column to output tensor,
which is concatenated into the returned tensor.
Returns:
A Tensor which can be consumed by hidden layers in the neural network.
Raises:
ValueError: if FeatureColumn cannot be consumed by a neural network.
"""
return _input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank=2,
default_name='input_from_feature_columns',
cols_to_outs=cols_to_outs)
@experimental
def sequence_input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections=None,
trainable=True,
scope=None):
"""Builds inputs for sequence models from `FeatureColumn`s.
See documentation for `input_from_feature_columns`. The following types of
`FeatureColumn` are permitted in `feature_columns`: `_OneHotColumn`,
`_EmbeddingColumn`, `_ScatteredEmbeddingColumn`, `_RealValuedColumn`,
`_DataFrameColumn`. In addition, columns in `feature_columns` may not be
constructed using any of the following: `ScatteredEmbeddingColumn`,
`BucketizedColumn`, `CrossedColumn`.
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived by FeatureColumn.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A Tensor which can be consumed by hidden layers in the neural network.
Raises:
ValueError: if FeatureColumn cannot be consumed by a neural network.
"""
_check_supported_sequence_columns(feature_columns)
_check_forbidden_sequence_columns(feature_columns)
return _input_from_feature_columns(
columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank=3,
default_name='sequence_input_from_feature_columns')
def _create_embedding_lookup(column,
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections):
"""Creates variables and returns predictions for linear weights in a model.
Args:
column: the column we're working on.
columns_to_tensors: a map from column name to tensors.
embedding_lookup_arguments: arguments for embedding lookup.
num_outputs: how many outputs.
trainable: whether the variable we create is trainable.
weight_collections: weights will be placed here.
Returns:
variables: the created embeddings.
predictions: the computed predictions.
"""
with variable_scope.variable_scope(
None, default_name=column.name, values=columns_to_tensors.values()):
variable = contrib_variables.model_variable(
name='weights',
shape=[embedding_lookup_arguments.vocab_size, num_outputs],
dtype=dtypes.float32,
initializer=embedding_lookup_arguments.initializer,
trainable=trainable,
collections=weight_collections)
if fc._is_variable(variable): # pylint: disable=protected-access
variable = [variable]
else:
variable = variable._get_variable_list() # pylint: disable=protected-access
predictions = embedding_ops.safe_embedding_lookup_sparse(
variable,
embedding_lookup_arguments.input_tensor,
sparse_weights=embedding_lookup_arguments.weight_tensor,
combiner=embedding_lookup_arguments.combiner,
name=column.name + '_weights')
return variable, predictions
def _create_joint_embedding_lookup(columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections):
"""Creates an embedding lookup for all columns sharing a single weight."""
for arg in embedding_lookup_arguments:
assert arg.weight_tensor is None, (
'Joint sums for weighted sparse columns are not supported. '
'Please use weighted_sum_from_feature_columns instead.')
assert arg.combiner == 'sum', (
'Combiners other than sum are not supported for joint sums. '
'Please use weighted_sum_from_feature_columns instead.')
assert len(embedding_lookup_arguments) >= 1, (
'At least one column must be in the model.')
prev_size = 0
sparse_tensors = []
for a in embedding_lookup_arguments:
t = a.input_tensor
values = t.values + prev_size
prev_size += a.vocab_size
sparse_tensors.append(
sparse_tensor_py.SparseTensor(t.indices,
values,
t.dense_shape))
sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
with variable_scope.variable_scope(
None, default_name='linear_weights', values=columns_to_tensors.values()):
variable = contrib_variables.model_variable(
name='weights',
shape=[prev_size, num_outputs],
dtype=dtypes.float32,
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=weight_collections)
if fc._is_variable(variable): # pylint: disable=protected-access
variable = [variable]
else:
variable = variable._get_variable_list() # pylint: disable=protected-access
predictions = embedding_ops.safe_embedding_lookup_sparse(
variable,
sparse_tensor,
sparse_weights=None,
combiner='sum',
name='_weights')
return variable, predictions
def joint_weighted_sum_from_feature_columns(columns_to_tensors,
feature_columns,
num_outputs,
weight_collections=None,
trainable=True,
scope=None):
"""A restricted linear prediction builder based on FeatureColumns.
As long as all feature columns are unweighted sparse columns this computes the
prediction of a linear model which stores all weights in a single variable.
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived from FeatureColumn.
num_outputs: An integer specifying number of outputs. Default value is 1.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A tuple containing:
* A Tensor which represents predictions of a linear model.
* A list of Variables storing the weights.
* A Variable which is used for bias.
Raises:
ValueError: if FeatureColumn cannot be used for linear predictions.
"""
columns_to_tensors = columns_to_tensors.copy()
check_feature_columns(feature_columns)
with variable_scope.variable_scope(
scope,
default_name='joint_weighted_sum_from_feature_columns',
values=columns_to_tensors.values()):
transformer = _Transformer(columns_to_tensors)
embedding_lookup_arguments = []
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformed_tensor = transformer.transform(column)
try:
embedding_lookup_arguments.append(
column._wide_embedding_lookup_arguments(transformed_tensor)) # pylint: disable=protected-access
except NotImplementedError:
raise NotImplementedError('Real-valued columns are not supported. '
'Use weighted_sum_from_feature_columns '
'instead, or bucketize these columns.')
variable, predictions_no_bias = _create_joint_embedding_lookup(
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections)
bias = contrib_variables.model_variable(
'bias_weight',
shape=[num_outputs],
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=_add_variable_collection(weight_collections))
_log_variable(bias)
predictions = nn_ops.bias_add(predictions_no_bias, bias)
return predictions, variable, bias
def weighted_sum_from_feature_columns(columns_to_tensors,
feature_columns,
num_outputs,
weight_collections=None,
trainable=True,
scope=None):
"""A tf.contrib.layers style linear prediction builder based on FeatureColumn.
Generally a single example in training data is described with feature columns.
This function generates weighted sum for each num_outputs. Weighted sum refers
to logits in classification problems. It refers to prediction itself for
linear regression problems.
Example:
```
# Building model for training
feature_columns = (
real_valued_column("my_feature1"),
...
)
columns_to_tensor = tf.parse_example(...)
logits = weighted_sum_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=feature_columns,
num_outputs=1)
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
logits=logits)
```
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived from FeatureColumn.
num_outputs: An integer specifying number of outputs. Default value is 1.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A tuple containing:
* A Tensor which represents predictions of a linear model.
* A dictionary which maps feature_column to corresponding Variable.
* A Variable which is used for bias.
Raises:
ValueError: if FeatureColumn cannot be used for linear predictions.
"""
columns_to_tensors = columns_to_tensors.copy()
check_feature_columns(feature_columns)
with variable_scope.variable_scope(
scope,
default_name='weighted_sum_from_feature_columns',
values=columns_to_tensors.values()):
output_tensors = []
column_to_variable = dict()
transformer = _Transformer(columns_to_tensors)
# pylint: disable=protected-access
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformed_tensor = transformer.transform(column)
try:
embedding_lookup_arguments = column._wide_embedding_lookup_arguments(
transformed_tensor)
variable, predictions = _create_embedding_lookup(
column,
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections)
except NotImplementedError:
with variable_scope.variable_scope(
None,
default_name=column.name,
values=columns_to_tensors.values()):
tensor = column._to_dense_tensor(transformed_tensor)
tensor = _maybe_reshape_input_tensor(
tensor, column.name, output_rank=2)
variable = [
contrib_variables.model_variable(
name='weight',
shape=[tensor.get_shape()[1], num_outputs],
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=weight_collections)
]
predictions = math_ops.matmul(tensor, variable[0], name='matmul')
except ValueError as ee:
raise ValueError('Error creating weighted sum for column: {}.\n'
'{}'.format(column.name, ee))
output_tensors.append(array_ops.reshape(
predictions, shape=(-1, num_outputs)))
column_to_variable[column] = variable
_log_variable(variable)
fc._maybe_restore_from_checkpoint(column._checkpoint_path(), variable) # pylint: disable=protected-access
# pylint: enable=protected-access
predictions_no_bias = math_ops.add_n(output_tensors)
bias = contrib_variables.model_variable(
'bias_weight',
shape=[num_outputs],
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=_add_variable_collection(weight_collections))
_log_variable(bias)
predictions = nn_ops.bias_add(predictions_no_bias, bias)
return predictions, column_to_variable, bias
def parse_feature_columns_from_examples(serialized,
feature_columns,
name=None,
example_names=None):
"""Parses tf.Examples to extract tensors for given feature_columns.
This is a wrapper of 'tf.parse_example'.
Example:
```python
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=my_features)
# Where my_features are:
# Define features and transformations
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature, boundaries=[...])
my_features = [embedding_feature_b, real_feature_buckets, embedding_feature_a]
```
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn.
name: A name for this operation (optional).
example_names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos in the batch.
Returns:
A `dict` mapping FeatureColumn to `Tensor` and `SparseTensor` values.
"""
check_feature_columns(feature_columns)
columns_to_tensors = parsing_ops.parse_example(
serialized=serialized,
features=fc.create_feature_spec_for_parsing(feature_columns),
name=name,
example_names=example_names)
transformer = _Transformer(columns_to_tensors)
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformer.transform(column)
return columns_to_tensors
def transform_features(features, feature_columns):
"""Returns transformed features based on features columns passed in.
Example:
```python
columns_to_tensor = transform_features(features=features,
feature_columns=feature_columns)
# Where my_features are:
# Define features and transformations
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature, boundaries=[...])
feature_columns = [embedding_feature_b,
real_feature_buckets,
embedding_feature_a]
```
Args:
features: A dictionary of features.
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn.
Returns:
A `dict` mapping FeatureColumn to `Tensor` and `SparseTensor` values.
"""
columns_to_tensor = features.copy()
check_feature_columns(feature_columns)
transformer = _Transformer(columns_to_tensor)
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformer.transform(column)
keys = list(columns_to_tensor.keys())
for k in keys:
if k not in feature_columns:
columns_to_tensor.pop(k)
return columns_to_tensor
def parse_feature_columns_from_sequence_examples(
serialized,
context_feature_columns,
sequence_feature_columns,
name=None,
example_name=None):
"""Parses tf.SequenceExamples to extract tensors for given `FeatureColumn`s.
Args:
serialized: A scalar (0-D Tensor) of type string, a single serialized
`SequenceExample` proto.
context_feature_columns: An iterable containing the feature columns for
context features. All items should be instances of classes derived from
`_FeatureColumn`. Can be `None`.
sequence_feature_columns: An iterable containing the feature columns for
sequence features. All items should be instances of classes derived from
`_FeatureColumn`. Can be `None`.
name: A name for this operation (optional).
example_name: A scalar (0-D Tensor) of type string (optional), the names of
the serialized proto.
Returns:
A tuple consisting of (context_features, sequence_features)
* context_features: a dict mapping `FeatureColumns` from
`context_feature_columns` to their parsed `Tensors`/`SparseTensor`s.
* sequence_features: a dict mapping `FeatureColumns` from
`sequence_feature_columns` to their parsed `Tensors`/`SparseTensor`s.
"""
# Sequence example parsing requires a single (scalar) example.
try:
serialized = array_ops.reshape(serialized, [])
except ValueError as e:
raise ValueError(
        'serialized must contain a single sequence example. Batching must be '
        'done after parsing for sequence examples. Error: {}'.format(e))
if context_feature_columns is None:
context_feature_columns = []
if sequence_feature_columns is None:
sequence_feature_columns = []
check_feature_columns(context_feature_columns)
context_feature_spec = fc.create_feature_spec_for_parsing(
context_feature_columns)
check_feature_columns(sequence_feature_columns)
sequence_feature_spec = fc._create_sequence_feature_spec_for_parsing( # pylint: disable=protected-access
sequence_feature_columns, allow_missing_by_default=False)
return parsing_ops.parse_single_sequence_example(serialized,
context_feature_spec,
sequence_feature_spec,
example_name,
name)
def _log_variable(variable):
if isinstance(variable, list):
for var in variable:
      if fc._is_variable(var):  # pylint: disable=protected-access
logging.info('Created variable %s, with device=%s', var.name,
var.device)
elif fc._is_variable(variable): # pylint: disable=protected-access
logging.info('Created variable %s, with device=%s', variable.name,
variable.device)
def _infer_real_valued_column_for_tensor(name, tensor):
"""Creates a real_valued_column for given tensor and name."""
if isinstance(tensor, sparse_tensor_py.SparseTensor):
    raise ValueError(
        'SparseTensor is not supported for auto detection. Please define '
        'corresponding FeatureColumn for tensor {} {}.'.format(name, tensor))
if not (tensor.dtype.is_integer or tensor.dtype.is_floating):
    raise ValueError(
        'Non integer or non floating types are not supported for auto detection'
        '. Please define corresponding FeatureColumn for tensor {} {}.'.format(
            name, tensor))
shape = tensor.get_shape().as_list()
dimension = 1
for i in range(1, len(shape)):
dimension *= shape[i]
return fc.real_valued_column(name, dimension=dimension, dtype=tensor.dtype)
def infer_real_valued_columns(features):
if not isinstance(features, dict):
return [_infer_real_valued_column_for_tensor('', features)]
feature_columns = []
for key, value in features.items():
feature_columns.append(_infer_real_valued_column_for_tensor(key, value))
return feature_columns
def check_feature_columns(feature_columns):
"""Checks the validity of the set of FeatureColumns.
Args:
feature_columns: An iterable of instances or subclasses of FeatureColumn.
Raises:
ValueError: If `feature_columns` is a dict.
ValueError: If there are duplicate feature column keys.
"""
if isinstance(feature_columns, dict):
raise ValueError('Expected feature_columns to be iterable, found dict.')
seen_keys = set()
for f in feature_columns:
key = f.key
if key in seen_keys:
raise ValueError('Duplicate feature column key found for column: {}. '
'This usually means that the column is almost identical '
'to another column, and one must be discarded.'.format(
f.name))
seen_keys.add(key)
class _Transformer(object):
"""Handles all the transformations defined by FeatureColumn if needed.
FeatureColumn specifies how to digest an input column to the network. Some
feature columns require data transformations. This class handles those
transformations if they are not handled already.
Some features may be used in more than one place. For example, one can use a
bucketized feature by itself and a cross with it. In that case Transformer
should create only one bucketization op instead of multiple ops for each
feature column. To handle re-use of transformed columns, Transformer keeps all
previously transformed columns.
Example:
```python
sparse_feature = sparse_column_with_hash_bucket(...)
real_valued_feature = real_valued_column(...)
real_valued_buckets = bucketized_column(source_column=real_valued_feature,
...)
sparse_x_real = crossed_column(
columns=[sparse_feature, real_valued_buckets], hash_bucket_size=10000)
columns_to_tensor = tf.parse_example(...)
transformer = Transformer(columns_to_tensor)
sparse_x_real_tensor = transformer.transform(sparse_x_real)
sparse_tensor = transformer.transform(sparse_feature)
real_buckets_tensor = transformer.transform(real_valued_buckets)
```
"""
def __init__(self, columns_to_tensors):
"""Initializes transformer.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have FeatureColumn as
a key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
Transformed features are inserted in columns_to_tensors.
"""
self._columns_to_tensors = columns_to_tensors
def transform(self, feature_column):
"""Returns a Tensor which represents given feature_column.
Args:
feature_column: An instance of FeatureColumn.
Returns:
A Tensor which represents given feature_column. It may create a new Tensor
or re-use an existing one.
Raises:
ValueError: if FeatureColumn cannot be handled by this Transformer.
"""
logging.debug('Transforming feature_column %s', feature_column)
if feature_column in self._columns_to_tensors:
# Feature_column is already transformed.
return self._columns_to_tensors[feature_column]
feature_column.insert_transformed_feature(self._columns_to_tensors)
if feature_column not in self._columns_to_tensors:
raise ValueError('Column {} is not supported.'.format(
feature_column.name))
return self._columns_to_tensors[feature_column]
def _add_variable_collection(weight_collections):
if weight_collections:
weight_collections = list(
set(list(weight_collections) + [ops.GraphKeys.GLOBAL_VARIABLES]))
return weight_collections
# TODO(jamieas): remove the following logic once all FeatureColumn types are
# supported for sequences.
# pylint: disable=protected-access
_SUPPORTED_SEQUENCE_COLUMNS = (fc._OneHotColumn,
fc._EmbeddingColumn,
fc._RealValuedColumn,
fc._RealValuedVarLenColumn)
_FORBIDDEN_SEQUENCE_COLUMNS = (fc._ScatteredEmbeddingColumn,
fc._BucketizedColumn,
fc._CrossedColumn)
def _check_supported_sequence_columns(feature_columns):
"""Asserts `feature_columns` are in `_SUPPORTED_SEQUENCE_COLUMNS`."""
for col in feature_columns:
if not isinstance(col, _SUPPORTED_SEQUENCE_COLUMNS):
raise ValueError(
'FeatureColumn type {} is not currently supported for sequence data.'.
format(type(col).__name__))
def _get_parent_columns(feature_column):
"""Returns the tuple of `FeatureColumn`s that `feature_column` depends on."""
if isinstance(feature_column, (fc._WeightedSparseColumn,
fc._OneHotColumn,
fc._EmbeddingColumn,)):
return (feature_column.sparse_id_column,)
if isinstance(feature_column, (fc._BucketizedColumn,)):
return (feature_column.source_column,)
if isinstance(feature_column, (fc._CrossedColumn)):
return tuple(feature_column.columns)
return tuple()
def _gather_feature_columns(feature_columns):
"""Returns a list of all ancestor `FeatureColumns` of `feature_columns`."""
gathered = list(feature_columns)
i = 0
while i < len(gathered):
for column in _get_parent_columns(gathered[i]):
if column not in gathered:
gathered.append(column)
i += 1
return gathered
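# Worked example (comment only): for an embedding_column wrapping a
# sparse_column_with_hash_bucket, _gather_feature_columns([embedding]) returns
# [embedding, sparse] because _get_parent_columns follows sparse_id_column;
# crossed and bucketized columns are expanded the same way via their parents.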
def _check_forbidden_sequence_columns(feature_columns):
"""Recursively checks `feature_columns` for `_FORBIDDEN_SEQUENCE_COLUMNS`."""
all_feature_columns = _gather_feature_columns(feature_columns)
for feature_column in all_feature_columns:
if isinstance(feature_column, _FORBIDDEN_SEQUENCE_COLUMNS):
raise ValueError(
'Column {} is of type {}, which is not currently supported for '
'sequences.'.format(feature_column.name,
type(feature_column).__name__))
|
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.sensors.external_task_sensor import ExternalTaskSensor
from dag_checks.base import BaseCheck
class CheckOperatorsReferenceExistingDagTaskIds(BaseCheck):
def __init__(self, *args, **kwargs):
super( # pylint: disable=super-with-arguments
CheckOperatorsReferenceExistingDagTaskIds, self
).__init__(*args, **kwargs)
self.dag_task_map = {d.dag_id: [t.task_id for t in d.tasks] for d in self.dag_bag.dags.values()}
def get_errors(self):
errors = []
for dag in self.dag_bag.dags.values():
for task in dag.tasks:
if isinstance(task, ExternalTaskSensor):
# Make sure that the dag_id exists in other DAGs
external_dag_id = task.external_dag_id
external_task_id = task.external_task_id
if external_dag_id not in self.dag_task_map:
err = (
f"ExternalTaskSensor (task: {task.task_id}) in DAG {dag.dag_id} references "
f"dag_id that does not exist: {external_dag_id}"
)
errors.append(err)
if external_task_id is not None:
if external_task_id not in self.dag_task_map.get(external_dag_id, []):
err = (
f"ExternalTaskSensor (task: {task.task_id}) in DAG {dag.dag_id} references "
f"task_id {external_task_id} that does not exist in {external_dag_id}"
)
errors.append(err)
elif isinstance(task, TriggerDagRunOperator):
# Make sure that TriggerDagRunOperator use existing dag_id
external_dag_id = task.trigger_dag_id
if external_dag_id not in self.dag_task_map:
err = (
f"TriggerDagRunOperator (task: {task.task_id}) in DAG {dag.dag_id} references "
f"dag_id that does not exist: {external_dag_id}"
)
errors.append(err)
return errors
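# Usage sketch (illustrative; it assumes BaseCheck is constructed around an
# Airflow DagBag, which is how self.dag_bag is used above):
#
# from airflow.models import DagBag
# check = CheckOperatorsReferenceExistingDagTaskIds(dag_bag=DagBag())
# for error in check.get_errors():
#     print(error)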
|
strings = ""
integers = 9
floats = 99.09
booleans = False
|
class Mario(object):
def move(self):
        print('i am moving')
class Mushroom(object):
def eat(self):
        print('i am bigger!!')
class BiggerMario(Mario, Mushroom):
def flower(self):
        print('now i can shoot')
bm = BiggerMario()
bm.move()
bm.eat()
bm.flower()
# this shows that Python supports multiple inheritance
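# Method resolution order for the classes above (comment only):
# BiggerMario.__mro__ is (BiggerMario, Mario, Mushroom, object), so move() is
# looked up on Mario first and eat() on Mushroom.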
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-11-30 12:37
from __future__ import unicode_literals
from django.db import migrations, connection
class Migration(migrations.Migration):
dependencies = [
('activities', '0034_auto_20201130_1316'),
]
operations = [
]
|
# -*- coding: utf-8 -*-
"""
run_optimization.py generated by WhatsOpt 1.10.4
"""
# DO NOT EDIT unless you know what you are doing
# analysis_id: 942
import sys
import numpy as np
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
from run_parameters_init import initialize
from openmdao.api import Problem, SqliteRecorder, CaseReader, ScipyOptimizeDriver #, pyOptSparseDriver
from nested_sellar import NestedSellar
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-b", "--batch",
action="store_true", dest="batch", default=False,
help="do not plot anything")
(options, args) = parser.parse_args()
pb = Problem(NestedSellar())
class NamedScipyOptimizeDriver(ScipyOptimizeDriver):
def _get_name(self):
return 'scipy_optimizer_slsqp'
pb.driver = NamedScipyOptimizeDriver()
pb.driver.options['optimizer'] = 'SLSQP'
pb.driver.options['tol'] = 1.0e-06
pb.driver.options['maxiter'] = 100
pb.driver.options['disp'] = True
#pb.driver.options['debug_print'] = ['desvars','ln_cons','nl_cons','objs', 'totals']
pb.driver.options['debug_print'] = []
case_recorder_filename = 'nested_sellar_optimization.sqlite'
print(case_recorder_filename)
recorder = SqliteRecorder(case_recorder_filename)
# pb.add_recorder(recorder)
pb.driver.add_recorder(recorder)
pb.model.add_recorder(recorder)
# Derivatives are computed via the finite-difference method;
# comment the line below out if partial derivatives are declared
pb.model.approx_totals(method='fd', step=1e-6, form='central')
pb.model.add_design_var('x', lower=0, upper=10)
pb.model.add_design_var('z', lower=0, upper=10)
pb.model.add_objective('f')
pb.model.add_constraint('g1', upper=0.)
pb.model.add_constraint('g2', upper=0.)
pb.setup()
initialize(pb)
pb.run_driver()
print("x= {}".format(pb['x']))
print("z= {}".format(pb['z']))
if options.batch:
exit(0)
# reader = CaseReader(case_recorder_filename)
# cases = reader.list_cases('problem')
# print(cases)
# for i in range(len(cases)):
# obj = cases[i].get_objectives()
# print(obj)
|
from typing import List, Type
from .dict_unpacking import DictUnpackingTransformer
from .formatted_values import FormattedValuesTransformer
from .functions_annotations import FunctionsAnnotationsTransformer
from .starred_unpacking import StarredUnpackingTransformer
from .variables_annotations import VariablesAnnotationsTransformer
from .yield_from import YieldFromTransformer
from .return_from_generator import ReturnFromGeneratorTransformer
from .python2_future import Python2FutureTransformer
from .super_without_arguments import SuperWithoutArgumentsTransformer
from .class_without_bases import ClassWithoutBasesTransformer
from .import_pathlib import ImportPathlibTransformer
from .six_moves import SixMovesTransformer
from .metaclass import MetaclassTransformer
from .string_types import StringTypesTransformer
from .import_dbm import ImportDbmTransformer
from .base import BaseTransformer
transformers = [
# 3.5
VariablesAnnotationsTransformer,
FormattedValuesTransformer,
# 3.4
DictUnpackingTransformer,
StarredUnpackingTransformer,
# 3.2
YieldFromTransformer,
ReturnFromGeneratorTransformer,
# 2.7
FunctionsAnnotationsTransformer,
SuperWithoutArgumentsTransformer,
ClassWithoutBasesTransformer,
ImportPathlibTransformer,
SixMovesTransformer,
MetaclassTransformer,
StringTypesTransformer,
ImportDbmTransformer,
Python2FutureTransformer, # always should be the last transformer
] # type: List[Type[BaseTransformer]]
|
import argparse
import dask_zarr as dz
import napari
def main(z_url):
# Test example, knowing that the file contains only 3 groups,
# and at most 10 scales
plane_names_scale = [['%s/%s' % (r, s) for r in [0]] for s in range(10)]
# Retrieve a list of stacks to form the pyramid representation of the image
stack_list = [
dz.get_lazy_stacks(z_url, plane_names)
for plane_names in plane_names_scale
]
viewer = napari.Viewer()
img = viewer.add_image(stack_list, multiscale=True)
napari.run()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('-z', '--zurl', dest='z_url', type=str,
help='Path or url to a zarr file')
args = parser.parse_args()
main(args.z_url)
|
"""Checks if RMSE of Model Prediction is as low as expected."""
import pandas as pd
from lifetimes import BetaGeoFitter
from sklearn.metrics import mean_squared_error
import pytest
@pytest.fixture
def load_data_and_model():
"""Loads Customer Lifetime Estimator Model"""
model = BetaGeoFitter(penalizer_coef=0.0)
model.load_model("../models/calibration_model.pkl")
summary_cal_holdout = pd.read_csv("../datasets/summary_cal_holdout.csv")
return model, summary_cal_holdout
def test_model_rmse(load_data_and_model):
"""Test RMSE of Predicted Frequency vs Holdout Frequency"""
data = load_data_and_model[1]
model = load_data_and_model[0]
predictions = model.predict(t=243,
frequency=data["frequency_cal"],
recency=data["recency_cal"],
T=data["T_cal"])
rmse = mean_squared_error(data["frequency_holdout"],
predictions,
squared=False)
assert rmse < 0.15, "RMSE is greater than 0.15"
|
from flask import render_template,request,redirect,url_for,abort
from . import main
# ReasonForm and Reason (used by new_reason below) are assumed to be defined in
# .forms and ..models respectively.
from .forms import UpdateProfile, ReasonForm
from ..models import User, Reason
from flask_login import login_required,current_user
from .. import db,photos
import markdown2
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
title = 'Home - Welcome to pomodoro'
content = "WELCOME TO POMODORO APP"
return render_template('index.html', title = title,content = content)
@main.route('/about')
def about():
return render_template('about.html', title = 'About')
@main.route('/pomodoro')
@login_required
def pomodoro():
'''
View root page function that returns the index page and its data
'''
title = 'Home - Welcome to pomodoro'
content = "WELCOME TO POMODORO APP"
return render_template('pomodoro.html', title = title,content = content)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route('/reason/new/<int:id>', methods = ['GET','POST'])
@login_required
def new_reason(id):
form = ReasonForm()
reason = get_reason(id)
if form.validate_on_submit():
title = form.title.data
reason = form.reason.data
# Updated reason instance
new_reason = Reason(reason_id=reason.id,reason_title=title,reason=reason,user=current_user)
# save reason method
new_reason.save_reason()
return redirect(url_for('.reason',id = reason.id ))
title = f'{reason.title} reason'
return render_template('new_reason.html',title = title, reason_form=form, reason=reason) |
import pathlib
from setuptools import setup, find_packages
ROOT_DIR = pathlib.Path(__file__).parent
README = (ROOT_DIR / "README.md").read_text()
setup(
name="poem_parser",
version="0.0.4",
description="Parse poems into stressed and unstressed syllables.",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/obeezzy/poem_parser",
author="Chronic Coder",
author_email="[email protected]",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
packages=find_packages(),
include_package_data=True,
install_requires=["prosodic"],
)
|
"""Build sql queries from template to retrieve info from the database"""
import os.path
import pathlib
from functools import lru_cache
from mako.template import Template
from ._constants import _DB_TABLE_NAMES
__all__ = ["get_query"]
QUERY_DIR = os.path.join(os.path.dirname(__file__), "queries")
def get_query(query_name, photos_ver, **kwargs):
"""Return sqlite query string for an attribute and a given database version"""
# there can be a single query for multiple database versions or separate queries for each version
# try generic version first (most common case), if that fails, look for version specific query
query_string = _get_query_string(query_name, photos_ver)
asset_table = _DB_TABLE_NAMES[photos_ver]["ASSET"]
query_template = Template(query_string)
return query_template.render(asset_table=asset_table, **kwargs)
@lru_cache(maxsize=None)
def _get_query_string(query_name, photos_ver):
"""Return sqlite query string for an attribute and a given database version"""
query_file = pathlib.Path(QUERY_DIR) / f"{query_name}.sql.mako"
if not query_file.is_file():
query_file = pathlib.Path(QUERY_DIR) / f"{query_name}_{photos_ver}.sql.mako"
if not query_file.is_file():
raise FileNotFoundError(f"Query file '{query_file}' not found")
with open(query_file, "r") as f:
query_string = f.read()
return query_string
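# Hypothetical usage sketch (not from the source): "albums" and photos_ver=5 are
# placeholders and must correspond to an existing queries/albums.sql.mako template
# and a key of _DB_TABLE_NAMES for this to render.
if __name__ == "__main__":
    print(get_query("albums", photos_ver=5))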
|
"""Web tests for pixivpy3.api.BasePixivAPI."""
import pytest
from jsonschema import validate
from pixivpy3.api import BasePixivAPI, PixivError as PixivPyError
from .schemas import AUTH_RESPONSE_SCHEMA
from .secret import SECRET
class TestBasePixivAPI:
"""Web tests for pixivpy3.api.BasePixivAPI."""
@pytest.fixture
def api(self):
"""Fixture that returns a BasePixivAPI object."""
return BasePixivAPI(**SECRET['requests_kwargs'])
@pytest.mark.skipif(
'username' not in SECRET or 'password' not in SECRET,
reason='username and password not provided',
)
@pytest.mark.xfail(reason='username and password no longer supported')
def test_auth_username_and_password(self, api):
"""Test authenticate with username and password."""
response = api.auth(username=SECRET['username'], password=SECRET['password'])
validate(instance=response, schema=AUTH_RESPONSE_SCHEMA)
def test_auth_refresh_token(self, api):
"""Test authenticate with refresh token."""
response = api.auth(refresh_token=SECRET['refresh_token'])
validate(instance=response, schema=AUTH_RESPONSE_SCHEMA)
def test_auth_failed(self, api):
"""Test BasePixivAPI behavior on failed authentication."""
with pytest.raises(PixivPyError):
api.auth(username='[email protected]', password='password')
with pytest.raises(PixivPyError):
api.auth(refresh_token='1234567890')
|
from corehq.apps.userreports.extension_points import custom_ucr_expressions
@custom_ucr_expressions.extend()
def abt_ucr_expressions():
return [
('abt_supervisor', 'custom.abt.reports.expressions.abt_supervisor_expression'),
('abt_supervisor_v2', 'custom.abt.reports.expressions.abt_supervisor_v2_expression'),
('abt_supervisor_v2019', 'custom.abt.reports.expressions.abt_supervisor_v2019_expression'),
]
|
# using a 60/20/20 train/validation/test split: hold out 20% for test,
# then 25% of the remaining 80% (20% overall) for validation
import os
from sklearn.model_selection import train_test_split
NUM_CLASSES = 10
data_path = './original/'
for i in range(NUM_CLASSES):
curr_dir_path = data_path + 'c' + str(i) + '/'
xtrain = labels = os.listdir(curr_dir_path)
x, x_test, y, y_test = train_test_split(xtrain,labels,test_size=0.2,train_size=0.8)
x_train, x_val, y_train, y_val = train_test_split(x,y,test_size = 0.25,train_size =0.75)
for x in x_train:
if (not os.path.exists('train/' + 'c' + str(i) + '/')):
os.makedirs('train/' + 'c' + str(i) + '/')
os.rename(data_path + 'c' + str(i) + '/' + x, 'train/' + 'c' + str(i) + '/' + x)
for x in x_test:
if (not os.path.exists('test/' + 'c' + str(i) + '/')):
os.makedirs('test/' + 'c' + str(i) + '/')
os.rename(data_path + 'c' + str(i) + '/' + x, 'test/' + 'c' + str(i) + '/' + x)
for x in x_val:
if (not os.path.exists('validation/' + 'c' + str(i) + '/')):
os.makedirs('validation/' + 'c' + str(i) + '/')
os.rename(data_path + 'c' + str(i) + '/' + x, 'validation/' + 'c' + str(i) + '/' + x) |
from django.db import models
from django.contrib.auth.models import AbstractUser, UserManager
from uuid import uuid4
# Create your models here.
class User(AbstractUser):
address = models.CharField(max_length=2000)
contactNumber = models.CharField(max_length=15)
object = UserManager()
# Products models
class Category(models.Model):
id = models.UUIDField(auto_created=True, default=uuid4, primary_key=True, unique=True)
name = models.CharField(max_length=300)
def __str__(self):
return self.name
class Product(models.Model):
id = models.UUIDField(auto_created=True, default=uuid4, primary_key=True, unique=True)
name = models.CharField(max_length=300)
price = models.FloatField()
images = models.ImageField(null=True)
#images1 = models.ImageField(null=True)
#images1 = models.ImageField(null=True)
#brand
description = models.CharField(max_length=2500)
size = models.CharField(max_length=20) # Probable JSON field
#colors
availability = models.BooleanField()
    category_id = models.ForeignKey(Category, related_name='cat_id', on_delete=models.CASCADE)  # could be a ManyToManyField, since a product might belong to multiple categories
details = models.CharField(max_length=2500)
def __str__(self):
return self.name
'''class Images(models.Model):
id = models.UUIDField(auto_created=True, default=uuid4, primary_key=True, unique=True)
image = models.ImageField(null=False);'''
class ProductReview(models.Model):
id = models.UUIDField(auto_created=True, default=uuid4, primary_key=True, unique=True)
name = models.CharField(max_length=300)
email = models.EmailField()
review = models.CharField(max_length=3000)
productid = models.ForeignKey(Product, on_delete=models.CASCADE)
userid = models.ForeignKey(User, on_delete=models.CASCADE)
class Order(models.Model):
id = models.UUIDField(auto_created=True, default=uuid4, primary_key=True, unique=True)
#name = models.CharField(max_length=300)
#email = models.EmailField()
#address = models.CharField(max_length=2000)
total = models.FloatField(null=True)
productlist = models.ManyToManyField(Product)
userid = models.ForeignKey(User, on_delete=models.CASCADE)
|
from tkinter import *
from tkinter.ttk import *
from itertools import chain
def get_events(widget):
return set(chain.from_iterable(widget.bind_class(cls) for cls in widget.bindtags()))
root = Tk()
a = get_events(Button())
print(a)
root.destroy() |
from itertools import product
from typing import Dict, Tuple, Union, Any
from npsem.model import StructuralCausalModel
from npsem.utils import combinations
from npsem.where_do import POMISs, MISs
def SCM_to_bandit_machine(M: StructuralCausalModel, Y='Y') -> Tuple[Tuple, Dict[Union[int, Any], Dict]]:
G = M.G
mu_arm = list()
arm_setting = dict()
arm_id = 0
all_subsets = list(combinations(sorted(G.V - {Y})))
for subset in all_subsets:
for values in product(*[M.D[variable] for variable in subset]):
arm_setting[arm_id] = dict(zip(subset, values))
result = M.query((Y,), intervention=arm_setting[arm_id])
expectation = sum(y_val * result[(y_val,)] for y_val in M.D[Y])
mu_arm.append(expectation)
arm_id += 1
return tuple(mu_arm), arm_setting
def arm_types():
return ['POMIS', 'MIS', 'Brute-force', 'All-at-once']
def arms_of(arm_type: str, arm_setting, G, Y) -> Tuple[int, ...]:
if arm_type == 'POMIS':
return pomis_arms_of(arm_setting, G, Y)
elif arm_type == 'All-at-once':
return controlphil_arms_of(arm_setting, G, Y)
elif arm_type == 'MIS':
return mis_arms_of(arm_setting, G, Y)
elif arm_type == 'Brute-force':
return tuple(range(len(arm_setting)))
raise AssertionError(f'unknown: {arm_type}')
def pomis_arms_of(arm_setting, G, Y):
pomiss = POMISs(G, Y)
return tuple(arm_x for arm_x in range(len(arm_setting)) if set(arm_setting[arm_x]) in pomiss)
def mis_arms_of(arm_setting, G, Y):
miss = MISs(G, Y)
return tuple(arm_x for arm_x in range(len(arm_setting)) if set(arm_setting[arm_x]) in miss)
def controlphil_arms_of(arm_setting, G, Y):
intervenable = G.V - {Y}
return tuple(arm_x for arm_x in range(len(arm_setting)) if arm_setting[arm_x].keys() == intervenable)
|
# -*- coding: utf8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import unittest
from sumy.models.dom._sentence import Sentence
from sumy.summarizers.sum_basic import SumBasicSummarizer
from sumy._compat import to_unicode
from ..utils import build_document, build_document_from_string
from sumy.nlp.tokenizers import Tokenizer
class TestSumBasic(unittest.TestCase):
EMPTY_STOP_WORDS = []
COMMON_STOP_WORDS = ["the", "and", "i"]
def _build_summarizer(self, stop_words):
summarizer = SumBasicSummarizer()
summarizer.stop_words = stop_words
return summarizer
def test_empty_document(self):
document = build_document()
summarizer = self._build_summarizer(self.EMPTY_STOP_WORDS)
returned = summarizer(document, 10)
self.assertEqual(len(returned), 0)
def test_single_sentence(self):
s = Sentence("I am one slightly longer sentence.", Tokenizer("english"))
document = build_document([s])
summarizer = self._build_summarizer(self.EMPTY_STOP_WORDS)
returned = summarizer(document, 10)
self.assertEqual(len(returned), 1)
def test_normalize_words(self):
summarizer = self._build_summarizer(self.EMPTY_STOP_WORDS)
sentence = "This iS A test 2 CHECk normalization."
words_original = sentence.split()
words_normalized = summarizer._normalize_words(words_original)
words_correctly_normalized = "this is a test 2 check normalization.".split()
self.assertEqual(words_normalized, words_correctly_normalized)
def test_filter_out_stop_words(self):
summarizer = self._build_summarizer(self.COMMON_STOP_WORDS)
sentence = "the dog and i went on a walk"
words = sentence.split()
words_filtered = summarizer._filter_out_stop_words(words)
words_correctly_filtered = ["dog", "went", "on", "a", "walk"]
self.assertEqual(words_filtered, words_correctly_filtered)
def test_compute_word_freq(self):
summarizer = self._build_summarizer(self.EMPTY_STOP_WORDS)
words = ["one", "two", "three", "four"]
freq = summarizer._compute_word_freq(words)
self.assertEqual(freq.get("one", 0), 1)
self.assertEqual(freq.get("two", 0), 1)
self.assertEqual(freq.get("three", 0), 1)
self.assertEqual(freq.get("four", 0), 1)
words = ["one", "one", "two", "two"]
freq = summarizer._compute_word_freq(words)
self.assertEqual(freq.get("one", 0), 2)
self.assertEqual(freq.get("two", 0), 2)
self.assertEqual(freq.get("three", 0), 0)
def test_get_all_content_words_in_doc(self):
summarizer = self._build_summarizer(self.EMPTY_STOP_WORDS)
s0 = Sentence("One two three.", Tokenizer("english"))
s1 = Sentence("One two three.", Tokenizer("english"))
document = build_document([s0, s1])
content_words = summarizer._get_all_content_words_in_doc(document.sentences)
content_words_freq = {}
for w in content_words:
content_words_freq[w] = content_words_freq.get(w, 0) + 1
content_words_correct = {"one": 2, "two": 2, "three": 2}
self.assertEqual(content_words_freq, content_words_correct)
def test_compute_tf(self):
summarizer = self._build_summarizer(self.EMPTY_STOP_WORDS)
s0 = Sentence("kicking soccer balls.", Tokenizer("english"))
s1 = Sentence("eating chicken dumplings.", Tokenizer("english"))
document = build_document([s0, s1])
freq = summarizer._compute_tf(document.sentences)
self.assertEqual(freq["kicking"], 1/6)
self.assertEqual(freq["soccer"], 1/6)
self.assertEqual(freq["balls"], 1/6)
self.assertEqual(freq["eating"], 1/6)
self.assertEqual(freq["chicken"], 1/6)
self.assertEqual(freq["dumplings"], 1/6)
document = build_document([s0, s0, s1])
freq = summarizer._compute_tf(document.sentences)
self.assertEqual(freq["kicking"], 2/9)
self.assertEqual(freq["soccer"], 2/9)
self.assertEqual(freq["balls"], 2/9)
self.assertEqual(freq["eating"], 1/9)
self.assertEqual(freq["chicken"], 1/9)
self.assertEqual(freq["dumplings"], 1/9)
def test_compute_average_probability_of_words(self):
summarizer = self._build_summarizer(self.EMPTY_STOP_WORDS)
word_freq = {"one": 1/6, "two": 2/6, "three": 3/6}
s0 = []
s1 = ["one"]
s2 = ["two", "three"]
s3 = ["two", "three", "three"]
EPS = 0.0001
self.assertTrue(abs(summarizer._compute_average_probability_of_words(word_freq, s0) - 0) < EPS)
self.assertTrue(abs(summarizer._compute_average_probability_of_words(word_freq, s1) - 1/6) < EPS)
self.assertTrue(abs(summarizer._compute_average_probability_of_words(word_freq, s2) - 5/12) < EPS)
self.assertTrue(abs(summarizer._compute_average_probability_of_words(word_freq, s3) - 8/18) < EPS)
def test_compute_ratings(self):
summarizer = self._build_summarizer(self.EMPTY_STOP_WORDS)
s0 = Sentence("Dog cat fish.", Tokenizer("english"))
s1 = Sentence("Dog cat camel.", Tokenizer("english"))
s2 = Sentence("Fish frog horse.", Tokenizer("english"))
document = build_document([s0, s1, s2])
ratings = summarizer._compute_ratings(document.sentences)
self.assertEqual(ratings[s0], 0)
self.assertEqual(ratings[s1], -2)
self.assertEqual(ratings[s2], -1)
# Due to the frequency discounting, after finding sentence s0,
# s2 should come before s1 since only two of its words get discounted
# rather than all 3 of s1's
s0 = Sentence("one two three", Tokenizer("english"))
s1 = Sentence("one two four", Tokenizer("english"))
s2 = Sentence("three five six", Tokenizer("english"))
document = build_document([s0, s1, s2])
ratings = summarizer._compute_ratings(document.sentences)
self.assertEqual(ratings[s0], 0)
self.assertEqual(ratings[s1], -2)
self.assertEqual(ratings[s2], -1)
|
# import findspark
# findspark.init()
from pyspark import SparkConf
from pyspark.sql import SparkSession
# Build the SparkSession
class SparkSessionBase(object):
SPARK_APP_NAME = None
# SPARK_URL = "yarn"
SPARK_EXECUTOR_MEMORY = "16g"
SPARK_EXECUTOR_CORES = 6
SPARK_EXECUTOR_INSTANCES = 6
ENABLE_HIVE_SUPPORT = False
def _create_spark_session(self):
conf = SparkConf()
config = (
("spark.app.name", self.SPARK_APP_NAME),
("spark.executor.memory", self.SPARK_EXECUTOR_MEMORY),
# ("spark.master", self.SPARK_URL),
("spark.executor.cores", self.SPARK_EXECUTOR_CORES),
("spark.executor.instances", self.SPARK_EXECUTOR_INSTANCES),
# ("spark.sql.warehouse.dir", "/root/apache-hive-2.3.7-bin/warehouse"),
("hive.metastore.uris", "thrift://172.18.0.2:9083")
)
conf.setAll(config)
print(self.ENABLE_HIVE_SUPPORT, config)
if self.ENABLE_HIVE_SUPPORT:
return SparkSession.builder.config(conf=conf).enableHiveSupport().getOrCreate()
else:
return SparkSession.builder.config(conf=conf).getOrCreate()
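# A minimal usage sketch (assumed, not part of the original file): subclass
# SparkSessionBase, override the class-level settings you need, then build the
# session. "WordCountSession" is a made-up name for illustration.
class WordCountSession(SparkSessionBase):
    SPARK_APP_NAME = "wordCountSession"
if __name__ == "__main__":
    spark = WordCountSession()._create_spark_session()
    print(spark.version)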
|
from optparse import make_option
from django.core import exceptions
from django.core.management.base import BaseCommand
from django.utils.encoding import force_str
from django.db.utils import IntegrityError
from django.db import connection
from django_tenants.clone import CloneSchema
from django_tenants.utils import get_tenant_model
class Command(BaseCommand):
help = 'Clones a tenant'
# Only use editable fields
tenant_fields = [field for field in get_tenant_model()._meta.fields
if field.editable and not field.primary_key]
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.option_list = ()
self.option_list += (make_option('--clone_from',
help='Specifies which schema to clone.'), )
for field in self.tenant_fields:
self.option_list += (make_option('--%s' % field.name,
help='Specifies the %s for tenant.' % field.name), )
def handle(self, *args, **options):
tenant_data = {}
for field in self.tenant_fields:
input_value = options.get(field.name, None)
tenant_data[field.name] = input_value
clone_schema_from = options.get('clone_from')
while clone_schema_from == '' or clone_schema_from is None:
clone_schema_from = input(force_str('Clone schema from: '))
while True:
for field in self.tenant_fields:
if tenant_data.get(field.name, '') == '':
input_msg = field.verbose_name
default = field.get_default()
if default:
input_msg = "%s (leave blank to use '%s')" % (input_msg, default)
input_value = input(force_str('%s: ' % input_msg)) or default
tenant_data[field.name] = input_value
tenant = self.store_tenant(clone_schema_from, **tenant_data)
if tenant is not None:
break
tenant_data = {}
def store_tenant(self, clone_schema_from, **fields):
connection.set_schema_to_public()
cursor = connection.cursor()
try:
tenant = get_tenant_model()(**fields)
tenant.auto_create_schema = False
tenant.save()
clone_schema = CloneSchema(cursor)
clone_schema.clone(clone_schema_from, tenant.schema_name)
return tenant
except exceptions.ValidationError as e:
self.stderr.write("Error: %s" % '; '.join(e.messages))
return None
except IntegrityError:
return None
|
# Copyright (c) 2010 Jeremy Thurgood <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# NOTE: These tests only cover the very simple cases I needed to test
# for the InstanceGroup fix.
import xml.sax
from boto import handler
from boto.emr import emrobject
from boto.resultset import ResultSet
from tests.compat import unittest
JOB_FLOW_EXAMPLE = b"""
<DescribeJobFlowsResponse
xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-01-15">
<DescribeJobFlowsResult>
<JobFlows>
<member>
<ExecutionStatusDetail>
<CreationDateTime>2009-01-28T21:49:16Z</CreationDateTime>
<StartDateTime>2009-01-28T21:49:16Z</StartDateTime>
<State>STARTING</State>
</ExecutionStatusDetail>
<BootstrapActions>
<member>
<BootstrapActionConfig>
<ScriptBootstrapAction>
<Args/>
<Path>s3://elasticmapreduce/libs/hue/install-hue</Path>
</ScriptBootstrapAction>
<Name>Install Hue</Name>
</BootstrapActionConfig>
</member>
</BootstrapActions>
<VisibleToAllUsers>true</VisibleToAllUsers>
<SupportedProducts>
<member>Hue</member>
</SupportedProducts>
<Name>MyJobFlowName</Name>
<LogUri>mybucket/subdir/</LogUri>
<Steps>
<member>
<ExecutionStatusDetail>
<CreationDateTime>2009-01-28T21:49:16Z</CreationDateTime>
<State>PENDING</State>
</ExecutionStatusDetail>
<StepConfig>
<HadoopJarStep>
<Jar>MyJarFile</Jar>
<MainClass>MyMailClass</MainClass>
<Args>
<member>arg1</member>
<member>arg2</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>MyStepName</Name>
<ActionOnFailure>CONTINUE</ActionOnFailure>
</StepConfig>
</member>
</Steps>
<JobFlowId>j-3UN6WX5RRO2AG</JobFlowId>
<Instances>
<Placement>
<AvailabilityZone>us-east-1a</AvailabilityZone>
</Placement>
<SubordinateInstanceType>m1.small</SubordinateInstanceType>
<MainInstanceType>m1.small</MainInstanceType>
<Ec2KeyName>myec2keyname</Ec2KeyName>
<InstanceCount>4</InstanceCount>
<KeepJobFlowAliveWhenNoSteps>true</KeepJobFlowAliveWhenNoSteps>
</Instances>
</member>
</JobFlows>
</DescribeJobFlowsResult>
<ResponseMetadata>
<RequestId>9cea3229-ed85-11dd-9877-6fad448a8419</RequestId>
</ResponseMetadata>
</DescribeJobFlowsResponse>
"""
JOB_FLOW_COMPLETED = b"""
<DescribeJobFlowsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
<DescribeJobFlowsResult>
<JobFlows>
<member>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<LastStateChangeReason>Steps completed</LastStateChangeReason>
<StartDateTime>2010-10-21T01:03:59Z</StartDateTime>
<ReadyDateTime>2010-10-21T01:03:59Z</ReadyDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:44:18Z</EndDateTime>
</ExecutionStatusDetail>
<BootstrapActions/>
<Name>RealJobFlowName</Name>
<LogUri>s3n://example.emrtest.scripts/jobflow_logs/</LogUri>
<Steps>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>s3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar</Jar>
<Args>
<member>s3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>Setup Hadoop Debugging</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:03:59Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:04:22Z</EndDateTime>
</ExecutionStatusDetail>
</member>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
<Args>
<member>-mapper</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-InitialMapper.py</member>
<member>-reducer</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-InitialReducer.py</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/20/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/19/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/18/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/17/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/16/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/15/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/14/*</member>
<member>-output</member>
<member>s3://example.emrtest.crunched/</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>testjob_Initial</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:04:22Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:36:18Z</EndDateTime>
</ExecutionStatusDetail>
</member>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
<Args>
<member>-mapper</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step1Mapper.py</member>
<member>-reducer</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step1Reducer.py</member>
<member>-input</member>
<member>s3://example.emrtest.crunched/*</member>
<member>-output</member>
<member>s3://example.emrtest.step1/</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>testjob_step1</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:36:18Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:37:51Z</EndDateTime>
</ExecutionStatusDetail>
</member>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
<Args>
<member>-mapper</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step2Mapper.py</member>
<member>-reducer</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step2Reducer.py</member>
<member>-input</member>
<member>s3://example.emrtest.crunched/*</member>
<member>-output</member>
<member>s3://example.emrtest.step2/</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>testjob_step2</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:37:51Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:39:32Z</EndDateTime>
</ExecutionStatusDetail>
</member>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
<Args>
<member>-mapper</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step3Mapper.py</member>
<member>-reducer</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step3Reducer.py</member>
<member>-input</member>
<member>s3://example.emrtest.step1/*</member>
<member>-output</member>
<member>s3://example.emrtest.step3/</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>testjob_step3</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:39:32Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:41:22Z</EndDateTime>
</ExecutionStatusDetail>
</member>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
<Args>
<member>-mapper</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step4Mapper.py</member>
<member>-reducer</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step4Reducer.py</member>
<member>-input</member>
<member>s3://example.emrtest.step1/*</member>
<member>-output</member>
<member>s3://example.emrtest.step4/</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>testjob_step4</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:41:22Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:43:03Z</EndDateTime>
</ExecutionStatusDetail>
</member>
</Steps>
<JobFlowId>j-3H3Q13JPFLU22</JobFlowId>
<Instances>
<SubordinateInstanceType>m1.large</SubordinateInstanceType>
<MainInstanceId>i-64c21609</MainInstanceId>
<Placement>
<AvailabilityZone>us-east-1b</AvailabilityZone>
</Placement>
<InstanceGroups>
<member>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<InstanceRunningCount>0</InstanceRunningCount>
<StartDateTime>2010-10-21T01:02:09Z</StartDateTime>
<ReadyDateTime>2010-10-21T01:03:03Z</ReadyDateTime>
<State>ENDED</State>
<EndDateTime>2010-10-21T01:44:18Z</EndDateTime>
<InstanceRequestCount>1</InstanceRequestCount>
<InstanceType>m1.large</InstanceType>
<Market>ON_DEMAND</Market>
<LastStateChangeReason>Job flow terminated</LastStateChangeReason>
<InstanceRole>MASTER</InstanceRole>
<InstanceGroupId>ig-EVMHOZJ2SCO8</InstanceGroupId>
<Name>main</Name>
</member>
<member>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<InstanceRunningCount>0</InstanceRunningCount>
<StartDateTime>2010-10-21T01:03:59Z</StartDateTime>
<ReadyDateTime>2010-10-21T01:03:59Z</ReadyDateTime>
<State>ENDED</State>
<EndDateTime>2010-10-21T01:44:18Z</EndDateTime>
<InstanceRequestCount>9</InstanceRequestCount>
<InstanceType>m1.large</InstanceType>
<Market>ON_DEMAND</Market>
<LastStateChangeReason>Job flow terminated</LastStateChangeReason>
<InstanceRole>CORE</InstanceRole>
<InstanceGroupId>ig-YZHDYVITVHKB</InstanceGroupId>
<Name>subordinate</Name>
</member>
</InstanceGroups>
<NormalizedInstanceHours>40</NormalizedInstanceHours>
<HadoopVersion>0.20</HadoopVersion>
<MainInstanceType>m1.large</MainInstanceType>
<MainPublicDnsName>ec2-184-72-153-139.compute-1.amazonaws.com</MainPublicDnsName>
<Ec2KeyName>myubersecurekey</Ec2KeyName>
<InstanceCount>10</InstanceCount>
<KeepJobFlowAliveWhenNoSteps>false</KeepJobFlowAliveWhenNoSteps>
</Instances>
</member>
</JobFlows>
</DescribeJobFlowsResult>
<ResponseMetadata>
<RequestId>c31e701d-dcb4-11df-b5d9-337fc7fe4773</RequestId>
</ResponseMetadata>
</DescribeJobFlowsResponse>
"""
class TestEMRResponses(unittest.TestCase):
def _parse_xml(self, body, markers):
rs = ResultSet(markers)
h = handler.XmlHandler(rs, None)
xml.sax.parseString(body, h)
return rs
def _assert_fields(self, response, **fields):
for field, expected in fields.items():
actual = getattr(response, field)
self.assertEquals(expected, actual,
"Field %s: %r != %r" % (field, expected, actual))
def test_JobFlows_example(self):
[jobflow] = self._parse_xml(JOB_FLOW_EXAMPLE,
[('member', emrobject.JobFlow)])
self._assert_fields(jobflow,
creationdatetime='2009-01-28T21:49:16Z',
startdatetime='2009-01-28T21:49:16Z',
state='STARTING',
instancecount='4',
jobflowid='j-3UN6WX5RRO2AG',
loguri='mybucket/subdir/',
name='MyJobFlowName',
availabilityzone='us-east-1a',
subordinateinstancetype='m1.small',
maininstancetype='m1.small',
ec2keyname='myec2keyname',
keepjobflowalivewhennosteps='true')
def test_JobFlows_completed(self):
[jobflow] = self._parse_xml(JOB_FLOW_COMPLETED,
[('member', emrobject.JobFlow)])
self._assert_fields(jobflow,
creationdatetime='2010-10-21T01:00:25Z',
startdatetime='2010-10-21T01:03:59Z',
enddatetime='2010-10-21T01:44:18Z',
state='COMPLETED',
instancecount='10',
jobflowid='j-3H3Q13JPFLU22',
loguri='s3n://example.emrtest.scripts/jobflow_logs/',
name='RealJobFlowName',
availabilityzone='us-east-1b',
subordinateinstancetype='m1.large',
maininstancetype='m1.large',
ec2keyname='myubersecurekey',
keepjobflowalivewhennosteps='false')
self.assertEquals(6, len(jobflow.steps))
self.assertEquals(2, len(jobflow.instancegroups))
|
import pytest
import sys
sys.path.insert(0, '..')  # add the parent directory to the module search path
import os
import pandas as pd
import numpy as np
import dash_bootstrap_components
from src.navbar import *
def test_Navbar():
    '''Navbar() should return a truthy dash_bootstrap_components NavbarSimple component.'''
navbar = Navbar()
assert navbar
assert isinstance(navbar, dash_bootstrap_components._components.NavbarSimple) |
def _is_windows(ctx):
return ctx.os.name.lower().find("windows") != -1
def _wrap_bash_cmd(ctx, cmd):
if _is_windows(ctx):
bazel_sh = _get_env_var(ctx, "BAZEL_SH")
if not bazel_sh:
fail("BAZEL_SH environment variable is not set")
cmd = [bazel_sh, "-l", "-c", " ".join(["\"%s\"" % s for s in cmd])]
return cmd
def _get_env_var(ctx, name):
if name in ctx.os.environ:
return ctx.os.environ[name]
else:
return None
# Checks if we should use the system lib instead of the bundled one
def _use_system_lib(ctx, name):
syslibenv = _get_env_var(ctx, "STUKA_SYSTEM_LIBS")
if syslibenv:
for n in syslibenv.strip().split(","):
if n.strip() == name:
return True
return False
# Executes specified command with arguments and calls 'fail' if it exited with
# non-zero code
def _execute_and_check_ret_code(repo_ctx, cmd_and_args):
result = repo_ctx.execute(cmd_and_args, timeout = 60)
if result.return_code != 0:
fail(("Non-zero return code({1}) when executing '{0}':\n" + "Stdout: {2}\n" +
"Stderr: {3}").format(
" ".join(cmd_and_args),
result.return_code,
result.stdout,
result.stderr,
))
def _repos_are_siblings():
return Label("@foo//bar").workspace_root.startswith("../")
def _apply_delete(ctx, paths):
for path in paths:
if path.startswith("/"):
fail("refusing to rm -rf path starting with '/': " + path)
if ".." in path:
fail("refusing to rm -rf path containing '..': " + path)
cmd = _wrap_bash_cmd(ctx, ["rm", "-rf"] + [ctx.path(path) for path in paths])
_execute_and_check_ret_code(ctx, cmd)
def _stuka_http_archive(ctx):
use_syslib = _use_system_lib(ctx, ctx.attr.name)
if not use_syslib:
ctx.download_and_extract(
ctx.attr.urls,
"",
ctx.attr.sha256,
ctx.attr.type,
ctx.attr.strip_prefix,
)
if ctx.attr.delete:
_apply_delete(ctx, ctx.attr.delete)
if use_syslib and ctx.attr.system_build_file != None:
# Use BUILD.bazel to avoid conflict with third party projects with
# BUILD or build (directory) underneath.
ctx.template("BUILD.bazel", ctx.attr.system_build_file, {
"%prefix%": ".." if _repos_are_siblings() else "external",
}, False)
elif ctx.attr.build_file != None:
# Use BUILD.bazel to avoid conflict with third party projects with
# BUILD or build (directory) underneath.
ctx.template("BUILD.bazel", ctx.attr.build_file, {
"%prefix%": ".." if _repos_are_siblings() else "external",
}, False)
if use_syslib:
for internal_src, external_dest in ctx.attr.system_link_files.items():
ctx.symlink(Label(internal_src), ctx.path(external_dest))
stuka_http_archive = repository_rule(
implementation = _stuka_http_archive,
attrs = {
"sha256": attr.string(mandatory = True),
"urls": attr.string_list(mandatory = True, allow_empty = False),
"strip_prefix": attr.string(),
"type": attr.string(),
"delete": attr.string_list(),
"build_file": attr.label(),
"system_build_file": attr.label(),
"system_link_files": attr.string_dict(),
},
environ = [
"STUKA_SYSTEM_LIBS",
],
) |
import django
SECRET_KEY = 'stuffandnonsense'
APPS = [
'nano.activation',
'nano.badge',
'nano.blog',
'nano.chunk',
'nano.comments',
'nano.countries',
'nano.faq',
'nano.link',
'nano.mark',
'nano.privmsg',
'nano.tools',
'nano.user',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
}
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
] + APPS
MIDDLEWARE_CLASSES = ()
if django.VERSION[:2] < (1, 6):
INSTALLED_APPS += ['discover_runner']
TEST_RUNNER = 'discover_runner.DiscoverRunner'
|
import nextcord
from nextcord.ext import commands
import config
import os
import motor.motor_asyncio
from utils.mongo import Document
# Don't forget to enable all intents in the developer portal.
async def get_prefix(bot, message):
if not message.guild:
return commands.when_mentioned_or(bot.DEFAULTPREFIX)(bot,message)
try:
data = await bot.config.find(message.guild.id)
if not data or "prefix" not in data:
return commands.when_mentioned_or(bot.DEFAULTPREFIX)(bot,message)
return commands.when_mentioned_or(data["prefix"])(bot,message)
except:
return commands.when_mentioned_or(bot.DEFAULTPREFIX)(bot,message)
intents = nextcord.Intents.all()
bot = commands.Bot(command_prefix=get_prefix, intents=intents, help_command=None)
bot.DEFAULTPREFIX = config.PREFIX
@bot.event
async def on_ready():
print(f"Bot is online! \nId: {config.BOT_ID}\nUsername: {config.BOT_NAME}")
await bot.change_presence(
        activity=nextcord.Game(name=f"Hi, my name is {bot.user.name}. Use {config.PREFIX} to interact with me!")
)
bot.mongo = motor.motor_asyncio.AsyncIOMotorClient(str(config.MONGODB))
bot.db = bot.mongo["myFirstDatabase"]
bot.config = Document(bot.db, "configs")
bot.warns = Document(bot.db, "warns")
print("Database has been loaded!")
for document in await bot.config.get_all():
print(document)
@bot.command()
async def load(ctx, extension):
    if ctx.message.author.id in (config.OWNER_ID, config.MANAGER_ID):
        bot.load_extension(f'cogs.{extension}')
        await ctx.send(f'I have loaded the cog `{extension}`!')
        print(f'Cog loaded from within Discord:\n{extension}')
    else:
        await ctx.send(f'Only users with the ids `{config.OWNER_ID}` or `{config.MANAGER_ID}` can run this command!')
@bot.command()
async def unload(ctx, extension):
    if ctx.message.author.id in (config.OWNER_ID, config.MANAGER_ID):
        bot.unload_extension(f'cogs.{extension}')
        await ctx.send(f"I have unloaded the cog `{extension}`!")
        print(f'Cog unloaded from within Discord:\n{extension}')
    else:
        await ctx.send(f'Only users with the ids `{config.OWNER_ID}` or `{config.MANAGER_ID}` can use this command!')
@bot.command()
async def reload(ctx, extension):
    if ctx.message.author.id in (config.OWNER_ID, config.MANAGER_ID):
        bot.reload_extension(f'cogs.{extension}')
        await ctx.send(f"I have reloaded the cog `{extension}`!")
        print(f'Cog reloaded from within Discord:\n{extension}')
    else:
        await ctx.send(f'Only users with the ids `{config.OWNER_ID}` or `{config.MANAGER_ID}` can use this command!')
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
bot.load_extension(f'cogs.{filename[:-3]}')
print(f"Loaded cog:\n{filename[:-3]}")
else:
print(f"Error loading cog:\n{filename[:-3]}")
bot.run(config.TOKEN) |
x = set()
print(type(x))
x.add(1)
print(x)
x.add(2)
print(x)
x.add(1)
print(x)
l = [1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4]
print(set(l))
# Booleans
a = True
print(a)
print(type(a))
print(11 > 2)
b = None
print(b)
print(type(b))
b = 'a'
print(b)
|
import torch
from hearline.models.transformers.attention import MultiHeadedAttention
from hearline.models.transformers.embedding import PositionalEncoding
from hearline.models.transformers.encoder_layer import EncoderLayer
from hearline.models.transformers.layer_norm import LayerNorm
from hearline.models.transformers.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from hearline.models.transformers.repeat import repeat
from hearline.models.transformers.subsampling import (
Conv2dNoSubsampling,
Conv2dSubsampling,
)
# Reference: https://github.com/espnet/espnet/tree/master/espnet/nets/pytorch_backend/transformer
class Encoder(torch.nn.Module):
"""Encoder module
:param int idim: input dim
:param argparse.Namespace args: experiment config
"""
def __init__(self, idim, args, pos_enc=True):
super(Encoder, self).__init__()
if args.transformer_input_layer == "linear":
if pos_enc:
self.input_layer = torch.nn.Sequential(
torch.nn.Linear(idim, args.adim),
torch.nn.LayerNorm(args.adim),
torch.nn.Dropout(args.dropout_rate),
torch.nn.ReLU(),
PositionalEncoding(args.adim, args.dropout_rate),
)
else:
self.input_layer = torch.nn.Sequential(
torch.nn.Linear(idim, args.adim),
torch.nn.LayerNorm(args.adim),
torch.nn.Dropout(args.dropout_rate),
torch.nn.ReLU(),
)
elif args.transformer_input_layer == "conv2d":
self.input_layer = Conv2dSubsampling(idim, args.adim, args.dropout_rate)
elif args.transformer_input_layer == "conv2d_no":
self.input_layer = Conv2dNoSubsampling(idim, args.adim, args.dropout_rate)
elif args.transformer_input_layer == "embed":
self.input_layer = torch.nn.Sequential(
torch.nn.Embedding(idim, args.adim),
PositionalEncoding(args.adim, args.dropout_rate),
)
else:
raise ValueError("unknown input_layer: " + args.transformer_input_layer)
self.encoders = repeat(
args.elayers,
lambda: EncoderLayer(
args.adim,
MultiHeadedAttention(
args.aheads, args.adim, args.transformer_attn_dropout_rate
),
PositionwiseFeedForward(args.adim, args.eunits, args.dropout_rate),
args.dropout_rate,
args.after_conv,
),
)
self.norm = LayerNorm(args.adim)
def forward(self, x, mask=None):
"""Embed positions in tensor
:param torch.Tensor x: input tensor
:param torch.Tensor mask: input mask
:return: position embedded tensor and mask
:rtype Tuple[torch.Tensor, torch.Tensor]:
"""
if isinstance(self.input_layer, Conv2dNoSubsampling):
x, mask = self.input_layer(x, mask)
elif isinstance(self.input_layer, Conv2dSubsampling):
x, mask = self.input_layer(x, mask)
else:
x = self.input_layer(x)
# x, mask = self.encoders(x, mask)
# return x, mask
x, mask = self.encoders(x, mask)
return self.norm(x), mask
|
def mes(valor):
meses = {1: 'January',
2: 'February',
3: 'March',
4: 'April',
5: 'May',
6: 'June',
7: 'July',
8: 'August',
9: 'September',
10: 'October',
11: 'November',
12: 'December'}
    print(meses[valor])
mes_do_ano = int(input())
mes(mes_do_ano)
|
# -*- coding: utf-8 -*-
from denite import util
from .base import Base
import os
import site
# Add external modules
path_to_parent_dir = os.path.abspath(os.path.dirname(__file__) + '/../')
path_to_modules = os.path.join(path_to_parent_dir, 'modules')
site.addsitedir(path_to_modules)
site.addsitedir(os.path.join(path_to_modules, 'inflection'))
site.addsitedir(os.path.join(path_to_modules, 'finders'))
site.addsitedir(os.path.join(path_to_modules, 'models'))
from dwim_finder import DwimFinder # noqa
from all_finder import AllFinder # noqa
from app_finder import AppFinder # noqa
from model_finder import ModelFinder # noqa
from controller_finder import ControllerFinder # noqa
from helper_finder import HelperFinder # noqa
from view_finder import ViewFinder # noqa
from frontend_finder import FrontendFinder # noqa
from test_finder import TestFinder # noqa
from spec_finder import SpecFinder # noqa
from config_finder import ConfigFinder # noqa
from db_finder import DbFinder # noqa
from lib_finder import LibFinder # noqa
from assets_finder import AssetsFinder # noqa
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'rails'
self.kind = 'file'
def on_init(self, context):
try:
context['__target'] = context['args'][0]
except IndexError:
raise NameError('target must be provided')
cbname = self.vim.current.buffer.name
context['__cbname'] = cbname
context['__root_path'] = util.path2project(self.vim, cbname, context.get('root_markers', ''))
def highlight(self):
# TODO syntax does not work as expected
self.vim.command('syntax region deniteSource_railsConstant start=+^+ end=+^.\{-}\s+')
self.vim.command('highlight link deniteSource_railsConstant Statement')
self.vim.command('syntax match deniteSource_railsSeparator /::/ containedin=deniteSource_railsConstant')
self.vim.command('highlight link deniteSource_railsSeparator Identifier')
self.vim.command('syntax region deniteSource_railsPath start=+(+ end=+)+')
self.vim.command('highlight link deniteSource_railsPath Statement')
self.vim.command('syntax match deniteSource_railsController /Controller:/')
self.vim.command('highlight link deniteSource_railsController Function')
self.vim.command('syntax match deniteSource_railsModel /Model:/')
self.vim.command('highlight link deniteSource_railsModel String')
self.vim.command('syntax match deniteSource_railsHelper /Helper:/')
self.vim.command('highlight link deniteSource_railsHelper Type')
self.vim.command('syntax match deniteSource_railsView /View:/')
self.vim.command('highlight link deniteSource_railsView Statement')
self.vim.command('syntax match deniteSource_railsTest /Test:/')
self.vim.command('highlight link deniteSource_railsTest Number')
self.vim.command('syntax match deniteSource_railsSpec /Spec:/')
self.vim.command('highlight link deniteSource_railsSpec Number')
self.vim.command('syntax match deniteSource_railsConfig /Config:/')
self.vim.command('highlight link deniteSource_railsConfig Statement')
self.vim.command('syntax match deniteSource_railsDb /Db:/')
self.vim.command('highlight link deniteSource_railsDb Statement')
self.vim.command('syntax match deniteSource_railsLib /Lib:/')
self.vim.command('highlight link deniteSource_railsLib Statement')
self.vim.command('syntax match deniteSource_railsAssets /Assets:/')
self.vim.command('highlight link deniteSource_railsAssets Statement')
self.vim.command('syntax match deniteSource_railsApp /App:/')
self.vim.command('highlight link deniteSource_railsApp Statement')
self.vim.command('syntax match deniteSource_railsAll /All:/')
self.vim.command('highlight link deniteSource_railsAll Statement')
self.vim.command('syntax match deniteSource_railsFrontend /Frontend:/')
self.vim.command('highlight link deniteSource_railsFrontend Statement')
def gather_candidates(self, context):
file_list = self._find_files(context)
if file_list is not None:
return [self._convert(context, x) for x in file_list]
else:
return []
def _find_files(self, context):
target = context['__target']
if target == 'dwim':
finder_class = DwimFinder
elif target == 'app':
finder_class = AppFinder
elif target == 'all':
finder_class = AllFinder
elif target == 'model':
finder_class = ModelFinder
elif target == 'controller':
finder_class = ControllerFinder
elif target == 'helper':
finder_class = HelperFinder
elif target == 'view':
finder_class = ViewFinder
elif target == 'frontend':
finder_class = FrontendFinder
elif target == 'test':
finder_class = TestFinder
elif target == 'spec':
finder_class = SpecFinder
elif target == 'config':
finder_class = ConfigFinder
elif target == 'db':
finder_class = DbFinder
elif target == 'lib':
finder_class = LibFinder
elif target == 'assets':
finder_class = AssetsFinder
else:
msg = '{0} is not valid denite-rails target'.format(target)
raise NameError(msg)
return finder_class(context).find_files()
def _convert(self, context, file_object):
result_dict = {
'word': file_object.to_word(context['__root_path']),
'action__path': file_object.filepath
}
return result_dict
|
import dataclasses
import mock
import pytest
from pca.domain.entity import (
AutoId,
Entity,
NaturalId,
SequenceId,
Uuid4Id,
)
@pytest.fixture(scope="session")
def data():
return {"frame_type": "gravel", "wheel_type": "road"}
def entity_constructor(id_field=None):
id_field = id_field or SequenceId()
class Bike(Entity):
id = id_field
frame_type: str
wheel_type: str
return Bike
class TestEntity:
def test_identity(self, data):
Bike = entity_constructor()
entities = [Bike(**data).__set_id__(i) for i in range(4)]
id_field = Bike.__get_id_field__()
assert [e.__get_id__() for e in entities] == [0, 1, 2, 3]
assert id_field.name == "id"
assert id_field.owner == Bike
def test_no_identity(self):
def entity_constructor():
class EntityWithoutId(Entity):
pass
return EntityWithoutId
with pytest.raises(AssertionError):
entity_constructor()
def test_too_many_identities(self):
def entity_constructor():
class EntityWith2Ids(Entity):
id1 = SequenceId()
id2 = Uuid4Id()
return EntityWith2Ids
with pytest.raises(AssertionError):
entity_constructor()
def test_inherited_id(self):
Bike = entity_constructor()
class Trike(Bike):
index = NaturalId("frame_type", "wheel_type")
assert Trike.__get_id_field__() == Bike.id
def test_overriding_inherited_id(self):
Bike = entity_constructor()
class Trike(Bike):
__id_field_name__ = "index"
index = NaturalId("frame_type", "wheel_type")
assert Trike.__get_id_field__() == Trike.index
def test_equality(self, data):
Bike = entity_constructor()
entity_1 = Bike(**data).__set_id__(1)
entity_2 = Bike(**data).__set_id__(2)
assert entity_1 != data
assert entity_1 != entity_2
entity_2.__set_id__(1)
assert entity_1 == entity_2
def test_serialization(self, data):
Bike = entity_constructor()
entity = Bike(**data).__set_id__(1)
assert dataclasses.asdict(entity) == data
class TestAutoId:
def test_value_generation(self, data):
id_value = 42
Bike = entity_constructor(AutoId(generator=lambda: id_value))
entity = Bike(**data).__set_id__()
assert entity.__get_id__() == id_value
# when explicitly set, ID is the value you set
entity.__set_id__(7)
assert entity.__get_id__() == 7
entity = Bike(**data).__set_id__()
assert entity.__get_id__() == id_value
class TestSequenceId:
def test_value_generation(self, data):
Bike = entity_constructor()
entity = Bike(**data).__set_id__()
assert entity.__get_id__() == 1
        # a new __set_id__ call on an existing entity generates a new ID,
        # which behaves like pseudo-cloning of the entity
entity.__set_id__()
assert entity.__get_id__() == 2
entity = Bike(**data).__set_id__()
assert entity.__get_id__() == 3
class TestNaturalId:
def test_value_generation(self, data):
Bike = entity_constructor(NaturalId("frame_type", "wheel_type"))
entity = Bike(**data).__set_id__()
assert entity.__get_id__() == ("gravel", "road")
# NaturalId ignores setting explicit value, it always gets it from
# the values of natural key's fields
entity.__set_id__(2)
assert entity.__get_id__() == ("gravel", "road")
entity = Bike(1, 2).__set_id__(3)
assert entity.__get_id__() == (1, 2)
class TestUuid4Id:
@mock.patch("pca.domain.entity.uuid4")
def test_value_generation(self, mocked_uuid4: mock.Mock, data):
id_value = "foo-bar"
mocked_uuid4.return_value = id_value
Bike = entity_constructor(Uuid4Id())
entity = Bike(**data).__set_id__()
assert entity.__get_id__() == id_value
id_value = "other-value"
entity.__set_id__(id_value)
assert entity.__get_id__() == id_value
|
from .models import Startup, Tag
from django.shortcuts import (get_object_or_404, render)
def homepage(request):
return render(
request,
'organizer/tag_list.html',
{'tag_list':Tag.objects.all()})
def tag_detail(request, slug):
tag = get_object_or_404(
Tag, slug__iexact=slug)
return render(
request, 'organizer/tag_detail.html',
{'tag':tag})
def tag_list(request):
return render(
request,
'organizer/tag_list.html',
{'tag_list': Tag.objects.all()}
)
def startup_list(request):
return render(
request,
'organizer/startup_list.html',
{'startup_list':Startup.objects.all()}
)
def startup_detail(request, slug):
startup = get_object_or_404(
Startup, slug__iexact=slug)
return render(
request,
'organizer/startup_detail.html',
{startup:'startup'}
)
|
# Export data to an excel file. Use different sheets for different sections
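# A hedged sketch of one way to do this (not from the source): pandas' ExcelWriter
# writes one sheet per section; .xlsx output requires openpyxl. The file name,
# sheet names and sample DataFrames below are placeholders.
import pandas as pd
def export_sections_to_excel(sections, path="report.xlsx"):
    """Write each section (name -> DataFrame) to its own sheet of a single workbook."""
    with pd.ExcelWriter(path) as writer:
        for sheet_name, df in sections.items():
            # Excel limits sheet names to 31 characters
            df.to_excel(writer, sheet_name=sheet_name[:31], index=False)
if __name__ == "__main__":
    export_sections_to_excel({
        "summary": pd.DataFrame({"metric": ["rows"], "value": [3]}),
        "details": pd.DataFrame({"a": [1, 2, 3]}),
    })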
|
import networkx as nx
import osmnx as ox
import json
data = {}
ox.config(use_cache=True, log_console=True)
G = ox.graph_from_place('Piedmont, California, USA', network_type='drive')
# G = ox.graph_from_place('beijing, china', network_type='drive', which_result=2)
# G = ox.graph_from_bbox(40.19, 39.70,116.75,116.05,network_type='drive')
print("ok")
adj = G.adj
links = list()
vertex = list()
edges = G.edges
# degree_centrality = nx.degree_centrality(G)
# nx.set_node_attributes(G, degree_centrality, "degree_centrality")
# closeness_centrality = nx.closeness_centrality(G)
# nx.set_node_attributes(G, closeness_centrality, "closeness_centrality")
# eigenvector_centrality = nx.eigenvector_centrality(G)
# nx.set_node_attributes(G, eigenvector_centrality, "eigenvector_centrality")
# betweenness_centrality = nx.betweenness_centrality(G)
# nx.set_node_attributes(G, betweenness_centrality, "betweenness_centrality")
# print("ok")
for edge in edges:
current_edge = edges[edge]
links.append({
'id': edge,
'osmid': current_edge.get('osmid'),
'geometry': str(current_edge.get('geometry'))[12:-1].split(","),
'length': current_edge.get('length')
})
# id geometry
nodes = G.nodes
for node in nodes:
current_node = nodes[node]
vertex.append({
'id': node,
'osmid': current_node.get('osmid'),
'x': current_node.get('x'),
'y': current_node.get('y'),
"degree_centrality": current_node.get("degree_centrality"),
"closeness_centrality": current_node.get("closeness_centrality"),
"eigenvector_centrality": current_node.get("eigenvector_centrality"),
"betweenness_centrality": current_node.get("betweenness_centrality"),
})
data['nodes'] = vertex
data['edges'] = links
with open('data.json', 'w') as fp:
json.dump(data, fp) |
objects = []
ans = 0
a = []
for i in objects:
if i not in a:
a.append(i)
ans += 1
print(ans)
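# Equivalent one-liner (added for illustration, not in the original): for hashable
# items, a set keeps only the distinct values, so its length is the unique count.
print(len(set(objects)))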
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 2 20:26:55 2018
@author: Ali Nasr
"""
# import required libraries
import tensorflow as tf
import cv2
import numpy as np
import tensorflow_hub as hub
import os, os.path
from random import shuffle, sample
import re
data_path = "/home/ali/PycharmProjects/tensorHub/data/"
label_zero_path = data_path + "label_zero/"
label_one_path = data_path + "label_one/"
#height_img, width_img = hub.get_expected_image_size(m)
#depth = hub.get_num_image_channels(m)
# declaring necessary values and variables
height_img = 128
width_img = 128
width_img_cam = 352
height_img_cam = 288
frame = np.zeros((height_img, width_img, 1), np.uint8)
windowName = 'cam'
cv2.namedWindow(windowName)
l = 0
hm_epochs = 100
number_of_data = 10
n_classes = 2
batch_size = 10
def creat_graph(ModuleSpec):
with tf.Graph().as_default() as graph:
input_tensor = tf.placeholder('float', [None, width_img, height_img, 3])
#bottleneck_plcaholder = tf.placeholder('float', [None, 1024])
#y = tf.placeholder('float')
m = hub.Module(ModuleSpec)
bottleneck_tensor = m(input_tensor)
return graph, bottleneck_tensor, input_tensor
# calculate and save fixed part of graph
def run_and_save_bottleneck(sess, bottleneck_tensor, input_tensor):
input_data, labels = load_data_from_files()
total, _ = labels.shape
batch_step = int(total/batch_size)
    # convert images from uint8 to float numbers
image_as_float = tf.image.convert_image_dtype(input_data, tf.float32)
decoded_images = sess.run(image_as_float)
x_epoch = decoded_images[0: batch_size]
bottleneck_value = sess.run(bottleneck_tensor, feed_dict={input_tensor: x_epoch})
for batch in range(1, batch_step):
x_epoch = decoded_images[batch * batch_size : (batch + 1) * batch_size]
batche_value = sess.run(bottleneck_tensor, feed_dict={input_tensor:x_epoch})
bottleneck_value = np.concatenate((bottleneck_value, batche_value))
#print(bottleneck_value.shape)
print("finishing up")
    return bottleneck_value, labels, batch_step
# new fully connected layer for the new application
def last_layer(X):
y = tf.placeholder('float')
weights = {'out': tf.Variable(tf.random_normal([1024, n_classes], stddev=0.001))}
biases = {'out': tf.Variable(tf.random_normal([n_classes], stddev=0.001))}
#fc = tf.nn.relu(tf.matmul(fc, weights['Weights_FC']) + biases['Biase_FC'])
# fc = tf.nn.dropout(fc, keep_rate)
output = tf.matmul(X, weights['out']) + biases['out']
output = tf.identity(output, name='output_1')
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
return output, cost, optimizer, y
# train new graph
def train_neural_network():
ModuleSpec = hub.load_module_spec("https://tfhub.dev/google/imagenet/mobilenet_v1_100_128/feature_vector/1")
graph, bottleneck_tensor, input_tensor = creat_graph(ModuleSpec)
with graph.as_default():
_, bottleneck_tensor_size = bottleneck_tensor.get_shape().as_list()
X = tf.placeholder_with_default(bottleneck_tensor, shape=[None, bottleneck_tensor_size],
name='newlayerinputplacholder')
output, cost, optimizer, y = last_layer(X)
with tf.Session(graph=graph)as sess:
init = tf.global_variables_initializer()
sess.run(init)
bottleneck_value, labels, batch_step = run_and_save_bottleneck(sess, bottleneck_tensor, input_tensor)
saver = tf.train.Saver()
for epoch in range(hm_epochs):
epoch_loss = 0
for batch in range(batch_step):
epoch_x = bottleneck_value[batch * batch_size: (batch + 1) * batch_size]
epoch_y = labels[batch * batch_size: (batch + 1) * batch_size]
_, c = sess.run([optimizer, cost], feed_dict={X: epoch_x, y: epoch_y})
epoch_loss += c
print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
writer = tf.summary.FileWriter("output", sess.graph)
writer.close()
# please customize the directory for your project
saver.save(sess, '/home/ali/PycharmProjects/tensorHub/save/my_test_model')
# correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y,1))
# accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
# print('Accyracy:', accuracy.eval({x:mnist.test.images, y:mnist.test.labels}))
return graph, input_tensor, init
# feed-forward network
def feed_for():
global l
#img = np.zeros((number_of_data, height_img, width_img))
#label = np.zeros(number_of_data).astype(int)
i = 0
cap = cv2.VideoCapture(0)
if cap.isOpened():
ret, frame = cap.read()
else:
ret = False
cap.set(3, width_img_cam);
cap.set(4, height_img_cam);
print(cap.get(3))
print(cap.get(4))
sess = tf.Session()
# please customize the directory for your project
saver = tf.train.import_meta_graph('/home/ali/PycharmProjects/test1/saved/my_test_model.meta')
saver.restore(sess, tf.train.latest_checkpoint('/home/ali/PycharmProjects/test1/saved/./'))
graph = tf.get_default_graph()
# w1 = graph.get_tensor_by_name("w1:0")
# w2 = graph.get_tensor_by_name("w2:0")
# Now, access the op that you want to run.
op_to_restore = graph.get_tensor_by_name("add_1:0")
forward = tf.nn.softmax(logits=op_to_restore)
while (True):
ret, camera_img = cap.read()
camera_img = cv2.cvtColor(camera_img, cv2.COLOR_BGR2GRAY)
frame = cv2.resize(camera_img, (width_img, height_img))
cv2.imshow(windowName, frame)
epoch_x = frame.reshape(1, width_img * height_img)
feed_dict = {x: epoch_x}
sess.run(tf.global_variables_initializer())
# print(forward.eval(feed_dict))
print(sess.run(op_to_restore, feed_dict))
if cv2.waitKey(1) == 27: # exit on ESC
break
cv2.destroyAllWindows()
cap.release()
def mouse(event, x, y, flags, param):
global l
if event == cv2.EVENT_MBUTTONDOWN:
        l = 1  # actually label 0; it is converted in capture_and_save_dataset()
if event == cv2.EVENT_LBUTTONDOWN:
        l = 2  # actually label 1; it is converted in capture_and_save_dataset()
# bind the callback function to window
cv2.setMouseCallback(windowName, mouse)
def load_data_from_files():
pattern = r"label_one"
extensions = ['jpg', 'png']
file_list_ones = []
file_list_zeros = []
for extension in extensions:
        file_glob_ones = os.path.join(label_one_path, '*.' + extension)
        file_glob_zeros = os.path.join(label_zero_path, '*.' + extension)
file_list_zeros.extend(tf.gfile.Glob(file_glob_zeros))
file_list_ones.extend(tf.gfile.Glob(file_glob_ones))
files_one_and_zero = file_list_zeros + file_list_ones
all_files = sample(files_one_and_zero, len(files_one_and_zero));
input_data = np.zeros((len(all_files), height_img, width_img, 3), np.uint8)
labels = np.zeros((len(all_files), 1) , np.uint8)
for file in all_files:
input_data[all_files.index(file)] = cv2.imread(file, -1)
if re.search(pattern, file):
labels[all_files.index(file)] = 1
else:
labels[all_files.index(file)] = 0
print("number of train data is {}".format(len(labels)))
labels = np.eye(n_classes)[labels.reshape(-1)]
return input_data, labels
def capture_and_save_dataset():
global l
img = np.zeros((number_of_data, height_img, width_img, 3))
label = np.zeros(number_of_data).astype(int)
i = 0
cap = cv2.VideoCapture(0)
if cap.isOpened():
ret, frame = cap.read()
else:
ret = False
cap.set(3, width_img_cam);
cap.set(4, height_img_cam);
print(cap.get(3))
print(cap.get(4))
while (True):
ret, camera_img = cap.read()
#camera_img = cv2.cvtColor(camera_img, cv2.COLOR_BGR2GRAY)
frame = cv2.resize(camera_img, (width_img, height_img))
cv2.imshow(windowName, frame)
if l == 1:
label[i] = 0;
img[i] = frame
l = 0
i += 1
elif l == 2:
label[i] = 1
img[i] = frame
l = 0
i += 1
if i == number_of_data:
break
if cv2.waitKey(1) == 27: # exit on ESC
break
cv2.destroyAllWindows()
cap.release()
files_count_in_label_zero = len([f for f in os.listdir(label_zero_path)
if os.path.isfile(os.path.join(label_zero_path, f))])
files_count_in_label_one = len([f for f in os.listdir(label_one_path)
if os.path.isfile(os.path.join(label_one_path, f))])
files_counts = files_count_in_label_one + files_count_in_label_zero
n = 0
for j in label:
if j == 0:
files_counts += 1
cv2.imwrite(label_zero_path + "{}.jpg".format(files_counts), img[n],
[int(cv2.IMWRITE_JPEG_QUALITY), 90])
if j == 1:
files_counts += 1
cv2.imwrite(label_one_path + "{}.jpg".format(files_counts), img[n],
[int(cv2.IMWRITE_JPEG_QUALITY), 90])
n += 1
if cv2.waitKey(1) == 27: # exit on ESC
break
cv2.destroyAllWindows()
label = np.eye(n_classes)[label.reshape(-1)]
return img, label
def main():
images1, labels1 = capture_and_save_dataset()
graph, input_tensor, init = train_neural_network()
    feed_for()  # feed_for takes no arguments; it restores its own graph from disk
main()
|
#!/usr/local/bin/python3
# Make Shap.py
# imports
import sys
import pandas as pd
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import Pipeline
from imblearn.under_sampling import RandomUnderSampler, TomekLinks
from lightgbm import LGBMClassifier
from matplotlib import pyplot
from numpy import where
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.feature_selection import RFE, SelectFromModel, SelectKBest, chi2
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import explained_variance_score
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import (
LabelEncoder,
MinMaxScaler,
OneHotEncoder,
OrdinalEncoder,
)
from numpy import loadtxt
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
from sklearn.metrics import (
classification_report,
plot_confusion_matrix,
plot_det_curve,
plot_roc_curve,
)
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
import shap
#
#
# Load data; the file path and target column are taken from command line arguments
def extract_info(file, target):
    df = pd.read_csv(file, delimiter="\t")  # Load the data; the command line supplies the file path
    # get the X and y variables back from the data for a basic train/test split and baseline model
y = df[target]
X = df.drop(target, axis=1)
return X,y
def d_split(X,y):
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42)
return X_train, X_test, y_train, y_test
def classify(X_train, X_test, y_train, y_test):
'''
Input the data frames and put them into the classifier
'''
model = XGBClassifier()
model.fit(X_train, y_train.ravel())
y_pred = model.predict(X_test)
predictions = [round(value) for value in y_pred]
# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
precision = precision_score(y_test, y_pred, average='weighted')
recall = recall_score(y_test, y_pred, average='weighted')
#f1 = f1_score(y_test, y_pred, average='weighted')
print("Accuracy: %.2f%%" % (accuracy * 100.0))
print("Precision: %.2f%%" % (precision * 100.0))
print("Recall: %.2f%%" % (recall * 100.0))
#print("F1 Score: %.2f%%" % (f1_score * 100.0))
return model, X_test, y_test, y_pred, predictions, accuracy, precision, recall
# SHAP summary (bar) plot
def shapperBAR(model, X_test, y_test, y_pred, predictions):
    # Explain the fitted model on the test set and save a bar-style summary plot
    shap_values = shap.TreeExplainer(model).shap_values(X_test)
    f = plt.figure()
    shap.summary_plot(shap_values, X_test, plot_type="bar")
    f.savefig("shap_summary_bar.png", bbox_inches='tight', dpi=600)  # single summary figure, fixed name
    return shap_values
# not complete
def mayTheShapperForcebeWithYou(model, X_test, y_test, y_pred, predictions, j):
shap.initjs()
# Get the predictions and put them with the test data.
X_output = X_test.copy()
X_output.loc[:,'predict'] = np.round(model.predict(X_output),2)
# Randomly pick some observations
random_picks = np.arange(1,330,50) # Every 50 rows
S = X_output.iloc[random_picks]
explainerModel = shap.TreeExplainer(model)
shap_values_Model = explainerModel.shap_values(S)
p = shap.force_plot(explainerModel.expected_value, shap_values_Model[j], S.iloc[[j]])
return(p)
def makeDependence(X_data, shap_values):
    # One dependence plot per feature, each saved to its own file.
    # shap_values from an XGBoost TreeExplainer is a single (n_samples, n_features) array.
    for col in X_data.columns:
        f = plt.figure()
        shap.dependence_plot(col, shap_values, X_data)
        f.savefig(col + "_dependence.png", bbox_inches='tight', dpi=600)
def main():
args = sys.argv[1:]
    X, y = extract_info(file=args[0], target=args[1])
    X_train, X_test, y_train, y_test = d_split(X, y)
model, X_test, y_test, y_pred, predictions, accuracy, precision, recall = classify(X_train, X_test, y_train, y_test)
shap_values = shapperBAR(model, X_test, y_test, y_pred, predictions)
    makeDependence(X_test, shap_values)  # SHAP values above were computed on X_test
if __name__ == "__main__":
main()
|
from __future__ import print_function, division
import GLM.constants, os, pdb, pandas, numpy, logging, logging.handlers, crop_stats
import pygeoutil.util as util
class CropFunctionalTypes:
"""
"""
def __init__(self, res='q'):
"""
:param res: Resolution of output dataset: q=quarter, h=half, o=one
:return:
"""
# Dictionary of crop functional types
self.cft = {'C4Annual': ['maize.asc', 'millet.asc', 'sorghum.asc'],
'C4Perren': ['sugarcane.asc'],
'C3Perren': ['banana.asc', 'berry.asc', 'citrus.asc', 'fruittree.asc', 'grape.asc', 'palm.asc', 'tropevrgrn.asc'],
'Ntfixing': ['alfalfa.asc', 'bean.asc', 'legumehay.asc', 'peanut.asc', 'soybean.asc'],
'C3Annual': ['beet.asc', 'cassava.asc', 'cotton.asc', 'flax.asc', 'hops.asc', 'mixedcover.asc',
'nursflower.asc', 'oat.asc', 'potato.asc', 'rapeseed.asc', 'rice.asc', 'rye.asc',
'safflower.asc', 'sunflower.asc', 'tobacco.asc', 'vegetable.asc', 'wheat.asc'],
'TotlRice': ['rice.asc', 'xrice.asc']}
# Get shape of file
self.skiprows = 6
self.res = res
self.tmpdata = util.open_or_die(path_file=GLM.constants.MFD_DATA_DIR + os.sep + 'maize.asc',
skiprows=self.skiprows, delimiter=' ')
self.asc_hdr = util.get_ascii_header(path_file=GLM.constants.MFD_DATA_DIR + os.sep + 'maize.asc',
getrows=self.skiprows)
self.yshape = self.tmpdata.shape[0]
self.xshape = self.tmpdata.shape[1]
# Create empty numpy arrays
self.c4annual = numpy.zeros(shape=(self.yshape, self.xshape))
self.c4perren = numpy.zeros(shape=(self.yshape, self.xshape))
self.c3perren = numpy.zeros(shape=(self.yshape, self.xshape))
self.ntfixing = numpy.zeros(shape=(self.yshape, self.xshape))
self.c3annual = numpy.zeros(shape=(self.yshape, self.xshape))
self.totlrice = numpy.zeros(shape=(self.yshape, self.xshape))
self.totlcrop = numpy.zeros(shape=(self.yshape, self.xshape))
self.c4anarea = numpy.zeros(shape=(self.yshape, self.xshape))
self.c4prarea = numpy.zeros(shape=(self.yshape, self.xshape))
self.c3prarea = numpy.zeros(shape=(self.yshape, self.xshape))
self.ntfxarea = numpy.zeros(shape=(self.yshape, self.xshape))
self.c3anarea = numpy.zeros(shape=(self.yshape, self.xshape))
self.croparea = numpy.zeros(shape=(self.yshape, self.xshape))
# Area of each cell in Monfreda dataset
self.mfd_area = numpy.zeros(shape=(self.yshape, self.xshape))
# Ice-water fraction and other static data
self.icwtr = util.open_or_die(GLM.constants.path_GLM_stat)
# Read in area file based on res
if res == 'q':
self.area_data = util.open_or_die(path_file=GLM.constants.CELL_AREA_Q)
elif res == 'h':
self.area_data = util.open_or_die(path_file=GLM.constants.CELL_AREA_H)
elif res == 'o':
self.area_data = util.open_or_die(path_file=GLM.constants.CELL_AREA_O)
else:
logging.info('Incorrect resolution for output of Monfreda')
# Compute cell area (excluding ice-water fraction)
self.cell_area = util.open_or_die(GLM.constants.path_GLM_carea)
self.land_area = self.cell_area * (1.0 - self.icwtr.variables[GLM.constants.ice_water_frac][:, :])
# Get FAO country concordance list
self.fao_id = pandas.read_csv(GLM.constants.FAO_CONCOR)[['Country_FAO', 'ISO']]
# Output path
self.out_path = GLM.constants.out_dir + os.sep + 'Monfreda'
util.make_dir_if_missing(self.out_path)
def read_monfreda(self):
# Loop over crop functional types
        for key, value in self.cft.items():
for val in value:
logging.info('Processing ' + key + ' ' + val)
tmp_asc = util.open_or_die(path_file=GLM.constants.MFD_DATA_DIR + os.sep + val,
skiprows=self.skiprows, delimiter=' ')
if key == 'C4Annual':
self.c4annual = self.c4annual + tmp_asc
elif key == 'C4Perren':
self.c4perren = self.c4perren + tmp_asc
elif key == 'C3Perren':
self.c3perren = self.c3perren + tmp_asc
elif key == 'Ntfixing':
self.ntfixing = self.ntfixing + tmp_asc
elif key == 'C3Annual':
self.c3annual = self.c3annual + tmp_asc
elif key == 'TotlRice':
self.totlrice = self.totlrice + tmp_asc
else:
logging.info('Wrong key')
# Add to total crop fraction of grid cell area
self.totlcrop = self.totlcrop + tmp_asc
# Aggregate MONFREDA data from 5' to 0.25 degree
self.totlcrop = util.avg_np_arr(self.totlcrop, block_size=3)
self.croparea = self.totlcrop * self.land_area
# Aggregate MONFREDA data for each CFT from 5' to 0.25 degree
self.c4anarea = util.avg_np_arr(self.c4annual, block_size=3) * self.land_area
self.c4prarea = util.avg_np_arr(self.c4perren, block_size=3) * self.land_area
self.c3prarea = util.avg_np_arr(self.c3perren, block_size=3) * self.land_area
self.ntfxarea = util.avg_np_arr(self.ntfixing, block_size=3) * self.land_area
self.c3anarea = util.avg_np_arr(self.c3annual, block_size=3) * self.land_area
def read_HYDE(self):
pass
def output_ascii_to_file(self, fl_name, data, delim=' '):
asc_file = open(fl_name, 'w+')
if self.res == 'q':
ncols = 720
nrows = 360
cell_size = 0.25
elif self.res == 'h':
# @TODO
pass
elif self.res == 'o':
# @TODO
pass
asc_file.write('ncols %s\n' % ncols)
asc_file.write('nrows %s\n' % nrows)
asc_file.write('xllcorner -180\n')
asc_file.write('yllcorner -90\n')
asc_file.write('cellsize %s\n' % cell_size)
asc_file.write('NODATA_value -9999\n')
# Write numpy array
numpy.savetxt(asc_file, data, delimiter=delim)
asc_file.close()
def compute_area_by_country(self):
"""
Output area of each CFT by country
:return:
"""
df = []
# Read ASCII file of country codes
ccodes_fl = numpy.genfromtxt(GLM.constants.CNTRY_CODES, skip_header=0, delimiter=' ')
# Get list of unique countries, remove 0.0 as it is not a country ID
list_cntrs = numpy.unique(ccodes_fl)
list_cntrs = list_cntrs[list_cntrs > 0.0]
# For each country:
for cnt in list_cntrs:
# Get area of cropland in country based on MONFREDA data
area_cnt = self.croparea[ccodes_fl[:] == cnt].sum()
area_c4a = self.c4anarea[ccodes_fl[:] == cnt].sum()
area_c4p = self.c4prarea[ccodes_fl[:] == cnt].sum()
area_c3p = self.c3prarea[ccodes_fl[:] == cnt].sum()
area_ntf = self.ntfxarea[ccodes_fl[:] == cnt].sum()
area_c3a = self.c3anarea[ccodes_fl[:] == cnt].sum()
# Get country name from concordance table
cnt_name = self.fao_id[self.fao_id['ISO'] == int(cnt)]['Country_FAO'].iloc[0]
df.append({'ISO': int(cnt),
'Country_Monfreda': cnt_name,
'Area_Monfreda': area_cnt,
'Monfreda_c4annual': area_c4a,
'Monfreda_c4perren': area_c4p,
'Monfreda_c3perren': area_c3p,
'Monfreda_ntfixing': area_ntf,
'Monfreda_c3annual': area_c3a})
return pandas.DataFrame(df)
if __name__ == '__main__':
# Logging
    LOG_FILENAME = GLM.constants.log_dir + os.sep + 'Log_' + GLM.constants.TAG + '.txt'
logging.basicConfig(filename=LOG_FILENAME,level=logging.INFO, filemode='w',\
format='%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s',\
datefmt="%m-%d %H:%M") # Logging levels are DEBUG, INFO, WARNING, ERROR, and CRITICAL
# Add a rotating handler
logging.getLogger().addHandler(logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=50000, backupCount=5))
# Output to screen
logging.getLogger().addHandler(logging.StreamHandler())
obj = CropFunctionalTypes(res='q')
obj.read_monfreda()
df = obj.compute_area_by_country()
df.to_csv(obj.out_path + os.sep + 'Monfreda_area_crops.csv')
# Output crop area to file
obj.output_ascii_to_file(fl_name=obj.out_path + os.sep + 'MONFREDA_crop_area.asc', data=obj.croparea, delim=' ')
# Read in FAO data for entire time period
fao_obj = crop_stats.CropStats()
fao_obj.process_crop_stats()
# Compare FAO and Monfreda
# fao_df.FAO_perc_all_df
# Convert from ascii files to our crop functional types
# Aggregate ascii files to quarter degree
# Produce netCDF file as output
# Currently, we compute crop type ratios for the year 2000 based on Monfreda data crop type ratios and ag area
# from HYDE. This gives us a static number per CFT per grid cell. However, sometimes, the crop type ratio might be 0
# because Monfreda says that no crop exists, but HYDE says that crops do exist.
# New approach will:
# 1. Compute CFT fraction per grid cell in 2000
# 2. Aggregate this fractional value by country
# 3. Apply this fractional value for grid cells in the past where Monfreda says that no data exists. The values are
# assigned by country
# 1. Read in country codes file
# 2. Determine list of unique countries
# 3. For each country:
# 3a. Compute fraction of each CFT for that particular country in 2000 by averaging
# 4. Use the HYDE gcrop dataset so that for each grid cell where a crop is present, we provide either:
# a. Fraction as determined through Monfreda
# b. Country-specific fraction for grid cells where Monfreda says that there is no cropping
# 5. Output is a netCDF file with dimensions: time x lat x lon which provides CFT fraction for 5 CFTs across time
# and for each grid cell
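# A rough numpy sketch of the country-average fill described above (replacing a zero
# CFT fraction where Monfreda reports no crop but HYDE has cropland). The array names
# and the helper itself are hypothetical, not part of this module:
def fill_cft_fraction_by_country(cft_frac, hyde_crop_frac, country_codes):
    """Replace zero CFT fractions by their country mean wherever HYDE has cropland."""
    filled = cft_frac.copy()
    for cnt in numpy.unique(country_codes[country_codes > 0.0]):
        in_cnt = country_codes == cnt
        has_data = in_cnt & (cft_frac > 0.0)
        if not has_data.any():
            continue
        cnt_mean = cft_frac[has_data].mean()
        # grid cells where HYDE says crops exist but the Monfreda-derived fraction is 0
        to_fill = in_cnt & (cft_frac == 0.0) & (hyde_crop_frac > 0.0)
        filled[to_fill] = cnt_mean
    return filled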
# 1) We need to replace that 1/5th for each crop type with something more reasonable from the FAO data. So, we need
# to get FAO national ratios for each crop type for years 1961-2014 and then perhaps hold constant for 2015 and use
# 1961-1965 averages for years prior to 1961. This will give us a 199 x 5 x 516 file (could be netCDF or another format).
# 2) The next step is a bit more complicated. In this step we need to use the FAO data to modify the Monfreda maps
# for years before/after 2000. I think the first step here is to compare Monfreda and FAO national crop type ratios
# in year 2000. We can then 'normalize' the FAO values so that in the year 2000 FAO national ratios are the same as
# Monfreda. Using these normalized values we can modify the Monfreda map forwards and backwards in time (throughout
# the FAO period) by changing all grid-cells within a country using the new FAO ratios. Prior to 1961 we would hold
# the map constant. The input to GLM would be a lat x lon x time x 5 file.
# This second step is definitely more complicated so we just need to break it down into manageable chunks. I would
# not worry about forming a data cube at this point. We first need to be looking at the data and seeing how we can
# connect the FAO and Monfreda data together.
|
import numpy as np
import torch
import hexagdly as hex
import pytest
class TestConv2d(object):
def get_array(self):
return np.array(
[[j * 5 + 1 + i for j in range(8)] for i in range(5)], dtype=np.float32
)
def get_array_conv2d_size1_stride1(self):
return np.array(
[
[9, 39, 45, 99, 85, 159, 125, 136],
[19, 51, 82, 121, 152, 191, 222, 176],
[24, 58, 89, 128, 159, 198, 229, 181],
[29, 65, 96, 135, 166, 205, 236, 186],
[28, 39, 87, 79, 147, 119, 207, 114],
],
dtype=np.float32,
)
def get_array_conv2d_size2_stride1(self):
return np.array(
[
[42, 96, 128, 219, 238, 349, 265, 260],
[67, 141, 194, 312, 354, 492, 388, 361],
[84, 162, 243, 346, 433, 536, 494, 408],
[90, 145, 246, 302, 426, 462, 474, 343],
[68, 104, 184, 213, 314, 323, 355, 245],
],
dtype=np.float32,
)
def get_array_stride_2(self, array_stride_1):
array_stride_2 = np.zeros((2, 4), dtype=np.float32)
stride_2_pos = [
(0, 0, 0, 0),
(0, 1, 1, 2),
(0, 2, 0, 4),
(0, 3, 1, 6),
(1, 0, 2, 0),
(1, 1, 3, 2),
(1, 2, 2, 4),
(1, 3, 3, 6),
]
for pos in stride_2_pos:
array_stride_2[pos[0], pos[1]] = array_stride_1[pos[2], pos[3]]
return array_stride_2
def get_array_stride_3(self, array_stride_1):
array_stride_3 = np.zeros((2, 3), dtype=np.float32)
stride_3_pos = [
(0, 0, 0, 0),
(0, 1, 1, 3),
(0, 2, 0, 6),
(1, 0, 3, 0),
(1, 1, 4, 3),
(1, 2, 3, 6),
]
for pos in stride_3_pos:
array_stride_3[pos[0], pos[1]] = array_stride_1[pos[2], pos[3]]
return array_stride_3
def get_n_neighbors_size1(self):
return np.array(
[
[3, 6, 4, 6, 4, 6, 4, 4],
[5, 7, 7, 7, 7, 7, 7, 5],
[5, 7, 7, 7, 7, 7, 7, 5],
[5, 7, 7, 7, 7, 7, 7, 5],
[4, 4, 6, 4, 6, 4, 6, 3],
],
dtype=np.float32,
)
def get_n_neighbors_size2(self):
return np.array(
[
[7, 11, 11, 13, 11, 13, 9, 8],
[10, 15, 16, 18, 16, 18, 13, 11],
[12, 16, 19, 19, 19, 19, 16, 12],
[11, 13, 18, 16, 18, 16, 15, 10],
[8, 9, 13, 11, 13, 11, 11, 7],
],
dtype=np.float32,
)
def get_tensors(self, in_channels, kernel_size, stride, bias):
channel_dist = 1000
if bias is False:
bias_value = 0
else:
bias_value = 1.0
# input tensor
array = self.get_array()
array = np.expand_dims(
np.stack([j * channel_dist + array for j in range(in_channels)]), 0
)
tensor = torch.FloatTensor(array)
# expected output tensor
if kernel_size == 1:
conv2d_array = self.get_array_conv2d_size1_stride1()
n_neighbours = self.get_n_neighbors_size1()
elif kernel_size == 2:
conv2d_array = self.get_array_conv2d_size2_stride1()
n_neighbours = self.get_n_neighbors_size2()
convolved_array = np.sum(
np.stack(
[
(channel * channel_dist) * n_neighbours + conv2d_array
for channel in range(in_channels)
]
),
0,
)
if stride == 2:
convolved_array = self.get_array_stride_2(convolved_array)
elif stride == 3:
convolved_array = self.get_array_stride_3(convolved_array)
convolved_array = np.expand_dims(np.expand_dims(convolved_array, 0), 0)
convolved_tensor = torch.FloatTensor(convolved_array) + bias_value
# output tensor of test method
conv2d = hex.Conv2d(in_channels, 1, kernel_size, stride, bias, True)
return conv2d(tensor), convolved_tensor
def test_in_channels_1_kernel_size_1_stride_1_bias_False(self):
in_channels = 1
kernel_size = 1
stride = 1
bias = False
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_1_kernel_size_1_stride_2_bias_False(self):
in_channels = 1
kernel_size = 1
stride = 2
bias = False
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_1_kernel_size_1_stride_3_bias_False(self):
in_channels = 1
kernel_size = 1
stride = 3
bias = False
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_1_kernel_size_2_stride_1_bias_False(self):
in_channels = 1
kernel_size = 2
stride = 1
bias = False
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_1_kernel_size_2_stride_2_bias_False(self):
in_channels = 1
kernel_size = 2
stride = 2
bias = False
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_1_kernel_size_2_stride_3_bias_False(self):
in_channels = 1
kernel_size = 2
stride = 3
bias = False
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_5_kernel_size_1_stride_1_bias_False(self):
in_channels = 5
kernel_size = 1
stride = 1
bias = False
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_5_kernel_size_1_stride_2_bias_False(self):
in_channels = 5
kernel_size = 1
stride = 2
bias = False
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_5_kernel_size_1_stride_3_bias_False(self):
in_channels = 5
kernel_size = 1
stride = 3
bias = False
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_5_kernel_size_2_stride_1_bias_False(self):
in_channels = 5
kernel_size = 2
stride = 1
bias = False
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_5_kernel_size_2_stride_2_bias_False(self):
in_channels = 5
kernel_size = 2
stride = 2
bias = False
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_5_kernel_size_2_stride_3_bias_False(self):
in_channels = 5
kernel_size = 2
stride = 3
bias = False
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_1_kernel_size_1_stride_1_bias_True(self):
in_channels = 1
kernel_size = 1
stride = 1
bias = True
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_1_kernel_size_1_stride_2_bias_True(self):
in_channels = 1
kernel_size = 1
stride = 2
bias = True
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_1_kernel_size_1_stride_3_bias_True(self):
in_channels = 1
kernel_size = 1
stride = 3
bias = True
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_1_kernel_size_2_stride_1_bias_True(self):
in_channels = 1
kernel_size = 2
stride = 1
bias = True
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_1_kernel_size_2_stride_2_bias_True(self):
in_channels = 1
kernel_size = 2
stride = 2
bias = True
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_1_kernel_size_2_stride_3_bias_True(self):
in_channels = 1
kernel_size = 2
stride = 3
bias = True
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_5_kernel_size_1_stride_1_bias_True(self):
in_channels = 5
kernel_size = 1
stride = 1
bias = True
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_5_kernel_size_1_stride_2_bias_True(self):
in_channels = 5
kernel_size = 1
stride = 2
bias = True
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_5_kernel_size_1_stride_3_bias_True(self):
in_channels = 5
kernel_size = 1
stride = 3
bias = True
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_5_kernel_size_2_stride_1_bias_True(self):
in_channels = 5
kernel_size = 2
stride = 1
bias = True
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_5_kernel_size_2_stride_2_bias_True(self):
in_channels = 5
kernel_size = 2
stride = 2
bias = True
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
def test_in_channels_5_kernel_size_2_stride_3_bias_True(self):
in_channels = 5
kernel_size = 2
stride = 3
bias = True
test_ouput, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_ouput, expectation)
|
import cv2
import numpy as np
img = cv2.imread("simpsons.jpg")
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
template = cv2.imread("barts_face.jpg", cv2.IMREAD_GRAYSCALE)
w, h = template.shape[::-1]
result = cv2.matchTemplate(gray_img, template, cv2.TM_CCOEFF_NORMED)
loc = np.where(result >= 0.4)
for pt in zip(*loc[::-1]):
cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), (0, 255, 0), 3)
cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
from flask import Flask,request, session, Response
import uuid
import boto3
from config import S3_BUCKET, S3_KEY, S3_SECRET, host, user, passwd, database, secret_key
from flask_cors import CORS
import json
from upload_to_ipfs import *
app = Flask(__name__)
CORS(app)
aws = boto3.resource(
's3',
aws_access_key_id=S3_KEY,
aws_secret_access_key=S3_SECRET
)
bucket = aws.Bucket(S3_BUCKET)
bucket.Acl().put(ACL='public-read')
@app.route("/ping")
def ping():
return "pong"
@app.route("/upload_file", methods=["POST"])
def upload():
file = request.files['file']
file_image_id = str(uuid.uuid1())[:10]
filename = str(uuid.uuid1()) + ".jpg"
# response = bucket.Object(filename).put(Body=file, ACL='public-read')
file.save(file.filename)
ipfs_link = upload_to_pinata(file.filename, file_image_id)
return {
'metadata_link': f'https://gateway.pinata.cloud/ipfs/{ipfs_link["IpfsHash"]}'
}
@app.route("/upload_depth_file", methods=["POST"])
def upload_depth_file():
file = request.files['file']
file_image_id = str(uuid.uuid1())[:10]
filename = str(uuid.uuid1()) + "_depth" + ".jpg"
response = bucket.Object(filename).put(Body=file, ACL='public-read')
return {
'result': 'good'
}
@app.route("/create_nft_metadata", methods=["POST"])
def create_nft_metadata():
data = request.get_json()
metadata_template = {
"name": "",
"description": "",
"image": "",
"attributes": []
}
ipfs_image = data['imageUrl']
nft_name = data['name']
metadata_template['name'] = data['name']
metadata_template['description'] = data['description']
metadata_template['image'] = ipfs_image
with open('metadata.json', 'w', encoding='utf-8') as f:
json.dump(metadata_template, f, ensure_ascii=False, indent=4)
ipfs_link = upload_to_pinata('metadata.json', nft_name)
return {
'metadata_link': f'https://gateway.pinata.cloud/ipfs/{ipfs_link["IpfsHash"]}'
}
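# Example request body for /create_nft_metadata (illustrative values only):
#   {"name": "My NFT", "description": "A test token",
#    "imageUrl": "https://gateway.pinata.cloud/ipfs/<hash>"}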
# /create_nft
# /list_user_nft
if __name__ == '__main__':
app.run(debug=True) |
'''7. Now write a function that receives a word and says whether it is a
palindrome, that is, whether it is equal to itself reversed.
Hint: use the function from exercise 5.'''
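# A minimal function-based sketch of the exercise; the name 'eh_palindromo' and the
# use of slicing instead of the exercise-5 helper are assumptions:
def eh_palindromo(palavra):
    return palavra == palavra[::-1]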
# Checking whether the word is a palindrome
# Read the word to check
palavra = input('Informe uma palavra: ').lower()
analise = palavra
x = list(analise)
a = x[::-1]
nova_frase = ''.join(a)
if palavra==nova_frase:
print('É um palíndromo')
elif palavra!=nova_frase:
    print('Não é palíndromo:')
|
from lib import requests
import uuid
import webbrowser
import threading
import httplib
import json
import time
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import urlparse
from workflow import Workflow
auth_url = "https://www.wunderlist.com/oauth/authorize"
callback = "http://127.0.0.1:8080"
class OAuthListener(BaseHTTPRequestHandler):
def do_GET(self):
parsed_path = urlparse.urlparse(self.path)
self.query = parsed_path.query
try:
code = self.query[self.query.find("code")+5:]
state = self.query[self.query.find("state")+6:self.query.find("code")-1]
message = "<h1>Authorization successful</h1>"
set_token(state, code)
self.wfile.write(message)
except:
pass
finally:
self.server.stop = True
return
def do_QUIT(self):
self.send_response(200)
self.end_headers()
self.server.stop = True
class StoppableServer(HTTPServer):
def serve_forever(self):
self.stop = False
while not self.stop:
self.handle_request()
def end_server(s):
time.sleep(15)
s.stop = True
def set_token(state, code):
wf = Workflow()
if wf.stored_data('state') != None and wf.stored_data('state') == state:
wf.store_data('code', code)
token_url = "https://www.wunderlist.com/oauth/access_token"
headers = {"accept": "application/json"}
params = {"client_id": wf.settings['api']['client_id'],
"client_secret": wf.settings['api']['client2'],
"code": wf.stored_data('code')}
r = requests.post(token_url, data=params, headers=headers)
data = r.json()
token = data['access_token']
wf.save_password('token', token)
def do_login():
wf = Workflow()
if wf.stored_data('state') == None:
wf.store_data('state', str(uuid.uuid1()))
server = StoppableServer(('127.0.0.1', 8080), OAuthListener)
t = threading.Thread(target=server.serve_forever)
stop = threading.Thread(target=end_server, args=(server,))
url = auth_url + "?client_id=" + wf.settings['api']['client_id']
url += "&redirect_uri=" + callback
url += "&state=" + wf.stored_data('state')
webbrowser.open(url)
t.start()
stop.start()
|
import matplotlib.pyplot as plt
from structure_factor.point_processes import HomogeneousPoissonPointProcess
from structure_factor.spatial_windows import BallWindow
from structure_factor.structure_factor import StructureFactor
from structure_factor.tapered_estimators_isotropic import (
allowed_k_norm_bartlett_isotropic,
)
point_process = HomogeneousPoissonPointProcess(intensity=1)
window = BallWindow(center=[0, 0], radius=50)
point_pattern = point_process.generate_point_pattern(window=window)
sf = StructureFactor(point_pattern)
d, r = point_pattern.dimension, point_pattern.window.radius
k_norm = allowed_k_norm_bartlett_isotropic(dimension=d, radius=r, nb_values=60)
k_norm, sf_estimated = sf.bartlett_isotropic_estimator(k_norm)
ax = sf.plot_isotropic_estimator(
k_norm, sf_estimated, label=r"$\widehat{S}_{\mathrm{BI}}(k)$"
)
plt.tight_layout(pad=1)
|
# *********************************************************************************
# REopt, Copyright (c) 2019-2020, Alliance for Sustainable Energy, LLC.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# *********************************************************************************
import json
import sys
import uuid
from typing import Dict, Union
from django.forms.models import model_to_dict
from django.http import JsonResponse, HttpRequest
from reo.exceptions import UnexpectedError
from reo.models import ModelManager
from reo.models import ScenarioModel, PVModel, StorageModel, LoadProfileModel, GeneratorModel, FinancialModel, WindModel
from reo.utilities import annuity
from resilience_stats.models import ResilienceModel
from resilience_stats.outage_simulator_LF import simulate_outages
import numpy as np
def resilience_stats(request: Union[Dict, HttpRequest], run_uuid=None):
"""
Run outage simulator for given run_uuid
:param request: optional parameter for 'bau', boolean
:param run_uuid:
:return: {"resilience_by_timestep",
"resilience_hours_min",
"resilience_hours_max",
"resilience_hours_avg",
"outage_durations",
"probs_of_surviving",
}
Also can GET the same values as above with '_bau' appended if 'bau=true' for the site's existing capacities.
"""
try:
uuid.UUID(run_uuid) # raises ValueError if not valid uuid
except ValueError as e:
if e.args[0] == "badly formed hexadecimal UUID string":
return JsonResponse({"Error": str(e.args[0])}, status=400)
else:
exc_type, exc_value, exc_traceback = sys.exc_info()
err = UnexpectedError(exc_type, exc_value.args[0], exc_traceback, task='resilience_stats',
run_uuid=run_uuid)
err.save_to_db()
return JsonResponse({"Error": str(err.message)}, status=400)
bau = False # whether or not user wants outage simulator run with existing sizes
if isinstance(request, HttpRequest):
if request.GET.get('bau') in ["True", "true", "1"]:
bau = True
elif isinstance(request, dict):
bau = request.get("bau")
# Safety check; No exception is expected if called after POST-ing to /outagesimjob end point
try:
scenario = ScenarioModel.objects.get(run_uuid=run_uuid)
except ScenarioModel.DoesNotExist:
msg = "Scenario {} does not exist.".format(run_uuid)
return JsonResponse({"Error": msg}, content_type='application/json', status=404)
if scenario.status == "Optimizing...":
return JsonResponse({"Error": "The scenario is still optimizing. Please try again later."},
content_type='application/json', status=404)
elif "error" in scenario.status.lower():
return JsonResponse(
{"Error": "An error occurred in the scenario. Please check the messages from your results."},
content_type='application/json', status=500)
try: # catch all exceptions
try: # catch specific exception
not_ready_msg = ('Outage sim results are not ready. '
'If you have already submitted an outagesimjob, please try again later. '
'If not, please first submit an outagesimjob by sending a POST request to '
'v1/outagesimjob/ with run_uuid and bau parameters. This will generate'
' outage simulation results that you can access from a GET request to the '
'v1/job/<run uuid>/resilience_stats endpoint. ')
not_ready_msg += 'Sample body data for POST-ing to /outagesimjob/: {"run_uuid\": \"6ea30f0f-3723-4fd1-8a3f-bebf8a3e4dbf\", \"bau\": false}'
rm = ResilienceModel.objects.get(scenariomodel=scenario)
if rm.resilience_by_timestep is None:
return JsonResponse({"Error": not_ready_msg}, content_type='application/json', status=404)
except ResilienceModel.DoesNotExist: # case for no resilience_stats generated yet
return JsonResponse({"Error": not_ready_msg}, content_type='application/json', status=404)
else: # ResilienceModel does exist
results = model_to_dict(rm)
# remove items that user does not need
del results['scenariomodel']
del results['id']
if bau and results[
"probs_of_surviving_bau"] is None: # then need to run outage_sim with existing sizes (BAU)
bau_results = run_outage_sim(run_uuid, with_tech=False, bau=bau)
ResilienceModel.objects.filter(id=rm.id).update(**bau_results)
results.update(bau_results)
if not bau: # remove BAU results from results dict (if they're there)
filtered_dict = {k: v for k, v in results.items() if "_bau" not in k}
results = filtered_dict
results.update({
"help_text": ("The present_worth_factor and avg_critical_load are provided such"
" that one can calculate an avoided outage cost in dollars by multiplying a value "
"of load load ($/kWh) by the avg_critical_load, resilience_hours_avg, and present_worth_factor."
" Note that if the outage event is 'major' (i.e. only occurs once), then the present_worth_factor is 1.")
})
response = JsonResponse({"outage_sim_results": results}, content_type='application/json', status=200)
return response
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
err = UnexpectedError(exc_type, exc_value.args[0], exc_traceback, task='resilience_stats', run_uuid=run_uuid)
err.save_to_db()
return JsonResponse({"Error": err.message}, status=500)
def financial_check(request, run_uuid=None):
""" Check to see if resilience scenario system sizes are the same as financial scenario sizes """
resilience_uuid = request.GET.get('resilience_uuid')
if resilience_uuid is None: # preserving old behavior
resilience_uuid = run_uuid
financial_uuid = request.GET.get('financial_uuid')
def parse_system_sizes(site):
size_dict = dict()
if "Generator" in site:
size_dict["Generator"] = site["Generator"]["size_kw"]
if "Storage" in site:
size_dict["Storage_kw"] = site["Storage"]["size_kw"]
size_dict["Storage_kwh"] = site["Storage"]["size_kwh"]
if "Wind" in site:
size_dict["Wind"] = site["Wind"]["size_kw"]
if "PV" in site:
size_dict["PV"] = site["PV"]["size_kw"]
return size_dict
# validate uuid's
try:
uuid.UUID(str(resilience_uuid)) # raises ValueError if not valid uuid
uuid.UUID(str(financial_uuid)) # raises ValueError if not valid uuid
except ValueError as e:
if e.args[0] == "badly formed hexadecimal UUID string":
return JsonResponse({"Error": str(e.args[0])}, status=400)
else:
exc_type, exc_value, exc_traceback = sys.exc_info()
err = UnexpectedError(exc_type, exc_value.args[0], exc_traceback, task='resilience_stats',
run_uuid=resilience_uuid)
err.save_to_db()
return JsonResponse({"Error": str(err.message)}, status=400)
try:
resil_scenario = ScenarioModel.objects.get(run_uuid=resilience_uuid)
except ScenarioModel.DoesNotExist:
msg = "Scenario {} does not exist.".format(resilience_uuid)
return JsonResponse({"Error": msg}, content_type='application/json', status=404)
if resil_scenario.status == "Optimizing...":
return JsonResponse({"Error": "The resilience scenario is still optimizing. Please try again later."},
content_type='application/json', status=500)
elif "error" in resil_scenario.status.lower():
return JsonResponse(
{"Error": "An error occurred in the resilience scenario. Please check the messages from your results."},
content_type='application/json', status=500)
try:
financial_scenario = ScenarioModel.objects.get(run_uuid=financial_uuid)
except ScenarioModel.DoesNotExist:
msg = "Scenario {} does not exist.".format(financial_uuid)
return JsonResponse({"Error": msg}, content_type='application/json', status=404)
if financial_scenario.status == "Optimizing...":
return JsonResponse({"Error": "The financial scenario is still optimizing. Please try again later."},
content_type='application/json', status=500)
elif "error" in financial_scenario.status.lower():
return JsonResponse(
{"Error": "An error occurred in the financial scenario. Please check the messages from your results."},
content_type='application/json', status=500)
try:
# retrieve sizes from db
resilience_result = ModelManager.make_response(resilience_uuid)
financial_result = ModelManager.make_response(financial_uuid)
resilience_sizes = parse_system_sizes(resilience_result["outputs"]["Scenario"]["Site"])
financial_sizes = parse_system_sizes(financial_result["outputs"]["Scenario"]["Site"])
survives = True
if resilience_sizes.keys() == financial_sizes.keys():
for tech, resil_size in resilience_sizes.items():
if float(resil_size - financial_sizes[tech]) / float(max(resil_size, 1)) > 1.0e-3:
survives = False
break
else:
survives = False
response = JsonResponse({"survives_specified_outage": survives})
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
err = UnexpectedError(exc_type, exc_value.args[0], exc_traceback, task='resilience_stats',
run_uuid=resilience_uuid)
err.save_to_db()
return JsonResponse({"Error": err.message}, status=500)
else:
return response
def run_outage_sim(run_uuid, with_tech=True, bau=False):
load_profile = LoadProfileModel.objects.filter(run_uuid=run_uuid).first()
gen = GeneratorModel.objects.filter(run_uuid=run_uuid).first()
batt = StorageModel.objects.filter(run_uuid=run_uuid).first()
pvs = PVModel.objects.filter(run_uuid=run_uuid)
financial = FinancialModel.objects.filter(run_uuid=run_uuid).first()
wind = WindModel.objects.filter(run_uuid=run_uuid).first()
batt_roundtrip_efficiency = batt.internal_efficiency_pct \
* batt.inverter_efficiency_pct \
* batt.rectifier_efficiency_pct
results = dict()
if with_tech: # NOTE: with_tech case is run after each optimization in reo/results.py
celery_eager = True
""" nlaws 200229
Set celery_eager = False to run each inner outage simulator loop in parallel. Timing tests with generator only
indicate that celery task management does not improve speed due to the overhead required to manage the 8760 tasks.
However, if the outage simulator does get more complicated (say with CHP) we should revisit using celery to run
the inner loops in parallel.
try:
if load_profile['outage_end_hour'] - load_profile['outage_start_hour'] > 1000:
celery_eager = False
except KeyError:
pass # in case no outage has been defined
"""
pv_kw_ac_hourly = np.zeros(len(pvs[0].year_one_power_production_series_kw))
for pv in pvs:
pv_kw_ac_hourly += np.array(pv.year_one_power_production_series_kw)
pv_kw_ac_hourly = list(pv_kw_ac_hourly)
tech_results = simulate_outages(
batt_kwh=batt.size_kwh or 0,
batt_kw=batt.size_kw or 0,
pv_kw_ac_hourly=pv_kw_ac_hourly,
wind_kw_ac_hourly=wind.year_one_power_production_series_kw,
init_soc=batt.year_one_soc_series_pct,
critical_loads_kw=load_profile.critical_load_series_kw,
batt_roundtrip_efficiency=batt_roundtrip_efficiency,
diesel_kw=gen.size_kw or 0,
fuel_available=gen.fuel_avail_gal,
b=gen.fuel_intercept_gal_per_hr,
m=gen.fuel_slope_gal_per_kwh,
diesel_min_turndown=gen.min_turn_down_pct,
celery_eager=celery_eager
)
results.update(tech_results)
if bau:
# only PV and diesel generator may have existing size
pv_kw_ac_hourly = np.zeros(len(load_profile.critical_load_series_kw))
for pv in pvs:
if pv.existing_kw > 0:
pv_kw_ac_hourly += np.array(pv.year_one_power_production_series_kw) * pv.existing_kw / pv.size_kw
pv_kw_ac_hourly = list(pv_kw_ac_hourly)
bau_results = simulate_outages(
batt_kwh=0,
batt_kw=0,
pv_kw_ac_hourly=pv_kw_ac_hourly,
critical_loads_kw=load_profile.critical_load_series_kw,
diesel_kw=gen.existing_kw or 0,
fuel_available=gen.fuel_avail_gal,
b=gen.fuel_intercept_gal_per_hr,
m=gen.fuel_slope_gal_per_kwh,
diesel_min_turndown=gen.min_turn_down_pct
)
results.update({key + '_bau': val for key, val in bau_results.items()})
""" add avg_crit_ld and pwf to results so that avoided outage cost can be determined as:
avoided_outage_costs_us_dollars = resilience_hours_avg *
value_of_lost_load_us_dollars_per_kwh *
avg_crit_ld *
present_worth_factor
"""
avg_critical_load = round(sum(load_profile.critical_load_series_kw) /
len(load_profile.critical_load_series_kw), 5)
if load_profile.outage_is_major_event:
# assume that outage occurs only once in analysis period
present_worth_factor = 1
else:
present_worth_factor = annuity(financial.analysis_years, financial.escalation_pct,
financial.offtaker_discount_pct)
results.update({"present_worth_factor": present_worth_factor,
"avg_critical_load": avg_critical_load})
return results
|
# Generated by Django 2.2.20 on 2021-07-09 17:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('invitations', '0010_invitation_meta'),
]
operations = [
migrations.AlterField(
model_name='invitation',
name='submission',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='invitations', to='cases.Submission'),
),
]
|
#! /usr/bin/env python
# Copyright (c) 2016 Zielezinski A, combio.pl
import argparse
import sys
from alfpy import word_vector
from alfpy import word_distance
from alfpy.utils import distmatrix
from alfpy.utils import seqrecords
from alfpy import word_pattern
from alfpy.version import __version__
def get_parser():
parser = argparse.ArgumentParser(
description='''Calculate compositional distances between DNA/protein
sequences based on word (of length k) occurrences using a Markov model
of k-2.''',
add_help=False, prog='calc_word_cv.py'
)
group = parser.add_argument_group('REQUIRED ARGUMENTS')
group.add_argument('--fasta', '-f',
help='input FASTA sequence filename', required=True,
type=argparse.FileType('r'), metavar="FILE")
group = parser.add_argument_group(' Choose between the two options')
g1 = group.add_mutually_exclusive_group()
g1.add_argument('--word_size', '-s', metavar="k", type=int,
help='''word size (k-mer) for creating word patterns
(must be >= 3)'''
)
g1.add_argument('--word_patterns', '-w', nargs=3,
help='''3 input word pattern files (k-, [k-1]-,
[k-2]-mers)''',
type=argparse.FileType('r'), metavar="FILE")
group = parser.add_argument_group('OUTPUT ARGUMENTS')
group.add_argument('--out', '-o', help="output filename",
metavar="FILE")
group.add_argument('--outfmt', choices=['phylip', 'pairwise'],
default='phylip',
help='distances output format [DEFAULT: %(default)s]')
group = parser.add_argument_group("OTHER OPTIONS")
group.add_argument("-h", "--help", action="help",
help="show this help message and exit")
group.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
if len(sys.argv[1:]) == 0:
# parser.print_help()
parser.print_usage()
parser.exit()
return parser
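# Example invocation (hypothetical file names):
#   python calc_word_cv.py --fasta seqs.fasta --word_size 3 --outfmt phylip --out dist.phylip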
def validate_args(parser):
args = parser.parse_args()
if args.word_size:
if args.word_size < 3:
parser.error('Word size must be >= 3')
elif args.word_patterns:
l = []
for i in range(0, 3):
try:
p = word_pattern.read(args.word_patterns[i])
l.append(p)
except Exception:
parser.error('Invalid format for word pattern: {0}'.format(
args.word_patterns[i].name))
if len(l) == 3:
# check if follow rule
k, k1, k2 = [len(p.pat_list[0]) for p in l]
if not (k == k1 + 1 == k2 + 2):
parser.error(
'''Word pattern lengths do not follow k, k-1, k-2''')
args.word_patterns = l
else:
parser.error("Specify either: --word_size or --word_pattern.")
return args
def main():
parser = get_parser()
args = validate_args(parser)
seq_records = seqrecords.read_fasta(args.fasta)
if args.word_patterns:
l = args.word_patterns
else:
l = []
for i in range(args.word_size, args.word_size - 3, -1):
p = word_pattern.create(seq_records.seq_list, i)
l.append(p)
compos = word_vector.Composition(seq_records.length_list, *l)
dist = word_distance.Distance(compos, 'angle_cos_diss')
matrix = distmatrix.create(seq_records.id_list, dist)
if args.out:
oh = open(args.out, 'w')
matrix.write_to_file(oh, args.outfmt)
oh.close()
else:
matrix.display(args.outfmt)
if __name__ == '__main__':
main()
|
import paste.urlmap
from hsm import flags
FLAGS = flags.FLAGS
def root_app_factory(loader, global_conf, **local_conf):
if not FLAGS.enable_v1_api:
del local_conf['/v1']
return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf)
|
#-*- coding: utf-8 -*-
import os,shutil,time
import glob
import subprocess
from random import randint
import threading
import pdb
def geraNome():
a = randint(0,90000)
ext ='.avi'
nome_arquivo = str(a)+ext
return nome_arquivo
def retira_audio(video):
print(video)
sem_audio = video
nome_audio=geraNome()
retira_audio = ['ffmpeg', '-i',sem_audio,'-y','-an', nome_audio]
print('----------------APLICANDO EFEITO PARA RETIRAR O AUDIO------------------')
an = subprocess.Popen(retira_audio,stdout=subprocess.PIPE, stderr=subprocess.STDOUT,universal_newlines=True)
saida_audio=an.wait()
if saida_audio == 0:
print ('AUDIO RETIRADO COM SUCESSO',nome_audio)
#rm = os.unlink(sem_audio)
#rm = os.unlink(video_slow)
mv = shutil.move(sem_audio, './Audio')
print('move com sucesso')
else:
print ('PROBLEMAS AO RETIRAR O AUDIO')
return nome_audio
def gerar_slow(video_gerar_slow):
print('----PARAMETRO RECEBIDO',video_gerar_slow)
slow=video_gerar_slow
nome_slow=geraNome()
aplica_slow = [
'ffmpeg',
'-i',
slow,
'-y',
'-filter:v',
'setpts=1.7*PTS',
nome_slow
]
print('----------------APLICANDO EFEITO PARA GERAR O SLOW MOTION------------------')
slow_motion = subprocess.Popen(aplica_slow,stdout=subprocess.PIPE, stderr=subprocess.STDOUT,universal_newlines=True)
saida_slow = slow_motion.wait()
if saida_slow == 0:
print('SLOW MOTION GERADO COM SUCESSO',nome_slow)
#mv = shutil.move(nome_slow, './Slow')
#rm = os.unlink(slow)
#print('move SLOW com sucesso')
#print('remove SLOW com sucesso')
else:
print('PROBLEMAS COM SLOW MOTION')
return nome_slow
def concatena_slow (video_audio, video_slow):
print("--------VAMOS CONCATENAR O VIDEO SEM AUDIO COM SLOW MOTION--------")
print("--------VIDEO SEM AUDIO RECEBIDO",video_audio)
print("--------VIDEO ORIGINAL RECEBIDO",video_slow)
video_intro="/home/pi/app/flask-celery/static/videos/fechamento.avi"
nome_concatena=geraNome()
print(video_intro)
concatenar = [
"ffmpeg",
"-i",
video_audio,
"-i",
video_slow,
"-i",
video_intro,
"-filter_complex",
"[0:v] [1:v] [2:v] concat=n=3:v=1 [v]",
"-map","[v]",
nome_concatena
]
print(concatenar)
print('----------------APLICANDO PARA CONCATENAR VIDEO SEM AUDO COM SLOW MOTION------------------')
concat_video = subprocess.Popen(concatenar,stdout=subprocess.PIPE, stderr=subprocess.STDOUT,universal_newlines=True)
saida_concat = concat_video.wait()
if saida_concat == 0:
print('VIDEOS CONCATENADO COM SUCESSO',nome_concatena)
rm = os.unlink(video_audio)
rm = os.unlink(video_slow)
mv = shutil.move(nome_concatena, './Reproduzir')
print('move com sucesso')
else:
print('PROBLEMAS AO CONCATENAR VIDEOS')
return nome_concatena
def time_line_finaliza_video(verifica_video):
    print('--------- STARTING VIDEO CHECK AND EDITING ---------')
    #pdb.set_trace()
    if verifica_video == 1:
        print('---------------- Checking whether there are VIDEOS in the folder -----------------')
        lista_videos = glob.glob('./tmp/video_extraido/*.avi')
        lista_imagens = glob.glob('./static/images/*.png')
        # Two images are required: the opening image and the advertising image.
        if len(lista_imagens) >= 2:
            imagem_abertura = lista_imagens[0]
            imagem_propaganda = lista_imagens[1]
        else:
            print('There are not enough images to concatenate into the video')
            return 'video timeline not generated: missing images'
        print(lista_videos)
        if len(lista_videos) > 0:
            print('List of videos in the folder', lista_videos)
            for i in range(len(lista_videos)):
                print('Video from the list', lista_videos[i])
                video_normal = lista_videos[i]
                video_slow = lista_videos[i]
                cmd = ["./FinalizaVideo.sh", imagem_abertura, imagem_propaganda, video_normal, video_slow]
                #p = subprocess.Popen(cmd)
                print(cmd)
                a = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
                # read the script output before waiting so the pipe cannot fill up and block
                out, _ = a.communicate()
                print(out)
                print('---------------- FinalizaVideo.sh FINISHED FOR', video_normal, '------------------')
        else:
            print('there is no .avi file in the folder')
    elif verifica_video == 2:
        print('exiting')
        # terminate the process when asked to finish
        exit(1)
    return 'video timeline generated' |
from machine.plugins.base import MachineBasePlugin
from machine.plugins.decorators import respond_to, listen_to, process, on
import re
import os
from datetime import datetime
from base.slack import Slack
from base.calendar_parser import Calendar, Event
from utils.command_descriptions import Command, CommandDescriptions
from utils.bot_descriptions import Bot, BotDescriptions
class AnswerFAQPlugin(MachineBasePlugin):
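    # Answers recurring makerspace questions in Slack: the FAQ link, key hand-outs,
    # member storage boxes, garbage handling and wiki lookups.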
#TODO use message payload for the weblinks?
commands = CommandDescriptions()
def init(self):
self.bots = BotDescriptions()
faqBot = Bot("faqbot", "svarar på diverse frågor.")
self.bots.add(faqBot)
calendar_ical_url = os.getenv('BOT_CALENDAR_URL')
        if calendar_ical_url is None:
raise RuntimeError('BOT_CALENDAR_URL not set')
self.calendar = Calendar(calendar_ical_url)
def init_final(self):
#@process('hello')
#def start(self, event):
self.slackUtil = Slack(self)
self.slackUtil.sendStatusMessage("FAQ plugin started.")
command = Command("faqbot", "Beskrivning av faq botten", aliases=["aboutfaqbot"])
commands.add(command)
@listen_to(regex=command.regex)
def aboutFAQQuestion(self, msg):
msgToSend="Faq botten svarar på diverse frågor\nArgument delas upp med :\n"+str(self.commands) #TODO fix : so it is in a settings file
self.slackUtil.sendMessage(msgToSend, msg)
command = Command('faq','Länk till Makerspace FAQ')
commands.add(command)
@listen_to(regex=command.regex)
def faqQuestion(self, msg):
msgToSend="Makerspace FAQ: https://wiki.makerspace.se/Makerspace_FAQ"
self.slackUtil.sendMessage(msgToSend, msg)
command = Command('nyckel','Information om nyckelutlämningar')
commands.add(command)
@listen_to(regex=r'nyckelutlämning.*\?')
@listen_to(regex=command.regex)
def keyQuestion(self, msg):
events_nyckel = self.calendar.find_events('nyckelutlämning')
msgToSend = "Nyckelutlämningar sker ungefär varannan vecka och du måste boka en tid. Länken finns i Kalendariet (https://www.makerspace.se/kalendarium)."
if len(events_nyckel) == 0:
msgToSend += "\nDet finns ingen planerad nyckelutläming."
elif len(events_nyckel) == 1:
days_left = (events_nyckel[0].start_time.date() - datetime.now().date()).days
msgToSend += f"\n\nNästa utlämning är planerad till {events_nyckel[0].start_time.strftime('%Y-%m-%d %H:%M')}-{events_nyckel[0].end_time.strftime('%H:%M')} (om {days_left} dagar)"
else:
msgToSend += "\n\nDet finns flera planerade nyckelutlämningar:"
for event_nyckel, _ in zip(events_nyckel, range(5)):
days_left = (event_nyckel.start_time.date() - datetime.now().date()).days
msgToSend += f"\n - {event_nyckel.start_time.strftime('%Y-%m-%d %H:%M')}-{event_nyckel.end_time.strftime('%H:%M')} (om {days_left} dagar)"
self.slackUtil.sendMessage(msgToSend, msg)
command = Command('box','Information om hur det fungerar med labblåda')
commands.add(command)
@listen_to(regex=command.regex)
def boxQuestion(self, msg):
msgToSend="Labblådan ska vara SmartStore Classic 31, den säljs tex av Clas Ohlsson och Jysk. Tänk på att man inte får förvara lithiumbatterier för radiostyrt på spacet pga brandrisken. Kemikalier ska förvaras kemiskåpet, ej i lådan. Mer info om förvaring på spacet och exempel på lådor finns på: https://wiki.makerspace.se/Medlems_F%C3%B6rvaring"
self.slackUtil.sendMessage(msgToSend, msg)
command = Command('sopor','Information om sophantering')
commands.add(command)
@listen_to(regex=command.regex)
def garbageQuestion(self, msg):
msgToSend="Nyckelkortet till soprummet finns i städskrubben. Soprummet ligger på lastkajen bakom huset. Mer info och karta till soprummet finns på: https://wiki.makerspace.se/Sophantering"
self.slackUtil.sendMessage(msgToSend, msg)
command = Command('wiki','Länkar till wiki sidan som motsvarar argumentet', 1)
commands.add(command)
@listen_to(regex=command.regex)
def wikiQuestion(self, msg):
argList = self.slackUtil.getArguments(msg)
msgToSend="https://wiki.makerspace.se/"+argList[0]
self.slackUtil.sendMessage(msgToSend, msg)
#TODO inköpsansvariga
#TODO kurser/workshops med kalender grej
#TODO kurser/workshop faq/info
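    # The TODO items above could follow the same pattern as the handlers in this class.
    # A hypothetical sketch (command name, description and reply text are made up,
    # not part of the original plugin):
    #
    #   command = Command('exempel', 'Beskrivning av kommandot')
    #   commands.add(command)
    #   @listen_to(regex=command.regex)
    #   def exampleQuestion(self, msg):
    #       self.slackUtil.sendMessage("Svarstext", msg)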
|
from .api import (PoloCurrencyPair,
Poloniex,
POLONIEX_DATA_TYPE_MAP,
timestamp_to_utc,
datetime, pd,
timestamp_from_utc,
DATE, requests,
polo_return_chart_data)
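# Presumably a package-level module that re-exports selected Poloniex API helpers
# from .api so they can be imported from the package root.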
|