repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (19 classes) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dylanh333/android-unmkbootimg | vendor/android-tools/toolbox/generate-input.h-labels.py | 4 | 2801 | #!/usr/bin/env python
#
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=bad-indentation,bad-continuation
from __future__ import print_function
import os
import re
import sys
input_prop_list = []
ev_list = []
syn_list = []
key_list = []
rel_list = []
abs_list = []
sw_list = []
msc_list = []
led_list = []
rep_list = []
snd_list = []
mt_tool_list = []
ff_status_list = []
ff_list = []
r = re.compile(r'#define\s+(\S+)\s+((?:0x)?\d+)')
for arg in sys.argv[1:]:
with open(arg, 'r') as f:
for line in f:
m = r.match(line)
if m:
name = m.group(1)
if name.startswith("INPUT_PROP_"):
input_prop_list.append(name)
elif name.startswith("EV_"):
ev_list.append(name)
elif name.startswith("SYN_"):
syn_list.append(name)
elif name.startswith("KEY_") or name.startswith("BTN_"):
key_list.append(name)
elif name.startswith("REL_"):
rel_list.append(name)
elif name.startswith("ABS_"):
abs_list.append(name)
elif name.startswith("SW_"):
sw_list.append(name)
elif name.startswith("MSC_"):
msc_list.append(name)
elif name.startswith("LED_"):
led_list.append(name)
elif name.startswith("REP_"):
rep_list.append(name)
elif name.startswith("SND_"):
snd_list.append(name)
elif name.startswith("MT_TOOL_"):
mt_tool_list.append(name)
elif name.startswith("FF_STATUS_"):
ff_status_list.append(name)
elif name.startswith("FF_"):
ff_list.append(name)
def Dump(struct_name, values):
print('static struct label %s[] = {' % (struct_name))
for value in values:
print(' LABEL(%s),' % (value))
print(' LABEL_END,')
print('};')
Dump("input_prop_labels", input_prop_list)
Dump("ev_labels", ev_list)
Dump("syn_labels", syn_list)
Dump("key_labels", key_list)
Dump("rel_labels", rel_list)
Dump("abs_labels", abs_list)
Dump("sw_labels", sw_list)
Dump("msc_labels", msc_list)
Dump("led_labels", led_list)
Dump("rep_labels", rep_list)
Dump("snd_labels", snd_list)
Dump("mt_tool_labels", mt_tool_list)
Dump("ff_status_labels", ff_status_list)
Dump("ff_labels", ff_list)
| mit | -6,136,553,381,846,212,000 | 28.177083 | 74 | 0.622992 | false | 3.223245 | false | false | false |
lavalamp-/ws-backend-community | lib/debugging.py | 1 | 4429 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from uuid import uuid4
def clear_celery_queue():
"""
Clear out all tasks for the Web Sight Celery application.
:return: None
"""
from tasknode import websight_app
websight_app.control.purge()
def enqueue_database_debugging_task(*args, **kwargs):
"""
Create and enqueue a Celery task of the debugging_database_task type.
:param args: Positional arguments to pass to the task.
:param kwargs: Keyword arguments to pass to the task.
:return: None
"""
from tasknode.tasks import debugging_database_task
sig = debugging_database_task.si(*args, **kwargs)
sig.apply_async()
def get_debugging_network_service(ip_address=None, port=None, protocol=None):
"""
Get an OrganizationNetworkService attached to the debugging organization that points to
the given IP address, port, and protocol.
:param ip_address: The IP address for the service.
:param port: The port for the service.
:param protocol: The protocol for the service.
:return: An OrganizationNetworkService attached to the debugging organization that points to
the given IP address, port, and protocol.
"""
debugging_org = get_debugging_organization()
network = debugging_org.org_networks[0]
from .sqlalchemy import get_sa_session, get_or_create_network_service_from_org_ip, \
get_or_create_ip_address_from_org_network
db_session = get_sa_session()
address_model = get_or_create_ip_address_from_org_network(
network_uuid=network.uuid,
address=ip_address,
address_type="ipv4",
db_session=db_session,
)
service = get_or_create_network_service_from_org_ip(
ip_uuid=address_model.uuid,
port=port,
protocol=protocol,
db_session=db_session,
)
return service
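# Illustrative call (argument values are examples only; the service ends up attached
# to the debugging organization's 157.166.255.0/24 network defined below):
#   service = get_debugging_network_service(ip_address="157.166.255.1", port=80, protocol="tcp")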
def get_debugging_organization(
org_uuid=u"a9def2a2-54be-40d4-83bf-efc34cc2fbbc",
user_email=u"[email protected]",
):
"""
Create the default debugging organization for the specified user, or return it if it already
exists.
:param org_uuid: The UUID to give the organization.
:param user_email: The email address for the user to add the organization to.
:return: The debugging organization owned by the given user.
"""
from .sqlalchemy import Organization, Network, get_sa_session, get_organization_by_uuid, \
get_user_uuid_by_username
db_session = get_sa_session()
existing_org = get_organization_by_uuid(org_uuid=org_uuid, db_session=db_session)
if existing_org is not None:
return existing_org
user_uuid = get_user_uuid_by_username(username=user_email, db_session=db_session)
new_org = Organization.new(
uuid=org_uuid,
user_id=user_uuid,
name=u"Debugging Organization",
description=u"Debugging Organization Description",
scanning_status=0,
)
new_org_network = Network.new(
name=u"Debugging Network",
address=u"157.166.255.0",
mask_length=24,
scanning_enabled=True,
organization_id=org_uuid,
endpoint_count=0,
)
db_session.add(new_org)
db_session.add(new_org_network)
db_session.commit()
db_session.close()
return new_org
def perform_network_service_inspection(
org_uuid=None,
scan_uuid=None,
ip_address=None,
port=None,
protocol=None,
):
"""
Create and enqueue a Celery task of the inspect_network_service_for_organization type.
:param org_uuid: The UUID for the organization.
:param scan_uuid: The UUID for the scan.
:param ip_address: The IP address to check.
:param port: The port to check.
:param protocol: The protocol to use to connect to the remote service.
:return: None
"""
pass
# from tasknode.tasks import perform_network_service_inspection
# from wselasticsearch import bootstrap_index_model_mappings
# org_uuid = org_uuid if org_uuid is not None else str(uuid4())
# scan_uuid = scan_uuid if scan_uuid is not None else str(uuid4())
# bootstrap_index_model_mappings(index=org_uuid, delete_first=True)
# task_sig = perform_network_service_inspection.si(
# org_uuid=org_uuid,
# scan_uuid=scan_uuid,
# port=port,
# protocol=protocol,
# ip_address=ip_address,
# )
# task_sig.apply_async()
| gpl-3.0 | 6,063,669,412,163,870,000 | 33.874016 | 96 | 0.669677 | false | 3.675519 | false | false | false |
GENETX/alpg | configs/example.py | 1 | 4948 | #!/usr/bin/python3
#Artificial load profile generator v1.1, generation of artificial load profiles to benchmark demand side management approaches
#Copyright (C) 2018 Gerwin Hoogsteen
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#This is an example configuration file!
# Select the output writer
import writer as writer
#Random seed
seed = 42
#input files:
weather_irradiation = 'input/weather/solarirradiation_twenthe.csv'
weather_timebaseDataset = 3600 #in seconds per interval
#Simulation:
#number of days to simulate and skipping of initial days. Simulation starts at Sunday January 1.
numDays = 365 # number of days
startDay = 0 # Initial day
#Select the geographic location. Refer to the Astral plugin to see available locations (or give a lon+lat)
# Use e.g. https://www.latlong.net/
from astral import Location
location = Location()
location.solar_depression = 'civil'
location.latitude = 52.239095
location.longitude = 6.857018
location.timezone = 'Europe/Amsterdam'
location.elevation = 0
#Select the devices in the neighbourhood
#Devices
#Scale overall consumption:
consumptionFactor = 1.0 #consumption was a bit too high
# Penetration of emerging technology in percentages
# all values must be between 0-100
# These indicate what percentage of the houses has a certain device
# Electric mobility, restriction that the sum <= 100
# Note, households with larger driving distances will receive EVs first
penetrationEV = 13
penetrationPHEV = 32
# PV and storage, restriction that Battery <= PV
# Note PV and battery size depend on the annual household consumption
# This emulates the Dutch "nul-op-de-meter" regime (net zero annual electricity usage)
penetrationPV = 50
penetrationBattery = 10 #Note only houses with PV will receive a battery!
# Heating systems, with restriction that the sum <= 100
penetrationHeatPump = 25
penetrationCHP = 5 # Combined heat and power
penetrationInductioncooking = 25
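# A minimal sanity-check sketch for the restrictions stated above (assumes only the
# variable names defined in this config; not part of the generator itself):
assert penetrationEV + penetrationPHEV <= 100
assert penetrationBattery <= penetrationPV
assert penetrationHeatPump + penetrationCHP <= 100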
#Device parameters:
#EV
capacityEV = 42000 #Wh
powerEV = 7400 #W
capacityPHEV = 12000 #Wh
powerPHEV = 3700 #W
#PV
PVProductionPerYear = 220 #average kWh per m2 solar panel on annual basis
PVAngleMean = 35 #degrees, 0 is horizontal to earth surface
PVAngleSigma = 10 #degrees
PVAzimuthMean = 180 #degrees, 0 is north, 90 is east
PVAzimuthSigma = 90 #degrees
PVEfficiencyMin = 15 #% of theoretical max
PVEfficiencyMax = 20 #% of theoretical max
#Driving distances
commuteDistanceMean = 25 #km
commuteDistanceSigma = 10 #km
#Battery
capacityBatteryLarge = 12000 #Wh
capacityBatteryMedium = 5000 #Wh
capacityBatterySmall = 2000 #Wh
powerBatteryLarge = 3700 #W
powerBatteryMedium = 3700 #W
powerBatterySmall = 3700 #W
#Kitchen
#Consumption of devices
ConsumptionOven = 2000 #W
ConsumptionMicroWave = 800 #W
ConsumptionStoveVentilation = 120 #W #But this is maximum, usually set lower!
ConsumptionInductionStove = 2200 #W #http://homeguides.sfgate.com/many-watts-induction-stove-85380.html
ConsumptionFridgeBigMin = 80 #W
ConsumptionFridgeBigMax = 120 #W
ConsumptionFridgeSmallMin = 50 #W
ConsumptionFridgeSmallMax = 80 #W
ConsumptionKettle = 2000 #W
#White goods
ConsumptionIron = 2000 #W
ConsumptionVacuumcleaner = 1500 #W
#House
ConsumptionHouseVentilation = 50 #W
#Household randomization
#all values must be between 0-1000
familyOutingChanceMin = 10 #percentage
familyOutingChanceMax = 20 #percentage
personWeekdayActivityChanceMin = 20 #percentage
personWeekdayActivityChanceMax = 30 #percentage
personWeekendActivityChanceMin = 20 #percentage
personWeekendActivityChanceMax = 30 #percentage
householdList = []
#Select the types of households
import households
for i in range(0,1):
householdList.append(households.HouseholdSingleWorker())
for i in range(0,2):
householdList.append(households.HouseholdSingleRetired())
for i in range(0,1):
householdList.append(households.HouseholdDualWorker(True))
for i in range(0,1):
householdList.append(households.HouseholdDualWorker(False))
for i in range(0,2):
householdList.append(households.HouseholdDualRetired())
for i in range(0,2):
householdList.append(households.HouseholdFamilyDualWorker(True))
for i in range(0,1):
householdList.append(households.HouseholdFamilyDualWorker(False))
| gpl-3.0 | 4,827,155,180,199,970,000 | 28.628743 | 126 | 0.760509 | false | 3.065675 | false | false | false |
woodem/woo | examples/perf/showPlots.py | 1 | 2971 | import numpy as np
from pprint import pprint
dd={}
for l in open('timings.txt'):
if l.startswith('#'): continue
ll=l[:-1].split()
if len(ll)==0: continue
tag,cores,nPar,nSteps=ll[0],int(ll[1]),int(ll[2]),int(ll[3])
t1,t,colliderRel=[float(i) for i in ll[4:]]
key=(tag,cores,nPar,nSteps)
data=[t1,t,colliderRel]
if key not in dd: dd[key]=[data]
else: dd[key]+=[data]
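# Each non-comment, non-empty line of timings.txt is expected to look like
# (illustrative values):
#   parBounds 4 100000 2000 1.23 0.0456 37.5
# i.e. tag, cores, nPar and nSteps followed by the floats t1, t and colliderRel.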
# compute averages
for k in dd: dd[k]=tuple([np.average(d) for d in zip(*dd[k])])
# nn=set()
# for k in dd: nn.add((k[1],k[2]))
out=[]
#refTag,cmpTag='par2_threadSafe','par3_oneMutex'
#refTag,cmpTag='par1','par3_oneMutex'
#refTag,cmpTag='orig','parBounds'
#refTag,cmpTag='noInvFast','par3_oneMutex'
#refTag,cmpTag='par1','par4_shortCircuit'
#refTag,cmpTag='par4_shortCircuit','parBounds'
#refTag,cmpTag='parBounds','gcc49'
#refTag,cmpTag='orig','ompTuneSort1_10k_0'
#refTag,cmpTag='r3547','r3552'
refTag,cmpTag='r3530','iniConParallel'
for k in sorted(dd.keys()):
if k[0]==refTag: continue
if k[0]!=cmpTag: continue
refKey=(refTag,k[1],k[2],k[3])
if refKey not in dd.keys(): continue
for i,name in enumerate(['t1','t','coll%']):
# if i==1 or i==2: continue
# if i!=2: continue
if i!=0: continue
val0=dd[refKey][i]
val=dd[k][i]
out+=[[k[1],k[2],k[3],name,refTag,val0,k[0],val,'%.2f%%'%(100*(val-val0)/val0)]]
# import prettytable
# print prettytable.PrettyTable(out,border=False)
# pprint(out)
# print out
for o in out:
print('\t'.join([str(oo) for oo in o]))
import pylab
cores=set([k[1] for k in dd.keys() if k[0]==cmpTag])
steps=set([k[3] for k in dd.keys() if k[0]==cmpTag])
nPar=set([k[2] for k in dd.keys() if k[0]==cmpTag])
#cores=[1]
if 0:
for core in cores:
for step in steps:
nPar=sorted(list(set([k[2] for k in dd.keys() if (cmpTag,core,k[2],step) in dd.keys() and (refTag,core,k[2],step) in dd.keys()])))
print(core,step,nPar)
pylab.plot(nPar,[dd[refTag,core,N,step][1] for N in nPar],label='%s, %d cores'%(refTag,core))
pylab.plot(nPar,[dd[cmpTag,core,N,step][1] for N in nPar],label='%s, %d cores'%(cmpTag,core),linewidth=4,alpha=.5)
pylab.xlabel('Number of particles')
pylab.ylabel('Time per one step [s]')
pylab.grid(True)
pylab.legend(loc='best')
if 1:
pylab.figure()
for core in cores:
for step in steps:
nPar=sorted(list(set([k[2] for k in dd.keys() if (cmpTag,core,k[2],step) in dd.keys() and (refTag,core,k[2],step) in dd.keys()])))
print(core,step,nPar)
pylab.plot(nPar,[dd[refTag,core,N,step][0] for N in nPar],label='%s, %d cores'%(refTag,core))
pylab.plot(nPar,[dd[cmpTag,core,N,step][0] for N in nPar],label='%s, %d cores'%(cmpTag,core),linewidth=4,alpha=.5)
pylab.xlabel('Number of particles')
pylab.ylabel('Time of the intial sort [s]')
pylab.grid(True)
pylab.legend(loc='best')
pylab.show()
| gpl-2.0 | 5,512,987,470,357,769,000 | 33.952941 | 142 | 0.612252 | false | 2.636202 | false | false | false |
schristakidis/p2ner | p2ner/components/serveroverlay/centralserver/centralserver/messages/messageobjects.py | 1 | 2833 | # -*- coding: utf-8 -*-
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from construct import Container
from p2ner.base.Consts import MessageCodes as MSG
from p2ner.base.ControlMessage import trap_sent, BaseControlMessage,probe_all,ControlMessage
class StreamMessage(BaseControlMessage):
type = "streammessage"
code = MSG.STREAM
ack = True
@classmethod
def send(cls, stream, peer, out):
#cls.log.debug('sending stream message to %s',peer)
return out.send(cls, Container(stream=stream), peer).addErrback(trap_sent)
class PeerListMessage(BaseControlMessage):
type = "peerlistmessage"
code = MSG.SEND_IP_LIST
ack = True
@classmethod
def send(cls, sid, peerlist, peer, out):
#cls.log.debug('sending peerList message to %s',peer)
msg = Container(streamid = sid, peer = peerlist)
return out.send(cls, msg, peer).addErrback(trap_sent)
class PeerListProducerMessage(PeerListMessage):
type = "peerlistmessage"
code = MSG.SEND_IP_LIST_PRODUCER
ack = True
class PeerRemoveMessage(BaseControlMessage):
type = "peerlistmessage"
code = MSG.REMOVE_NEIGHBOURS
ack = True
@classmethod
def send(cls, sid, peerlist, peer, out):
#cls.log.debug('sending peerRemove message to %s',peer)
msg = Container(streamid = sid, peer = peerlist)
return out.send(cls, msg, peer).addErrback(trap_sent)
class PeerRemoveProducerMessage(PeerRemoveMessage):
type = "peerlistmessage"
code = MSG.REMOVE_NEIGHBOURS_PRODUCER
ack = True
class SuggestNewPeerMessage(ControlMessage):
type = "peerlistmessage"
code = MSG.SUGGEST_NEW_PEER
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
self.log.debug('received suggest new peer message from %s',peer)
self.overlay.suggestNewPeer(peer,message.peer)
class SuggestMessage(BaseControlMessage):
type = "peerlistmessage"
code = MSG.SUGGEST
ack = True
@classmethod
def send(cls, sid, peerlist, peer, out):
return out.send(cls, Container(streamid=sid, peer=peerlist), peer).addErrback(trap_sent)
| apache-2.0 | -8,950,229,297,693,526,000 | 31.563218 | 98 | 0.692199 | false | 3.693611 | false | false | false |
dfehrenbach/Swen343_Human_Resources | hr/controllers/authentication.py | 1 | 1073 | """ This is the controller of the /confirm_login endpoint
The following functions are called from here: GET
"""
import logging
import requests
import employees
logging.basicConfig(filename='./log.txt',format='%(asctime)s :: %(name)s :: %(message)s')
logger = logging.getLogger(__name__)
def get(department="",token=""):
""" This is the GET function that will return an object with an employee id if they are authenticated.
:param token:
:return: an object with employee_id
"""
response = requests.post('https://www.googleapis.com/oauth2/v3/tokeninfo',{'access_token': token})
logger.info(response)
if response.status_code == 200:
email = response.json()["email"]
emps = employees.get()
for e in emps["employee_array"]:
employee_department = e["department"].replace(" ","")
if e["email"] == email and (employee_department == department or employee_department == "Board"):
return {"employee_id": e["employee_id"]}
return {'error_message': 'User is not authenticated'}, 400
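# Illustrative call (hypothetical token value):
#   get(department="Board", token="ya29.example-access-token")
# returns {"employee_id": ...} for an authenticated, matching employee, or the
# ({'error_message': ...}, 400) tuple above otherwise.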
| mit | -5,626,718,884,545,181,000 | 36 | 109 | 0.65424 | false | 4.111111 | false | false | false |
BiaDarkia/scikit-learn | sklearn/cluster/k_means_.py | 1 | 61736 | """K-means clustering"""
# Authors: Gael Varoquaux <[email protected]>
# Thomas Rueckstiess <[email protected]>
# James Bergstra <[email protected]>
# Jan Schlueter <[email protected]>
# Nelle Varoquaux
# Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..metrics.pairwise import pairwise_distances_argmin_min
from ..utils.extmath import row_norms, squared_norm, stable_cumsum
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import _num_samples
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.six import string_types
from ..exceptions import ConvergenceWarning
from . import _k_means
from ._k_means_elkan import k_means_elkan
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
-----------
X : array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters : integer
The number of seeds to choose
x_squared_norms : array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : int, RandomState instance
The generator used to initialize the centers. Use an int to make the
randomness deterministic.
See :term:`Glossary <random_state>`.
n_local_trials : integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
Selects initial cluster centers for k-mean clustering in a smart way
to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
rand_vals)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq,
distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
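# Minimal usage sketch for _k_init (illustrative only; k_means() below prepares
# x_squared_norms and random_state in exactly this way):
#   rs = check_random_state(0)
#   X = rs.rand(100, 2)
#   centers = _k_init(X, 3, x_squared_norms=row_norms(X, squared=True), random_state=rs)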
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
n_init=10, max_iter=300, verbose=False,
tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
algorithm="auto", return_n_iter=False):
"""K-means clustering algorithm.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
If a callable is passed, it should take arguments X, k and
a random state and return an initialization.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
verbose : boolean, optional
Verbosity mode.
tol : float, optional
The relative increment in the results before declaring convergence.
random_state : int, RandomState instance or None (default)
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True (default), then the original data is
not modified, ensuring X is C-contiguous. If False, the original data
is modified, and put back before the function returns, but small
numerical differences may be introduced by subtracting and then adding
the data mean, in this case it will also not ensure that data is
C-contiguous which may cause a significant slowdown.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter : int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
"""
if n_init <= 0:
raise ValueError("Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError('Number of iterations should be a positive number,'
' got %d instead' % max_iter)
# avoid forcing order when copy_x=False
order = "C" if copy_x else None
X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32],
order=order, copy=copy_x)
# verify that the number of samples given is larger than k
if _num_samples(X) < n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
_num_samples(X), n_clusters))
tol = _tolerance(X, tol)
# If the distances are precomputed every job will create a matrix of shape
# (n_clusters, n_samples). To stop KMeans from eating up memory we only
# activate this if the created matrix is guaranteed to be under 100MB. 12
# million entries consume a little under 100MB if they are of type double.
if precompute_distances == 'auto':
n_samples = X.shape[0]
precompute_distances = (n_clusters * n_samples) < 12e6
elif isinstance(precompute_distances, bool):
pass
else:
raise ValueError("precompute_distances should be 'auto' or True/False"
", but a value of %r was passed" %
precompute_distances)
# Validate init array
if hasattr(init, '__array__'):
init = check_array(init, dtype=X.dtype.type, copy=True)
_validate_center_shape(X, n_clusters, init)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in k-means instead of n_init=%d'
% n_init, RuntimeWarning, stacklevel=2)
n_init = 1
# subtract of mean of x for more accurate distance computations
if not sp.issparse(X):
X_mean = X.mean(axis=0)
# The copy was already done above
X -= X_mean
if hasattr(init, '__array__'):
init -= X_mean
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
best_labels, best_inertia, best_centers = None, None, None
if n_clusters == 1:
# elkan doesn't make sense for a single cluster, full will produce
# the right result.
algorithm = "full"
if algorithm == "auto":
algorithm = "full" if sp.issparse(X) else 'elkan'
if algorithm == "full":
kmeans_single = _kmeans_single_lloyd
elif algorithm == "elkan":
kmeans_single = _kmeans_single_elkan
else:
raise ValueError("Algorithm must be 'auto', 'full' or 'elkan', got"
" %s" % str(algorithm))
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = kmeans_single(
X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
precompute_distances=precompute_distances, tol=tol,
x_squared_norms=x_squared_norms, random_state=random_state)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(kmeans_single)(X, n_clusters, max_iter=max_iter, init=init,
verbose=verbose, tol=tol,
precompute_distances=precompute_distances,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed)
for seed in seeds)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if not sp.issparse(X):
if not copy_x:
X += X_mean
best_centers += X_mean
distinct_clusters = len(set(best_labels))
if distinct_clusters < n_clusters:
warnings.warn("Number of distinct clusters ({}) found smaller than "
"n_clusters ({}). Possibly due to duplicate points "
"in X.".format(distinct_clusters, n_clusters),
ConvergenceWarning, stacklevel=2)
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia
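# Usage sketch for k_means() (toy data, illustrative only):
#   X = np.array([[1., 2.], [1., 4.], [4., 2.], [4., 4.]])
#   centers, labels, inertia = k_means(X, n_clusters=2, random_state=0)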
def _kmeans_single_elkan(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
if sp.issparse(X):
raise TypeError("algorithm='elkan' not supported for sparse input X")
random_state = check_random_state(random_state)
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
centers = np.ascontiguousarray(centers)
if verbose:
print('Initialization complete')
centers, labels, n_iter = k_means_elkan(X, n_clusters, centers, tol=tol,
max_iter=max_iter, verbose=verbose)
inertia = np.sum((X - centers[labels]) ** 2, dtype=np.float64)
return labels, inertia, centers, n_iter
def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X : array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (k, p) and gives
the initial centers.
If a callable is passed, it should take arguments X, k and
a random state and return an initialization.
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode
x_squared_norms : array
Precomputed x_squared_norms.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
random_state : int, RandomState instance or None (default)
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
random_state = check_random_state(random_state)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
# closer center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, labels, n_clusters,
distances)
else:
centers = _k_means._centers_dense(X, labels, n_clusters, distances)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
center_shift_total = squared_norm(centers_old - centers)
if center_shift_total <= tol:
if verbose:
print("Converged at iteration %d: "
"center shift %e within tolerance %e"
% (i, center_shift_total, tol))
break
if center_shift_total > 0:
# rerun E-step in case of non-convergence so that predicted labels
# match cluster centers
best_labels, best_inertia = \
_labels_inertia(X, x_squared_norms, best_centers,
precompute_distances=precompute_distances,
distances=distances)
return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
"""Compute labels and inertia using a full distance matrix.
This will overwrite the 'distances' array in-place.
Parameters
----------
X : numpy array, shape (n_sample, n_features)
Input data.
x_squared_norms : numpy array, shape (n_samples,)
Precomputed squared norms of X.
centers : numpy array, shape (n_clusters, n_features)
Cluster centers which data is assigned to.
distances : numpy array, shape (n_samples,)
Pre-allocated array in which distances are stored.
Returns
-------
labels : numpy array, dtype=np.int, shape (n_samples,)
Indices of clusters that samples are assigned to.
inertia : float
Sum of squared distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
# Breakup nearest neighbor distance computation into batches to prevent
# memory blowup in the case of a large number of samples and clusters.
# TODO: Once PR #7383 is merged use check_inputs=False in metric_kwargs.
labels, mindist = pairwise_distances_argmin_min(
X=X, Y=centers, metric='euclidean', metric_kwargs={'squared': True})
# cython k-means code assumes int32 inputs
labels = labels.astype(np.int32)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
inertia = mindist.sum()
return labels, inertia
def _labels_inertia(X, x_squared_norms, centers,
precompute_distances=True, distances=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
This will compute the distances in-place.
Parameters
----------
X : float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms : array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers : float array, shape (k, n_features)
The cluster centers.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
distances : float array, shape (n_samples,)
Pre-allocated array to be filled in with each sample's distance
to the closest center.
Returns
-------
labels : int array of shape(n)
The resulting assignment
inertia : float
Sum of squared distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
# set the default value of centers to -1 to be able to detect any anomaly
# easily
labels = -np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=X.dtype)
# distances will be changed in-place
if sp.issparse(X):
inertia = _k_means._assign_labels_csr(
X, x_squared_norms, centers, labels, distances=distances)
else:
if precompute_distances:
return _labels_inertia_precompute_dense(X, x_squared_norms,
centers, distances)
inertia = _k_means._assign_labels_array(
X, x_squared_norms, centers, labels, distances=distances)
return labels, inertia
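# E-step sketch (illustrative): given data X and current centers,
#   labels, inertia = _labels_inertia(X, row_norms(X, squared=True), centers)
# assigns each sample to its nearest center and sums the squared distances.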
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
init_size=None):
"""Compute the initial centroids
Parameters
----------
X : array, shape (n_samples, n_features)
k : int
number of centroids
init : {'k-means++', 'random' or ndarray or callable} optional
Method for initialization
random_state : int, RandomState instance or None (default)
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
x_squared_norms : array, shape (n_samples,), optional
Squared euclidean norm of each data point. Pass it if you have it at
hands already to avoid it being recomputed here. Default: None
init_size : int, optional
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than k.
Returns
-------
centers : array, shape(k, n_features)
"""
random_state = check_random_state(random_state)
n_samples = X.shape[0]
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
if init_size is not None and init_size < n_samples:
if init_size < k:
warnings.warn(
"init_size=%d should be larger than k=%d. "
"Setting it to 3*k" % (init_size, k),
RuntimeWarning, stacklevel=2)
init_size = 3 * k
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
elif n_samples < k:
raise ValueError(
"n_samples=%d should be larger than k=%d" % (n_samples, k))
if isinstance(init, string_types) and init == 'k-means++':
centers = _k_init(X, k, random_state=random_state,
x_squared_norms=x_squared_norms)
elif isinstance(init, string_types) and init == 'random':
seeds = random_state.permutation(n_samples)[:k]
centers = X[seeds]
elif hasattr(init, '__array__'):
# ensure that the centers have the same dtype as X
# this is a requirement of fused types of cython
centers = np.array(init, dtype=X.dtype)
elif callable(init):
centers = init(X, k, random_state=random_state)
centers = np.asarray(centers, dtype=X.dtype)
else:
raise ValueError("the init parameter for the k-means should "
"be 'k-means++' or 'random' or an ndarray, "
"'%s' (type '%s') was passed." % (init, type(init)))
if sp.issparse(centers):
centers = centers.toarray()
_validate_center_shape(X, k, centers)
return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""K-Means clustering
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
verbose : int, default 0
Verbosity mode.
random_state : int, RandomState instance or None (default)
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True (default), then the original data is
not modified, ensuring X is C-contiguous. If False, the original data
is modified, and put back before the function returns, but small
numerical differences may be introduced by subtracting and then adding
the data mean, in this case it will also not ensure that data is
C-contiguous which may cause a significant slowdown.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of squared distances of samples to their closest cluster center.
Examples
--------
>>> from sklearn.cluster import KMeans
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
>>> kmeans.labels_
array([0, 0, 0, 1, 1, 1], dtype=int32)
>>> kmeans.predict([[0, 0], [4, 4]])
array([0, 1], dtype=int32)
>>> kmeans.cluster_centers_
array([[1., 2.],
[4., 2.]])
See also
--------
MiniBatchKMeans
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
probably much faster than the default batch implementation.
Notes
------
The k-means problem is solved using Lloyd's algorithm.
The average complexity is given by O(k n T), where n is the number of
samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
clustering algorithms available), but it falls in local minima. That's why
it can be useful to restart it several times.
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10,
max_iter=300, tol=1e-4, precompute_distances='auto',
verbose=0, random_state=None, copy_x=True,
n_jobs=1, algorithm='auto'):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
self.algorithm = algorithm
def _check_test_data(self, X):
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError("Incorrect number of features. "
"Got %d features, expected %d" % (
n_features, expected_n_features))
return X
def fit(self, X, y=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory
copy if the given data is not C-contiguous.
y : Ignored
"""
random_state = check_random_state(self.random_state)
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
k_means(
X, n_clusters=self.n_clusters, init=self.init,
n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose,
precompute_distances=self.precompute_distances,
tol=self.tol, random_state=random_state, copy_x=self.copy_x,
n_jobs=self.n_jobs, algorithm=self.algorithm,
return_n_iter=True)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
y : Ignored
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
y : Ignored
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
return self.fit(X)._transform(X)
def transform(self, X):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
y : Ignored
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
def _mini_batch_step(X, x_squared_norms, centers, counts,
old_center_buffer, compute_squared_diff,
distances, random_reassign=False,
random_state=None, reassignment_ratio=.01,
verbose=False):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : array, shape (n_samples, n_features)
The original data array.
x_squared_norms : array, shape (n_samples,)
Squared euclidean norm of each data point.
centers : array, shape (k, n_features)
The cluster centers. This array is MODIFIED IN PLACE
counts : array, shape (k,)
The vector in which we keep track of the numbers of elements in a
cluster. This array is MODIFIED IN PLACE
distances : array, dtype float, shape (n_samples), optional
If not None, should be a pre-allocated array that will be used to store
the distances of each sample to its closest center.
May not be None when random_reassign is True.
random_state : int, RandomState instance or None (default)
Determines random number generation for centroid initialization and to
pick new clusters amongst observations with uniform probability. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
random_reassign : boolean, optional
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, optional
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, optional, default False
Controls the verbosity.
compute_squared_diff : bool
If set to False, the squared diff computation is skipped.
old_center_buffer : int
Copy of old centers for monitoring convergence.
Returns
-------
inertia : float
Sum of squared distances of samples to their closest cluster center.
squared_diff : numpy array, shape (n_clusters,)
Squared distances between previous and updated cluster centers.
"""
# Perform label assignment to nearest centers
nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
distances=distances)
if random_reassign and reassignment_ratio > 0:
random_state = check_random_state(random_state)
# Reassign clusters that have very low counts
to_reassign = counts < reassignment_ratio * counts.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > .5 * X.shape[0]:
indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = random_state.choice(X.shape[0], replace=False,
size=n_reassigns)
if verbose:
print("[MiniBatchKMeans] Reassigning %i cluster centers."
% n_reassigns)
if sp.issparse(X) and not sp.issparse(centers):
assign_rows_csr(X, new_centers.astype(np.intp),
np.where(to_reassign)[0].astype(np.intp),
centers)
else:
centers[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
counts[to_reassign] = np.min(counts[~to_reassign])
# implementation for the sparse CSR representation completely written in
# cython
if sp.issparse(X):
return inertia, _k_means._mini_batch_update_csr(
X, x_squared_norms, centers, counts, nearest_center,
old_center_buffer, compute_squared_diff)
# dense variant in mostly numpy (not as memory efficient though)
k = centers.shape[0]
squared_diff = 0.0
for center_idx in range(k):
# find points from minibatch that are assigned to this center
center_mask = nearest_center == center_idx
count = center_mask.sum()
if count > 0:
if compute_squared_diff:
old_center_buffer[:] = centers[center_idx]
# inplace remove previous count scaling
centers[center_idx] *= counts[center_idx]
# inplace sum with new points members of this cluster
centers[center_idx] += np.sum(X[center_mask], axis=0)
# update the count statistics for this center
counts[center_idx] += count
# inplace rescale to compute mean of all points (old and new)
# Note: numpy >= 1.10 does not support '/=' for the following
# expression for a mixture of int and float (see numpy issue #6464)
centers[center_idx] = centers[center_idx] / counts[center_idx]
# update the squared diff if necessary
if compute_squared_diff:
diff = centers[center_idx].ravel() - old_center_buffer.ravel()
squared_diff += np.dot(diff, diff)
return inertia, squared_diff
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Read more in the :ref:`User Guide <mini_batch_kmeans>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
batch_size : int, optional, default: 100
Size of the mini batches.
verbose : boolean, optional
Verbosity mode.
compute_labels : boolean, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : int, RandomState instance or None (default)
Determines random number generation for centroid initialization and
random reassignment. Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
tol : float, default: 0.0
Control early stopping based on the relative center changes as
measured by a smoothed, variance-normalized estimate of the mean center
squared position changes. This early stopping heuristics is
closer to the one used for the batch variant of the algorithms
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
max_no_improvement : int, default: 10
Control early stopping based on the consecutive number of mini
batches that does not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
init_size : int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
algorithm is only initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than n_clusters.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
reassignment_ratio : float, default: 0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
defined as the sum of squared distances of samples to their
closest cluster center.
See also
--------
KMeans
The classic implementation of the clustering method based on the
Lloyd's algorithm. It consumes the whole set of input data at each
iteration.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
"""
def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
batch_size=100, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01):
super(MiniBatchKMeans, self).__init__(
n_clusters=n_clusters, init=init, max_iter=max_iter,
verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
self.max_no_improvement = max_no_improvement
self.batch_size = batch_size
self.compute_labels = compute_labels
self.init_size = init_size
self.reassignment_ratio = reassignment_ratio
def fit(self, X, y=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
y : Ignored
"""
random_state = check_random_state(self.random_state)
X = check_array(X, accept_sparse="csr", order='C',
dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if n_samples < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d"
% (n_samples, self.n_clusters))
n_init = self.n_init
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in MiniBatchKMeans instead of '
'n_init=%d'
% self.n_init, RuntimeWarning, stacklevel=2)
n_init = 1
x_squared_norms = row_norms(X, squared=True)
if self.tol > 0.0:
tol = _tolerance(X, self.tol)
# using tol-based early stopping needs the allocation of a
# dedicated buffer, which can be expensive for high dim data:
# hence we allocate it outside of the main loop
old_center_buffer = np.zeros(n_features, dtype=X.dtype)
else:
tol = 0.0
# no need for the center buffer if tol-based early stopping is
# disabled
old_center_buffer = np.zeros(0, dtype=X.dtype)
distances = np.zeros(self.batch_size, dtype=X.dtype)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
init_size = self.init_size
if init_size is None:
init_size = 3 * self.batch_size
if init_size > n_samples:
init_size = n_samples
self.init_size_ = init_size
validation_indices = random_state.randint(0, n_samples, init_size)
X_valid = X[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
# perform several inits with random sub-sets
best_inertia = None
for init_idx in range(n_init):
if self.verbose:
print("Init %d/%d with method: %s"
% (init_idx + 1, n_init, self.init))
counts = np.zeros(self.n_clusters, dtype=np.int32)
# TODO: once the `k_means` function works with sparse input we
# should refactor the following init to use it instead.
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans
cluster_centers = _init_centroids(
X, self.n_clusters, self.init,
random_state=random_state,
x_squared_norms=x_squared_norms,
init_size=init_size)
# Compute the label assignment on the init dataset
batch_inertia, centers_squared_diff = _mini_batch_step(
X_valid, x_squared_norms[validation_indices],
cluster_centers, counts, old_center_buffer, False,
distances=None, verbose=self.verbose)
# Keep only the best cluster centers across independent inits on
# the common validation set
_, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
cluster_centers)
if self.verbose:
print("Inertia for init %d/%d: %f"
% (init_idx + 1, n_init, inertia))
if best_inertia is None or inertia < best_inertia:
self.cluster_centers_ = cluster_centers
self.counts_ = counts
best_inertia = inertia
# Empty context to be used inplace by the convergence check routine
convergence_context = {}
# Perform the iterative optimization until the final convergence
# criterion
for iteration_idx in range(n_iter):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.randint(
0, n_samples, self.batch_size)
# Perform the actual update step on the minibatch data
batch_inertia, centers_squared_diff = _mini_batch_step(
X[minibatch_indices], x_squared_norms[minibatch_indices],
self.cluster_centers_, self.counts_,
old_center_buffer, tol > 0.0, distances=distances,
# Here we randomly choose whether to perform
# random reassignment: the choice is done as a function
# of the iteration index, and the minimum number of
# counts, in order to force this reassignment to happen
# every once in a while
random_reassign=((iteration_idx + 1)
% (10 + self.counts_.min()) == 0),
random_state=random_state,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
# Monitor convergence and do early stopping if necessary
if _mini_batch_convergence(
self, iteration_idx, n_iter, tol, n_samples,
centers_squared_diff, batch_inertia, convergence_context,
verbose=self.verbose):
break
self.n_iter_ = iteration_idx + 1
if self.compute_labels:
self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
return self
def _labels_inertia_minibatch(self, X):
"""Compute labels and inertia using mini batches.
This is slightly slower than doing everything at once but prevents
memory errors / segfaults.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
labels : array, shape (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared distances of points to nearest cluster.
"""
if self.verbose:
print('Computing label assignment and total inertia')
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], x_squared_norms[s],
self.cluster_centers_) for s in slices]
labels, inertia = zip(*results)
return np.hstack(labels), np.sum(inertia)
def partial_fit(self, X, y=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster. It must be noted that
X will be copied if it is not C-contiguous.
y : Ignored
"""
X = check_array(X, accept_sparse="csr", order="C")
n_samples, n_features = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_samples == 0:
return self
x_squared_norms = row_norms(X, squared=True)
self.random_state_ = getattr(self, "random_state_",
check_random_state(self.random_state))
if (not hasattr(self, 'counts_')
or not hasattr(self, 'cluster_centers_')):
# this is the first call partial_fit on this object:
# initialize the cluster centers
self.cluster_centers_ = _init_centroids(
X, self.n_clusters, self.init,
random_state=self.random_state_,
x_squared_norms=x_squared_norms, init_size=self.init_size)
self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
random_reassign = False
distances = None
else:
# The lower the minimum count is, the more we do random
# reassignment, however, we don't want to do random
# reassignment too often, to allow for building up counts
random_reassign = self.random_state_.randint(
10 * (1 + self.counts_.min())) == 0
distances = np.zeros(X.shape[0], dtype=X.dtype)
_mini_batch_step(X, x_squared_norms, self.cluster_centers_,
self.counts_, np.zeros(0, dtype=X.dtype), 0,
random_reassign=random_reassign, distances=distances,
random_state=self.random_state_,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia(
X, x_squared_norms, self.cluster_centers_)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._labels_inertia_minibatch(X)[0]
| bsd-3-clause | 8,976,565,480,837,188,000 | 37.730238 | 81 | 0.611701 | false | 4.148646 | false | false | false |
vaishaksuresh/udacity_data_analyst | P2/ProblemSets_2_to_4/P2_01.py | 1 | 1464 | import pandas
import pandasql
def num_rainy_days(filename):
'''
This function should run a SQL query on a dataframe of
weather data. The SQL query should return one column and
one row - a count of the number of days in the dataframe where
the rain column is equal to 1 (i.e., the number of days it
rained). The dataframe will be titled 'weather_data'. You'll
need to provide the SQL query. You might find SQL's count function
useful for this exercise. You can read more about it here:
https://dev.mysql.com/doc/refman/5.1/en/counting-rows.html
You might also find that interpreting numbers as integers or floats may not
work initially. In order to get around this issue, it may be useful to cast
these numbers as integers. This can be done by writing cast(column as integer).
So for example, if we wanted to cast the maxtempi column as an integer, we would actually
write something like where cast(maxtempi as integer) = 76, as opposed to simply
where maxtempi = 76.
You can see the weather data that we are passing in below:
https://www.dropbox.com/s/7sf0yqc9ykpq3w8/weather_underground.csv
'''
weather_data = pandas.read_csv(filename)
q = """
select count(*) from weather_data where cast(rain as integer) = 1
"""
#Execute your SQL command against the pandas frame
rainy_days = pandasql.sqldf(q.lower(), locals())
return rainy_days
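# Example call (illustrative; the CSV path is a placeholder):
#   print num_rainy_days('weather_underground.csv')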
| gpl-2.0 | 7,580,475,719,071,482,000 | 38.567568 | 93 | 0.70082 | false | 3.904 | false | false | false |
IvanRybakov/cachewarmer | cw.py | 1 | 3233 |
import urllib2 as ur
import re, traceback
import sys
import os
from scw.fetcher import Fetcher
from scw.app import App
class CacheWarmer():
def __init__(self, sitemap, processes = 100):
self.processes = processes
self.active_threads = []
self.app = App()
self.urls = []
self.updated_count = 0
self.fetched_count = 0
self.sitemap_url = sitemap
self.code_statistics = {}
self.average_time = 0.0
def start(self):
"""
Execute the main process
"""
self.app.printflush('Sitemap: ' + self.sitemap_url)
self.getUrlsList()
self.app.printflush('Fetched: ' + str(self.fetched_count))
self.app.printflush('Processes: ' + str(self.processes))
self.CheckURLs()
self.printReport()
def printReport(self):
"""
Print a report after process execution
"""
self.app.printflush('Fetched: ' + str(self.fetched_count), self.app.IGNORE_EXIT_FLAG)
self.app.printflush('Processes: ' + str(self.processes), self.app.IGNORE_EXIT_FLAG)
self.app.printflush('Updated: ' + str(self.updated_count), self.app.IGNORE_EXIT_FLAG)
self.app.printflush('Average page load time: ' + str(self.average_time), self.app.IGNORE_EXIT_FLAG)
self.app.printflush('Returned with code: ' + repr(self.code_statistics), self.app.IGNORE_EXIT_FLAG)
self.app.printflush('Closing Processes... ', self.app.IGNORE_EXIT_FLAG)
def getUrlsList(self):
"""
Fetch the list of URLs from the website's XML sitemap
"""
try:
f = ur.urlopen(self.sitemap_url)
res = f.readlines()
for d in res:
data = re.findall('<loc>(https?:\/\/.+?)<\/loc>',d)
for i in data:
self.urls.append(i)
except Exception as e:
self.app.printflush(str(e))
self.app.printflush(traceback.format_exc())
self.fetched_count = len(self.urls)
def CheckURLs(self):
"""
Start multi-threaded requests to the website
"""
self.updated_count = 0
self.app.setExitFlag(False)
try:
parsed_params = self.urls
while (parsed_params):
self.active_threads = []
while True:
while len(self.active_threads) < self.processes and len(parsed_params) > 0:
urlItem = parsed_params.pop()
if urlItem != None:
thread = Fetcher(self.app, urlItem)
thread.start()
self.active_threads.append( thread )
if self.app.getExitFlag():
break
if len( self.active_threads ) == 0:
break
else:
# iterate over a snapshot since finished threads are removed below
for thread in list(self.active_threads):
if not thread.isAlive():
thread.printStatus()
self.collectStat(thread)
self.active_threads.remove(thread)
if self.app.getExitFlag():
break
except KeyboardInterrupt as e:
self.app.setExitFlag(True)
except Exception as e:
self.app.printflush(traceback.format_exc())
def collectStat(self, thread):
"""
Collect statistic for a request
"""
# update average page load time
if self.updated_count == 0:
self.average_time = thread.load_time
else:
self.average_time = (self.average_time * self.updated_count + thread.load_time) / (self.updated_count + 1)
# update stitistics by HTTP code
if thread.code not in self.code_statistics:
self.code_statistics[thread.code] = 1
else:
self.code_statistics[thread.code] += 1
# update count of processed pages
self.updated_count += 1
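# Illustrative usage (the sitemap URL is a placeholder):
#   warmer = CacheWarmer('http://example.com/sitemap.xml', processes=50)
#   warmer.start()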
| mit | -5,689,395,381,370,856,000 | 28.126126 | 109 | 0.67275 | false | 3.058657 | false | false | false |
t-neumann/slamdunk | slamdunk/contrib/RNASeqReadSimulator/src/gensimreads.py | 1 | 9905 | #!/usr/bin/env python
"""
This script generates simulated RNA-Seq reads (in .bed format) from known gene annotations.
USAGE
gensimreads.py {OPTIONS} <BED-File|->
PARAMETER
BED-File\tThe gene annotation file (in BED format). Use '-' for STDIN input
OPTIONS
-e/--expression [expression level file] \tSpecify the weight of each transcript. Each line in the file should have at least (NFIELD+1) fields, with field 0 the annotation id, and field NFIELD the weight of this annoation. If this file is not provided, uniform weight is applied.
-n/--nreads readcnt \tSpecify the number of reads to be generated. Default 100000.
-b/--posbias [positional bias file] \tSpecify the positional bias file. The file should include at least 100 lines, each contains only one integer number, showing the preference of the positional bias at this position. If no positional bias file is specified, use uniform distribution bias.
-l/--readlen [read length] \tSpecify the read length. Default 32.
-o/--output [output .bed file] \tSpecify the output file. Default STDOUT
-f/--field [NFIELD] \tThe field of each line as weight input. Default 7 (beginning from field 0) to compatible to genexplvprofile.py.
-p/--pairend [PELENMEAN,PELENSTD]\t Generate paired-end reads with specified insert length mean and standard derivation. The default is 200,20.
--stranded \tThe reads are strand specific.
NOTE
1. The bed file is required to sort according to the chromosome name and position. In Unix systems, use "sort -k 1,1 -k 2,2n in.BED > out.BED" to get a sorted version (out.BED) of the bed file (in.BED).
2. No problem to handle reads spanning multiple exons.
HISTORY
04/30/2012
Support generating stranded RNA-Seq reads
02/16/2012
Now runs on python 2.7
02/08/2012
Change default value of NFIELD from 4 to 7 to be compatible with default genexplvprofile values.
01/29/2012
Add paired-end support.
01/09/2012
Add -f option.
AUTHOR
Wei Li (li.david.wei AT gmail.com)
"""
from __future__ import print_function
import sys;
import subprocess;
import pydoc;
import os;
import random;
import bisect;
import math;
from getSegs import *;
import pdb;
# read length
readlen=32;
# number of reads to sample
readcnt=100000;
nfield=7;
if len(sys.argv)<2:
print(pydoc.render_doc(sys.modules[__name__]));
sys.exit();
allids={};
allidl=[];
allexp=[];
posweight=[];
#onbedfile=sys.argv[-1]+'.reads.bed';
onbedfile="-";
genpereads=False;
pemean=200;
pestd=20;
stranded=False;
for i in range(len(sys.argv)):
if i<len(sys.argv)-1:
if sys.argv[i]=='-e' or sys.argv[i]=='--expression':
# parse the annotation file, and sum up the weights
nline=0;
totalweight=0;
print('Reading annotation file...',file=sys.stderr);
for lines in open(sys.argv[i+1]):
nline=nline+1;
if lines[0]=='#':
continue;
fields=lines.strip().split();
if len(fields)<nfield+1:
print('Error: the annotation file should include at least '+str(nfield+1)+' fields.',file=sys.stderr);
sys.exit();
allids[fields[0]]=0;
totalweight+=float(fields[nfield]);
allexp.append(totalweight);
allidl.append(fields[0]);
print('Read %d lines of the annotation' % nline,file=sys.stderr);
#print('Total weight: %f' % sum(totalweight));
if sys.argv[i]=='-b' or sys.argv[i]=='--posbias':
bline=0;
tbweight=0;
for lines in open(sys.argv[i+1]):
bline=bline+1;
if bline>100:
break;
tbweight=float(lines.strip());
posweight.append(tbweight);
if len(posweight)!=100:
print('Error: the bias file should include at least 100 lines.',file=sys.stderr);
sys.exit();
if sys.argv[i]=='-n' or sys.argv[i]=='--nreads':
readcnt=int(sys.argv[i+1]);
print('Read count:',readcnt,file=sys.stderr);
if sys.argv[i]=='-l' or sys.argv[i]=='--readlen':
readlen=int(sys.argv[i+1]);
print('Read length:',readlen,file=sys.stderr);
if sys.argv[i]=='-o' or sys.argv[i]=='--output':
onbedfile=sys.argv[i+1];
print('Output bed file:',onbedfile,file=sys.stderr);
if sys.argv[i]=='-f' or sys.argv[i]=='--field':
nfield=int(sys.argv[i+1]);
print('Field:',nfield,file=sys.stderr);
if sys.argv[i]=='-p' or sys.argv[i]=='--pairend':
genpereads=True;
pef=sys.argv[i+1].split(',');
pemean=int(pef[0]);
pestd=int(pef[1]);
print('Generate paired-end reads with mean and std '+str(pemean)+','+str(pestd),file=sys.stderr);
if sys.argv[i]=='-h' or sys.argv[i]=='--help':
print(pydoc.render_doc(sys.modules[__name__]));
sys.exit();
if sys.argv[i]=='--stranded':
stranded=True;
bedfile=sys.argv[-1];
# if no annotation file is specified, use a uniform distribution.
print('Assigning weights...',file=sys.stderr);
if len(allexp)==0:
totalweight=0;
for lines in open(bedfile):
bedfield=lines.strip().split();
allids[bedfield[3]]=0;
totalweight+=1;
allexp.append(totalweight);
allidl.append(bedfield[3]);
# sampling process
print('Sampling...',file=sys.stderr);
for j in range(readcnt):
k=random.random()*totalweight;
sel=bisect.bisect_right(allexp,k);
allids[allidl[sel]]=allids[allidl[sel]]+1;
# if no bias file is specified, use a uniform distribution
print('Total assigned reads:',sum(allids.values()),file=sys.stderr);
#debug info:
#for k in allidl:
# print (k, allids[k]);
#sys.exit();
if onbedfile!="-":
onfid=open(onbedfile,'w');
else:
onfid=sys.stdout;
nlines=0;
totalgenreads=0;
# read bed file
for lines in open(bedfile):
# update line counter
nlines=nlines+1;
if nlines %10000==1:
print('Processing '+str(nlines)+' lines...',file=sys.stderr);
# parse lines
bedfield=lines.strip().split();
if len(bedfield)!=12:
print('Error: incorrect number of fields (should be 12)',file=sys.stderr);
continue;
if bedfield[5]=='+':
direction=1;
elif bedfield[5]=='-':
direction=-1;
else:
print('Error: incorrect field in field[5] %s:' %bedfield[5],file=sys.stderr);
if bedfield[3] not in allids:
# the current id not found, continue
continue;
nreads=allids[bedfield[3]];
if nreads<1:
continue;
# parse all segments
fieldrange=(int(bedfield[1]),int(bedfield[2]));
if bedfield[10][-1]==',':
bedfield[10]=bedfield[10][:-1];
if bedfield[11][-1]==',':
bedfield[11]=bedfield[11][:-1];
exonlen=[int(x) for x in bedfield[10].split(',')];
exonstart=[int(x)+fieldrange[0] for x in bedfield[11].split(',')];
# old code: for each possible position in the transcript, build its segments
# for ne in range(len(exonlen)):
# for pos in range(exonstart[ne],exonstart[ne]+exonlen[ne]):
# create a position
totallen=sum(exonlen);
# here, we randomly choose one position
if genpereads==False:
selrange=totallen-readlen+1;
else:
selrange=totallen-pemean+2*pestd;
if selrange<1:
if genpereads==False:
print('Ignore annotation',bedfield[3],'of length',totallen,'Reads:',allids[bedfield[3]],file=sys.stderr);
else:
print('Ignore annotation',bedfield[3],'of length',totallen,'since it is shorter than the paired-end mean insert length. Reads:',allids[bedfield[3]],file=sys.stderr);
continue;
totalgenreads+=nreads;
cumlen=[];cumlen.extend(exonlen);
for i in range(1,len(cumlen)):
cumlen[i]=cumlen[i]+cumlen[i-1];
# for nun-uniform distribution, construct a new array for selection
thistbweight=[];
if len(posweight)!=0:
kweight=0;
for i in range(selrange):
nfrac=i*100.0/selrange; # a value between 0-100
nlower=int(math.floor(nfrac)); # 0-100
nhigher=int(math.ceil(nfrac)); # 0-100
if nhigher==nlower: nhigher=nlower+1;
if nhigher<100:
val=posweight[nlower]*(nfrac-nlower)+posweight[nhigher]*(nhigher-nfrac);
else:
val=posweight[99];
kweight+=val;
thistbweight.append(kweight);
for t in range(nreads):
if len(posweight)==0:
tpos=random.choice(range(selrange));
else:
rd=random.random()*kweight;
bsl=bisect.bisect_right(thistbweight,rd);
# for reverse transcripts: flip the position
if direction==-1:
bsl=selrange-1-bsl;
tpos=bsl;
pos=tpos2pos(tpos,cumlen,exonstart);
if genpereads==True:
tpos2=tpos+int(random.normalvariate(pemean-readlen+1,pestd));
pos2=tpos2pos(tpos2,cumlen,exonstart);
# get the segments
if True:
(startrange,lenrange,status)=getSegs(pos,readlen,1,exonstart,exonlen);
if status!=0:
print('Status:',status,', pos:', pos,'out of',len(cumlen),file=sys.stderr);
#pdb.set_trace();
continue;
# generate another pair
if genpereads==True:
(startrange2,lenrange2,status2)=getSegs(pos2,readlen,1,exonstart,exonlen);
if status==1:
print('Status:',status,', pos:', pos,'out of',len(cumlen),file=sys.stderr);
if genpereads==False:
lineid="%s_e_%d_%s_%d" % (bedfield[3],t,bedfield[0],pos);
else:
lineid="%s_e_%d_%s_%d/1" % (bedfield[3],t,bedfield[0],pos);
lineid2="%s_e_%d_%s_%d/2" % (bedfield[3],t,bedfield[0],pos);
# random direction
if stranded==False or direction==0:
thisdir=random.choice([1,-1]);
else:
thisdir=direction;
writeBedline(onfid,lineid,bedfield[0],thisdir,startrange,lenrange);
if genpereads==True:
writeBedline(onfid,lineid2,bedfield[0],thisdir*(-1),startrange2,lenrange2);
else:
print(bedfield[0],file=sys.stdout);
#print('Pospool:');
#for k in sorted(pospool.keys()):
# print(str(k)+":"+str(pospool[k]),end=",");
#print();
print('Total '+str(nlines)+' lines...',file=sys.stderr);
print('Total '+str(totalgenreads)+' reads...',file=sys.stderr);
if onbedfile!="-":
onfid.close();
| agpl-3.0 | 3,342,112,633,374,298,000 | 30.645367 | 292 | 0.651186 | false | 3.055213 | false | false | false |
APTrust/EarthDiver | dpnode/dpn/client/management/commands/accept_transfers.py | 1 | 1510 | """
'Contrary to what people may say, there is no upper limit to stupidity.'
- Stephen Colbert
"""
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from dpn.data.models import Node
from dpn.client.tasks import accept_transfers
class Command(BaseCommand):
help = 'Pulls registry entries from all nodes or named node only if ' \
'specified.'
option_list = BaseCommand.option_list + (
make_option("--node",
dest="namespace",
default=None,
help="Namespace of specific node to pull registry entries from."
),
make_option("--max",
dest="max",
default=settings.DPN_MAX_ACCEPT,
help="Max number of transfer to mark as accepted at once."
)
)
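# Illustrative invocation as a Django management command (arguments are examples):
#   python manage.py accept_transfers --node=aptrust --max=20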
def handle(self, *args, **options):
nodes = Node.objects.exclude(api_root__isnull=True).exclude(
api_root__exact='').exclude(namespace=settings.DPN_NAMESPACE)
nodes = nodes.filter(replicate_from=True)
if options['namespace']:
# keep the filtered queryset (filter() returns a new queryset)
nodes = nodes.filter(namespace=options['namespace'])
if not nodes:
raise CommandError("No nodes found to query.")
for node in nodes:
accept_transfers(node, options['max'])
self.stdout.write("Done accepting transfers from %s" %
node.namespace) | apache-2.0 | -3,021,411,262,614,179,300 | 32.577778 | 78 | 0.59404 | false | 4.494048 | false | false | false |
eduNEXT/edunext-ecommerce | ecommerce/courses/publishers.py | 1 | 6029 | from __future__ import absolute_import, unicode_literals
import json
import logging
import six
from django.utils.translation import ugettext_lazy as _
from edx_rest_api_client.exceptions import SlumberHttpBaseException
from oscar.core.loading import get_model
from ecommerce.core.constants import ENROLLMENT_CODE_SEAT_TYPES
from ecommerce.courses.utils import mode_for_product
logger = logging.getLogger(__name__)
Product = get_model('catalogue', 'Product')
StockRecord = get_model('partner', 'StockRecord')
class LMSPublisher:
def get_seat_expiration(self, seat):
if not seat.expires or 'professional' in getattr(seat.attr, 'certificate_type', ''):
return None
return seat.expires.isoformat()
def get_course_verification_deadline(self, course):
return course.verification_deadline.isoformat() if course.verification_deadline else None
def serialize_seat_for_commerce_api(self, seat):
""" Serializes a course seat product to a dict that can be further serialized to JSON. """
stock_record = seat.stockrecords.first()
bulk_sku = None
if getattr(seat.attr, 'certificate_type', '') in ENROLLMENT_CODE_SEAT_TYPES:
enrollment_code = seat.course.enrollment_code_product
if enrollment_code:
bulk_sku = enrollment_code.stockrecords.first().partner_sku
return {
'name': mode_for_product(seat),
'currency': stock_record.price_currency,
'price': int(stock_record.price_excl_tax),
'sku': stock_record.partner_sku,
'bulk_sku': bulk_sku,
'expires': self.get_seat_expiration(seat),
}
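# A serialized seat looks roughly like this (illustrative values only):
#   {'name': 'verified', 'currency': 'USD', 'price': 100,
#    'sku': 'ABC123', 'bulk_sku': 'XYZ789', 'expires': '2018-01-01T00:00:00'}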
def publish(self, course):
""" Publish course commerce data to LMS.
Uses the Commerce API to publish course modes, prices, and SKUs to LMS. Uses
CreditCourse API endpoints to publish CreditCourse data to LMS when necessary.
Arguments:
course (Course): Course to be published.
Returns:
None, if publish operation succeeded; otherwise, error message.
"""
site = course.partner.default_site
course_id = course.id
error_message = _('Failed to publish commerce data for {course_id} to LMS.').format(course_id=course_id)
name = course.name
verification_deadline = self.get_course_verification_deadline(course)
modes = [self.serialize_seat_for_commerce_api(seat) for seat in course.seat_products]
has_credit = 'credit' in [mode['name'] for mode in modes]
if has_credit:
try:
data = {
'course_key': course_id,
'enabled': True
}
credit_api_client = site.siteconfiguration.credit_api_client
credit_api_client.courses(course_id).put(data)
logger.info('Successfully published CreditCourse for [%s] to LMS.', course_id)
except SlumberHttpBaseException as e:
# Note that %r is used to log the repr() of the response content, which may sometimes
# contain non-ASCII Unicode. We don't know (or want to guess) the encoding, so using %r will log the
# raw bytes of the message, freeing us from the possibility of encoding errors.
logger.exception(
'Failed to publish CreditCourse for [%s] to LMS. Status was [%d]. Body was [%s].',
course_id,
e.response.status_code,
e.content.decode('utf-8')
)
return error_message
except: # pylint: disable=bare-except
logger.exception('Failed to publish CreditCourse for [%s] to LMS.', course_id)
return error_message
try:
data = {
'id': course_id,
'name': name,
'verification_deadline': verification_deadline,
'modes': modes,
}
commerce_api_client = site.siteconfiguration.commerce_api_client
commerce_api_client.courses(course_id).put(data=data)
logger.info('Successfully published commerce data for [%s].', course_id)
return None
except SlumberHttpBaseException as e: # pylint: disable=bare-except
logger.exception(
'Failed to publish commerce data for [%s] to LMS. Status was [%d]. Body was [%s].',
course_id,
e.response.status_code,
e.content.decode('utf-8')
)
return self._parse_error(e.content.decode('utf-8'), error_message)
except Exception: # pylint: disable=broad-except
logger.exception('Failed to publish commerce data for [%s] to LMS.', course_id)
return error_message
def _parse_error(self, response, default_error_message):
"""When validation errors occur during publication, the LMS is expected
to return an error message.
Arguments:
response (Response): A 'Response' object which contains json error message.
default_error_message (str) : default error message in case of exception.
Returns:
string: Returns the error message extracted from response.content
along with default message. If no message is available in response
then the default message will be returned.
"""
message = None
try:
data = json.loads(response)
if isinstance(data, six.string_types):
message = data
elif isinstance(data, dict) and data:
message = list(data.values())[0]
if isinstance(message, list):
message = message[0]
except Exception: # pylint: disable=broad-except
pass
if message:
return ' '.join([default_error_message, message])
return default_error_message
| agpl-3.0 | 152,557,083,518,754,620 | 40.294521 | 116 | 0.603914 | false | 4.426579 | false | false | false |
Nic30/HWToolkit | hwt/synthesizer/rtlLevel/netlist.py | 1 | 8089 | from typing import List, Optional, Union
from hdlConvertorAst.hdlAst._defs import HdlIdDef
from hdlConvertorAst.hdlAst._expr import HdlValueId
from hdlConvertorAst.hdlAst._structural import HdlModuleDec, HdlModuleDef, \
HdlCompInst
from hwt.code import If
from hwt.hdl.operatorDefs import AllOps
from hwt.hdl.types.defs import BIT
from hwt.hdl.value import HValue
from hwt.serializer.utils import HdlStatement_sort_key, RtlSignal_sort_key
from hwt.synthesizer.dummyPlatform import DummyPlatform
from hwt.synthesizer.exceptions import SigLvlConfErr
from hwt.synthesizer.interfaceLevel.mainBases import InterfaceBase
from hwt.synthesizer.param import Param
from hwt.synthesizer.rtlLevel.mark_visibility_of_signals_and_check_drivers import\
markVisibilityOfSignalsAndCheckDrivers
from hwt.synthesizer.rtlLevel.remove_unconnected_signals import removeUnconnectedSignals
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal, NOT_SPECIFIED
from hwt.synthesizer.rtlLevel.rtlSyncSignal import RtlSyncSignal
from hwt.synthesizer.rtlLevel.statements_to_HdlStmCodeBlockContainers import\
statements_to_HdlStmCodeBlockContainers
from hwt.doc_markers import internal
class RtlNetlist():
"""
Hierarchical container for signals
:ivar ~.parent: optional parent for debug and late component inspection
:ivar ~.signals: set of all signals in this context
:ivar ~.statements: list of all statements which are connected to signals in this context
:ivar ~.subUnits: set of all units in this context
:type ~.interfaces: Dict[RtlSignal, DIRECTION]
:ivar ~.interfaces: initialized in create_HdlModuleDef
:type ~.ent: HdlModuleDec
:ivar ~.ent: initialized in create_HdlModuleDec
:type ~.arch: HdlModuleDef
:ivar ~.arch: initialized in create_HdlModuleDef
:ivar ~.hdl_objs: The list of HDL objects which were produced by this instance
usually contains HdlModudeleDef but may contain imports/globals etc.
"""
def __init__(self, parent: Optional["Unit"]=None):
self.parent = parent
self.signals = set()
self.statements = set()
self.subUnits = set()
self.interfaces = {}
self.hdl_objs = []
self.ent = None
self.arch = None
self._port_items = []
def sig(self, name, dtype=BIT, clk=None, syncRst=None,
def_val=None, nop_val=NOT_SPECIFIED) -> Union[RtlSignal, RtlSyncSignal]:
"""
Create new signal in this context
:param clk: clk signal, if specified signal is synthesized
as SyncSignal
:param syncRst: synchronous reset signal
:param def_val: a default value used for reset and intialization
:param nop_val: a value which is used to drive the signal if there is no other drive
(used to prevent latches and to specify default values for unconnected signals)
"""
_def_val = _try_cast_any_to_HValue(def_val, dtype, True)
if nop_val is not NOT_SPECIFIED:
nop_val = _try_cast_any_to_HValue(nop_val, dtype, False)
if clk is not None:
s = RtlSyncSignal(self, name, dtype, _def_val, nop_val)
if syncRst is not None and def_val is None:
raise SigLvlConfErr(
"Probably forgotten default value on sync signal %s", name)
# dst_resolve_fn is overridden because a default assignment would assign to the "next" signal
if syncRst is not None:
r = If(syncRst._isOn(),
s(_def_val, dst_resolve_fn=lambda x: x)
).Else(
s(s.next, dst_resolve_fn=lambda x: x)
)
else:
r = [
s(s.next, dst_resolve_fn=lambda x: x)
]
if isinstance(clk, (InterfaceBase, RtlSignal)):
clk_trigger = clk._onRisingEdge()
else:
# has to be tuple of (clk_sig, AllOps.RISING/FALLING_EDGE)
clk, clk_edge = clk
if clk_edge is AllOps.RISING_EDGE:
clk_trigger = clk._onRisingEdge()
elif clk_edge is AllOps.FALLING_EDGE:
clk_trigger = clk._onFallingEdge()
else:
raise ValueError(
"Invalid clock edge specification", clk_edge)
If(clk_trigger,
r
)
else:
if syncRst:
raise SigLvlConfErr(
f"Signal {name:s} has reset but has no clk")
s = RtlSignal(self, name, dtype, def_val=_def_val, nop_val=nop_val)
return s
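# Illustrative use from a netlist context (the names and the Bits type are
# example assumptions, not taken from this file):
#   cnt = netlist.sig("cnt", Bits(8), clk=clk, syncRst=rst, def_val=0)
#   cnt(cnt + 1)  # drives the register's next value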
def create_HdlModuleDec(self, name: str,
store_manager: "StoreManager",
params: List[Param]):
"""
Generate a module header (entity) for this module
"""
self.ent = ent = HdlModuleDec()
ent.name = store_manager.name_scope.checked_name(name, ent)
ns = store_manager.hierarchy_push(ent)
# create generics
for p in sorted(params, key=lambda x: x._name):
hdl_val = p.get_hdl_value()
v = HdlIdDef()
v.origin = p
v.name = p.hdl_name = ns.checked_name(p._name, p)
v.type = hdl_val._dtype
v.value = hdl_val
ent.params.append(v)
return ent
def create_HdlModuleDef(self,
target_platform: DummyPlatform,
store_manager: "StoreManager"):
"""
Generate a module body (architecture) for this module
* Resolve name collisions
* Convert netlist representation to HdlProcesses
* Remove unconnected
* Mark visibility of signals
"""
for proc in target_platform.beforeHdlArchGeneration:
proc(self)
ns = store_manager.name_scope
mdef = HdlModuleDef()
mdef.dec = self.ent
mdef.module_name = HdlValueId(self.ent.name, obj=self.ent)
mdef.name = "rtl"
processes = sorted(self.statements, key=HdlStatement_sort_key)
processes = sorted(statements_to_HdlStmCodeBlockContainers(processes), key=HdlStatement_sort_key)
# add signals, variables etc. in architecture
for s in sorted((s for s in self.signals
if not s.hidden and
s not in self.interfaces.keys()),
key=RtlSignal_sort_key):
v = HdlIdDef()
v.origin = s
s.name = v.name = ns.checked_name(s.name, s)
v.type = s._dtype
v.value = s.def_val
v.is_const = s._const
mdef.objs.append(v)
for p in processes:
p.name = ns.checked_name(p.name, p)
mdef.objs.extend(processes)
# instantiate subUnits in architecture
for u in self.subUnits:
ci = HdlCompInst()
ci.origin = u
ci.module_name = HdlValueId(u._ctx.ent.name, obj=u._ctx.ent)
ci.name = HdlValueId(ns.checked_name(u._name + "_inst", ci), obj=u)
e = u._ctx.ent
ci.param_map.extend(e.params)
ci.port_map.extend(e.ports)
mdef.objs.append(ci)
self.arch = mdef
return mdef
def getDebugScopeName(self):
scope = []
p = self.parent
while p is not None:
scope.append(p._name)
try:
p = p._parent
except AttributeError:
break
return ".".join(reversed(scope))
@internal
def _try_cast_any_to_HValue(v, dtype, require_const):
if isinstance(v, RtlSignal):
assert not require_const or v._const, \
"Initial value of signal has to be a constant"
return v._auto_cast(dtype)
elif isinstance(v, HValue):
return v._auto_cast(dtype)
elif isinstance(v, InterfaceBase):
return v._sig
else:
return dtype.from_py(v) | mit | 2,040,947,391,428,766,200 | 37.160377 | 105 | 0.593769 | false | 3.783442 | false | false | false |
EdinburghNLP/nematus | nematus/exponential_smoothing.py | 1 | 3493 | import tensorflow as tf
# How often to update smoothed variables (in terms of training steps).
DEFAULT_UPDATE_FREQUENCY = 5
class ExponentialSmoothing(object):
"""Defines TensorFlow variables and operations for exponential smoothing.
Following Marian [1], we maintain smoothed versions of all trainable
variables. This class creates the smoothed variables (assuming that the
model has already been initialized) and provides operations that can be
run to update the variables and to interchange the values of the raw and
the smoothed variables (which can be used to swap-in the smoothed versions
for validation, for instance).
Ideally, the smoothed variables would be updated after every training step,
but in practice that introduces a noticeable overhead (around 20%)
due to the need to transfer tensor values from GPU memory into CPU memory.
Instead we allow updating after every N steps by increasing the smoothing
factor accordingly. The default N=5 seems to be a good compromise.
[1]
"Marian: Fast Neural Machine Translation in C++",
Junczys-Dowmunt et al., in Proceedings of ACL 2018, System Demonstrations.
"""
def __init__(self, smoothing_factor,
update_frequency=DEFAULT_UPDATE_FREQUENCY):
"""Creates TF variables and operations.
Args:
smoothing_factor: float controlling weight of past vs new values.
update_frequency: integer indicating how often updates will occur.
"""
self._update_frequency = update_frequency
adjusted_smoothing_factor = smoothing_factor * update_frequency
# Smoothed variables are stored in CPU memory to avoid eating into
# valuable GPU memory.
device_spec = tf.DeviceSpec(device_type="CPU", device_index=0)
with tf.device(device_spec):
# Create variables to hold the smoothed versions of all trainable
# variables.
smooth_vars = {}
for v in tf.compat.v1.trainable_variables():
assert v.name[-2:] == ":0"
name = v.name[:-2] + "_smooth"
s = tf.compat.v1.get_variable(name=name,
initializer=tf.zeros_like(v),
trainable=False,
use_resource=True)
smooth_vars[v.name] = s
# Define the ops to update the smoothed variables.
self._update_ops = []
for v in tf.compat.v1.trainable_variables():
s = smooth_vars[v.name]
updated_s = (1 - adjusted_smoothing_factor) * s \
+ adjusted_smoothing_factor * v
self._update_ops += [tf.compat.v1.assign(s, updated_s)]
# Define the ops to swap the raw and smoothed variables.
self._swap_ops = []
for v in tf.compat.v1.trainable_variables():
s = smooth_vars[v.name]
v_value = v.read_value()
s_value = s.read_value()
with tf.control_dependencies([v_value, s_value]):
self._swap_ops += [v.assign(s_value)]
self._swap_ops += [s.assign(v_value)]
@property
def update_ops(self):
return self._update_ops
@property
def swap_ops(self):
return self._swap_ops
@property
def update_frequency(self):
return self._update_frequency
| bsd-3-clause | 2,266,908,589,127,385,000 | 42.123457 | 79 | 0.608932 | false | 4.371715 | false | false | false |
thakadu/Abraxas | scripts/load_feeds.py | 1 | 1986 | """Loads Feed data from a csv file into the feed table of the database"""
import logging
import csv
from optparse import OptionParser
from paste.deploy import appconfig
#from pylons import app_globals
from abraxas.config.environment import load_environment
from sqlalchemy import create_engine, MetaData, select
from sqlalchemy.exc import IntegrityError
import sqlalchemy as sa
"""
The format of the input file should be csv with these fields
Title, Web Url, Feed Url
"""
log = logging.getLogger(__name__)
class DataFormatException(Exception):
"""Raise when the csv file does not have the correct number of fields"""
pass
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('--ini',
help='INI file to use for application settings',
type='str',
default='development.ini')
parser.add_option('--filename',
help='File containing place names data.',
type='str',
default='data/feeds.csv')
(options, args) = parser.parse_args()
conf = appconfig('config:' + options.ini, relative_to='.')
load_environment(conf.global_conf, conf.local_conf)
engine = create_engine(conf['sqlalchemy.url'], echo=True)
meta = MetaData()
conn = engine.connect()
print conn
fh = open(options.filename)
reader = csv.reader(fh)
feed_table = sa.Table('feed', meta, autoload=True, autoload_with=engine)
for line in reader:
if len(line) != 3:
raise DataFormatException
title = line[0]
weburl = line[1]
url = line[2]
insert = feed_table.insert().values(
title=title,
url=url,
weburl=weburl)
try:
conn.execute(insert)
except IntegrityError:
# Most likely loading a duplicate feed row
log.debug("Duplicate row, skipping this line...")
continue
| bsd-3-clause | 1,864,919,659,832,746,000 | 27.782609 | 76 | 0.613797 | false | 4.336245 | false | false | false |
axiom-data-science/pyaxiom | pyaxiom/netcdf/sensors/dsg/timeseries/ir.py | 1 | 1602 | #!python
# coding=utf-8
from pyaxiom.netcdf import CFDataset
from pyaxiom import logger
class IndexedRaggedTimeseries(CFDataset):
@classmethod
def is_mine(cls, dsg):
try:
rvars = dsg.get_variables_by_attributes(cf_role='timeseries_id')
assert len(rvars) == 1
assert dsg.featureType.lower() == 'timeseries'
assert len(dsg.t_axes()) >= 1
assert len(dsg.x_axes()) >= 1
assert len(dsg.y_axes()) >= 1
r_index_vars = dsg.get_variables_by_attributes(
instance_dimension=lambda x: x is not None
)
assert len(r_index_vars) == 1
assert r_index_vars[0].instance_dimension in dsg.dimensions # Station dimension
# Allow for string variables
rvar = rvars[0]
# 0 = single
# 1 = array of strings/ints/bytes/etc
# 2 = array of character arrays
assert 0 <= len(rvar.dimensions) <= 2
except AssertionError:
return False
return True
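# CF indexed ragged layout sketch (illustrative, not from this file): an index
# variable on the observation dimension points each sample at its station, e.g.
#   int stationIndex(obs) ; stationIndex:instance_dimension = "station" ;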
def from_dataframe(self, df, variable_attributes=None, global_attributes=None):
variable_attributes = variable_attributes or {}
global_attributes = global_attributes or {}
raise NotImplementedError
def calculated_metadata(self, df=None, geometries=True, clean_cols=True, clean_rows=True):
# if df is None:
# df = self.to_dataframe(clean_cols=clean_cols, clean_rows=clean_rows)
raise NotImplementedError
def to_dataframe(self):
raise NotImplementedError
| mit | 3,502,373,414,636,524,500 | 31.693878 | 94 | 0.601124 | false | 4.035264 | false | false | false |
andrewyoung1991/scons | QMTest/TestCommon.py | 1 | 27429 | """
TestCommon.py: a testing framework for commands and scripts
with commonly useful error handling
The TestCommon module provides a simple, high-level interface for writing
tests of executable commands and scripts, especially commands and scripts
that interact with the file system. All methods throw exceptions and
exit on failure, with useful error messages. This makes a number of
explicit checks unnecessary, making the test scripts themselves simpler
to write and easier to read.
The TestCommon class is a subclass of the TestCmd class. In essence,
TestCommon is a wrapper that handles common TestCmd error conditions in
useful ways. You can use TestCommon directly, or subclass it for your
program and add additional (or override) methods to tailor it to your
program's specific needs. Alternatively, the TestCommon class serves
as a useful example of how to define your own TestCmd subclass.
As a subclass of TestCmd, TestCommon provides access to all of the
variables and methods from the TestCmd module. Consequently, you can
use any variable or method documented in the TestCmd module without
having to explicitly import TestCmd.
A TestCommon environment object is created via the usual invocation:
import TestCommon
test = TestCommon.TestCommon()
You can use all of the TestCmd keyword arguments when instantiating a
TestCommon object; see the TestCmd documentation for details.
Here is an overview of the methods and keyword arguments that are
provided by the TestCommon class:
test.must_be_writable('file1', ['file2', ...])
test.must_contain('file', 'required text\n')
test.must_contain_all_lines(output, lines, ['title', find])
test.must_contain_any_line(output, lines, ['title', find])
test.must_contain_exactly_lines(output, lines, ['title', find])
test.must_exist('file1', ['file2', ...])
test.must_match('file', "expected contents\n")
test.must_not_be_writable('file1', ['file2', ...])
test.must_not_contain('file', 'banned text\n')
test.must_not_contain_any_line(output, lines, ['title', find])
test.must_not_exist('file1', ['file2', ...])
test.run(options = "options to be prepended to arguments",
stdout = "expected standard output from the program",
stderr = "expected error output from the program",
status = expected_status,
match = match_function)
The TestCommon module also provides the following variables
TestCommon.python
TestCommon._python_
TestCommon.exe_suffix
TestCommon.obj_suffix
TestCommon.shobj_prefix
TestCommon.shobj_suffix
TestCommon.lib_prefix
TestCommon.lib_suffix
TestCommon.dll_prefix
TestCommon.dll_suffix
"""
# Copyright 2000-2010 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCommon.py 1.3.D001 2010/06/03 12:58:27 knight"
__version__ = "1.3"
import copy
import os
import stat
import sys
import glob
try:
from collections import UserList
except ImportError:
# no 'collections' module or no UserList in collections
exec('from UserList import UserList')
from TestCmd import *
from TestCmd import __all__
__all__.extend([ 'TestCommon',
'exe_suffix',
'obj_suffix',
'shobj_prefix',
'shobj_suffix',
'lib_prefix',
'lib_suffix',
'dll_prefix',
'dll_suffix',
])
try:
sorted
except NameError:
# Pre-2.4 Python has no sorted() function.
#
# The pre-2.4 Python list.sort() method does not support
# list.sort(key=) nor list.sort(reverse=) keyword arguments, so
# we must implement the functionality of those keyword arguments
# by hand instead of passing them to list.sort().
def sorted(iterable, cmp=None, key=None, reverse=False):
if key is not None:
result = [(key(x), x) for x in iterable]
else:
result = iterable[:]
if cmp is None:
# Pre-2.3 Python does not support list.sort(None).
result.sort()
else:
result.sort(cmp)
if key is not None:
result = [t1 for t0,t1 in result]
if reverse:
result.reverse()
return result
# Variables that describe the prefixes and suffixes on this system.
if sys.platform == 'win32':
exe_suffix = '.exe'
obj_suffix = '.obj'
shobj_suffix = '.obj'
shobj_prefix = ''
lib_prefix = ''
lib_suffix = '.lib'
dll_prefix = ''
dll_suffix = '.dll'
elif sys.platform == 'cygwin':
exe_suffix = '.exe'
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'cyg'
dll_suffix = '.dll'
elif sys.platform.find('irix') != -1:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.o'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.so'
elif sys.platform.find('darwin') != -1:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.dylib'
elif sys.platform.find('sunos') != -1:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.o'
shobj_prefix = 'so_'
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.so'
else:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.so'
def is_List(e):
return isinstance(e, (list, UserList))
def is_Tuple(e):
return isinstance(e, tuple)
def is_Sequence(e):
return (not hasattr(e, "strip") and
hasattr(e, "__getitem__") or
hasattr(e, "__iter__"))
def is_writable(f):
mode = os.stat(f)[stat.ST_MODE]
return mode & stat.S_IWUSR
def separate_files(flist):
existing = []
missing = []
for f in flist:
if os.path.exists(f):
existing.append(f)
else:
missing.append(f)
return existing, missing
if os.name == 'posix':
def _failed(self, status = 0):
if self.status is None or status is None:
return None
return _status(self) != status
def _status(self):
return self.status
elif os.name == 'nt':
def _failed(self, status = 0):
return not (self.status is None or status is None) and \
self.status != status
def _status(self):
return self.status
class TestCommon(TestCmd):
# Additional methods from the Perl Test::Cmd::Common module
# that we may wish to add in the future:
#
# $test->subdir('subdir', ...);
#
# $test->copy('src_file', 'dst_file');
def __init__(self, **kw):
"""Initialize a new TestCommon instance. This involves just
calling the base class initialization, and then changing directory
to the workdir.
"""
TestCmd.__init__(self, **kw)
os.chdir(self.workdir)
def options_arguments(self, options, arguments):
"""Merges the "options" keyword argument with the arguments."""
if options:
if arguments is None:
return options
if isinstance(options, str):
options = [options]
if isinstance(arguments, str):
arguments = [arguments]
arguments = ' '.join(options + arguments)
return arguments
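# e.g. options_arguments('-v', 'build') returns '-v build'; with arguments=None
# the options are returned unchanged (illustrative).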
def must_be_writable(self, *files):
"""Ensures that the specified file(s) exist and are writable.
An individual file can be specified as a list of directory names,
in which case the pathname will be constructed by concatenating
them. Exits FAILED if any of the files does not exist or is
not writable.
"""
files = [is_List(x) and os.path.join(*x) or x for x in files]
existing, missing = separate_files(files)
unwritable = [x for x in existing if not is_writable(x)]
if missing:
print "Missing files: `%s'" % "', `".join(missing)
if unwritable:
print "Unwritable files: `%s'" % "', `".join(unwritable)
self.fail_test(missing + unwritable)
def must_contain(self, file, required, mode = 'rb', find = None):
"""Ensures that the specified file contains the required text.
"""
file_contents = self.read(file, mode)
if find is None:
def find(o, l):
try:
return o.index(l)
except ValueError:
return None
contains = find(file_contents, required)
if contains is None:
print "File `%s' does not contain required string." % file
print self.banner('Required string ')
print required
print self.banner('%s contents ' % file)
print file_contents
self.fail_test(contains is None)
def must_contain_all_lines(self, output, lines, title=None, find=None):
"""Ensures that the specified output string (first argument)
contains all of the specified lines (second argument).
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
function, of the form "find(line, output), to use when searching
for lines in the output.
"""
if find is None:
def find(o, l):
try:
return o.index(l)
except ValueError:
return None
missing = []
if is_List(output):
output = '\n'.join(output)
for line in lines:
if find(output, line) is None:
missing.append(line)
if missing:
if title is None:
title = 'output'
sys.stdout.write("Missing expected lines from %s:\n" % title)
for line in missing:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner(title + ' ') + '\n')
sys.stdout.write(output)
self.fail_test()
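# e.g. test.must_contain_all_lines(test.stdout(), ['built target foo', 'done'])  # illustrative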
def must_contain_any_line(self, output, lines, title=None, find=None):
"""Ensures that the specified output string (first argument)
contains at least one of the specified lines (second argument).
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
function, of the form "find(line, output), to use when searching
for lines in the output.
"""
if find is None:
def find(o, l):
try:
return o.index(l)
except ValueError:
return None
for line in lines:
if find(output, line) is not None:
return
if title is None:
title = 'output'
sys.stdout.write("Missing any expected line from %s:\n" % title)
for line in lines:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner(title + ' ') + '\n')
sys.stdout.write(output)
self.fail_test()
def must_contain_exactly_lines(self, output, expect, title=None, find=None):
"""Ensures that the specified output string (first argument)
contains all of the lines in the expected string (second argument)
with none left over.
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
function, of the form "find(line, output), to use when searching
for lines in the output. The function must return the index
of the found line in the output, or None if the line is not found.
"""
out = output.splitlines()
if is_List(expect):
exp = [ e.rstrip('\n') for e in expect ]
else:
exp = expect.splitlines()
if sorted(out) == sorted(exp):
# early out for exact match
return
if find is None:
def find(o, l):
try:
return o.index(l)
except ValueError:
return None
missing = []
for line in exp:
found = find(out, line)
if found is None:
missing.append(line)
else:
out.pop(found)
if not missing and not out:
# all lines were matched
return
if title is None:
title = 'output'
if missing:
sys.stdout.write("Missing expected lines from %s:\n" % title)
for line in missing:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner('Missing %s ' % title) + '\n')
if out:
sys.stdout.write("Extra unexpected lines from %s:\n" % title)
for line in out:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner('Extra %s ' % title) + '\n')
sys.stdout.flush()
self.fail_test()
def must_contain_lines(self, lines, output, title=None, find = None):
# Deprecated; retain for backwards compatibility.
return self.must_contain_all_lines(output, lines, title, find)
def must_exist(self, *files):
"""Ensures that the specified file(s) must exist. An individual
file may be specified as a list of directory names, in which case the
pathname will be constructed by concatenating them. Exits FAILED
if any of the files does not exist.
"""
files = [is_List(x) and os.path.join(*x) or x for x in files]
missing = [x for x in files if not os.path.exists(x) and not os.path.islink(x) ]
if missing:
print "Missing files: `%s'" % "', `".join(missing)
self.fail_test(missing)
def must_exist_one_of(self, files):
"""Ensures that at least one of the specified file(s) exists.
The filenames can be given as a list, where each entry may be
a single path string, or a tuple of folder names and the final
filename that get concatenated.
Supports wildcard names like 'foo-1.2.3-*.rpm'.
Exits FAILED if none of the files exists.
"""
missing = []
for x in files:
if is_List(x) or is_Tuple(x):
xpath = os.path.join(*x)
else:
xpath = is_Sequence(x) and os.path.join(x) or x
if glob.glob(xpath):
return
missing.append(xpath)
print "Missing one of: `%s'" % "', `".join(missing)
self.fail_test(missing)
def must_match(self, file, expect, mode = 'rb', match=None):
"""Matches the contents of the specified file (first argument)
against the expected contents (second argument). The expected
contents are a list of lines or a string which will be split
on newlines.
"""
file_contents = self.read(file, mode)
if not match:
match = self.match
try:
self.fail_test(not match(file_contents, expect))
except KeyboardInterrupt:
raise
except:
print "Unexpected contents of `%s'" % file
self.diff(expect, file_contents, 'contents ')
raise
def must_not_contain(self, file, banned, mode = 'rb', find = None):
"""Ensures that the specified file doesn't contain the banned text.
"""
file_contents = self.read(file, mode)
if find is None:
def find(o, l):
try:
return o.index(l)
except ValueError:
return None
contains = find(file_contents, banned)
if contains:
print "File `%s' contains banned string." % file
print self.banner('Banned string ')
print banned
print self.banner('%s contents ' % file)
print file_contents
self.fail_test(contains)
def must_not_contain_any_line(self, output, lines, title=None, find=None):
"""Ensures that the specified output string (first argument)
does not contain any of the specified lines (second argument).
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
function, of the form "find(line, output), to use when searching
for lines in the output.
"""
if find is None:
def find(o, l):
try:
return o.index(l)
except ValueError:
return None
unexpected = []
for line in lines:
if find(output, line) is not None:
unexpected.append(line)
if unexpected:
if title is None:
title = 'output'
sys.stdout.write("Unexpected lines in %s:\n" % title)
for line in unexpected:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner(title + ' ') + '\n')
sys.stdout.write(output)
self.fail_test()
def must_not_contain_lines(self, lines, output, title=None, find=None):
return self.must_not_contain_any_line(output, lines, title, find)
def must_not_exist(self, *files):
"""Ensures that the specified file(s) must not exist.
An individual file may be specified as a list of directory names, in
which case the pathname will be constructed by concatenating them.
Exits FAILED if any of the files exists.
"""
files = [is_List(x) and os.path.join(*x) or x for x in files]
existing = [x for x in files if os.path.exists(x) or os.path.islink(x)]
if existing:
print "Unexpected files exist: `%s'" % "', `".join(existing)
self.fail_test(existing)
def must_not_exist_any_of(self, files):
"""Ensures that none of the specified file(s) exists.
The filenames can be given as a list, where each entry may be
a single path string, or a tuple of folder names and the final
filename that get concatenated.
Supports wildcard names like 'foo-1.2.3-*.rpm'.
Exits FAILED if any of the files exists.
"""
existing = []
for x in files:
if is_List(x) or is_Tuple(x):
xpath = os.path.join(*x)
else:
xpath = is_Sequence(x) and os.path.join(x) or x
if glob.glob(xpath):
existing.append(xpath)
if existing:
print "Unexpected files exist: `%s'" % "', `".join(existing)
self.fail_test(existing)
def must_not_be_writable(self, *files):
"""Ensures that the specified file(s) exist and are not writable.
An individual file can be specified as a list of directory names,
in which case the pathname will be constructed by concatenating
them. Exits FAILED if any of the files does not exist or is
writable.
"""
files = [is_List(x) and os.path.join(*x) or x for x in files]
existing, missing = separate_files(files)
writable = list(filter(is_writable, existing))
if missing:
print "Missing files: `%s'" % "', `".join(missing)
if writable:
print "Writable files: `%s'" % "', `".join(writable)
self.fail_test(missing + writable)
def _complete(self, actual_stdout, expected_stdout,
actual_stderr, expected_stderr, status, match):
"""
Post-processes running a subcommand, checking for failure
status and displaying output appropriately.
"""
if _failed(self, status):
expect = ''
if status != 0:
expect = " (expected %s)" % str(status)
print "%s returned %s%s" % (self.program, _status(self), expect)
print self.banner('STDOUT ')
print actual_stdout
print self.banner('STDERR ')
print actual_stderr
self.fail_test()
if (expected_stdout is not None
and not match(actual_stdout, expected_stdout)):
self.diff(expected_stdout, actual_stdout, 'STDOUT ')
if actual_stderr:
print self.banner('STDERR ')
print actual_stderr
self.fail_test()
if (expected_stderr is not None
and not match(actual_stderr, expected_stderr)):
print self.banner('STDOUT ')
print actual_stdout
self.diff(expected_stderr, actual_stderr, 'STDERR ')
self.fail_test()
def start(self, program = None,
interpreter = None,
options = None,
arguments = None,
universal_newlines = None,
**kw):
"""
Starts a program or script for the test environment, handling
any exceptions.
"""
arguments = self.options_arguments(options, arguments)
try:
return TestCmd.start(self, program, interpreter, arguments,
universal_newlines, **kw)
except KeyboardInterrupt:
raise
except Exception, e:
print self.banner('STDOUT ')
try:
print self.stdout()
except IndexError:
pass
print self.banner('STDERR ')
try:
print self.stderr()
except IndexError:
pass
cmd_args = self.command_args(program, interpreter, arguments)
sys.stderr.write('Exception trying to execute: %s\n' % cmd_args)
raise e
def finish(self, popen, stdout = None, stderr = '', status = 0, **kw):
"""
Finishes and waits for the process being run under control of
the specified popen argument. Additional arguments are similar
to those of the run() method:
stdout The expected standard output from
the command. A value of None means
don't test standard output.
stderr The expected error output from
the command. A value of None means
don't test error output.
status The expected exit status from the
command. A value of None means don't
test exit status.
"""
TestCmd.finish(self, popen, **kw)
match = kw.get('match', self.match)
self._complete(self.stdout(), stdout,
self.stderr(), stderr, status, match)
def run(self, options = None, arguments = None,
stdout = None, stderr = '', status = 0, **kw):
"""Runs the program under test, checking that the test succeeded.
The parameters are the same as the base TestCmd.run() method,
with the addition of:
options Extra options that get appended to the beginning
of the arguments.
stdout The expected standard output from
the command. A value of None means
don't test standard output.
stderr The expected error output from
the command. A value of None means
don't test error output.
status The expected exit status from the
command. A value of None means don't
test exit status.
By default, this expects a successful exit (status = 0), does
not test standard output (stdout = None), and expects that error
output is empty (stderr = "").
"""
kw['arguments'] = self.options_arguments(options, arguments)
try:
match = kw['match']
del kw['match']
except KeyError:
match = self.match
TestCmd.run(self, **kw)
self._complete(self.stdout(), stdout,
self.stderr(), stderr, status, match)
def skip_test(self, message="Skipping test.\n"):
"""Skips a test.
Proper test-skipping behavior is dependent on the external
TESTCOMMON_PASS_SKIPS environment variable. If set, we treat
the skip as a PASS (exit 0), and otherwise treat it as NO RESULT.
In either case, we print the specified message as an indication
that the substance of the test was skipped.
(This was originally added to support development under Aegis.
Technically, skipping a test is a NO RESULT, but Aegis would
treat that as a test failure and prevent the change from going to
the next step. Since we didn't want to force anyone using Aegis
to have to install absolutely every tool used by the tests, we
would actually report to Aegis that a skipped test has PASSED
so that the workflow isn't held up.)
"""
if message:
sys.stdout.write(message)
sys.stdout.flush()
pass_skips = os.environ.get('TESTCOMMON_PASS_SKIPS')
if pass_skips in [None, 0, '0']:
# skip=1 means skip this function when showing where this
# result came from. They only care about the line where the
# script called test.skip_test(), not the line number where
# we call test.no_result().
self.no_result(skip=1)
else:
# We're under the development directory for this change,
# so this is an Aegis invocation; pass the test (exit 0).
self.pass_test()
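# Illustrative usage sketch (added for exposition): a test script would
# typically drive the assertion helpers above on an instance of this class.
# The option string and file names below are hypothetical.
def _example_assertions(test):
    # Run the program under test, expecting a clean exit and empty stderr.
    test.run(options='-v', stdout=None, stderr='', status=0)
    # Assert on files and output produced by the run.
    test.must_exist('output.log')
    test.must_contain('output.log', 'build succeeded')
    test.must_not_contain_any_line(test.stdout(), ['Traceback', 'ERROR'])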
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 3,280,582,583,667,174,400 | 36.116373 | 88 | 0.580298 | false | 4.225046 | true | false | false |
Pikecillo/genna | external/PyXML-0.8.4/test/dom/test_entity.py | 1 | 1296 | from TestSuite import EMPTY_NAMESPACE
def test(tester):
tester.startGroup('Entity')
tester.startTest('Testing syntax')
try:
from xml.dom import Entity
from xml.dom.Entity import Entity
except:
tester.error('Error in syntax', 1)
tester.testDone()
tester.startTest('Creating test environment')
from xml.dom import implementation
dt = implementation.createDocumentType('','','')
doc = implementation.createDocument(EMPTY_NAMESPACE,'ROOT',dt)
ent = doc._4dom_createEntity("-//FOURTHOUGHT//EN", "/tmp/entity", "")
tester.testDone()
tester.startTest('Testing attributes')
if ent.publicId != '-//FOURTHOUGHT//EN':
tester.error('publicId is incorrect')
if ent.systemId != '/tmp/entity':
tester.error('systemId is incorrect')
tester.testDone()
tester.startTest('Test cloneNode()')
ent1 = ent.cloneNode(1)
if ent1.publicId != ent.publicId:
tester.error("cloneNode fails on publicId")
if ent1.systemId != ent.systemId:
tester.error("cloneNode fails on systemId")
tester.testDone()
return tester.groupDone()
if __name__ == '__main__':
import sys
import TestSuite
tester = TestSuite.TestSuite()
retVal = test(tester)
sys.exit(retVal)
| gpl-2.0 | 7,661,269,185,216,156,000 | 24.411765 | 73 | 0.652778 | false | 4.088328 | true | false | false |
yro/veda_worker | veda_worker/generate_delivery.py | 1 | 5380 |
import boto
import boto.s3
from boto.s3.key import Key
import hashlib
import os
from os.path import expanduser
import sys
import shutil
from global_vars import *
from reporting import ErrorObject
from config import WorkerSetup
homedir = expanduser("~")
"""
Gets the specified Video and Encode objects, delivers the file from
VEDA_WORK_DIR to the endpoint, retrieves and checks the URL, and passes
the info back to the objects.
"""
WS = WorkerSetup()
if os.path.exists(WS.instance_yaml):
WS.run()
settings = WS.settings_dict
class Deliverable():
def __init__(self, VideoObject, encode_profile, output_file, **kwargs):
self.VideoObject = VideoObject
self.encode_profile = encode_profile
self.output_file = output_file
self.jobid = kwargs.get('jobid', None)
self.workdir = kwargs.get('workdir', None)
self.endpoint_url = None
self.hash_sum = 0
self.upload_filesize = 0
self.delivered = False
def run(self):
"""
Get file particulars, upload to s3
"""
if self.workdir is None:
if self.jobid is None:
self.workdir = os.path.join(
homedir,
'ENCODE_WORKDIR'
)
else:
self.workdir = os.path.join(
homedir,
'ENCODE_WORKDIR',
self.jobid
)
# file size
self.upload_filesize = os.stat(
os.path.join(self.workdir, self.output_file)
).st_size
# hash sum
self.hash_sum = hashlib.md5(
open(
os.path.join(
self.workdir,
self.output_file
), 'rb'
).read()
).hexdigest()
if self.upload_filesize < MULTI_UPLOAD_BARRIER:
"""
Upload single part
"""
self.delivered = self._s3_upload()
else:
"""
Upload multipart
"""
self.delivered = self._boto_multipart()
if self.delivered is False:
return None
self.endpoint_url = '/'.join((
'https://s3.amazonaws.com',
settings['aws_deliver_bucket'],
self.output_file
))
return True
def _s3_upload(self):
"""
Upload as a single part (file size below the MULTI_UPLOAD_BARRIER
threshold from node_config)
"""
try:
conn = boto.connect_s3(
settings['aws_deliver_access_key'],
settings['aws_deliver_secret_key']
)
delv_bucket = conn.get_bucket(settings['aws_deliver_bucket'])
except:
ErrorObject().print_error(
message='Deliverable Fail: s3 Connection Error - Singleton'
)
return False
upload_key = Key(delv_bucket)
upload_key.key = self.output_file
upload_key.set_contents_from_filename(
os.path.join(self.workdir, self.output_file)
)
return True
def _boto_multipart(self):
"""
Split file into chunks, upload chunks
NOTE: this should never happen, as your files should be much
smaller than this, but one never knows
"""
if not os.path.exists(
os.path.join(
self.workdir,
self.output_file.split('.')[0]
)
):
os.mkdir(os.path.join(
self.workdir,
self.output_file.split('.')[0]
))
os.chdir(
os.path.join(self.workdir, self.output_file.split('.')[0])
)
# Split File into chunks
split_command = 'split -b10m -a5' # 10 MB chunks, 5-character part suffixes
sys.stdout.write('%s : %s\n' % (self.output_file, 'Generating Multipart'))
os.system(' '.join((split_command, os.path.join(self.workdir, self.output_file))))
sys.stdout.flush()
# Connect to s3
try:
c = boto.connect_s3(
settings['aws_deliver_access_key'],
settings['aws_deliver_secret_key']
)
b = c.lookup(settings['aws_deliver_bucket'])
except:
ErrorObject().print_error(
message='Deliverable Fail: s3 Connection Error - Multipart'
)
return False
if b is None:
ErrorObject().print_error(
message='Deliverable Fail: s3 Bucket Connection Error'
)
return False
"""
Upload and stitch parts
"""
mp = b.initiate_multipart_upload(self.output_file)
x = 1
for fle in sorted(os.listdir(
os.path.join(
self.workdir,
self.output_file.split('.')[0]
)
)):
sys.stdout.write('%s : %s\r' % (fle, 'uploading part'))
fp = open(fle, 'rb')
mp.upload_part_from_file(fp, x)
fp.close()
sys.stdout.flush()
x += 1
sys.stdout.write('\n')
mp.complete_upload()
# Clean up multipart
shutil.rmtree(os.path.join(self.workdir, self.output_file.split('.')[0]))
return True
def main():
pass
if __name__ == '__main__':
sys.exit(main())
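# Illustrative sketch (added for exposition, not part of the original
# module): how a worker might hand a finished encode to Deliverable.
# `video_object`, the profile name and the file names are hypothetical.
def _example_delivery(video_object):
    deliverable = Deliverable(
        VideoObject=video_object,
        encode_profile='desktop_mp4',
        output_file='XXXXXXXX_desktop_mp4.mp4',
        jobid='XXXXXXXX',
    )
    if deliverable.run() is True:
        # On success the S3 URL and md5 sum are available on the object.
        return deliverable.endpoint_url, deliverable.hash_sum
    return None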
| gpl-3.0 | 7,279,386,653,945,648,000 | 26.589744 | 90 | 0.512639 | false | 4.148034 | false | false | false |
SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/algorithms/isomorphism/ismags.py | 1 | 42775 | """
****************
ISMAGS Algorithm
****************
Provides a Python implementation of the ISMAGS algorithm. [1]_
It is capable of finding (subgraph) isomorphisms between two graphs, taking the
symmetry of the subgraph into account. In most cases the VF2 algorithm is
faster (at least on small graphs) than this implementation, but in some cases
there is an exponential number of isomorphisms that are symmetrically
equivalent. In that case, the ISMAGS algorithm will provide only one solution
per symmetry group.
>>> petersen = nx.petersen_graph()
>>> ismags = nx.isomorphism.ISMAGS(petersen, petersen)
>>> isomorphisms = list(ismags.isomorphisms_iter(symmetry=False))
>>> len(isomorphisms)
120
>>> isomorphisms = list(ismags.isomorphisms_iter(symmetry=True))
>>> answer = [{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7:7, 8: 8, 9: 9}]
>>> answer == isomorphisms
True
In addition, this implementation also provides an interface to find the
largest common induced subgraph [2]_ between any two graphs, again taking
symmetry into account. Given `graph` and `subgraph` the algorithm will remove
nodes from the `subgraph` until `subgraph` is isomorphic to a subgraph of
`graph`. Since only the symmetry of `subgraph` is taken into account it is
worth thinking about how you provide your graphs:
>>> graph1 = nx.path_graph(4)
>>> graph2 = nx.star_graph(3)
>>> ismags = nx.isomorphism.ISMAGS(graph1, graph2)
>>> ismags.is_isomorphic()
False
>>> largest_common_subgraph = list(ismags.largest_common_subgraph())
>>> answer = [
... {1: 0, 0: 1, 2: 2},
... {2: 0, 1: 1, 3: 2}
... ]
>>> answer == largest_common_subgraph
True
>>> ismags2 = nx.isomorphism.ISMAGS(graph2, graph1)
>>> largest_common_subgraph = list(ismags2.largest_common_subgraph())
>>> answer = [
... {1: 0, 0: 1, 2: 2},
... {1: 0, 0: 1, 3: 2},
... {2: 0, 0: 1, 1: 2},
... {2: 0, 0: 1, 3: 2},
... {3: 0, 0: 1, 1: 2},
... {3: 0, 0: 1, 2: 2}
... ]
>>> answer == largest_common_subgraph
True
However, when not taking symmetry into account, it doesn't matter:
>>> largest_common_subgraph = list(ismags.largest_common_subgraph(symmetry=False))
>>> answer = [
... {1: 0, 0: 1, 2: 2},
... {1: 0, 2: 1, 0: 2},
... {2: 0, 1: 1, 3: 2},
... {2: 0, 3: 1, 1: 2},
... {1: 0, 0: 1, 2: 3},
... {1: 0, 2: 1, 0: 3},
... {2: 0, 1: 1, 3: 3},
... {2: 0, 3: 1, 1: 3},
... {1: 0, 0: 2, 2: 3},
... {1: 0, 2: 2, 0: 3},
... {2: 0, 1: 2, 3: 3},
... {2: 0, 3: 2, 1: 3}
... ]
>>> answer == largest_common_subgraph
True
>>> largest_common_subgraph = list(ismags2.largest_common_subgraph(symmetry=False))
>>> answer = [
... {1: 0, 0: 1, 2: 2},
... {1: 0, 0: 1, 3: 2},
... {2: 0, 0: 1, 1: 2},
... {2: 0, 0: 1, 3: 2},
... {3: 0, 0: 1, 1: 2},
... {3: 0, 0: 1, 2: 2},
... {1: 1, 0: 2, 2: 3},
... {1: 1, 0: 2, 3: 3},
... {2: 1, 0: 2, 1: 3},
... {2: 1, 0: 2, 3: 3},
... {3: 1, 0: 2, 1: 3},
... {3: 1, 0: 2, 2: 3}
... ]
>>> answer == largest_common_subgraph
True
Notes
-----
- The current implementation works for undirected graphs only. The algorithm
in general should work for directed graphs as well though.
- Node keys for both provided graphs need to be fully orderable as well as
hashable.
- Node and edge equality is assumed to be transitive: if A is equal to B, and
B is equal to C, then A is equal to C.
References
----------
.. [1] M. Houbraken, S. Demeyer, T. Michoel, P. Audenaert, D. Colle,
M. Pickavet, "The Index-Based Subgraph Matching Algorithm with General
Symmetries (ISMAGS): Exploiting Symmetry for Faster Subgraph
Enumeration", PLoS One 9(5): e97896, 2014.
https://doi.org/10.1371/journal.pone.0097896
.. [2] https://en.wikipedia.org/wiki/Maximum_common_induced_subgraph
"""
__all__ = ["ISMAGS"]
from collections import defaultdict, Counter
from functools import reduce, wraps
import itertools
def are_all_equal(iterable):
"""
Returns ``True`` if and only if all elements in `iterable` are equal; and
``False`` otherwise.
Parameters
----------
iterable: collections.abc.Iterable
The container whose elements will be checked.
Returns
-------
bool
``True`` iff all elements in `iterable` compare equal, ``False``
otherwise.
"""
try:
shape = iterable.shape
except AttributeError:
pass
else:
if len(shape) > 1:
message = "The function does not works on multidimension arrays."
raise NotImplementedError(message) from None
iterator = iter(iterable)
first = next(iterator, None)
return all(item == first for item in iterator)
def make_partitions(items, test):
"""
Partitions items into sets based on the outcome of ``test(item1, item2)``.
Pairs of items for which `test` returns `True` end up in the same set.
Parameters
----------
items : collections.abc.Iterable[collections.abc.Hashable]
Items to partition
test : collections.abc.Callable[collections.abc.Hashable, collections.abc.Hashable]
A function that will be called with 2 arguments, taken from items.
Should return `True` if those 2 items need to end up in the same
partition, and `False` otherwise.
Returns
-------
list[set]
A list of sets, with each set containing part of the items in `items`,
such that ``all(test(*pair) for pair in itertools.combinations(set, 2))
== True``
Notes
-----
The function `test` is assumed to be transitive: if ``test(a, b)`` and
``test(b, c)`` return ``True``, then ``test(a, c)`` must also be ``True``.
"""
partitions = []
for item in items:
for partition in partitions:
p_item = next(iter(partition))
if test(item, p_item):
partition.add(item)
break
else: # No break
partitions.append({item})
return partitions
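# Illustrative example (added for exposition, not part of the original
# module): items with equal parity end up in the same partition.
def _example_make_partitions():
    parts = make_partitions([1, 2, 3, 4, 5], lambda a, b: a % 2 == b % 2)
    # parts == [{1, 3, 5}, {2, 4}]
    return parts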
def partition_to_color(partitions):
"""
Creates a dictionary that maps every item in every partition to the
index of its partition in `partitions`.
Parameters
----------
partitions: collections.abc.Sequence[collections.abc.Iterable]
As returned by :func:`make_partitions`.
Returns
-------
dict
"""
colors = dict()
for color, keys in enumerate(partitions):
for key in keys:
colors[key] = color
return colors
def intersect(collection_of_sets):
"""
Given a collection of sets, returns the intersection of those sets.
Parameters
----------
collection_of_sets: collections.abc.Collection[set]
A collection of sets.
Returns
-------
set
An intersection of all sets in `collection_of_sets`. Will have the same
type as the item initially taken from `collection_of_sets`.
"""
collection_of_sets = list(collection_of_sets)
first = collection_of_sets.pop()
out = reduce(set.intersection, collection_of_sets, set(first))
return type(first)(out)
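# Illustrative example (added for exposition) combining the helpers above:
# partition_to_color indexes each item by its partition, and intersect
# collapses a collection of sets into their common elements.
def _example_partition_helpers():
    colors = partition_to_color([{1, 3, 5}, {2, 4}])
    # colors == {1: 0, 3: 0, 5: 0, 2: 1, 4: 1}
    common = intersect([frozenset({1, 2, 3}), frozenset({2, 3, 4})])
    # common == frozenset({2, 3})
    return colors, common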
class ISMAGS:
"""
Implements the ISMAGS subgraph matching algorithm. [1]_ ISMAGS stands for
"Index-based Subgraph Matching Algorithm with General Symmetries". As the
name implies, it is symmetry aware and will only generate non-symmetric
isomorphisms.
Notes
-----
The implementation imposes additional conditions compared to the VF2
algorithm on the graphs provided and the comparison functions
(:attr:`node_equality` and :attr:`edge_equality`):
- Node keys in both graphs must be orderable as well as hashable.
- Equality must be transitive: if A is equal to B, and B is equal to C,
then A must be equal to C.
Attributes
----------
graph: networkx.Graph
subgraph: networkx.Graph
node_equality: collections.abc.Callable
The function called to see if two nodes should be considered equal.
Its signature looks like this:
``f(graph1: networkx.Graph, node1, graph2: networkx.Graph, node2) -> bool``.
`node1` is a node in `graph1`, and `node2` a node in `graph2`.
Constructed from the argument `node_match`.
edge_equality: collections.abc.Callable
The function called to see if two edges should be considered equal.
Its signature looks like this:
``f(graph1: networkx.Graph, edge1, graph2: networkx.Graph, edge2) -> bool``.
`edge1` is an edge in `graph1`, and `edge2` an edge in `graph2`.
Constructed from the argument `edge_match`.
References
----------
.. [1] M. Houbraken, S. Demeyer, T. Michoel, P. Audenaert, D. Colle,
M. Pickavet, "The Index-Based Subgraph Matching Algorithm with General
Symmetries (ISMAGS): Exploiting Symmetry for Faster Subgraph
Enumeration", PLoS One 9(5): e97896, 2014.
https://doi.org/10.1371/journal.pone.0097896
"""
def __init__(self, graph, subgraph, node_match=None, edge_match=None, cache=None):
"""
Parameters
----------
graph: networkx.Graph
subgraph: networkx.Graph
node_match: collections.abc.Callable or None
Function used to determine whether two nodes are equivalent. Its
signature should look like ``f(n1: dict, n2: dict) -> bool``, with
`n1` and `n2` node property dicts. See also
:func:`~networkx.algorithms.isomorphism.categorical_node_match` and
friends.
If `None`, all nodes are considered equal.
edge_match: collections.abc.Callable or None
Function used to determine whether two edges are equivalent. Its
signature should look like ``f(e1: dict, e2: dict) -> bool``, with
`e1` and `e2` edge property dicts. See also
:func:`~networkx.algorithms.isomorphism.categorical_edge_match` and
friends.
If `None`, all edges are considered equal.
cache: collections.abc.Mapping
A cache used for caching graph symmetries.
"""
# TODO: graph and subgraph setter methods that invalidate the caches.
# TODO: allow for precomputed partitions and colors
self.graph = graph
self.subgraph = subgraph
self._symmetry_cache = cache
# Naming conventions are taken from the original paper. For your
# sanity:
# sg: subgraph
# g: graph
# e: edge(s)
# n: node(s)
# So: sgn means "subgraph nodes".
self._sgn_partitions_ = None
self._sge_partitions_ = None
self._sgn_colors_ = None
self._sge_colors_ = None
self._gn_partitions_ = None
self._ge_partitions_ = None
self._gn_colors_ = None
self._ge_colors_ = None
self._node_compat_ = None
self._edge_compat_ = None
if node_match is None:
self.node_equality = self._node_match_maker(lambda n1, n2: True)
self._sgn_partitions_ = [set(self.subgraph.nodes)]
self._gn_partitions_ = [set(self.graph.nodes)]
self._node_compat_ = {0: 0}
else:
self.node_equality = self._node_match_maker(node_match)
if edge_match is None:
self.edge_equality = self._edge_match_maker(lambda e1, e2: True)
self._sge_partitions_ = [set(self.subgraph.edges)]
self._ge_partitions_ = [set(self.graph.edges)]
self._edge_compat_ = {0: 0}
else:
self.edge_equality = self._edge_match_maker(edge_match)
@property
def _sgn_partitions(self):
if self._sgn_partitions_ is None:
def nodematch(node1, node2):
return self.node_equality(self.subgraph, node1, self.subgraph, node2)
self._sgn_partitions_ = make_partitions(self.subgraph.nodes, nodematch)
return self._sgn_partitions_
@property
def _sge_partitions(self):
if self._sge_partitions_ is None:
def edgematch(edge1, edge2):
return self.edge_equality(self.subgraph, edge1, self.subgraph, edge2)
self._sge_partitions_ = make_partitions(self.subgraph.edges, edgematch)
return self._sge_partitions_
@property
def _gn_partitions(self):
if self._gn_partitions_ is None:
def nodematch(node1, node2):
return self.node_equality(self.graph, node1, self.graph, node2)
self._gn_partitions_ = make_partitions(self.graph.nodes, nodematch)
return self._gn_partitions_
@property
def _ge_partitions(self):
if self._ge_partitions_ is None:
def edgematch(edge1, edge2):
return self.edge_equality(self.graph, edge1, self.graph, edge2)
self._ge_partitions_ = make_partitions(self.graph.edges, edgematch)
return self._ge_partitions_
@property
def _sgn_colors(self):
if self._sgn_colors_ is None:
self._sgn_colors_ = partition_to_color(self._sgn_partitions)
return self._sgn_colors_
@property
def _sge_colors(self):
if self._sge_colors_ is None:
self._sge_colors_ = partition_to_color(self._sge_partitions)
return self._sge_colors_
@property
def _gn_colors(self):
if self._gn_colors_ is None:
self._gn_colors_ = partition_to_color(self._gn_partitions)
return self._gn_colors_
@property
def _ge_colors(self):
if self._ge_colors_ is None:
self._ge_colors_ = partition_to_color(self._ge_partitions)
return self._ge_colors_
@property
def _node_compatibility(self):
if self._node_compat_ is not None:
return self._node_compat_
self._node_compat_ = {}
for sgn_part_color, gn_part_color in itertools.product(
range(len(self._sgn_partitions)), range(len(self._gn_partitions))
):
sgn = next(iter(self._sgn_partitions[sgn_part_color]))
gn = next(iter(self._gn_partitions[gn_part_color]))
if self.node_equality(self.subgraph, sgn, self.graph, gn):
self._node_compat_[sgn_part_color] = gn_part_color
return self._node_compat_
@property
def _edge_compatibility(self):
if self._edge_compat_ is not None:
return self._edge_compat_
self._edge_compat_ = {}
for sge_part_color, ge_part_color in itertools.product(
range(len(self._sge_partitions)), range(len(self._ge_partitions))
):
sge = next(iter(self._sge_partitions[sge_part_color]))
ge = next(iter(self._ge_partitions[ge_part_color]))
if self.edge_equality(self.subgraph, sge, self.graph, ge):
self._edge_compat_[sge_part_color] = ge_part_color
return self._edge_compat_
@staticmethod
def _node_match_maker(cmp):
@wraps(cmp)
def comparer(graph1, node1, graph2, node2):
return cmp(graph1.nodes[node1], graph2.nodes[node2])
return comparer
@staticmethod
def _edge_match_maker(cmp):
@wraps(cmp)
def comparer(graph1, edge1, graph2, edge2):
return cmp(graph1.edges[edge1], graph2.edges[edge2])
return comparer
def find_isomorphisms(self, symmetry=True):
"""Find all subgraph isomorphisms between subgraph and graph
Finds isomorphisms where :attr:`subgraph` <= :attr:`graph`.
Parameters
----------
symmetry: bool
Whether symmetry should be taken into account. If False, found
isomorphisms may be symmetrically equivalent.
Yields
------
dict
The found isomorphism mappings of {graph_node: subgraph_node}.
"""
# The networkx VF2 algorithm is slightly funny in when it yields an
# empty dict and when not.
if not self.subgraph:
yield {}
return
elif not self.graph:
return
elif len(self.graph) < len(self.subgraph):
return
if symmetry:
_, cosets = self.analyze_symmetry(
self.subgraph, self._sgn_partitions, self._sge_colors
)
constraints = self._make_constraints(cosets)
else:
constraints = []
candidates = self._find_nodecolor_candidates()
la_candidates = self._get_lookahead_candidates()
for sgn in self.subgraph:
extra_candidates = la_candidates[sgn]
if extra_candidates:
candidates[sgn] = candidates[sgn] | {frozenset(extra_candidates)}
if any(candidates.values()):
start_sgn = min(candidates, key=lambda n: min(candidates[n], key=len))
candidates[start_sgn] = (intersect(candidates[start_sgn]),)
yield from self._map_nodes(start_sgn, candidates, constraints)
else:
return
@staticmethod
def _find_neighbor_color_count(graph, node, node_color, edge_color):
"""
For `node` in `graph`, count the number of edges of a specific color
it has to nodes of a specific color.
"""
counts = Counter()
neighbors = graph[node]
for neighbor in neighbors:
n_color = node_color[neighbor]
if (node, neighbor) in edge_color:
e_color = edge_color[node, neighbor]
else:
e_color = edge_color[neighbor, node]
counts[e_color, n_color] += 1
return counts
def _get_lookahead_candidates(self):
"""
Returns a mapping of {subgraph node: collection of graph nodes} for
which the graph nodes are feasible candidates for the subgraph node, as
determined by looking ahead one edge.
"""
g_counts = {}
for gn in self.graph:
g_counts[gn] = self._find_neighbor_color_count(
self.graph, gn, self._gn_colors, self._ge_colors
)
candidates = defaultdict(set)
for sgn in self.subgraph:
sg_count = self._find_neighbor_color_count(
self.subgraph, sgn, self._sgn_colors, self._sge_colors
)
new_sg_count = Counter()
for (sge_color, sgn_color), count in sg_count.items():
try:
ge_color = self._edge_compatibility[sge_color]
gn_color = self._node_compatibility[sgn_color]
except KeyError:
pass
else:
new_sg_count[ge_color, gn_color] = count
for gn, g_count in g_counts.items():
if all(new_sg_count[x] <= g_count[x] for x in new_sg_count):
# Valid candidate
candidates[sgn].add(gn)
return candidates
def largest_common_subgraph(self, symmetry=True):
"""
Find the largest common induced subgraphs between :attr:`subgraph` and
:attr:`graph`.
Parameters
----------
symmetry: bool
Whether symmetry should be taken into account. If False, found
largest common subgraphs may be symmetrically equivalent.
Yields
------
dict
The found isomorphism mappings of {graph_node: subgraph_node}.
"""
# The networkx VF2 algorithm is slightly funny in when it yields an
# empty dict and when not.
if not self.subgraph:
yield {}
return
elif not self.graph:
return
if symmetry:
_, cosets = self.analyze_symmetry(
self.subgraph, self._sgn_partitions, self._sge_colors
)
constraints = self._make_constraints(cosets)
else:
constraints = []
candidates = self._find_nodecolor_candidates()
if any(candidates.values()):
yield from self._largest_common_subgraph(candidates, constraints)
else:
return
def analyze_symmetry(self, graph, node_partitions, edge_colors):
"""
Find a minimal set of permutations and corresponding co-sets that
describe the symmetry of :attr:`subgraph`.
Returns
-------
set[frozenset]
The found permutations. This is a set of frozenset of pairs of node
keys which can be exchanged without changing :attr:`subgraph`.
dict[collections.abc.Hashable, set[collections.abc.Hashable]]
The found co-sets. The co-sets is a dictionary of {node key:
set of node keys}. Every key-value pair describes which `values`
can be interchanged without changing nodes less than `key`.
"""
if self._symmetry_cache is not None:
key = hash(
(
tuple(graph.nodes),
tuple(graph.edges),
tuple(map(tuple, node_partitions)),
tuple(edge_colors.items()),
)
)
if key in self._symmetry_cache:
return self._symmetry_cache[key]
node_partitions = list(
self._refine_node_partitions(graph, node_partitions, edge_colors)
)
assert len(node_partitions) == 1
node_partitions = node_partitions[0]
permutations, cosets = self._process_ordered_pair_partitions(
graph, node_partitions, node_partitions, edge_colors
)
if self._symmetry_cache is not None:
self._symmetry_cache[key] = permutations, cosets
return permutations, cosets
def is_isomorphic(self, symmetry=False):
"""
Returns True if :attr:`graph` is isomorphic to :attr:`subgraph` and
False otherwise.
Returns
-------
bool
"""
return len(self.subgraph) == len(self.graph) and self.subgraph_is_isomorphic(
symmetry
)
def subgraph_is_isomorphic(self, symmetry=False):
"""
Returns True if a subgraph of :attr:`graph` is isomorphic to
:attr:`subgraph` and False otherwise.
Returns
-------
bool
"""
# symmetry=False, since we only need to know whether there is any
# example; figuring out all symmetry elements probably costs more time
# than it gains.
isom = next(self.subgraph_isomorphisms_iter(symmetry=symmetry), None)
return isom is not None
def isomorphisms_iter(self, symmetry=True):
"""
Does the same as :meth:`find_isomorphisms` if :attr:`graph` and
:attr:`subgraph` have the same number of nodes.
"""
if len(self.graph) == len(self.subgraph):
yield from self.subgraph_isomorphisms_iter(symmetry=symmetry)
def subgraph_isomorphisms_iter(self, symmetry=True):
"""Alternative name for :meth:`find_isomorphisms`."""
return self.find_isomorphisms(symmetry)
def _find_nodecolor_candidates(self):
"""
Per node in subgraph find all nodes in graph that have the same color.
"""
candidates = defaultdict(set)
for sgn in self.subgraph.nodes:
sgn_color = self._sgn_colors[sgn]
if sgn_color in self._node_compatibility:
gn_color = self._node_compatibility[sgn_color]
candidates[sgn].add(frozenset(self._gn_partitions[gn_color]))
else:
candidates[sgn].add(frozenset())
candidates = dict(candidates)
for sgn, options in candidates.items():
candidates[sgn] = frozenset(options)
return candidates
@staticmethod
def _make_constraints(cosets):
"""
Turn cosets into constraints.
"""
constraints = []
for node_i, node_ts in cosets.items():
for node_t in node_ts:
if node_i != node_t:
# Node i must be smaller than node t.
constraints.append((node_i, node_t))
return constraints
@staticmethod
def _find_node_edge_color(graph, node_colors, edge_colors):
"""
For every node in graph, come up with a color that combines 1) the
color of the node, and 2) the number of edges of a color to each type
of node.
"""
counts = defaultdict(lambda: defaultdict(int))
for node1, node2 in graph.edges:
if (node1, node2) in edge_colors:
# FIXME directed graphs
ecolor = edge_colors[node1, node2]
else:
ecolor = edge_colors[node2, node1]
# Count per node how many edges it has of what color to nodes of
# what color
counts[node1][ecolor, node_colors[node2]] += 1
counts[node2][ecolor, node_colors[node1]] += 1
node_edge_colors = dict()
for node in graph.nodes:
node_edge_colors[node] = node_colors[node], set(counts[node].items())
return node_edge_colors
@staticmethod
def _get_permutations_by_length(items):
"""
Get all permutations of items, but only permute items with the same
length.
>>> found = list(ISMAGS._get_permutations_by_length([[1], [2], [3, 4], [4, 5]]))
>>> answer = [
... (([1], [2]), ([3, 4], [4, 5])),
... (([1], [2]), ([4, 5], [3, 4])),
... (([2], [1]), ([3, 4], [4, 5])),
... (([2], [1]), ([4, 5], [3, 4])),
... ]
>>> found == answer
True
"""
by_len = defaultdict(list)
for item in items:
by_len[len(item)].append(item)
yield from itertools.product(
*(itertools.permutations(by_len[l]) for l in sorted(by_len))
)
@classmethod
def _refine_node_partitions(cls, graph, node_partitions, edge_colors, branch=False):
"""
Given a partition of nodes in graph, make the partitions smaller such
that all nodes in a partition have 1) the same color, and 2) the same
number of edges to specific other partitions.
"""
def equal_color(node1, node2):
return node_edge_colors[node1] == node_edge_colors[node2]
node_partitions = list(node_partitions)
node_colors = partition_to_color(node_partitions)
node_edge_colors = cls._find_node_edge_color(graph, node_colors, edge_colors)
if all(
are_all_equal(node_edge_colors[node] for node in partition)
for partition in node_partitions
):
yield node_partitions
return
new_partitions = []
output = [new_partitions]
for partition in node_partitions:
if not are_all_equal(node_edge_colors[node] for node in partition):
refined = make_partitions(partition, equal_color)
if (
branch
and len(refined) != 1
and len({len(r) for r in refined}) != len([len(r) for r in refined])
):
# This is where it breaks. There are multiple new cells
# in refined with the same length, and their order
# matters.
# So option 1) Hit it with a big hammer and simply make all
# orderings.
permutations = cls._get_permutations_by_length(refined)
new_output = []
for n_p in output:
for permutation in permutations:
new_output.append(n_p + list(permutation[0]))
output = new_output
else:
for n_p in output:
n_p.extend(sorted(refined, key=len))
else:
for n_p in output:
n_p.append(partition)
for n_p in output:
yield from cls._refine_node_partitions(graph, n_p, edge_colors, branch)
def _edges_of_same_color(self, sgn1, sgn2):
"""
Returns all edges in :attr:`graph` that have the same colour as the
edge between sgn1 and sgn2 in :attr:`subgraph`.
"""
if (sgn1, sgn2) in self._sge_colors:
# FIXME directed graphs
sge_color = self._sge_colors[sgn1, sgn2]
else:
sge_color = self._sge_colors[sgn2, sgn1]
if sge_color in self._edge_compatibility:
ge_color = self._edge_compatibility[sge_color]
g_edges = self._ge_partitions[ge_color]
else:
g_edges = []
return g_edges
def _map_nodes(self, sgn, candidates, constraints, mapping=None, to_be_mapped=None):
"""
Find all subgraph isomorphisms honoring constraints.
"""
if mapping is None:
mapping = {}
else:
mapping = mapping.copy()
if to_be_mapped is None:
to_be_mapped = set(self.subgraph.nodes)
# Note, we modify candidates here. Doesn't seem to affect results, but
# remember this.
# candidates = candidates.copy()
sgn_candidates = intersect(candidates[sgn])
candidates[sgn] = frozenset([sgn_candidates])
for gn in sgn_candidates:
# We're going to try to map sgn to gn.
if gn in mapping.values() or sgn not in to_be_mapped:
# gn is already mapped to something
continue # pragma: no cover
# REDUCTION and COMBINATION
mapping[sgn] = gn
# BASECASE
if to_be_mapped == set(mapping.keys()):
yield {v: k for k, v in mapping.items()}
continue
left_to_map = to_be_mapped - set(mapping.keys())
new_candidates = candidates.copy()
sgn_neighbours = set(self.subgraph[sgn])
not_gn_neighbours = set(self.graph.nodes) - set(self.graph[gn])
for sgn2 in left_to_map:
if sgn2 not in sgn_neighbours:
gn2_options = not_gn_neighbours
else:
# Get all edges to gn of the right color:
g_edges = self._edges_of_same_color(sgn, sgn2)
# FIXME directed graphs
# And all nodes involved in those which are connected to gn
gn2_options = {n for e in g_edges for n in e if gn in e}
# Node color compatibility should be taken care of by the
# initial candidate lists made by find_subgraphs
# Add gn2_options to the right collection. Since new_candidates
# is a dict of frozensets of frozensets of node indices it's
# a bit clunky. We can't do .add, and + also doesn't work. We
# could do |, but I deem union to be clearer.
new_candidates[sgn2] = new_candidates[sgn2].union(
[frozenset(gn2_options)]
)
if (sgn, sgn2) in constraints:
gn2_options = {gn2 for gn2 in self.graph if gn2 > gn}
elif (sgn2, sgn) in constraints:
gn2_options = {gn2 for gn2 in self.graph if gn2 < gn}
else:
continue # pragma: no cover
new_candidates[sgn2] = new_candidates[sgn2].union(
[frozenset(gn2_options)]
)
# The next node is the one that is unmapped and has fewest
# candidates
# Pylint disables because it's a one-shot function.
next_sgn = min(
left_to_map, key=lambda n: min(new_candidates[n], key=len)
) # pylint: disable=cell-var-from-loop
yield from self._map_nodes(
next_sgn,
new_candidates,
constraints,
mapping=mapping,
to_be_mapped=to_be_mapped,
)
# Unmap sgn-gn. Strictly not necessary since it'd get overwritten
# when making a new mapping for sgn.
# del mapping[sgn]
def _largest_common_subgraph(self, candidates, constraints, to_be_mapped=None):
"""
Find all largest common subgraphs honoring constraints.
"""
if to_be_mapped is None:
to_be_mapped = {frozenset(self.subgraph.nodes)}
# The LCS problem is basically a repeated subgraph isomorphism problem
# with smaller and smaller subgraphs. We store the nodes that are
# "part of" the subgraph in to_be_mapped, and we make it a little
# smaller every iteration.
# pylint disable because it's guarded against by the default value
current_size = len(
next(iter(to_be_mapped), [])
) # pylint: disable=stop-iteration-return
found_iso = False
if current_size <= len(self.graph):
# There's no point in trying to find isomorphisms of
# graph >= subgraph if subgraph has more nodes than graph.
# Try the isomorphism first with the nodes with lowest ID. So sort
# them. Those are more likely to be part of the final
# correspondence. This makes finding the first answer(s) faster. In
# theory.
for nodes in sorted(to_be_mapped, key=sorted):
# Find the isomorphism between subgraph[to_be_mapped] <= graph
next_sgn = min(nodes, key=lambda n: min(candidates[n], key=len))
isomorphs = self._map_nodes(
next_sgn, candidates, constraints, to_be_mapped=nodes
)
# This is effectively `yield from isomorphs`, except that we look
# whether an item was yielded.
try:
item = next(isomorphs)
except StopIteration:
pass
else:
yield item
yield from isomorphs
found_iso = True
# BASECASE
if found_iso or current_size == 1:
# Shrinking has no point because either 1) we end up with a smaller
# common subgraph (and we want the largest), or 2) there'll be no
# more subgraph.
return
left_to_be_mapped = set()
for nodes in to_be_mapped:
for sgn in nodes:
# We're going to remove sgn from to_be_mapped, but subject to
# symmetry constraints. We know that for every constraint we
# have those subgraph nodes are equal. So whenever we would
# remove the lower part of a constraint, remove the higher
# instead. This is all dealt with by _remove_node. And because
# left_to_be_mapped is a set, we don't do double work.
# And finally, make the subgraph one node smaller.
# REDUCTION
new_nodes = self._remove_node(sgn, nodes, constraints)
left_to_be_mapped.add(new_nodes)
# COMBINATION
yield from self._largest_common_subgraph(
candidates, constraints, to_be_mapped=left_to_be_mapped
)
@staticmethod
def _remove_node(node, nodes, constraints):
"""
Returns a new set where node has been removed from nodes, subject to
symmetry constraints. We know, that for every constraint we have
those subgraph nodes are equal. So whenever we would remove the
lower part of a constraint, remove the higher instead.
"""
while True:
for low, high in constraints:
if low == node and high in nodes:
node = high
break
else: # no break, couldn't find node in constraints
break
return frozenset(nodes - {node})
@staticmethod
def _find_permutations(top_partitions, bottom_partitions):
"""
Return the pairs of top/bottom partitions where the partitions are
different. Ensures that all partitions in both top and bottom
partitions have size 1.
"""
# Find permutations
permutations = set()
for top, bot in zip(top_partitions, bottom_partitions):
# top and bot have only one element
if len(top) != 1 or len(bot) != 1:
raise IndexError(
"Not all nodes are coupled. This is"
f" impossible: {top_partitions}, {bottom_partitions}"
)
if top != bot:
permutations.add(frozenset((next(iter(top)), next(iter(bot)))))
return permutations
@staticmethod
def _update_orbits(orbits, permutations):
"""
Update orbits based on permutations. Orbits is modified in place.
For every pair of items in permutations their respective orbits are
merged.
"""
for permutation in permutations:
node, node2 = permutation
# Find the orbits that contain node and node2, and replace the
# orbit containing node with the union
first = second = None
for idx, orbit in enumerate(orbits):
if first is not None and second is not None:
break
if node in orbit:
first = idx
if node2 in orbit:
second = idx
if first != second:
orbits[first].update(orbits[second])
del orbits[second]
def _couple_nodes(
self,
top_partitions,
bottom_partitions,
pair_idx,
t_node,
b_node,
graph,
edge_colors,
):
"""
Generate new partitions from top and bottom_partitions where t_node is
coupled to b_node. pair_idx is the index of the partitions where t_ and
b_node can be found.
"""
t_partition = top_partitions[pair_idx]
b_partition = bottom_partitions[pair_idx]
assert t_node in t_partition and b_node in b_partition
# Couple node to node2. This means they get their own partition
new_top_partitions = [top.copy() for top in top_partitions]
new_bottom_partitions = [bot.copy() for bot in bottom_partitions]
new_t_groups = {t_node}, t_partition - {t_node}
new_b_groups = {b_node}, b_partition - {b_node}
# Replace the old partitions with the coupled ones
del new_top_partitions[pair_idx]
del new_bottom_partitions[pair_idx]
new_top_partitions[pair_idx:pair_idx] = new_t_groups
new_bottom_partitions[pair_idx:pair_idx] = new_b_groups
new_top_partitions = self._refine_node_partitions(
graph, new_top_partitions, edge_colors
)
new_bottom_partitions = self._refine_node_partitions(
graph, new_bottom_partitions, edge_colors, branch=True
)
new_top_partitions = list(new_top_partitions)
assert len(new_top_partitions) == 1
new_top_partitions = new_top_partitions[0]
for bot in new_bottom_partitions:
yield list(new_top_partitions), bot
def _process_ordered_pair_partitions(
self,
graph,
top_partitions,
bottom_partitions,
edge_colors,
orbits=None,
cosets=None,
):
"""
Processes ordered pair partitions as per the reference paper. Finds and
returns all permutations and cosets that leave the graph unchanged.
"""
if orbits is None:
orbits = [{node} for node in graph.nodes]
else:
# Note that we don't copy orbits when we are given one. This means
# we leak information between the recursive branches. This is
# intentional!
orbits = orbits
if cosets is None:
cosets = {}
else:
cosets = cosets.copy()
assert all(
len(t_p) == len(b_p) for t_p, b_p in zip(top_partitions, bottom_partitions)
)
# BASECASE
if all(len(top) == 1 for top in top_partitions):
# All nodes are mapped
permutations = self._find_permutations(top_partitions, bottom_partitions)
self._update_orbits(orbits, permutations)
if permutations:
return [permutations], cosets
else:
return [], cosets
permutations = []
unmapped_nodes = {
(node, idx)
for idx, t_partition in enumerate(top_partitions)
for node in t_partition
if len(t_partition) > 1
}
node, pair_idx = min(unmapped_nodes)
b_partition = bottom_partitions[pair_idx]
for node2 in sorted(b_partition):
if len(b_partition) == 1:
# Can never result in symmetry
continue
if node != node2 and any(
node in orbit and node2 in orbit for orbit in orbits
):
# Orbit prune branch
continue
# REDUCTION
# Couple node to node2
partitions = self._couple_nodes(
top_partitions,
bottom_partitions,
pair_idx,
node,
node2,
graph,
edge_colors,
)
for opp in partitions:
new_top_partitions, new_bottom_partitions = opp
new_perms, new_cosets = self._process_ordered_pair_partitions(
graph,
new_top_partitions,
new_bottom_partitions,
edge_colors,
orbits,
cosets,
)
# COMBINATION
permutations += new_perms
cosets.update(new_cosets)
mapped = {
k
for top, bottom in zip(top_partitions, bottom_partitions)
for k in top
if len(top) == 1 and top == bottom
}
ks = {k for k in graph.nodes if k < node}
# Have all nodes with ID < node been mapped?
find_coset = ks <= mapped and node not in cosets
if find_coset:
# Find the orbit that contains node
for orbit in orbits:
if node in orbit:
cosets[node] = orbit.copy()
return permutations, cosets
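# Illustrative sketch (added for exposition, not part of the public API):
# analyzing the symmetry of a small graph and turning the resulting cosets
# into the ordering constraints used during matching. This relies on the
# private helpers above and assumes networkx is importable as nx.
def _example_symmetry_constraints():
    import networkx as nx
    path = nx.path_graph(3)
    ismags = ISMAGS(path, path)
    _, cosets = ismags.analyze_symmetry(
        ismags.subgraph, ismags._sgn_partitions, ismags._sge_colors
    )
    # For a 3-node path the two end nodes are symmetric, so an ordering
    # constraint between them is returned.
    return ismags._make_constraints(cosets)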
| gpl-3.0 | -174,510,596,902,812,900 | 36.002595 | 88 | 0.56678 | false | 4.00365 | false | false | false |
ngoix/OCRF | sklearn/datasets/fetch_ml_mieux.py | 1 | 13778 | from zipfile import ZipFile
from io import BytesIO
import logging
from os.path import exists, join
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import numpy as np
from .base import get_data_home
from .base import Bunch
from .base import _pkl_filepath
from ..utils.fixes import makedirs
from ..externals import joblib
from ..utils import check_random_state
logger = logging.getLogger()
def fetch_spambase(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the spambase dataset, downloading it if necessary.
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : string, optional
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : boolean, default=True
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, optional (default=None)
Random state for shuffling the dataset.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
shuffle : bool, default=False
Whether to shuffle dataset.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (4601, 57)
Each row corresponds to the 57 features in the dataset.
dataset.target : numpy array of shape (4601,)
Each value is 1 for spam and 0 for non-spam.
dataset.DESCR : string
Description of the spambase dataset.
"""
URL = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/spambase/spambase.zip')
data_home = get_data_home(data_home=data_home)
spambase_dir = join(data_home, "spambase")
samples_path = _pkl_filepath(spambase_dir, "samples")
targets_path = _pkl_filepath(spambase_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(spambase_dir, exist_ok=True)
logger.warning("Downloading %s" % URL)
f = BytesIO(urlopen(URL).read())
file_ = ZipFile(f, mode='r').open('spambase.data')
Xy = np.genfromtxt(file_, delimiter=',')
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
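# Illustrative usage sketch (added for exposition): load the dataset,
# downloading it on first use, shuffled with a fixed seed.
def _example_fetch_spambase():
    bunch = fetch_spambase(shuffle=True, random_state=0)
    # bunch.data has shape (4601, 57); bunch.target holds 0/1 spam labels.
    return bunch.data, bunch.target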
def fetch_annthyroid(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the annthyroid dataset, downloading it if necessary.
"""
URL1 = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/thyroid-disease/ann-train.data')
URL2 = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/thyroid-disease/ann-test.data')
data_home = get_data_home(data_home=data_home)
annthyroid_dir = join(data_home, "annthyroid")
samples_path = _pkl_filepath(annthyroid_dir, "samples")
targets_path = _pkl_filepath(annthyroid_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(annthyroid_dir, exist_ok=True)
logger.warning("Downloading %s" % URL1)
f = BytesIO(urlopen(URL1).read())
# ou X = np.load(f)
Xy1 = np.genfromtxt(f, delimiter=' ')
logger.warning("Downloading %s" % URL2)
f = BytesIO(urlopen(URL2).read())
Xy2 = np.genfromtxt(f, delimiter=' ')
Xy = np.r_[Xy1, Xy2]
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
def fetch_arrhythmia(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the arrhythmia dataset, downloading it if necessary.
"""
URL = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/arrhythmia/arrhythmia.data')
data_home = get_data_home(data_home=data_home)
arrhythmia_dir = join(data_home, "arrhythmia")
samples_path = _pkl_filepath(arrhythmia_dir, "samples")
targets_path = _pkl_filepath(arrhythmia_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(arrhythmia_dir, exist_ok=True)
logger.warning("Downloading %s" % URL)
f = BytesIO(urlopen(URL).read())
# ou X = np.load(f)
Xy = np.genfromtxt(f, delimiter=',')
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
def fetch_pendigits(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the pendigits dataset, downloading it if necessary.
"""
URL1 = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/pendigits/pendigits.tra')
URL2 = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/pendigits/pendigits.tes')
data_home = get_data_home(data_home=data_home)
pendigits_dir = join(data_home, "pendigits")
samples_path = _pkl_filepath(pendigits_dir, "samples")
targets_path = _pkl_filepath(pendigits_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(pendigits_dir, exist_ok=True)
logger.warning("Downloading %s" % URL1)
f = BytesIO(urlopen(URL1).read())
# ou X = np.load(f)
Xy1 = np.genfromtxt(f, delimiter=',')
logger.warning("Downloading %s" % URL2)
f = BytesIO(urlopen(URL2).read())
Xy2 = np.genfromtxt(f, delimiter=',')
Xy = np.r_[Xy1, Xy2]
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
def fetch_pima(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the pima-indians-diabetes dataset, downloading it if necessary.
"""
URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'pima-indians-diabetes/pima-indians-diabetes.data')
data_home = get_data_home(data_home=data_home)
pima_dir = join(data_home, "pima")
samples_path = _pkl_filepath(pima_dir, "samples")
targets_path = _pkl_filepath(pima_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(pima_dir, exist_ok=True)
logger.warning("Downloading %s" % URL)
f = BytesIO(urlopen(URL).read())
# ou X = np.load(f)
Xy = np.genfromtxt(f, delimiter=',')
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
def fetch_wilt(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the wilt dataset, downloading it if necessary.
"""
URL = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/00285/wilt.zip')
data_home = get_data_home(data_home=data_home)
wilt_dir = join(data_home, "wilt")
samples_path = _pkl_filepath(wilt_dir, "samples")
targets_path = _pkl_filepath(wilt_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(wilt_dir, exist_ok=True)
logger.warning("Downloading %s" % URL)
f = BytesIO(urlopen(URL).read())
# ou X = np.load(f)
ff = ZipFile(f, mode='r')
file1 = ff.open('training.csv')
Xy1 = np.genfromtxt(file1, delimiter=',', dtype=object)
file2 = ff.open('testing.csv')
Xy2 = np.genfromtxt(file2, delimiter=',', dtype=object)
        # drop the first (header) row of each file:
Xy1 = Xy1[1:, :]
Xy2 = Xy2[1:, :]
Xy = np.r_[Xy1, Xy2]
X = Xy[:, 1:].astype(float)
y = Xy[:, 0]
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
def fetch_internet_ads(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the internet_ads dataset, downloading it if necessary.
"""
URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'internet_ads/ad.data')
data_home = get_data_home(data_home=data_home)
internet_ads_dir = join(data_home, "internet_ads")
samples_path = _pkl_filepath(internet_ads_dir, "samples")
targets_path = _pkl_filepath(internet_ads_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(internet_ads_dir, exist_ok=True)
logger.warning("Downloading %s" % URL)
f = BytesIO(urlopen(URL).read())
Xy = np.genfromtxt(f, delimiter=',', dtype=object)
X = Xy[:, :-1].astype(object)
X = np.delete(X, [0, 1, 2, 3], axis=1)
# remaining features are not continuous: not adapted to OneClassRF
X = X.astype(float)
y = Xy[:, -1]
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
def fetch_adult(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the adult dataset, downloading it if necessary.
"""
URL1 = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/adult/adult.data')
URL2 = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/adult/adult.test')
data_home = get_data_home(data_home=data_home)
adult_dir = join(data_home, "adult")
samples_path = _pkl_filepath(adult_dir, "samples")
targets_path = _pkl_filepath(adult_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(adult_dir, exist_ok=True)
logger.warning("Downloading %s" % URL1)
f = BytesIO(urlopen(URL1).read())
        # or X = np.load(f)
Xy1 = np.genfromtxt(f, delimiter=',', dtype=object)
# select continuous features:
Xy1 = Xy1[:, [0, 2, 4, 10, 11, 12, -1]]
logger.warning("Downloading %s" % URL2)
f = BytesIO(urlopen(URL2).read())
        # same as Xy1, but skip the first line, which contains instructions:
Xy2 = np.genfromtxt(f, delimiter=',', skip_header=1, dtype=object)
Xy2 = Xy2[:, [0, 2, 4, 10, 11, 12, -1]]
Xy = np.r_[Xy1, Xy2]
X = Xy[:, :-1].astype(float)
y = Xy[:, -1]
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
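# Usage sketch (not part of the original module; names follow the fetchers above):
# each fetch_* helper downloads once, caches the arrays with joblib under
# ``data_home`` and returns a Bunch, so a typical call looks like
#
#     pima = fetch_pima(shuffle=True, random_state=0)
#     X, y = pima.data, pima.target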
| bsd-3-clause | -8,359,151,599,805,482,000 | 31.495283 | 78 | 0.604369 | false | 3.307249 | false | false | false |
capitalone/cloud-custodian | c7n/filters/vpc.py | 1 | 10368 | # Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n.exceptions import PolicyValidationError
from c7n.utils import local_session, type_schema
from .core import Filter, ValueFilter
from .related import RelatedResourceFilter
class MatchResourceValidator:
def validate(self):
if self.data.get('match-resource'):
            self.required_keys = {'key'}
return super(MatchResourceValidator, self).validate()
class SecurityGroupFilter(MatchResourceValidator, RelatedResourceFilter):
"""Filter a resource by its associated security groups."""
schema = type_schema(
'security-group', rinherit=ValueFilter.schema,
**{'match-resource': {'type': 'boolean'},
'operator': {'enum': ['and', 'or']}})
schema_alias = True
RelatedResource = "c7n.resources.vpc.SecurityGroup"
AnnotationKey = "matched-security-groups"
class SubnetFilter(MatchResourceValidator, RelatedResourceFilter):
"""Filter a resource by its associated subnets."""
schema = type_schema(
'subnet', rinherit=ValueFilter.schema,
**{'match-resource': {'type': 'boolean'},
'operator': {'enum': ['and', 'or']}})
schema_alias = True
RelatedResource = "c7n.resources.vpc.Subnet"
AnnotationKey = "matched-subnets"
class VpcFilter(MatchResourceValidator, RelatedResourceFilter):
"""Filter a resource by its associated vpc."""
schema = type_schema(
'vpc', rinherit=ValueFilter.schema,
**{'match-resource': {'type': 'boolean'},
'operator': {'enum': ['and', 'or']}})
schema_alias = True
RelatedResource = "c7n.resources.vpc.Vpc"
AnnotationKey = "matched-vpcs"
class DefaultVpcBase(Filter):
"""Filter to resources in a default vpc."""
vpcs = None
default_vpc = None
permissions = ('ec2:DescribeVpcs',)
def match(self, vpc_id):
if self.default_vpc is None:
self.log.debug("querying default vpc %s" % vpc_id)
client = local_session(self.manager.session_factory).client('ec2')
vpcs = [v['VpcId'] for v
in client.describe_vpcs()['Vpcs']
if v['IsDefault']]
if vpcs:
self.default_vpc = vpcs.pop()
        return vpc_id == self.default_vpc
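    # Sketch (not in this file): resource-specific subclasses typically
    # implement __call__ and delegate to match(), e.g.
    #
    #   def __call__(self, resource):
    #       return bool(resource.get('VpcId')) and self.match(resource['VpcId'])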
class NetworkLocation(Filter):
"""On a network attached resource, determine intersection of
security-group attributes, subnet attributes, and resource attributes.
    The use case is a bit specialized: for most needs the `subnet`
    and `security-group` filters suffice. But say, for example, you wanted to
    verify that an ec2 instance was only using subnets and security groups
    with a given tag value, and that the tag was not present on the resource itself.
:Example:
.. code-block:: yaml
policies:
- name: ec2-mismatched-sg-remove
resource: ec2
filters:
- type: network-location
compare: ["resource","security-group"]
key: "tag:TEAM_NAME"
ignore:
- "tag:TEAM_NAME": Enterprise
actions:
- type: modify-security-groups
remove: network-location
isolation-group: sg-xxxxxxxx
"""
schema = type_schema(
'network-location',
**{'missing-ok': {
'type': 'boolean',
'default': False,
'description': (
"How to handle missing keys on elements, by default this causes"
"resources to be considered not-equal")},
'match': {'type': 'string', 'enum': ['equal', 'not-equal'],
                     'default': 'not-equal'},
'compare': {
'type': 'array',
'description': (
'Which elements of network location should be considered when'
' matching.'),
'default': ['resource', 'subnet', 'security-group'],
'items': {
'enum': ['resource', 'subnet', 'security-group']}},
'key': {
'type': 'string',
'description': 'The attribute expression that should be matched on'},
'max-cardinality': {
'type': 'integer', 'default': 1,
'title': ''},
'ignore': {'type': 'array', 'items': {'type': 'object'}},
'required': ['key'],
})
schema_alias = True
permissions = ('ec2:DescribeSecurityGroups', 'ec2:DescribeSubnets')
def validate(self):
rfilters = self.manager.filter_registry.keys()
if 'subnet' not in rfilters:
raise PolicyValidationError(
"network-location requires resource subnet filter availability on %s" % (
self.manager.data))
if 'security-group' not in rfilters:
raise PolicyValidationError(
"network-location requires resource security-group filter availability on %s" % (
self.manager.data))
return self
def process(self, resources, event=None):
self.sg = self.manager.filter_registry.get('security-group')({}, self.manager)
related_sg = self.sg.get_related(resources)
self.subnet = self.manager.filter_registry.get('subnet')({}, self.manager)
related_subnet = self.subnet.get_related(resources)
self.sg_model = self.manager.get_resource_manager('security-group').get_model()
self.subnet_model = self.manager.get_resource_manager('subnet').get_model()
self.vf = self.manager.filter_registry.get('value')({}, self.manager)
# filter options
key = self.data.get('key')
self.compare = self.data.get('compare', ['subnet', 'security-group', 'resource'])
self.max_cardinality = self.data.get('max-cardinality', 1)
self.match = self.data.get('match', 'not-equal')
self.missing_ok = self.data.get('missing-ok', False)
results = []
for r in resources:
resource_sgs = self.filter_ignored(
[related_sg[sid] for sid in self.sg.get_related_ids([r])])
resource_subnets = self.filter_ignored([
related_subnet[sid] for sid in self.subnet.get_related_ids([r])])
found = self.process_resource(r, resource_sgs, resource_subnets, key)
if found:
results.append(found)
return results
def filter_ignored(self, resources):
ignores = self.data.get('ignore', ())
results = []
for r in resources:
found = False
for i in ignores:
for k, v in i.items():
if self.vf.get_resource_value(k, r) == v:
found = True
if found is True:
break
if found is True:
continue
results.append(r)
return results
def process_resource(self, r, resource_sgs, resource_subnets, key):
evaluation = []
sg_space = set()
subnet_space = set()
if 'subnet' in self.compare:
subnet_values = {
rsub[self.subnet_model.id]: self.subnet.get_resource_value(key, rsub)
for rsub in resource_subnets}
if not self.missing_ok and None in subnet_values.values():
evaluation.append({
'reason': 'SubnetLocationAbsent',
'subnets': subnet_values})
subnet_space = set(filter(None, subnet_values.values()))
if len(subnet_space) > self.max_cardinality:
evaluation.append({
'reason': 'SubnetLocationCardinality',
'subnets': subnet_values})
if 'security-group' in self.compare:
sg_values = {
rsg[self.sg_model.id]: self.sg.get_resource_value(key, rsg)
for rsg in resource_sgs}
if not self.missing_ok and None in sg_values.values():
evaluation.append({
'reason': 'SecurityGroupLocationAbsent',
'security-groups': sg_values})
sg_space = set(filter(None, sg_values.values()))
if len(sg_space) > self.max_cardinality:
evaluation.append({
'reason': 'SecurityGroupLocationCardinality',
'security-groups': sg_values})
if ('subnet' in self.compare and
'security-group' in self.compare and
sg_space != subnet_space):
evaluation.append({
'reason': 'LocationMismatch',
'subnets': subnet_values,
'security-groups': sg_values})
if 'resource' in self.compare:
r_value = self.vf.get_resource_value(key, r)
if not self.missing_ok and r_value is None:
evaluation.append({
'reason': 'ResourceLocationAbsent',
'resource': r_value})
elif 'security-group' in self.compare and resource_sgs and r_value not in sg_space:
evaluation.append({
'reason': 'ResourceLocationMismatch',
'resource': r_value,
'security-groups': sg_values})
elif 'subnet' in self.compare and resource_subnets and r_value not in subnet_space:
evaluation.append({
'reason': 'ResourceLocationMismatch',
'resource': r_value,
'subnet': subnet_values})
if 'security-group' in self.compare and resource_sgs:
mismatched_sgs = {sg_id: sg_value
for sg_id, sg_value in sg_values.items()
if sg_value != r_value}
if mismatched_sgs:
evaluation.append({
'reason': 'SecurityGroupMismatch',
'resource': r_value,
'security-groups': mismatched_sgs})
if evaluation and self.match == 'not-equal':
r['c7n:NetworkLocation'] = evaluation
return r
elif not evaluation and self.match == 'equal':
return r
| apache-2.0 | -2,030,043,321,442,465,000 | 37.831461 | 97 | 0.558738 | false | 4.385787 | false | false | false |
vmagamedov/pi | fixers/fix_imports.py | 1 | 2965 | import os.path
from lib2to3.pygram import python_symbols
from lib2to3.fixer_util import Name
from lib2to3.fixes.fix_imports import FixImports as BaseFixImports, alternates
PATH = '{}/../pi/_requires'.format(os.path.dirname(__file__))
# strip the ".py" extension; str.rstrip('.py') would also eat trailing '.', 'p' and 'y' characters
LIBS = [i[:-3] if i.endswith('.py') else i for i in os.listdir(PATH)
        if not i.startswith('_') and not i.endswith(('.pyc', '.pyo'))]
def build_pattern(mapping):
mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
dotted_mod_list = ' | '.join(["module_name=dotted_name<'{}' ('.' NAME)*>"
.format(key)
for key in mapping])
bare_names = alternates(mapping.keys())
yield """name_import=import_name< 'import' ((%s) |
multiple_imports=dotted_as_names< any* (%s) any* >) >
""" % (dotted_mod_list, dotted_mod_list)
yield """import_from< 'from' (%s) 'import' ['(']
( any | import_as_name< any 'as' any > |
import_as_names< any* >) [')'] >
""" % dotted_mod_list
yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
multiple_imports=dotted_as_names<
any* dotted_as_name< (%s) 'as' any > any* >) >
""" % (dotted_mod_list, dotted_mod_list)
yield """name_import=import_name< 'import' ((%s) |
multiple_imports=dotted_as_names< any* (%s) any* >) >
""" % (mod_list, mod_list)
yield """import_from< 'from' (%s) 'import' ['(']
( any | import_as_name< any 'as' any > |
import_as_names< any* >) [')'] >
""" % mod_list
yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
multiple_imports=dotted_as_names<
any* dotted_as_name< (%s) 'as' any > any* >) >
""" % (mod_list, mod_list)
# Find usages of module members in code e.g. thread.foo(bar)
yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names
class FixImports(BaseFixImports):
mapping = {"{}".format(lib): 'pi._requires.{}'.format(lib)
for lib in LIBS}
def build_pattern(self):
return "|".join(build_pattern(self.mapping))
def transform(self, node, results):
import_mod = results.get("module_name")
if import_mod and import_mod.type == python_symbols.dotted_name:
mod_name = import_mod.children[0].value
new_name = self.mapping[mod_name]
tail = ''.join(child.value for child in import_mod.children[1:])
import_mod.replace(Name(new_name + tail, prefix=import_mod.prefix))
if "name_import" in results:
self.replace[mod_name] = new_name
if "multiple_imports" in results:
results = self.match(node)
if results:
self.transform(node, results)
else:
return super().transform(node, results)
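# Usage sketch (hypothetical, not part of this module): if this file is importable
# as a fixer package, e.g. ``fixers.fix_imports``, lib2to3 can apply it directly:
#
#     from lib2to3.refactor import RefactoringTool
#     tool = RefactoringTool(['fixers.fix_imports'])
#     print(tool.refactor_string('import yaml\n', '<example>'))  # module name illustrative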
| bsd-3-clause | -102,682,145,929,447,380 | 41.357143 | 79 | 0.536931 | false | 3.467836 | false | false | false |
swapnilsm/redis-rw-lock | setup.py | 1 | 1796 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
from os.path import dirname
from os.path import join
from setuptools import setup
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
setup(
name='redis-rw-lock',
version='1.0.6',
license='MIT',
description="Redis based Reader-Writer lock with Writer's priority.",
long_description='',
author='Swapnil S. Mahajan',
author_email='[email protected]',
url='https://github.com/swapnilsm/redis-rw-lock',
packages=['redis_rw_lock', ],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Utilities',
],
keywords=[
'redis', 'lock', 'rwlock'
],
install_requires=[
'redis>=2.10.0',
'python-redis-lock>=3.2.0'
]
)
| mit | 9,055,668,874,280,245,000 | 29.965517 | 90 | 0.603563 | false | 4.017897 | false | false | false |
rwatson/chromium-capsicum | webkit/tools/layout_tests/layout_package/platform_utils_mac.py | 1 | 5197 | # Copyright (c) 2008-2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is the Mac implementation of the layout_package.platform_utils
package. This file should only be imported by that package."""
import os
import platform
import signal
import subprocess
import path_utils
def PlatformName():
"""Returns the name of the platform we're currently running on."""
# At the moment all chromium mac results are version-independent. At some
# point we may need to return 'chromium-mac' + PlatformVersion()
return 'chromium-mac'
def PlatformVersion():
"""Returns the version string for the platform, e.g. '-vista' or
'-snowleopard'. If the platform does not distinguish between
minor versions, it returns ''."""
os_version_string = platform.mac_ver()[0] # e.g. "10.5.6"
if not os_version_string:
return '-leopard'
release_version = int(os_version_string.split('.')[1])
# we don't support 'tiger' or earlier releases
if release_version == 5:
return '-leopard'
elif release_version == 6:
return '-snowleopard'
return ''
def GetNumCores():
"""Returns the number of cores on the machine. For hyperthreaded machines,
this will be double the number of actual processors."""
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
# TODO: We should add leopard and snowleopard to the list of paths to check
# once we start running the tests from snowleopard.
def BaselineSearchPath(all_versions=False):
"""Returns the list of directories to search for baselines/results, in
order of preference. Paths are relative to the top of the source tree."""
return [path_utils.ChromiumBaselinePath(PlatformName()),
path_utils.WebKitBaselinePath('mac' + PlatformVersion()),
path_utils.WebKitBaselinePath('mac')]
def WDiffPath():
"""Path to the WDiff executable, which we assume is already installed and
in the user's $PATH."""
return 'wdiff'
def ImageDiffPath(target):
"""Path to the image_diff executable
Args:
target: build type - 'Debug','Release',etc."""
return path_utils.PathFromBase('xcodebuild', target, 'image_diff')
def LayoutTestHelperPath(target):
"""Path to the layout_test_helper executable, if needed, empty otherwise
Args:
target: build type - 'Debug','Release',etc."""
return path_utils.PathFromBase('xcodebuild', target, 'layout_test_helper')
def TestShellPath(target):
"""Path to the test_shell executable.
Args:
target: build type - 'Debug','Release',etc."""
# TODO(pinkerton): make |target| happy with case-sensitive file systems.
return path_utils.PathFromBase('xcodebuild', target, 'TestShell.app',
'Contents', 'MacOS','TestShell')
def ApacheExecutablePath():
"""Returns the executable path to start Apache"""
return os.path.join("/usr", "sbin", "httpd")
def ApacheConfigFilePath():
"""Returns the path to Apache config file"""
return path_utils.PathFromBase("third_party", "WebKit", "LayoutTests", "http",
"conf", "apache2-httpd.conf")
def LigHTTPdExecutablePath():
"""Returns the executable path to start LigHTTPd"""
return path_utils.PathFromBase('third_party', 'lighttpd', 'mac',
'bin', 'lighttpd')
def LigHTTPdModulePath():
"""Returns the library module path for LigHTTPd"""
return path_utils.PathFromBase('third_party', 'lighttpd', 'mac', 'lib')
def LigHTTPdPHPPath():
"""Returns the PHP executable path for LigHTTPd"""
return path_utils.PathFromBase('third_party', 'lighttpd', 'mac', 'bin',
'php-cgi')
def ShutDownHTTPServer(server_pid):
"""Shut down the lighttpd web server. Blocks until it's fully shut down.
Args:
server_pid: The process ID of the running server.
"""
# server_pid is not set when "http_server.py stop" is run manually.
if server_pid is None:
# TODO(mmoss) This isn't ideal, since it could conflict with lighttpd
# processes not started by http_server.py, but good enough for now.
KillAllProcess('lighttpd')
KillAllProcess('httpd')
else:
try:
os.kill(server_pid, signal.SIGTERM)
#TODO(mmoss) Maybe throw in a SIGKILL just to be sure?
except OSError:
# Sometimes we get a bad PID (e.g. from a stale httpd.pid file), so if
# kill fails on the given PID, just try to 'killall' web servers.
ShutDownHTTPServer(None)
def KillProcess(pid):
"""Forcefully kill the process.
Args:
pid: The id of the process to be killed.
"""
os.kill(pid, signal.SIGKILL)
def KillAllProcess(process_name):
# On Mac OS X 10.6, killall has a new constraint: -SIGNALNAME or
# -SIGNALNUMBER must come first. Example problem:
# $ killall -u $USER -TERM lighttpd
# killall: illegal option -- T
# Use of the earlier -TERM placement is just fine on 10.5.
null = open("/dev/null");
subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'), process_name],
stderr=null)
null.close()
def KillAllTestShells():
"""Kills all instances of the test_shell binary currently running."""
KillAllProcess('TestShell')
| bsd-3-clause | -4,367,166,175,675,160,000 | 34.59589 | 80 | 0.689051 | false | 3.665021 | true | false | false |
mareknetusil/twist | demo/dynamic/flap.py | 1 | 1495 | from __future__ import print_function
__author__ = "Harish Narayanan"
__copyright__ = "Copyright (C) 2010 Simula Research Laboratory and %s" % __author__
__license__ = "GNU GPL Version 3 or any later version"
from cbc.twist import *
class Obstruction(Hyperelasticity):
def mesh(self):
n = 4
return RectangleMesh(Point(0, 0), Point(0.2, 0.5), n, 5*n/2)
def end_time(self):
return 4.0
def time_step(self):
return 0.001
def is_dynamic(self):
return True
def neumann_conditions(self):
fluid_force = Expression(("magnitude*t", "0.0"), magnitude=1.5, t=0, degree=0)
return [fluid_force]
def neumann_boundaries(self):
fluid_interface = "x[1] > 0.0 && x[0] == 0"
return [fluid_interface]
def dirichlet_values(self):
fix = Constant((0.0, 0.0))
return [fix]
def dirichlet_boundaries(self):
bottom = "x[1] == 0.0"
return [bottom]
def material_model(self):
mu = 60
lmbda = 90
#material = StVenantKirchhoff([mu, lmbda])
material = neoHookean({'half_nkT':mu, 'bulk':lmbda})
return material
def reference_density(self):
return 1.0
def time_stepping(self):
return "CG1"
def __str__(self):
return "An obstruction being deformed by an ambient flow"
# Setup problem
problem = Obstruction()
problem.parameters['element_degree'] = 1
# Solve problem
print(problem)
problem.solve()
| gpl-3.0 | -6,887,895,058,826,794,000 | 23.508197 | 86 | 0.599331 | false | 3.300221 | false | false | false |
jinankjain/zamboni | mkt/api/paginator.py | 1 | 4202 | import urlparse
from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator
from django.http import QueryDict
from django.utils.http import urlencode
from rest_framework import pagination, serializers
class ESPaginator(Paginator):
"""
A better paginator for search results
The normal Paginator does a .count() query and then a slice. Since ES
results contain the total number of results, we can take an optimistic
slice and then adjust the count.
"""
def validate_number(self, number):
"""
Validates the given 1-based page number.
This class overrides the default behavior and ignores the upper bound.
"""
try:
number = int(number)
except (TypeError, ValueError):
raise PageNotAnInteger('That page number is not an integer')
if number < 1:
raise EmptyPage('That page number is less than 1')
return number
def page(self, number):
"""
Returns a page object.
This class overrides the default behavior and ignores "orphans" and
assigns the count from the ES result to the Paginator.
"""
number = self.validate_number(number)
bottom = (number - 1) * self.per_page
top = bottom + self.per_page
page = Page(self.object_list[bottom:top], number, self)
# Force the search to evaluate and then attach the count. We want to
# avoid an extra useless query even if there are no results, so we
# directly fetch the count from _results_cache instead of calling
# page.object_list.count().
# FIXME: replace by simply calling page.object_list.count() when
# https://github.com/mozilla/elasticutils/pull/212 is merged and
# released.
page.object_list.execute()
self._count = page.object_list._results_cache.count
return page
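# Minimal usage sketch (names illustrative): object_list is expected to be a
# sliceable elasticutils search whose executed results expose a hit count.
#
#   paginator = ESPaginator(search_queryset, 25)
#   page = paginator.page(2)          # slices hits 25..50, then fixes up the count
#   hits = list(page.object_list)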
class MetaSerializer(serializers.Serializer):
"""
Serializer for the 'meta' dict holding pagination info that allows to stay
backwards-compatible with the way tastypie does pagination (using offsets
instead of page numbers), while still using a "standard" Paginator class.
"""
next = serializers.SerializerMethodField('get_next')
previous = serializers.SerializerMethodField('get_previous')
total_count = serializers.SerializerMethodField('get_total_count')
offset = serializers.SerializerMethodField('get_offset')
limit = serializers.SerializerMethodField('get_limit')
def replace_query_params(self, url, params):
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(url)
query_dict = QueryDict(query).dict()
query_dict.update(params)
query = urlencode(query_dict)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
def get_offset_link_for_page(self, page, number):
request = self.context.get('request')
url = request and request.get_full_path() or ''
number = number - 1 # Pages are 1-based, but offsets are 0-based.
per_page = page.paginator.per_page
return self.replace_query_params(url, {'offset': number * per_page,
'limit': per_page})
def get_next(self, page):
if not page.has_next():
return None
return self.get_offset_link_for_page(page, page.next_page_number())
def get_previous(self, page):
if not page.has_previous():
return None
return self.get_offset_link_for_page(page, page.previous_page_number())
def get_total_count(self, page):
return page.paginator.count
def get_offset(self, page):
index = page.start_index()
if index > 0:
# start_index() is 1-based, and we want a 0-based offset, so we
# need to remove 1, unless it's already 0.
return index - 1
return index
def get_limit(self, page):
return page.paginator.per_page
class CustomPaginationSerializer(pagination.BasePaginationSerializer):
meta = MetaSerializer(source='*') # Takes the page object as the source
results_field = 'objects'
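    # Illustrative serialized shape (values are hypothetical), kept
    # tastypie-compatible:
    #
    #   {"meta": {"limit": 20, "offset": 0, "total_count": 42,
    #             "next": "/api/foo/?offset=20&limit=20", "previous": null},
    #    "objects": [...]}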
| bsd-3-clause | 899,639,658,611,445,600 | 36.185841 | 79 | 0.651832 | false | 4.26599 | false | false | false |
hwen3/410-lab5 | todolist.py | 1 | 2612 | import sqlite3
from flask import Flask, render_template, g, request, session, flash, redirect, url_for, abort
DATABASE = 'test.db'
USERNAME = 'admin'
PASSWORD = 'admin'
SECRET_KEY = 'he who shall not be named'
app = Flask(__name__)
app.config.from_object(__name__)
@app.route('/')
def welcome():
return '<h1>Welcome to COMPUT 410 - Jinja Lab!</h1>'
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'invalid password'
else:
session['logged_in'] = True
flash("You are logged in :-)")
return redirect(url_for('task'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    session.pop('logged_in', None)
flash("You are logged out!")
return redirect(url_for('task'))
@app.route('/delete', methods=['GET', 'POST'])
def delete():
if not session.get('logged_in'):
abort(401)
removetask(request.form['id'])
flash('Task was deleted successfully!')
return redirect(url_for('task'))
@app.route('/task', methods=['GET', 'POST'])
def task():
if request.method == 'POST':
if not session.get('logged_in'):
abort(401)
category = request.form['category']
priority = request.form['priority']
description = request.form['description']
addtask(category, priority, description)
flash("New task added successfully")
return redirect(url_for('task'))
return render_template('show_entries.html', tasks=query_db('select * from tasks'))
def query_db(query, args=(), one=False):
cur = get_db().cursor()
cur.execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
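# Example (illustrative): fetch a single task row by id
#   task = query_db('select * from tasks where id = ?', [42], one=True)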
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
db.row_factory = sqlite3.Row
return db
def removetask(id):
query_db('delete from tasks where id = ?', [id], True)
get_db().commit()
def addtask(category, priority, description):
query_db('insert into tasks values (null, ?, ?, ?)', [category, priority, description], True)
get_db().commit()
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
db = None
if __name__ == '__main__':
app.debug = True
app.run() | apache-2.0 | 7,709,553,751,303,551,000 | 27.402174 | 97 | 0.609495 | false | 3.622746 | false | false | false |
Azure/azure-storage-python | samples/blob/block_blob_usage.py | 1 | 18393 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import io
import os
import random
import time
import uuid
from azure.storage.blob import (
ContentSettings,
BlobBlock,
BlockListType,
)
class BlockBlobSamples():
def __init__(self, account):
self.account = account
def run_all_samples(self):
self.service = self.account.create_block_blob_service()
self.delete_blob()
self.blob_metadata()
self.blob_properties()
self.blob_exists()
self.copy_blob()
self.snapshot_blob()
self.lease_blob()
self.blob_with_bytes()
self.blob_with_stream()
self.blob_with_path()
self.blob_with_text()
self.blocks()
def _get_resource_reference(self, prefix):
return '{}{}'.format(prefix, str(uuid.uuid4()).replace('-', ''))
def _get_blob_reference(self, prefix='blob'):
return self._get_resource_reference(prefix)
def _create_blob(self, container_name, prefix='blob'):
blob_name = self._get_resource_reference(prefix)
self.service.create_blob_from_text(container_name, blob_name, u'hello world')
return blob_name
def _create_container(self, prefix='container'):
container_name = self._get_resource_reference(prefix)
self.service.create_container(container_name)
return container_name
def _get_random_bytes(self, size):
rand = random.Random()
result = bytearray(size)
for i in range(size):
result[i] = rand.randint(0, 255)
return bytes(result)
def delete_blob(self):
container_name = self._create_container()
blob_name = self._create_blob(container_name)
# Basic
self.service.delete_blob(container_name, blob_name)
self.service.delete_container(container_name)
def blob_metadata(self):
container_name = self._create_container()
blob_name = self._create_blob(container_name)
metadata = {'val1': 'foo', 'val2': 'blah'}
# Basic
self.service.set_blob_metadata(container_name, blob_name, metadata=metadata)
metadata = self.service.get_blob_metadata(container_name, blob_name) # metadata={'val1': 'foo', 'val2': 'blah'}
# Replaces values, does not merge
metadata = {'new': 'val'}
self.service.set_blob_metadata(container_name, blob_name, metadata=metadata)
metadata = self.service.get_blob_metadata(container_name, blob_name) # metadata={'new': 'val'}
# Capital letters
metadata = {'NEW': 'VAL'}
self.service.set_blob_metadata(container_name, blob_name, metadata=metadata)
metadata = self.service.get_blob_metadata(container_name, blob_name) # metadata={'new': 'VAL'}
# Clearing
self.service.set_blob_metadata(container_name, blob_name)
metadata = self.service.get_blob_metadata(container_name, blob_name) # metadata={}
self.service.delete_container(container_name)
def blob_properties(self):
container_name = self._create_container()
blob_name = self._get_blob_reference()
metadata = {'val1': 'foo', 'val2': 'blah'}
self.service.create_blob_from_text(container_name, blob_name, u'hello world', metadata=metadata)
settings = ContentSettings(content_type='html', content_language='fr')
# Basic
self.service.set_blob_properties(container_name, blob_name, content_settings=settings)
blob = self.service.get_blob_properties(container_name, blob_name)
content_language = blob.properties.content_settings.content_language # fr
content_type = blob.properties.content_settings.content_type # html
content_length = blob.properties.content_length # 512
# Metadata
# Can't set metadata, but get will return metadata already on the blob
blob = self.service.get_blob_properties(container_name, blob_name)
metadata = blob.metadata # metadata={'val1': 'foo', 'val2': 'blah'}
# Replaces values, does not merge
settings = ContentSettings(content_encoding='utf-8')
self.service.set_blob_properties(container_name, blob_name, content_settings=settings)
blob = self.service.get_blob_properties(container_name, blob_name)
content_encoding = blob.properties.content_settings.content_encoding # utf-8
content_language = blob.properties.content_settings.content_language # None
self.service.delete_container(container_name)
def blob_exists(self):
container_name = self._create_container()
blob_name = self._get_blob_reference()
# Basic
exists = self.service.exists(container_name, blob_name) # False
self.service.create_blob_from_text(container_name, blob_name, u'hello world')
exists = self.service.exists(container_name, blob_name) # True
self.service.delete_container(container_name)
def copy_blob(self):
container_name = self._create_container()
source_blob_name = self._create_blob(container_name)
# Basic
# Copy the blob from the directory to the root of the container
source = self.service.make_blob_url(container_name, source_blob_name)
copy = self.service.copy_blob(container_name, 'blob1copy', source)
# Poll for copy completion
count = 0
while copy.status != 'success':
count = count + 1
            if count > 5:
                print('Timed out waiting for async copy to complete.')
                break
time.sleep(30)
copy = self.service.get_blob_properties(container_name, 'blob1copy').properties.copy
# With SAS from a remote account to local blob
# Commented out as remote container, directory, blob, and sas would need to be created
'''
source_blob_url = self.service.make_blob_url(
remote_container_name,
remote_blob_name,
sas_token=remote_sas_token,
)
copy = self.service.copy_blob(destination_containername,
destination_blob_name,
source_blob_url)
'''
# Abort copy
# Commented out as this involves timing the abort to be sent while the copy is still running
# Abort copy is useful to do along with polling
# self.service.abort_copy_blob(container_name, blob_name, copy.id)
# Sync copy
# Set requires_sync=True to indicate that the service should not return a result until the blob is copied.
# This eliminates the need for polling.
self.service.copy_blob(container_name, 'blob1copy', source, requires_sync=True)
self.service.delete_container(container_name)
def snapshot_blob(self):
container_name = self._create_container()
base_blob_name = self._create_blob(container_name)
# Basic
snapshot_blob = self.service.snapshot_blob(container_name, base_blob_name)
snapshot_id = snapshot_blob.snapshot
# Set Metadata (otherwise metadata will be copied from base blob)
metadata = {'val1': 'foo', 'val2': 'blah'}
snapshot_blob = self.service.snapshot_blob(container_name, base_blob_name, metadata=metadata)
snapshot_id = snapshot_blob.snapshot
self.service.delete_container(container_name)
def lease_blob(self):
container_name = self._create_container()
blob_name1 = self._create_blob(container_name)
blob_name2 = self._create_blob(container_name)
blob_name3 = self._create_blob(container_name)
# Acquire
# Defaults to infinite lease
infinite_lease_id = self.service.acquire_blob_lease(container_name, blob_name1)
# Acquire
# Set lease time, may be between 15 and 60 seconds
fixed_lease_id = self.service.acquire_blob_lease(container_name, blob_name2, lease_duration=30)
# Acquire
# Proposed lease id
proposed_lease_id_1 = '55e97f64-73e8-4390-838d-d9e84a374321'
modified_lease_id = self.service.acquire_blob_lease(container_name,
blob_name3,
proposed_lease_id=proposed_lease_id_1,
lease_duration=30)
modified_lease_id # equal to proposed_lease_id_1
# Renew
# Resets the 30 second lease timer
# Note that the lease may be renewed even if it has expired as long as
# the container has not been leased again since the expiration of that lease
self.service.renew_blob_lease(container_name, blob_name3, proposed_lease_id_1)
# Change
# Change the lease ID of an active lease.
proposed_lease_id_2 = '55e97f64-73e8-4390-838d-d9e84a374322'
self.service.change_blob_lease(container_name, blob_name3, modified_lease_id,
proposed_lease_id=proposed_lease_id_2)
# Release
# Releasing the lease allows another client to immediately acquire the
# lease for the container as soon as the release is complete.
self.service.release_blob_lease(container_name, blob_name3, proposed_lease_id_2)
# Break
# A matching lease ID is not required.
# By default, a fixed-duration lease breaks after the remaining lease period
# elapses, and an infinite lease breaks immediately.
infinite_lease_break_time = self.service.break_blob_lease(container_name, blob_name1)
infinite_lease_break_time # 0
# Break
# By default this would leave whatever time remained of the 30 second
# lease period, but a break period can be provided to indicate when the
# break should take affect
lease_break_time = self.service.break_blob_lease(container_name, blob_name2, lease_break_period=10)
lease_break_time # 10
self.service.delete_container(container_name)
def blob_with_bytes(self):
container_name = self._create_container()
# Basic
data = b'hello world'
blob_name = self._get_blob_reference()
self.service.create_blob_from_bytes(container_name, blob_name, data)
blob = self.service.get_blob_to_bytes(container_name, blob_name)
content = blob.content # hello world
# Download range
blob = self.service.get_blob_to_bytes(container_name, blob_name,
start_range=3, end_range=10)
content = blob.content # data from 3-10
# Upload from index in byte array
blob_name = self._get_blob_reference()
self.service.create_blob_from_bytes(container_name, blob_name, data, index=3)
# Content settings, metadata
settings = ContentSettings(content_type='html', content_language='fr')
metadata = {'val1': 'foo', 'val2': 'blah'}
blob_name = self._get_blob_reference()
self.service.create_blob_from_bytes(container_name, blob_name, data, content_settings=settings,
metadata=metadata)
blob = self.service.get_blob_to_bytes(container_name, blob_name)
metadata = blob.metadata # metadata={'val1': 'foo', 'val2': 'blah'}
content_language = blob.properties.content_settings.content_language # fr
content_type = blob.properties.content_settings.content_type # html
# Progress
# Use slightly larger data so the chunking is more visible
data = self._get_random_bytes(8 * 1024 * 1024)
def upload_callback(current, total):
print('({}, {})'.format(current, total))
def download_callback(current, total):
print('({}, {}) '.format(current, total))
blob_name = self._get_blob_reference()
print('upload: ')
self.service.create_blob_from_bytes(container_name, blob_name, data, progress_callback=upload_callback)
print('download: ')
blob = self.service.get_blob_to_bytes(container_name, blob_name,
progress_callback=download_callback)
self.service.delete_container(container_name)
def blob_with_stream(self):
container_name = self._create_container()
# Basic
input_stream = io.BytesIO(self._get_random_bytes(15))
output_stream = io.BytesIO()
blob_name = self._get_blob_reference()
self.service.create_blob_from_stream(container_name, blob_name, input_stream, 15)
blob = self.service.get_blob_to_stream(container_name, blob_name,
output_stream)
content_length = blob.properties.content_length
# Download range
# Content settings, metadata
# Progress
# Parallelism
# See blob_with_bytes for these examples. The code will be very similar.
self.service.delete_container(container_name)
def blob_with_path(self):
container_name = self._create_container()
INPUT_FILE_PATH = 'blob_input.temp.dat'
OUTPUT_FILE_PATH = 'blob_output.temp.dat'
data = self._get_random_bytes(4 * 1024)
with open(INPUT_FILE_PATH, 'wb') as stream:
stream.write(data)
# Basic
blob_name = self._get_blob_reference()
self.service.create_blob_from_path(container_name, blob_name, INPUT_FILE_PATH)
blob = self.service.get_blob_to_path(container_name, blob_name, OUTPUT_FILE_PATH)
content_length = blob.properties.content_length
# Open mode
# Append to the blob instead of starting from the beginning
# Append streams are not seekable and so must be downloaded serially by setting max_connections=1.
blob = self.service.get_blob_to_path(container_name, blob_name, OUTPUT_FILE_PATH, open_mode='ab',
max_connections=1)
content_length = blob.properties.content_length # will be the same, but local blob length will be longer
# Download range
# Content settings, metadata
# Progress
# Parallelism
# See blob_with_bytes for these examples. The code will be very similar.
self.service.delete_container(container_name)
if os.path.isfile(INPUT_FILE_PATH):
try:
os.remove(INPUT_FILE_PATH)
except:
pass
if os.path.isfile(OUTPUT_FILE_PATH):
try:
os.remove(OUTPUT_FILE_PATH)
except:
pass
def blob_with_text(self):
container_name = self._create_container()
# Basic
data = u'hello world'
blob_name = self._get_blob_reference()
self.service.create_blob_from_text(container_name, blob_name, data)
blob = self.service.get_blob_to_text(container_name, blob_name)
content = blob.content # 'hello world'
# Encoding
text = u'hello 啊齄丂狛狜 world'
data = text.encode('utf-16')
blob_name = self._get_blob_reference()
self.service.create_blob_from_text(container_name, blob_name, text, 'utf-16')
blob = self.service.get_blob_to_text(container_name, blob_name, 'utf-16')
content = blob.content # 'hello 啊齄丂狛狜 world'
# Download range
# Content settings, metadata
# Progress
# Parallelism
# See blob_with_bytes for these examples. The code will be very similar.
self.service.delete_container(container_name)
def blocks(self):
container_name = self._create_container()
blob_name = self._get_blob_reference()
# Put block
# Block id's must be the same length
self.service.put_block(container_name, blob_name, b'AAA', '1')
self.service.put_block(container_name, blob_name, b'BBB', '2')
self.service.put_block(container_name, blob_name, b'CCC', '3')
# Get Block List
# Defaults to committed only, specify all to get committed and uncommitted
block_list = self.service.get_block_list(container_name, blob_name,
block_list_type=BlockListType.All)
uncommitted = len(block_list.uncommitted_blocks) # 3
committed = len(block_list.committed_blocks) # 0
# Note the blob does not yet appears as blocks have not been committed
exists = self.service.exists(container_name, blob_name) # False
# Commit the blocks
# BlockBlock state defaults to Latest meaning the uncommitted and then
# the committed list is searched for the block id to commit
block_list = [BlobBlock(id='1'), BlobBlock(id='2'), BlobBlock(id='3')]
self.service.put_block_list(container_name, blob_name, block_list)
# Get Block List
# Defaults to committed only, specify all to get committed and uncommitted
block_list = self.service.get_block_list(container_name, blob_name,
block_list_type=BlockListType.All)
uncommitted = len(block_list.uncommitted_blocks) # 0
committed = len(block_list.committed_blocks) # 3
# Add a block
# Put the block
self.service.put_block(container_name, blob_name, b'DDD', '4')
# Get the existing blocks
block_list = self.service.get_block_list(container_name, blob_name,
block_list_type=BlockListType.All)
uncommitted = len(block_list.uncommitted_blocks) # 1
committed = len(block_list.committed_blocks) # 3
# Added the new block to the existing list and commit
new_block_list = block_list.committed_blocks
new_block_list.append(block_list.uncommitted_blocks[0])
self.service.put_block_list(container_name, blob_name, new_block_list)
self.service.delete_container(container_name)
| mit | 8,936,790,025,452,813,000 | 40.474041 | 120 | 0.618625 | false | 4.002832 | false | false | false |
mbalazin/cse599c-17sp-projects | spark-advantage/pandasbench.py | 1 | 1488 |
# coding: utf-8
# In[1]:
import pandas as pd
import time
from sys import argv
logfile = argv[1]
filesize = argv[2]
# # Python Pandas Benchmark
# In[3]:
prefix = "file:////Users/tony/Dropbox/Projects/UW/cse599c-17sp-projects/spark-advantage/data/"
if(filesize == 'original'):
tairfname = "Tair_WA_nohead.csv"
tsoilfname = "Tsoil_WA_nohead.csv"
tsurfacefname = "Tsurface_WA_nohead.csv"
elif (filesize == 'medium'):
tairfname = "Tair_WA_nohead.MEDIUM.csv"
tsoilfname = "Tsoil_WA_nohead.MEDIUM.csv"
tsurfacefname = "Tsurface_WA_nohead.MEDIUM.csv"
elif (filesize == "small"):
tairfname = "Tair_WA_nohead.SMALL.csv"
tsoilfname = "Tsoil_WA_nohead.SMALL.csv"
tsurfacefname = "Tsurface_WA_nohead.SMALL.csv"
startTime = time.time()
tair = pd.read_csv(prefix+tairfname)
tsoil = pd.read_csv(prefix+tsoilfname)
tsurface = pd.read_csv(prefix+tsurfacefname)
joined = tair.merge(tsoil, on=["datetime", " lat", " lon"]).merge(tsurface, on=["datetime", " lat", " lon"])
joined.columns = [name.strip() for name in joined.columns]
joined[['lat', 'lon']] = joined[['lat', 'lon']].apply(pd.to_numeric)
seattle = joined[(joined['lon'] > -125.52) & \
(joined['lon'] < -120.2) & \
(joined['lat'] > 49.0) & \
(joined['lat'] < 51.0)]
seattle.groupby(by=['lat', 'lon'])['Tair'].mean()
exptime = time.time() - startTime
with open(logfile, 'a') as log:
log.write(str(exptime)+'\n') | bsd-3-clause | -4,992,153,284,442,744,000 | 24.672414 | 108 | 0.623656 | false | 2.695652 | false | false | false |
openai/baselines | baselines/ppo1/run_atari.py | 1 | 1583 | #!/usr/bin/env python3
from mpi4py import MPI
from baselines.common import set_global_seeds
from baselines import bench
import os.path as osp
from baselines import logger
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines.common.cmd_util import atari_arg_parser
def train(env_id, num_timesteps, seed):
from baselines.ppo1 import pposgd_simple, cnn_policy
import baselines.common.tf_util as U
rank = MPI.COMM_WORLD.Get_rank()
sess = U.single_threaded_session()
sess.__enter__()
if rank == 0:
logger.configure()
else:
logger.configure(format_strs=[])
workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank() if seed is not None else None
set_global_seeds(workerseed)
env = make_atari(env_id)
def policy_fn(name, ob_space, ac_space): #pylint: disable=W0613
return cnn_policy.CnnPolicy(name=name, ob_space=ob_space, ac_space=ac_space)
env = bench.Monitor(env, logger.get_dir() and
osp.join(logger.get_dir(), str(rank)))
env.seed(workerseed)
env = wrap_deepmind(env)
env.seed(workerseed)
pposgd_simple.learn(env, policy_fn,
max_timesteps=int(num_timesteps * 1.1),
timesteps_per_actorbatch=256,
clip_param=0.2, entcoeff=0.01,
optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=64,
gamma=0.99, lam=0.95,
schedule='linear'
)
env.close()
def main():
args = atari_arg_parser().parse_args()
train(args.env, num_timesteps=args.num_timesteps, seed=args.seed)
if __name__ == '__main__':
main()
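# Example invocation (flag names come from atari_arg_parser; values are illustrative):
#   python -m baselines.ppo1.run_atari --env PongNoFrameskip-v4 --seed 0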
| mit | -257,343,598,731,253,380 | 31.979167 | 87 | 0.670246 | false | 3.061896 | false | false | false |
bptripp/it-cnn | tuning/clutter.py | 1 | 2870 | __author__ = 'bptripp'
import numpy as np
import matplotlib
matplotlib.rcParams['xtick.labelsize'] = 14
matplotlib.rcParams['ytick.labelsize'] = 14
import matplotlib.pyplot as plt
from cnn_stimuli import get_image_file_list
from alexnet import preprocess, load_net, load_vgg
def get_clutter_responses(remove_level):
model = load_net(weights_path='../weights/alexnet_weights.h5', remove_level=remove_level)
use_vgg = False
# model = load_vgg(weights_path='../weights/vgg16_weights.h5', remove_level=remove_level)
# use_vgg = True
bottom_dir = './images/clutter/bottom/'
bottom_image_files = get_image_file_list(bottom_dir, 'png', with_path=True)
bottom_out = model.predict(preprocess(bottom_image_files, use_vgg=use_vgg))
top_dir = './images/clutter/top/'
top_image_files = get_image_file_list(top_dir, 'png', with_path=True)
top_out = model.predict(preprocess(top_image_files, use_vgg=use_vgg))
pair_dir = './images/clutter/pair/'
pair_image_files = get_image_file_list(pair_dir, 'png', with_path=True)
pair_out = model.predict(preprocess(pair_image_files, use_vgg=use_vgg))
maxima = np.max(pair_out, axis=0)
n = 100
ind = (-maxima).argsort()[:n]
# n = 500
# ind = range(n)
sum_out = np.zeros_like(pair_out)
n_top = len(top_image_files)
n_bottom = len(bottom_image_files)
for i in range(n_top):
for j in range(n_bottom):
sum_out[i*n_bottom+j,:] = top_out[i,:] + bottom_out[j,:]
large_pair_out = pair_out[:,ind]
large_sum_out = sum_out[:,ind]
return large_pair_out, large_sum_out
if False:
remove_level = 1
large_pair_out, large_sum_out = get_clutter_responses(remove_level)
plt.figure(figsize=(4.5,4))
plt.scatter(large_sum_out, large_pair_out, marker='.', c='k')
plt.plot([0, 15], [0, 15], 'k--')
plt.plot([0, 15], [0, 7.5], 'k')
plt.xlim((0,16))
plt.ylim((0,16))
plt.xlabel('Sum of responses to single objects', fontsize=14)
plt.ylabel('Response to object pairs', fontsize=14)
plt.tight_layout()
plt.savefig('../figures/clutter-' + str(remove_level) + '.eps')
plt.show()
if True:
plt.figure(figsize=(6,2))
edges = np.linspace(0, np.pi/2, 20)
for remove_level in range(3):
plt.subplot(1,3,remove_level+1)
large_pair_out, large_sum_out = get_clutter_responses(remove_level)
angle = np.arctan((large_pair_out.flatten() + 1e-6) / (large_sum_out.flatten() + 1e-6))
plt.hist(angle, edges, color=[.5,.5,.5])
# if remove_level == 1:
# plt.xlabel('Angle from horizontal (radians)', fontsize=14)
plt.yticks([])
plt.xticks([0, np.pi/4], ['0', 'pi/4'])
plt.plot([np.arctan(.5), np.arctan(.5)], plt.gca().get_ylim(), 'r')
plt.tight_layout()
plt.savefig('../figures/clutter-angles.eps')
plt.show() | mit | -5,060,137,306,948,743,000 | 35.341772 | 95 | 0.628223 | false | 2.937564 | false | false | false |
amenonsen/ansible | lib/ansible/modules/network/fortios/fortios_wireless_controller_hotspot20_h2qp_osu_provider.py | 1 | 13230 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wireless_controller_hotspot20_h2qp_osu_provider
short_description: Configure online sign up (OSU) provider list in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wireless_controller_hotspot20 feature and h2qp_osu_provider category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.4
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
wireless_controller_hotspot20_h2qp_osu_provider:
description:
- Configure online sign up (OSU) provider list.
default: null
type: dict
suboptions:
friendly_name:
description:
- OSU provider friendly name.
type: list
suboptions:
friendly_name:
description:
- OSU provider friendly name.
type: str
index:
description:
- OSU provider friendly name index.
required: true
type: int
lang:
description:
- Language code.
type: str
icon:
description:
- OSU provider icon. Source wireless-controller.hotspot20.icon.name.
type: str
name:
description:
- OSU provider ID.
required: true
type: str
osu_method:
description:
- OSU method list.
type: str
choices:
- oma-dm
- soap-xml-spp
- reserved
osu_nai:
description:
- OSU NAI.
type: str
server_uri:
description:
- Server URI.
type: str
service_description:
description:
- OSU service name.
type: list
suboptions:
lang:
description:
- Language code.
type: str
service_description:
description:
- Service description.
type: str
service_id:
description:
- OSU service ID.
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure online sign up (OSU) provider list.
fortios_wireless_controller_hotspot20_h2qp_osu_provider:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
wireless_controller_hotspot20_h2qp_osu_provider:
friendly_name:
-
friendly_name: "<your_own_value>"
index: "5"
lang: "<your_own_value>"
icon: "<your_own_value> (source wireless-controller.hotspot20.icon.name)"
name: "default_name_8"
osu_method: "oma-dm"
osu_nai: "<your_own_value>"
server_uri: "<your_own_value>"
service_description:
-
lang: "<your_own_value>"
service_description: "<your_own_value>"
service_id: "15"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_wireless_controller_hotspot20_h2qp_osu_provider_data(json):
option_list = ['friendly_name', 'icon', 'name',
'osu_method', 'osu_nai', 'server_uri',
'service_description']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        data = [underscore_to_hyphen(elem) for elem in data]
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
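# e.g. underscore_to_hyphen({'osu_method': 'oma-dm'}) -> {'osu-method': 'oma-dm'}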
def wireless_controller_hotspot20_h2qp_osu_provider(data, fos):
vdom = data['vdom']
state = data['state']
wireless_controller_hotspot20_h2qp_osu_provider_data = data['wireless_controller_hotspot20_h2qp_osu_provider']
filtered_data = underscore_to_hyphen(filter_wireless_controller_hotspot20_h2qp_osu_provider_data(wireless_controller_hotspot20_h2qp_osu_provider_data))
if state == "present":
return fos.set('wireless-controller.hotspot20',
'h2qp-osu-provider',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('wireless-controller.hotspot20',
'h2qp-osu-provider',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_wireless_controller_hotspot20(data, fos):
if data['wireless_controller_hotspot20_h2qp_osu_provider']:
resp = wireless_controller_hotspot20_h2qp_osu_provider(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"wireless_controller_hotspot20_h2qp_osu_provider": {
"required": False, "type": "dict", "default": None,
"options": {
"friendly_name": {"required": False, "type": "list",
"options": {
"friendly_name": {"required": False, "type": "str"},
"index": {"required": True, "type": "int"},
"lang": {"required": False, "type": "str"}
}},
"icon": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"osu_method": {"required": False, "type": "str",
"choices": ["oma-dm", "soap-xml-spp", "reserved"]},
"osu_nai": {"required": False, "type": "str"},
"server_uri": {"required": False, "type": "str"},
"service_description": {"required": False, "type": "list",
"options": {
"lang": {"required": False, "type": "str"},
"service_description": {"required": False, "type": "str"},
"service_id": {"required": False, "type": "int"}
}}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_wireless_controller_hotspot20(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_wireless_controller_hotspot20(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,820,145,933,093,998,000 | 32.324937 | 155 | 0.552079 | false | 4.308043 | false | false | false |
icyflame/batman | pywikibot/families/wikibooks_family.py | 1 | 7244 | # -*- coding: utf-8 -*-
"""Family module for Wikibooks."""
from __future__ import absolute_import, unicode_literals
from pywikibot import family
__version__ = '$Id$'
# The Wikimedia family that is known as Wikibooks
class Family(family.SubdomainFamily, family.WikimediaFamily):
"""Family class for Wikibooks."""
name = 'wikibooks'
closed_wikis = [
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Afar_Wikibooks
'aa',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Akan_Wikibooks
'ak',
# https://als.wikipedia.org/wiki/Wikipedia:Stammtisch/Archiv_2008-1#Afterwards.2C_closure_and_deletion_of_Wiktionary.2C_Wikibooks_and_Wikiquote_sites
'als',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Assamese_Wikibooks
'as',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Asturianu_Wikibooks
'ast',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Aymar_Wikibooks
'ay',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Bashkir_Wikibooks
'ba',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Bislama_Wikibooks
'bi',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Bambara_Wikibooks
'bm',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Tibetan_Wikibooks
'bo',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Chamorro_Wikibooks
'ch',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Corsu_Wikibooks
'co',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Gaeilge_Wikibooks
'ga',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Gothic_Wikibooks
'got',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Guarani_Wikibooks
'gn',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Gujarati_Wikibooks
'gu',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Kannada_Wikibooks
'kn',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Kashmiri_Wikibooks
'ks',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_L%C3%ABtzebuergesch_Wikibooks
'lb',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Lingala_Wikibooks
'ln',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Latvian_Wikibooks
'lv',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Maori_Wikibooks
'mi',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Mongolian_Wikibooks
'mn',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Burmese_Wikibooks
'my',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Nauruan_Wikibooks
'na',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Nahuatl_Wikibooks
'nah',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Plattd%C3%BC%C3%BCtsch_Wikibooks
'nds',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Pashto_Wikibooks
'ps',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Quechua_Wikibooks
'qu',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Rumantsch_Wikibooks
'rm',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Sami_Wikibooks
'se',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Simple_English_Wikibooks_(3)
'simple',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Basa_Sunda_Wikibooks_(2)
'su',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Swahili_Wikibooks
'sw',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Turkmen_Wikibooks
'tk',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Uyghur_Wikibooks
'ug',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Volap%C3%BCk_Wikibooks
'vo',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Walon_Wikibooks
'wa',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Xhosa_Wikibooks
'xh',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Yoruba_Wikibooks
'yo',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Zhuang_Wikibooks
'za',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Zulu_Wikibooks
'zu',
]
removed_wikis = [
'tokipona',
]
def __init__(self):
"""Constructor."""
self.languages_by_size = [
'en', 'de', 'fr', 'hu', 'ja', 'it', 'es', 'pt', 'nl', 'pl', 'he',
'vi', 'ca', 'id', 'sq', 'fi', 'ru', 'fa', 'cs', 'zh', 'sv', 'hr',
'tr', 'ro', 'sr', 'ar', 'no', 'th', 'ko', 'gl', 'da', 'ta', 'mk',
'az', 'tl', 'is', 'ka', 'lt', 'tt', 'uk', 'eo', 'bg', 'sk', 'sl',
'el', 'hy', 'ms', 'sa', 'si', 'li', 'la', 'ml', 'ur', 'bn', 'ang',
'ia', 'cv', 'et', 'hi', 'km', 'mr', 'eu', 'oc', 'kk', 'fy', 'ne',
'ie', 'te', 'af', 'tg', 'ky', 'bs', 'pa', 'be', 'mg', 'cy',
'zh-min-nan', 'ku', 'uz',
]
super(Family, self).__init__()
# Global bot allowed languages on
# https://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation
self.cross_allowed = [
'af', 'ang', 'ca', 'fa', 'fy', 'it', 'nl', 'ru', 'th', 'zh',
]
# Which languages have a special order for putting interlanguage links,
# and what order is it? If a language is not in interwiki_putfirst,
# alphabetical order on language code is used. For languages that are in
# interwiki_putfirst, interwiki_putfirst is checked first, and
# languages are put in the order given there. All other languages are
# put after those, in code-alphabetical order.
self.interwiki_putfirst = {
'en': self.alphabetic,
'fi': self.alphabetic,
'fr': self.alphabetic,
'he': ['en'],
'hu': ['en'],
'pl': self.alphabetic,
'simple': self.alphabetic
}
def shared_data_repository(self, code, transcluded=False):
"""Return the shared data repository for this family."""
return ('wikidata', 'wikidata')
| mit | -3,807,221,727,684,377,000 | 48.278912 | 157 | 0.628934 | false | 3.023372 | false | false | false |
mpachas/django-monthfield | month/tests.py | 1 | 5197 | from django.test import TestCase
from month.models import Month
from example.models import Example
import datetime
# Create your tests here.
class TestMonthFunctions(TestCase):
def test_constructors(self):
m = Month(2010, 1)
self.assertEqual(m.year, 2010)
self.assertEqual(m.month, 1)
m = Month.from_string('2010-01')
self.assertEqual(m.year, 2010)
self.assertEqual(m.month, 1)
m = Month.from_date(datetime.date(year=2010, month=1, day=20))
self.assertEqual(m.year, 2010)
self.assertEqual(m.month, 1)
def test_addition(self):
m = Month(2010, 1)
x = m + 5
self.assertEqual(x.year, 2010)
self.assertEqual(x.month, 6)
x = m + 11
self.assertEqual(x.year, 2010)
self.assertEqual(x.month, 12)
x = m + 12
self.assertEqual(x.year, 2011)
self.assertEqual(x.month, 1)
x = m + 13
self.assertEqual(x.year, 2011)
self.assertEqual(x.month, 2)
x = m - 1
self.assertEqual(x.year, 2009)
self.assertEqual(x.month, 12)
x = m + 0
self.assertEqual(x.year, 2010)
self.assertEqual(x.month, 1)
x = m - 12
self.assertEqual(x.year, 2009)
self.assertEqual(x.month, 1)
x = m.next_month()
self.assertEqual(x.year, 2010)
self.assertEqual(x.month, 2)
x = m.prev_month()
self.assertEqual(x.year, 2009)
self.assertEqual(x.month, 12)
def test_firstday(self):
m = Month(2010, 1)
self.assertEqual(m.firstDay(), datetime.date(year=2010, month=1, day=1))
self.assertEqual(m.last_day(), datetime.date(year=2010, month=1, day=31))
m = Month(2010, 2)
self.assertEqual(m.firstDay(), datetime.date(year=2010, month=2, day=1))
self.assertEqual(m.last_day(), datetime.date(year=2010, month=2, day=28))
m = Month(2008, 2)
self.assertEqual(m.firstDay(), datetime.date(year=2008, month=2, day=1))
self.assertEqual(m.last_day(), datetime.date(year=2008, month=2, day=29))
def test_contains(self):
m = Month(2010, 1)
assert datetime.date(year=2010, month=1, day=1) in m
assert datetime.date(year=2010, month=1, day=10) in m
assert datetime.date(year=2010, month=1, day=31) in m
assert datetime.date(year=2010, month=2, day=1) not in m
assert datetime.date(year=2009, month=12, day=31) not in m
assert datetime.date(year=2009, month=1, day=31) not in m
assert datetime.date(year=2010, month=2, day=15) not in m
def test_int_conversion(self):
m = Month(2010, 1)
n = Month.from_int(int(m))
self.assertEqual(n.year, 2010)
self.assertEqual(n.month, 1)
def test_comparisons(self):
m = Month(2010, 1)
assert m == "2010-01-20"
assert m == "2010-01-20"
assert m == "2010-01"
assert m == "2010-01-20"
assert m < "2010-02-01"
assert m > "2009-12"
assert m > "2009-12-31"
p = m.prev_month()
n = m.next_month()
assert m == m
assert m <= m
assert m >= m
assert not m > m
assert not m < m
assert not m != m
assert not m == p
assert m > p
assert m >= p
assert not m <= p
assert not m < p
assert m != p
assert not m == n
assert m != n
assert m < n
assert m <= n
assert not m > n
assert not m >= n
class test_model_field(TestCase):
def test_queries(self):
e = Example(name='2010-01', month=Month(2010, 1))
e.save()
assert isinstance(e.month, Month)
assert e.month.month == 1
assert e.month.year == 2010
pk = e.pk
e = Example.objects.get(pk=pk)
assert isinstance(e.month, Month)
assert e.month.month == 1
assert e.month.year == 2010
e = Example(name='2010-01', month='2010-01')
e.save()
pk = e.pk
e = Example.objects.get(pk=pk)
assert isinstance(e.month, Month)
assert e.month.month == 1
assert e.month.year == 2010
e = Example(name='2010-01', month=datetime.date(year=2010, month=1, day=20))
e.save()
pk = e.pk
e = Example.objects.get(pk=pk)
assert isinstance(e.month, Month)
assert e.month.month == 1
assert e.month.year == 2010
Example.objects.all().delete()
for year in range(2001, 2011):
for month in range(1, 13):
name = "%s - %02d" %(year, month)
Example(name=name, month=Month(year, month)).save()
qs = Example.objects.filter(month='2005-12')
assert qs.exists()
assert qs.count() == 1
qs = Example.objects.filter(month__gte='2005-12')
assert qs.exists()
self.assertEqual(qs.count(), 61)
qs = Example.objects.filter(month__gt='2005-12')
assert qs.exists()
assert qs.count() == 60
def tearDown(self):
Example.objects.all().delete()
| bsd-3-clause | 6,722,813,776,884,100,000 | 28.196629 | 84 | 0.552819 | false | 3.478581 | true | false | false |
sh0ked/vmmaster | backend/queue_producer.py | 1 | 4076 | # coding: utf-8
import aioamqp
import uuid
import logging
from core.utils import async_wait_for
log = logging.getLogger(__name__)
class AsyncQueueProducer(object):
messages = {}
connection = None
channel = None
consumer_tag = None
responses_queue = None
commands_queue = None
def __init__(self, app):
self.app = app
async def connect(self):
params = {
'loop': self.app.loop,
'login': self.app.cfg.RABBITMQ_USER,
'password': self.app.cfg.RABBITMQ_PASSWORD,
'host': self.app.cfg.RABBITMQ_HOST,
'port': self.app.cfg.RABBITMQ_PORT
}
self.connection = await self.make_connection(params)
self.channel = await self.connection.channel()
self.responses_queue, self.consumer_tag = await self.create_queue_and_consume()
self.commands_queue = await self.create_queue(self.app.cfg.RABBITMQ_COMMAND_QUEUE)
async def create_queue(self, queue_name=None):
if not queue_name:
result = await self.channel.queue_declare(exclusive=True)
else:
result = await self.channel.queue_declare(queue_name=queue_name)
queue, messages, consumers = result.get('queue'), result.get('message_count'), result.get('consumer_count')
log.info("Queue %s was declared(messages: %s, consumers: %s)" % (queue, messages, consumers))
return queue
async def delete_queue(self, queue_name):
await self.channel.queue_delete(queue_name)
log.info('Queue %s was deleted' % queue_name)
async def create_queue_and_consume(self, queue_name=None):
if not queue_name:
queue_name = await self.create_queue()
else:
await self.create_queue(queue_name)
consumer_tag = await self.queue_consume(queue_name)
return queue_name, consumer_tag
@staticmethod
async def make_connection(params):
transport, connection = await aioamqp.connect(**params)
return connection
async def queue_consume(self, queue_name):
log.info("Start consuming for queue %s" % queue_name)
await self.channel.basic_consume(
callback=self.on_message, queue_name=queue_name, no_ack=False
)
async def on_message(self, channel, body, envelope, properties):
log.debug("Got new message %s" % body)
for correlation_id in list(self.messages.keys()):
if correlation_id == properties.correlation_id:
log.info("Response with corr_id %s from queue %s: %s" % (correlation_id, self.responses_queue, body))
self.messages[correlation_id]["response"] = body
channel.basic_client_ack(delivery_tag=envelope.delivery_tag)
async def add_msg_to_queue(self, queue_name, msg):
correlation_id = str(uuid.uuid4())
await self.channel.basic_publish(
payload=str(msg),
exchange_name='',
routing_key=queue_name,
properties={
"reply_to": self.responses_queue,
"correlation_id": correlation_id
})
log.info("Message(id:%s body: %s) was published to %s" % (correlation_id, msg, queue_name))
self.messages[correlation_id] = {"request": msg, "response": None}
return correlation_id
async def get_message_from_queue(self, correlation_id):
log.info("Waiting response for message with id: %s" % correlation_id)
response = await async_wait_for(
lambda: self.messages.get(correlation_id).get("response"),
self.app.loop,
timeout=self.app.cfg.BACKEND_REQUEST_TIMEOUT
)
del self.messages[correlation_id]
log.info("Got response %s for message with id: %s" % (response, correlation_id))
return response
async def add_msg_to_queue_with_response(self, queue_name, msg):
correlation_id = await self.add_msg_to_queue(queue_name, msg)
response = await self.get_message_from_queue(correlation_id)
return response
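# Illustrative usage sketch (assumptions: an `app` object exposing `loop` and the
# RABBITMQ_* / BACKEND_REQUEST_TIMEOUT settings, and a worker consuming the
# command queue; these are wired up elsewhere in the real application):
#
#   producer = AsyncQueueProducer(app)
#   await producer.connect()
#   reply = await producer.add_msg_to_queue_with_response(
#       producer.commands_queue, "ping")
#
# connect() declares the exclusive response queue and starts consuming it, so
# each reply is matched back to its request by correlation_id.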
| mit | 1,020,926,549,248,728,000 | 38.192308 | 117 | 0.626349 | false | 3.82723 | false | false | false |
fsmMLK/inkscapeCircuitSymbols | 0.9x/drawRLC.py | 1 | 14781 | #!/usr/bin/python
import inkscapeMadeEasy_Base as inkBase
import inkscapeMadeEasy_Draw as inkDraw
class RLC(inkBase.inkscapeMadeEasy):
# ---------------------------------------------
def drawBipoleGeneral(self, parent, position=[0, 0], value='Z', label='Bipole', angleDeg=0, flagVolt=True,
voltName='v', flagCurr=True, currName='i', invertArrows=False, convention='passive'):
""" draws a generic bipole with a rectangle
parent: parent object
position: position [x,y]
value: string with resistor value. (default 'Z')
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
invertArrows: invert V/I arrow directions (default: False)
convention: passive/active sign convention. available types: 'passive' (default) , 'active'
"""
group = self.createGroup(parent, label)
elem = self.createGroup(group)
inkDraw.line.relCoords(elem, [[15.5, 0]], position)
inkDraw.line.relCoords(elem, [[19, 0], [0, -6], [-19, 0], [0, 6]], [position[0] + 15.5, position[1] + 3])
inkDraw.line.relCoords(elem, [[15.5, 0]], [position[0] + 34.5, position[1]])
pos_text = [position[0] + 25, position[1] - 3 - self.textOffset]
if inkDraw.useLatex:
value = '$' + value + '$'
inkDraw.text.latex(self, group, value, pos_text, fontSize=self.fontSize, refPoint='bc',
preambleFile=self.preambleFile)
if angleDeg != 0:
self.rotateElement(group, position, angleDeg)
if flagVolt:
if convention == 'passive':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 5], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=not invertArrows)
if convention == 'active':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 5], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=invertArrows)
if flagCurr:
self.drawCurrArrow(group, [position[0] + 40, position[1] - 5], name=currName, color=self.currentColor,
angleDeg=angleDeg, invertArrows=invertArrows)
return group
# ---------------------------------------------
def drawResistor(self, parent, position=[0, 0], value='R', label='Resistor', angleDeg=0, flagVolt=True,
voltName='v', flagCurr=True, currName='i', invertArrows=False, convention='passive'):
""" draws a resistor
parent: parent object
position: position [x,y]
value: string with resistor value. If it ends with 'ohm', 'OHM' or 'Ohm', proper Ohm symbol will be added. (Default 'R')
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
invertArrows: invert V/I arrow directions (default: False)
convention: passive/active sign convention. available types: 'passive' (default) , 'active'
"""
group = self.createGroup(parent, label)
elem = self.createGroup(group)
inkDraw.line.relCoords(elem, [[15.5, 0], [2, 3], [3, -6], [3, 6], [3, -6], [3, 6], [3, -6], [2, 3], [15.5, 0]],
position)
pos_text = [position[0] + 25, position[1] - 3 - self.textOffset]
if inkDraw.useLatex:
value = '$' + value + '$'
inkDraw.text.latex(self, group, value, pos_text, fontSize=self.fontSize, refPoint='bc',
preambleFile=self.preambleFile)
if angleDeg != 0:
self.rotateElement(group, position, angleDeg)
if flagVolt:
if convention == 'passive':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 5], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=not invertArrows)
if convention == 'active':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 5], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=invertArrows)
if flagCurr:
self.drawCurrArrow(group, [position[0] + 40, position[1] - 5], name=currName, color=self.currentColor,
angleDeg=angleDeg, invertArrows=invertArrows)
return group
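    # Illustrative usage sketch (assumes an inkscapeMadeEasy extension context in
    # which self.fontSize, self.textOffset, self.preambleFile, self.voltageColor
    # and self.currentColor have already been set up, and `root_layer` is a
    # hypothetical parent group):
    #
    #   res1 = self.drawResistor(root_layer, position=[0, 0], value='R_1')
    #   res2 = self.drawResistor(root_layer, position=[0, 50], value='R_2',
    #                            angleDeg=90, flagVolt=False)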
# ---------------------------------------------
def drawPotentiometer(self, parent, position=[0, 0], value='R', label='Potentiometer', angleDeg=0, flagVolt=True,
voltName='v', flagCurr=True, currName='i', invertArrows=False, is3T=False,
convention='passive'):
""" draws a potentiometer
parent: parent object
position: position [x,y]
value: string with resistor value.
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
invertArrows: invert V/I arrow directions (default: False)
        is3T: indicates whether the potentiometer has 3 terminals (default: false)
convention: passive/active sign convention. available types: 'passive' (default) , 'active'
"""
group = self.createGroup(parent, label)
elem = self.createGroup(group)
# build arrow marker
colorBlack = inkDraw.color.defined('black')
L_arrow = 2.5
markerPath = 'M 0,0 l -%f,%f l 0,-%f z' % (L_arrow * 1.2, L_arrow / 2.0, L_arrow)
markerArrow = inkDraw.marker.createMarker(self, 'BJTArrow', markerPath, RenameMode=1, strokeColor=colorBlack,
fillColor=colorBlack, lineWidth=0.6,
markerTransform='translate (1,0)')
lineStyleArrow = inkDraw.lineStyle.set(lineWidth=1, lineColor=colorBlack, markerEnd=markerArrow)
inkDraw.line.relCoords(elem, [[15.5, 0], [2, 3], [3, -6], [3, 6], [3, -6], [3, 6], [3, -6], [2, 3], [15.5, 0]],
position)
# 2-terminal Potentiometer
if is3T:
inkDraw.line.relCoords(elem, [[0, -10]], [position[0] + 25, position[1] + 15], lineStyle=lineStyleArrow)
pos_text = [position[0] + 25, position[1] - 3 - self.textOffset]
else:
inkDraw.line.relCoords(elem, [[20, -12]], [position[0] + 15, position[1] + 6], lineStyle=lineStyleArrow)
pos_text = [position[0] + 25, position[1] - 6 - self.textOffset]
if inkDraw.useLatex:
value = '$' + value + '$'
inkDraw.text.latex(self, group, value, pos_text, fontSize=self.fontSize, refPoint='bc',
preambleFile=self.preambleFile)
if angleDeg != 0:
self.rotateElement(group, position, angleDeg)
if flagVolt:
if is3T:
pos = [position[0] + 25, position[1] - 13]
invertCurvature = True
else:
pos = [position[0] + 25, position[1] + 8]
invertCurvature = False
if convention == 'passive':
self.drawVoltArrowSimple(group, pos, name=voltName, color=self.voltageColor, angleDeg=0,
invertArrows=invertArrows, invertCurvatureDirection=invertCurvature)
if convention == 'active':
self.drawVoltArrowSimple(group, pos, name=voltName, color=self.voltageColor, angleDeg=0,
invertArrows=not invertArrows, invertCurvatureDirection=invertCurvature)
if flagCurr:
if is3T:
pos = [position[0] + 40, position[1] - 5]
else:
pos = [position[0] + 42, position[1] - 5]
self.drawCurrArrow(group, pos, name=currName, color=self.currentColor, angleDeg=angleDeg,
invertArrows=invertArrows)
return group
# ---------------------------------------------
def drawCapacitor(self, parent, position=[0, 0], value='C', label='Capacitor', flagPol=False, angleDeg=0,
flagVolt=True, voltName='v', flagCurr=True, currName='i', invertArrows=False,
convention='passive'):
""" draws a capacitor
parent: parent object
position: position [x,y]
value: string with value.
label: label of the object (it can be repeated)
flagPol: draw sign for polarized capacitor
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
invertArrows: invert V/I arrow directions (default: False)
convention: passive/active sign convention. available types: 'passive' (default) , 'active'
"""
group = self.createGroup(parent, label)
elem = self.createGroup(group, label)
inkDraw.line.relCoords(elem, [[23, 0]], position)
inkDraw.line.relCoords(elem, [[-23, 0]], [position[0] + 50, position[1]])
inkDraw.line.relCoords(elem, [[0, -14]], [position[0] + 23, position[1] + 7])
inkDraw.line.relCoords(elem, [[0, -14]], [position[0] + 27, position[1] + 7])
pos_text = [position[0] + 25, position[1] - 8 - self.textOffset]
if inkDraw.useLatex:
value = '$' + value + '$'
inkDraw.text.latex(self, group, value, pos_text, fontSize=self.fontSize, refPoint='bc',
preambleFile=self.preambleFile)
if flagPol:
inkDraw.text.write(self, '+', [position[0] + 31, position[1] - 3], group, self.textStyle, fontSize=5)
if angleDeg != 0:
self.rotateElement(group, position, angleDeg)
if flagVolt:
if convention == 'passive':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 9], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=not invertArrows)
if convention == 'active':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 9], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=invertArrows)
if flagCurr:
self.drawCurrArrow(group, [position[0] + 40, position[1] - 5], name=currName, color=self.currentColor,
angleDeg=angleDeg, invertArrows=invertArrows)
return group
# ---------------------------------------------
    def drawInductor(self, parent, position=[0, 0], value='L', label='Inductor', angleDeg=0, flagVolt=True,
voltName='v', flagCurr=True, currName='i', invertArrows=False, convention='passive'):
""" draws an inductor
parent: parent object
position: position [x,y]
        value: string with inductor value. (default 'L')
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
invertArrows: invert V/I arrow directions (default: False)
convention: passive/active sign convention. available types: 'passive' (default) , 'active'
"""
group = self.createGroup(parent, label)
elem = self.createGroup(group, label)
inkDraw.line.relCoords(elem, [[13, 0]], position)
inkDraw.line.relCoords(elem, [[-13, 0]], [position[0] + 50, position[1]])
inkDraw.arc.centerAngStartAngEnd(elem, [position[0] + 16, position[1]], 3.0, 0.0, 180.0, [0, 0], flagOpen=True,
largeArc=False)
inkDraw.arc.centerAngStartAngEnd(elem, [position[0] + 22, position[1]], 3.0, 0.0, 180.0, [0, 0], flagOpen=True,
largeArc=False)
inkDraw.arc.centerAngStartAngEnd(elem, [position[0] + 28, position[1]], 3.0, 0.0, 180.0, [0, 0], flagOpen=True,
largeArc=False)
inkDraw.arc.centerAngStartAngEnd(elem, [position[0] + 34, position[1]], 3.0, 0.0, 180.0, [0, 0], flagOpen=True,
largeArc=False)
pos_text = [position[0] + 25, position[1] - self.textOffset]
if inkDraw.useLatex:
value = '$' + value + '$'
inkDraw.text.latex(self, group, value, pos_text, fontSize=self.fontSize, refPoint='bc',
preambleFile=self.preambleFile)
if angleDeg != 0:
self.rotateElement(group, position, angleDeg)
if flagVolt:
if convention == 'passive':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 5], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=not invertArrows)
if convention == 'active':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 5], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=invertArrows)
if flagCurr:
self.drawCurrArrow(group, [position[0] + 40, position[1] - 5], name=currName, color=self.currentColor,
angleDeg=angleDeg, invertArrows=invertArrows)
return group
| gpl-3.0 | -5,235,465,511,251,690,000 | 48.767677 | 128 | 0.577633 | false | 3.762984 | false | false | false |
Iconoclasteinc/tgit | testing/drivers/track_list_tab_driver.py | 1 | 3521 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QMenu, QTableWidget
from hamcrest import contains, has_items, equal_to
from cute import gestures
from cute.matchers import named
from cute.widgets import MenuDriver, TableViewDriver
from tgit.ui.pages.track_list_tab import TrackListTab
from ._screen_driver import ScreenDriver
def track_list_tab(parent):
return TrackListTabDriver.find_single(parent, TrackListTab, named("track_list_tab"))
class TrackListTabDriver(ScreenDriver):
def __init__(self, selector, prober, gesture_performer):
super().__init__(selector, prober, gesture_performer)
def shows_column_headers(self, *headers):
self._track_table().has_headers(contains(*headers))
def shows_track_details(self, *details):
return self._track_table().has_row(has_items(*details))
def has_selected_track(self, *cells):
return self._track_table().has_selected_row(has_items(*cells))
def shows_tracks_in_order(self, *tracks):
rows = [has_items(*[column for column in track]) for track in tracks]
return self._track_table().contains_rows(contains(*rows))
def has_track_count(self, count):
self._track_table().has_row_count(equal_to(count))
def add_tracks(self):
self.button(named("_add_tracks_button")).click()
def has_context_menu_item(self, matching):
context_menu = self._from_context_menu()
context_menu.has_menu_item(matching)
context_menu.close()
@property
def remove_button(self):
return self.button(named("_remove_track_button"))
@property
def move_up_button(self):
return self.button(named("_move_track_up_button"))
@property
def move_down_button(self):
return self.button(named("_move_track_down_button"))
def has_disabled_play_context_menu_item(self, title):
self.select_track(title)
self._from_context_menu().menu_item(named("_play_action")).is_disabled()
def _from_context_menu(self):
self.perform(gestures.mouse_right_click())
return MenuDriver.find_single(self, QMenu, named("context_menu"))
def select_track(self, title):
row = self.shows_track_details(title)
self._track_table().click_on_cell(row, 0)
def play_track(self, title):
self.select_track(title)
self._from_context_menu().select_menu_item(named("_play_action"))
def stop_track(self, title):
self.select_track(title)
self._from_context_menu().select_menu_item(named("_stop_action"))
def remove_selected_track(self, using="shortcut"):
if using == "shortcut":
self.perform(gestures.delete_previous())
elif using == "menu":
self._from_context_menu().select_menu_item(named("_remove_action"))
elif using == "button":
self.remove_button.click()
else:
raise AssertionError("Don't know how to remove a track using {}", using)
def remove_track(self, title):
self.select_track(title)
self.remove_selected_track()
def move_track(self, title, to):
from_ = self.shows_track_details(title)
self._track_table().move_row(from_, to)
def move_track_up(self):
self.move_up_button.click()
def move_track_down(self):
self.move_down_button.click()
def _track_table(self):
table = TableViewDriver.find_single(self, QTableWidget, named('_track_table'))
table.is_showing_on_screen()
return table
| gpl-3.0 | 5,953,029,582,942,851,000 | 33.184466 | 88 | 0.652655 | false | 3.622428 | false | false | false |
nict-isp/scn-openflow-driver | src/ncps_openflow/protocols/application/tcp.py | 1 | 13729 | # -*- coding: utf-8 -*-
"""
protocols.application.tcp
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright (c) 2015, National Institute of Information and Communications Technology.All rights reserved.
:license: GPL3, see LICENSE for more details.
"""
import logging
from protocols.application.application import Server
from protocols.application.application import Client
from protocols import ipv4 as Ipv4Agent
from protocols import tcp as TcpAgent
from protocols.tcp import TcpSegment
from protocols.tcp import TcpConnection
from pox.core import core
from pox.lib.packet.tcp import tcp
from random import randint
log = logging.getLogger('protocols.application.tcp')
class TcpServer(Server):
"""Implements a basic TCP Server which handles raw TCP packets passed to it."""
protocol = tcp
agent = TcpAgent
def __init__(self, lport, max_active_conns=1000): #was max_active_conns=250
"""port is the port the TCPServer should listen for SYN packets on."""
        assert lport>=0 and lport<65536, "Port must be between 0 and 65535 (inclusive)"
self.lport = lport
self.connections = {}
self.max_active_conns = max_active_conns
Server.__init__(self)
def processPacket(self, packet, *args, **kwargs):
tcpPkt = self.agent.extract(packet)
if tcpPkt is None:
return None
if not self.matches(packet, *args, **kwargs):
return None
for f in self.filters:
packet = f.packetIn(packet)
conn = self.getConnection(packet, *args, **kwargs)
if not conn:
return None
conn = self.processTcpPkt(conn, packet, tcpPkt)
if not conn or conn.closed:
return None
conn = self.processTcpData(conn, packet, tcpPkt, *args, **kwargs)
_tcpPkt = self.getReply(packet, *args, **kwargs)
if _tcpPkt is None:
return None
resp = self.agent.buildStandardTcpResponse(packet, _tcpPkt, payload=_tcpPkt.next, ipId=None if not conn.ipId else conn.ipId+1)
if conn.ipId is None:
ipId = Ipv4Agent.extractId(resp)
if ipId == 0:
ipId = randint(0, 2**16-1)
conn.ipId = ipId
conn.ipId += 1
return resp
def matches(self, packet, *args, **kwargs):
dstip, dstport = self.agent.extractDst(packet)
if dstport == self.lport:
return True
return False
def getReply(self, packet, *args, **kwargs):
conn = self.getConnection(packet, *args, **kwargs)
if conn is None:
return None
pkts = conn.get_packets_to_send()
if not pkts:
return
return pkts[0]
def getConnection(self, packet, *args, **kwargs):
socPair = self.agent.extractConnection(packet)
key = self.agent.socPairInt(socPair)
conn = self.connections.get(key)
if not conn:
conn = self.createConnection(packet, socPair, *args, **kwargs)
if conn is None:
return conn
self.connections[key] = conn
log.debug("{TcpServer} adding the %dth connection [%s]" % (len(self.connections), key))
return conn
def createConnection(self, packet, socPair, *args, **kwargs):
if len(self.connections) >= self.max_active_conns:
            s = 'Ignoring new connection request: '
            s += 'already have %d active connections'
log.warn(s % self.max_active_conns)
return None
if not self.agent.isSyn(packet):
return None
_kwargs = {}
_kwargs.update(kwargs)
_kwargs['connection_over_cb'] = self.connectionClosed
_kwargs['has_data_to_send_cb'] = self.connHasDataToSend
conn = TcpConnection.createFromPacket(packet, **_kwargs)
return conn
def connHasDataToSend(self, conn):
if conn is None:
return None
pkts = conn.get_packets_to_send()
if len(pkts)==0:
return None
tcpPkt = pkts[0]
pkt = self.agent.buildFrameFromConn(conn, tcpPkt)
self.sendConnectionPkt(conn, pkt)
def sendConnectionPkt(self, conn, pkt):
self.sendPkt(pkt)
def processTcpPkt(self, conn, packet, tcpPkt):
seq = self.agent.extractSeq(tcpPkt)
if seq is None:
return None
try:
if len(tcpPkt.next) > 0:
segment = TcpSegment(seq, tcpPkt.next)
conn.add_segment(segment)
except Exception as inst:
log.exception(inst)
conn.close()
return None
if self.agent.isFin(tcpPkt):
conn.fin_received(seq)
window = self.agent.extractWin(tcpPkt)
if window is None:
return None
conn.window = max(1460, window) # ignore requests to shrink the window below an MTU
if self.agent.isAck(tcpPkt):
ack = self.agent.extractAck(tcpPkt)
if ack is None:
return None
conn.set_ack(ack)
return conn
def processTcpData(self, conn, packet, tcpPkt, *args, **kwargs):
if not conn or conn.closed:
return conn
if not conn.has_ready_data():
return conn
data = conn.get_data()
self.payloadReceived(packet, data, *args, **kwargs)
conn.segments = []
return conn
def sendPayload(self, payload, *args, **kwargs):
""" TODO """
def connectionClosed(self, *args, **kwargs):
"""Called when it is ready to be removed. Removes the connection."""
if len(args) == 0:
return
conn = args[0]
if not conn:
return
socPair = conn.get_socket_pair()
socPair = socPair[::-1]
key = self.agent.socPairInt(socPair)
try:
conn = self.connections[key]
core.callDelayed(1, self.delConnection, key)
if not conn.closed:
conn.close()
except KeyError:
log.warn('Tried to remove connection which is not in our dictionary: %s' % str(key))
pass
def delConnection(self, key):
try:
del self.connections[key]
log.debug("Deleting the %dth connection [%s]" % (len(self.connections)+1, key))
except:
log.error("unable to delete this connection [%s]" % key)
pass
class OF_TcpServer(TcpServer):
def sendConnectionPkt(self, conn, pkt):
self.sendPkt(pkt, conn.dpid, conn.port)
def getConnection(self, packet, dpid, port, *args, **kwargs):
socPair = self.agent.extractConnection(packet)
key = self.agent.socPairInt(socPair)
conn = self.connections.get(key)
if not conn:
kwargs['dpid'] = dpid
kwargs['port'] = port
conn = self.createConnection(packet, socPair, *args, **kwargs)
if conn is None:
return conn
self.connections[key] = conn
log.debug("{OF_TcpServer} Adding the %dth connection [%s]" % (len(self.connections), key))
return conn
class TcpClient(Client, TcpConnection):
protocol = tcp
agent = TcpAgent
def __init__(self, src, dst, payload=''):
kwargs = {}
kwargs['my_mac'] = src[0]
kwargs['my_ip'] = src[1]
if len(src) == 3:
srcport = src[2]
kwargs['my_port'] = srcport
kwargs['other_mac'] = dst[0]
kwargs['other_ip'] = dst[1]
kwargs['other_port'] = dst[2]
kwargs['connection_over_cb'] = self.connectionClosed
kwargs['has_data_to_send_cb'] = self.connHasDataToSend
kwargs['payload'] = payload
self.d = None # deferred
TcpConnection.__init__(self, **kwargs)
Client.__init__(self)
def start(self):
tcpPkt = self.createSyn()
self.firstSYN = tcpPkt
packet = self.agent.buildFrameFromConn(self, tcpPkt)
self.sendPkt(packet)
def processPacket(self, packet, *args, **kwargs):
tcpPkt = self.agent.extract(packet)
if tcpPkt is None:
return None
if not self.matches(packet, *args, **kwargs):
return None
for f in self.filters:
packet = f.packetIn(packet)
if self.agent.isRst(tcpPkt) and not self.my_first_syn_acked:
self.doConnectionFailure()
return
self.processTcpPkt(packet, tcpPkt)
self.processTcpData(packet, tcpPkt, *args, **kwargs)
_tcpPkt = self.getReply(packet, *args, **kwargs)
if _tcpPkt is None:
return None
resp = self.agent.buildStandardTcpResponse(packet, _tcpPkt, payload=_tcpPkt.next, ipId=None if not self.ipId else self.ipId)
if self.ipId is None:
ipId = Ipv4Agent.extractId(resp)
if ipId == 0:
ipId = randint(0, 2**16-1)
self.ipId = ipId
self.ipId += 1
return resp
def matches(self, packet, *args, **kwargs):
src = (self.my_mac, self.my_ip, self.my_port)
dst = (self.other_mac, self.other_ip, self.other_port)
socPair = self.agent.extractConnection(packet)
if src != socPair[1]:
return False
if dst != socPair[0]:
return False
return True
def processTcpPkt(self, packet, tcpPkt):
ethPkt = packet
ipPkt = ethPkt.find('ipv4')
if tcpPkt.payload_len > 0 and not (ipPkt.iplen==tcpPkt.hdr_len+len(ipPkt.hdr(''))):
self.add_segment(TcpSegment(tcpPkt.seq, tcpPkt.next))
if self.agent.isFin(tcpPkt):
if not self.closed:
self.fin_received(tcpPkt.seq)
# remember window and latest ACK
self.window = max(1460, tcpPkt.win) # ignore requests to shrink the window below an MTU
if not self.agent.isAck(tcpPkt):
return
if not self.my_first_syn_acked:
self.my_first_syn_acked = True
self.my_syn_acked = True
self.need_to_send_ack = True
self.first_unacked_seq = tcpPkt.ack
self.next_seq_needed = tcpPkt.seq + 1
if self.agent.isFin(tcpPkt) and self.closed:
# it means we already sent a fin, ack and we just received a fin, ack
self.need_to_send_ack = True
self.last_seq_sent += 1
self.next_seq_needed += 1
self.set_ack(tcpPkt.ack)
else:
if self.my_first_syn_acked and not self.connected:
self.connected = True
core.callDelayed(0.01, self.connectionEstablished)
self.set_ack(tcpPkt.ack)
def processTcpData(self, packet, tcpPkt, *args, **kwargs):
if not self.has_ready_data():
return self
data = self.get_data()
self.payloadReceived(packet, data, *args, **kwargs)
self.segments = []
def getReply(self, packet, *args, **kwargs):
if not self.my_first_syn_acked:
return self.firstSYN
pkts = self.get_packets_to_send()
if not pkts:
return
return pkts[0]
def connHasDataToSend(self, conn):
if self != conn:
return None
pkts = self.get_packets_to_send()
if len(pkts) == 0:
return None
tcpPkt = pkts[0]
pkt = self.agent.buildFrameFromConn(self, tcpPkt)
self.sendPkt(pkt)
def connectionEstablished(self):
""" to be overriden """
# when syn, syn_ack, ack is finished
def connectionLost(self):
""" to be overriden """
def doConnectionFailure(self):
self.dead = True
self.connectionFailure()
def connectionFailure(self):
""" to be overriden """
def sendPayload(self, payload, *args, **kwargs):
i = 0
        payloadLength = len(payload)
        while i < payloadLength:
            #TODO change 1460 by the real value for this connection
            endOfSegment = min(i+1000, payloadLength)
dataToSend = payload[i:endOfSegment]
i = endOfSegment
tcpPkt = self.buildDataTransmissionAck(dataToSend)
packet = self.agent.buildFrameFromConn(self, tcpPkt)
self.sendPkt(packet)
self.last_seq_sent += len(dataToSend)
self.all_data_sent = True
tcpPkt = self.buildFin(self.last_seq_sent+1)
packet = self.agent.buildFrameFromConn(self, tcpPkt)
self.sendPkt(packet)
self.my_fin_sent = True
self.last_seq_sent += 1
self.payloadSent(payload, *args, **kwargs)
def connectionClosed(self, *args, **kwargs):
core.callDelayed(0.01, self.finished)
class OF_TcpClient(TcpClient):
def __init__(self, dpid, port, src, dst, payload=''):
self.dpid = dpid
self.port = port
TcpClient.__init__(self, src, dst, payload)
def matches(self, packet, dpid, port):
_dpid = getattr(self, 'dpid', None)
if _dpid is None:
self.dpid = dpid
if self.dpid != dpid:
return False
_port = getattr(self, 'port', None)
if _port is None:
self.port = port
if self.port != port:
return False
return TcpClient.matches(self, packet, dpid, port)
def sendPkt(self, pkt, *args, **kwargs):
if not self.sendCb:
return
TcpClient.sendPkt(self, pkt, self.dpid, self.port)
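# Illustrative sketch of how the client classes are meant to be specialised,
# assuming the surrounding POX/OpenFlow wiring supplies the sendCb callback and
# feeds frames into processPacket(); the hooks overridden below are the ones
# marked "to be overriden" above:
#
#   class HelloTcpClient(OF_TcpClient):
#       def connectionEstablished(self):
#           # handshake finished; push our payload (FIN is sent afterwards).
#           self.sendPayload("hello")
#
#       def connectionFailure(self):
#           log.warn("connection to %s:%s failed", self.other_ip, self.other_port)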
| gpl-3.0 | 1,546,076,728,354,864,000 | 27.365702 | 134 | 0.571127 | false | 3.753144 | false | false | false |
csningli/MultiAgent | examples/avoid_static_obstacle/static_sim.py | 1 | 2287 |
# MultiAgent 2.0
# (c) 2017-2018, NiL, [email protected]
import sys, math
sys.path.append("../..")
from mas.multiagent import *
from mas.extension import ShowLabelObject
POS_ERROR = 5
SPIN_SPEED = math.pi / 6.0
class SpinModule(ObjectModule) :
def act(self, resp) :
resp.add_msg(Message(key = "avel", value = SPIN_SPEED))
super(SpinModule, self).act(resp)
class AvoidObstacleAgent(Agent) :
def __init__(self, name) :
super(AvoidObstacleAgent, self).__init__(name)
self.mods = [RadarModule(), SpinModule()]
def get_focus(self) :
focus_info = super(AvoidObstacleAgent, self).get_focus()
pos = self.mem.read("pos", None)
detect = self.mem.read("radar_detect", None)
if detect is not None :
for i, block in enumerate(detect) :
if pos is None or abs(block[2] - pos[0]) > POS_ERROR or abs(block[3] - pos[1]) > POS_ERROR :
focus_info["block_%d" % i] = "(%.1f, %.1f)" % (block[2], block[3])
else :
focus_info["detect"] = "none"
return focus_info
def run_sim(filename = None) :
'''
run_sim(filename = None)
------------------------
filename : the name of the file to save the data; None by default.
'''
# create the oracle space
oracle = OracleSpace()
# create the context
context = Context(oracle = oracle)
    # create the schedule for adding agents during the run
schedule = Schedule()
# add objects and agents to the context
obt = Obstacle(name ="0", a = (50.0, -50.0), b = (50.0, 50.0), radius = 2.0)
context.add_obt(obt)
obj = ShowLabelObject(name = "0")
obj.pos = (0, 0)
context.add_obj(obj)
agent = AvoidObstacleAgent(name = "0")
agent.mem.reg("radar_dist", 100.0)
schedule.add_agent(agent)
# create the driver
driver = Driver(context = context, schedule = schedule)
# create the inspector
# inspector = Inspector(delay = 10)
# create the simulator
sim = Simulator(driver = driver)
print("Simulating")
sim.simulate(graphics = True, filename = filename)
if __name__ == '__main__' :
filename = None
if (len(sys.argv) > 1) :
filename = sys.argv[1]
run_sim(filename = filename)
| apache-2.0 | -4,195,351,703,851,307,000 | 23.591398 | 108 | 0.589418 | false | 3.314493 | false | false | false |
olivierverdier/sfepy | sfepy/mechanics/matcoefs.py | 1 | 12857 | # -*- coding: utf-8 -*-
from sfepy.base.base import *
##
# c: 22.07.2008
def youngpoisson_to_lame( young, poisson, plane = 'strain' ):
r"""
The relationship between Lame parameters and Young's modulus, Poisson's
ratio (see [1],[2]):
.. math::
\lambda = {\nu E \over (1+\nu)(1-2\nu)},\qquad \mu = {E \over 2(1+\nu)}
The plain stress hypothesis:
.. math::
\bar\lambda = {2\lambda\mu \over \lambda + 2\mu}
[1] I.S. Sokolnikoff: Mathematical Theory of Elasticity. New York, 1956.
[2] T.J.R. Hughes: The Finite Element Method, Linear Static and Dynamic
Finite Element Analysis. New Jersey, 1987.
"""
mu = young/(2.0*(1.0 + poisson))
lam = young*poisson/((1.0 + poisson)*(1.0 - 2.0*poisson))
if plane == 'stress':
lam = 2*lam*mu/(lam + 2*mu)
return lam, mu
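# Worked example (for illustration): for a steel-like material with
# E = 210 GPa and nu = 0.3 under the plane strain hypothesis,
#   mu  = 210e9 / (2 * 1.3)          ~ 8.08e10 Pa
#   lam = 210e9 * 0.3 / (1.3 * 0.4)  ~ 1.21e11 Pa
# i.e. youngpoisson_to_lame(210e9, 0.3) ~ (1.21e11, 8.08e10).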
##
# c: 22.07.2008
def stiffness_tensor_lame( dim, lam, mu ):
r"""
Stiffness tensor - using Lame coefficients
.. math::
{\bm D}_{(2D)} = \begin{bmatrix} \lambda + 2\mu & \lambda & 0\\
\lambda & \lambda + 2\mu & 0\\ 0 & 0 & \mu \end{bmatrix}
.. math::
{\bm D}_{(3D)} = \begin{bmatrix} \lambda + 2\mu & \lambda &
\lambda & 0 & 0 & 0\\ \lambda & \lambda + 2\mu & \lambda & 0 & 0 & 0 \\
\lambda & \lambda & \lambda + 2\mu & 0 & 0 & 0 \\ 0 & 0 & 0 & \mu & 0 &
0 \\ 0 & 0 & 0 & 0 & \mu & 0 \\ 0 & 0 & 0 & 0 & 0 & \mu\\ \end{bmatrix}
"""
sym = (dim + 1) * dim / 2
o = nm.array( [1.] * dim + [0.] * (sym - dim), dtype = nm.float64 )
oot = nm.outer( o, o )
return lam * oot + mu * nm.diag( o + 1.0 )
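# Worked example (for illustration): for dim = 2, lam = 1 and mu = 1 the
# symmetric storage gives o = [1, 1, 0], so stiffness_tensor_lame(2, 1.0, 1.0)
# evaluates to
#   [[3. 1. 0.]
#    [1. 3. 0.]
#    [0. 0. 1.]]
# i.e. lambda + 2*mu on the normal-strain diagonal, lambda off-diagonal and mu
# in the shear entry, matching the 2D matrix in the docstring above.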
##
# c: 22.07.2008
def stiffness_tensor_youngpoisson( dim, young, poisson, plane = 'strain' ):
lam, mu = youngpoisson_to_lame( young, poisson, plane )
return stiffness_tensor_lame( dim, lam, mu )
##
# c: 10.08.2009
def stiffness_tensor_lame_mixed( dim, lam, mu ):
r"""
Stiffness tensor - using Lame coefficients
.. math::
{\bm D}_{(2D)} = \begin{bmatrix} \widetilde\lambda + 2\mu &
\widetilde\lambda & 0\\ \widetilde\lambda & \widetilde\lambda + 2\mu &
0\\ 0 & 0 & \mu \end{bmatrix}
.. math::
{\bm D}_{(3D)} = \begin{bmatrix} \widetilde\lambda + 2\mu &
\widetilde\lambda & \widetilde\lambda & 0 & 0 & 0\\ \widetilde\lambda &
\widetilde\lambda + 2\mu & \widetilde\lambda & 0 & 0 & 0 \\
\widetilde\lambda & \widetilde\lambda & \widetilde\lambda + 2\mu & 0 &
0 & 0 \\ 0 & 0 & 0 & \mu & 0 & 0 \\ 0 & 0 & 0 & 0 & \mu & 0 \\ 0 & 0 &
0 & 0 & 0 & \mu\\ \end{bmatrix}
where
.. math::
\widetilde\lambda = {2\over 3} (\lambda - \mu)
"""
sym = (dim + 1) * dim / 2
o = nm.array( [1.] * dim + [0.] * (sym - dim), dtype = nm.float64 )
oot = nm.outer( o, o )
return 2.0/3.0*(lam-mu) * oot + mu * nm.diag( o + 1.0 )
##
# c: 10.08.2009
def stiffness_tensor_youngpoisson_mixed( dim, young, poisson, plane = 'strain' ):
lam, mu = youngpoisson_to_lame( young, poisson, plane )
return stiffness_tensor_lame_mixed( dim, lam, mu )
##
# c: 10.08.2009
def bulk_modulus_lame( lam, mu ):
r"""
Bulk modulus - using Lame coefficients
.. math::
\gamma = {1\over 3}(\lambda + 2\mu)
"""
return 1.0/3.0 * (2*mu + lam)
##
# c: 10.08.2009
def bulk_modulus_youngpoisson( young, poisson, plane = 'strain' ):
lam, mu = youngpoisson_to_lame( young, poisson, plane )
return bulk_modulus_lame( lam, mu )
elastic_constants_relations = {
}
class ElasticConstants(Struct):
r"""
Conversion formulas for various groups of elastic constants. The elastic
constants supported are:
- :math:`E` : Young's modulus
- :math:`\nu` : Poisson's ratio
- :math:`K` : bulk modulus
- :math:`\lambda` : Lamé's first parameter
- :math:`\mu, G` : shear modulus, Lamé's second parameter
- :math:`M` : P-wave modulus, longitudinal wave modulus
The elastic constants are referred to by the following keyword arguments:
young, poisson, bulk, lam, mu, p_wave.
Exactly two of them must be provided to the __init__() method.
Examples
--------
- basic usage::
>>> from sfepy.mechanics.matcoefs import ElasticConstants
>>> ec = ElasticConstants(lam=1.0, mu=1.5)
>>> ec.young
3.6000000000000001
>>> ec.poisson
0.20000000000000001
>>> ec.bulk
2.0
>>> ec.p_wave
4.0
>>> ec.get(['bulk', 'lam', 'mu', 'young', 'poisson', 'p_wave'])
[2.0, 1.0, 1.5, 3.6000000000000001, 0.20000000000000001, 4.0]
- reinitialize existing instance::
>>> ec.init(p_wave=4.0, bulk=2.0)
>>> ec.get(['bulk', 'lam', 'mu', 'young', 'poisson', 'p_wave'])
[2.0, 1.0, 1.5, 3.6000000000000001, 0.20000000000000001, 4.0]
"""
def __init__(self, young=None, poisson=None, bulk=None, lam=None,
mu=None, p_wave=None, _regenerate_relations=False):
"""
Set exactly two of the elastic constants, and compute the remaining.
"""
self.names = ['bulk', 'lam', 'mu', 'young', 'poisson', 'p_wave']
if _regenerate_relations:
self.relations = self._construct_relations()
else:
from elastic_constants import relations
self.relations = relations
## print sorted(self.relations.keys())
## print len(self.relations)
self.init(young=young, poisson=poisson, bulk=bulk, lam=lam,
mu=mu, p_wave=p_wave)
def _construct_relations(self):
"""
Construct the dictionary of all relations among the six elastic
constants and save it as `elastic_constants.py` module, that can be
imported for reuse. Users should not call this!
"""
import sympy as sm
relations = {}
def _expand_keys(sols):
for key, val in sols.iteritems():
if len(val) == 2 and (key.name == 'poisson'):
val = val[0]
else:
val = val[-1]
skey = tuple(sorted([ii.name for ii in val.atoms()
if ii.is_Symbol])) + (key.name,)
if skey in relations:
print '!', skey
relations[skey] = val
bulk, lam, mu, young, poisson, p_wave = sm.symbols(self.names, real=True)
_expand_keys(sm.solve(bulk - (lam + 2 * mu / 3)))
_expand_keys(sm.solve(young - (mu * (3 * lam + 2 * mu) / (lam + mu))))
_expand_keys(sm.solve(poisson - (lam / (2 * (lam + mu)))))
_expand_keys(sm.solve(p_wave - (lam + 2 * mu)))
_expand_keys(sm.solve(bulk - (young / (3 * (1 - 2 * poisson)))))
_expand_keys(sm.solve(p_wave - ((young * (1 - poisson))
/ ((1 + poisson) * (1 - 2 * poisson)))))
# Choose the correct root manually.
## relations[('p_wave', 'young', 'poisson')] \
## = (young - p_wave + (-10*p_wave*young + young**2 +
## 9*p_wave**2)**(0.5))/(4*p_wave)
_expand_keys(sm.solve(lam - (young * poisson
/ ((1 + poisson) * (1 - 2 * poisson)))))
# Choose the correct root.
## relations[('lam', 'young', 'poisson')] \
## = (lam + young - (2*lam*young + young**2 +
## 9*(lam**2))**(0.5))/(-4*lam)
_expand_keys(sm.solve(mu - (young / (2 * (1 + poisson)))))
_expand_keys(sm.solve(bulk - (young * mu / (3 * (3 * mu - young)))))
_expand_keys(sm.solve(p_wave - (mu * (4 * mu - young)
/ (3 * mu - young))))
_expand_keys(sm.solve(young - (9 * bulk * (bulk - lam)
/ (3 * bulk - lam))))
_expand_keys(sm.solve(poisson - (lam / (3 * bulk - lam))))
_expand_keys(sm.solve(p_wave - (3 * bulk - 2 * lam)))
_expand_keys(sm.solve(poisson - ((3 * bulk - 2 * mu)
/ (2 * (3 * bulk + mu)))))
_expand_keys(sm.solve(p_wave - (bulk + 4 * mu / 3)))
_expand_keys(sm.solve(p_wave - (lam * (1 - poisson) / poisson)))
_expand_keys(sm.solve(p_wave - (2 * mu * (1 - poisson)
/ (1 - 2 * poisson))))
_expand_keys(sm.solve(p_wave - (3 * bulk * (1 - poisson)
/ (1 + poisson))))
_expand_keys(sm.solve(p_wave - (3 * bulk * (3 * bulk + young)
/ (9 * bulk - young))))
_expand_keys(sm.solve(young - ((lam*p_wave + p_wave**2 - 2*lam**2)
/ (lam + p_wave))))
fd = open(os.path.join(os.path.dirname(__file__),
'elastic_constants.py'), 'w')
fd.write("""
from __future__ import division
import sympy as sm
names = ['bulk', 'lam', 'mu', 'young', 'poisson', 'p_wave']
bulk, lam, mu, young, poisson, p_wave = sm.symbols(names, real=True)
relations = {
%s
}
""" % ',\n'.join([' %s : %s' % (key, val)
for key, val in relations.iteritems()]))
fd.close()
return relations
def init(self, young=None, poisson=None, bulk=None, lam=None,
mu=None, p_wave=None):
"""
Set exactly two of the elastic constants, and compute the
remaining. (Re)-initializes the existing instance of ElasticConstants.
"""
Struct.__init__(self, young=young, poisson=poisson, bulk=bulk, lam=lam,
mu=mu, p_wave=p_wave)
values = {}
for key, val in self.__dict__.iteritems():
if (key in self.names) and (val is not None):
values[key] = val
known = values.keys()
if len(known) != 2:
raise ValueError('exactly two elastic constants must be provided!')
unknown = set(self.names).difference(known)
for name in unknown:
key = tuple(sorted(known)) + (name,)
val = float(self.relations[key].n(subs=values))
setattr(self, name, val)
def get(self, names):
"""
Get the named elastic constants.
"""
out = [getattr(self, name) for name in names]
return out
class TransformToPlane( Struct ):
    """Transformations of constitutive law coefficients of 3D problems to 2D."""
def __init__( self, iplane = None ):
"""`iplane` ... vector of indices denoting the plane, e.g.: [0, 1]"""
if iplane is None:
iplane = [0, 1]
# Choose the "master" variables and the "slave" ones
# ... for vectors
i_m = nm.sort( iplane )
i_s = nm.setdiff1d( nm.arange( 3 ), i_m )
# ... for second order tensors (symmetric storage)
i_ms = {(0, 1) : [0, 1, 3],
(0, 2) : [0, 2, 4],
(1, 2) : [1, 2, 5]}[tuple( i_m )]
i_ss = nm.setdiff1d( nm.arange( 6 ), i_ms )
Struct.__init__( self, iplane = iplane,
i_m = i_m, i_s = i_s,
i_ms = i_ms, i_ss = i_ss )
def tensor_plane_stress( self, c3 = None, d3 = None, b3 = None ):
"""Transforms all coefficients of the piezoelectric constitutive law
from 3D to plane stress problem in 2D: strain/stress ordering/ 11 22
33 12 13 23. If `d3` is None, uses only the stiffness tensor `c3`.
`c3` ... stiffness tensor
`d3` ... dielectric tensor
`b3` ... piezoelectric coupling tensor"""
mg = nm.meshgrid
cs = c3[mg(self.i_ss,self.i_ss)]
cm = c3[mg(self.i_ss,self.i_ms)].T
if d3 is None: # elasticity only.
A = cs
Feps = cm
Ainv = nm.linalg.inv( A )
c2 = c3[mg(self.i_ms,self.i_ms)] \
- nm.dot( Feps.T, nm.dot( Ainv, Feps ) )
return c2
else:
dm = d3[mg(self.i_s,self.i_m)].T
ds = d3[mg(self.i_s,self.i_s)]
ii = mg( self.i_s, self.i_ss )
A = nm.r_[nm.c_[cs, b3[ii]],
nm.c_[b3[ii].T, -ds]] #=> sym !!!
F = nm.r_[nm.c_[cm, b3[mg(self.i_m,self.i_ss)]],
nm.c_[b3[mg(self.i_s,self.i_ms)].T, -dm ]]
Feps = F[:,:3]
FE = F[:,3:]
Ainv = nm.linalg.inv( A )
c2 = c3[mg(self.i_ms,self.i_ms)] \
- nm.dot( Feps.T, nm.dot( Ainv, Feps ) )
d2 = d3[mg(self.i_m,self.i_m)] \
- nm.dot( FE.T, nm.dot( Ainv, FE ) )
b2 = b3[mg(self.i_m,self.i_ms)].T \
- nm.dot( FE.T, nm.dot( Ainv, Feps ) )
return c2, d2, b2
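# Illustrative check (a sketch, not an additional API): for an isotropic
# material the elasticity-only branch reproduces the plane stress correction
# used in youngpoisson_to_lame() above, e.g.
#
#   lam, mu = 1.0, 1.5
#   c3 = stiffness_tensor_lame(3, lam, mu)
#   c2 = TransformToPlane().tensor_plane_stress(c3)
#   # c2 should match stiffness_tensor_lame(2, 2*lam*mu/(lam + 2*mu), mu)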
| bsd-3-clause | -4,280,246,495,239,971,000 | 33.371658 | 81 | 0.496227 | false | 3.13231 | false | false | false |
opennode/waldur-mastermind | src/waldur_mastermind/booking/processors.py | 1 | 4711 | from django.db import transaction
from django.utils import timezone
from django.utils.dateparse import datetime_re, parse_datetime
from django.utils.translation import ugettext_lazy as _
from rest_framework.serializers import ValidationError
from waldur_mastermind.booking.utils import (
get_offering_bookings,
get_other_offering_booking_requests,
)
from waldur_mastermind.marketplace import models as marketplace_models
from waldur_mastermind.marketplace import processors
from .utils import TimePeriod, is_interval_in_schedules, sort_attributes_schedules
class BookingCreateProcessor(processors.BaseOrderItemProcessor):
def process_order_item(self, user):
with transaction.atomic():
sort_attributes_schedules(self.order_item.attributes)
resource = marketplace_models.Resource(
project=self.order_item.order.project,
offering=self.order_item.offering,
plan=self.order_item.plan,
limits=self.order_item.limits,
attributes=self.order_item.attributes,
name=self.order_item.attributes.get('name') or '',
state=marketplace_models.Resource.States.CREATING,
)
resource.init_cost()
resource.save()
resource.init_quotas()
self.order_item.resource = resource
self.order_item.save(update_fields=['resource'])
def validate_order_item(self, request):
schedules = self.order_item.attributes.get('schedules')
# We check that the schedule is set.
if not schedules:
raise ValidationError(_('Schedules are required.'))
if not len(schedules):
raise ValidationError(_('Schedules are required.'))
for period in schedules:
try:
start = period['start']
end = period['end']
if not start or not end:
raise ValidationError(
_(
'Value \'start\' or \'end\' does not exist in schedules item.'
)
)
except KeyError:
raise ValidationError(
_('Key \'start\' or \'end\' does not exist in schedules item.')
)
for value in [start, end]:
                match = datetime_re.match(value)
                if not match:
                    raise ValidationError(_('The value %s does not match the format.') % value)
                kw = match.groupdict()
if list(
filter(
lambda x: not kw[x],
['hour', 'month', 'second', 'year', 'tzinfo', 'day', 'minute'],
)
):
raise ValidationError(
_('The value %s does not match the format.') % value
)
if parse_datetime(start) < timezone.now():
raise ValidationError(_('Past slots are not available for selection.'))
# Check that the schedule is available for the offering.
offering = self.order_item.offering
offering_schedules = offering.attributes.get('schedules', [])
for period in schedules:
if not is_interval_in_schedules(
TimePeriod(period['start'], period['end']),
[TimePeriod(i['start'], i['end']) for i in offering_schedules],
):
raise ValidationError(
_(
'Time period from %s to %s is not available for selected offering.'
)
% (period['start'], period['end'])
)
# Check that there are no other bookings.
bookings = get_offering_bookings(offering)
for period in schedules:
if is_interval_in_schedules(
TimePeriod(period['start'], period['end']), bookings
):
raise ValidationError(
_('Time period from %s to %s is not available.')
% (period['start'], period['end'])
)
# Check that there are no other booking requests.
booking_requests = get_other_offering_booking_requests(self.order_item)
for period in schedules:
if is_interval_in_schedules(
TimePeriod(period['start'], period['end']), booking_requests
):
raise ValidationError(
_(
'Time period from %s to %s is not available. Other booking request exists.'
)
% (period['start'], period['end'])
)
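    # --- Editor's addition: illustrative note, not part of the original module. ---
    # The validation above assumes `order_item.attributes['schedules']` looks
    # roughly like the (hypothetical) payload below: a list of ISO 8601
    # datetime pairs with explicit timezone information.
    #
    #     attributes = {
    #         'name': 'Meeting room 1',
    #         'schedules': [
    #             {'start': '2020-02-12T02:00:00+03:00',
    #              'end': '2020-02-12T03:00:00+03:00'},
    #         ],
    #     }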
class BookingDeleteProcessor(processors.DeleteScopedResourceProcessor):
pass
| mit | 1,439,147,289,079,598,300 | 37.933884 | 99 | 0.545956 | false | 4.846708 | false | false | false |
frac/lettuce | tests/functional/output_features/success_table/success_table_steps.py | 1 | 1617 | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010> Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lettuce import step
from lettuce import world
from lettuce.terrain import before
from nose.tools import assert_equals
@before.all
def set_balance():
world.my_balance = 0
@step('I have (\d+) bucks')
def compare_bucks(step, cash):
assert_equals(world.my_balance, int(cash))
@step('I have these items')
def havetheseitems(step):
cars = {}
for data in step.hashes:
key = data['name']
value = int(data['price'])
cars[key] = value
world.cars = cars
@step('sell the "([^"]+)"')
def sell_item(step, name):
world.my_balance += world.cars[name]
del world.cars[name]
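# --- Editor's addition: illustrative note, not part of the original steps. ---
# A feature scenario these step definitions are meant to match might look
# roughly like this (hypothetical, not taken from the test suite):
#
#     Given I have 0 bucks
#     And I have these items
#       | name    | price |
#       | Porsche | 200   |
#     When I sell the "Porsche"
#     Then I have 200 bucks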
@step('my garage contains:')
def alsothese(step):
cars = {}
for data in step.hashes:
key = data['name']
value = int(data['price'])
cars[key] = value
assert_equals(cars, world.cars)
| gpl-3.0 | 2,192,169,248,575,762,400 | 28.925926 | 71 | 0.686881 | false | 3.380753 | false | false | false |
prculley/gramps | gramps/plugins/view/geofamily.py | 1 | 20779 | # -*- python -*-
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011-2016 Serge Noiraud
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Geography for one family
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import operator
from gi.repository import Gdk
KEY_TAB = Gdk.KEY_Tab
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
_LOG = logging.getLogger("GeoGraphy.geofamily")
#-------------------------------------------------------------------------
#
# Gramps Modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.lib import EventRoleType, EventType
from gramps.gen.config import config
from gramps.gen.datehandler import displayer
from gramps.gen.display.name import displayer as _nd
from gramps.gen.display.place import displayer as _pd
from gramps.gen.utils.place import conv_lat_lon
from gramps.gui.views.bookmarks import FamilyBookmarks
from gramps.plugins.lib.maps.geography import GeoGraphyView
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
_UI_DEF = '''\
<ui>
<menubar name="MenuBar">
<menu action="GoMenu">
<placeholder name="CommonGo">
<menuitem action="Back"/>
<menuitem action="Forward"/>
<separator/>
</placeholder>
</menu>
<menu action="EditMenu">
<placeholder name="CommonEdit">
<menuitem action="PrintView"/>
</placeholder>
</menu>
<menu action="BookMenu">
<placeholder name="AddEditBook">
<menuitem action="AddBook"/>
<menuitem action="EditBook"/>
</placeholder>
</menu>
</menubar>
<toolbar name="ToolBar">
<placeholder name="CommonNavigation">
<toolitem action="Back"/>
<toolitem action="Forward"/>
</placeholder>
<placeholder name="CommonEdit">
<toolitem action="PrintView"/>
</placeholder>
</toolbar>
</ui>
'''
# pylint: disable=no-member
# pylint: disable=unused-variable
# pylint: disable=unused-argument
#-------------------------------------------------------------------------
#
# GeoView
#
#-------------------------------------------------------------------------
class GeoFamily(GeoGraphyView):
"""
The view used to render family map.
"""
def __init__(self, pdata, dbstate, uistate, nav_group=0):
GeoGraphyView.__init__(self, _('Family places map'),
pdata, dbstate, uistate,
FamilyBookmarks,
nav_group)
self.dbstate = dbstate
self.uistate = uistate
self.place_list = []
self.place_without_coordinates = []
self.minlat = self.maxlat = self.minlon = self.maxlon = 0.0
self.minyear = 9999
self.maxyear = 0
self.nbplaces = 0
self.nbmarkers = 0
self.sort = []
self.additional_uis.append(self.additional_ui())
self.no_show_places_in_status_bar = False
self.cal = None
def get_title(self):
"""
Used to set the titlebar in the configuration window.
"""
return _('GeoFamily')
def get_stock(self):
"""
Returns the name of the stock icon to use for the display.
This assumes that this icon has already been registered
as a stock icon.
"""
return 'geo-show-family'
def get_viewtype_stock(self):
"""Type of view in category
"""
return 'geo-show-family'
def additional_ui(self):
"""
Specifies the UIManager XML code that defines the menus and buttons
associated with the interface.
"""
return _UI_DEF
def navigation_type(self):
"""
Indicates the navigation type. Navigation type can be the string
name of any of the primary objects.
"""
return 'Family'
def goto_handle(self, handle=None):
"""
Rebuild the tree with the given person handle as the root.
"""
self.build_tree()
def build_tree(self):
"""
This is called by the parent class when the view becomes visible. Since
all handling of visibility is now in rebuild_trees, see that for more
information.
"""
if not self.dbstate.is_open():
return
if self.uistate.get_active('Family'):
self._createmap(self.uistate.get_active('Family'))
else:
self._createmap(self.uistate.get_active('Person'))
def _createpersonmarkers(self, dbstate, person, comment, fam_id):
"""
Create all markers for the specified person.
"""
self.cal = config.get('preferences.calendar-format-report')
latitude = longitude = ""
if person:
# For each event, if we have a place, set a marker.
for event_ref in person.get_event_ref_list():
if not event_ref:
continue
role = event_ref.get_role()
event = dbstate.db.get_event_from_handle(event_ref.ref)
eyear = event.get_date_object().to_calendar(self.cal).get_year()
place_handle = event.get_place_handle()
if place_handle:
place = dbstate.db.get_place_from_handle(place_handle)
if place:
longitude = place.get_longitude()
latitude = place.get_latitude()
latitude, longitude = conv_lat_lon(latitude,
longitude, "D.D8")
descr = _pd.display(dbstate.db, place)
evt = EventType(event.get_type())
descr1 = _("%(eventtype)s : %(name)s") % {
'eventtype': evt,
'name': _nd.display(person)}
                        # place.get_longitude and place.get_latitude return
                        # strings. We only have usable coordinates when both
                        # values are non-empty.
if longitude and latitude:
if not self._present_in_places_list(2,
str(descr1 + descr + str(evt))):
self._append_to_places_list(descr,
str(descr1 + descr + str(evt)),
_nd.display(person),
latitude, longitude,
role, eyear,
event.get_type(),
person.gramps_id,
place.gramps_id,
event.gramps_id,
fam_id
)
else:
self._append_to_places_without_coord(
place.gramps_id, descr)
family_list = person.get_family_handle_list()
for family_hdl in family_list:
family = self.dbstate.db.get_family_from_handle(family_hdl)
if family is not None:
for event_ref in family.get_event_ref_list():
if event_ref:
event = dbstate.db.get_event_from_handle(
event_ref.ref)
role = event_ref.get_role()
if event.get_place_handle():
place_handle = event.get_place_handle()
if place_handle:
place = dbstate.db.get_place_from_handle(
place_handle)
if place:
longitude = place.get_longitude()
latitude = place.get_latitude()
(latitude,
longitude) = conv_lat_lon(latitude,
longitude,
"D.D8")
descr = _pd.display(dbstate.db, place)
evt = EventType(event.get_type())
(father_name,
mother_name) = self._get_father_and_mother_name(event)
descr1 = "%s : %s - " % (evt,
father_name)
descr1 = "%s%s" % (descr1, mother_name)
eyear = event.get_date_object().to_calendar(self.cal).get_year()
if longitude and latitude:
if not self._present_in_places_list(
2, str(descr1 + descr + str(evt))):
self._append_to_places_list(
descr,
str(descr1 + descr + str(evt)),
_nd.display(person),
latitude, longitude,
role, eyear,
event.get_type(),
person.gramps_id,
place.gramps_id,
event.gramps_id,
family.gramps_id
)
else:
self._append_to_places_without_coord(place.gramps_id, descr)
def family_label(self, family):
"""
Create the family label depending on existence of the father and mother
"""
if family is None:
return "Unknown"
father = mother = None
hdl = family.get_father_handle()
if hdl:
father = self.dbstate.db.get_person_from_handle(hdl)
hdl = family.get_mother_handle()
if hdl:
mother = self.dbstate.db.get_person_from_handle(hdl)
if father and mother:
label = _("%(gramps_id)s : %(father)s and %(mother)s") % {
'father' : _nd.display(father),
'mother' : _nd.display(mother),
'gramps_id' : family.gramps_id,
}
elif father:
label = "%(gramps_id)s : %(father)s" % {
'father' : _nd.display(father),
'gramps_id' : family.gramps_id,
}
elif mother:
label = "%(gramps_id)s : %(mother)s" % {
'mother' : _nd.display(mother),
'gramps_id' : family.gramps_id,
}
else:
# No translation for bare gramps_id
label = "%(gramps_id)s :" % {
'gramps_id' : family.gramps_id,
}
return label
def _createmap_for_one_family(self, family):
"""
Create all markers for one family : all event's places with a lat/lon.
"""
dbstate = self.dbstate
self.message_layer.add_message(
_("Family places for %s") % self.family_label(family))
person = None
if family:
person = dbstate.db.get_person_from_handle(
family.get_father_handle())
else:
return
family_id = family.gramps_id
if person is None: # family without father ?
handle = family.get_mother_handle()
if handle:
person = dbstate.db.get_person_from_handle(handle)
if person is None:
handle = self.uistate.get_active('Person')
if handle:
person = dbstate.db.get_person_from_handle(handle)
if person is not None:
family_list = person.get_family_handle_list()
if len(family_list) > 0:
fhandle = family_list[0] # first is primary
fam = dbstate.db.get_family_from_handle(fhandle)
father = mother = None
handle = fam.get_father_handle()
if handle:
father = dbstate.db.get_person_from_handle(handle)
if father:
comment = _("Father : %(id)s : %(name)s") % {
'id': father.gramps_id,
'name': _nd.display(father)}
self._createpersonmarkers(dbstate, father,
comment, family_id)
handle = fam.get_mother_handle()
if handle:
mother = dbstate.db.get_person_from_handle(handle)
if mother:
comment = _("Mother : %(id)s : %(name)s") % {
'id': mother.gramps_id,
'name': _nd.display(mother)}
self._createpersonmarkers(dbstate, mother,
comment, family_id)
index = 0
child_ref_list = fam.get_child_ref_list()
if child_ref_list:
for child_ref in child_ref_list:
child = dbstate.db.get_person_from_handle(child_ref.ref)
if child:
index += 1
comment = _("Child : %(id)s - %(index)d "
": %(name)s") % {
'id' : child.gramps_id,
'index' : index,
'name' : _nd.display(child)
}
self._createpersonmarkers(dbstate, child,
comment, family_id)
else:
comment = _("Person : %(id)s %(name)s has no family.") % {
'id' : person.gramps_id,
'name' : _nd.display(person)
}
self._createpersonmarkers(dbstate, person, comment, family_id)
def _createmap(self, handle):
"""
Create all markers for each people's event in the database which has
a lat/lon.
"""
if not handle:
return
self.place_list = []
self.place_without_coordinates = []
self.places_found = []
self.nbplaces = 0
self.nbmarkers = 0
self.minlat = self.maxlat = self.minlon = self.maxlon = 0.0
self.minyear = 9999
self.maxyear = 0
self.message_layer.clear_messages()
if self.dbstate.db.has_family_handle(handle):
family = self.dbstate.db.get_family_from_handle(handle)
self._createmap_for_one_family(family)
else:
person = self.dbstate.db.get_person_from_handle(handle)
if not person:
return
family_list = person.get_family_handle_list()
for family_hdl in family_list:
family = self.dbstate.db.get_family_from_handle(family_hdl)
if family is not None:
self._createmap_for_one_family(family)
self.sort = sorted(self.place_list,
key=operator.itemgetter(3, 4, 6)
)
self._create_markers()
def add_event_bubble_message(self, event, lat, lon, mark, menu):
"""
Add an item to the popup menu.
"""
self.itemoption = Gtk.Menu()
itemoption = self.itemoption
itemoption.show()
menu.set_submenu(itemoption)
modify = Gtk.MenuItem(label=_("Edit Family"))
modify.show()
modify.connect("activate", self.edit_family, event, lat, lon, mark)
itemoption.append(modify)
modify = Gtk.MenuItem(label=_("Edit Person"))
modify.show()
modify.connect("activate", self.edit_person, event, lat, lon, mark)
itemoption.append(modify)
modify = Gtk.MenuItem(label=_("Edit Event"))
modify.show()
modify.connect("activate", self.edit_event, event, lat, lon, mark)
itemoption.append(modify)
center = Gtk.MenuItem(label=_("Center on this place"))
center.show()
center.connect("activate", self.center_here, event, lat, lon, mark)
itemoption.append(center)
def bubble_message(self, event, lat, lon, marks):
"""
Add the popup menu.
"""
self.menu = Gtk.Menu()
menu = self.menu
menu.set_title("family")
message = ""
oldplace = ""
prevmark = None
for mark in marks:
if message != "":
add_item = Gtk.MenuItem(label=message)
add_item.show()
menu.append(add_item)
self.add_event_bubble_message(event, lat, lon,
prevmark, add_item)
if mark[0] != oldplace:
message = "%s :" % mark[0]
self.add_place_bubble_message(event, lat, lon,
marks, menu, message, mark)
oldplace = mark[0]
evt = self.dbstate.db.get_event_from_gramps_id(mark[10])
# format the date as described in preferences.
date = displayer.display(evt.get_date_object())
if date == "":
date = _("Unknown")
if mark[5] == EventRoleType.PRIMARY:
message = "(%s) %s : %s" % (date, mark[7], mark[1])
elif mark[5] == EventRoleType.FAMILY:
evt = self.dbstate.db.get_event_from_gramps_id(mark[10])
(father_name,
mother_name) = self._get_father_and_mother_name(evt)
message = "(%s) %s : %s - %s" % (date, mark[7],
father_name, mother_name)
else:
evt = self.dbstate.db.get_event_from_gramps_id(mark[10])
descr = evt.get_description()
if descr == "":
descr = _('No description')
message = "(%s) %s => %s" % (date, mark[5], descr)
prevmark = mark
add_item = Gtk.MenuItem(label=message)
add_item.show()
menu.append(add_item)
self.add_event_bubble_message(event, lat, lon, prevmark, add_item)
menu.popup(None, None, None,
None, event.button, event.time)
return 1
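    # --- Editor's addition: illustrative note, not part of the original file. ---
    # The mark[...] indexing in bubble_message() above assumes that each entry
    # appended via _append_to_places_list() (defined in GeoGraphyView, not shown
    # here) keeps the argument order used in _createpersonmarkers(), roughly:
    #
    #     mark = (place_description,        # mark[0]
    #             event_and_names_string,   # mark[1]
    #             person_display_name,      # mark[2]
    #             latitude, longitude,      # mark[3], mark[4]
    #             role, event_year,         # mark[5], mark[6]
    #             event_type,               # mark[7]
    #             person_id, place_id,      # mark[8], mark[9]
    #             event_id, family_id)      # mark[10], mark[11]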
def add_specific_menu(self, menu, event, lat, lon):
"""
Add specific entry to the navigation menu.
"""
return
def get_default_gramplets(self):
"""
Define the default gramplets for the sidebar and bottombar.
"""
return (("Family Filter",),
())
| gpl-2.0 | 6,691,864,510,758,257,000 | 40.39243 | 104 | 0.455893 | false | 4.626809 | false | false | false |
iScrE4m/RSES | tests/objects/test_stock.py | 1 | 2053 | # coding=utf-8
from pytest import raises
from rses.src.objects import stock
import rses_errors
def test_ingredient_type_create(ingredient_type_no_create):
ingredient_type = stock.IngredientType(name=ingredient_type_no_create)
assert ingredient_type.id
assert ingredient_type.name == ingredient_type_no_create
def test_ingredient_type_rename(ingredient_type, ingredient_type_new_name):
ingredient_type.name = ingredient_type_new_name
assert ingredient_type.name == ingredient_type_new_name
new = stock.IngredientType(ingredient_type_id=ingredient_type.id)
assert new.name == ingredient_type_new_name
assert ingredient_type == new
def test_ingredient_type_delete(ingredient_type):
ingredient_type.delete()
with raises(rses_errors.DoesNotExist) as e:
stock.IngredientType.load_by_name(ingredient_type.name)
assert ingredient_type.name in str(e)
def test_ingredient_create(ingredient_type,
ingredient_no_create,
ingredient_unit,
positive_float,
positive_float2,
positive_int):
"""
In arguments, ingredient_type has to come before ingredient_no_create,
otherwise teardown of ingredient_type will come first and will cascade
delete ingredient_unit
"""
ingredient = stock.Ingredient(name=ingredient_no_create,
unit=ingredient_unit,
ingredient_type=ingredient_type,
suggestion_threshold=positive_float,
rebuy_threshold=positive_float2,
durability=positive_int)
assert ingredient.name == ingredient_no_create
assert ingredient.unit == ingredient_unit
assert ingredient.type == ingredient_type
assert ingredient.suggestion_threshold == positive_float
assert ingredient.rebuy_threshold == positive_float2
assert ingredient.durability == positive_int
| mit | 7,995,718,306,625,569,000 | 39.254902 | 75 | 0.651242 | false | 3.725953 | false | false | false |
kimbauters/sparsepy | search_structure.py | 1 | 8409 | import textwrap # used for embellishing the Graphviz DOT file layout
class Node:
# since we will be using a lot of Node instances, optimise the memory use by relying on slots rather than a dict
__slots__ = ['problem', 'parent', 'action', 'effect', 'state', 'is_goal', 'children',
'visits', 'utility', 'untried_actions', 'tried_actions']
def __init__(self, problem, parent, action, effect, state):
self.problem = problem # the problem space in which this node is relevant
self.parent = parent # parent node of this node
self.action = action # action that was used to get from the parent node to this node
self.effect = effect # effect of the action that resulted in the current node
self.state = state # the state of the world in this node
self.is_goal = problem.goal_reached(self.state) # whether or not this node represents a goal state
self.children = dict() # dictionary of children of this node, key-ed by the action and effect to get to them
self.visits = 0 # number of times this node has been visited
self.utility = 0 # cumulative utility from going through this node
# the available actions for which the current state agrees with their preconditions
self.untried_actions = [a for a in problem.actions if
any(pos <= self.state and not (neg & self.state) for neg, pos in a.preconditions)]
self.tried_actions = {} # dictionary with the actions we tried so far as keys,
# and linked to a tuple consisting of their average reward and number of times we applied them: e.g.
# a1 -> (15, 2)
# a2 -> (10, 1)
def simulate_action(self, action, most_probable=False):
""" Execute the rollout of an action, *without* taking this action out of the list of untried actions.
:param action: the action to execute
:return: a new node obtained by applying the action in the current node """
if most_probable:
effect = action.effects[0]
else:
effect = action.outcome() # trigger one of the effects of the action
if (action, effect) in self.children: # check whether we already applied this action, and gotten this effect
child = self.children[(action, effect)] # we already encountered this state; retrieve it
else:
state = self.state - effect.delete | effect.add # compute the new state by using set operations
child = Node(self.problem, self, action, effect, state) # create a new node with state
self.children[(action, effect)] = child # add this child to the children of this node
return child
def perform_action(self, action):
""" Execute the rollout of an action, *with* taking this action out of the list of untried actions.
:param action: the action to execute
        :return: a new node obtained by applying the action in the current node
:raises: a ValueError if trying to perform an action that is already tried for this node """
self.untried_actions.remove(action) # remove the action from the list of untried actions
self.tried_actions[action] = (0, 0) # add the action to the sequence of actions we already tried
return self.simulate_action(action) # get and return (one of) the child(ren) as a result of applying the action
def rollout_actions(self, rollout_action, depth, horizon):
""" Organise a rollout from a given node to either a goal node or a leaf node (e.g. by hitting the horizon).
:param rollout_action: the heuristic to select the action to use for the rollout
:param depth: the current depth at which the rollout is requested
:param horizon: the maximum depth to consider
        :return: the node reached by the rollout and the depth at which the rollout stopped """
if self.is_goal: # check if we have hit a goal state
return self, depth
elif depth < horizon:
action = rollout_action(self) # use the heuristic to select the next action to perform
node = self.simulate_action(action, True) # simulate the execution of this action
return node.rollout_actions(rollout_action, depth + 1, horizon)
        else:  # the horizon has been reached; return the current node and the current depth
return self, depth
def update(self, discounting):
""" Traverse back up a branch to collect all rewards and to backpropagate these rewards to successor nodes.
:param discounting: the discounting factor to use when updating ancestor nodes """
node = self # set this node as the current node in the backpropagation
current_reward = 0 # initialise the reward to 0
while node is not None: # continue until we have processed the root node
current_reward *= discounting # discount the reward obtained in descendants
if node.is_goal: # check if this node is a goal state
current_reward += self.problem.goal_reward # if it is, assign to it the goal reward
if node.effect:
current_reward += node.effect.reward # add any rewards obtained associated with the effect
if not node.parent or node.action in node.parent.tried_actions: # only update the real non-simulated nodes
if node.parent: # check if it is not the root node; continue if not
utility, visits = node.parent.tried_actions[node.action] # get the action info from the parent
node.parent.tried_actions[node.action] = (utility + current_reward, visits + 1) # and update
node.utility += current_reward # update the total utility gathered in this node
node.visits += 1 # update the number of visits to this node
node = node.parent # move to the parent node
def create_graphviz(self, location="graphviz.dot"):
""" Produce the contents for a Graphviz DOT file representing the search tree as starting from this node.
:param location: the location of where to save the generated file.
:return: the location where the Graphviz DOT file has been saved """
output = "graph sparsepy {\n"
output += textwrap.indent(self.__graphviz(), " ")
output += "}"
with open(location, 'w') as file:
file.write(output)
return location
def __graphviz(self, name="0"):
""" Internal method used in the creation of the Graphviz DOT file. This method will be called recursively,
and only helps to fill the body specifications of the DOT file. """
output = 'decision_node' + str(name) # give a unique name to this node
output += ' [label="' + ', '.join(self.state) + '\n' + \
str('%0.2f' % self.utility) + ',' + str(self.visits) + '"]\n' # add the label to identify its state
next_id = 0
for key, child in self.children.items():
(action, effect) = key # extract the action out of the (action, effect) pair
if action in self.tried_actions: # if this is an action we actually performed, not just simulated: show it
output += 'action_node' + str(name) + action.name
output += '[label="' + action.name + '", shape=box]\n'
child_node_name = name + '_' + str(next_id)
output += child.__graphviz(child_node_name)
output += 'action_node' + str(name) + action.name + ' -- '
output += 'decision_node' + str(child_node_name) + ' [style=dashed, label="' + str(effect) + '"]\n'
next_id += 1
for action, info in self.tried_actions.items():
reward, visits = info
output += 'decision_node' + str(name) + ' -- action_node' + str(name) + action.name
output += ' [label="' + '%0.2f' % reward + ',' + str(visits) + '", penwidth="' + str(visits**(1/4)) + '"]\n'
return output
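# --- Editor's addition: a hedged usage sketch, not part of the original module. ---
# The tiny problem/action/effect stubs below are fabricated for illustration
# only; the real classes live elsewhere in sparsepy.
if __name__ == '__main__':
    class _Effect(object):
        def __init__(self, add, delete, reward):
            self.add, self.delete, self.reward = add, delete, reward

    class _Action(object):
        def __init__(self, name, positive_preconditions, effect):
            self.name = name
            # preconditions: iterable of (negative, positive) literal sets
            self.preconditions = [(frozenset(), frozenset(positive_preconditions))]
            self.effects = [effect]

        def outcome(self):
            return self.effects[0]

    class _Problem(object):
        goal_reward = 10

        def __init__(self):
            self.actions = [_Action('finish', {'start'},
                                    _Effect(frozenset({'goal'}), frozenset(), 1))]

        def goal_reached(self, state):
            return 'goal' in state

    root = Node(_Problem(), None, None, None, frozenset({'start'}))
    child = root.perform_action(root.untried_actions[0])
    child.update(discounting=0.9)
    print(root.utility, root.visits, child.is_goal)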
| mit | 5,978,570,223,246,047,000 | 68.663866 | 120 | 0.625996 | false | 4.255567 | false | false | false |
alexanderganderson/Diffusion-Probabilistic-Models | regression.py | 1 | 8365 | """
Defines the function approximators
"""
import numpy as np
import theano.tensor as T
# from theano.tensor.signal import downsample
from blocks.bricks import Activation, MLP, Initializable, application, Identity
from blocks.bricks.conv import ConvolutionalActivation
from blocks.initialization import IsotropicGaussian, Constant, Orthogonal
# TODO IsotropicGaussian init will be wrong scale for some layers
class LeakyRelu(Activation):
@application(inputs=['input_'], outputs=['output'])
def apply(self, input_):
return T.switch(input_ > 0, input_, 0.05*input_)
dense_nonlinearity = LeakyRelu()
# dense_nonlinearity = Tanh()
conv_nonlinearity = LeakyRelu()
class MultiScaleConvolution(Initializable):
def __init__(self, num_channels, num_filters, spatial_width, num_scales, filter_size, downsample_method='meanout', name=""):
"""
A brick implementing a single layer in a multi-scale convolutional network.
"""
super(MultiScaleConvolution, self).__init__()
self.num_scales = num_scales
self.filter_size = filter_size
self.num_filters = num_filters
self.spatial_width = spatial_width
self.downsample_method = downsample_method
self.children = []
print "adding MultiScaleConvolution layer"
# for scale in range(self.num_scales-1, -1, -1):
for scale in range(self.num_scales):
print "scale %d"%scale
conv_layer = ConvolutionalActivation(activation=conv_nonlinearity.apply,
filter_size=(filter_size,filter_size), num_filters=num_filters,
num_channels=num_channels, image_size=(spatial_width/2**scale, spatial_width/2**scale),
# assume images are spatially smooth -- in which case output magnitude scales with
# # filter pixels rather than square root of # filter pixels, so initialize
# accordingly.
weights_init=IsotropicGaussian(std=np.sqrt(1./(num_filters))/filter_size**2),
biases_init=Constant(0), border_mode='full', name=name+"scale%d"%scale)
self.children.append(conv_layer)
def downsample(self, imgs_in, scale):
"""
Downsample an image by a factor of 2**scale
"""
imgs = imgs_in.copy()
if scale == 0:
return imgs
# if self.downsample_method == 'maxout':
# print "maxout",
# imgs_maxout = downsample.max_pool_2d(imgs.copy(), (2**scale, 2**scale), ignore_border=False)
# else:
# print "meanout",
# imgs_maxout = self.downsample_mean_pool_2d(imgs.copy(), (2**scale, 2**scale))
num_imgs = imgs.shape[0].astype('int16')
num_layers = imgs.shape[1].astype('int16')
nlx0 = imgs.shape[2].astype('int16')
nlx1 = imgs.shape[3].astype('int16')
scalepow = np.int16(2**scale)
# downsample
imgs = imgs.reshape((num_imgs, num_layers, nlx0/scalepow, scalepow, nlx1/scalepow, scalepow))
imgs = T.mean(imgs, axis=5)
imgs = T.mean(imgs, axis=3)
return imgs
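    # --- Editor's addition: illustrative note, not part of the original file. ---
    # The reshape/mean trick above is ordinary block-mean (2**scale x 2**scale)
    # pooling; in plain numpy (outside Theano) it would read roughly:
    #     x = np.arange(16.).reshape(1, 1, 4, 4)
    #     x.reshape(1, 1, 2, 2, 2, 2).mean(axis=5).mean(axis=3)   # -> shape (1, 1, 2, 2)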
@application
def apply(self, X):
print "MultiScaleConvolution apply"
nsamp = X.shape[0].astype('int16')
Z = 0
overshoot = (self.filter_size - 1)/2
imgs_accum = 0 # accumulate the output image
for scale in range(self.num_scales-1, -1, -1):
# downsample image to appropriate scale
imgs_down = self.downsample(X, scale)
# do a convolutional transformation on it
conv_layer = self.children[scale]
# NOTE this is different than described in the paper, since each conv_layer
# includes a nonlinearity -- it's not just one nonlinearity at the end
imgs_down_conv = conv_layer.apply(imgs_down)
# crop the edge so it's the same size as the input at that scale
imgs_down_conv_croppoed = imgs_down_conv[:,:,overshoot:-overshoot,overshoot:-overshoot]
imgs_accum += imgs_down_conv_croppoed
if scale > 0:
# scale up by factor of 2
layer_width = self.spatial_width/2**scale
imgs_accum = imgs_accum.reshape((nsamp, self.num_filters, layer_width, 1, layer_width, 1))
imgs_accum = T.concatenate((imgs_accum, imgs_accum), axis=5)
imgs_accum = T.concatenate((imgs_accum, imgs_accum), axis=3)
imgs_accum = imgs_accum.reshape((nsamp, self.num_filters, layer_width*2, layer_width*2))
return imgs_accum/self.num_scales
class MultiLayerConvolution(Initializable):
def __init__(self, n_layers, n_hidden, spatial_width, n_colors, n_scales, filter_size=3):
"""
A brick implementing a multi-layer convolutional network.
TODO make this multi-scale multi-layer convolution
"""
super(MultiLayerConvolution, self).__init__()
self.children = []
num_channels = n_colors
for ii in xrange(n_layers):
conv_layer = MultiScaleConvolution(num_channels, n_hidden, spatial_width, n_scales, filter_size, name="layer%d_"%ii)
self.children.append(conv_layer)
num_channels = n_hidden
@application
def apply(self, X):
Z = X
for conv_layer in self.children:
Z = conv_layer.apply(Z)
return Z
class MLP_conv_dense(Initializable):
def __init__(self, n_layers_conv, n_layers_dense_lower, n_layers_dense_upper,
n_hidden_conv, n_hidden_dense_lower, n_hidden_dense_lower_output, n_hidden_dense_upper,
spatial_width, n_colors, n_scales, n_temporal_basis):
"""
The multilayer perceptron, that provides temporal weighting coefficients for mu and sigma
images. This consists of a lower segment with a convolutional MLP, and optionally with a
dense MLP in parallel. The upper segment then consists of a per-pixel dense MLP
(convolutional MLP with 1x1 kernel).
"""
super(MLP_conv_dense, self).__init__()
self.n_colors = n_colors
self.spatial_width = spatial_width
self.n_hidden_dense_lower = n_hidden_dense_lower
self.n_hidden_dense_lower_output = n_hidden_dense_lower_output
self.n_hidden_conv = n_hidden_conv
## the lower layers
self.mlp_conv = MultiLayerConvolution(n_layers_conv, n_hidden_conv, spatial_width, n_colors, n_scales)
self.children = [self.mlp_conv]
if n_hidden_dense_lower > 0 and n_layers_dense_lower > 0:
n_input = n_colors*spatial_width**2
n_output = n_hidden_dense_lower_output*spatial_width**2
self.mlp_dense_lower = MLP([dense_nonlinearity] * n_layers_conv,
[n_input] + [n_hidden_dense_lower] * (n_layers_conv-1) + [n_output],
name='MLP dense lower', weights_init=Orthogonal(), biases_init=Constant(0))
self.children.append(self.mlp_dense_lower)
else:
n_hidden_dense_lower_output = 0
## the upper layers (applied to each pixel independently)
n_output = n_colors*n_temporal_basis*2 # "*2" for both mu and sigma
self.mlp_dense_upper = MLP([dense_nonlinearity] * (n_layers_dense_upper-1) + [Identity()],
[n_hidden_conv+n_hidden_dense_lower_output] +
[n_hidden_dense_upper] * (n_layers_dense_upper-1) + [n_output],
name='MLP dense upper', weights_init=Orthogonal(), biases_init=Constant(0))
self.children.append(self.mlp_dense_upper)
@application
def apply(self, X):
"""
Take in noisy input image and output temporal coefficients for mu and sigma.
"""
Y = self.mlp_conv.apply(X)
Y = Y.dimshuffle(0,2,3,1)
if self.n_hidden_dense_lower > 0:
n_images = X.shape[0].astype('int16')
X = X.reshape((n_images, self.n_colors*self.spatial_width**2))
Y_dense = self.mlp_dense_lower.apply(X)
Y_dense = Y_dense.reshape((n_images, self.spatial_width, self.spatial_width,
self.n_hidden_dense_lower_output))
Y = T.concatenate([Y/T.sqrt(self.n_hidden_conv),
Y_dense/T.sqrt(self.n_hidden_dense_lower_output)], axis=3)
Z = self.mlp_dense_upper.apply(Y)
return Z
| mit | 2,050,625,259,363,093,200 | 42.341969 | 128 | 0.618649 | false | 3.549003 | false | false | false |
downpoured/labs_coordinate_pictures | src/tools/ben_python_img/img_tests.py | 1 | 15862 |
import img_utils
import img_convert_resize
import img_resize_keep_exif
import PIL
from PIL import Image
import sys
sys.path.append('bn_python_common.zip')
from bn_python_common import *
def img_utils_testGetMarkFromFilename():
# tests splitting a filename that contains the "__MARKAS__" marker.
assertEq(('/test/file.jpg', '123'), img_utils.getMarkFromFilename('/test/file__MARKAS__123.jpg'))
assertEq(('/test/file.also.jpg', '123'), img_utils.getMarkFromFilename('/test/file.also__MARKAS__123.jpg'))
assertEq(('/test/file.jpg', ''), img_utils.getMarkFromFilename('/test/file__MARKAS__.jpg'))
assertException(lambda: img_utils.getMarkFromFilename(
'/test/dirmark__MARKAS__b/file__MARKAS__123.jpg'), ValueError, 'Directories')
assertException(lambda: img_utils.getMarkFromFilename(
'/test/dirmark__MARKAS__b/file.jpg'), ValueError, 'Directories')
assertException(lambda: img_utils.getMarkFromFilename(
'/test/file__MARKAS__123__MARKAS__123.jpg'), ValueError, 'exactly one marker')
assertException(lambda: img_utils.getMarkFromFilename(
'/test/file.jpg'), ValueError, 'exactly one marker')
assertException(lambda: img_utils.getMarkFromFilename(
'/test/file__MARKAS__123.foo.jpg'), ValueError, 'after the marker')
def img_utils_testGetFilesWithWrongExtension(tmpDir):
# looks for files that do not have the given extension.
tmpDirExt = files.join(tmpDir, 'testWrongExtension')
files.makedirs(tmpDirExt)
files.writeall(files.join(tmpDirExt, 'a.jpg'), 'content')
files.writeall(files.join(tmpDirExt, 'B.JPG'), 'content')
files.writeall(files.join(tmpDirExt, 'c.jpg'), 'content')
files.writeall(files.join(tmpDirExt, 'd.txt'), 'content')
files.writeall(files.join(tmpDirExt, 'e'), 'content')
files.makedirs(tmpDirExt + '/subdir')
fnGetFiles = files.listfiles
setRet = img_utils.getFilesWrongExtension(tmpDirExt, fnGetFiles, 'jpg')
expected = [files.join(tmpDirExt, 'd.txt'), files.join(tmpDirExt, 'e')]
assertEq(expected, list(sorted(f[0] for f in setRet)))
def img_convert_testGetNewSizeFromResizeSpec():
# common valid cases
assertEq((50, 100), img_convert_resize.getNewSizeFromResizeSpec('50%', 100, 200))
assertEq((90, 180), img_convert_resize.getNewSizeFromResizeSpec('90%', 101, 201))
assertEq((80, 160), img_convert_resize.getNewSizeFromResizeSpec('80h', 100, 200))
assertEq((160, 80), img_convert_resize.getNewSizeFromResizeSpec('80h', 200, 100))
assertEq((5, 10), img_convert_resize.getNewSizeFromResizeSpec('5%', 100, 200))
# invalid spec
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('50x', 100, 200), ValueError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('50', 100, 200), ValueError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('0.5%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec(' 50%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('50% ', 100, 200), ValueError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('50%%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('50%50%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('0%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('00%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('h', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('1a0%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('1a0h', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('110%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('-10%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('-10h', 100, 200), AssertionError)
# cases not to resize.
assertEq((0, 0), img_convert_resize.getNewSizeFromResizeSpec('100%', 100, 200))
assertEq((0, 0), img_convert_resize.getNewSizeFromResizeSpec('101h', 100, 200))
assertEq((0, 0), img_convert_resize.getNewSizeFromResizeSpec('101h', 200, 100))
def img_resize_keep_exif_testActualFiles(tmpDir):
trace('img_resize_keep_exif_testActualFiles started.')
tmpDir = files.join(tmpDir, 'testResizeKeepExif')
files.makedirs(tmpDir)
# create initial files
im = createTestImage(96, 144, 1)
filenames = [files.join(tmpDir, 'a100p__MARKAS__100%.jpg'),
files.join(tmpDir, 'a50p__MARKAS__50%.jpg'),
files.join(tmpDir, 'a32h__MARKAS__32h.jpg'),
files.join(tmpDir, 'a200h__MARKAS__200h.jpg')]
for filename in filenames:
im.save(filename)
del im
for index, filename in enumerate(filenames):
assertEq((96, 144), img_utils.getImageDims(filename))
# set an obscure tag that won't be transferred
img_utils.setExifField(filename, 'ProfileCopyright', 'ObscureTagSet' + str(index))
assertEq('ObscureTagSet' + str(index), img_utils.readExifField(filename, 'ProfileCopyright'))
# set a common tag that will be transferred
img_utils.setExifField(filename, 'Make', 'TestingMake' + str(index))
assertEq('TestingMake' + str(index), img_utils.readExifField(filename, 'Make'))
# run the resizes. resizeAllAndKeepExif resizes based on the filename.
img_resize_keep_exif.resizeAllAndKeepExif(tmpDir,
recurse=False, storeOriginalFilename=True, storeExifFromOriginal=True, jpgHighQualityChromaSampling=False)
# check dimensions
assertEq((96, 144), img_utils.getImageDims(files.join(tmpDir, 'a100p.jpg')))
assertEq((48, 72), img_utils.getImageDims(files.join(tmpDir, 'a50p.jpg')))
assertEq((32, 48), img_utils.getImageDims(files.join(tmpDir, 'a32h.jpg')))
assertEq((96, 144), img_utils.getImageDims(files.join(tmpDir, 'a200h.jpg')))
# check common tag, should have been transferred
assertEq('TestingMake0', img_utils.readExifField(files.join(tmpDir, 'a100p.jpg'), 'Make'))
assertEq('TestingMake1', img_utils.readExifField(files.join(tmpDir, 'a50p.jpg'), 'Make'))
assertEq('TestingMake2', img_utils.readExifField(files.join(tmpDir, 'a32h.jpg'), 'Make'))
assertEq('TestingMake3', img_utils.readExifField(files.join(tmpDir, 'a200h.jpg'), 'Make'))
# check uncommon tag, should only be present for the ones moved instead of resized
assertEq('ObscureTagSet0', img_utils.readExifField(files.join(tmpDir, 'a100p.jpg'), 'ProfileCopyright'))
assertEq('', img_utils.readExifField(files.join(tmpDir, 'a50p.jpg'), 'ProfileCopyright'))
assertEq('', img_utils.readExifField(files.join(tmpDir, 'a32h.jpg'), 'ProfileCopyright'))
assertEq('ObscureTagSet3', img_utils.readExifField(files.join(tmpDir, 'a200h.jpg'), 'ProfileCopyright'))
# check that original filename is stored in exif data
assertEq('a100p.jpg', img_utils.readOriginalFilename(files.join(tmpDir, 'a100p.jpg')))
assertEq('a50p.jpg', img_utils.readOriginalFilename(files.join(tmpDir, 'a50p.jpg')))
assertEq('a32h.jpg', img_utils.readOriginalFilename(files.join(tmpDir, 'a32h.jpg')))
assertEq('a200h.jpg', img_utils.readOriginalFilename(files.join(tmpDir, 'a200h.jpg')))
expectedSizes = '''a100p.jpg|8524
a200h.jpg|8524
a200h__MARKAS__200h.jpg|8502
a32h.jpg|1335
a32h__MARKAS__32h.jpg|8502
a50p.jpg|2549
a50p__MARKAS__50%.jpg|8502'''.replace('\r\n', '\n')
resultSizes = '\n'.join([short + '|' + str(files.getsize(file))
for file, short in sorted(files.listfiles(tmpDir))])
assertEq(expectedSizes, resultSizes, 'current pillow version=%s' % PIL.PILLOW_VERSION)
trace('img_resize_keep_exif_testActualFiles passed.')
def img_resize_keep_exif_testCleanup(tmpDir):
# when the user has reviewed that the conversion looks correct, they'll run cleanup()
# which will discard the previous files with __MARKAS__.
trace('img_resize_keep_exif_testCleanup started.')
tmpDir = files.join(tmpDir, 'testCleanup')
files.makedirs(tmpDir)
files.writeall(files.join(tmpDir, 'a1.jpg'), '')
files.writeall(files.join(tmpDir, 'a1__MARKAS__50%.jpg'), '')
files.writeall(files.join(tmpDir, 'a2.jpg'), '')
files.writeall(files.join(tmpDir, 'a2__MARKAS__200h.jpg'), '')
files.writeall(files.join(tmpDir, 'a3.png'), '')
files.writeall(files.join(tmpDir, 'a3__MARKAS__100%.png'), '')
# file with no corresponding markas should not be deleted.
files.writeall(files.join(tmpDir, 'a4.jpg'), '')
# files with no corresponding converted file should not be deleted.
files.writeall(files.join(tmpDir, 'a5__MARKAS__100%.jpg'), '')
files.writeall(files.join(tmpDir, 'a6__MARKAS__.jpg'), '')
img_resize_keep_exif.cleanup(tmpDir, recurse=False, prompt=False)
expectedSizes = '''a1.jpg|0
a2.jpg|0
a3.png|0
a3__MARKAS__100%.png|0
a4.jpg|0
a5__MARKAS__100%.jpg|0
a6__MARKAS__.jpg|0'''.replace('\r\n', '\n')
resultSizes = '\n'.join([short + '|' + str(files.getsize(file))
for file, short in sorted(files.listfiles(tmpDir))])
assertEq(expectedSizes, resultSizes)
trace('img_resize_keep_exif_testCleanup passed.')
def assertExceptionOrFalse(fn, excType):
ret = False
try:
ret = fn()
except:
e = sys.exc_info()[1]
assertTrue(isinstance(e, excType), 'wrong exc type')
assertTrue(not ret)
def img_resize_keep_exif_testExifErrorsShouldRaise(tmpDir):
# most exif operations on an invalid jpg should raise PythonImgExifError
files.writeall(files.join(tmpDir, 'invalidjpg.jpg'), 'not a valid jpg')
files.writeall(files.join(tmpDir, 'invalidjpg2.jpg'), 'not a valid jpg')
assertExceptionOrFalse(lambda: not img_utils.readOriginalFilename(
files.join(tmpDir, 'invalidjpg.jpg')), img_utils.PythonImgExifError)
assertException(lambda: img_utils.stampJpgWithOriginalFilename(
files.join(tmpDir, 'invalidjpg.jpg'), 'test'), img_utils.PythonImgExifError)
assertException(lambda: img_utils.transferMostUsefulExifTags(
files.join(tmpDir, 'invalidjpg.jpg'),
files.join(tmpDir, 'invalidjpg2.jpg')), img_utils.PythonImgExifError)
assertException(lambda: img_utils.removeResolutionTags(
files.join(tmpDir, 'invalidjpg.jpg')), img_utils.PythonImgExifError)
class RNG(object):
# so that same sequence is generated regardless of Python version
def __init__(self, seed=0):
self.previous = seed
def next(self):
        # use constants from glibc's rand()
modulus = 2**31 - 1
a, c = 1103515245, 12345
ret = (self.previous * a + c) % modulus
self.previous = ret
return ret
def createTestImage(width, height, seed):
rng = RNG(seed)
im = Image.new("RGB", (width, height))
for y in xrange(height):
for x in xrange(width):
v = rng.next() % 256
im.putpixel((x, y), (v, v, v))
return im
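# --- Editor's addition: a hedged usage sketch, not part of the original tests. ---
# createTestImage() is deterministic for a given seed, so two images built with
# the same arguments should be pixel-identical:
#
#     im_a = createTestImage(16, 16, seed=1)
#     im_b = createTestImage(16, 16, seed=1)
#     assert list(im_a.getdata()) == list(im_b.getdata())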
def testCombinatoricImageConversion(tmpDir, testImage):
# go from each format to every other format!
# note: bmp should be first in the list
formats = ['bmp', 'png', 'jpg', 'webp']
jpgQuality = 100
if not getInputBool('run combinatoricImageConversionTest?'):
return
for format in formats:
startfile = files.join(tmpDir, 'start.' + format)
if format == 'bmp':
testImage.save(startfile)
else:
img_convert_resize.convertOrResizeImage(files.join(tmpDir, 'start.bmp'),
startfile, jpgQuality=jpgQuality)
for outformat in formats:
if outformat != format:
outfile = startfile + '.' + outformat
assertTrue(not files.exists(outfile))
img_convert_resize.convertOrResizeImage(startfile, outfile, jpgQuality=jpgQuality)
assertTrue(files.exists(outfile))
expectedSizes = '''start.bmp|43254
start.bmp.jpg|15580
start.bmp.png|39430
start.bmp.webp|14454
start.jpg|15580
start.jpg.bmp|43254
start.jpg.png|39483
start.jpg.webp|14454
start.png|39430
start.png.bmp|43254
start.png.jpg|15580
start.png.webp|14454
start.webp|14454
start.webp.bmp|43254
start.webp.jpg|15580
start.webp.png|22366'''.replace('\r\n', '\n')
resultSizes = '\n'.join([short + '|' + str(files.getsize(file))
for file, short in sorted(files.listfiles(tmpDir)) if short.startswith('start')])
assertEq(expectedSizes, resultSizes)
# are bmps equivalent
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp'), files.join(tmpDir, 'start.png.bmp')))
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp'), files.join(tmpDir, 'start.webp.bmp')))
# are jpgs equivalent
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp.jpg'), files.join(tmpDir, 'start.jpg')))
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp.jpg'), files.join(tmpDir, 'start.png.jpg')))
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp.jpg'), files.join(tmpDir, 'start.webp.jpg')))
# are webps equivalent
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp.webp'), files.join(tmpDir, 'start.png.webp')))
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp.webp'), files.join(tmpDir, 'start.webp')))
# are pngs equivalent
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp.png'), files.join(tmpDir, 'start.png')))
# png written by dwebp is different, but it should still roundtrip
img_convert_resize.convertOrResizeImage(files.join(tmpDir, 'start.webp.png'), files.join(tmpDir, 'start.webp.png.bmp'))
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp'), files.join(tmpDir, 'start.webp.png.bmp')))
def testJpgQualities(tmpDir, testImage):
# simply write several jpgs at different qualities, and make sure the file sizes are as expected.
tmpDir = files.join(tmpDir, 'testJpgQuality')
files.makedirs(tmpDir)
testImage.save(files.join(tmpDir, 'start.bmp'))
qualities = [100, 90, 60, 10]
for qual in qualities:
img_convert_resize.convertOrResizeImage(files.join(tmpDir, 'start.bmp'),
files.join(tmpDir, 'q%d.jpg'%qual), jpgQuality=qual)
expectedSizes = '''q10.jpg|993
q100.jpg|15580
q60.jpg|5120
q90.jpg|9406
start.bmp|43254'''.replace('\r\n', '\n')
resultSizes = '\n'.join([short + '|' + str(files.getsize(file))
for file, short in sorted(files.listfiles(tmpDir))])
assertEq(expectedSizes, resultSizes)
def img_convert_resize_tests(tmpDir):
width, height = 120, 120
testImage = createTestImage(width, height, 1)
testCombinatoricImageConversion(tmpDir, testImage)
testJpgQualities(tmpDir, testImage)
if __name__ == '__main__':
# passes on pillow 3.2, 3.3, 4.0
tmpDir = files.join(img_utils.getTempLocation(), 'testimgconvert')
if files.isdir(tmpDir):
files.rmtree(tmpDir)
files.makedirs(tmpDir)
try:
img_utils_testGetMarkFromFilename()
img_utils_testGetFilesWithWrongExtension(tmpDir)
img_resize_keep_exif_testActualFiles(tmpDir)
img_resize_keep_exif_testCleanup(tmpDir)
img_resize_keep_exif_testExifErrorsShouldRaise(tmpDir)
img_convert_testGetNewSizeFromResizeSpec()
img_convert_resize_tests(tmpDir)
finally:
files.rmtree(tmpDir)
| gpl-3.0 | 5,360,751,421,662,296,000 | 48.26087 | 123 | 0.696129 | false | 3.27119 | true | false | false |
sunweaver/ganetimgr | apply/urls/user.py | 1 | 2001 | # -*- coding: utf-8 -*- vim:fileencoding=utf-8:
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.conf.urls.defaults import patterns, url
from apply import views
urlpatterns = patterns(
'',
url(r'^info/(?P<type>\w+)/(?P<usergroup>[\w\.\@-]+)/?$', views.user_info, name="user-info"),
url(r'^idle/$', views.idle_accounts, name="idle_accounts"),
url(r'^profile/$', views.profile, name="profile"),
url(r'^mail_change/$', views.mail_change, name="mail-change"),
url(r'^name_change/$', views.name_change, name="name-change"),
url(r'^other_change/$', views.other_change, name="other-change"),
url(r'^keys/$', views.user_keys, name="user-keys"),
url(r'^keys/delete/(?P<key_id>\d+)?$', views.delete_key, name="delete-key"),
url(r'^login/', 'django.contrib.auth.views.login', {'template_name': 'users/login.html'}, name="login"),
url(r'^logout/', 'django.contrib.auth.views.logout', {'next_page': '/'}, name="logout"),
url(r'^pass_change/$', 'django.contrib.auth.views.password_change', {'template_name':'users/pass_change.html', 'post_change_redirect':'done'}, name="pass_change"),
url(r'^pass_change/done/$', 'django.contrib.auth.views.password_change_done', {'template_name':'users/pass_change_done.html'}, name="pass_change_done" ),
url(r'^pass_change/notify/$', views.pass_notify, name="pass_change_notify"),
)
| gpl-3.0 | -2,009,622,974,093,488,000 | 54.583333 | 167 | 0.684658 | false | 3.403061 | false | false | false |
luisxiaomai/robotframework-anywherelibrary | src/AnywhereLibrary/base/logging.py | 1 | 1083 | import os
#from robot.variables import GLOBAL_VARIABLES
from robot.libraries.BuiltIn import BuiltIn
from robot.api import logger
from keywordgroup import KeywordGroup
class Logging(KeywordGroup):
# Private
def _debug(self, message):
logger.debug(message)
    def _get_log_dir(self):
        # GLOBAL_VARIABLES is no longer importable from recent Robot Framework
        # releases (its import is commented out above), so read the variables
        # through the BuiltIn library instead.
        logfile = BuiltIn().get_variable_value('${LOG FILE}')
        if logfile != 'NONE':
            return os.path.dirname(logfile)
        return BuiltIn().get_variable_value('${OUTPUTDIR}')
def _html(self, message):
logger.info(message, True, False)
def _info(self, message):
logger.info(message)
def _log(self, message, level='INFO'):
level = level.upper()
if (level == 'INFO'): self._info(message)
elif (level == 'DEBUG'): self._debug(message)
elif (level == 'WARN'): self._warn(message)
elif (level == 'HTML'): self._html(message)
        elif (level == 'ERROR'): self._error(message)
def _error(self,message):
raise AssertionError(message)
def _warn(self, message):
logger.warn(message)
| mit | -5,113,758,825,957,334,000 | 27.5 | 53 | 0.615882 | false | 3.909747 | false | false | false |
regebro/hovercraft | hovercraft/generate.py | 1 | 8341 | import os
import re
import shutil
from lxml import etree, html
from pkg_resources import resource_string
from .parse import rst2xml, SlideMaker
from .position import position_slides
from .template import (
Template,
CSS_RESOURCE,
JS_RESOURCE,
JS_POSITION_HEADER,
JS_POSITION_BODY,
OTHER_RESOURCE,
DIRECTORY_RESOURCE,
)
class ResourceResolver(etree.Resolver):
def resolve(self, url, pubid, context):
if url.startswith("resource:"):
prefix, filename = url.split(":", 1)
return self.resolve_string(resource_string(__name__, filename), context)
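# --- Editor's addition: illustrative note, not part of the original module. ---
# The resolver above lets the XSL stylesheets reference files bundled with the
# package through a pseudo-URL, e.g. something like:
#
#     <xsl:import href="resource:templates/reST.xsl"/>
#
# (the path shown here is illustrative; the actual template layout may differ).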
def rst2html(
filepath,
template_info,
auto_console=False,
skip_help=False,
skip_notes=False,
mathjax=False,
slide_numbers=False,
):
# Read the infile
with open(filepath, "rb") as infile:
rststring = infile.read()
presentation_dir = os.path.split(filepath)[0]
# First convert reST to XML
xml, dependencies = rst2xml(rststring, filepath)
tree = etree.fromstring(xml)
# Fix up the resulting XML so it makes sense
sm = SlideMaker(tree, skip_notes=skip_notes)
tree = sm.walk()
# Pick up CSS information from the tree:
for attrib in tree.attrib:
if attrib.startswith("css"):
if "-" in attrib:
dummy, media = attrib.split("-", 1)
else:
media = "screen,projection"
css_files = tree.attrib[attrib].split()
for css_file in css_files:
if media in ("console", "preview"):
# The "console" media is used to style the presenter
# console and does not need to be included in the header,
# but must be copied. So we add it as a non css file,
# even though it's a css-file.
template_info.add_resource(
os.path.abspath(os.path.join(presentation_dir, css_file)),
OTHER_RESOURCE,
target=css_file,
)
else:
# Add as a css resource:
template_info.add_resource(
os.path.abspath(os.path.join(presentation_dir, css_file)),
CSS_RESOURCE,
target=css_file,
extra_info=media,
)
elif attrib.startswith("js"):
if attrib == "js-header":
media = JS_POSITION_HEADER
else:
# Put javascript in body tag as default.
media = JS_POSITION_BODY
js_files = tree.attrib[attrib].split()
for js_file in js_files:
template_info.add_resource(
os.path.abspath(os.path.join(presentation_dir, js_file)),
JS_RESOURCE,
target=js_file,
extra_info=media,
)
if sm.need_mathjax and mathjax:
if mathjax.startswith("http"):
template_info.add_resource(
None, JS_RESOURCE, target=mathjax, extra_info=JS_POSITION_HEADER
)
else:
# Local copy
template_info.add_resource(mathjax, DIRECTORY_RESOURCE, target="mathjax")
template_info.add_resource(
None,
JS_RESOURCE,
target="mathjax/MathJax.js?config=TeX-MML-AM_CHTML",
extra_info=JS_POSITION_HEADER,
)
# Position all slides
position_slides(tree)
# Add the template info to the tree:
tree.append(template_info.xml_node())
    # If the console should open automatically, set an attribute on the document:
if auto_console:
tree.attrib["auto-console"] = "True"
    # If the help popup should be skipped, set an attribute on the document:
if skip_help:
tree.attrib["skip-help"] = "True"
# If the slide numbers should be displayed, set an attribute on the document:
if slide_numbers:
tree.attrib["slide-numbers"] = "True"
# We need to set up a resolver for resources, so we can include the
# reST.xsl file if so desired.
parser = etree.XMLParser()
parser.resolvers.add(ResourceResolver())
# Transform the tree to HTML
xsl_tree = etree.fromstring(template_info.xsl, parser)
transformer = etree.XSLT(xsl_tree)
tree = transformer(tree)
result = html.tostring(tree)
return template_info.doctype + result, dependencies
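# --- Editor's addition: a hedged usage sketch, not part of the original module. ---
# rst2html() is normally driven by generate() below; called directly it looks
# roughly like this (template name and file are illustrative):
#
#     template = Template('default')
#     html_bytes, deps = rst2html('talk.rst', template)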
def copy_resource(filename, sourcedir, targetdir):
if filename[0] == "/" or ":" in filename:
# Absolute path or URI: Do nothing
return None # No monitoring needed
sourcepath = os.path.join(sourcedir, filename)
targetpath = os.path.join(targetdir, filename)
if os.path.exists(targetpath) and os.path.getmtime(sourcepath) <= os.path.getmtime(
targetpath
):
# File has not changed since last copy, so skip.
return sourcepath # Monitor this file
targetdir = os.path.split(targetpath)[0]
if not os.path.exists(targetdir):
os.makedirs(targetdir)
shutil.copy2(sourcepath, targetpath)
return sourcepath # Monitor this file
def generate(args):
"""Generates the presentation and returns a list of files used"""
source_files = {args.presentation}
# Parse the template info
template_info = Template(args.template)
if args.css:
presentation_dir = os.path.split(args.presentation)[0]
target_path = os.path.relpath(args.css, presentation_dir)
template_info.add_resource(
args.css, CSS_RESOURCE, target=target_path, extra_info="all"
)
source_files.add(args.css)
if args.js:
presentation_dir = os.path.split(args.presentation)[0]
target_path = os.path.relpath(args.js, presentation_dir)
template_info.add_resource(
args.js, JS_RESOURCE, target=target_path, extra_info=JS_POSITION_BODY
)
source_files.add(args.js)
# Make the resulting HTML
htmldata, dependencies = rst2html(
args.presentation,
template_info,
args.auto_console,
args.skip_help,
args.skip_notes,
args.mathjax,
args.slide_numbers,
)
source_files.update(dependencies)
# Write the HTML out
if not os.path.exists(args.targetdir):
os.makedirs(args.targetdir)
with open(os.path.join(args.targetdir, "index.html"), "wb") as outfile:
outfile.write(htmldata)
# Copy supporting files
source_files.update(template_info.copy_resources(args.targetdir))
# Copy files from the source:
sourcedir = os.path.split(os.path.abspath(args.presentation))[0]
tree = html.fromstring(htmldata)
for image in tree.iterdescendants("img"):
filename = image.attrib["src"]
source_files.add(copy_resource(filename, sourcedir, args.targetdir))
for source in tree.iterdescendants('source'):
filename = source.attrib['src']
source_files.add(copy_resource(filename, sourcedir, args.targetdir))
RE_CSS_URL = re.compile(br"""url\(['"]?(.*?)['"]?[\)\?\#]""")
# Copy any files referenced by url() in the css-files:
for resource in template_info.resources:
if resource.resource_type != CSS_RESOURCE:
continue
# path in CSS is relative to CSS file; construct source/dest accordingly
css_base = template_info.template_root if resource.is_in_template else sourcedir
css_sourcedir = os.path.dirname(os.path.join(css_base, resource.filepath))
css_targetdir = os.path.dirname(
os.path.join(args.targetdir, resource.final_path())
)
uris = RE_CSS_URL.findall(template_info.read_data(resource))
uris = [uri.decode() for uri in uris]
if resource.is_in_template and template_info.builtin_template:
for filename in uris:
template_info.add_resource(
filename, OTHER_RESOURCE, target=css_targetdir, is_in_template=True
)
else:
for filename in uris:
source_files.add(copy_resource(filename, css_sourcedir, css_targetdir))
# All done!
return {os.path.abspath(f) for f in source_files if f}
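# Usage sketch (illustrative; the argparse wiring lives elsewhere in the
# package, and the field values below are hypothetical):
#
#   args = argparse.Namespace(
#       presentation='slides.rst', template='default', css=None, js=None,
#       targetdir='build', auto_console=False, skip_help=False,
#       skip_notes=False, mathjax='MathJax/MathJax.js', slide_numbers=False)
#   monitored_files = generate(args)   # returns the set of source files used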
| mit | -971,954,912,299,686,000 | 33.754167 | 88 | 0.602686 | false | 4.010096 | false | false | false |
GluuFederation/cluster-tools | recovery/recovery.py | 1 | 10339 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2016 Gluu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import logging
import os
import socket
import subprocess
import sys
import time
DATABASE_URI = "/var/lib/gluuengine/db/shared.json"
DATABASE_URI_COMPAT = "/var/lib/gluu-cluster/db/shared.json"
RECOVERY_PRIORITY_CHOICES = {
"ldap": 1,
"oxauth": 2,
"oxtrust": 3,
"oxidp": 4,
"nginx": 5,
}
logger = logging.getLogger("recovery")
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
fmt = logging.Formatter('[%(levelname)s] %(message)s')
ch.setFormatter(fmt)
logger.addHandler(ch)
def load_database():
"""Loads JSON-based database as Python object.
"""
data = []
if not any(map(os.path.exists, [DATABASE_URI, DATABASE_URI_COMPAT])):
logger.warn("unable to read {} or {}".format(DATABASE_URI, DATABASE_URI_COMPAT)) # noqa
sys.exit(1)
with open(DATABASE_URI) as fp:
data = json.loads(fp.read())
return data
def get_current_cluster():
"""Gets a cluster.
"""
data = load_database()
clusters = [item for _, item in data.get("clusters", {}).iteritems()]
try:
cluster = clusters[0]
except IndexError:
cluster = {}
return cluster
def get_node(hostname=""):
"""Gets node based.
:param hostname: Hostname; if omitted, will check for FQDN or hostname
from socket connection.
"""
data = load_database()
nodes = [
item for _, item in data.get("nodes", {}).iteritems()
if item["name"] in (hostname, socket.getfqdn(), socket.gethostname(),)
]
try:
node = nodes[0]
except IndexError:
node = {}
return node
def get_containers(node_id):
"""Gets all containers belong to certain node.
:param node_id: ID of the node.
"""
data = load_database()
containers = []
for _, item in data.get("containers", {}).iteritems():
if item["node_id"] == node_id and item["state"] == "SUCCESS":
# adds recovery_priority
item["recovery_priority"] = RECOVERY_PRIORITY_CHOICES.get(
item["type"], 0
)
containers.append(item)
return containers
def safe_subprocess_exec(cmd):
"""Runs shell command safely.
:param cmd: String of command.
"""
cmdlist = cmd.strip().split()
ppn = subprocess.Popen(
cmdlist,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = ppn.communicate()
return out.strip(), err.strip(), ppn.returncode
def container_stopped(container_id):
"""Checks whether a container is stopped.
:param container_id: ID of the container assigned by docker daemon.
"""
out, _, _ = safe_subprocess_exec("docker inspect {}".format(container_id))
data = json.loads(out)
return data[0]["State"]["Running"] is False
def container_exists(container_id):
"""Checks whether a container exists.
:param container_id: ID of the container assigned by docker daemon.
"""
out, _, _ = safe_subprocess_exec("docker inspect {}".format(container_id))
data = json.loads(out)
return len(data) > 0
def restart_container(container_id):
"""Restarts a container regardless its state.
:param container_id: ID of the container assigned by docker daemon.
"""
return safe_subprocess_exec("docker restart {}".format(container_id))
def add_dns(container_id, hostname):
"""Adds DNS entry to weavedns.
:param container_id: ID of the container assigned by docker daemon.
:param hostname: Hostname that should be added into weavedns.
"""
return safe_subprocess_exec("weave dns-add {} -h {}".format(
container_id, hostname
))
def detach_ip(container_id):
"""Detaches container from weave network.
:param container_id: ID of the container assigned by docker daemon.
"""
safe_subprocess_exec("weave detach {}".format(container_id))
def httpd_crashed(container_id):
"""Checks whether httpd process managed by supervisor is crashed or not.
:param container_id: ID of the container assigned by docker daemon.
"""
out, _, _ = safe_subprocess_exec(
"docker exec {} supervisorctl status httpd".format(container_id)
)
return "RUNNING" not in out
def weave_component_ready(name):
delay = 10
max_retry = 6
retry_attempt = 0
component_ready = False
while retry_attempt < max_retry:
if container_stopped(name):
logger.warn("{} is not ready; retrying ...".format(name))
time.sleep(delay)
retry_attempt += 1
else:
component_ready = True
break
return component_ready
def recover_containers(node_id, ox_cluster_hostname):
"""Recovers all containers.
:param node_id: ID of the node.
:param ox_cluster_hostname: Name of IDP server.
"""
containers = sorted(get_containers(node_id),
key=lambda x: x["recovery_priority"])
for container in containers:
if not container_exists(container["cid"]):
continue
if not container_stopped(container["cid"]):
# no need to restart already running container
logger.info("{} container {} already running; skipping ...".format(
container["type"], container["name"],
))
continue
logger.info("restarting {} container {}".format(
container["type"], container["name"]
))
_, err, returncode = restart_container(container["cid"])
if returncode != 0:
# if restarting failed, continue to other containers
# and let this specific container stopped so we can
# retry the recovery process again
logger.warn(
"something is wrong while restarting "
"{} container {}; reason={}".format(
container["type"], container["name"], err
)
)
continue
# DISABLED container must be detached from weave network
if container["state"] == "DISABLED":
detach_ip(container["cid"])
continue
# manually re-adding DNS entry
logger.info("adding DNS entry {} for {} container {}".format(
container["hostname"], container["type"], container["name"]
))
add_dns(container["cid"], container["hostname"])
if container["type"] in ("ldap", "oxauth", "oxtrust",):
add_dns(container["cid"], "{}.weave.local".format(container["type"])) # noqa
if container["type"] == "ldap":
# introduce delay to wait for a running opendj instance
# before restarting other containers
logger.info("waiting for ldap server startup; "
"this may take a while ...")
time.sleep(20)
# if cluster hostname contains `weave.local` suffix, this extra DNS
# entry will be added into weavedns; pretty useful for setup which
# doesn't have resolvable domain name
if container["type"] == "nginx":
add_dns(container["cid"], ox_cluster_hostname)
# currently, only oxauth and oxidp use httpd
if container["type"] in ("oxauth", "oxidp"):
if httpd_crashed(container["cid"]):
# httpd refuses to work if previous shutdown was unclean
# a workaround is to remove ``/var/run/apache2/apache2.pid``
# before restarting supervisor program
cmd = "rm /var/run/apache2/apache2.pid " \
"&& supervisorctl restart httpd"
safe_subprocess_exec(
'''docker exec {} sh -c "{}"'''.format(container["cid"], cmd) # noqa
)
if __name__ == "__main__":
try:
logger.info("starting recovery process for current node; "
"this may take a while ...")
cluster = get_current_cluster()
if not cluster:
logger.warn("unable to find any cluster")
sys.exit(1)
node = get_node()
if not node:
logger.warn("unable to find node matches existing hostname")
sys.exit(1)
if not weave_component_ready("weave"):
logger.error("aborting recovery process due to weave being "
"not ready; please try again later ...")
sys.exit(1)
if not weave_component_ready("weaveproxy"):
logger.error("aborting recovery process due to weaveproxy being "
"not ready; please try again later ...")
sys.exit(1)
if not weave_component_ready("weaveplugin"):
logger.error("aborting recovery process due to weaveplugin being "
"not ready; please try again later ...")
sys.exit(1)
time.sleep(10)
recover_containers(node.get("id"), cluster.get("ox_cluster_hostname"))
logger.info("recovery process for current node is finished")
except KeyboardInterrupt:
logger.warn("recovery process aborted by user")
sys.exit(0)
| mit | 5,439,546,729,971,558,000 | 31.309375 | 96 | 0.610697 | false | 4.261748 | false | false | false |
mancoast/CPythonPyc_test | cpython/234_test_sax.py | 1 | 19309 | # regression test for SAX 2.0 -*- coding: iso-8859-1 -*-
# $Id: test_sax.py,v 1.24.16.1 2004/03/20 08:20:03 fdrake Exp $
from xml.sax import make_parser, ContentHandler, \
SAXException, SAXReaderNotAvailable, SAXParseException
try:
make_parser()
except SAXReaderNotAvailable:
# don't try to test this module if we cannot create a parser
raise ImportError("no XML parsers available")
from xml.sax.saxutils import XMLGenerator, escape, unescape, quoteattr, \
XMLFilterBase
from xml.sax.expatreader import create_parser
from xml.sax.xmlreader import InputSource, AttributesImpl, AttributesNSImpl
from cStringIO import StringIO
from test.test_support import verify, verbose, TestFailed, findfile
import os
# ===== Utilities
tests = 0
failures = []
def confirm(outcome, name):
global tests
tests = tests + 1
if outcome:
if verbose:
print "Failed", name
else:
failures.append(name)
def test_make_parser2():
try:
# Creating parsers several times in a row should succeed.
# Testing this because there have been failures of this kind
# before.
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
except:
return 0
else:
return p
# ===========================================================================
#
# saxutils tests
#
# ===========================================================================
# ===== escape
def test_escape_basic():
    return escape("Donald Duck & Co") == "Donald Duck &amp; Co"
def test_escape_all():
    return escape("<Donald Duck & Co>") == "&lt;Donald Duck &amp; Co&gt;"
def test_escape_extra():
    return escape("Hei på deg", {"å" : "&aring;"}) == "Hei p&aring; deg"
# ===== unescape
def test_unescape_basic():
    return unescape("Donald Duck &amp; Co") == "Donald Duck & Co"
def test_unescape_all():
    return unescape("&lt;Donald Duck &amp; Co&gt;") == "<Donald Duck & Co>"
def test_unescape_extra():
    return unescape("Hei på deg", {"å" : "&aring;"}) == "Hei p&aring; deg"
def test_unescape_amp_extra():
    return unescape("&amp;foo;", {"&foo;": "splat"}) == "&foo;"
# ===== quoteattr
def test_quoteattr_basic():
return quoteattr("Donald Duck & Co") == '"Donald Duck & Co"'
def test_single_quoteattr():
return (quoteattr('Includes "double" quotes')
== '\'Includes "double" quotes\'')
def test_double_quoteattr():
return (quoteattr("Includes 'single' quotes")
== "\"Includes 'single' quotes\"")
def test_single_double_quoteattr():
return (quoteattr("Includes 'single' and \"double\" quotes")
== "\"Includes 'single' and "double" quotes\"")
# ===== make_parser
def test_make_parser():
try:
# Creating a parser should succeed - it should fall back
# to the expatreader
p = make_parser(['xml.parsers.no_such_parser'])
except:
return 0
else:
return p
# ===== XMLGenerator
start = '<?xml version="1.0" encoding="iso-8859-1"?>\n'
def test_xmlgen_basic():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc></doc>"
def test_xmlgen_content():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.characters("huhei")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc>huhei</doc>"
def test_xmlgen_pi():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.processingInstruction("test", "data")
gen.startElement("doc", {})
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<?test data?><doc></doc>"
def test_xmlgen_content_escape():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.characters("<huhei&")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc><huhei&</doc>"
def test_xmlgen_attr_escape():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {"a": '"'})
gen.startElement("e", {"a": "'"})
gen.endElement("e")
gen.startElement("e", {"a": "'\""})
gen.endElement("e")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start \
+ "<doc a='\"'><e a=\"'\"></e><e a=\"'"\"></e></doc>"
def test_xmlgen_ignorable():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.ignorableWhitespace(" ")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc> </doc>"
ns_uri = "http://www.python.org/xml-ns/saxtest/"
def test_xmlgen_ns():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startPrefixMapping("ns1", ns_uri)
gen.startElementNS((ns_uri, "doc"), "ns1:doc", {})
# add an unqualified name
gen.startElementNS((None, "udoc"), None, {})
gen.endElementNS((None, "udoc"), None)
gen.endElementNS((ns_uri, "doc"), "ns1:doc")
gen.endPrefixMapping("ns1")
gen.endDocument()
return result.getvalue() == start + \
('<ns1:doc xmlns:ns1="%s"><udoc></udoc></ns1:doc>' %
ns_uri)
# ===== XMLFilterBase
def test_filter_basic():
result = StringIO()
gen = XMLGenerator(result)
filter = XMLFilterBase()
filter.setContentHandler(gen)
filter.startDocument()
filter.startElement("doc", {})
filter.characters("content")
filter.ignorableWhitespace(" ")
filter.endElement("doc")
filter.endDocument()
return result.getvalue() == start + "<doc>content </doc>"
# ===========================================================================
#
# expatreader tests
#
# ===========================================================================
# ===== XMLReader support
def test_expat_file():
parser = create_parser()
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(open(findfile("test"+os.extsep+"xml")))
return result.getvalue() == xml_test_out
# ===== DTDHandler support
class TestDTDHandler:
def __init__(self):
self._notations = []
self._entities = []
def notationDecl(self, name, publicId, systemId):
self._notations.append((name, publicId, systemId))
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._entities.append((name, publicId, systemId, ndata))
def test_expat_dtdhandler():
parser = create_parser()
handler = TestDTDHandler()
parser.setDTDHandler(handler)
parser.feed('<!DOCTYPE doc [\n')
parser.feed(' <!ENTITY img SYSTEM "expat.gif" NDATA GIF>\n')
parser.feed(' <!NOTATION GIF PUBLIC "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN">\n')
parser.feed(']>\n')
parser.feed('<doc></doc>')
parser.close()
return handler._notations == [("GIF", "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN", None)] and \
handler._entities == [("img", None, "expat.gif", "GIF")]
# ===== EntityResolver support
class TestEntityResolver:
def resolveEntity(self, publicId, systemId):
inpsrc = InputSource()
inpsrc.setByteStream(StringIO("<entity/>"))
return inpsrc
def test_expat_entityresolver():
parser = create_parser()
parser.setEntityResolver(TestEntityResolver())
result = StringIO()
parser.setContentHandler(XMLGenerator(result))
parser.feed('<!DOCTYPE doc [\n')
parser.feed(' <!ENTITY test SYSTEM "whatever">\n')
parser.feed(']>\n')
parser.feed('<doc>&test;</doc>')
parser.close()
return result.getvalue() == start + "<doc><entity></entity></doc>"
# ===== Attributes support
class AttrGatherer(ContentHandler):
def startElement(self, name, attrs):
self._attrs = attrs
def startElementNS(self, name, qname, attrs):
self._attrs = attrs
def test_expat_attrs_empty():
parser = create_parser()
gather = AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc/>")
parser.close()
return verify_empty_attrs(gather._attrs)
def test_expat_attrs_wattr():
parser = create_parser()
gather = AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc attr='val'/>")
parser.close()
return verify_attrs_wattr(gather._attrs)
def test_expat_nsattrs_empty():
parser = create_parser(1)
gather = AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc/>")
parser.close()
return verify_empty_nsattrs(gather._attrs)
def test_expat_nsattrs_wattr():
parser = create_parser(1)
gather = AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc xmlns:ns='%s' ns:attr='val'/>" % ns_uri)
parser.close()
attrs = gather._attrs
return attrs.getLength() == 1 and \
attrs.getNames() == [(ns_uri, "attr")] and \
(attrs.getQNames() == [] or attrs.getQNames() == ["ns:attr"]) and \
len(attrs) == 1 and \
attrs.has_key((ns_uri, "attr")) and \
attrs.keys() == [(ns_uri, "attr")] and \
attrs.get((ns_uri, "attr")) == "val" and \
attrs.get((ns_uri, "attr"), 25) == "val" and \
attrs.items() == [((ns_uri, "attr"), "val")] and \
attrs.values() == ["val"] and \
attrs.getValue((ns_uri, "attr")) == "val" and \
attrs[(ns_uri, "attr")] == "val"
# ===== InputSource support
xml_test_out = open(findfile("test"+os.extsep+"xml"+os.extsep+"out")).read()
def test_expat_inpsource_filename():
parser = create_parser()
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(findfile("test"+os.extsep+"xml"))
return result.getvalue() == xml_test_out
def test_expat_inpsource_sysid():
parser = create_parser()
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(InputSource(findfile("test"+os.extsep+"xml")))
return result.getvalue() == xml_test_out
def test_expat_inpsource_stream():
parser = create_parser()
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
inpsrc = InputSource()
inpsrc.setByteStream(open(findfile("test"+os.extsep+"xml")))
parser.parse(inpsrc)
return result.getvalue() == xml_test_out
# ===== IncrementalParser support
def test_expat_incremental():
result = StringIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.feed("<doc>")
parser.feed("</doc>")
parser.close()
return result.getvalue() == start + "<doc></doc>"
def test_expat_incremental_reset():
result = StringIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.feed("<doc>")
parser.feed("text")
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.reset()
parser.feed("<doc>")
parser.feed("text")
parser.feed("</doc>")
parser.close()
return result.getvalue() == start + "<doc>text</doc>"
# ===== Locator support
def test_expat_locator_noinfo():
result = StringIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.feed("<doc>")
parser.feed("</doc>")
parser.close()
return parser.getSystemId() is None and \
parser.getPublicId() is None and \
parser.getLineNumber() == 1
def test_expat_locator_withinfo():
result = StringIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.parse(findfile("test.xml"))
return parser.getSystemId() == findfile("test.xml") and \
parser.getPublicId() is None
# ===========================================================================
#
# error reporting
#
# ===========================================================================
def test_expat_inpsource_location():
parser = create_parser()
parser.setContentHandler(ContentHandler()) # do nothing
source = InputSource()
source.setByteStream(StringIO("<foo bar foobar>")) #ill-formed
name = "a file name"
source.setSystemId(name)
try:
parser.parse(source)
except SAXException, e:
return e.getSystemId() == name
def test_expat_incomplete():
parser = create_parser()
parser.setContentHandler(ContentHandler()) # do nothing
try:
parser.parse(StringIO("<foo>"))
except SAXParseException:
return 1 # ok, error found
else:
return 0
def test_sax_parse_exception_str():
# pass various values from a locator to the SAXParseException to
# make sure that the __str__() doesn't fall apart when None is
# passed instead of an integer line and column number
#
# use "normal" values for the locator:
str(SAXParseException("message", None,
DummyLocator(1, 1)))
# use None for the line number:
str(SAXParseException("message", None,
DummyLocator(None, 1)))
# use None for the column number:
str(SAXParseException("message", None,
DummyLocator(1, None)))
# use None for both:
str(SAXParseException("message", None,
DummyLocator(None, None)))
return 1
class DummyLocator:
def __init__(self, lineno, colno):
self._lineno = lineno
self._colno = colno
def getPublicId(self):
return "pubid"
def getSystemId(self):
return "sysid"
def getLineNumber(self):
return self._lineno
def getColumnNumber(self):
return self._colno
# ===========================================================================
#
# xmlreader tests
#
# ===========================================================================
# ===== AttributesImpl
def verify_empty_attrs(attrs):
try:
attrs.getValue("attr")
gvk = 0
except KeyError:
gvk = 1
try:
attrs.getValueByQName("attr")
gvqk = 0
except KeyError:
gvqk = 1
try:
attrs.getNameByQName("attr")
gnqk = 0
except KeyError:
gnqk = 1
try:
attrs.getQNameByName("attr")
gqnk = 0
except KeyError:
gqnk = 1
try:
attrs["attr"]
gik = 0
except KeyError:
gik = 1
return attrs.getLength() == 0 and \
attrs.getNames() == [] and \
attrs.getQNames() == [] and \
len(attrs) == 0 and \
not attrs.has_key("attr") and \
attrs.keys() == [] and \
attrs.get("attrs") is None and \
attrs.get("attrs", 25) == 25 and \
attrs.items() == [] and \
attrs.values() == [] and \
gvk and gvqk and gnqk and gik and gqnk
def verify_attrs_wattr(attrs):
return attrs.getLength() == 1 and \
attrs.getNames() == ["attr"] and \
attrs.getQNames() == ["attr"] and \
len(attrs) == 1 and \
attrs.has_key("attr") and \
attrs.keys() == ["attr"] and \
attrs.get("attr") == "val" and \
attrs.get("attr", 25) == "val" and \
attrs.items() == [("attr", "val")] and \
attrs.values() == ["val"] and \
attrs.getValue("attr") == "val" and \
attrs.getValueByQName("attr") == "val" and \
attrs.getNameByQName("attr") == "attr" and \
attrs["attr"] == "val" and \
attrs.getQNameByName("attr") == "attr"
def test_attrs_empty():
return verify_empty_attrs(AttributesImpl({}))
def test_attrs_wattr():
return verify_attrs_wattr(AttributesImpl({"attr" : "val"}))
# ===== AttributesImpl
def verify_empty_nsattrs(attrs):
try:
attrs.getValue((ns_uri, "attr"))
gvk = 0
except KeyError:
gvk = 1
try:
attrs.getValueByQName("ns:attr")
gvqk = 0
except KeyError:
gvqk = 1
try:
attrs.getNameByQName("ns:attr")
gnqk = 0
except KeyError:
gnqk = 1
try:
attrs.getQNameByName((ns_uri, "attr"))
gqnk = 0
except KeyError:
gqnk = 1
try:
attrs[(ns_uri, "attr")]
gik = 0
except KeyError:
gik = 1
return attrs.getLength() == 0 and \
attrs.getNames() == [] and \
attrs.getQNames() == [] and \
len(attrs) == 0 and \
not attrs.has_key((ns_uri, "attr")) and \
attrs.keys() == [] and \
attrs.get((ns_uri, "attr")) is None and \
attrs.get((ns_uri, "attr"), 25) == 25 and \
attrs.items() == [] and \
attrs.values() == [] and \
gvk and gvqk and gnqk and gik and gqnk
def test_nsattrs_empty():
return verify_empty_nsattrs(AttributesNSImpl({}, {}))
def test_nsattrs_wattr():
attrs = AttributesNSImpl({(ns_uri, "attr") : "val"},
{(ns_uri, "attr") : "ns:attr"})
return attrs.getLength() == 1 and \
attrs.getNames() == [(ns_uri, "attr")] and \
attrs.getQNames() == ["ns:attr"] and \
len(attrs) == 1 and \
attrs.has_key((ns_uri, "attr")) and \
attrs.keys() == [(ns_uri, "attr")] and \
attrs.get((ns_uri, "attr")) == "val" and \
attrs.get((ns_uri, "attr"), 25) == "val" and \
attrs.items() == [((ns_uri, "attr"), "val")] and \
attrs.values() == ["val"] and \
attrs.getValue((ns_uri, "attr")) == "val" and \
attrs.getValueByQName("ns:attr") == "val" and \
attrs.getNameByQName("ns:attr") == (ns_uri, "attr") and \
attrs[(ns_uri, "attr")] == "val" and \
attrs.getQNameByName((ns_uri, "attr")) == "ns:attr"
# ===== Main program
def make_test_output():
parser = create_parser()
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(findfile("test"+os.extsep+"xml"))
outf = open(findfile("test"+os.extsep+"xml"+os.extsep+"out"), "w")
outf.write(result.getvalue())
outf.close()
items = locals().items()
items.sort()
for (name, value) in items:
if name[ : 5] == "test_":
confirm(value(), name)
if verbose:
print "%d tests, %d failures" % (tests, len(failures))
if failures:
raise TestFailed("%d of %d tests failed: %s"
% (len(failures), tests, ", ".join(failures)))
| gpl-3.0 | -2,254,858,870,572,087,000 | 26.782734 | 117 | 0.5702 | false | 3.682815 | true | false | false |
osuripple/pep.py | objects/chatFilters.py | 1 | 1250 | class chatFilters:
def __init__(self, fileName="filters.txt"):
"""
Initialize chat filters
:param fileName: name of the file containing filters. Default: filters.txt
"""
self.filters = {}
self.loadFilters(fileName)
def loadFilters(self, fileName="filters.txt"):
"""
Load filters from a file
:param fileName: name of the file containing filters. Default: filters.txt
:return:
"""
# Reset chat filters
self.filters = {}
# Open filters file
with open(fileName, "r") as f:
# Read all lines
data = f.readlines()
# Process each line
for line in data:
# Get old/new word and save it in dictionary
lineSplit = line.split("=")
self.filters[lineSplit[0].lower()] = lineSplit[1].replace("\n", "")
def filterMessage(self, message):
"""
Replace forbidden words with filtered ones
:param message: normal message
:return: filtered message
"""
return message
"""
# Split words by spaces
messageTemp = message.split(" ")
# Check each word
for word in messageTemp:
lowerWord = word.lower()
# If the word is filtered, replace it
if lowerWord in self.filters:
message = message.replace(word, self.filters[lowerWord])
# Return filtered message
return message
"""
| agpl-3.0 | -5,941,986,136,163,623,000 | 22.148148 | 76 | 0.672 | false | 3.501401 | false | false | false |
Autostew/autostew | autostew_back/event_handlers/collision.py | 1 | 4077 | from autostew_back.event_handlers.base_event_handler import BaseEventHandler
from autostew_web_enums.models import EventType, ParticipantState
from autostew_web_session.models.event import Event
from autostew_web_session.models.participant import Participant
warn_at = 0.7
environment_crash_multiplier = 0.1
class HandleCollision(BaseEventHandler):
@classmethod
def can_consume(cls, server, event: Event):
return (
event.type.name == EventType.impact and
event.participant is not None
)
@classmethod
def consume(cls, server, event: Event):
magnitude = event.magnitude if event.human_to_human else int(event.magnitude * environment_crash_multiplier)
if event.ai_involved:
return
if event.participant.state.name != ParticipantState.racing:
return
if event.other_participant and event.other_participant.state.name != ParticipantState.racing:
return
if event.participant.is_player:
cls.add_crash_points(magnitude, event.participant, server, event.other_participant)
if event.other_participant and event.other_participant.is_player:
cls.add_crash_points(magnitude, event.other_participant, server, event.participant)
@classmethod
def add_crash_points(cls, crash_points_increase: int, participant: Participant, server, opponent: Participant=None):
if opponent:
crash_points_increase *= cls.get_interclass_multiplier(participant, opponent)
crash_points_increase = round(crash_points_increase)
participant.accumulated_crash_points += crash_points_increase
class_changed = participant.member.steam_user.add_crash_points(crash_points_increase)
cls.crash_notification(crash_points_increase, participant, server, opponent, class_changed)
if participant.member.steam_user.over_class_kick_impact_threshold(crash_points_increase):
participant.kick(server, server.back_crash_points_limit_ban_seconds)
if server.back_crash_points_limit and participant.accumulated_crash_points > server.back_crash_points_limit:
participant.kick(server, server.back_crash_points_limit_ban_seconds)
elif server.back_crash_points_limit and participant.accumulated_crash_points > warn_at * server.back_crash_points_limit:
cls.crash_limit_warning(participant, server)
@classmethod
def get_interclass_multiplier(cls, participant: Participant, opponent: Participant):
if (
opponent.member.steam_user.safety_class and
opponent.member.steam_user.safety_class.impact_weight and
participant.member.steam_user.safety_class and
participant.member.steam_user.safety_class.impact_weight and
participant.member.steam_user.safety_class.impact_weight < opponent.member.steam_user.safety_class.impact_weight
):
return participant.member.steam_user.safety_class.impact_weight / opponent.member.steam_user.safety_class.impact_weight
return 1
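    # Worked example (illustrative): if the participant's impact_weight is 2.0
    # and the opponent's is 4.0, the multiplier is 0.5, so the lower-weighted
    # participant collects only half of the raw crash points for the contact.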
@classmethod
def crash_notification(cls, crash_points_increase, participant, server, opponent: Participant=None, class_changed=False):
participant.send_chat("", server)
if opponent:
participant.send_chat("CONTACT with {}".format(opponent.name), server)
participant.send_chat("CONTACT logged for {points} points.".format(points=crash_points_increase), server)
if class_changed:
participant.send_chat("Your SAFETY CLASS is now {}".format(participant.member.steam_user.safety_class), server)
@classmethod
def crash_limit_warning(cls, participant, server):
participant.send_chat(
"CONTACT: You have collected {points} crash points.".format(points=participant.accumulated_crash_points),
server
)
participant.send_chat(
"CONTACT: Disqualification at {max_crash_points} points.".format(max_crash_points=server.back_crash_points_limit),
server
)
| agpl-3.0 | 1,326,310,007,162,899,000 | 49.333333 | 131 | 0.704194 | false | 3.908917 | false | false | false |
47lining/nucleator-core | lib/nucleator/cli/cli.py | 1 | 5116 | # Copyright 2015 47Lining LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nucleator.cli import properties
from nucleator.cli import utils
import sys, os, argparse
class Cli(object):
"""
An object and helper methods to represent an installation of Nucleator
"""
def __init__(self):
self.commands = {}
self.command_paths = []
# Setup the root argument parser
self.parser = argparse.ArgumentParser()
self.parser.add_argument("-v", "--verbosity", required=False, action="count", help="Increase output verbosity")
self.parser.add_argument("--debug-credentials", required=False, action='store_true', help="Show credential output for debugging purposes")
self.parser.add_argument("--no-debug-credentials", required=False, action='store_false', help="Dont show credential output")
self.parser.add_argument("-p", "--preview", required=False, action="store_true",
help="Display information about what a command will do, without actually executing the command.\n" +
"The --preview flag should come before any subcommands on the command line.")
self.parser.add_argument("-d", "--debug", required=False, action="store_true",
help="Turn on debugging mode")
self.subparsers = self.parser.add_subparsers(dest="command")
def core_path(self):
"""path to to core commands installed with Nucleator"""
return properties.core_path()
def contrib_path(self):
"""path to to contrib commands, added by user via Nucleator's update command"""
return properties.contrib_path()
def import_commands(self, path):
# too early to use self.parse()
debug = "--debug" in sys.argv or "-d" in sys.argv
if debug:
print ">>>IMPORT COMMANDS PATH: "+path
if not os.path.isdir(path):
if debug:
print ">>>IMPORT PATH NOT A DIR: "+path
# skip if path to import doesn't exist
return
sys.path.append(path)
# iterate through nucleator command definitions found as immediate subdirs of path
for command_dir in next(os.walk(path))[1]:
self.command_paths.append(os.path.join(path,command_dir))
if debug:
print ">>> IMPORT COMMAND_DIR: "+command_dir
candidate_location = os.path.join(path, command_dir, "commands")
if debug:
print ">>> IMPORT CANDIDATE LOCATION: "+candidate_location
import_candidates = os.listdir(candidate_location) if os.path.isdir(candidate_location) else []
# iterate through filtered import candidates
for name in [n for n in import_candidates
if n.endswith('.py') and n != "__init__.py"]:
if debug:
print ">>> IMPORT CANDIDATE NAME: "+name
name = name.replace('.py', '')
if debug:
print ">>> IMPORT "+"{0}.commands.{1}".format(command_dir, name)
module = __import__(
"{0}.commands.{1}".format(command_dir, name),
fromlist=['']
)
command = getattr(module, "command", None)
if command is None:
utils.write_err("Invalid command implementation (%s)" % name)
command.parser_init(self.subparsers)
self.commands[command.name] = command
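    # Expected layout (sketch inferred from the walk above; names are
    # illustrative): each command lives in <path>/<command_dir>/commands/<name>.py
    # and exposes a module-level ``command`` object providing ``name``,
    # ``parser_init(subparsers)`` and ``execute(**opts)``.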
def parse(self):
self.opts = vars(self.parser.parse_args())
self.opts["verbose"] = self.opts.get("verbosity", 0) > 0
self.opts["cli"] = self
return self.opts
def current_command_name(self):
return self.opts.get("command")
def get_nucleator_command(self, command_name):
return self.commands[command_name]
def current_nucleator_command(self):
return self.get_nucleator_command(self.current_command_name())
def execute(self):
self.current_nucleator_command().execute(**self.opts)
def dump(self):
utils.write ("{0}{1}".format(self.current_command_name(), os.linesep))
import json
utils.write (
"{0}{1}".format(
json.dumps(
                    {k:v for (k,v) in self.opts.iteritems() if k != "cli"},
                    sort_keys=True,
indent=4, separators=(',', ': ')
),
os.linesep
)
)
| apache-2.0 | 9,091,823,914,105,722,000 | 40.934426 | 146 | 0.585418 | false | 4.324598 | false | false | false |
robcarver17/pysystemtrade | sysobjects/production/roll_state.py | 1 | 2906 | from enum import Enum
from syscore.objects import named_object
RollState = Enum("RollState",(
"No_Roll",
"Passive",
"Force",
"Force_Outright",
"Roll_Adjusted"))
default_state = RollState.No_Roll
roll_adj_state = RollState.Roll_Adjusted
roll_explanations = {
RollState.No_Roll:"No rolling happens. Will only trade priced contract.",
RollState.Passive:"Allow the contract to roll naturally (closing trades in priced contract, opening trades in forward contract)",
RollState.Force:"Force the contract to roll ASAP using spread order",
RollState.Force_Outright:"Force the contract to roll ASAP using two outright orders",
RollState.Roll_Adjusted:"Roll adjusted prices from existing priced to new forward contract (after adjusted prices have been changed, will automatically move state to no roll"}
def is_forced_roll_state(roll_state: RollState):
if roll_state == RollState.Force or roll_state == RollState.Force_Outright:
return True
else:
return False
def is_type_of_active_rolling_roll_state(roll_state: RollState):
if is_forced_roll_state(roll_state) or roll_state == RollState.Roll_Adjusted:
return True
else:
return False
def explain_roll_state_str(roll_state: RollState):
return roll_explanations[RollState[roll_state]]
def name_of_roll_state(roll_state: RollState):
return roll_state.name
def complete_roll_state(roll_state: RollState, priced_position):
if priced_position == 0:
flag_position_in_priced = 0
else:
flag_position_in_priced = 1
return "%s%s" % (name_of_roll_state(roll_state), flag_position_in_priced)
def allowable_roll_state_from_current_and_position(
current_roll_state: RollState, priced_position:int):
# Transition matrix: First option is recommended
# A 0 suffix indicates we have no position in the priced contract
# A 1 suffix indicates we do have a position in the priced contract
allowed_transition = dict(
No_Roll0=["Roll_Adjusted", "Passive", "No_Roll"],
No_Roll1=["Passive", "Force", "Force_Outright", "No_Roll"],
Passive0=["Roll_Adjusted", "Passive", "No_Roll"],
Passive1=["Force", "Force_Outright", "Passive", "No_Roll"],
Force0=["Roll_Adjusted", "Passive"],
Force1=["Force", "Force_Outright", "Passive", "No_Roll"],
Force_Outright0=["Roll_Adjusted", "Passive"],
Force_Outright1=["Force", "Force_Outright", "Passive", "No_Roll"],
Roll_Adjusted0=["No_Roll"],
Roll_Adjusted1=["Roll_Adjusted"],
)
status_plus_position = complete_roll_state(
current_roll_state, priced_position)
try:
allowable_states = allowed_transition[status_plus_position]
except KeyError:
raise Exception(
"State plus position %s not recognised" %
status_plus_position)
return allowable_states
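# Usage sketch (illustrative): holding a position of +2 in the priced contract
# while in the Passive state selects the "Passive1" row above, so:
#
#   allowable_roll_state_from_current_and_position(RollState.Passive, 2)
#   # -> ["Force", "Force_Outright", "Passive", "No_Roll"]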
| gpl-3.0 | 1,969,242,482,449,701,400 | 36.25641 | 179 | 0.681693 | false | 3.45541 | false | false | false |
Bajoo/client-pc | bajoo/common/periodic_task.py | 1 | 5677 | # -*- coding: utf-8 -*-
import logging
from threading import Timer, Lock
from ..promise import Deferred, CancelledError
_logger = logging.getLogger(__name__)
class PeriodicTask(object):
"""Generic Thread-based service, executing a task at regular interval.
The task is executed first right after the call to `start()`, in a new
thread.
After each execution, the next execution is scheduled after the specified
delay. The delay doesn't include the task's duration.
Attributes:
delay (int): delay between two executions, in seconds. When modified,
the new value will be used only after the next execution.
context (dict): dict that can be used as a scope shared between the
multiple executions and/or the caller.
args (tuple): arguments passed to the task.
kwargs (dict): keyword arguments passed to the task.
Note:
context, args and kwargs attributes are not thread-safe. If needed, the
sync mechanisms (to avoid race conditions) are up to the user.
Example:
>>> def _task(pt, arg):
... assert pt.context['value'] == 3
... assert arg == 17
>>> args = 1
>>> task = PeriodicTask('MyTask', 1, _task, 17)
>>> task.context['value'] = 3
>>> task.start()
>>> task.stop()
"""
def __init__(self, name, delay, task, *args, **kwargs):
"""Constructor
Args:
name (str): Thread name.
delay (float): Delay between two executions, in seconds
task (Callable[[PeriodicTask, ...], T]): task to execute each
periods. First argument is the PeriodicTask instance.
*args (optional): arguments passed to the task.
**kwargs (optional): keywords arguments passed to the task.
"""
self.delay = delay
self.context = {}
self.args = args
self.kwargs = kwargs
self._name = name
self._task = task
self._timer = None
self._canceled = False
self._lock = Lock()
self._is_running = False # must be acceded only with self._lock
self._apply_now = False
self._deferred = None
def _exec_task(self, *args, **kwargs):
with self._lock:
df = self._deferred
self._deferred = None
self._is_running = True
# self._lock must be released during task execution.
result, error = None, None
try:
result = self._task(self, *args, **kwargs)
except BaseException as err:
error = err
_logger.exception('Periodic task %s has raised exception',
self._task)
with self._lock:
self._is_running = False
if self._apply_now:
delay = 0
self._apply_now = False
else:
delay = self.delay
self._timer = Timer(delay, self._exec_task, args=self.args,
kwargs=self.kwargs)
self._timer.name = self._name
self._timer.daemon = True
if not self._canceled:
self._timer.start()
if df:
if error is None:
df.resolve(result)
else:
df.reject(error)
def start(self):
"""Start the task.
The first execution is immediate.
"""
_logger.debug('Start periodic task %s', self._task)
self._timer = Timer(0, self._exec_task, args=self.args,
kwargs=self.kwargs)
self._timer.name = self._name
self._timer.daemon = True
self._timer.start()
def stop(self, join=False):
"""Stop the task.
Note that if the function is running at the moment this method is
called, the current iteration cannot be stopped.
Args:
join (bool, optional): if True, will block until the running task
finish. Default to False
"""
_logger.debug('Stop periodic task %s', self._task)
with self._lock:
self._canceled = True
self._timer.cancel()
if self._deferred:
self._deferred.reject(CancelledError('PeriodicTask stop now.'))
self._deferred = None
if join:
self._timer.join()
def apply_now(self):
"""Apply the task as soon as possible.
Note that if the task is currently running, it will wait the end, then
another iteration will be executed immediately after that.
The method can be called from inside the task itself.
Returns:
Promise[T]: resolved when the task has returned. The promise
resolves with the value returned by the task. If the task
raises an exception, the promise is rejected.
"""
self._timer.cancel()
with self._lock:
if self._deferred:
# special case: twice or more apply_now() at the same time.
return self._deferred.promise
self._deferred = Deferred()
if self._is_running:
# We can't stop the current task, so we set a flag to rerun as
# soon as the task returns.
self._apply_now = True
else:
self._timer.cancel()
self._timer = Timer(0, self._exec_task, args=self.args)
self._timer.name = self._name
self._timer.daemon = True
self._timer.start()
return self._deferred.promise
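# Usage sketch (illustrative; ``sync_files`` is a hypothetical task function):
#
#   task = PeriodicTask('Sync', 60, sync_files)
#   task.start()                  # first run is immediate, then every 60 s
#   promise = task.apply_now()    # squeeze in one extra run as soon as possible
#   task.stop(join=True)          # cancel the timer, wait for a running task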
| gpl-3.0 | -4,949,742,910,043,404,000 | 33.198795 | 79 | 0.550114 | false | 4.593042 | false | false | false |
spillai/procgraph | src/procgraph_mpl/plot_anim.py | 1 | 1093 |
__all__ = [
'PlotAnim',
]
class PlotAnim(object):
def __init__(self):
self.handle_line = {}
self.handle_text = {}
self.pylab = None
def set_pylab(self, pylab):
self.pylab = pylab
def assert_pylab_given(self):
if self.pylab is None:
msg = 'Please call set_pylab() before plotting.'
raise ValueError(msg)
def plot(self, name, x, y, *args, **kwargs):
self.assert_pylab_given()
if not name in self.handle_line:
handle, = self.pylab.plot(x, y, *args, **kwargs)
self.handle_line[name] = handle
else:
handle = self.handle_line[name]
handle.set_data(x, y)
def text(self, name, x, y, text, *args, **kwargs):
self.assert_pylab_given()
if not name in self.handle_text:
handle = self.pylab.text(x, y, text, *args, **kwargs)
self.handle_text[name] = handle
else:
handle = self.handle_text[name]
handle.set_text(text)
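# Usage sketch (illustrative; ``pylab``, ``xs`` and ``ys`` come from the caller):
#
#   anim = PlotAnim()
#   anim.set_pylab(pylab)
#   anim.plot('signal', xs, ys, 'b-')   # first call creates the line handle
#   anim.plot('signal', xs, new_ys)     # later calls only update its data
#   anim.text('label', 0.1, 0.9, 'frame 0')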
| lgpl-3.0 | -8,553,758,529,945,379,000 | 26.325 | 65 | 0.511436 | false | 3.717687 | false | false | false |
GregSilverman/cohort_rest_api | rest_api/build_atom.py | 1 | 14183 | #!/usr/bin/env python
from sqlalchemy import between
from sqlalchemy.sql import and_, label
from app import db, models
import htsql_methods as hsql
Clinical = models.ClinicalData
Attribute = models.Attribute
"""
Example SQL atomic query for modified nested model:
select *
from
clinical_data
(select patient_sid, lft, rgt, a.attribute_id
from clinical_data
where attribute_id = 'ID FOR DEMOGRAPHICS') ldl
on
cd.patient_sid = ldl.patient_sid
and
cd.lft >= ldl.lft
and
cd.rgt <= ldl.rgt
Procedure creates an atomic query, defined by:
i%2 = 0 -> initial subquery of bucket/attribute
From above example, the initial subquery that pulls the bucket:
select patient_sid, lft, rgt, attribute_id
from clinical_data
where attribute_id = 'ID FOR DEMOGRAPHICS')
1%2 != 0 -> drill down to specific bucket attribute
URL comprised of a single atom will look like:
atom: demographics:'id for demographics';eq;demographics;demographics:'id for sex';eq;M
NB: this is attached to incoming requests as a JSON document
element part 1: bucket
type:key -> demographics:attribute.id for attribute.value = demographics
comparator -> eq
attribute value (bucket) -> demographics
element part 2: bucket item
type:key -> demographics:attribute.id for attribute.value = sex
comparator -> eq
attribute value -> M
molecule made up of two atoms: (test_code:'id for test_code';eq;13457-7;test_code:'id for result_value_num';ge;160
&
basic_vitals:'id for blood_pressure_systolic';eq;blood_pressure_systolic;basic_vitals:'id for blood_pressure_systolic';ge;160)
example query:
select *
from
clinical_data cd inner join
(select patient_sid, lft as lft_ldl, rgt as rgt_ldl
from clinical_data
where string_value = '13457-7' and attribute_id = '12345') ldl
on
cd.patient_sid = ldl.patient_sid
and
cd.lft >= ldl.lft_ldl
and
cd.rgt <= ldl.rgt_ldl
where double_value >= 160 and attribute_id = '34567'
order by cd.lft;
"""
# assemble canonical atomic query using parsed components from URL payload
def make_atomic_query(key_type, key, comparator, value, comparator_date, value_date):
a = [] # atomic array of query elements
date = []
whole = [] # entire data set with no constraints
    transform = ['medications', 'demographics']  # data needs special characters removed for querying
numeric = ['int', 'float', 'double']
char = ['string']
# initialize lists
for i in xrange(0, 2):
a.append('')
whole.append('')
if comparator[i] == 'between':
arg = value[i].split(',', 2)
if comparator_date[i]:
if comparator_date[i] == 'between':
date = value_date[i].split(',', 2)
# create queries
for i in xrange(0, 2):
# assemble base query
if i == 0:
a[i] = db.session.query(Clinical.patient_sid,
Clinical.lft,
Clinical.rgt,
Clinical.attribute_id)
else:
a[i] = db.session.query(Clinical.patient_sid,
Clinical.lft,
Clinical.rgt,
label('attribute_value', Clinical.attribute_id),
Clinical.double_value,
Clinical.string_value)
'''
equivalent to:
select patient_sid, lft, rgt
from clinical_data
'''
# grab the desired bucket
if i == 0:
# grab bucket by attribute
a[i] = a[i].filter(Clinical.attribute_id == int(key[i]))
'''
equivalent to:
select patient_sid, lft, rgt
from clinical_data
where attribute_id = '12345'
'''
# NB: these are special characters for building the parse tree -> clean them
if key_type[i] in transform:
name = value[i].replace('_', ' ').\
replace('{', '('). \
replace('}', ')')
else: name = value[i]
# grab specific bucket
a[i] = a[i].filter(Clinical.string_value.op(comparator[i])(name)).subquery()
'''
equivalent to:
select patient_sid, lft, rgt
from clinical_data
where string_value = '13457-7' and attribute_id = '12345'
'''
# pull item from bucket by attribute name with criterion value
elif i == 1:
# grab attribute of interest by name
'''
a[i] = a[i].join(a[i-1],
and_(Clinical.patient_sid == a[i-1].c.patient_sid,
Clinical.lft >= a[i-1].c.lft,
Clinical.rgt <= a[i-1].c.rgt)).\
filter(Clinical.attribute_id == key[i])
'''
a[i] = a[i].join(a[i-1],
and_(Clinical.patient_sid == a[i-1].c.patient_sid,
Clinical.attribute_id == int(key[i]))). \
filter(Clinical.lft >= a[i-1].c.lft,
Clinical.rgt <= a[i-1].c.rgt)
# unconstrained data set for printing all records
whole[i] = a[i]
'''
equivalent to:
select patient_sid, lft, rgt
from
clinical_data cd inner join
(select patient_sid, lft, rgt
from clinical_data
where string_value = '13457-7' and attribute_id = '12345') ldl
on
cd.patient_sid = ldl.patient_sid
and
cd.lft >= ldl.lft
and
cd.rgt <= ldl.rgt
where attribute_id = '34567';
'''
# flag to control output of all data for desired bucket
print_all = False
# for all data for bucket, no filtering is necessary
if 'OUT' in comparator[i]:
print_all = True
if not 'OUT' in comparator[i]:
qstring = "/attribute{data_type.name}?id='" + key[i] + "'"
data_type = hsql.get_data(qstring)
# first: convert to correct data type for utilization of proper covering index
# NB: default is string
if data_type in numeric:
if comparator[i] != 'between':
a[i] = a[i].filter(Clinical.double_value.op(comparator[i])((float(value[i]))))
else:
a[i] = a[i].filter(between(Clinical.double_value,
float(arg[0]),
float(arg[1])))
elif data_type in char:
# clean up incoming string values representative of specific criterion value
if key_type[i] in transform:
name = value[i].replace('_', ' ').\
replace('{', '('). \
replace('}', ')')
else: name = value[i]
a[i] = a[i].filter(Clinical.string_value.op(comparator[i])(name))
'''
equivalent to:
select patient_sid, lft, rgt
from
clinical_data cd inner join
(select attribute_id, patient_sid, lft, rgt
from clinical_data
where string_value = '13457-7' and attribute_id = '12345') ldl
on
cd.patient_sid = ldl.patient_sid
and
cd.lft >= ldl.lft
and
cd.rgt <= ldl.rgt
where double_value >= 160 and attribute_id = '34567';
'''
# query by date
if comparator_date[i]:
if comparator_date[i] == 'between':
a[i] = a[i].filter(between(Clinical.event_date,
date[0],
date[1]))
else:
a[i] = a[i].filter(Clinical.event_date.op(comparator_date[i])([value_date[i]]))
'''
equivalent to:
select patient_sid, lft, rgt
from
clinical_data cd inner join
(select attribute_id, patient_sid, lft, rgt
from clinical_data
where string_value = '13457-7' and attribute_id = '12345') ldl
on
cd.patient_sid = ldl.patient_sid
and
cd.lft >= ldl.lft
and
cd.rgt <= ldl.rgt
where double_value >= 160 and attribute_id = '34567'
and cd.event_date >= '1/1/1970';
'''
# construct final subquery
a[i] = a[i].subquery()
else:
print 'ERROR'
return a[1], whole[1], print_all
# parse query components: atoms -> particles
# TODO future: implement more general method of mapping using
# http://stackoverflow.com/questions/14845196/dynamically-constructing-filters-in-sqlalchemy
# TODO: implement as parallel loop
def parse_atomic_particles(atom):
# delimiter between atomic query particles: key, comparator, value
# used to split atom into particles
separator = ';'
# initialize lists
a = [] # list element for returned atoms
whole = []
for i in xrange(0, 1):
a.append('')
whole.append('')
for j in xrange(0, 1):
# initialize query components
particles = atom.split(separator, 6) # atom consists of 6 query components to be parsed, aka particles
key_type = [] # array of query bucket names
key = [] # array of key bucket ids
comparator = [] # array of comparators
value = [] # array of values
comparator_date = [] # array of date comparators
value_date = [] # array of date components
for i in xrange(len(particles)):
particle = particles[i]
# Each atomic unit consists of 6 "particles" delimited by a ';',
# where each particle consists of a:
#
# -> key: representing a bucket name by an attribute
# -> comparator: representing the logical operation to perform, NB: for bucket this should always be 'eq'
# -> value: name of bucket
# -> key: representing an item within the bucket to query by attribute name
# -> comparator: representing the logical operation to perform on given attribute compared to given value
# -> value: attribute item's value for comparison
# map particle components to appropriate lists
value_comparator_list = ['eq',
'grte',
'lete',
'bt',
'gt',
'lt',
'prn']
date_comparator_list = ['between',
'grt',
'lss']
comparator_mapper = [
(':', particle.split(':')),
('eq', '='),
('grte', '>='),
('lete', '<='),
('bt', 'between'),
('between', 'between'),
('grt', '>='),
('lss', '<='),
('lt', '<'),
('gt', '>'),
('prn', 'OUT')
]
if any(ext in particle for ext in value_comparator_list) or \
any(ext in particle for ext in date_comparator_list) or \
':' in particle:
def apply_mapper(particle):
for item, action in comparator_mapper:
if item in particle:
if ':' in particle:
key_type.append(action[0])
key.append(action[1])
break
elif any(ext in particle for ext in value_comparator_list):
comparator.append(action)
break
# date comparison given in value particle
elif any(ext in particle for ext in date_comparator_list):
# grab operator for date comparison from list
date_stuff = particle.split(',DATE,')
value.append(date_stuff[0])
comparator_date.append(action)
# get dates and split in the case of a between comparison
date = date_stuff[1].split(',')
if len(date) == 2:
temp = date[1]
else:
temp = date[1] + ',' + date[2]
value_date.append(temp)
break
else:
print'error'
apply_mapper(particle)
# if DATE component is not part of value particle use way back in history as default
else:
comparator_date.append('>=')
value_date.append('1776-01-01')
value.append(particle)
a[j], whole[j], print_all = make_atomic_query(key_type, key, comparator, value, comparator_date, value_date)
return a[0], whole[0], print_all
| gpl-3.0 | -4,821,364,456,509,184,000 | 34.019753 | 131 | 0.476345 | false | 4.509698 | false | false | false |
crakensio/django_training | lib/python2.7/site-packages/bpython/curtsiesfrontend/interaction.py | 1 | 4789 | import greenlet
import time
import curtsies.events as events
from bpython.repl import Interaction as BpythonInteraction
from bpython.curtsiesfrontend.manual_readline import char_sequences as rl_char_sequences
class StatusBar(BpythonInteraction):
"""StatusBar and Interaction for Repl
Passing of control back and forth between calls that use interact api
(notify, confirm, file_prompt) like bpython.Repl.write2file and events
on the main thread happens via those calls and self.wait_for_request_or_notify.
Calling one of these three is required for the main thread to regain control!
This is probably a terrible idea, and better would be rewriting this
functionality in a evented or callback style, but trying to integrate
bpython.Repl code.
"""
def __init__(self, initial_message='', permanent_text="", refresh_request=lambda: None):
self._current_line = ''
self.cursor_offset_in_line = 0
self.in_prompt = False
self.in_confirm = False
self.waiting_for_refresh = False
self.prompt = ''
self._message = initial_message
self.message_start_time = time.time()
self.message_time = 3
self.permanent_text = permanent_text
self.main_greenlet = greenlet.getcurrent()
self.request_greenlet = None
self.refresh_request = refresh_request
@property
def has_focus(self):
return self.in_prompt or self.in_confirm or self.waiting_for_refresh
def message(self, msg):
self.message_start_time = time.time()
self._message = msg
def _check_for_expired_message(self):
if self._message and time.time() > self.message_start_time + self.message_time:
self._message = ''
def process_event(self, e):
"""Returns True if shutting down"""
assert self.in_prompt or self.in_confirm or self.waiting_for_refresh
if isinstance(e, events.RefreshRequestEvent):
self.waiting_for_refresh = False
self.request_greenlet.switch()
elif isinstance(e, events.PasteEvent):
for ee in e.events:
self.add_normal_character(ee if len(ee) == 1 else ee[-1]) #strip control seq
elif e in rl_char_sequences:
self.cursor_offset_in_line, self._current_line = rl_char_sequences[e](self.cursor_offset_in_line, self._current_line)
elif e == "":
raise KeyboardInterrupt()
elif e == "":
raise SystemExit()
elif self.in_prompt and e in ("\n", "\r"):
line = self._current_line
self.escape()
self.request_greenlet.switch(line)
elif self.in_confirm:
if e in ('y', 'Y'):
self.request_greenlet.switch(True)
else:
self.request_greenlet.switch(False)
self.escape()
elif e in ['\x1b']:
self.request_greenlet.switch(False)
self.escape()
else: # add normal character
self.add_normal_character(e)
def add_normal_character(self, e):
self._current_line = (self._current_line[:self.cursor_offset_in_line] +
e +
self._current_line[self.cursor_offset_in_line:])
self.cursor_offset_in_line += 1
def escape(self):
"""unfocus from statusbar, clear prompt state, wait for notify call"""
self.in_prompt = False
self.in_confirm = False
self.prompt = ''
self._current_line = ''
@property
def current_line(self):
self._check_for_expired_message()
if self.in_prompt:
return self.prompt + self._current_line
if self.in_confirm:
return self.prompt
if self._message:
return self._message
return self.permanent_text
# interaction interface - should be called from other greenlets
def notify(self, msg, n=3):
self.request_greenlet = greenlet.getcurrent()
self.message_time = n
self.message(msg)
self.waiting_for_refresh = True
self.refresh_request()
self.main_greenlet.switch(msg)
# below Really ought to be called from greenlets other than main because they block
def confirm(self, q):
"""Expected to return True or False, given question prompt q"""
self.request_greenlet = greenlet.getcurrent()
self.prompt = q
self.in_confirm = True
return self.main_greenlet.switch(q)
def file_prompt(self, s):
"""Expected to return a file name, given """
self.request_greenlet = greenlet.getcurrent()
self.prompt = s
self.in_prompt = True
result = self.main_greenlet.switch(s)
return result
| cc0-1.0 | -3,079,365,304,622,484,500 | 37.007937 | 129 | 0.618083 | false | 4.007531 | false | false | false |
cltrudeau/django-awl | awl/templatetags/awltags.py | 1 | 4065 | # awl.templatetags.awltags.py
from django import template
register = template.Library()
# ============================================================================
@register.filter
def getitem(dictionary, keyvar):
"""Custom django template filter that allows access to an item of a
dictionary through the key contained in a template variable. Example:
.. code-block:: python
context_data = {
'data':{
'foo':'bar',
},
'key':'foo',
}
template = Template('{% load awltags %}{{data|getitem:key}}')
context = Context(context_data)
result = template.render(context)
>>> result
'bar'
.. note::
Any KeyErrors are ignored and return an empty string
"""
try:
return dictionary[keyvar]
except KeyError:
return ''
# ----------------------------------------------------------------------------
@register.tag
def accessor(parser, token):
"""This template tag is used to do complex nested attribute accessing of
an object. The first parameter is the object being accessed, subsequent
paramters are one of:
* a variable in the template context
* a literal in the template context
* either of the above surrounded in square brackets
For each variable or literal parameter given a `getattr` is called on the
object, chaining to the next parameter. For any sqaure bracket enclosed
items the access is done through a dictionary lookup.
Example::
{% accessor car where 'front_seat' [position] ['fabric'] %}
The above would result in the following chain of commands:
.. code-block:: python
ref = getattr(car, where)
ref = getattr(ref, 'front_seat')
ref = ref[position]
return ref['fabric']
This tag also supports "as" syntax, putting the results into a template
variable::
{% accessor car 'interior' as foo %}
"""
contents = token.split_contents()
tag = contents[0]
if len(contents) < 3:
raise template.TemplateSyntaxError(('%s requires at least two '
'arguments: object and one or more getattr parms') % tag)
as_var = None
if len(contents) >= 4:
# check for "as" syntax
if contents[-2] == 'as':
as_var = contents[-1]
contents = contents[:-2]
return AccessorNode(contents[1], contents[2:], as_var)
class AccessorNode(template.Node):
def __init__(self, obj_name, parms, as_var):
self.obj_name = obj_name
self.parms = parms
self.as_var = as_var
def render(self, context):
try:
ref = context[self.obj_name]
for parm in self.parms:
if parm[0] == '"' or parm[0] == "'":
# parm is a literal
ref = getattr(ref, parm[1:-1])
elif parm[0] == '[':
# parm is a dictionary lookup
if parm[1] == '"' or parm[1] == "'":
# dict key is a literal
ref = ref[parm[2:-2]]
else:
# dict key is a template var
key = context[parm[1:-1]]
ref = ref[key]
else:
# parm is a template var
attr = context[parm]
ref = getattr(ref, attr)
if self.as_var:
context[self.as_var] = ref
return ''
return ref
except:
# any lookup errors should result in empty
if self.as_var:
context[self.as_var] = ''
return ''
# ----------------------------------------------------------------------------
@register.simple_tag
def nop(*args):
"""This tag does nothing. Useful for a comment without having to build a
full comment block. All parameters are ignored.
Example::
{% nop 'this is a string' %}
"""
return ''
| mit | -5,902,167,371,774,256,000 | 28.456522 | 78 | 0.512423 | false | 4.521691 | false | false | false |
squidsrc/python-rocksdb | setup.py | 1 | 1079 | from setuptools import setup, find_packages
from distutils.extension import Extension
from Cython.Build import cythonize
extension_defaults = {
'extra_compile_args': [
'-std=gnu++11',
'-O3',
'-Wall',
'-Wextra',
'-Wconversion',
'-fno-strict-aliasing'
],
'language': 'c++',
'libraries': [
'rt',
'snappy',
'bz2',
'z'
],
'extra_objects': ['librocksdb.a']
}
mod1 = Extension(
'rocksdb._rocksdb',
['rocksdb/_rocksdb.pyx'],
**extension_defaults
)
setup(
name="pyrocksdb",
version='0.3',
description="Python bindings for RocksDB",
keywords='rocksdb',
author='Stephan Hofmockel',
author_email="Use the github issues",
url="https://github.com/stephan-hof/pyrocksdb",
license='BSD License',
install_requires=[
'setuptools',
'Cython>=0.20',
],
package_dir={'rocksdb': 'rocksdb'},
packages=find_packages('.'),
ext_modules=cythonize([mod1]),
test_suite='rocksdb.tests',
include_package_data=True
)
| bsd-3-clause | 5,162,525,097,040,509,000 | 21.479167 | 51 | 0.578313 | false | 3.382445 | false | false | false |
nateprewitt/pipenv | pipenv/project.py | 1 | 17449 | # -*- coding: utf-8 -*-
import json
import os
import re
import sys
import base64
import hashlib
import contoml
import delegator
import pipfile
import toml
from .utils import (
mkdir_p, convert_deps_from_pip, pep423_name, recase_file,
find_requirements, is_file, is_vcs, python_version, cleanup_toml,
is_installable_file, is_valid_url
)
from .environments import PIPENV_MAX_DEPTH, PIPENV_VENV_IN_PROJECT
from .environments import PIPENV_VIRTUALENV, PIPENV_PIPFILE
if PIPENV_PIPFILE:
if not os.path.isfile(PIPENV_PIPFILE):
raise RuntimeError('Given PIPENV_PIPFILE is not found!')
else:
PIPENV_PIPFILE = os.path.abspath(PIPENV_PIPFILE)
class Project(object):
"""docstring for Project"""
def __init__(self, chdir=True):
super(Project, self).__init__()
self._name = None
self._virtualenv_location = None
self._download_location = None
self._proper_names_location = None
self._pipfile_location = None
self._requirements_location = None
self._original_dir = os.path.abspath(os.curdir)
# Hack to skip this during pipenv run, or -r.
if ('run' not in sys.argv) and chdir:
try:
os.chdir(self.project_directory)
except (TypeError, AttributeError):
pass
def path_to(self, p):
"""Returns the absolute path to a given relative path."""
if os.path.isabs(p):
return p
return os.sep.join([self._original_dir, p])
def _build_package_list(self, package_section):
"""Returns a list of packages for pip-tools to consume."""
ps = {}
# TODO: Separate the logic for showing packages from the filters for supplying pip-tools
for k, v in self.parsed_pipfile.get(package_section, {}).items():
# Skip editable VCS deps.
if hasattr(v, 'keys'):
# When a vcs url is gven without editable it only appears as a key
# Eliminate any vcs, path, or url entries which are not editable
# Since pip-tools can't do deep resolution on them, even setuptools-installable ones
if (is_vcs(v) or is_vcs(k) or (is_installable_file(k) or is_installable_file(v)) or
any((prefix in v and
(os.path.isfile(v[prefix]) or is_valid_url(v[prefix])))
for prefix in ['path', 'file'])):
# If they are editable, do resolve them
if 'editable' not in v:
continue
else:
ps.update({k: v})
else:
ps.update({k: v})
else:
# Since these entries have no attributes we know they are not editable
# So we can safely exclude things that need to be editable in order to be resolved
# First exclude anything that is a vcs entry either in the key or value
if not (any(is_vcs(i) for i in [k, v]) or
# Then exclude any installable files that are not directories
# Because pip-tools can resolve setup.py for example
any(is_installable_file(i) for i in [k, v]) or
# Then exclude any URLs because they need to be editable also
# Things that are excluded can only be 'shallow resolved'
any(is_valid_url(i) for i in [k, v])):
ps.update({k: v})
return ps
@property
def name(self):
if self._name is None:
self._name = self.pipfile_location.split(os.sep)[-2]
return self._name
@property
def pipfile_exists(self):
return bool(self.pipfile_location)
@property
def required_python_version(self):
if self.pipfile_exists:
required = self.parsed_pipfile.get('requires', {}).get('python_full_version')
if not required:
required = self.parsed_pipfile.get('requires', {}).get('python_version')
if required != "*":
return required
@property
def project_directory(self):
if self.pipfile_location is not None:
return os.path.abspath(os.path.join(self.pipfile_location, os.pardir))
else:
return None
@property
def requirements_exists(self):
return bool(self.requirements_location)
@property
def virtualenv_exists(self):
# TODO: Decouple project from existence of Pipfile.
if self.pipfile_exists and os.path.exists(self.virtualenv_location):
if os.name == 'nt':
extra = ['Scripts', 'activate.bat']
else:
extra = ['bin', 'activate']
return os.path.isfile(os.sep.join([self.virtualenv_location] + extra))
return False
@property
def virtualenv_name(self):
# Replace dangerous characters into '_'. The length of the sanitized
# project name is limited as 42 because of the limit of linux kernel
#
# 42 = 127 - len('/home//.local/share/virtualenvs//bin/python2') - 32 - len('-HASHHASH')
#
# 127 : BINPRM_BUF_SIZE - 1
# 32 : Maximum length of username
#
# References:
# https://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html
# http://www.tldp.org/LDP/abs/html/special-chars.html#FIELDREF
# https://github.com/torvalds/linux/blob/2bfe01ef/include/uapi/linux/binfmts.h#L18
sanitized = re.sub(r'[ $`!*@"\\\r\n\t]', '_', self.name)[0:42]
# Hash the full path of the pipfile
hash = hashlib.sha256(self.pipfile_location.encode()).digest()[:6]
encoded_hash = base64.urlsafe_b64encode(hash).decode()
# If the pipfile was located at '/home/user/MY_PROJECT/Pipfile',
# the name of its virtualenv will be 'my-project-wyUfYPqE'
return sanitized + '-' + encoded_hash
@property
def virtualenv_location(self):
# if VIRTUAL_ENV is set, use that.
if PIPENV_VIRTUALENV:
return PIPENV_VIRTUALENV
# Use cached version, if available.
if self._virtualenv_location:
return self._virtualenv_location
# The user wants the virtualenv in the project.
if not PIPENV_VENV_IN_PROJECT:
c = delegator.run('pew dir "{0}"'.format(self.virtualenv_name))
loc = c.out.strip()
# Default mode.
else:
loc = os.sep.join(self.pipfile_location.split(os.sep)[:-1] + ['.venv'])
self._virtualenv_location = loc
return loc
@property
def virtualenv_src_location(self):
loc = os.sep.join([self.virtualenv_location, 'src'])
mkdir_p(loc)
return loc
@property
def download_location(self):
if self._download_location is None:
loc = os.sep.join([self.virtualenv_location, 'downloads'])
self._download_location = loc
# Create the directory, if it doesn't exist.
mkdir_p(self._download_location)
return self._download_location
@property
def proper_names_location(self):
if self._proper_names_location is None:
loc = os.sep.join([self.virtualenv_location, 'pipenv-proper-names.txt'])
self._proper_names_location = loc
# Create the database, if it doesn't exist.
open(self._proper_names_location, 'a').close()
return self._proper_names_location
@property
def proper_names(self):
with open(self.proper_names_location) as f:
return f.read().splitlines()
def register_proper_name(self, name):
"""Registers a proper name to the database."""
with open(self.proper_names_location, 'a') as f:
f.write('{0}\n'.format(name))
@property
def pipfile_location(self):
if PIPENV_PIPFILE:
return PIPENV_PIPFILE
if self._pipfile_location is None:
try:
loc = pipfile.Pipfile.find(max_depth=PIPENV_MAX_DEPTH)
except RuntimeError:
loc = None
self._pipfile_location = loc
return self._pipfile_location
@property
def requirements_location(self):
if self._requirements_location is None:
try:
loc = find_requirements(max_depth=PIPENV_MAX_DEPTH)
except RuntimeError:
loc = None
self._requirements_location = loc
return self._requirements_location
@property
def parsed_pipfile(self):
# Open the pipfile, read it into memory.
with open(self.pipfile_location) as f:
contents = f.read()
# If any outline tables are present...
if ('[packages.' in contents) or ('[dev-packages.' in contents):
data = toml.loads(contents)
# Convert all outline tables to inline tables.
for section in ('packages', 'dev-packages'):
for package in data.get(section, {}):
# Convert things to inline tables — fancy :)
if hasattr(data[section][package], 'keys'):
_data = data[section][package]
data[section][package] = toml._get_empty_inline_table(dict)
data[section][package].update(_data)
# We lose comments here, but it's for the best.)
try:
return contoml.loads(toml.dumps(data, preserve=True))
except RuntimeError:
return toml.loads(toml.dumps(data, preserve=True))
else:
# Fallback to toml parser, for large files.
try:
return contoml.loads(contents)
except Exception:
return toml.loads(contents)
@property
def _pipfile(self):
"""Pipfile divided by PyPI and external dependencies."""
pfile = self.parsed_pipfile
for section in ('packages', 'dev-packages'):
p_section = pfile.get(section, {})
for key in list(p_section.keys()):
# Normalize key name to PEP 423.
norm_key = pep423_name(key)
p_section[norm_key] = p_section.pop(key)
return pfile
@property
def settings(self):
"""A dictionary of the settings added to the Pipfile."""
return self.parsed_pipfile.get('pipenv', {})
def update_settings(self, d):
settings = self.settings
changed = False
for new in d:
if new not in settings:
settings[new] = d[new]
changed = True
if changed:
p = self.parsed_pipfile
p['pipenv'] = settings
# Write the changes to disk.
self.write_toml(p)
@property
def _lockfile(self):
"""Pipfile.lock divided by PyPI and external dependencies."""
pfile = pipfile.load(self.pipfile_location)
lockfile = json.loads(pfile.lock())
for section in ('default', 'develop'):
lock_section = lockfile.get(section, {})
for key in list(lock_section.keys()):
norm_key = pep423_name(key)
lockfile[section][norm_key] = lock_section.pop(key)
return lockfile
@property
def lockfile_location(self):
return '{0}.lock'.format(self.pipfile_location)
@property
def lockfile_exists(self):
return os.path.isfile(self.lockfile_location)
@property
def lockfile_content(self):
with open(self.lockfile_location) as lock:
return json.load(lock)
@property
def vcs_packages(self):
"""Returns a list of VCS packages, for not pip-tools to consume."""
ps = {}
for k, v in self.parsed_pipfile.get('packages', {}).items():
if is_vcs(v) or is_vcs(k):
ps.update({k: v})
return ps
@property
def vcs_dev_packages(self):
"""Returns a list of VCS packages, for not pip-tools to consume."""
ps = {}
for k, v in self.parsed_pipfile.get('dev-packages', {}).items():
if is_vcs(v) or is_vcs(k):
ps.update({k: v})
return ps
@property
def all_packages(self):
"""Returns a list of all packages."""
p = dict(self.parsed_pipfile.get('dev-packages', {}))
p.update(self.parsed_pipfile.get('packages', {}))
return p
@property
def packages(self):
"""Returns a list of packages, for pip-tools to consume."""
return self._build_package_list('packages')
@property
def dev_packages(self):
"""Returns a list of dev-packages, for pip-tools to consume."""
return self._build_package_list('dev-packages')
def touch_pipfile(self):
"""Simply touches the Pipfile, for later use."""
with open('Pipfile', 'a'):
os.utime('Pipfile', None)
@property
def pipfile_is_empty(self):
if not self.pipfile_exists:
return True
with open(self.pipfile_location, 'r') as f:
if not f.read():
return True
return False
def create_pipfile(self, python=None):
"""Creates the Pipfile, filled with juicy defaults."""
data = {
# Default source.
u'source': [
{u'url': u'https://pypi.python.org/simple', u'verify_ssl': True, 'name': 'pypi'}
],
# Default packages.
u'packages': {},
u'dev-packages': {},
}
# Default requires.
if python:
data[u'requires'] = {'python_version': python_version(python)[:len('2.7')]}
self.write_toml(data, 'Pipfile')
def write_toml(self, data, path=None):
"""Writes the given data structure out as TOML."""
if path is None:
path = self.pipfile_location
try:
formatted_data = contoml.dumps(data).rstrip()
except Exception:
for section in ('packages', 'dev-packages'):
for package in data[section]:
# Convert things to inline tables — fancy :)
if hasattr(data[section][package], 'keys'):
_data = data[section][package]
data[section][package] = toml._get_empty_inline_table(dict)
data[section][package].update(_data)
formatted_data = toml.dumps(data).rstrip()
formatted_data = cleanup_toml(formatted_data)
with open(path, 'w') as f:
f.write(formatted_data)
@property
def sources(self):
if self.lockfile_exists:
meta_ = self.lockfile_content['_meta']
sources_ = meta_.get('sources')
if sources_:
return sources_
if 'source' in self.parsed_pipfile:
return self.parsed_pipfile['source']
else:
return [{u'url': u'https://pypi.python.org/simple', u'verify_ssl': True, 'name': 'pypi'}]
def get_source(self, name=None, url=None):
for source in self.sources:
if name:
if source.get('name') == name:
return source
elif url:
if source.get('url') in url:
return source
def destroy_lockfile(self):
"""Deletes the lockfile."""
try:
return os.remove(self.lockfile_location)
except OSError:
pass
def remove_package_from_pipfile(self, package_name, dev=False):
# Read and append Pipfile.
p = self._pipfile
package_name = pep423_name(package_name)
key = 'dev-packages' if dev else 'packages'
if key in p and package_name in p[key]:
del p[key][package_name]
# Write Pipfile.
self.write_toml(recase_file(p))
def add_package_to_pipfile(self, package_name, dev=False):
# Read and append Pipfile.
p = self._pipfile
# Don't re-capitalize file URLs or VCSs.
converted = convert_deps_from_pip(package_name)
converted = converted[[k for k in converted.keys()][0]]
if not (is_file(package_name) or is_vcs(converted) or 'path' in converted):
package_name = pep423_name(package_name)
key = 'dev-packages' if dev else 'packages'
# Set empty group if it doesn't exist yet.
if key not in p:
p[key] = {}
package = convert_deps_from_pip(package_name)
package_name = [k for k in package.keys()][0]
# Add the package to the group.
p[key][package_name] = package[package_name]
# Write Pipfile.
self.write_toml(p)
def add_index_to_pipfile(self, index):
"""Adds a given index to the Pipfile."""
# Read and append Pipfile.
p = self._pipfile
source = {'url': index, 'verify_ssl': True}
# Add the package to the group.
if 'source' not in p:
p['source'] = [source]
else:
p['source'].append(source)
# Write Pipfile.
self.write_toml(p)
def recase_pipfile(self):
self.write_toml(recase_file(self._pipfile))
| mit | 562,355,048,727,634,100 | 32.41954 | 101 | 0.562224 | false | 4.038194 | false | false | false |
mrcslws/nupic.research | projects/dynamic_sparse/runs/run_test.py | 1 | 1756 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
from nupic.research.frameworks.dynamic_sparse.common.utils import run_ray
# alternative initialization based on configuration
exp_config = dict(
device="cuda",
network="resnet18",
dataset_name="CIFAR10",
input_size=(3, 32, 32),
num_classes=10,
stats_mean=(0.4914, 0.4822, 0.4465),
stats_std=(0.2023, 0.1994, 0.2010),
model="SparseModel",
data_dir="~/nta/data",
on_perc=0.2,
batch_size_train=10,
batch_size_test=10,
)
# run
tune_config = dict(
name=__file__,
num_samples=1,
local_dir=os.path.expanduser("~/nta/results"),
checkpoint_freq=0,
checkpoint_at_end=False,
stop={"training_iteration": 10},
resources_per_trial={"cpu": 1, "gpu": 1},
verbose=2,
)
run_ray(tune_config, exp_config)
| agpl-3.0 | 5,158,335,504,190,792,000 | 31.518519 | 73 | 0.646925 | false | 3.720339 | false | false | false |
wendlers/scratch-pynetsense | example-src/WrappedRemoteSensor.py | 1 | 2457 | ##
# This file is part of the Scratch Remote Sensor (SRS) Library project
#
# Copyright (C) 2012 Stefan Wendler <[email protected]>
#
# The SRS Library is free software; you can redistribute
# it and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SRS Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with the JSherpa firmware; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA.
##
'''
This file is part of the Scratch Remote Sensor Library project
'''
import time
import socket
import logging
from scratch.remotesensor import RemoteSensor, DEFAULT_HOST, DEFAULT_PORT
class WrappedRemoteSensor(RemoteSensor):
'''
This example shows how to write a baic wrapped remote sensor. It reads
"/proc/meminfo" and parses out the values for "memtotal" and "memfree".
Each time one of this values changes, a sensor-update is send to the
server.
To start this sensor, pass it as a wrapper to the wrapper daemon:
source setenv.sh
python src/scratch/wrappers/daemon.py --foreground --loglevel DEBUG \
--wrap WrappedRemoteSensor#WrappedRemoteSensor start
'''
__args = None
# name used e.g. for heartbeat
name = "wrap"
def __init__(self, myArgs = {}):
'''
Create a new instance of the monitoring remote sensor.
@param myArgs arguments for the sensor: host and port.
'''
RemoteSensor.__init__(self, args = myArgs)
def worker(self):
'''
Read memory info from proc filesystem (memtotal and memfree). If the
value changed, send a sensor-update message to the server.
'''
try:
f = open('/proc/meminfo', 'r')
lines = f.readlines()
f.close()
changed = False
for l in lines:
w = l.split(':')
k = w[0].strip().lower()
v = int(w[1].strip().split(' ')[0])
# this are the only field we are interested in
if k in [ 'memtotal', 'memfree']:
if self.values.set(k, v):
changed = True
if changed:
self.bcastMsg('input-changed')
except Exception as e:
logging.error(e)
| lgpl-2.1 | -1,918,675,807,934,093,300 | 26.606742 | 74 | 0.699634 | false | 3.450843 | false | false | false |
luotao1/Paddle | python/paddle/tensor/logic.py | 1 | 16431 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_type, check_variable_and_dtype
from ..fluid.layers.layer_function_generator import templatedoc
from .. import fluid
from ..fluid.framework import in_dygraph_mode
from paddle.common_ops_import import *
from ..framework import VarBase as Tensor
# TODO: define logic functions of a tensor
from ..fluid.layers import is_empty #DEFINE_ALIAS
from ..fluid.layers import logical_and #DEFINE_ALIAS
from ..fluid.layers import logical_not #DEFINE_ALIAS
from ..fluid.layers import logical_or #DEFINE_ALIAS
from ..fluid.layers import logical_xor #DEFINE_ALIAS
__all__ = [
'equal',
'equal_all',
'greater_equal',
'greater_than',
'is_empty',
'less_equal',
'less_than',
'logical_and',
'logical_not',
'logical_or',
'logical_xor',
'not_equal',
'allclose',
'is_tensor'
# 'isnan'
]
def equal_all(x, y, name=None):
"""
This OP returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise.
**NOTICE**: The output of this OP has no gradient.
Args:
x(Tensor): Tensor, data type is float32, float64, int32, int64.
y(Tensor): Tensor, data type is float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: output Tensor, data type is bool, value is [False] or [True].
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 2, 3])
z = paddle.to_tensor([1, 4, 3])
result1 = paddle.equal_all(x, y)
print(result1) # result1 = [True ]
result2 = paddle.equal_all(x, z)
print(result2) # result2 = [False ]
"""
helper = LayerHelper("equal_all", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
helper.append_op(
type='equal_all', inputs={'X': [x],
'Y': [y]}, outputs={'Out': [out]})
return out
@templatedoc()
def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
"""
${comment}
Args:
x(Tensor): ${input_comment}.
y(Tensor): ${other_comment}.
rtol(rtoltype, optional): The relative tolerance. Default: :math:`1e-5` .
atol(atoltype, optional): The absolute tolerance. Default: :math:`1e-8` .
equal_nan(equalnantype, optional): ${equal_nan_comment}.
name (str, optional): Name for the operation. For more information, please
refer to :ref:`api_guide_Name`. Default: None.
Returns:
Tensor: ${out_comment}.
Raises:
TypeError: The data type of ``x`` must be one of float32, float64.
TypeError: The data type of ``y`` must be one of float32, float64.
TypeError: The type of ``rtol`` must be float.
TypeError: The type of ``atol`` must be float.
TypeError: The type of ``equal_nan`` must be bool.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([10000., 1e-07])
y = paddle.to_tensor([10000.1, 1e-08])
result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=False, name="ignore_nan")
np_result1 = result1.numpy()
# [False]
result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=True, name="equal_nan")
np_result2 = result2.numpy()
# [False]
x = paddle.to_tensor([1.0, float('nan')])
y = paddle.to_tensor([1.0, float('nan')])
result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=False, name="ignore_nan")
np_result1 = result1.numpy()
# [False]
result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=True, name="equal_nan")
np_result2 = result2.numpy()
# [True]
"""
if in_dygraph_mode():
return core.ops.allclose(x, y, 'rtol',
str(rtol), 'atol',
str(atol), 'equal_nan', equal_nan)
check_variable_and_dtype(x, "input", ['float32', 'float64'], 'allclose')
check_variable_and_dtype(y, "input", ['float32', 'float64'], 'allclose')
check_type(rtol, 'rtol', float, 'allclose')
check_type(atol, 'atol', float, 'allclose')
check_type(equal_nan, 'equal_nan', bool, 'allclose')
helper = LayerHelper("allclose", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
inputs = {'Input': x, 'Other': y}
outputs = {'Out': out}
attrs = {'rtol': str(rtol), 'atol': str(atol), 'equal_nan': equal_nan}
helper.append_op(
type='allclose', inputs=inputs, outputs=outputs, attrs=attrs)
return out
@templatedoc()
def equal(x, y, name=None):
"""
This layer returns the truth value of :math:`x == y` elementwise.
**NOTICE**: The output of this OP has no gradient.
Args:
x(Tensor): Tensor, data type is float32, float64, int32, int64.
y(Tensor): Tensor, data type is float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: output Tensor, it's shape is the same as the input's Tensor,
and the data type is bool. The result of this op is stop_gradient.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.equal(x, y)
print(result1) # result1 = [True False False]
"""
if in_dygraph_mode():
return core.ops.equal(x, y)
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"equal")
check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"],
"equal")
helper = LayerHelper("equal", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
out.stop_gradient = True
helper.append_op(
type='equal', inputs={'X': [x],
'Y': [y]}, outputs={'Out': [out]})
return out
@templatedoc()
def greater_equal(x, y, name=None):
"""
This OP returns the truth value of :math:`x >= y` elementwise, which is equivalent function to the overloaded operator `>=`.
**NOTICE**: The output of this OP has no gradient.
Args:
x(Tensor): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
y(Tensor): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.greater_equal(x, y)
print(result1) # result1 = [True False True]
"""
if in_dygraph_mode():
return core.ops.greater_equal(x, y)
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"greater_equal")
check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"],
"greater_equal")
helper = LayerHelper("greater_equal", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
out.stop_gradient = True
helper.append_op(
type='greater_equal',
inputs={'X': [x],
'Y': [y]},
outputs={'Out': [out]})
return out
@templatedoc()
def greater_than(x, y, name=None):
"""
This OP returns the truth value of :math:`x > y` elementwise, which is equivalent function to the overloaded operator `>`.
**NOTICE**: The output of this OP has no gradient.
Args:
x(Tensor): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
y(Tensor): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x` .
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.greater_than(x, y)
print(result1) # result1 = [False False True]
"""
if in_dygraph_mode():
return core.ops.greater_than(x, y)
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"greater_than")
check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"],
"greater_than")
helper = LayerHelper("greater_than", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
out.stop_gradient = True
helper.append_op(
type='greater_than',
inputs={'X': [x],
'Y': [y]},
outputs={'Out': [out]})
return out
@templatedoc()
def less_equal(x, y, name=None):
"""
This OP returns the truth value of :math:`x <= y` elementwise, which is equivalent function to the overloaded operator `<=`.
**NOTICE**: The output of this OP has no gradient.
Args:
x(Tensor): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
y(Tensor): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.less_equal(x, y)
print(result1) # result1 = [True True False]
"""
if in_dygraph_mode():
return core.ops.less_equal(x, y)
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"less_equal")
check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"],
"less_equal")
helper = LayerHelper("less_equal", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
out.stop_gradient = True
helper.append_op(
type='less_equal', inputs={'X': [x],
'Y': [y]}, outputs={'Out': [out]})
return out
@templatedoc()
def less_than(x, y, name=None):
"""
This OP returns the truth value of :math:`x < y` elementwise, which is equivalent function to the overloaded operator `<`.
**NOTICE**: The output of this OP has no gradient.
Args:
x(Tensor): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
y(Tensor): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.less_than(x, y)
print(result1) # result1 = [False True False]
"""
if in_dygraph_mode():
return core.ops.less_than(x, y)
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"less_than")
check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"],
"less_than")
helper = LayerHelper("less_than", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
out.stop_gradient = True
helper.append_op(
type='less_than', inputs={'X': [x],
'Y': [y]}, outputs={'Out': [out]})
return out
@templatedoc()
def not_equal(x, y, name=None):
"""
This OP returns the truth value of :math:`x != y` elementwise, which is equivalent function to the overloaded operator `!=`.
**NOTICE**: The output of this OP has no gradient.
Args:
x(Tensor): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
y(Tensor): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.not_equal(x, y)
print(result1) # result1 = [False True True]
"""
if in_dygraph_mode():
return core.ops.not_equal(x, y)
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"not_equal")
check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"],
"not_equal")
helper = LayerHelper("not_equal", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
out.stop_gradient = True
helper.append_op(
type='not_equal', inputs={'X': [x],
'Y': [y]}, outputs={'Out': [out]})
return out
def is_tensor(x):
"""
This function tests whether input object is a paddle.Tensor.
Args:
x (object): Object to test.
Returns:
A boolean value. True if 'x' is a paddle.Tensor, otherwise False.
Examples:
.. code-block:: python
import paddle
input1 = paddle.rand(shape=[2, 3, 5], dtype='float32')
check = paddle.is_tensor(input1)
print(check) #True
input3 = [1, 4]
check = paddle.is_tensor(input3)
print(check) #False
"""
return isinstance(x, Tensor)
| apache-2.0 | 1,783,713,438,296,025,000 | 35.676339 | 128 | 0.585296 | false | 3.685734 | false | false | false |
alsrgv/tensorflow | tensorflow/python/ops/math_grad.py | 1 | 57271 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in math_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
def _safe_shape_div(x, y):
"""Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`."""
return x // math_ops.maximum(y, 1)
@ops.RegisterGradient("ArgMax")
def _ArgMaxGrad(op, grad):
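  """ArgMax has no defined gradient; returns None for both inputs."""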
del op, grad
return [None, None]
@ops.RegisterGradient("ArgMin")
def _ArgMinGrad(op, grad):
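  """ArgMin has no defined gradient; returns None for both inputs."""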
del op, grad
return [None, None]
# TODO(rmlarsen): Implement gradient.
ops.NotDifferentiable("EuclideanNorm")
_empty_tuple = ()
def _IsScalar(x):
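  """Returns True if `x` is statically known to be a scalar (rank 0)."""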
return x._shape_tuple() is _empty_tuple # pylint: disable=protected-access
@ops.RegisterGradient("Sum")
def _SumGrad(op, grad):
"""Gradient for Sum."""
# Fast path for when reducing to a scalar and ndims is known: adds only
# Reshape and Tile ops (and possibly a Shape).
input_0_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access
if input_0_shape is not None:
axes = tensor_util.constant_value(op.inputs[1])
if axes is not None:
rank = len(input_0_shape)
if np.array_equal(axes, np.arange(rank)): # Reduce all dims.
if context.executing_eagerly():
ctx = context.context()
new_shape = ctx.ones_rank_cache().get(rank)
if new_shape is None:
new_shape = constant_op.constant([1] * rank, dtype=dtypes.int32)
ctx.ones_rank_cache().put(rank, new_shape)
else:
new_shape = [1] * rank
grad = array_ops.reshape(grad, new_shape)
# If shape is not fully defined (but rank is), we use Shape.
if None not in input_0_shape:
input_shape = constant_op.constant(input_0_shape, dtype=dtypes.int32)
else:
input_shape = array_ops.shape(op.inputs[0])
return [array_ops.tile(grad, input_shape), None]
input_shape = array_ops.shape(op.inputs[0])
# TODO(apassos) remove this once device placement for eager ops makes more
# sense.
with ops.colocate_with(input_shape):
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
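  # Reshape grad so the reduced dims are kept with size 1, then tile it back
  # to the full input shape.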
grad = array_ops.reshape(grad, output_shape_kept_dims)
return [array_ops.tile(grad, tile_scaling), None]
def _MinOrMaxGrad(op, grad):
"""Gradient for Min or Max. Amazingly it's precisely the same code."""
input_shape = array_ops.shape(op.inputs[0])
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
y = op.outputs[0]
y = array_ops.reshape(y, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
# Compute the number of selected (maximum or minimum) elements in each
# reduction dimension. If there are multiple minimum or maximum elements
# then the gradient will be divided between them.
indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)
num_selected = array_ops.reshape(
math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims)
return [math_ops.divide(indicators, num_selected) * grad, None]
@ops.RegisterGradient("Max")
def _MaxGrad(op, grad):
"""Gradient for Max."""
return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Min")
def _MinGrad(op, grad):
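  """Gradient for Min."""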
return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Mean")
def _MeanGrad(op, grad):
"""Gradient for Mean."""
sum_grad = _SumGrad(op, grad)[0]
input_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access
output_shape = op.outputs[0]._shape_tuple() # pylint: disable=protected-access
if (input_shape is not None and output_shape is not None and
None not in input_shape and None not in output_shape):
input_size = np.prod(input_shape)
output_size = np.prod(output_shape)
factor = input_size // max(output_size, 1)
factor = constant_op.constant(factor, dtype=sum_grad.dtype)
else:
input_shape = array_ops.shape(op.inputs[0])
output_shape = array_ops.shape(op.outputs[0])
factor = _safe_shape_div(
math_ops.reduce_prod(input_shape), math_ops.reduce_prod(output_shape))
return math_ops.truediv(sum_grad, math_ops.cast(factor, sum_grad.dtype)), None
@ops.RegisterGradient("Prod")
def _ProdGrad(op, grad):
"""Gradient for Prod."""
# The gradient can be expressed by dividing the product by each entry of the
# input tensor, but this approach can't deal with zeros in the input.
# Here, we avoid this problem by composing the output as a product of two
# cumprod operations.
input_shape = array_ops.shape(op.inputs[0])
# Reshape reduction indices for the case where the parameter is a scalar
reduction_indices = array_ops.reshape(op.inputs[1], [-1])
# Expand grad to full input shape
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
grad = array_ops.tile(grad, tile_scaling)
# Pack all reduced dimensions into a single one, so we can perform the
# cumprod ops. If the reduction dims list is empty, it defaults to float32,
  # so we need to cast here. We put all the shape-related ops on CPU to avoid
  # copying back and forth, and because listdiff is CPU only.
with ops.device("/cpu:0"):
rank = array_ops.rank(op.inputs[0])
reduction_indices = (reduction_indices + rank) % rank
reduced = math_ops.cast(reduction_indices, dtypes.int32)
idx = math_ops.range(0, rank)
other, _ = array_ops.setdiff1d(idx, reduced)
perm = array_ops.concat([reduced, other], 0)
reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
permuted = array_ops.transpose(op.inputs[0], perm)
permuted_shape = array_ops.shape(permuted)
reshaped = array_ops.reshape(permuted, (reduced_num, other_num))
# Calculate product, leaving out the current entry
left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
# For complex inputs, the gradient is in the conjugate direction.
y = array_ops.reshape(
math_ops.conj(left) * math_ops.conj(right), permuted_shape)
# Invert the transpose and reshape operations.
# Make sure to set the statically known shape information through a reshape.
out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
return array_ops.reshape(out, input_shape), None
@ops.RegisterGradient("SegmentSum")
def _SegmentSumGrad(op, grad):
"""Gradient for SegmentSum."""
return array_ops.gather(grad, op.inputs[1]), None
@ops.RegisterGradient("SegmentMean")
def _SegmentMeanGrad(op, grad):
"""Gradient for SegmentMean."""
input_rank = array_ops.rank(op.inputs[0])
ones_shape = array_ops.concat([
array_ops.shape(op.inputs[1]),
array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)
], 0)
ones = array_ops.fill(ones_shape, constant_op.constant(1, dtype=grad.dtype))
scaled_grad = math_ops.divide(grad, math_ops.segment_sum(ones, op.inputs[1]))
return array_ops.gather(scaled_grad, op.inputs[1]), None
@ops.RegisterGradient("SparseSegmentSum")
def _SparseSegmentSumGrad(op, grad):
"""Gradient for SparseSegmentSum."""
input_rows = array_ops.shape(op.inputs[0])[0]
return (math_ops.unsorted_segment_sum(
array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None,
None)
@ops.RegisterGradient("SparseSegmentSumWithNumSegments")
def _SparseSegmentSumWithNumSegmentsGrad(op, grad):
"""Gradient for SparseSegmentSumWithNumSegments."""
input_rows = array_ops.shape(op.inputs[0])[0]
return (math_ops.unsorted_segment_sum(
array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None,
None, None)
@ops.RegisterGradient("SparseSegmentMean")
def _SparseSegmentMeanGrad(op, grad):
"""Gradient for SparseSegmentMean."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None)
@ops.RegisterGradient("SparseSegmentMeanWithNumSegments")
def _SparseSegmentMeanWithNumSegmentsGrad(op, grad):
"""Gradient for SparseSegmentMeanWithNumSegments."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None, None)
@ops.RegisterGradient("SparseSegmentSqrtN")
def _SparseSegmentSqrtNGrad(op, grad):
"""Gradient for SparseSegmentSqrtN."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None)
@ops.RegisterGradient("SparseSegmentSqrtNWithNumSegments")
def _SparseSegmentSqrtNWithNumSegmentsGrad(op, grad):
"""Gradient for SparseSegmentSqrtNWithNumSegments."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None, None)
def _SegmentMinOrMaxGrad(op, grad):
""" Gradient for SegmentMin and SegmentMax. """
zeros = array_ops.zeros_like(op.inputs[0], dtype=op.inputs[0].dtype)
# Get the number of selected (minimum or maximum) elements in each segment.
gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])
is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
num_selected = math_ops.segment_sum(
math_ops.cast(is_selected, grad.dtype), op.inputs[1])
# Compute the gradient for each segment. The gradient for the ith segment is
# divided evenly among the selected elements in that segment.
weighted_grads = math_ops.divide(grad, num_selected)
gathered_grads = array_ops.gather(weighted_grads, op.inputs[1])
return array_ops.where(is_selected, gathered_grads, zeros), None
@ops.RegisterGradient("SegmentMin")
def _SegmentMinGrad(op, grad):
"""Gradient for SegmentMin."""
return _SegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("SegmentMax")
def _SegmentMaxGrad(op, grad):
"""Gradient for SegmentMax."""
return _SegmentMinOrMaxGrad(op, grad)
def _GatherDropNegatives(params,
ids,
zero_clipped_indices=None,
is_positive=None):
""" Helper function for unsorted segment ops.
Gathers params for
positive segment ids and gathers 0 for inputs with negative segment id.
Also returns the clipped indices and a boolean mask with the same shape
as ids where a positive id is masked as true. With this, the latter two
can be passed as arguments to this function to reuse them.
"""
if zero_clipped_indices is None:
zero_clipped_indices = math_ops.maximum(ids, array_ops.zeros_like(ids))
gathered = array_ops.gather(params, zero_clipped_indices)
if is_positive is None:
is_positive = math_ops.greater_equal(ids, 0)
# tf.where(condition, x, y) requires condition to have the same shape as x
# and y.
  # TODO(philjd): remove this if tf.where supports broadcasting (#9284)
for _ in range(gathered.shape.ndims - is_positive.shape.ndims):
is_positive = array_ops.expand_dims(is_positive, -1)
is_positive = (
is_positive & array_ops.ones_like(gathered, dtype=dtypes.bool))
# replace gathered params of negative indices with 0
zero_slice = array_ops.zeros_like(gathered)
return (array_ops.where(is_positive, gathered, zero_slice),
zero_clipped_indices, is_positive)
def _UnsortedSegmentMinOrMaxGrad(op, grad):
""" Gradient for UnsortedSegmentMin and UnsortedSegmentMax. """
# Get the number of selected (minimum or maximum) elements in each segment.
gathered_outputs, zero_clipped_indices, is_positive = \
_GatherDropNegatives(op.outputs[0], op.inputs[1])
is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
is_selected = math_ops.logical_and(is_selected, is_positive)
num_selected = math_ops.unsorted_segment_sum(
math_ops.cast(is_selected, grad.dtype), op.inputs[1], op.inputs[2])
# Compute the gradient for each segment. The gradient for the ith segment is
# divided evenly among the selected elements in that segment.
weighted_grads = math_ops.divide(grad, num_selected)
gathered_grads, _, _ = _GatherDropNegatives(weighted_grads, None,
zero_clipped_indices, is_positive)
zeros = array_ops.zeros_like(gathered_grads)
return array_ops.where(is_selected, gathered_grads, zeros), None, None
@ops.RegisterGradient("UnsortedSegmentSum")
def _UnsortedSegmentSumGrad(op, grad):
"""Gradient for UnsortedSegmentSum."""
return _GatherDropNegatives(grad, op.inputs[1])[0], None, None
@ops.RegisterGradient("UnsortedSegmentMax")
def _UnsortedSegmentMaxGrad(op, grad):
""" Gradient for UnsortedSegmentMax. """
return _UnsortedSegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("UnsortedSegmentMin")
def _UnsortedSegmentMinGrad(op, grad):
""" Gradient for UnsortedSegmentMin. """
return _UnsortedSegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("UnsortedSegmentProd")
def _UnsortedSegmentProdGrad(op, grad):
""" Gradient for UnsortedSegmentProd.
The gradient can be expressed for each segment by dividing the segment's
product by each element of the segment input tensor, but this approach can't
deal with zeros in the input.
  Unlike reduce_prod we can't use cumprod here as individual segments may have
  a different number of elements. Therefore we consider three cases:
  1) A segment input contains no zeros and we can safely divide by the input
     tensor.
  2) A segment contains exactly one zero. Then the gradient of each input of
     the segment is zero except for the 0-input, where the gradient is
     the product of the remaining segment entries.
3) A segment contains at least two zeros. The gradient is zero for all
segment inputs.
"""
# Note that unsorted_segment_sum will filter out the negative indices,
# so we don't need to do a logical_and with is_positive here
is_zero = math_ops.equal(op.inputs[0], 0)
num_zeros = gen_math_ops.unsorted_segment_sum(
math_ops.cast(is_zero, dtype=dtypes.int32), op.inputs[1], op.inputs[2])
# handle case 3 and set the gradient to 0 for segments with more than one
# 0 as input
grad = array_ops.where(
math_ops.greater(num_zeros, 1), array_ops.zeros_like(grad), grad)
# replace all zeros with ones and compute the unsorted_segment_prod
non_zero_data = array_ops.where(is_zero, array_ops.ones_like(op.inputs[0]),
op.inputs[0])
non_zero_prod = gen_math_ops.unsorted_segment_prod(non_zero_data,
op.inputs[1], op.inputs[2])
# clip the indices for gather to be positive
zero_clipped_indices = math_ops.maximum(op.inputs[1],
array_ops.zeros_like(op.inputs[1]))
gathered_prod = array_ops.gather(op.outputs[0], zero_clipped_indices)
gathered_non_zero_prod = array_ops.gather(non_zero_prod, zero_clipped_indices)
prod_divided_by_el = gathered_prod / op.inputs[0] # May contain nan/inf.
# Now fetch the individual results for segments containing 0 and those that
# don't. is_zero will also fetch results for entries with negative index
# but the following gather_drop_negatives sets the corresponding entry in
# grad to 0 for these
partial_derivative = array_ops.where(is_zero, gathered_non_zero_prod,
prod_divided_by_el)
gathered_grad = _GatherDropNegatives(grad, op.inputs[1],
zero_clipped_indices)[0]
return gathered_grad * partial_derivative, None, None
@ops.RegisterGradient("Abs")
def _AbsGrad(op, grad):
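  """Returns grad * sign(x)."""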
x = op.inputs[0]
return grad * math_ops.sign(x)
@ops.RegisterGradient("Neg")
def _NegGrad(_, grad):
"""Returns -grad."""
return -grad
@ops.RegisterGradient("Inv")
def _InvGrad(op, grad):
"""Returns -grad * (1 / x^2)."""
y = op.outputs[0] # y = 1 / x
return gen_math_ops.reciprocal_grad(y, grad)
@ops.RegisterGradient("Reciprocal")
def _ReciprocalGrad(op, grad):
"""Returns -grad * (1 / x^2)."""
y = op.outputs[0] # y = 1 / x
return gen_math_ops.reciprocal_grad(y, grad)
@ops.RegisterGradient("InvGrad")
def _InvGradGrad(op, grad):
b = op.inputs[1]
  # op.outputs[0]: y = -b * conj(a)^2
with ops.control_dependencies([grad]):
ca = math_ops.conj(op.inputs[0])
cg = math_ops.conj(grad)
return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad)
@ops.RegisterGradient("ReciprocalGrad")
def _ReciprocalGradGrad(op, grad):
b = op.inputs[1]
  # op.outputs[0]: y = -b * conj(a)^2
with ops.control_dependencies([grad]):
ca = math_ops.conj(op.inputs[0])
cg = math_ops.conj(grad)
return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad)
@ops.RegisterGradient("Square")
def _SquareGrad(op, grad):
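  """Returns grad * 2 * x."""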
x = op.inputs[0]
# Added control dependencies to prevent 2*x from being computed too early.
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
y = constant_op.constant(2.0, dtype=x.dtype)
return math_ops.multiply(grad, math_ops.multiply(x, y))
@ops.RegisterGradient("Sqrt")
def _SqrtGrad(op, grad):
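  """Returns grad * 0.5 / y, where y = sqrt(x)."""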
y = op.outputs[0] # y = x^(1/2)
return gen_math_ops.sqrt_grad(y, grad)
@ops.RegisterGradient("SqrtGrad")
def _SqrtGradGrad(op, grad):
a = op.inputs[0]
y = op.outputs[0] # y = 0.5 * b / conj(a)
with ops.control_dependencies([grad]):
if compat.forward_compatible(2019, 9, 14):
ga = gen_math_ops.xdivy(grad, a)
return -gen_math_ops.mul_no_nan(y, math_ops.conj(ga)), 0.5 * ga
else:
ga = grad / a
return -math_ops.conj(ga) * y, 0.5 * ga
@ops.RegisterGradient("Rsqrt")
def _RsqrtGrad(op, grad):
"""Returns -0.5 * grad * conj(y)^3."""
y = op.outputs[0] # y = x^(-1/2)
return gen_math_ops.rsqrt_grad(y, grad)
@ops.RegisterGradient("RsqrtGrad")
def _RsqrtGradGrad(op, grad):
"""Returns backprop gradient for f(a,b) = -0.5 * b * conj(a)^3."""
a = op.inputs[0] # a = x^{-1/2}
b = op.inputs[1] # backprop gradient for a
with ops.control_dependencies([grad]):
ca = math_ops.conj(a)
cg = math_ops.conj(grad)
grad_a = -1.5 * cg * b * math_ops.square(ca)
grad_b = gen_math_ops.rsqrt_grad(ca, grad)
return grad_a, grad_b
@ops.RegisterGradient("Exp")
def _ExpGrad(op, grad):
"""Returns grad * exp(x)."""
y = op.outputs[0] # y = e^x
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(y, grad)
else:
return grad * y
@ops.RegisterGradient("Expm1")
def _Expm1Grad(op, grad):
"""Returns grad * exp(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
y = math_ops.exp(x)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(y, grad)
else:
return grad * y
@ops.RegisterGradient("Log")
def _LogGrad(op, grad):
"""Returns grad * (1/x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
if compat.forward_compatible(2019, 9, 14):
return gen_math_ops.xdivy(grad, x)
else:
return grad * math_ops.reciprocal(x)
@ops.RegisterGradient("Log1p")
def _Log1pGrad(op, grad):
"""Returns grad * (1/(1 + x))."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
if compat.forward_compatible(2019, 9, 14):
return gen_math_ops.xdivy(grad, 1 + x)
else:
return grad * math_ops.reciprocal(1 + x)
@ops.RegisterGradient("Xlogy")
def _XLogyGrad(op, grad):
"""Returns gradient of xlogy(x, y) with respect to x and y."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
with ops.control_dependencies([grad]):
not_zero_x = math_ops.cast(
math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)
partial_x = gen_math_ops.xlogy(not_zero_x, y)
partial_y = gen_math_ops.xdivy(x, y)
return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),
array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))
@ops.RegisterGradient("Xdivy")
def _XDivyGrad(op, grad):
"""Returns gradient of xdivy(x, y) with respect to x and y."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
with ops.control_dependencies([grad]):
not_zero_x = math_ops.cast(
math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)
partial_x = gen_math_ops.xdivy(not_zero_x, y)
partial_y = gen_math_ops.xdivy(math_ops.negative(x), y**2)
return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),
array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))
@ops.RegisterGradient("Sinh")
def _SinhGrad(op, grad):
"""Returns grad * cosh(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.cosh(x)
@ops.RegisterGradient("Cosh")
def _CoshGrad(op, grad):
"""Returns grad * sinh(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.sinh(x)
@ops.RegisterGradient("Tanh")
def _TanhGrad(op, grad):
"""Returns grad * (1 - tanh(x) * tanh(x))."""
y = op.outputs[0] # y = tanh(x)
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return gen_math_ops.tanh_grad(y, grad)
@ops.RegisterGradient("Asinh")
def _AsinhGrad(op, grad):
"""Returns grad * 1/cosh(y)."""
y = op.outputs[0]
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return grad / math_ops.cosh(y)
@ops.RegisterGradient("Acosh")
def _AcoshGrad(op, grad):
"""Returns grad * 1/sinh(y)."""
y = op.outputs[0]
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return math_ops.xdivy(grad, math_ops.sinh(y))
else:
return grad / math_ops.sinh(y)
@ops.RegisterGradient("Atanh")
def _AtanhGrad(op, grad):
"""Returns grad * 1/ (1 - x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
inv = math_ops.reciprocal(math_ops.subtract(one, x2))
return grad * inv
@ops.RegisterGradient("TanhGrad")
def _TanhGradGrad(op, grad):
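"""Returns backprop gradients for f(a, b) = b * (1 - a^2) w.r.t. a and b."""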
with ops.control_dependencies([grad]):
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
return grad * -2.0 * b * a, gen_math_ops.tanh_grad(a, grad)
@ops.RegisterGradient("Erf")
def _ErfGrad(op, grad):
"""Returns grad * 2/sqrt(pi) * exp(-x**2)."""
x = op.inputs[0]
two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Erfc")
def _ErfcGrad(op, grad):
"""Returns -grad * 2/sqrt(pi) * exp(-x**2)."""
x = op.inputs[0]
minus_two_over_root_pi = constant_op.constant(
-2 / np.sqrt(np.pi), dtype=grad.dtype)
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Lgamma")
def _LgammaGrad(op, grad):
"""Returns grad * digamma(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(math_ops.digamma(x), grad)
else:
return grad * math_ops.digamma(x)
@ops.RegisterGradient("Digamma")
def _DigammaGrad(op, grad):
"""Compute gradient of the digamma function with respect to its argument."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
partial_x = math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(partial_x, grad)
else:
return grad * partial_x
@ops.RegisterGradient("BesselI0e")
def _BesselI0eGrad(op, grad):
"""Compute gradient of bessel_i0e(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
partial_x = (math_ops.bessel_i1e(x) - math_ops.sign(x) * y)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(partial_x, grad)
else:
return grad * partial_x
@ops.RegisterGradient("BesselI1e")
def _BesselI1eGrad(op, grad):
"""Compute gradient of bessel_i1e(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
# For x = 0, the correct gradient is 0.5.
# However, the main branch gives NaN because of the division by x, so
# we impute the gradient manually.
# An alternative solution is to express the gradient via bessel_i0e and
# bessel_i2e, but the latter is not yet implemented in Eigen.
eps = np.finfo(x.dtype.as_numpy_dtype).eps
zeros = array_ops.zeros_like(x)
x_is_not_tiny = math_ops.abs(x) > eps
safe_x = array_ops.where(x_is_not_tiny, x, eps + zeros)
dy_dx = math_ops.bessel_i0e(safe_x) - y * (
math_ops.sign(safe_x) + math_ops.reciprocal(safe_x))
dy_dx = array_ops.where(x_is_not_tiny, dy_dx, 0.5 + zeros)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(dy_dx, grad)
else:
return grad * dy_dx
@ops.RegisterGradient("Igamma")
def _IgammaGrad(op, grad):
"""Returns gradient of igamma(a, x) with respect to a and x."""
a = op.inputs[0]
x = op.inputs[1]
sa = array_ops.shape(a)
sx = array_ops.shape(x)
ra, rx = gen_array_ops.broadcast_gradient_args(sa, sx)
with ops.control_dependencies([grad]):
partial_a = gen_math_ops.igamma_grad_a(a, x)
# Perform operations in log space before summing, because Gamma(a)
# and Gamma'(a) can grow large.
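# Note: exp(-x + (a - 1) * log(x) - lgamma(a)) equals x**(a - 1) * exp(-x) / Gamma(a),
# which is d/dx igamma(a, x).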
partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) -
math_ops.lgamma(a))
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_a, grad), ra), sa),
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx),
sx))
else:
return (array_ops.reshape(math_ops.reduce_sum(partial_a * grad, ra), sa),
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Igammac")
def _IgammacGrad(op, grad):
"""Returns gradient of igammac(a, x) = 1 - igamma(a, x) w.r.t. a and x."""
igamma_grad_a, igamma_grad_x = _IgammaGrad(op, grad)
return (-igamma_grad_a, -igamma_grad_x)
@ops.RegisterGradient("Betainc")
def _BetaincGrad(op, grad):
"""Returns gradient of betainc(a, b, x) with respect to x."""
# TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b
a, b, x = op.inputs
# two cases: x is a scalar and a/b are same-shaped tensors, or vice
# versa; so it's sufficient to check against shape(a).
sa = array_ops.shape(a)
sx = array_ops.shape(x)
_, rx = gen_array_ops.broadcast_gradient_args(sa, sx)
# Perform operations in log space before summing, because terms
# can grow large.
log_beta = (
gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b) -
gen_math_ops.lgamma(a + b))
partial_x = math_ops.exp((b - 1) * math_ops.log(1 - x) +
(a - 1) * math_ops.log(x) - log_beta)
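# partial_x equals x**(a - 1) * (1 - x)**(b - 1) / B(a, b), i.e. d/dx betainc(a, b, x).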
# TODO(b/36815900): Mark None return values as NotImplemented
if compat.forward_compatible(2019, 9, 14):
return (
None, # da
None, # db
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx), sx))
else:
return (
None, # da
None, # db
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Zeta")
def _ZetaGrad(op, grad):
"""Returns gradient of zeta(x, q) with respect to x and q."""
# TODO(tillahoffmann): Add derivative with respect to x
x = op.inputs[0]
q = op.inputs[1]
# Broadcast gradients
sx = array_ops.shape(x)
sq = array_ops.shape(q)
unused_rx, rq = gen_array_ops.broadcast_gradient_args(sx, sq)
# Evaluate gradient
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
q = math_ops.conj(q)
partial_q = -x * math_ops.zeta(x + 1, q)
# TODO(b/36815900): Mark None return values as NotImplemented
if compat.forward_compatible(2019, 9, 14):
return (None,
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_q, grad), rq),
sq))
else:
return (None,
array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))
@ops.RegisterGradient("Polygamma")
def _PolygammaGrad(op, grad):
"""Returns gradient of psi(n, x) with respect to n and x."""
# TODO(tillahoffmann): Add derivative with respect to n
n = op.inputs[0]
x = op.inputs[1]
# Broadcast gradients
sn = array_ops.shape(n)
sx = array_ops.shape(x)
unused_rn, rx = gen_array_ops.broadcast_gradient_args(sn, sx)
# Evaluate gradient
with ops.control_dependencies([grad]):
n = math_ops.conj(n)
x = math_ops.conj(x)
partial_x = math_ops.polygamma(n + 1, x)
# TODO(b/36815900): Mark None return values as NotImplemented
if compat.forward_compatible(2019, 9, 14):
return (None,
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx),
sx))
else:
return (None,
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Sigmoid")
def _SigmoidGrad(op, grad):
"""Returns grad * sigmoid(x) * (1 - sigmoid(x))."""
y = op.outputs[0] # y = sigmoid(x)
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return gen_math_ops.sigmoid_grad(y, grad)
@ops.RegisterGradient("SigmoidGrad")
def _SigmoidGradGrad(op, grad):
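"""Returns backprop gradients for f(a, b) = b * a * (1 - a) w.r.t. a and b."""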
with ops.control_dependencies([grad]):
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
gb = grad * b
return gb - 2.0 * gb * a, gen_math_ops.sigmoid_grad(a, grad)
@ops.RegisterGradient("Sign")
def _SignGrad(op, _):
"""Returns 0."""
x = op.inputs[0]
return array_ops.zeros(array_ops.shape(x), dtype=x.dtype)
@ops.RegisterGradient("Sin")
def _SinGrad(op, grad):
"""Returns grad * cos(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.cos(x)
@ops.RegisterGradient("Cos")
def _CosGrad(op, grad):
"""Returns grad * -sin(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return -grad * math_ops.sin(x)
@ops.RegisterGradient("Tan")
def _TanGrad(op, grad):
"""Returns grad * 1/sec^2(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
secx = math_ops.reciprocal(math_ops.cos(x))
secx2 = math_ops.square(secx)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(secx2, grad)
else:
return secx2 * grad
@ops.RegisterGradient("Asin")
def _AsinGrad(op, grad):
"""Returns grad * 1/sqrt(1-x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
den = math_ops.sqrt(math_ops.subtract(one, x2))
if compat.forward_compatible(2019, 9, 14):
return math_ops.xdivy(grad, den)
else:
inv = math_ops.reciprocal(den)
return grad * inv
@ops.RegisterGradient("Acos")
def _AcosGrad(op, grad):
"""Returns grad * -1/sqrt(1-x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
den = math_ops.sqrt(math_ops.subtract(one, x2))
if compat.forward_compatible(2019, 9, 14):
return -math_ops.xdivy(grad, den)
else:
inv = math_ops.reciprocal(den)
return -grad * inv
@ops.RegisterGradient("Atan")
def _AtanGrad(op, grad):
"""Returns grad * 1/ (1 + x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
inv = math_ops.reciprocal(math_ops.add(one, x2))
return grad * inv
@ops.RegisterGradient("Atan2")
def _Atan2Grad(op, grad):
"""Returns grad * x / (x^2 + y^2), grad * -y / (x^2 + y^2)."""
y = op.inputs[0]
x = op.inputs[1]
with ops.control_dependencies([grad]):
if compat.forward_compatible(2019, 9, 14):
grad_inv = math_ops.xdivy(grad, (math_ops.square(x) + math_ops.square(y)))
else:
grad_inv = grad / (math_ops.square(x) + math_ops.square(y))
return x * grad_inv, -y * grad_inv
@ops.RegisterGradient("AddN")
def _AddNGrad(op, grad):
"""Copies the gradient to all inputs."""
# Not broadcasting.
return [grad] * len(op.inputs)
def _ShapesFullySpecifiedAndEqual(x, y, grad):
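"""Returns True if x, y and grad all have the same, fully known, static shape."""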
# pylint: disable=protected-access
x_shape = x._shape_tuple()
y_shape = y._shape_tuple()
grad_shape = grad._shape_tuple()
# pylint: enable=protected-access
return (x_shape == y_shape and x_shape == grad_shape and
x_shape is not None and None not in x_shape)
@ops.RegisterGradient("Add")
@ops.RegisterGradient("AddV2")
def _AddGrad(op, grad):
"""Gradient for Add."""
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
return grad, None
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
x = op.inputs[0]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return grad, grad
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
else:
gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
else:
gy = array_ops.reshape(math_ops.reduce_sum(grad, ry), sy)
return (gx, gy)
@ops.RegisterGradient("Sub")
def _SubGrad(op, grad):
"""Gradient for Sub."""
x = op.inputs[0]
y = op.inputs[1]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return grad, -grad
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),
array_ops.reshape(-math_ops.reduce_sum(grad, ry), sy))
@ops.RegisterGradient("Mul")
def _MulGrad(op, grad):
"""The gradient of scalar multiplication."""
x = op.inputs[0]
y = op.inputs[1]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad) and
grad.dtype in (dtypes.int32, dtypes.float32)):
return gen_math_ops.mul(grad, y), gen_math_ops.mul(grad, x)
assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
return (array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry), sy))
@ops.RegisterGradient("MulNoNan")
def _MulNoNanGrad(op, grad):
"""The gradient of scalar multiplication with NaN-suppression."""
x = op.inputs[0]
y = op.inputs[1]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return gen_math_ops.mul_no_nan(grad, y), gen_math_ops.mul_no_nan(x, grad)
assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul_no_nan(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul_no_nan(x, grad), ry), sy))
@ops.RegisterGradient("Div")
def _DivGrad(op, grad):
"""The gradient for the Div operator."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.xdivy(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
math_ops.mul_no_nan(
math_ops.divide(math_ops.divide(-x, y), y), grad), ry),
sy))
else:
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.divide(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
grad * math_ops.divide(math_ops.divide(-x, y), y), ry), sy))
@ops.RegisterGradient("FloorDiv")
def _FloorDivGrad(_, unused_grad):
"""The gradient for the FloorDiv operator."""
return None, None
@ops.RegisterGradient("FloorMod")
def _FloorModGrad(op, grad):
"""Returns grad * (1, -floor(x/y))."""
x = math_ops.conj(op.inputs[0])
y = math_ops.conj(op.inputs[1])
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
floor_xy = math_ops.floor_div(x, y)
gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
gy = array_ops.reshape(
math_ops.reduce_sum(grad * math_ops.negative(floor_xy), ry), sy)
return gx, gy
@ops.RegisterGradient("TruncateDiv")
def _TruncateDivGrad(_, unused_grad):
return None, None
@ops.RegisterGradient("RealDiv")
def _RealDivGrad(op, grad):
"""RealDiv op gradient."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.xdivy(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
math_ops.mul_no_nan(
math_ops.realdiv(math_ops.realdiv(-x, y), y), grad),
ry), sy))
else:
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.realdiv(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
grad * math_ops.realdiv(math_ops.realdiv(-x, y), y), ry),
sy))
@ops.RegisterGradient("DivNoNan")
def _DivNoNanGrad(op, grad):
"""DivNoNan op gradient."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
math_ops.mul_no_nan(
math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y),
grad), ry), sy))
else:
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
grad * math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y),
ry), sy))
@ops.RegisterGradient("Pow")
def _PowGrad(op, grad):
"""Returns grad * (y*x^(y-1), z*log(x))."""
x = op.inputs[0]
y = op.inputs[1]
z = op.outputs[0]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
z = math_ops.conj(z)
if compat.forward_compatible(2019, 9, 14):
gx = array_ops.reshape(
math_ops.reduce_sum(
gen_math_ops.mul_no_nan(y * math_ops.pow(x, y - 1), grad), rx), sx)
else:
gx = array_ops.reshape(
math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
# Avoid false singularity at x = 0
if x.dtype.is_complex:
# real(x) < 0 is fine for the complex case
mask = math_ops.not_equal(x, 0)
else:
# There's no sensible real value to return if x < 0, so return 0
mask = x > 0
safe_x = array_ops.where(mask, x, array_ops.ones_like(x))
log_x = array_ops.where(mask, math_ops.log(safe_x), array_ops.zeros_like(x))
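# With this mask, entries where x <= 0 (in the real case) contribute 0 to gy instead of
# the nan/-inf that log(x) would produce there.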
if compat.forward_compatible(2019, 9, 14):
gy = array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul_no_nan(z * log_x, grad), ry), sy)
else:
gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
return gx, gy
def _MaximumMinimumGradInputOnly(op, grad, selector_op):
x = op.inputs[0]
y = op.inputs[1]
zeros = array_ops.zeros_like(grad)
xmask = selector_op(x, y)
xgrad = array_ops.where(xmask, grad, zeros)
ygrad = None # Return None for ygrad since the config allows that.
return (xgrad, ygrad)
def _MaximumMinimumGrad(op, grad, selector_op):
"""Factor out the code for the gradient of Maximum or Minimum."""
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
# When we want to get gradients for the first input only, and the second
# input tensor is a scalar, we can do a much simpler calculation
return _MaximumMinimumGradInputOnly(op, grad, selector_op)
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
x = op.inputs[0]
gdtype = grad.dtype
sx = array_ops.shape(x)
sy = array_ops.shape(y)
gradshape = array_ops.shape(grad)
zeros = array_ops.zeros(gradshape, gdtype)
xmask = selector_op(x, y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
else:
xgrad = array_ops.where(xmask, grad, zeros)
gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
else:
ygrad = array_ops.where(xmask, zeros, grad)
gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
return (gx, gy)
@ops.RegisterGradient("Maximum")
def _MaximumGrad(op, grad):
"""Returns grad*(x > y, x <= y) with type of grad."""
return _MaximumMinimumGrad(op, grad, math_ops.greater_equal)
@ops.RegisterGradient("Minimum")
def _MinimumGrad(op, grad):
"""Returns grad*(x < y, x >= y) with type of grad."""
return _MaximumMinimumGrad(op, grad, math_ops.less_equal)
@ops.RegisterGradient("SquaredDifference")
def _SquaredDifferenceGrad(op, grad):
"""Returns the gradient for (x-y)^2."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
with ops.control_dependencies([grad]):
# The parens ensure that if grad is IndexedSlices, it'll get multiplied by a
# Tensor (not a plain number like 2.0), which causes it to be converted to a Tensor.
x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
-array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))
# Logical operations have no gradients.
ops.NotDifferentiable("Less")
ops.NotDifferentiable("LessEqual")
ops.NotDifferentiable("Greater")
ops.NotDifferentiable("GreaterEqual")
ops.NotDifferentiable("Equal")
ops.NotDifferentiable("ApproximateEqual")
ops.NotDifferentiable("NotEqual")
ops.NotDifferentiable("LogicalAnd")
ops.NotDifferentiable("LogicalOr")
ops.NotDifferentiable("LogicalNot")
@ops.RegisterGradient("Select")
def _SelectGrad(op, grad):
c = op.inputs[0]
x = op.inputs[1]
zeros = array_ops.zeros_like(x)
return (None, array_ops.where(c, grad, zeros), array_ops.where(
c, zeros, grad))
@ops.RegisterGradient("SelectV2")
def _SelectGradV2(op, grad):
c = op.inputs[0]
x = op.inputs[1]
y = op.inputs[2]
zeros = array_ops.zeros([], dtype=grad.dtype.base_dtype)
gx = array_ops.where_v2(c, grad, zeros)
x_shape = array_ops.shape(x)
output_shape = array_ops.shape(op.outputs[0])
# Reduce away broadcasted leading dims.
reduce_x, _ = gen_array_ops.broadcast_gradient_args(x_shape, output_shape)
gx = math_ops.reduce_sum(gx, keepdims=True, axis=reduce_x)
gx = array_ops.reshape(gx, x_shape)
gy = array_ops.where_v2(c, zeros, grad)
y_shape = array_ops.shape(y)
# Reduce away broadcasted leading dims.
reduce_y, _ = gen_array_ops.broadcast_gradient_args(y_shape, output_shape)
gy = math_ops.reduce_sum(gy, keepdims=True, axis=reduce_y)
gy = array_ops.reshape(gy, y_shape)
return (None, gx, gy)
def _MatMulGradAgainstFirstOnly(op, grad):
"""Gradient for MatMul, only for the first input."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
b = math_ops.conj(op.inputs[1])
if not t_a and not t_b:
grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True)
elif not t_a and t_b:
grad_a = gen_math_ops.mat_mul(grad, b)
elif t_a and not t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True)
elif t_a and t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True)
return grad_a, None
def _MatMulGradAgainstSecondOnly(op, grad):
"""Gradient for MatMul, only for the second input."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
a = math_ops.conj(op.inputs[0])
if not t_a and not t_b:
grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True)
elif not t_a and t_b:
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True)
elif t_a and not t_b:
grad_b = gen_math_ops.mat_mul(a, grad)
elif t_a and t_b:
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True)
return None, grad_b
@ops.RegisterGradient("MatMul")
def _MatMulGrad(op, grad):
"""Gradient for MatMul."""
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None:
if 1 in skip_input_indices:
return _MatMulGradAgainstFirstOnly(op, grad)
elif 0 in skip_input_indices:
return _MatMulGradAgainstSecondOnly(op, grad)
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
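# For C = A * B with no transposes: dA = dC * B^T and dB = A^T * dC; the remaining
# branches adjust these formulas for transpose_a / transpose_b.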
if not t_a and not t_b:
grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True)
grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True)
elif not t_a and t_b:
grad_a = gen_math_ops.mat_mul(grad, b)
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True)
elif t_a and not t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True)
grad_b = gen_math_ops.mat_mul(a, grad)
elif t_a and t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True)
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True)
return grad_a, grad_b
@ops.RegisterGradient("SparseMatMul")
def _SparseMatMulGrad(op, grad):
"""Gradient for SparseMatMul."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
is_sparse = {
op.inputs[0]: op.get_attr("a_is_sparse"),
op.inputs[1]: op.get_attr("b_is_sparse"),
# Use heuristic to figure out if grad might be sparse
grad: not context.executing_eagerly() and (grad.op.type == "ReluGrad")
}
def _SparseMatMul(t1, t2, out_dtype, transpose_a=False, transpose_b=False):
"""Helper function to create SparseMatMul op."""
assert t1 in is_sparse and t2 in is_sparse
t1_sparse = is_sparse[t1]
t2_sparse = is_sparse[t2]
if transpose_b:
t2 = array_ops.transpose(t2)
transpose_b = False
prod = math_ops.matmul(
t1,
t2,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=t1_sparse,
b_is_sparse=t2_sparse)
if prod.dtype != out_dtype:
prod = math_ops.cast(prod, out_dtype)
return prod
dtype_a = op.inputs[0].dtype
dtype_b = op.inputs[1].dtype
if not t_a and not t_b:
return (_SparseMatMul(grad, op.inputs[1], dtype_a, transpose_b=True),
_SparseMatMul(op.inputs[0], grad, dtype_b, transpose_a=True))
elif not t_a and t_b:
return (_SparseMatMul(grad, op.inputs[1], dtype_a),
_SparseMatMul(grad, op.inputs[0], dtype_b, transpose_a=True))
elif t_a and not t_b:
return (_SparseMatMul(op.inputs[1], grad, dtype_a, transpose_b=True),
_SparseMatMul(op.inputs[0], grad, dtype_b))
elif t_a and t_b:
return (_SparseMatMul(
op.inputs[1], grad, dtype_a, transpose_a=True, transpose_b=True),
_SparseMatMul(
grad, op.inputs[0], dtype_b, transpose_a=True,
transpose_b=True))
@ops.RegisterGradient("Floor")
def _FloorGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Ceil")
def _CeilGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Round")
def _RoundGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Rint")
def _RintGrad(_, unused_grad):
# the gradient of Rint is zero
return [None]
@ops.RegisterGradient("BatchMatMul")
def _BatchMatMul(op, grad):
"""Returns the gradient of x and y given the gradient of x * y."""
x = op.inputs[0]
y = op.inputs[1]
adj_x = op.get_attr("adj_x")
adj_y = op.get_attr("adj_y")
if not adj_x:
if not adj_y:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)
else:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)
else:
if not adj_y:
grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)
else:
grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)
return grad_x, grad_y
@ops.RegisterGradient("BatchMatMulV2")
def _BatchMatMulV2(op, grad):
"""Returns the gradient of x and y given the gradient of x * y."""
x = op.inputs[0]
y = op.inputs[1]
adj_x = op.get_attr("adj_x")
adj_y = op.get_attr("adj_y")
if not adj_x:
if not adj_y:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)
else:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)
else:
if not adj_y:
grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)
else:
grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)
# Reduce along the broadcasted batch dimensions, if broadcasting is required.
shape_x_static = x.get_shape()
shape_y_static = y.get_shape()
if not (shape_x_static.is_fully_defined() and
shape_y_static.is_fully_defined() and
shape_x_static == shape_y_static):
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx[:-2], sy[:-2])
grad_x = array_ops.reshape(math_ops.reduce_sum(grad_x, rx), sx)
grad_y = array_ops.reshape(math_ops.reduce_sum(grad_y, ry), sy)
return grad_x, grad_y
ops.NotDifferentiable("Range")
ops.NotDifferentiable("LinSpace")
@ops.RegisterGradient("Complex")
def _ComplexGrad(op, grad):
"""Returns the real and imaginary components of 'grad', respectively."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx),
array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy))
@ops.RegisterGradient("Real")
def _RealGrad(_, grad):
"""Returns 'grad' as the real part and set the imaginary part 0."""
zero = constant_op.constant(0, dtype=grad.dtype)
return math_ops.complex(grad, zero)
@ops.RegisterGradient("Imag")
def _ImagGrad(_, grad):
"""Returns 'grad' as the imaginary part and set the real part 0."""
zero = constant_op.constant(0, dtype=grad.dtype)
return math_ops.complex(zero, grad)
@ops.RegisterGradient("Angle")
def _AngleGrad(op, grad):
"""Returns -grad / (Im(x) + iRe(x))"""
x = op.inputs[0]
with ops.control_dependencies([grad]):
re = math_ops.real(x)
im = math_ops.imag(x)
z = math_ops.reciprocal(math_ops.complex(im, re))
zero = constant_op.constant(0, dtype=grad.dtype)
complex_grad = math_ops.complex(grad, zero)
return -complex_grad * z
@ops.RegisterGradient("Conj")
def _ConjGrad(_, grad):
"""Returns the complex conjugate of grad."""
return math_ops.conj(grad)
@ops.RegisterGradient("ComplexAbs")
def _ComplexAbsGrad(op, grad):
"""Returns the gradient of ComplexAbs."""
return math_ops.div_no_nan(
math_ops.complex(
grad, array_ops.zeros_like(grad)) * op.inputs[0],
math_ops.complex(
op.outputs[0], array_ops.zeros_like(op.outputs[0])))
@ops.RegisterGradient("Cast")
def _CastGrad(op, grad):
t = [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.bfloat16,
dtypes.complex64, dtypes.complex128
]
src_type = op.inputs[0].dtype.base_dtype
dst_type = grad.dtype.base_dtype
if src_type in t and dst_type in t:
return math_ops.cast(grad, src_type)
else:
return None
@ops.RegisterGradient("Cross")
def _CrossGrad(op, grad):
u = op.inputs[0]
v = op.inputs[1]
return (math_ops.cross(v, grad), math_ops.cross(grad, u))
@ops.RegisterGradient("Cumsum")
def _CumsumGrad(op, grad):
axis = op.inputs[1]
exclusive = op.get_attr("exclusive")
reverse = op.get_attr("reverse")
return [
math_ops.cumsum(grad, axis, exclusive=exclusive, reverse=not reverse),
None
]
@ops.RegisterGradient("Cumprod")
def _CumprodGrad(op, grad):
x = op.inputs[0]
axis = op.inputs[1]
exclusive = op.get_attr("exclusive")
reverse = op.get_attr("reverse")
# TODO This fails when x contains 0 and should be fixed
prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)
out = math_ops.cumsum(
prod * grad, axis, exclusive=exclusive, reverse=not reverse)
return [out / x, None]
@ops.RegisterGradient("NextAfter")
def _NextAfterGrad(op, grad):
"""Returns gradient of nextafter(x1, x2) with respect to x1 and x2."""
x1 = op.inputs[0]
x2 = op.inputs[1]
s_x1 = array_ops.shape(x1)
s_x2 = array_ops.shape(x2)
r_x1, r_x2 = gen_array_ops.broadcast_gradient_args(s_x1, s_x2)
with ops.control_dependencies([grad]):
partial_x1 = array_ops.ones(s_x1, dtype=x1.dtype)
partial_x2 = array_ops.zeros(s_x2, dtype=x2.dtype)
return (array_ops.reshape(
math_ops.reduce_sum(partial_x1 * grad, r_x1), s_x1),
array_ops.reshape(
math_ops.reduce_sum(partial_x2 * grad, r_x2), s_x2))
| apache-2.0 | 2,105,415,834,781,362,400 | 33.54222 | 81 | 0.653891 | false | 3.030212 | false | false | false |
robled/rocket-depot | setup.py | 1 | 2523 | import subprocess
from distutils.log import warn, info
from setuptools import setup
setup(
name='rocket-depot',
version='1.0.0',
scripts=['rocket-depot'],
# metadata for upload to PyPI
platforms='linux',
author='David Roble',
author_email='[email protected]',
maintainer='David Roble',
maintainer_email='[email protected]',
description='An rdesktop/xfreerdp frontend.',
long_description=open('README.txt').read(),
license='GNU GPLv3',
keywords=['rdesktop', 'freerdp', 'rdp', 'remote desktop',
'terminal server'],
url='https://github.com/robled/rocket-depot',
data_files=[
('/usr/share/applications',
['data/rocket-depot.desktop']),
('/usr/share/icons/hicolor/16x16/apps',
['data/icons/16x16/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/22x22/apps',
['data/icons/22x22/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/24x24/apps',
['data/icons/24x24/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/32x32/apps',
['data/icons/32x32/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/48x48/apps',
['data/icons/48x48/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/64x64/apps',
['data/icons/64x64/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/128x128/apps',
['data/icons/128x128/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/256x256/apps',
['data/icons/256x256/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/scalable/apps',
['data/icons/scalable/apps/rocket-depot.svg']),
],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: X11 Applications :: GTK',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
)
info('running gtk-update-icon-cache')
try:
subprocess.call(['gtk-update-icon-cache', '-q', '-f', '-t',
'/usr/share/icons/hicolor'])
except OSError as e:
warn('updating the GTK icon cache failed: %s' % str(e))
| gpl-3.0 | 8,640,305,625,367,821,000 | 38.421875 | 70 | 0.604439 | false | 3.513928 | false | false | false |
swegener/gruvi | lib/gruvi/address.py | 1 | 3675 | #
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2014 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import six
import pyuv
from .hub import get_hub, switch_back, switchpoint
__all__ = ['saddr', 'paddr', 'getaddrinfo', 'getnameinfo']
def saddr(address):
"""Return a string representation for an address.
The *address* parameter can be a pipe name, an IP address tuple, or a
socket address.
The return value is always a ``str`` instance.
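A few illustrative examples, following the cases handled below:
>>> saddr(('127.0.0.1', 80))
'127.0.0.1:80'
>>> saddr(('::1', 8080))
'[::1]:8080'
>>> saddr('/tmp/test.sock')
'/tmp/test.sock'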
"""
if isinstance(address, six.binary_type) and six.PY3:
return address.decode('utf8')
elif isinstance(address, six.string_types):
return address
elif isinstance(address, tuple) and ':' in address[0]:
return '[{}]:{}'.format(address[0], address[1])
elif isinstance(address, tuple):
return '{}:{}'.format(*address)
else:
raise TypeError('illegal address type: {!s}'.format(type(address)))
def paddr(address):
"""Parse a string representation of an address.
This function is the inverse of :func:`saddr`.
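A few illustrative examples, mirroring :func:`saddr`:
>>> paddr('[::1]:8080')
('::1', 8080)
>>> paddr('127.0.0.1:80')
('127.0.0.1', 80)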
"""
if address.startswith('['):
p1 = address.find(']:')
if p1 == -1:
raise ValueError
return (address[1:p1], int(address[p1+2:]))
elif ':' in address:
p1 = address.find(':')
return (address[:p1], int(address[p1+1:]))
else:
return address
@switchpoint
def getaddrinfo(node, service=0, family=0, socktype=0, protocol=0, flags=0, timeout=30):
"""Resolve an Internet *node* name and *service* into a socket address.
The *family*, *socktype* and *protocol* are optional arguments that specify
the address family, socket type and protocol, respectively. The *flags*
argument allows you to pass flags to further modify the resolution process.
See the :func:`socket.getaddrinfo` function for a detailed description of
these arguments.
The return value is a list of ``(family, socktype, proto, canonname,
sockaddr)`` tuples. The fifth element (``sockaddr``) is the socket address.
It will be a 2-tuple ``(addr, port)`` for an IPv4 address, and a 4-tuple
``(addr, port, flowinfo, scopeid)`` for an IPv6 address.
The address resolution is performed in the libuv thread pool.
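Illustrative use (the host name is only an example; results depend on the local resolver):
addrinfo = getaddrinfo('localhost')
# every entry is a (family, socktype, proto, canonname, sockaddr) tuple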
"""
hub = get_hub()
with switch_back(timeout) as switcher:
request = pyuv.dns.getaddrinfo(hub.loop, node, service, family,
socktype, protocol, flags, callback=switcher)
switcher.add_cleanup(request.cancel)
result = hub.switch()
result, error = result[0]
if error:
message = pyuv.errno.strerror(error)
raise pyuv.error.UVError(error, message)
return result
@switchpoint
def getnameinfo(sockaddr, flags=0, timeout=30):
"""Resolve a socket address *sockaddr* back to a ``(node, service)`` tuple.
The *flags* argument can be used to modify the resolution process. See the
:func:`socket.getnameinfo` function for more information.
The address resolution is performed in the libuv thread pool.
"""
hub = get_hub()
with switch_back(timeout) as switcher:
request = pyuv.dns.getnameinfo(hub.loop, sockaddr, flags, callback=switcher)
switcher.add_cleanup(request.cancel)
result = hub.switch()
result, error = result[0]
if error:
message = pyuv.errno.strerror(error)
raise pyuv.error.UVError(error, message)
return result
| mit | -7,268,691,083,574,085,000 | 34.336538 | 88 | 0.661224 | false | 3.832117 | false | false | false |
harrystech/arthur-redshift-etl | python/etl/monitor.py | 1 | 36933 | """
Monitoring (and logging) for ETL steps.
This module provides a context manager for the ETL that monitors
the start time of an ETL step along with its successful or
unsuccessful completion. Events for start, finish, or failure
may be emitted to a persistence layer.
"""
import http.server
import itertools
import logging
import os
import queue
import random
import socketserver
import sys
import threading
import time
import traceback
import urllib.parse
import uuid
from calendar import timegm
from collections import Counter, OrderedDict
from copy import deepcopy
from datetime import datetime, timedelta
from decimal import Decimal
from http import HTTPStatus
from operator import itemgetter
from typing import Dict, Iterable, List, Optional, Union
import boto3
import botocore.exceptions
import funcy as fy
import simplejson as json
from boto3.dynamodb.types import TypeDeserializer
from tqdm import tqdm
import etl.assets
import etl.config
import etl.text
from etl.errors import ETLRuntimeError
from etl.json_encoder import FancyJsonEncoder
from etl.timer import Timer, elapsed_seconds, utc_now
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
STEP_START = "start"
STEP_FINISH = "finish"
STEP_FAIL = "fail"
_DUMMY_TARGET = "#.dummy"
def trace_key():
"""
Return a "trace key" suitable to track program execution.
It's most likely unique between invocations.
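For example:
>>> len(trace_key())
16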
"""
# We will never make a 32-bit operating system.
return uuid.uuid4().hex[:16].upper()
class MetaMonitor(type):
"""
Metaclass to implement read-only attributes of our ETL's Monitor.
If you need to find out the current trace key, call Monitor.etl_id.
If you want to know the "environment" (selected by using --prefix or the user's login),
then use Monitor.environment.
If you want to know the runtime environment (EMR, instance, step), use Monitor.cluster_info.
Behind the scenes, some properties actually do a lazy evaluation.
"""
@property
def etl_id(cls):
if cls._trace_key is None:
cls._trace_key = trace_key()
return cls._trace_key
@property
def environment(cls):
if cls._environment is None:
raise ValueError("value of 'environment' is None")
return cls._environment
@environment.setter
def environment(cls, value):
cls._environment = value
@property
def cluster_info(cls):
if cls._cluster_info is None:
job_flow = "/mnt/var/lib/info/job-flow.json"
if os.path.exists(job_flow):
with open(job_flow) as f:
data = json.load(f)
cluster_info = {"cluster_id": data["jobFlowId"], "instance_id": data["masterInstanceId"]}
parent_dir, current_dir = os.path.split(os.getcwd())
if parent_dir == "/mnt/var/lib/hadoop/steps":
cluster_info["step_id"] = current_dir
else:
cluster_info = {}
cls._cluster_info = cluster_info
return cls._cluster_info
class Monitor(metaclass=MetaMonitor):
"""
Context manager to monitor ETL steps for some target table.
Monitor instances have these properties which will be stored in the event payload:
environment: a description of the source folder (aka prefix)
etl_id: a UUID for each ETL run (All monitors of the same ETL run with the same 'etl_id'.)
target: name of table or view in the data warehouse
step: command that is running, like 'dump', or 'load'
The payloads will have at least the properties of the Monitor instance and:
event: one of ('start', 'finish', 'fail')
timestamp: UTC timestamp
In case of errors, they are added as an array 'errors'. It is also possible to send
some extra information into monitor payloads. Anything extra must be of type list,
dict, str, or int (or bad things will happen).
Example usage of attributes:
>>> id_ = Monitor.etl_id
>>> isinstance(id_, str)
True
>>> Monitor.etl_id == id_
True
>>> Monitor.environment
Traceback (most recent call last):
...
ValueError: value of 'environment' is None
>>> Monitor.environment = 'saturn'
>>> Monitor.environment
'saturn'
Example use of a monitor instance (with dry_run=True to avoid persistence calls during testing):
>>> m = Monitor('schema.table', 'frobnicate', dry_run=True)
>>> payload = MonitorPayload(m, 'test', utc_now())
>>> payload.step
'frobnicate'
>>> payload.event
'test'
Normally, you would leave the creation of the payload to the context manager:
>>> with Monitor('schema.table', 'frobnicate', dry_run=True):
... pass
"""
# See MetaMonitor class for getters and setters
_trace_key = None
_environment = None
_cluster_info = None
def __init__(self, target: str, step: str, dry_run: bool = False, **kwargs) -> None:
self._monitor_id = trace_key()
self._target = target
self._step = step
self._dry_run = dry_run
# Create a deep copy so that changes made later by the caller don't alter our payload.
self._extra = deepcopy(dict(**kwargs))
self._index = self._extra.get("index")
# Read-only properties (in order of cardinality)
@property
def environment(self):
return Monitor.environment
@property
def cluster_info(self):
return Monitor.cluster_info
@property
def etl_id(self):
return Monitor.etl_id
@property
def target(self):
return self._target
@property
def step(self):
return self._step
@property
def monitor_id(self):
return self._monitor_id
def __enter__(self):
if self._index:
logger.info(
"Starting %s step for '%s' (%d/%d)",
self.step,
self.target,
self._index["current"],
self._index["final"],
)
else:
logger.info("Starting %s step for '%s'", self.step, self.target)
self._start_time = utc_now()
payload = MonitorPayload(self, STEP_START, self._start_time, extra=self._extra)
payload.emit(dry_run=self._dry_run)
return self
def __exit__(self, exc_type, exc_value, tb):
self._end_time = utc_now()
seconds = elapsed_seconds(self._start_time, self._end_time)
if exc_type is None:
event = STEP_FINISH
errors = None
logger.info("Finished %s step for '%s' (%0.2fs)", self._step, self._target, seconds)
else:
event = STEP_FAIL
errors = [
{
"code": (exc_type.__module__ + "." + exc_type.__qualname__).upper(),
"message": traceback.format_exception_only(exc_type, exc_value)[0].strip(),
}
]
logger.warning("Failed %s step for '%s' (%0.2fs)", self._step, self._target, seconds)
payload = MonitorPayload(
self, event, self._end_time, elapsed=seconds, errors=errors, extra=self._extra
)
payload.emit(dry_run=self._dry_run)
def add_extra(self, key, value):
if key in self._extra:
raise KeyError("duplicate key in 'extra' payload")
self._extra[key] = value
@classmethod
def marker_payload(cls, step: str):
monitor = cls(_DUMMY_TARGET, step)
return MonitorPayload(monitor, STEP_FINISH, utc_now(), elapsed=0, extra={"is_marker": True})
class InsertTraceKey(logging.Filter):
"""Called as a logging filter: insert the ETL id as the trace key into the logging record."""
def filter(self, record):
record.trace_key = Monitor.etl_id
return True
class PayloadDispatcher:
def store(self, payload):
"""Send payload to persistence layer."""
raise NotImplementedError("PayloadDispatcher failed to implement store method")
class MonitorPayload:
"""
Simple class to encapsulate data for Monitor events which knows how to morph into JSON etc.
You should consider all attributes to be read-only with the possible exception of 'errors'
that may be set to a list of objects (in JSON-terminology) with 'code' and 'message' fields.
(Which is to say: do not modify the payload object!)
"""
# Append instances with a 'store' method here (skipping writing a metaclass this time)
dispatchers: List[PayloadDispatcher] = []
def __init__(self, monitor, event, timestamp, elapsed=None, errors=None, extra=None):
# Basic info
self.environment = monitor.environment
self.etl_id = monitor.etl_id
self.target = monitor.target
self.step = monitor.step
self.monitor_id = monitor.monitor_id
self.event = event
self.timestamp = timestamp
# Premium info (when available)
self.cluster_info = monitor.cluster_info
self.elapsed = elapsed
self.errors = errors
self.extra = extra
def emit(self, dry_run=False):
payload = vars(self)
# Delete entries that are often not present:
for key in ["cluster_info", "elapsed", "extra", "errors"]:
if not payload[key]:
del payload[key]
compact_text = json.dumps(payload, sort_keys=True, separators=(",", ":"), cls=FancyJsonEncoder)
if dry_run:
logger.debug("Dry-run: payload = %s", compact_text)
return
logger.debug("Monitor payload = %s", compact_text)
for d in MonitorPayload.dispatchers:
d.store(payload)
class DynamoDBStorage(PayloadDispatcher):
"""
Store ETL events in a DynamoDB table.
Note the table is created if it doesn't already exist when a payload needs to be stored.
"""
@staticmethod
def factory() -> "DynamoDBStorage":
table_name = "{}-{}".format(etl.config.get_config_value("resource_prefix"), "events")
return DynamoDBStorage(
table_name,
etl.config.get_config_value("etl_events.read_capacity"),
etl.config.get_config_value("etl_events.write_capacity"),
)
def __init__(self, table_name, read_capacity, write_capacity):
self.table_name = table_name
self.initial_read_capacity = read_capacity
self.initial_write_capacity = write_capacity
# Avoid default sessions and have one table reference per thread
self._thread_local_table = threading.local()
def get_table(self, create_if_not_exists=True):
"""Get table reference from DynamoDB or create it (within a new session)."""
session = boto3.session.Session()
logger.debug(f"Started new boto3 session in region '{session.region_name}'")
dynamodb = session.resource("dynamodb")
try:
table = dynamodb.Table(self.table_name)
status = table.table_status
logger.info(f"Found existing events table '{self.table_name}' in DynamoDB (status: {status})")
except botocore.exceptions.ClientError as exc:
# Check whether this is just a ResourceNotFoundException (sadly a 400, not a 404)
if exc.response["ResponseMetadata"]["HTTPStatusCode"] != 400:
raise
# Nullify assignment and start over
table = None
status = None
if not (status == "ACTIVE" or create_if_not_exists):
raise ETLRuntimeError("DynamoDB table '%s' does not exist or is not active" % self.table_name)
if table is None:
logger.info(f"Creating DynamoDB table: '{self.table_name}'")
table = dynamodb.create_table(
TableName=self.table_name,
KeySchema=[
{"AttributeName": "target", "KeyType": "HASH"},
{"AttributeName": "timestamp", "KeyType": "RANGE"},
],
AttributeDefinitions=[
{"AttributeName": "target", "AttributeType": "S"},
{"AttributeName": "timestamp", "AttributeType": "N"},
],
ProvisionedThroughput={
"ReadCapacityUnits": self.initial_read_capacity,
"WriteCapacityUnits": self.initial_write_capacity,
},
)
status = table.table_status
if status != "ACTIVE":
logger.info(f"Waiting for events table '{self.table_name}' to become active")
table.wait_until_exists()
logger.debug(
f"Finished creating or updating events table '{self.table_name}' (arn={table.table_arn})"
)
return table
def store(self, payload: dict, _retry: bool = True):
"""
Actually send the payload to the DynamoDB table.
If this is the first call at all, then get a reference to the table, or even create the
table as necessary.
This method will try to store the payload a second time if there's an error in the first
attempt.
"""
try:
table = getattr(self._thread_local_table, "table", None)
if not table:
table = self.get_table()
self._thread_local_table.table = table
item = dict(payload)
# Cast timestamp (and elapsed seconds) into Decimal since DynamoDB cannot handle float.
# But decimals may be finicky when instantiated from float, so we make sure to fix the
# number of decimals.
item["timestamp"] = Decimal("%.6f" % item["timestamp"].timestamp())
if "elapsed" in item:
item["elapsed"] = Decimal("%.6f" % item["elapsed"])
table.put_item(Item=item)
except botocore.exceptions.ClientError:
# Something bad happened while talking to the service ... just try one more time.
if _retry:
logger.warning("Trying to store payload a second time after this mishap:", exc_info=True)
self._thread_local_table.table = None
delay = random.uniform(3, 10)
logger.debug("Snoozing for %.1fs", delay)
time.sleep(delay)
self.store(payload, _retry=False)
else:
raise
class _ThreadingSimpleServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
pass
class MemoryStorage(PayloadDispatcher):
"""
Store ETL events in memory and make the events accessible via HTTP.
When the ETL is running for extract, load, or unload, connect to port 8086.
When the ETL is running on a host other than your local computer, say in EC2, use port
forwarding to send requests from your host to an address seen on the other host:
ssh -L 8086:localhost:8086 <hostname>
The output should pass validator at https://validator.w3.org/#validate_by_input+with_options
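The JSON API can also be queried directly, for example (illustrative):
curl http://localhost:8086/api/etl-id
curl http://localhost:8086/api/events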
"""
SERVER_HOST = "" # meaning: all that we can bind to locally
SERVER_PORT = 8086
def __init__(self):
self.queue = queue.Queue()
self.events = OrderedDict()
self.start_server()
def store(self, payload: dict):
self.queue.put(payload)
def _drain_queue(self):
try:
while True:
payload = self.queue.get_nowait()
if not payload.get("extra", {}).get("is_marker", False):
# Overwrite earlier events by later ones
key = payload["target"], payload["step"]
self.events[key] = payload
except queue.Empty:
pass
def get_indices(self):
self._drain_queue()
indices = {}
counter = Counter()
for payload in self.events.values():
index = dict(payload.get("extra", {}).get("index", {}))
name = index.setdefault("name", "N/A")
if name not in indices:
indices[name] = index
elif index["current"] > indices[name]["current"]:
indices[name].update(index)
if payload["event"] != STEP_START:
counter[name] += 1
indices[name]["counter"] = counter[name]
indices_as_list = [indices[name] for name in sorted(indices)]
return etl.assets.Content(json=indices_as_list)
def get_events(self, event_id: Optional[str]):
self._drain_queue()
if event_id is None:
events_as_list = sorted(
(self.events[key] for key in self.events),
key=lambda p: (2 if p["event"] == STEP_START else 1, p["timestamp"]),
reverse=True,
)
else:
events_as_list = [event for event in self.events.values() if event["monitor_id"] == event_id]
return etl.assets.Content(json=events_as_list)
def create_handler(self):
"""Return a handler that serves our storage content, used as factory method."""
storage = self
http_logger = logging.getLogger("arthur_http")
class MonitorHTTPHandler(http.server.BaseHTTPRequestHandler):
server_version = "MonitorHTTPServer/1.0"
log_error = http_logger.error
log_message = http_logger.info
def do_GET(self):
"""
Serve a GET (or HEAD) request.
We serve assets or JSON via the API.
If the command is HEAD (and not GET), only the header is sent. Duh.
"""
parts = urllib.parse.urlparse(self.path.rstrip("/"))
path = (parts.path or "/index.html").lstrip("/")
if path == "api/etl-id":
result = etl.assets.Content(json={"id": Monitor.etl_id})
elif path == "api/indices":
result = storage.get_indices()
elif path.startswith("api/events"):
segment = path.replace("api/events", "").strip("/")
result = storage.get_events(segment or None)
elif path == "api/command-line":
result = etl.assets.Content(json={"args": " ".join(sys.argv)})
elif etl.assets.asset_exists(path):
result = etl.assets.get_asset(path)
else:
# self.send_response(HTTPStatus.NOT_FOUND)
self.send_response(HTTPStatus.MOVED_PERMANENTLY)
new_parts = (parts.scheme, parts.netloc, "/", None, None)
new_url = urllib.parse.urlunsplit(new_parts)
self.send_header("Location", new_url)
self.end_headers()
return
self.send_response(HTTPStatus.OK)
self.send_header("Content-Type", result.content_type)
self.send_header("Content-Length", result.content_length)
if result.content_encoding is not None:
self.send_header("Content-Encoding", result.content_encoding)
self.send_header("Last-Modified", result.last_modified)
if result.cache_control is not None:
self.send_header("Cache-Control", result.cache_control)
self.end_headers()
if self.command == "GET":
self.wfile.write(result.content)
do_HEAD = do_GET
return MonitorHTTPHandler
def start_server(self):
"""Start background daemon to serve our events."""
handler_class = self.create_handler()
class BackgroundServer(threading.Thread):
def run(self):
logger.info("Starting background server for monitor on port %d", MemoryStorage.SERVER_PORT)
try:
httpd = _ThreadingSimpleServer(
(MemoryStorage.SERVER_HOST, MemoryStorage.SERVER_PORT), handler_class
)
httpd.serve_forever()
except Exception as exc:
logger.info("Background server stopped: %s", str(exc))
try:
thread = BackgroundServer(daemon=True)
thread.start()
except RuntimeError:
logger.warning("Failed to start monitor server:", exc_info=True)
def start_monitors(environment):
Monitor.environment = environment
memory = MemoryStorage()
MonitorPayload.dispatchers.append(memory)
if etl.config.get_config_value("etl_events.enabled"):
ddb = DynamoDBStorage.factory()
MonitorPayload.dispatchers.append(ddb)
else:
logger.warning("Writing events to a DynamoDB table is disabled in settings.")
def _format_output_column(key: str, value: str) -> str:
if value is None:
return "---"
elif key == "timestamp":
# Make timestamp readable by turning epoch seconds into a date.
return datetime.utcfromtimestamp(float(value)).replace(microsecond=0).isoformat()
elif key == "elapsed":
# Reduce number of decimals to 2.
return "{:6.2f}".format(float(value))
elif key == "rowcount":
return "{:9d}".format(int(value))
else:
return value
def _query_for_etls(step=None, hours_ago=0, days_ago=0) -> List[dict]:
"""Search for ETLs by looking for the "marker" event at the start of an ETL command."""
start_time = datetime.utcnow() - timedelta(days=days_ago, hours=hours_ago)
epoch_seconds = timegm(start_time.utctimetuple())
attribute_values = {
":marker": _DUMMY_TARGET,
":epoch_seconds": epoch_seconds,
":finish_event": STEP_FINISH,
}
if step is not None:
attribute_values[":step"] = step
filter_exp = "event = :finish_event"
if step is not None:
filter_exp += " and step = :step"
ddb = DynamoDBStorage.factory()
table = ddb.get_table(create_if_not_exists=False)
response = table.query(
ConsistentRead=True,
ExpressionAttributeNames={"#timestamp": "timestamp"}, # "timestamp" is a reserved word.
ExpressionAttributeValues=attribute_values,
KeyConditionExpression="target = :marker and #timestamp > :epoch_seconds",
FilterExpression=filter_exp,
ProjectionExpression="etl_id, step, #timestamp",
ReturnConsumedCapacity="TOTAL",
)
if "LastEvaluatedKey" in response:
logger.warning("This is is a partial result! Last evaluated key: '%s'", response["LastEvaluatedKey"])
logger.info(
"Query result: count = %d, scanned count = %d, consumed capacity = %f",
response["Count"],
response["ScannedCount"],
response["ConsumedCapacity"]["CapacityUnits"],
)
return response["Items"]
def query_for_etl_ids(hours_ago=0, days_ago=0) -> None:
"""Show recent ETLs with their step and execution start."""
etl_info = _query_for_etls(hours_ago=hours_ago, days_ago=days_ago)
keys = ["etl_id", "step", "timestamp"]
rows = [[_format_output_column(key, info[key]) for key in keys] for info in etl_info]
rows.sort(key=itemgetter(keys.index("timestamp")))
print(etl.text.format_lines(rows, header_row=keys))
def scan_etl_events(etl_id, selected_columns: Optional[Iterable[str]] = None) -> None:
"""
Scan for all events belonging to a specific ETL.
If a list of columns is provided, then the output is limited to those columns.
But note that the target (schema.table) and the event are always present.
"""
ddb = DynamoDBStorage.factory()
table = ddb.get_table(create_if_not_exists=False)
available_columns = ["target", "step", "event", "timestamp", "elapsed", "rowcount"]
if selected_columns is None:
selected_columns = available_columns
# We will always select "target" and "event" to have a meaningful output.
columns = list(fy.filter(frozenset(selected_columns).union(["target", "event"]), available_columns))
keys = ["extra.rowcount" if column == "rowcount" else column for column in columns]
# We need to scan here since the events are stored by "target" and not by "etl_id".
# TODO Try to find all the "known" relations and query on them with a filter on the etl_id.
client = boto3.client("dynamodb")
paginator = client.get_paginator("scan")
response_iterator = paginator.paginate(
TableName=table.name,
ConsistentRead=False,
ExpressionAttributeNames={"#timestamp": "timestamp"},
ExpressionAttributeValues={
":etl_id": {"S": etl_id},
":marker": {"S": _DUMMY_TARGET},
":start_event": {"S": STEP_START},
},
FilterExpression="etl_id = :etl_id and target <> :marker and event <> :start_event",
ProjectionExpression="target, step, event, #timestamp, elapsed, extra.rowcount",
ReturnConsumedCapacity="TOTAL",
# PaginationConfig={
# "PageSize": 100
# }
)
logger.info("Scanning events table '%s' for elapsed times", table.name)
consumed_capacity = 0.0
scanned_count = 0
rows: List[List[str]] = []
deserialize = TypeDeserializer().deserialize
for response in response_iterator:
consumed_capacity += response["ConsumedCapacity"]["CapacityUnits"]
scanned_count += response["ScannedCount"]
# We need to turn something like "'event': {'S': 'finish'}" into "'event': 'finish'".
deserialized = [
{key: deserialize(value) for key, value in item.items()} for item in response["Items"]
]
# Lookup "elapsed" or "extra.rowcount" (the latter as ["extra", "rowcount"]).
items = [{key: fy.get_in(item, key.split(".")) for key in keys} for item in deserialized]
# Scope down to selected keys and format the columns.
rows.extend([_format_output_column(key, item[key]) for key in keys] for item in items)
logger.info("Scan result: scanned count = %d, consumed capacity = %f", scanned_count, consumed_capacity)
if "timestamp" in keys:
rows.sort(key=itemgetter(keys.index("timestamp")))
else:
rows.sort(key=itemgetter(keys.index("target")))
print(etl.text.format_lines(rows, header_row=columns))
class EventsQuery:
def __init__(self, step: Optional[str] = None) -> None:
self._keys = ["target", "step", "event", "timestamp", "elapsed", "extra.rowcount"]
values = {
":target": None, # will be set when called
":epoch_seconds": None, # will be set when called
":start_event": STEP_START,
}
# Only look for finish or fail events
filter_exp = "event <> :start_event"
if step is not None:
values[":step"] = step
filter_exp += " and step = :step"
base_query = {
"ConsistentRead": False,
"ExpressionAttributeNames": {"#timestamp": "timestamp"},
"ExpressionAttributeValues": values,
"KeyConditionExpression": "target = :target and #timestamp > :epoch_seconds",
"FilterExpression": filter_exp,
"ProjectionExpression": "target, step, event, #timestamp, elapsed, extra.rowcount",
}
self._base_query = base_query
@property
def keys(self):
return self._keys[:]
def __call__(self, table, target, epoch_seconds):
query = deepcopy(self._base_query)
query["ExpressionAttributeValues"][":target"] = target
query["ExpressionAttributeValues"][":epoch_seconds"] = epoch_seconds
response = table.query(**query)
events = [{key: fy.get_in(item, key.split(".")) for key in self.keys} for item in response["Items"]]
# Return latest event or None
if events:
events.sort(key=itemgetter("timestamp"))
return events[-1]
return None
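# Minimal sketch of how EventsQuery is used (the target name here is hypothetical; the real
# callers are BackgroundQueriesRunner and summarize_events below):
#
#   query = EventsQuery(step="load")
#   latest = query(table, "www.orders", epoch_seconds)  # latest finish/fail event dict, or None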
class BackgroundQueriesRunner(threading.Thread):
"""
    An instance of this thread repeatedly runs queries against the DynamoDB events table.
    Every time a query returns a result, that result is sent to the consumer queue and the
    corresponding target is not queried again.
"""
def __init__(
self, targets, query, consumer_queue, start_time, update_interval, idle_time_out, **kwargs
) -> None:
super().__init__(**kwargs)
self.targets = list(targets)
self.query = query
self.queue = consumer_queue
self.start_time = start_time
self.update_interval = update_interval
self.idle_time_out = idle_time_out
def run(self):
ddb = DynamoDBStorage.factory()
table = ddb.get_table(create_if_not_exists=False)
targets = self.targets
start_time = self.start_time
idle = Timer()
while targets:
logger.debug(
"Waiting for events for %d target(s), start time = '%s'",
len(targets),
datetime.utcfromtimestamp(start_time).isoformat(),
)
new_start_time = datetime.utcnow() - timedelta(seconds=1) # avoid rounding errors
query_loop = Timer()
retired = set()
for target in targets:
latest_event = self.query(table, target, start_time)
if latest_event:
self.queue.put(latest_event)
retired.add(latest_event["target"])
targets = [t for t in targets if t not in retired]
start_time = timegm(new_start_time.utctimetuple())
if self.update_interval is None or not targets:
break
if retired:
idle = Timer()
elif self.idle_time_out and idle.elapsed > self.idle_time_out:
logger.info(
"Idle time-out: Waited for %d seconds but no events arrived, " "%d target(s) remaining",
self.idle_time_out,
len(targets),
)
break
if query_loop.elapsed < self.update_interval:
time.sleep(self.update_interval - query_loop.elapsed)
logger.info(
"Found events for %d out of %d target(s)", len(self.targets) - len(targets), len(self.targets)
)
self.queue.put(None)
def recently_extracted_targets(source_relations, start_time):
"""
Query the events table for "extract" events on the provided source_relations after start_time.
Waits for up to an hour, sleeping for 30s between checks.
    Return the set of targets (i.e., relation.identifier or event["target"]) with successful extracts.
"""
targets = [relation.identifier for relation in source_relations]
query = EventsQuery("extract")
consumer_queue = queue.Queue() # type: ignore
start_as_epoch = timegm(start_time.utctimetuple())
timeout = 60 * 60
extract_querying_thread = BackgroundQueriesRunner(
targets, query, consumer_queue, start_as_epoch, update_interval=30, idle_time_out=timeout, daemon=True
)
extract_querying_thread.start()
extracted_targets = set()
while True:
try:
event = consumer_queue.get(timeout=timeout)
if event is None:
break
if event["event"] == STEP_FINISH:
extracted_targets.add(event["target"])
except queue.Empty:
break
return extracted_targets
def summarize_events(relations, step: Optional[str] = None) -> None:
"""Summarize latest ETL step for the given relations by showing elapsed time and row count."""
etl_info = _query_for_etls(step=step, days_ago=7)
if not len(etl_info):
logger.warning("Found no ETLs within the last 7 days")
return
latest_etl = sorted(etl_info, key=itemgetter("timestamp"))[-1]
latest_start = latest_etl["timestamp"]
logger.info("Latest ETL: %s", latest_etl)
ddb = DynamoDBStorage.factory()
table = ddb.get_table(create_if_not_exists=False)
query = EventsQuery(step)
events = []
schema_events: Dict[str, Dict[str, Union[str, Decimal]]] = {}
for relation in tqdm(
desc="Querying for events", disable=None, iterable=relations, leave=False, unit="table"
):
event = query(table, relation.identifier, latest_start)
if event:
# Make the column for row counts easier to read by dropping "extra.".
event["rowcount"] = event.pop("extra.rowcount")
events.append(dict(event, kind=relation.kind))
schema = relation.target_table_name.schema
if schema not in schema_events:
schema_events[schema] = {
"target": schema,
"kind": "---",
"step": event["step"],
"timestamp": Decimal(0),
"event": "complete",
"elapsed": Decimal(0),
"rowcount": Decimal(0),
}
if event["timestamp"] > schema_events[schema]["timestamp"]:
schema_events[schema]["timestamp"] = event["timestamp"]
schema_events[schema]["elapsed"] += event["elapsed"]
schema_events[schema]["rowcount"] += event["rowcount"] if event["rowcount"] else 0
# Add pseudo events to show schemas are done.
events.extend(schema_events.values())
keys = ["target", "kind", "step", "timestamp", "event", "elapsed", "rowcount"]
rows = [[_format_output_column(key, info[key]) for key in keys] for info in events]
rows.sort(key=itemgetter(keys.index("timestamp")))
print(etl.text.format_lines(rows, header_row=keys))
def tail_events(
relations, start_time, update_interval=None, idle_time_out=None, step: Optional[str] = None
) -> None:
"""Tail the events table and show latest finish or fail events coming in."""
targets = [relation.identifier for relation in relations]
query = EventsQuery(step)
consumer_queue = queue.Queue() # type: ignore
epoch_seconds = timegm(start_time.utctimetuple())
thread = BackgroundQueriesRunner(
targets, query, consumer_queue, epoch_seconds, update_interval, idle_time_out, daemon=True
)
thread.start()
events = []
n_printed = 0
done = False
while not done:
progress = Timer()
while progress.elapsed < 10:
try:
event = consumer_queue.get(timeout=10)
if event is None:
done = True
break
event["timestamp"] = datetime.utcfromtimestamp(event["timestamp"]).isoformat()
events.append(event)
except queue.Empty:
break
# Keep printing tail of table that accumulates the events.
if len(events) > n_printed:
lines = etl.text.format_lines(
[[event[header] for header in query.keys] for event in events], header_row=query.keys
).split("\n")
if n_printed:
print("\n".join(lines[n_printed + 2 : -1])) # skip header and final "(x rows)" line
else:
print("\n".join(lines[:-1])) # only skip the "(x rows)" line
n_printed = len(lines) - 3 # header, separator, final = 3 extra rows
if done:
print(lines[-1])
def test_run():
Monitor.environment = "test" # type: ignore
memory = MemoryStorage()
MonitorPayload.dispatchers.append(memory)
schema_names = ["auburn", "burgundy", "cardinal", "flame", "fuchsia"]
table_names = ["apple", "banana", "cantaloupe", "durian", "fig"]
index = {"current": 0, "final": len(schema_names) * len(table_names)}
host = MemoryStorage.SERVER_HOST if MemoryStorage.SERVER_HOST else "localhost"
print("Creating events ... follow along at http://{}:{}/".format(host, MemoryStorage.SERVER_PORT))
with Monitor("color.fruit", "test", index={"current": 1, "final": 1, "name": "outer"}):
for i, names in enumerate(itertools.product(schema_names, table_names)):
try:
with Monitor(".".join(names), "test", index=dict(index, current=i + 1)):
time.sleep(random.uniform(0.5, 2.0))
# Create an error on one "table" so that highlighting of errors can be tested:
if i == 9:
raise RuntimeError("An error occurred!")
except RuntimeError:
pass
input("Press return (or Ctrl-c) to stop server\n")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
    # This allows testing the HTTP server. When running inside a Docker container, make sure
    # that port 8086 is exposed (bin/run_arthur.sh -w).
# Invoke using "python -m etl.monitor" inside the Docker container and follow along
# with "open http://localhost:8086" from your host.
test_run()
| mit | -3,399,315,863,732,583,000 | 38.123941 | 110 | 0.600601 | false | 4.132595 | true | false | false |
linuxrocks123/MailTask | mt_chronos.py | 1 | 1986 | #! /usr/bin/env python
# MailTask Alpha: The Email Manager
# Copyright (C) 2015 Patrick Simmons
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#Note: This library should have no dependencies on other parts of MailTask.
#This is to allow Chronos-Ananke messages to be generated and parsed
#from external software.
#Just to be clear, the license to this file is still GPLv3, though.
##Given a string representing the body of an email message,
# return a four-tuple with the information necessary to create
# a calendar event from it using the MT-CHRONOS-ANANKE format.
# The format of the return tuple:
# ("summary","description",epoch-starttime,epoch-endtime)
def extract_calendar_event(email_body):
    lines = email_body.splitlines()
i=0
while i<len(lines):
if lines[i].find("MT-CHRONOS-ANANKE")!=-1:
try:
to_return=(lines[i+1],lines[i+2],int(lines[i+3]),int(lines[i+4]))
return to_return
except:
pass
i+=1
return None
##Generate the MT-CHRONOS-ANANKE event string to put in the body of an email message
def gen_calendar_event(summary,description,starttime,endtime):
to_return="MT-CHRONOS-ANANKE\n"
to_return+=summary+"\n"
to_return+=description+"\n"
to_return+=repr(starttime)+"\n"
to_return+=repr(endtime)+"\n"
return to_return
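# Illustrative round trip of the two helpers above (the event values are made up):
#
#   body = gen_calendar_event("Dentist", "Annual checkup", 1430000000, 1430003600)
#   extract_calendar_event("Hi,\n\n" + body)
#   # -> ("Dentist", "Annual checkup", 1430000000, 1430003600)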
| gpl-3.0 | -4,021,911,731,758,143,000 | 37.192308 | 84 | 0.703424 | false | 3.677778 | false | false | false |
codesy/codesy | payments/migrations/0001_initial.py | 1 | 1590 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-08-03 23:59
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='StripeAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('account_id', models.CharField(blank=True, max_length=100)),
('secret_key', models.CharField(blank=True, max_length=100)),
('public_key', models.CharField(blank=True, max_length=100)),
('available_balance', models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=6)),
('verification', models.TextField(blank=True, default=b'')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='StripeEvent',
fields=[
('event_id', models.CharField(blank=True, max_length=100, primary_key=True, serialize=False)),
('type', models.CharField(blank=True, max_length=100)),
('message_text', models.TextField()),
('processed', models.BooleanField(default=False)),
],
),
]
| agpl-3.0 | -4,125,141,983,997,128,000 | 38.75 | 118 | 0.597484 | false | 4.184211 | false | false | false |
Dutchj/pbtweeter | pbtweeter/speedrun.py | 1 | 1678 | import config as cfg
import json
from datetime import datetime
from urllib2 import urlopen, quote
def get_lb():
try:
response = urlopen('http://www.speedrun.com/api_records.php?amount=999&game='+quote(cfg.game))
return json.load(response)
except Exception, e:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), 'Error getting leaderboard data:', e
return
def get_twitter_handle(user):
try:
response = urlopen('http://www.speedrun.com/api/v1/users?max=200&name='+user)
users = json.load(response)
except Exception, e:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), 'Error getting user search:', e
return
else:
        if not users['data']:
print "Unable to retrieve Twitter handle: No data, user most likely doesn't exist"
return ''
for entry in users['data']:
if entry['names']['international'].lower() == user.lower():
identifier = entry['id']
break
else:
print "Unable to retrieve Twitter handle: User doesn't exist"
return ''
try:
response = urlopen('http://www.speedrun.com/api/v1/users/'+str(identifier))
except Exception, e:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), 'Error getting user data:', e
return
else:
user_data = json.load(response)
if user_data['data']['twitter'] is None:
return ''
twitter_link = user_data['data']['twitter']['uri']
return '@' + twitter_link.replace('http://www.twitter.com/', '').replace('%40', '')
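# Illustrative call (the runner name is hypothetical and a live speedrun.com lookup is required):
#   get_twitter_handle('someuser')  # -> e.g. '@someuser', or '' when no Twitter account is linked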
| gpl-2.0 | -8,179,702,171,126,471,000 | 37.136364 | 102 | 0.564958 | false | 4.043373 | false | false | false |
mjirik/lisa | lisa/vessels_segmentation.py | 1 | 3265 | #! /usr/bin/python
# -*- coding: utf-8 -*-
# import functions from another directory
import sys
import os.path
import io3d
path_to_script = os.path.dirname(os.path.abspath(__file__))
#import featurevector
import unittest
from loguru import logger
# logger = logging.getLogger()
# import apdb
# apdb.set_trace();
# import scipy.io
import numpy as np
# ----------------- my scripts --------
import argparse
from . import segmentation_general
class VesselSegmentation:
def __init__(self):
pass
def set_params(self):
pass
def run(self):
pass
def get_output(self):
pass
if __name__ == "__main__":
    # Configure the module-level loguru logger (loguru has no setLevel/addHandler;
    # handlers are managed through remove()/add()).
    logger.remove()
    logger.add(sys.stderr, level="WARNING")
#logger.debug('input params')
# input parser
    parser = argparse.ArgumentParser(description='Segment vessels from liver')
    parser.add_argument('-dd','--dcmdir',
        default=None,
        help='path to data dir')
    parser.add_argument('-d', '--debug', action='store_true',
            help='run in debug mode')
    parser.add_argument('-i', '--inputfile', default=None,
            help='input file or directory with data')
    # The two arguments below are used further down but were missing from the parser.
    parser.add_argument('-o', '--outputfile', default=None,
            help='output file (defaults to vessels.pkl)')
    parser.add_argument('-b', '--biggest', action='store_true',
            help='keep only the biggest objects')
args = parser.parse_args()
    if args.debug:
        logger.remove()
        logger.add(sys.stderr, level="DEBUG")
    defaultoutputfile = "vessels.pkl"
    if args.outputfile is None:
        args.outputfile = defaultoutputfile
#else:
#dcm_read_from_dir('/home/mjirik/data/medical/data_orig/46328096/')
#data3d, metadata = dcmreaddata.dcm_read_from_dir()
datap = io3d.read(args.inputfile)
import sed3
# pyed = sed3.sed3(oseg.orig_scale_segmentation)
#pyed.show()
# information about crop
#cri = oseg.crinfo
#oseg.data3d = oseg.data3d[cri[0][0]:cri[0][1],cri[1][0]:cri[1][1],cri[2][0]:cri[2][1]]
#pyed = sed3.sed3(oseg.data3d, contour = oseg.orig_scale_segmentation)
print('slab', datap['slab'])
#import ipdb; ipdb.set_trace() # BREAKPOINT
#pyed = sed3.sed3(data['data3d'], contour = data['segmentation'])
#pyed.show()
#import pdb; pdb.set_trace()
    output = segmentation_general.vesselSegmentation(
datap['data3d'],
segmentation = datap['segmentation'],
#segmentation = oseg.orig_scale_segmentation,
threshold = -1,
inputSigma = 0.15,
dilationIterations = 2,
nObj = 1,
biggestObjects = args.biggest,
# dataFiltering = True,
interactivity = True,
binaryClosingIterations = 2,
binaryOpeningIterations = 0)
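    # The call above yields a binary mask of the vessel tree inside the liver
    # segmentation; it is labelled as 'porta' in datap['segmentation'] below.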
datap['slab']['none'] = 0
datap['slab']['liver'] = 1
datap['slab']['porta'] = 2
#print np.max(output)
#import pdb; pdb.set_trace()
#data = {}
#data['data3d'] = oseg.data3d
#data['crinfo'] = oseg.crinfo
#data['segmentation'] = oseg.segmentation
datap['segmentation'][output] = datap['slab']['porta']
#data['slab'] = slab
pyed = sed3.sed3(datap['data3d'], contour=datap['segmentation'] == datap['slab']['porta'])
pyed.show()
#pyed = sed3.sed3(data['segmentation'])
#pyed.show()
    # Free memory
    io3d.write(datap, args.outputfile)
| bsd-3-clause | -3,153,561,909,537,196,000 | 23.89313 | 94 | 0.616069 | false | 3.238332 | false | false | false |
voilet/cmdb | assets/ztree/api.py | 1 | 11656 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# FileName: api.py
# Desc:                  2015-15/4/16: 5:54 PM
# Author: 苦咖啡
# Email: [email protected]
# HomePage: http://blog.kukafei520.net
# History:
# =============================================================================
from django.shortcuts import render_to_response, HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
import commands, json, yaml
from assets.models import Project
from mysite.settings import auth_key
from assets.models import Host, IDC
import hashlib, time
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import redirect
# Login
from users.models import CustomUser
from assets.models import project_swan
from assets.ztree.service import ztree_tag
from django.shortcuts import get_object_or_404
from assets.models import Host, IDC, Service, Line, Project, HostRecord
from cmdb_auth.models import AuthNode
# songxs add
@login_required
def ztree_project(request):
line_list = Line.objects.filter()
business = Project.objects.filter(line__isnull=False)
no_business = Project.objects.filter(line__isnull=True)
ztree_data = ztree_tag(request.user.username)
return render_to_response('default/default.html', locals(), context_instance=RequestContext(request))
@login_required
def ztree_business(request):
"""
    Validate the tree request.
:param request:
:return:
"""
business_name = request.GET.get("uuid", False)
get_token = str(request.GET.get("token", False))
ztree_data = ztree_tag(request.user.username)
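    # The client-supplied token must equal sha1(username + auth_key + node name + current date),
    # which ties the tree request to the logged-in user and makes it expire daily.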
try:
sum_token = str(hashlib.sha1(request.user.username + auth_key + business_name +
time.strftime('%Y-%m-%d', time.localtime(time.time()))).hexdigest())
except TypeError:
sum_token = False
if request.GET.get("options") == "host":
uuid = request.GET.get('uuid', '')
ip = request.GET.get('ip', '')
if uuid:
host = get_object_or_404(Host, uuid=uuid)
elif ip:
host = get_object_or_404(Host, eth1=ip)
host_record = HostRecord.objects.filter(host=host).order_by('-time')
user_audit = AuthNode.objects.filter(node=host)
audit_count = user_audit.count()
return render_to_response('ztree/host_detail.html', locals(), context_instance=RequestContext(request))
content_status = True
idle = request.GET.get("idle", False)
if get_token != sum_token:
content_status = False
return render_to_response('ztree/ztree_service.html', locals(), context_instance=RequestContext(request))
if business_name != u"未分类":
try:
bus_data = Project.objects.get(uuid=request.GET.get("uuid"))
if not idle:
server_list = Host.objects.filter(business=bus_data, idle=True).order_by("create_time")
else:
server_list = Host.objects.filter(business=bus_data, idle=False).order_by("create_time")
except:
pass
else:
bus_data = u'未分类'
idc_data = IDC.objects.filter(type=1)
if not idle:
server_list = Host.objects.filter(business__isnull=True, idc=idc_data, idle=True).order_by("create_time")
else:
server_list = Host.objects.filter(business__isnull=True, idc=idc_data, idle=False).order_by("create_time")
if request.GET.get("options") == "swan_push":
s = Ztree_class(business_name, request.user.first_name)
rst = s.swan()
rst_data = rst.get("swan_name")
status = len(rst_data)
return render_to_response('ztree/swan.html', locals(), context_instance=RequestContext(request))
if request.GET.get("options") == "doc":
data = Project.objects.get(pk=business_name)
# return render_to_response('ztree/swan.html', locals(), context_instance=RequestContext(request))
return render_to_response('markdown/index.html', locals(), context_instance=RequestContext(request))
if request.GET.get("options") == "highstate":
project = Project.objects.get(uuid=business_name)
host_list = Host.objects.filter(business=project)
return render_to_response('ztree/highstate.html', locals(), context_instance=RequestContext(request))
if request.GET.get("options") == "monitor":
return render_to_response('ztree/zabbix_count.html', locals(), context_instance=RequestContext(request))
if request.GET.get("options") == "salt":
return render_to_response('ztree/saltstack.html', locals(), context_instance=RequestContext(request))
if request.GET.get("options") == "project":
ip_list = []
server_list = {}
line_name = Line.objects.get(pk=business_name)
business_data = Project.objects.filter(line=business_name)
for i in business_data:
node = Host.objects.filter(business=i, idle=True)
for k in node:
if k.eth1 not in ip_list:
ip_list.append(k.eth1)
server_list[str(k.uuid)] = k.eth1
count = len(ip_list)
return render_to_response('ztree/project.html', locals(), context_instance=RequestContext(request))
if request.GET.get("options") == "types":
get_env = request.GET.get("name")
business_data = Project.objects.filter(pk=business_name)
server_list = Host.objects.filter(business=business_data, env=get_env).order_by("-create_time")
count = server_list.count()
return render_to_response('ztree/ztree.html', locals(), context_instance=RequestContext(request))
if request.GET.get("options") == "service":
s = []
bus_data = Project.objects.get(uuid=business_name)
server_list = Host.objects.filter(business=bus_data, idle=True).order_by("create_time")
for i in server_list:
t = i.service.all()
for b in t:
if b not in s:
s.append(b)
tag = request.GET.get("tgt", False)
if tag:
service_all = Service.objects.get(name=tag)
server_list = Host.objects.filter(service=service_all, business=bus_data)
return render_to_response('ztree/ztree_service.html', locals(), context_instance=RequestContext(request))
count = server_list.count()
return render_to_response('ztree/ztree.html', locals(), context_instance=RequestContext(request))
@login_required
def CdnCache(request):
"""
    Validate the tree request.
:param request:
:return:
"""
service = request.GET.get("services")
get_token = str(request.GET.get("token"))
uuid = str(request.GET.get("uuid"))
sum_token = str(hashlib.sha1(request.user.username + auth_key + service + time.strftime('%Y-%m-%d', time.localtime(
time.time()))).hexdigest())
content_status = True
if get_token != sum_token:
content_status = False
idc_data = IDC.objects.get(uuid=uuid)
service_all = Service.objects.get(name=service)
server_list = Host.objects.filter(idc=idc_data, service=service_all)
business_name = idc_data.name
service_tag = service
return render_to_response('ztree/service.html', locals(), context_instance=RequestContext(request))
@login_required
def CdnIdc(request):
"""
    Validate the tree request.
:param request:
:return:
"""
get_token = str(request.GET.get("token"))
uuid = str(request.GET.get("uuid"))
idc_data = IDC.objects.get(uuid=uuid)
sum_token = str(hashlib.sha1(request.user.username + auth_key + idc_data.name + time.strftime('%Y-%m-%d',
time.localtime(
time.time()))).hexdigest())
content_status = True
if get_token != sum_token:
content_status = False
server_list = Host.objects.filter(idc=idc_data)
business_name = idc_data.name
return render_to_response('ztree/idc.html', locals(), context_instance=RequestContext(request))
class Ztree_class(object):
"""
    ztree helper class
"""
def __init__(self, project_name, user):
self.project_name = project_name
self.user = user
def monitor(self):
return True
def swan(self):
rst_data = {}
user_info = CustomUser.objects.get(first_name=self.user)
myform_rst = Project.objects.get(uuid=self.project_name)
rst = project_swan.objects.filter(project_name_id=myform_rst.uuid)
"""
        Put all release (swan) names of the current project into a list.
"""
swan_name_list = [i.swan_name for i in rst]
swan_push = user_info.project_swan_set.all()
user = CustomUser.objects.get(first_name=self.user)
if user.is_superuser:
for i in rst:
rst_data[str(i.uuid)] = i.swan_name
else:
swan_push = user_info.project_swan_set.all()
for i in swan_push:
if i.swan_name in swan_name_list:
rst_data[str(i.uuid)] = i.swan_name
host_list = myform_rst.host_set.all()
content = {"swan_name": rst_data, "host": host_list}
return content
def highstate(self):
project = Project.objects.get(service_name=self.project_name)
# server_list = project.host_set
host_list = Host.objects.filter(business=project)
return True
@csrf_exempt
def ZtreeIndex(request):
"""
:param request:
:return:
"""
if request.method == 'POST':
otherParam = request.POST.get("otherParam")
status = request.POST.get("status")
line_id = request.POST.get("line_id")
try:
name = request.POST.get("name")
id = request.POST.get("id")
except:
name = False
if not name:
ztree = ztree_tag(request.user.username)
return HttpResponse(json.dumps(ztree, ensure_ascii=False, indent=4))
elif int(status[0]) == 1:
ztree = []
return HttpResponse(json.dumps(ztree, ensure_ascii=False, indent=4))
else:
ztree = []
bus_data = Project.objects.get(service_name=name)
server_list = Host.objects.filter(business=bus_data).order_by("create_time")
s = []
for i in server_list:
t = i.service.all().values()
for b in t:
if b not in s:
s.append(b)
tree_id = 0
for i in s:
tree_id += 1
token = hashlib.sha1(request.user.username + auth_key + i.get("name") + time.strftime('%Y-%m-%d',
time.localtime(
time.time()))).hexdigest()
ztree.append({"id": tree_id, "status": 3, "line_id": line_id, "name": i.get("name"), "token": token,
"t": i.get("name"), "business": bus_data.service_name})
return HttpResponse(json.dumps(ztree, ensure_ascii=False, indent=4))
content = {"status": 403, "message": "auth error"}
return HttpResponse(json.dumps(content, ensure_ascii=False, indent=4))
| agpl-3.0 | 1,779,326,221,662,780,700 | 34.814241 | 136 | 0.585927 | false | 3.702945 | false | false | false |
Cygnus-Inc/Cygnet-Adapter | docs/conf.py | 1 | 1240 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
sys.path.append(os.path.abspath('src/cygnet_adapter'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon'
]
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = u'cygnet_adapter'
year = u'2015'
author = u'Cygnus'
copyright = '{0}, {1}'.format(year, author)
version = release = u'0.1.0'
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
pygments_style = 'trac'
templates_path = ['.']
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = True
html_sidebars = {'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'], }
html_short_title = '%s-%s' % (project, version)
| apache-2.0 | -935,034,756,308,814,500 | 27.181818 | 81 | 0.670161 | false | 2.966507 | false | true | false |
Aeronautics/aero | aero/commands/install.py | 1 | 1362 | # -*- coding: utf-8 -*-
from aero.__version__ import __version_info__
__author__ = 'nickl-'
from .base import CommandProcessor as CommandProcessor
class InstallCommand(CommandProcessor):
from .base import coroutine
package = ''
adapter = ''
def wiring(self):
self.out = self.write()
self.ticker.routine(self.progress(None))
return self.each(self.spacing(self.call(self.res())))
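    # wiring() chains the coroutines into a pipeline: each() feeds items through spacing()
    # and call() (presumably provided by the CommandProcessor base) into res() below.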
def seen(self, command, adapter, package, result=False):
self.package = package
self.adapter = adapter
return result
@coroutine
def res(self):
while True:
res = (yield)
if res[1] == 0:
print 'Successfully installed package: {} with {}'.format(self.package, self.adapter)
else:
print 'Aborted: Error while installing package: {} {} returned exit code {}'.format(
self.package, self.adapter, res[1]
)
@coroutine
def write(self):
import sys
out = sys.stdout
while True:
text = (yield)
out.write(text)
@coroutine
def spacing(self, target):
while True:
payload = (yield)
print u'\n'
target.send(payload)
@coroutine
def progress(self, responder):
while True: (yield)
| bsd-3-clause | -6,355,324,622,234,375,000 | 24.698113 | 101 | 0.556535 | false | 4.29653 | false | false | false |
pmghalvorsen/gramps_branch | gramps/gui/editors/displaytabs/groupembeddedlist.py | 1 | 14240 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# 2009 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# python
#
#-------------------------------------------------------------------------
import sys
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import pickle
#-------------------------------------------------------------------------
#
# GTK libraries
#
#-------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import Pango
from gi.repository import GObject
from gi.repository import GLib
#-------------------------------------------------------------------------
#
# GRAMPS classes
#
#-------------------------------------------------------------------------
from ...utils import is_right_click
from .embeddedlist import EmbeddedList, TEXT_COL, MARKUP_COL, ICON_COL
#-------------------------------------------------------------------------
#
# Classes
#
#-------------------------------------------------------------------------
class GroupEmbeddedList(EmbeddedList):
"""
This class provides the base class for all the list tabs that show
grouped data.
It maintains a Gtk.TreeView, including the selection and button sensitivity.
"""
_WORKGROUP = 0
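    # Tree paths used throughout this class have the form (group index, row index);
    # group _WORKGROUP (0) is the editable group that drag-and-drop and the
    # up/down buttons operate on.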
def __init__(self, dbstate, uistate, track, name, build_model,
share_button=False, move_buttons=False, jump_button=False, **kwargs):
"""
Create a new list, using the passed build_model to populate the list.
"""
self.kwargs = kwargs
EmbeddedList.__init__(self, dbstate, uistate, track, name, build_model,
share_button, move_buttons, jump_button)
#connect click on the first column
self.columns[0].connect('clicked', self.groupcol_click)
for col in self.columns[1:]:
col.connect('clicked', self.col_click)
self.dbsort = True
def construct_model(self):
"""
Method that creates the model using the passed build_model parameter
Overwrites the EmbeddedList calling sequence by adding the different
groups
"""
return self.build_model(self.get_data(), self.dbstate.db,
self.groups(), **self.kwargs)
def groups(self):
"""
Return the (group key, group name)s in the order as given by get_data()
"""
raise NotImplementedError
def groupcol_click(self, obj):
"""
The group column is clicked, sort it as it was
"""
self.columns[0].set_sort_order(Gtk.SortType.ASCENDING)
self.rebuild()
self.dbsort = True
def col_click(self, obj):
self.dbsort = False
def _on_button_press(self, obj, event):
"""
Handle button press, not double-click, that is done in init_interface
"""
self._select_row_at_coords(event.x, event.y)
if is_right_click(event):
obj = self.get_selected()
if obj and obj[1]:
self._tmpgroup = obj[0]
self.right_click(obj[1], event)
return True
elif event.type == Gdk.EventType.BUTTON_PRESS and event.button == 2:
fun = self.get_middle_click()
if fun:
fun()
return True
return False
def is_empty(self):
"""
        Return True if the working group of get_data() contains no items.
        Typically, get_data returns the list of associated data.
"""
return len(self.get_data()[self._WORKGROUP]) == 0
def drag_data_get(self, widget, context, sel_data, info, time):
"""
Provide the drag_data_get function, which passes a tuple consisting of:
1) Drag type defined by the .drag_type field specified by the value
assigned to _DND_TYPE
2) The id value of this object, used for the purpose of determining
the source of the object. If the source of the object is the same
as the object, we are doing a reorder instead of a normal drag
and drop
3) Pickled data. The pickled version of the selected object
4) Source row. Used for a reorder to determine the original position
of the object
"""
# get the selected object, returning if not is defined
obj = self.get_selected()
if not obj or obj[1] is None:
#nothing selected or a grouping selected
return
# pickle the data, and build the tuple to be passed
value = (self._DND_TYPE.drag_type, id(self), obj[1],
self.find_index(obj))
data = pickle.dumps(value)
# pass as a string (8 bits)
sel_data.set(self._DND_TYPE.atom_drag_type, 8, data)
def drag_data_received(self, widget, context, x, y, sel_data, info, time):
"""
Handle the standard gtk interface for drag_data_received.
If the selection data is define, extract the value from sel_data.data,
and decide if this is a move or a reorder.
"""
if sel_data and sel_data.get_data():
# make sure data = 1 row
# pickle.loads(sel_data.data)[3] = 0
try:
(mytype, selfid, obj, row_from) = pickle.loads(sel_data.get_data())
except ValueError:
return
# make sure this is the correct DND type for this object
if mytype == self._DND_TYPE.drag_type:
# determine the destination row
row = self._find_row(x, y)
# if this is same object, we have a move, otherwise,
# it is a standard drag-n-drop
if id(self) == selfid and self.get_selected() is not None:
self._move(row_from, row, obj)
else:
self._handle_drag(row, obj)
self.rebuild()
elif self._DND_EXTRA and mytype == self._DND_EXTRA.drag_type:
self.handle_extra_type(mytype, obj)
def tree_drag_motion(self, *args):
"""
        While dragging, show the list in database order so it is clear
        how saving will change the data.
"""
if not self.dbsort:
self.columns[0].clicked()
def find_index(self, obj):
"""
Returns the index of the object within the associated data.
This will be a path (groupindex, index)
"""
data = self.get_data()
groupindex = None
index = None
for groupindex, group in enumerate(data):
try:
index = group.index(obj[1])
break
except ValueError:
pass
return (groupindex, index)
def _find_row(self, x, y):
"""
Return a path as [groupindex, index] of the row on x,y.
If no row, then a new line in the working group is returned
"""
dest = self.tree.get_dest_row_at_pos(x, y)
if dest is None:
# Below last item in list
if self.is_empty():
return [self._WORKGROUP, 0]
else:
return [self._WORKGROUP, len(self.get_data()[self._WORKGROUP])]
else:
path = dest[0].get_indices()
wgroup = path[0]
if len(path) == 1:
# On a heading
if dest[1] == Gtk.TreeViewDropPosition.BEFORE:
if wgroup != 0:
# If before then put at end of previous group
return (wgroup-1, len(self.get_data()[wgroup-1]))
else:
# unless it is the first group
return (wgroup, 0)
else:
return (wgroup, 0)
else:
if dest[1] in (Gtk.TreeViewDropPosition.BEFORE,
Gtk.TreeViewDropPosition.INTO_OR_BEFORE):
return (wgroup, path[1])
else:
return (wgroup, path[1]+1)
def _handle_drag(self, row, obj):
"""
drag from external place to row of obj
"""
if row[0] == self._WORKGROUP:
self.get_data()[self._WORKGROUP].insert(row[1], obj)
self.changed = True
else:
self.dropnotworkgroup(row, obj)
def dropnotworkgroup(self, row, obj):
"""
Drop of obj on row that is not WORKGROUP
"""
pass
def _move(self, row_from, row_to, obj):
"""
        Drag-and-drop reordering; only moves within the workgroup are applied
        directly, moves into or out of it are delegated to subclasses.
"""
if row_from[0] == row_to[0] and row_from[0] == self._WORKGROUP:
dlist = self.get_data()[self._WORKGROUP]
if row_from[1] < row_to[1]:
dlist.insert(row_to[1], obj)
del dlist[row_from[1]]
else:
del dlist[row_from[1]]
dlist.insert(row_to[1], obj)
self.changed = True
elif row_from[0] == self._WORKGROUP:
self.move_away_work(row_from, row_to, obj)
elif row_to[0] == self._WORKGROUP:
self.move_to_work(row_from, row_to, obj)
def move_away_work(self, row_from, row_to, obj):
"""
        Move from the workgroup to a non-workgroup group;
        handled in subclasses, the default is a no-op.
"""
pass
def move_to_work(self, row_from, row_to, obj):
"""
        Move from a non-workgroup group to the workgroup;
        handled in subclasses, the default is a no-op.
"""
pass
def _move_up(self, row_from, obj, selmethod=None):
"""
Move the item a position up in the EmbeddedList.
Eg: 0,1,2,3 needs to become 0,2,1,3, here row_from = 2
"""
if row_from[0] == self._WORKGROUP:
if selmethod :
dlist = selmethod()
else :
dlist = self.get_data()[self._WORKGROUP]
del dlist[row_from[1]]
dlist.insert(row_from[1]-1, obj)
self.changed = True
self.rebuild()
#select the row
path = (self._WORKGROUP, row_from[1]-1)
self.tree.get_selection().select_path(path)
GLib.idle_add(self.tree.scroll_to_cell, path)
else:
self._move_up_notwork(row_from, obj, selmethod)
def _move_up_notwork(self, row_from, obj, selmethod=None):
"""
move up outside of workgroup
"""
pass
def _move_up_group(self, groupindex):
"""
move up pressed on the group
"""
pass
def _move_down(self, row_from, obj, selmethod=None):
"""
Move the item a position down in the EmbeddedList.
Eg: 0,1,2,3 needs to become 0,2,1,3, here row_from = 1
"""
if row_from[0] == self._WORKGROUP:
if selmethod :
dlist = selmethod()
else :
dlist = self.get_data()[self._WORKGROUP]
del dlist[row_from[1]]
dlist.insert(row_from[1]+1, obj)
self.changed = True
self.rebuild()
#select the row
path = (self._WORKGROUP, row_from[1]+1)
self.tree.get_selection().select_path(path)
GLib.idle_add(self.tree.scroll_to_cell, path)
else:
self._move_down_notwork(row_from, obj, selmethod)
def _move_down_notwork(self, row_from, obj, selmethod=None):
"""
move down outside of workgroup
"""
pass
def _move_down_group(self, groupindex):
"""
move down pressed on the group
"""
pass
def get_icon_name(self):
"""
Specifies the basic icon used for a generic list. Typically,
a derived class will override this. The icon chosen is the
STOCK_JUSTIFY_FILL icon, which in the default GTK style
looks kind of like a list.
"""
return Gtk.STOCK_JUSTIFY_FILL
def del_button_clicked(self, obj):
ref = self.get_selected()
if ref and ref[1] is not None:
if ref[0]==self._WORKGROUP:
ref_list = self.get_data()[self._WORKGROUP]
ref_list.remove(ref[1])
self.changed = True
self.rebuild()
else:
self.del_notwork(ref)
def del_notwork(self, ref):
"""
delete of ref asked that is not part of workgroup
"""
pass
def up_button_clicked(self, obj):
ref = self.get_selected()
if ref and ref[1] is not None:
pos = self.find_index(ref)
if pos[1] > 0 :
self._move_up(pos, ref[1])
elif ref and ref[1] is None:
self._move_up_group(ref[0])
def down_button_clicked(self, obj):
ref = self.get_selected()
if ref and ref[1] is not None:
pos = self.find_index(ref)
if pos[1] >=0 and pos[1] < len(self.get_data()[pos[0]])-1:
self._move_down(pos, ref[1])
elif ref and ref[1] is None:
self._move_down_group(ref[0])
| gpl-2.0 | -4,655,648,685,835,140,000 | 34.073892 | 86 | 0.523596 | false | 4.143148 | false | false | false |
mdevaev/emonoda | emonoda/plugins/confetti/pushover.py | 1 | 3447 | """
Emonoda -- A set of tools to organize and manage your torrents
Copyright (C) 2015 Devaev Maxim <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib.parse
from typing import List
from typing import Dict
from typing import Any
from ...optconf import Option
from ...optconf import SecretOption
from ...optconf.converters import as_string_list
from ...optconf.converters import as_path_or_empty
from . import STATUSES
from . import ResultsType
from . import WithWeb
from . import WithStatuses
from . import templated
# =====
class Plugin(WithWeb, WithStatuses):
PLUGIN_NAMES = ["pushover"]
def __init__( # pylint: disable=super-init-not-called
self,
user_key: str,
api_key: str,
devices: List[str],
title: str,
template: str,
**kwargs: Any,
) -> None:
self._init_bases(**kwargs)
self._init_opener()
self.__user_key = user_key
self.__api_key = api_key
self.__devices = devices
self.__title = title
self.__template_path = template
@classmethod
def get_options(cls) -> Dict[str, Option]:
return cls._get_merged_options({
"user_key": SecretOption(default="CHANGE_ME", help="User key"),
"api_key": SecretOption(default="CHANGE_ME", help="API/Application key"),
"devices": Option(default=[], type=as_string_list, help="Devices list (empty for all)"),
"title": Option(default="Emonoda ({source})", help="Message title"),
"template": Option(default="", type=as_path_or_empty, help="Mako template file name"),
})
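    # send_results() below posts one Pushover message per (status, file) pair to the
    # public https://api.pushover.net/1/messages.json endpoint.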
def send_results(self, source: str, results: ResultsType) -> None:
for status in self._statuses:
for (file_name, result) in results[status].items():
post = {
"token": self.__api_key,
"user": self.__user_key,
"html": "1",
"title": self.__title.format(source=source),
"message": templated(
name=(self.__template_path if self.__template_path else "pushover.{source}.mako").format(source=source),
built_in=(not self.__template_path),
source=source,
file_name=file_name,
status=status,
status_msg=STATUSES[status],
result=result,
),
}
if self.__devices:
post["device"] = ",".join(self.__devices)
self._read_url(
url="https://api.pushover.net/1/messages.json",
data=urllib.parse.urlencode(post).encode("utf-8"),
)
| gpl-3.0 | 4,823,935,925,674,717,000 | 35.670213 | 128 | 0.577314 | false | 4.260816 | false | false | false |
yusukemurayama/ppytrading | ppyt/filters/historical_filters.py | 1 | 1741 | # coding: utf-8
import logging
from sqlalchemy.sql import func
from ppyt.filters import FilterBase
from ppyt.models.orm import start_session, History
logger = logging.getLogger(__name__)
class AverageVolumeFilter(FilterBase):
"""平均出来形で銘柄を絞り込むクラスです。"""
    _findkey = '平均出来高フィルタ'  # A name that uniquely identifies this filter.
def _setup(self, volume=None):
"""初期化処理を行います。
Args:
volume: 平均出来高の閾値
Raises:
ArgumentError: 引数チェックに引っかかった場合に発生します。
"""
self._is_valid_argument('volume', volume, int)
self.volume = float(volume)
def _filter_stocks(self, stocks):
"""銘柄を絞り込みます。
絞り込み条件:
- 過去の平均出来高が規定の値を上回っている。
Args:
stocks: 絞り込み前の銘柄のリスト
Returns:
絞り込み後の銘柄のリスト
"""
filtered_stocks = []
with start_session() as session:
for s in stocks:
avg_volume = session.query(func.avg(History.volume)) \
.filter_by(symbol=s.symbol).scalar()
logger.debug('symbol - avg_volume: {} - {}'.format(
s.symbol, avg_volume))
if avg_volume is not None and float(avg_volume) >= self.volume:
                    # If the historical average volume exceeds the threshold,
                    # add the stock to the filtered list.
filtered_stocks.append(s)
return filtered_stocks
| mit | -2,557,690,650,821,079,000 | 24.240741 | 79 | 0.567865 | false | 2.325939 | false | false | false |
Magic-Translater/Pwntools.Doc.In.Zh-cn | source/conf.py | 1 | 11053 | # -*- coding: utf-8 -*-
#
# pwntools documentation build configuration file, created by
# sphinx-quickstart on Wed May 28 15:00:52 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import subprocess
import sys
build_dash = tags.has('dash')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
import pwnlib
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'pwnlib.internal.dochelper',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.linkcode',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.todo',
'sphinx.ext.intersphinx',
'sphinxcontrib.autoprogram',
'sphinxcontrib.napoleon'
]
doctest_global_setup = '''
import sys, os
os.environ['PWNLIB_NOTERM'] = '1'
os.environ['PWNLIB_RANDOMIZE'] = '0'
import pwnlib
pwnlib.context.context.reset_local()
pwnlib.context.ContextType.defaults['log_level'] = 'ERROR'
pwnlib.context.ContextType.defaults['randomize'] = False
pwnlib.term.text.when = 'never'
pwnlib.log.install_default_handler()
pwnlib.log.rootlogger.setLevel(1)
# Sphinx modifies sys.stdout, and context.log_terminal has
# a reference to the original instance. We need to update
# it for logging to be captured.
class stdout(object):
def __getattr__(self, name):
return getattr(sys.stdout, name)
def __setattr__(self, name, value):
return setattr(sys.stdout, name, value)
pwnlib.context.ContextType.defaults['log_console'] = stdout()
'''
autodoc_member_order = 'alphabetical'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
doctest_test_doctest_blocks = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pwntools'
copyright = u'2016, Gallopsled et al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
release = pwnlib.__version__
version = release.rsplit('.', 1)[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = "zh_CN"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = not build_dash
# If false, no index is generated.
html_use_index = not build_dash
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pwntoolsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pwntools.tex', u'pwntools Documentation',
u'2016, Gallopsled et al.', 'manual'),
]
intersphinx_mapping = {'python': ('https://docs.python.org/2.7', None),
'paramiko': ('https://paramiko-docs.readthedocs.org/en/1.15/', None)}
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pwntools', u'pwntools Documentation',
[u'2016, Gallopsled et al.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pwntools', u'pwntools Documentation',
u'', 'pwntools', 'CTF exploit writing toolkit.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
branch = release
try:
git_branch = subprocess.check_output('git describe --tags', shell = True)
except subprocess.CalledProcessError:
git_branch = '-'
try:
if '-' in git_branch:
branch = subprocess.check_output('git rev-parse HEAD', shell = True).strip()[:10]
except subprocess.CalledProcessError:
pass
def linkcode_resolve(domain, info):
if domain != 'py':
return None
if not info['module']:
return None
import importlib, inspect, types
mod = importlib.import_module(info['module'])
# Try to find the value
val = mod
for k in info['fullname'].split('.'):
val = getattr(val, k, None)
if val == None:
break
# Special case for shellcraft
if info['module'].startswith('pwnlib.shellcraft.'):
filename = 'pwnlib/shellcraft/templates/%s' % val._relpath
# Case for everything else
else:
filename = info['module'].replace('.', '/') + '.py'
if isinstance(val, (types.ModuleType, types.ClassType, types.MethodType, types.FunctionType, types.TracebackType, types.FrameType, types.CodeType)):
try:
lines, first = inspect.getsourcelines(val)
filename += '#L%d-%d' % (first, first + len(lines) - 1)
except IOError:
pass
return "https://github.com/Gallopsled/pwntools/blob/%s/%s" % (branch, filename)
# The readthedocs theme is used by the Dash generator. (Can be used for HTML too.)
if build_dash:
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
| mit | -6,300,908,279,611,668,000 | 30.670487 | 156 | 0.689406 | false | 3.678203 | true | false | false |
stephenlienharrell/roster-dns-management | test/dnsrmacl_test.py | 1 | 8124 | #!/usr/bin/python
# Copyright (c) 2009, Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Purdue University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Regression test for dnsrmacl
Make sure you are running this against a database that can be destroyed.
DO NOT EVER RUN THIS TEST AGAINST A PRODUCTION DATABASE.
"""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'
import os
import sys
import socket
import threading
import time
import getpass
import unittest
import roster_core
import roster_server
from roster_user_tools import roster_client_lib
USER_CONFIG = 'test_data/roster_user_tools.conf'
CONFIG_FILE = 'test_data/roster.conf' # Example in test_data
SCHEMA_FILE = '../roster-core/data/database_schema.sql'
DATA_FILE = 'test_data/test_data.sql'
HOST = u'localhost'
USERNAME = u'sharrell'
PASSWORD = u'test'
KEYFILE=('test_data/dnsmgmt.key.pem')
CERTFILE=('test_data/dnsmgmt.cert.pem')
CREDFILE='%s/.dnscred' % os.getcwd()
EXEC='../roster-user-tools/scripts/dnsrmacl'
class options(object):
password = u'test'
username = u'sharrell'
server = None
ldap = u'ldaps://ldap.cs.university.edu:636'
credfile = CREDFILE
view_name = None
ip_address = None
target = u'machine1'
ttl = 64
class DaemonThread(threading.Thread):
def __init__(self, config_instance, port):
threading.Thread.__init__(self)
self.config_instance = config_instance
self.port = port
self.daemon_instance = None
def run(self):
self.daemon_instance = roster_server.Server(self.config_instance, KEYFILE,
CERTFILE)
self.daemon_instance.Serve(port=self.port)
class Testdnsrmacl(unittest.TestCase):
def setUp(self):
def PickUnusedPort():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, 0))
addr, port = s.getsockname()
s.close()
return port
self.config_instance = roster_core.Config(file_name=CONFIG_FILE)
db_instance = self.config_instance.GetDb()
db_instance.CreateRosterDatabase()
data = open(DATA_FILE, 'r').read()
db_instance.StartTransaction()
db_instance.cursor.execute(data)
db_instance.EndTransaction()
db_instance.close()
self.port = PickUnusedPort()
self.server_name = 'https://%s:%s' % (HOST, self.port)
self.daemon_thread = DaemonThread(self.config_instance, self.port)
self.daemon_thread.daemon = True
self.daemon_thread.start()
self.core_instance = roster_core.Core(USERNAME, self.config_instance)
self.password = 'test'
time.sleep(1)
roster_client_lib.GetCredentials(USERNAME, u'test', credfile=CREDFILE,
server_name=self.server_name)
def tearDown(self):
if( os.path.exists(CREDFILE) ):
os.remove(CREDFILE)
def testRemoveAcl(self):
self.core_instance.MakeACL(u'acl1', u'192.168.1.0/24')
self.assertEqual(self.core_instance.ListACLs(),
{u'acl1': [{'cidr_block': u'192.168.1.0/24'}],
u'any': [{'cidr_block': None}]})
command = os.popen('python %s --force -a acl1 -u %s -p %s --config-file %s '
'-s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG, self.server_name, CREDFILE))
self.assertEqual(command.read(),
'REMOVED ACL: acl: acl1\n')
command.close()
self.assertEqual(self.core_instance.ListACLs(),
{u'any': [{'cidr_block': None}]})
def testRemoveCIDRFromAcl(self):
self.core_instance.MakeACL(u'acl1', u'192.168.1.0/24')
self.core_instance.MakeACL(u'acl1', u'192.168.2.0/24')
self.assertEqual(self.core_instance.ListACLs(),
{u'acl1': [{'cidr_block': u'192.168.1.0/24'},
{'cidr_block': u'192.168.2.0/24'}],
u'any': [{'cidr_block': None}]})
command = os.popen('python %s -a acl1 --cidr-block 192.168.2.0/24 '
'-u %s -p %s --config-file %s -s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG, self.server_name, CREDFILE))
self.assertEqual(command.read(),
'REMOVED ACL: acl: acl1 cidr_block: 192.168.2.0/24\n')
self.assertEqual(self.core_instance.ListACLs(),
{u'acl1': [{'cidr_block': u'192.168.1.0/24'}],
u'any': [{'cidr_block': None}]})
command = os.popen('python %s -a acl1 --cidr-block 192.168.1.0/24 '
'-u %s -p %s --config-file %s -s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG, self.server_name, CREDFILE))
self.assertEqual(command.read(),
'REMOVED ACL: acl: acl1 cidr_block: 192.168.1.0/24\n')
self.assertEqual(self.core_instance.ListACLs(),
{u'any': [{'cidr_block': None}]})
self.core_instance.MakeACL(u'acl1', u'192.168.1.0/24')
self.core_instance.MakeACL(u'acl1', u'192.168.2.0/24')
command = os.popen('python %s -a acl1 --cidr-block 192.168.2.0/24 '
'-u %s -p %s --config-file %s -s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG, self.server_name, CREDFILE))
self.assertEqual(command.read(),
'REMOVED ACL: acl: acl1 cidr_block: 192.168.2.0/24\n')
command = os.popen('python %s -a acl1 --cidr-block 192.168.1.0/24 '
'-u %s -p %s --config-file %s -s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG, self.server_name, CREDFILE))
self.assertEqual(command.read(),
'REMOVED ACL: acl: acl1 cidr_block: 192.168.1.0/24\n')
self.assertEqual(self.core_instance.ListACLs(),
{u'any': [{'cidr_block': None}]})
def testErrors(self):
command = os.popen('python %s -a acl1 --cidr-block 192.168.2.0/24 '
'-u %s -p %s --config-file %s -s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG, self.server_name, CREDFILE))
self.assertEqual(command.read(),
'CLIENT ERROR: No acl found with acl: acl1 cidr_block: 192.168.2.0/24'
'\n')
command = os.popen('python %s -u %s -p %s --config-file %s '
'--force -s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG,
self.server_name, CREDFILE))
self.assertEqual(command.read(),
"CLIENT ERROR: The -a/--acl flag is required.\n")
command.close()
command = os.popen('python %s --acl acl1 -u %s -p %s --config-file %s '
'-s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG,
self.server_name, CREDFILE))
self.assertEqual(command.read(),
"CLIENT ERROR: Must use --force to delete entire ACL.\n")
command.close()
if( __name__ == '__main__' ):
unittest.main()
| bsd-3-clause | -5,292,346,615,663,352,000 | 39.217822 | 80 | 0.64094 | false | 3.32542 | true | false | false |
Camiloasc1/AstronomyUNAL | CelestialMechanics/orbits/test/test_ellipse.py | 1 | 4276 | import unittest
from astropy import constants as astroconst
from astropy import units as u
from astropy.time import Time
from CelestialMechanics.kepler import constants
from CelestialMechanics.orbits import ellipse
from CelestialMechanics.orbits.ellipse import delta_t_t0_aeangle
class MyTestCase(unittest.TestCase):
def test_ellipse(self):
r = ellipse.r(1.5236164, 0.0932802, 32.)
self.assertAlmostEqual(1.3996391, r, places=7)
a, e = ellipse.ae(0.275, 1.168)
self.assertAlmostEqual(0.722, a, places=3)
self.assertAlmostEqual(0.618, e, places=2)
sun = astroconst.M_sun
mercury = astroconst.M_sun / constants.Mercury
energy = ellipse.E((0.38709 * u.au).to(u.m), sun, mercury)
self.assertAlmostEqual(-3.817E32, energy.value, delta=1E32)
# 4.14
a = 17.8 * u.au
e = 0.967
q, Q = ellipse.qQ(a, e)
self.assertAlmostEqual(0.031478, ellipse.v_sun(q, a, 0).value, places=5)
self.assertAlmostEqual(0.000528, ellipse.v_sun(Q, a, 0).value, places=5)
self.assertAlmostEqual(54.50, ellipse.v_sun(q, a, 0).to(u.km / u.s).value, places=2)
self.assertAlmostEqual(0.91, ellipse.v_sun(Q, a, 0).to(u.km / u.s).value, places=2)
vq, vQ = ellipse.vqQ_sun(a, e, 0)
self.assertAlmostEqual(0.031478, vq.value, places=2)
self.assertAlmostEqual(0.000528, vQ.value, places=2)
self.assertAlmostEqual(54.50, vq.to(u.km / u.s).value, places=2)
self.assertAlmostEqual(0.91, vQ.to(u.km / u.s).value, places=2)
# 4.15
a = astroconst.R_earth + 560 * u.km
self.assertAlmostEqual(7569.5, ellipse.v(a, a, astroconst.M_earth, 0).value, delta=20)
def test_chapter_5(self):
from CelestialMechanics.mu import mu_sun
# 5.5
t0 = Time('2014-01-03T00:00:00Z', format='isot', scale='utc').jd * u.d + 0.633 * u.d
t1 = Time('2014-04-03T00:00:00Z', format='isot', scale='utc').jd * u.d + 0.9 * u.d
t2 = Time('2014-10-05T00:00:00Z', format='isot', scale='utc').jd * u.d + 0.5 * u.d
a = 1 * u.au
e = 0.01669
r = 1 * u.au
mu = mu_sun(1 / constants.Earth_Moon)
angles = ellipse.angles(a, e, r)
self.assertAlmostEqual(90.9563109612867, angles[0].value)
self.assertAlmostEqual(269.0436890387133, angles[1].value)
        delta_t_t0 = delta_t_t0_aeangle(a, e, angles[0], mu) % (1 * u.yr).to(u.d)  # modulo 1 year
self.assertAlmostEqual((t1 - t0).value, delta_t_t0.value, delta=0.1)
        delta_t_t0 = delta_t_t0_aeangle(a, e, angles[1], mu) % (1 * u.yr).to(u.d)  # modulo 1 year
self.assertAlmostEqual((t2 - t0).value, delta_t_t0.value, delta=0.1)
# 5.6
a = 17.834144 * u.au
e = 0.967143
angle = 60 * u.deg
mu = mu_sun(0)
delta_t_t0 = delta_t_t0_aeangle(a, e, angle, mu)
self.assertAlmostEqual(23.7573, delta_t_t0.value, places=2)
# 5.7
t0 = Time('2003-10-23T05:57:10Z', format='isot', scale='utc').jd * u.d
t1 = Time('2007-06-20T00:00:00Z', format='isot', scale='utc').jd * u.d
a = 2.56743 * u.au
e = 0.75355
r = 2.325364 * u.au
mu = mu_sun(0)
angles = ellipse.angles(a, e, r)
self.assertAlmostEqual(360 - 226.064389, angles[0].value, places=5)
self.assertAlmostEqual(226.064389, angles[1].value, places=5)
        angle = angles[1]  # r-dot < 0 (the body is moving inward toward perihelion)
# inlined ellipse.delta_t_t0_aeangle()
E = ellipse.E_angle(e, angle)
M = ellipse.angle_M_eE(e, E)
from CelestialMechanics.kepler.kepler3 import T_sun
T = T_sun(a, 0) # 1 year (of the minor planet)
        delta_t_t0 = ellipse.delta_t_t0_Mn(M, ellipse.n(a, mu)) % T  # modulo 1 year (of the minor planet)
self.assertAlmostEqual(277.187625, E.to(u.deg).value % 360, places=6)
self.assertAlmostEqual(320.023578, M.to(u.deg).value % 360, places=6)
self.assertAlmostEqual(((t1 - t0) % T).value, delta_t_t0.value, places=4)
t0_calculated = t1 - delta_t_t0
# print(Time(t0_calculated, format='jd', scale='utc').isot)
self.assertAlmostEqual(t0.value, t0_calculated.value, places=4)
if __name__ == '__main__':
unittest.main()
| mit | -1,451,791,043,087,483,000 | 40.115385 | 106 | 0.600094 | false | 2.709759 | true | false | false |
qtproject/qt3d | tools/utils/exporters/blender/qt3d_armature_export.py | 2 | 5606 | #############################################################################
##
## Copyright (C) 2017 Klaralvdalens Datakonsult AB (KDAB).
## Contact: https://www.qt.io/licensing/
##
## This file is part of the Qt3D module of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 3 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL3 included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 3 requirements
## will be met: https://www.gnu.org/licenses/lgpl-3.0.html.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 2.0 or (at your option) the GNU General
## Public license version 3 or any later version approved by the KDE Free
## Qt Foundation. The licenses are as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-2.0.html and
## https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
# Required Blender information.
bl_info = {
"name": "Qt3D Armature Exporter",
"author": "Sean Harmer <[email protected]>, Robert Brock <[email protected]>",
"version": (0, 2),
"blender": (2, 80, 0),
"location": "File > Export > Qt3D Armature Exporter (.json)",
"description": "Export Armature to json to use with Qt3D",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Import-Export"
}
import bpy
import os
import struct
import mathutils
import math
import json
from array import array
from bpy import context
from bpy_extras.io_utils import ExportHelper
from bpy.props import (
BoolProperty,
FloatProperty,
StringProperty,
EnumProperty,
)
from collections import defaultdict
def jsonBuilder():
bonesList = []
name = ""
boneParent = ""
ob = bpy.context.object.data
if not hasattr(ob, 'bones'):
return bonesList
for bone in ob.bones:
#check parent exists
if bone.parent:
boneParent = bone.parent.name
else:
boneParent = ""
#add the bones
bonesList.append({"bone": bone.name,
"parent": boneParent,
"matrix": jsonMatrix(bone)
})
return bonesList
def jsonMatrix(bone):
matrix = []
for i in range(0, 4):
matrix.append(str("%.4f, %.4f, %.4f, %.4f" % (bone.matrix_local[i][0],
bone.matrix_local[i][1],
bone.matrix_local[i][2],
bone.matrix_local[i][3])))
return matrix
class Qt3DMeshDataConverter:
def boneInfoToJson(self):
        # Collect the bone data and serialize it to JSON in a single pass
print(">>> About to enter Objects")
jsonData = json.dumps({ "bones": jsonBuilder()}, indent=2, sort_keys=True, separators=(',', ': '))
return jsonData
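# For orientation, a sketch of the JSON this converter emits (the bone name and
# matrix values are made up; the real output depends on the armature exported):
# {
#   "bones": [
#     {
#       "bone": "Bone",
#       "matrix": ["1.0000, 0.0000, 0.0000, 0.0000", "..."],
#       "parent": ""
#     }
#   ]
# }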
class Qt3DArmatureExporter(bpy.types.Operator, ExportHelper):
"""Qt3D Exporter"""
bl_idname = "export_scene.qt3d_armature_exporter";
bl_label = "Qt3DArmatureExporter";
bl_options = {'PRESET'};
filename_ext = ""
use_filter_folder = True
def __init__(self):
pass
def execute(self, context):
print("In Execute" + bpy.context.scene.name)
self.userpath = self.properties.filepath
# unselect all
bpy.ops.object.select_all(action='DESELECT')
converter = Qt3DMeshDataConverter()
fileContent = converter.boneInfoToJson()
with open(self.userpath + ".json", '+w') as f:
f.write(fileContent)
return {'FINISHED'}
def createBlenderMenu(self, context):
self.layout.operator(Qt3DArmatureExporter.bl_idname, text="Qt3D Armature Export(.json)")
# Register against Blender
def register():
bpy.utils.register_class(Qt3DArmatureExporter)
if bpy.app.version < (2, 80, 0):
bpy.types.INFO_MT_file_export.append(createBlenderMenu)
else:
bpy.types.TOPBAR_MT_file_export.append(createBlenderMenu)
def unregister():
bpy.utils.unregister_class(Qt3DArmatureExporter)
if bpy.app.version < (2, 80, 0):
bpy.types.INFO_MT_file_export.remove(createBlenderMenu)
else:
bpy.types.TOPBAR_MT_file_export.remove(createBlenderMenu)
# Handle running the script from Blender's text editor.
if (__name__ == "__main__"):
register();
    bpy.ops.export_scene.qt3d_armature_exporter();
| lgpl-3.0 | 1,695,850,950,272,810,000 | 32.568862 | 106 | 0.620407 | false | 3.890354 | false | false | false |
aroig/metadata-readers | libs/docmeta/utils.py | 1 | 2250 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# docmeta - A python module to extract metadata from document files
# Copyright 2012 Abdó Roig-Maranges <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import sys
import subprocess
class CommandError(Exception):
"""Conversion error"""
def __init__(self, desc, cmdlist, retlist, stderrlist):
Exception.__init__(self, desc)
self.desc = desc
self.cmdlist = cmdlist
self.retlist = retlist
self.stderrlist = stderrlist
print("Command Error !!!")
print(" cmd: %s" % ' | '.join([' '.join(c) for c in self.cmdlist]))
print(" ret: %s" % str(self.retlist))
print(" stderr: %s" % str(self.stderrlist))
def executepipe(cmdlst, outfile=None, checkreturn=True):
N = len(cmdlst)
p = []
for n in range(0,N):
cmd = cmdlst[n]
if n == 0: sin = None
else: sin = plast.stdout
if n < N-1:
sout = subprocess.PIPE
else:
if outfile != None: sout = open(outfile, 'w')
else: sout = subprocess.PIPE
serr = subprocess.PIPE
plast = subprocess.Popen(cmd, stdout=sout, stderr=serr, stdin=sin)
p.append(plast)
ret,err = plast.communicate()
if checkreturn and plast.returncode != 0:
raise CommandError("Command produced errors", cmdlst, plast.returncode, err)
if outfile == None:
if sys.version_info[0] >= 3: return ret.decode('utf-8')
else: return ret
else:
sout.close()
return None
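# Usage sketch (the commands below are hypothetical and only illustrate the
# calling convention): run two piped commands and capture the final stdout.
#
#   text = executepipe([['pdftotext', 'doc.pdf', '-'], ['head', '-n', '5']])
#
# Passing outfile='out.txt' writes the last command's stdout to that file and
# returns None; checkreturn=False suppresses the CommandError raised when the
# last command exits with a non-zero status.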
| gpl-3.0 | 8,381,212,299,770,244,000 | 32.073529 | 84 | 0.618497 | false | 3.711221 | false | false | false |
jhamman/xray | xarray/backends/netCDF4_.py | 1 | 13873 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
import numpy as np
from .. import Variable
from ..conventions import pop_to
from ..core import indexing
from ..core.utils import (FrozenOrderedDict, NdimSizeLenMixin,
DunderArrayMixin, close_on_error,
is_remote_uri)
from ..core.pycompat import iteritems, basestring, OrderedDict, PY3
from .common import (WritableCFDataStore, robust_getitem,
DataStorePickleMixin, find_root)
from .netcdf3 import (encode_nc3_attr_value, encode_nc3_variable,
maybe_convert_to_char_array)
# This lookup table maps from dtype.byteorder to a readable endian
# string used by netCDF4.
_endian_lookup = {'=': 'native',
'>': 'big',
'<': 'little',
'|': 'native'}
class BaseNetCDF4Array(NdimSizeLenMixin, DunderArrayMixin):
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_array()
self.shape = array.shape
dtype = array.dtype
if dtype is str:
# use object dtype because that's the only way in numpy to
# represent variable length strings; it also prevents automatic
# string concatenation via conventions.decode_cf_variable
dtype = np.dtype('O')
self.dtype = dtype
def get_array(self):
self.datastore.assert_open()
return self.datastore.ds.variables[self.variable_name]
class NetCDF4ArrayWrapper(BaseNetCDF4Array):
def __getitem__(self, key):
if self.datastore.is_remote: # pragma: no cover
getitem = functools.partial(robust_getitem, catch=RuntimeError)
else:
getitem = operator.getitem
with self.datastore.ensure_open(autoclose=True):
try:
data = getitem(self.get_array(), key)
except IndexError:
# Catch IndexError in netCDF4 and return a more informative
# error message. This is most often called when an unsorted
# indexer is used before the data is loaded from disk.
msg = ('The indexing operation you are attempting to perform '
'is not valid on netCDF4.Variable object. Try loading '
'your data into memory first by calling .load().')
if not PY3:
import traceback
msg += '\n\nOriginal traceback:\n' + traceback.format_exc()
raise IndexError(msg)
if self.ndim == 0:
# work around for netCDF4-python's broken handling of 0-d
# arrays (slicing them always returns a 1-dimensional array):
# https://github.com/Unidata/netcdf4-python/pull/220
data = np.asscalar(data)
return data
def _nc4_values_and_dtype(var):
if var.dtype.kind == 'U':
# this entire clause should not be necessary with netCDF4>=1.0.9
if len(var) > 0:
var = var.astype('O')
dtype = str
elif var.dtype.kind == 'S':
# use character arrays instead of unicode, because unicode support in
# netCDF4 is still rather buggy
data, dims = maybe_convert_to_char_array(var.data, var.dims)
var = Variable(dims, data, var.attrs, var.encoding)
dtype = var.dtype
elif var.dtype.kind in ['i', 'u', 'f', 'c']:
dtype = var.dtype
else:
raise ValueError('cannot infer dtype for netCDF4 variable')
return var, dtype
def _nc4_group(ds, group, mode):
if group in set([None, '', '/']):
# use the root group
return ds
else:
# make sure it's a string
if not isinstance(group, basestring):
raise ValueError('group must be a string or None')
# support path-like syntax
path = group.strip('/').split('/')
for key in path:
try:
ds = ds.groups[key]
except KeyError as e:
if mode != 'r':
ds = ds.createGroup(key)
else:
# wrap error to provide slightly more helpful message
raise IOError('group not found: %s' % key, e)
return ds
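# e.g. _nc4_group(ds, '/forecasts/model1', 'r') descends into the nested
# 'model1' group (the group names here are hypothetical); when mode is not 'r',
# missing groups along the path are created instead of raising.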
def _ensure_fill_value_valid(data, attributes):
# work around for netCDF4/scipy issue where _FillValue has the wrong type:
# https://github.com/Unidata/netcdf4-python/issues/271
if data.dtype.kind == 'S' and '_FillValue' in attributes:
attributes['_FillValue'] = np.string_(attributes['_FillValue'])
def _force_native_endianness(var):
# possible values for byteorder are:
# = native
# < little-endian
# > big-endian
# | not applicable
# Below we check if the data type is not native or NA
if var.dtype.byteorder not in ['=', '|']:
# if endianness is specified explicitly, convert to the native type
data = var.data.astype(var.dtype.newbyteorder('='))
var = Variable(var.dims, data, var.attrs, var.encoding)
# if endian exists, remove it from the encoding.
var.encoding.pop('endian', None)
    # check that the 'endian' encoding, if present, is 'native'
    if var.encoding.get('endian', 'native') != 'native':
raise NotImplementedError("Attempt to write non-native endian type, "
"this is not supported by the netCDF4 "
"python library.")
return var
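# Illustrative only: the astype() call above is essentially the plain-numpy
# byte-order conversion sketched below (the array values are hypothetical).
#
#   big_endian = np.arange(3, dtype='>f8')
#   native = big_endian.astype(big_endian.dtype.newbyteorder('='))
#
# After the conversion the data is stored in native byte order, which is what
# the netCDF4 library expects when writing.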
def _extract_nc4_variable_encoding(variable, raise_on_invalid=False,
lsd_okay=True, backend='netCDF4'):
encoding = variable.encoding.copy()
safe_to_drop = set(['source', 'original_shape'])
valid_encodings = set(['zlib', 'complevel', 'fletcher32', 'contiguous',
'chunksizes'])
if lsd_okay:
valid_encodings.add('least_significant_digit')
if (encoding.get('chunksizes') is not None and
(encoding.get('original_shape', variable.shape) !=
variable.shape) and not raise_on_invalid):
del encoding['chunksizes']
for k in safe_to_drop:
if k in encoding:
del encoding[k]
if raise_on_invalid:
invalid = [k for k in encoding if k not in valid_encodings]
if invalid:
raise ValueError('unexpected encoding parameters for %r backend: '
' %r' % (backend, invalid))
else:
for k in list(encoding):
if k not in valid_encodings:
del encoding[k]
return encoding
def _open_netcdf4_group(filename, mode, group=None, **kwargs):
import netCDF4 as nc4
ds = nc4.Dataset(filename, mode=mode, **kwargs)
with close_on_error(ds):
ds = _nc4_group(ds, group, mode)
_disable_mask_and_scale(ds)
return ds
def _disable_mask_and_scale(ds):
for var in ds.variables.values():
# we handle masking and scaling ourselves
var.set_auto_maskandscale(False)
class NetCDF4DataStore(WritableCFDataStore, DataStorePickleMixin):
"""Store for reading and writing data via the Python-NetCDF4 library.
This store supports NetCDF3, NetCDF4 and OpenDAP datasets.
"""
def __init__(self, netcdf4_dataset, mode='r', writer=None, opener=None,
autoclose=False):
if autoclose and opener is None:
raise ValueError('autoclose requires an opener')
_disable_mask_and_scale(netcdf4_dataset)
self.ds = netcdf4_dataset
self._autoclose = autoclose
self._isopen = True
self.format = self.ds.data_model
self._filename = self.ds.filepath()
self.is_remote = is_remote_uri(self._filename)
self._mode = mode = 'a' if mode == 'w' else mode
if opener:
self._opener = functools.partial(opener, mode=self._mode)
else:
self._opener = opener
super(NetCDF4DataStore, self).__init__(writer)
@classmethod
def open(cls, filename, mode='r', format='NETCDF4', group=None,
writer=None, clobber=True, diskless=False, persist=False,
autoclose=False):
if format is None:
format = 'NETCDF4'
opener = functools.partial(_open_netcdf4_group, filename, mode=mode,
group=group, clobber=clobber,
diskless=diskless, persist=persist,
format=format)
ds = opener()
return cls(ds, mode=mode, writer=writer, opener=opener,
autoclose=autoclose)
def open_store_variable(self, name, var):
with self.ensure_open(autoclose=False):
dimensions = var.dimensions
data = indexing.LazilyIndexedArray(NetCDF4ArrayWrapper(name, self))
attributes = OrderedDict((k, var.getncattr(k))
for k in var.ncattrs())
_ensure_fill_value_valid(data, attributes)
# netCDF4 specific encoding; save _FillValue for later
encoding = {}
filters = var.filters()
if filters is not None:
encoding.update(filters)
chunking = var.chunking()
if chunking is not None:
if chunking == 'contiguous':
encoding['contiguous'] = True
encoding['chunksizes'] = None
else:
encoding['contiguous'] = False
encoding['chunksizes'] = tuple(chunking)
# TODO: figure out how to round-trip "endian-ness" without raising
# warnings from netCDF4
# encoding['endian'] = var.endian()
pop_to(attributes, encoding, 'least_significant_digit')
# save source so __repr__ can detect if it's local or not
encoding['source'] = self._filename
encoding['original_shape'] = var.shape
return Variable(dimensions, data, attributes, encoding)
def get_variables(self):
with self.ensure_open(autoclose=False):
dsvars = FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in
iteritems(self.ds.variables))
return dsvars
def get_attrs(self):
with self.ensure_open(autoclose=True):
attrs = FrozenOrderedDict((k, self.ds.getncattr(k))
for k in self.ds.ncattrs())
return attrs
def get_dimensions(self):
with self.ensure_open(autoclose=True):
dims = FrozenOrderedDict((k, len(v))
for k, v in iteritems(self.ds.dimensions))
return dims
def get_encoding(self):
with self.ensure_open(autoclose=True):
encoding = {}
encoding['unlimited_dims'] = {
k for k, v in self.ds.dimensions.items() if v.isunlimited()}
return encoding
def set_dimension(self, name, length):
with self.ensure_open(autoclose=False):
self.ds.createDimension(name, size=length)
def set_attribute(self, key, value):
with self.ensure_open(autoclose=False):
if self.format != 'NETCDF4':
value = encode_nc3_attr_value(value)
self.ds.setncattr(key, value)
def set_variables(self, *args, **kwargs):
with self.ensure_open(autoclose=False):
super(NetCDF4DataStore, self).set_variables(*args, **kwargs)
def prepare_variable(self, name, variable, check_encoding=False,
unlimited_dims=None):
attrs = variable.attrs.copy()
variable = _force_native_endianness(variable)
if self.format == 'NETCDF4':
variable, datatype = _nc4_values_and_dtype(variable)
else:
variable = encode_nc3_variable(variable)
datatype = variable.dtype
self.set_necessary_dimensions(variable, unlimited_dims=unlimited_dims)
fill_value = attrs.pop('_FillValue', None)
if fill_value in ['', '\x00']:
# these are equivalent to the default FillValue, but netCDF4
# doesn't like setting fill_value to an empty string
fill_value = None
encoding = _extract_nc4_variable_encoding(
variable, raise_on_invalid=check_encoding)
nc4_var = self.ds.createVariable(
varname=name,
datatype=datatype,
dimensions=variable.dims,
zlib=encoding.get('zlib', False),
complevel=encoding.get('complevel', 4),
shuffle=encoding.get('shuffle', True),
fletcher32=encoding.get('fletcher32', False),
contiguous=encoding.get('contiguous', False),
chunksizes=encoding.get('chunksizes'),
endian='native',
least_significant_digit=encoding.get('least_significant_digit'),
fill_value=fill_value)
nc4_var.set_auto_maskandscale(False)
for k, v in iteritems(attrs):
# set attributes one-by-one since netCDF4<1.0.10 can't handle
# OrderedDict as the input to setncatts
nc4_var.setncattr(k, v)
return nc4_var, variable.data
def sync(self):
with self.ensure_open(autoclose=True):
super(NetCDF4DataStore, self).sync()
self.ds.sync()
def close(self):
if self._isopen:
# netCDF4 only allows closing the root group
ds = find_root(self.ds)
if ds._isopen:
ds.close()
self._isopen = False
| apache-2.0 | -2,989,005,801,748,905,500 | 36.904372 | 79 | 0.579975 | false | 4.148624 | false | false | false |
mvtuong/mysite | v1/blog/migrations/0002_auto_20150708_1454.py | 1 | 1092 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='blog',
name='file',
field=models.FileField(upload_to=b'user_upload/files/', blank=True),
),
migrations.AddField(
model_name='blog',
name='image',
field=models.ImageField(upload_to=b'user_upload/images/', blank=True),
),
migrations.AddField(
model_name='tag',
name='description',
field=models.TextField(max_length=500, blank=True),
),
migrations.AddField(
model_name='topic',
name='description',
field=models.TextField(max_length=500, blank=True),
),
migrations.AlterField(
model_name='blog',
name='tag',
field=models.ManyToManyField(to='blog.Tag', blank=True),
),
]
| apache-2.0 | 8,163,413,012,375,880,000 | 27 | 82 | 0.540293 | false | 4.368 | false | false | false |
arnikz/EMF_data_mining | DEA/mzscore.py | 1 | 12015 | #!/usr/bin/env python
#
# This script takes a database (SQLite) obtained from the PIQMIe service and populates
# additional tables/views to facilitate differential protein expression analyses based
# on standardized SILAC ratios.
#
# Note:
# z_score_{raw|norm}_ratio - column with canonical Z-score transformed raw/normalized
# SILAC protein ratios
#
# mz_score_{raw|norm}_ratio - column with modified Z-score transformed SILAC protein ratios
# suitable for heavy-tailed data (Iglewicz and Hoaglin, 1993)
#
#
# Author: Arnold Kuzniar
#
# Version: 1.0
#
import os
import sys
import argparse as argp
import math
import numpy as np
import scipy.stats as st
import sqlite3 as sqlt
import collections as cls
ratio_types = { # lookup to link column values to column names
'RATIO H/L': 'raw_ratio_HL',
'RATIO H/M': 'raw_ratio_HM',
'RATIO M/L': 'raw_ratio_ML',
'RATIO H/L NORMALIZED': 'norm_ratio_HL',
'RATIO H/M NORMALIZED': 'norm_ratio_HM',
'RATIO M/L NORMALIZED': 'norm_ratio_ML'
}
score_types = { # lookup to link user input to table column
'Zr' : 'z_score_raw_ratio',
'Zn' : 'z_score_norm_ratio',
'Mr' : 'm_score_raw_ratio',
'Mn' : 'm_score_norm_ratio'
}
# parse command-line args
parser = argp.ArgumentParser(
description = 'Differential analysis of SILAC protein ratios based on standardized scores.')
parser.add_argument(
'-n',
action = 'store_true',
dest = 'new_tabs',
help = 'populate new db tables with (modified) Z-scores')
parser.add_argument(
'-d',
dest = 'dts',
required = True,
choices = ['VH10', 'U2OS', 'IB10'],
help = 'select one of the data sets or cell lines')
parser.add_argument(
'-s',
required = True,
choices = score_types.keys(),
help = 'select one of the score types for filtering: Z*,M* - Z-score or modified Z-score; *r,*n - score based on raw or normalized SILAC protein ratios')
parser.add_argument(
'-c',
required = True,
dest = 'cutoff',
type = float,
help = 'absolute score cutoff (e.g. 1.65, 1.96 or 2.58)')
parser.add_argument(
'-o',
dest = 'outfile',
help = 'output file')
parser.add_argument(
'dbfile',
help = 'sqlite3 database file')
args = parser.parse_args()
# check user input
dbfile = args.dbfile
outfile = args.outfile
new_tabs = args.new_tabs
dts = args.dts
stype = args.s
cutoff = args.cutoff
score_type = None
if os.path.isfile(dbfile) is False:
parser.error("dbfile '%s' not found" % dbfile)
if stype and cutoff:
score_type = score_types[stype]
else:
parser.error('-s and -c args must be used together')
if outfile is None:
# set the default output filename
outfile = os.path.join(os.path.dirname(dbfile), '%s_mzscore_%s_%.2f.tab' % (dts, stype, cutoff))
if cutoff < 0:
parser.error('the absolute score cutoff must be a positive value')
# print info into STDOUT
print """
dbfile = %s
outfile = %s
dataset = %s
re-score = %s
score type = %s
score cutoff = %.2f
""" % (dbfile, outfile, dts, new_tabs, stype, cutoff)
# sqlite3 user-defined functions (UDFs)
def log(value, base):
try:
return math.log(value) / math.log(base)
except:
return None
def sqrt(value):
try:
return math.sqrt(value)
except:
return None
def pvalue(score): # convert Z- or M-score to two-tailed probability (P-value)
try:
return 2 * st.norm.cdf(-abs(score))
except:
return None
class Stdev: # sample standard deviation (aggregate function)
def __init__(self):
self.vec = []
def step(self, value):
self.vec.append(value)
def finalize(self):
return np.array(self.vec).std(ddof=1)
class Median: # median (aggregate function)
def __init__(self):
self.arr = []
def step(self, value):
self.arr.append(value)
def finalize(self):
return np.median(np.array(self.arr))
class Mad: # median absolute deviation (aggregate function)
def __init__(self):
self.arr = []
def step(self, value):
self.arr.append(value)
def finalize(self):
median = np.median(np.array(self.arr))
return np.median(np.abs(self.arr - median))
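# For reference, an in-memory sketch of the standardization that the SQL below
# performs per experiment and ratio type. The 0.6745 factor rescales the MAD so
# that the modified Z-score is comparable to the canonical Z-score for normally
# distributed data (Iglewicz and Hoaglin, 1993). The 'ratios' argument is a
# hypothetical sequence of SILAC protein ratios, used only for illustration.
def _illustrate_scores(ratios):
    log_ratios = np.log2(np.asarray(ratios, dtype=float))
    z_scores = (log_ratios - log_ratios.mean()) / log_ratios.std(ddof=1)
    med = np.median(log_ratios)
    mad = np.median(np.abs(log_ratios - med))
    m_scores = 0.6745 * (log_ratios - med) / mad
    return z_scores, m_scores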
# SQL statements to populate tables/views
sql_create_tables = """
DROP VIEW IF EXISTS V_PGROUP_RATIO;
CREATE VIEW V_PGROUP_RATIO AS
-- simplifies the selection of SILAC ratios/types
SELECT
A.grp_id,
exp_name,
CAST(CASE %s
END AS TEXT) AS ratio_type,
CAST(quant_value AS NUMERIC) AS ratio_value
FROM
PGROUP_QUANT A, V_PGROUP B
WHERE
A.grp_id = B.grp_id
AND quant_type IN ('%s')
AND quant_value;
DROP TABLE IF EXISTS PGROUP_LOG2RATIO_STAT;
CREATE TABLE PGROUP_LOG2RATIO_STAT AS
-- stores descriptive statistics on SILAC protein ratios for each experiment
SELECT
exp_name,
ratio_type,
CAST(COUNT(ratio_value) AS INT) AS n,
CAST(MIN(LOG(ratio_value, 2)) AS NUMERIC) AS min,
CAST(MAX(LOG(ratio_value, 2)) AS NUMERIC) AS max,
CAST(AVG(LOG(ratio_value, 2)) AS NUMERIC) AS mean,
CAST(MEDIAN(LOG(ratio_value, 2)) AS NUMERIC) AS median,
CAST(STDEV(LOG(ratio_value, 2)) AS NUMERIC) AS sd,
CAST(MAD(LOG(ratio_value, 2)) AS NUMERIC) AS mad
FROM
V_PGROUP_RATIO
GROUP BY
exp_name, ratio_type;
CREATE INDEX idx_PGROUP_LOG2RATIO_STAT_exp_name_ratio_type ON PGROUP_LOG2RATIO_STAT(exp_name, ratio_type);
DROP VIEW IF EXISTS V_PGROUP_LOG2RATIO_STAT;
CREATE VIEW V_PGROUP_LOG2RATIO_STAT AS
-- shows rounded values of the statistics
SELECT
exp_name,
ratio_type,
n,
ROUND(min, 4) AS min,
ROUND(max, 4) AS max,
ROUND(mean, 4) AS mean,
ROUND(median, 4) AS median,
ROUND(sd, 4) AS sd,
ROUND(mad, 4) AS mad
FROM
PGROUP_LOG2RATIO_STAT;
DROP TABLE IF EXISTS PGROUP_MZSCORE;
CREATE TABLE PGROUP_MZSCORE AS
-- stores (modified) Z-score transformed SILAC protein raw/norm ratios
SELECT
grp_id,
A.exp_name AS exp_name,
CAST(A.ratio_type AS TEXT) AS ratio_type,
CAST((LOG(ratio_value, 2) - mean) / sd AS NUMERIC) AS z_score,
CAST(0.6745 * (LOG(ratio_value, 2) - median) / mad AS NUMERIC) AS m_score
FROM
V_PGROUP_RATIO A, PGROUP_LOG2RATIO_STAT B
WHERE
A.exp_name = B.exp_name
AND A.ratio_type = B.ratio_type;
CREATE INDEX idx_PGROUP_MZSCORE_grp_id ON PGROUP_MZSCORE(grp_id);
CREATE INDEX idx_PGROUP_MZSCORE_exp_name_ratio_type ON PGROUP_MZSCORE(exp_name, ratio_type);
""" % (' '.join([ "\n\tWHEN quant_type='%s' THEN '%s'" % (k, v) for (k, v) in ratio_types.iteritems() ]),
"','".join(ratio_types.keys()))
# dynamically construct SQL query to select diff. reg. protein groups
sql_sel_pgrps = """
SELECT
A.grp_id grp_id,
IFNULL(GROUP_CONCAT(DISTINCT gene), '-') genes,
{dts}_L0_M0_H1_{score_type}_HL '{stype}_H1L0', -- Z or M-score ON/OFF (treat1)
{dts}_L1_M1_H0_{score_type}_LH '{stype}_L1H0', -- Z or M-score ON/OFF (treat2)
{dts}_L0_M0_H1_{score_type}_HM '{stype}_H1M0', -- Z or M-score ON/OFF (treat3)
{dts}_L1_M1_H0_{score_type}_MH '{stype}_M1H0', -- Z or M-score ON/OFF (treat4)
{dts}_L0_M0_H1_{score_type}_LM '{stype}_L0M0', -- Z or M-score OFF/OFF (ctrl1)
{dts}_L1_M1_H0_{score_type}_LM '{stype}_L1M1', -- Z or M-score ON/ON (ctrl2)
PVALUE({dts}_L0_M0_H1_{score_type}_HL) 'pval_H1L0', -- P-value ON/OFF (treat1)
PVALUE({dts}_L1_M1_H0_{score_type}_LH) 'pval_L1H0', -- P-value ON/OFF (treat2)
PVALUE({dts}_L0_M0_H1_{score_type}_HM) 'pval_H1M0', -- P-value ON/OFF (treat3)
PVALUE({dts}_L1_M1_H0_{score_type}_MH) 'pval_M1H0', -- P-value ON/OFF (treat4)
PVALUE({dts}_L0_M0_H1_{score_type}_LM) 'pval_L0M0', -- P-value OFF/OFF (ctrl1)
PVALUE({dts}_L1_M1_H0_{score_type}_LM) 'pval_L1M1' -- P-value ON/ON (ctrl2)
FROM
V_PGROUP_MZSCORE A, PROT2GRP B, V_PROTEIN C
WHERE
A.grp_id = B.grp_id
AND B.prot_acc = C.acc
AND (({dts}_L0_M0_H1_{score_type}_HL > {cutoff}
AND {dts}_L0_M0_H1_{score_type}_HM > {cutoff}
AND {dts}_L1_M1_H0_{score_type}_LH > {cutoff}
AND {dts}_L1_M1_H0_{score_type}_MH > {cutoff})
OR ({dts}_L0_M0_H1_{score_type}_LH > {cutoff}
AND {dts}_L0_M0_H1_{score_type}_MH > {cutoff}
AND {dts}_L1_M1_H0_{score_type}_HL > {cutoff}
AND {dts}_L1_M1_H0_{score_type}_HM > {cutoff}))
AND {dts}_L0_M0_H1_{score_type}_ML <= {cutoff}
AND {dts}_L0_M0_H1_{score_type}_LM <= {cutoff}
AND {dts}_L1_M1_H0_{score_type}_ML <= {cutoff}
AND {dts}_L1_M1_H0_{score_type}_LM <= {cutoff}
GROUP BY A.grp_id;
""".format(dts=dts, score_type=score_type, stype=stype, cutoff=cutoff)
# connect to db
with sqlt.connect(args.dbfile) as conn:
conn.row_factory = sqlt.Row # enable column access by name: row['colnm']
conn.create_function('log', 2, log)
conn.create_function('sqrt', 1, sqrt)
conn.create_function('pvalue', 1, pvalue)
conn.create_aggregate('stdev', 1, Stdev)
conn.create_aggregate('median', 1, Median)
conn.create_aggregate('mad', 1, Mad)
cur = conn.cursor()
if new_tabs is True: # populate tables/views only with -n option
cur.executescript(sql_create_tables)
cur.execute('SELECT DISTINCT exp_name FROM EXPERIMENT')
exp_names = [ str(r[0]) for r in cur.fetchall() ]
cur.execute("SELECT DISTINCT ratio_type FROM PGROUP_LOG2RATIO_STAT")
ratio_types = [ str(r[0]) for r in cur.fetchall() ]
n = len(exp_names) * len(ratio_types)
i = 0
comma = ','
# create view for selecting diff. reg. proteins
sql_create_view = """
DROP VIEW IF EXISTS V_PGROUP_MZSCORE;
CREATE VIEW V_PGROUP_MZSCORE AS
SELECT
grp_id,
"""
for e in exp_names:
for r in ratio_types:
i += 1
rr = r[:-2] + r[-2:][::-1] # add inverse ratio (e.g., {raw|norm}_ratio_HL for *_ratio_LH)
if i == n: comma = ''
sql_create_view += "\tROUND(CAST(GROUP_CONCAT(CASE WHEN exp_name = '{exp}' AND ratio_type = '{ratio}' THEN z_score ELSE NULL END) AS NUMERIC), 4) AS '{exp}_z_score_{ratio}',\n".format(exp=e, ratio=r)
sql_create_view += "\tROUND(CAST(GROUP_CONCAT(CASE WHEN exp_name = '{exp}' AND ratio_type = '{ratio}' THEN -1 * z_score ELSE NULL END) AS NUMERIC), 4) AS '{exp}_z_score_{iratio}',\n".format(exp=e, ratio=r, iratio=rr)
sql_create_view += "\tROUND(CAST(GROUP_CONCAT(CASE WHEN exp_name = '{exp}' AND ratio_type = '{ratio}' THEN m_score ELSE NULL END) AS NUMERIC), 4) AS '{exp}_m_score_{ratio}',\n".format(exp=e, ratio=r)
sql_create_view += "\tROUND(CAST(GROUP_CONCAT(CASE WHEN exp_name = '{exp}' AND ratio_type = '{ratio}' THEN -1 * m_score ELSE NULL END) AS NUMERIC), 4) AS '{exp}_m_score_{iratio}'{comma}\n".format(exp=e, ratio=r, iratio=rr, comma=comma)
sql_create_view += "FROM PGROUP_MZSCORE GROUP BY grp_id"
cur.executescript(sql_create_view)
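        # The generated view exposes one column per experiment/ratio/score
        # combination, e.g. (assuming an experiment named 'VH10_L0_M0_H1'):
        #   VH10_L0_M0_H1_z_score_norm_ratio_HL
        # which is the naming scheme the selection query above relies on.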
# write results onto tab-delim file
if dts is not None:
sep = '\t' # column separator
n_pgrps = 0 # count diff. reg. protein groups
with open(outfile, 'w+') as fout:
try:
for drow in [ cls.OrderedDict(xi) for xi in cur.execute(sql_sel_pgrps) ]:
# first output column names
if n_pgrps == 0:
header = sep.join(drow.keys()) + os.linesep
fout.write(header)
# output remaining rows with column values (grp_id, Z-/M-scores and P-values)
row = drow.values()
grp_id = str(drow['grp_id'])
genes = str(drow['genes'])
scores = [ str(round(float(x), 4)) for x in row[2:] ]
srow = grp_id + sep + genes + sep + sep.join(scores) + os.linesep
fout.write(srow)
n_pgrps += 1
except sqlt.OperationalError as e:
sys.stderr.write('Error: Selected data set not found: %s\n' % e)
sys.exit(1)
# remove empty outfile
if os.path.getsize(outfile) == 0:
print 'Nothing to write onto outfile.'
os.remove(outfile)
else:
print 'Ndiff =', n_pgrps
| gpl-3.0 | 7,526,957,426,108,315,000 | 33.426934 | 247 | 0.627549 | false | 2.870968 | false | false | false |
brando56894/Dungeon-Quest | player.py | 1 | 8551 | #!/usr/bin/python2
#
#~~Player Functions~~
from superRandom import superRandint, superChoice
from time import sleep
import actions
import monsters
class CreatePlayer(object):
def __init__(self, name):
self.health = 125
self.xp = 0 #TODO: use gained XP to gain levels
self.potions = 0
self.gold = 0
self.weapons = ["dagger"]
self.name = name
self.steps = 0
self.damage_dealt = 12 #not needed
self.current_weapon = "dagger"
self.dragon_attack = False
self.basilisk_attack = False
self.has_sword = False
self.has_pistol = False
self.has_rifle = False
self.run_away = 0
self.has_key = False
self.turns = 0
def __repr__(self):
return ("\nName: %s\nHealth: %d\nXP: %d\nPotions: "
"%d\nGold: %d\nWeapons: %s\nSteps: %d\nCurr"
"ent Weapon: %s\nDragon Attack: %s\nBasili"
"skAttack: %s\nHas Sword: %s\nHas Pistol: "
"%s\nHas Rifle: %s\nTimes Run Away: %d\nHa"
"s Key: %s\nTurns: %s" % (self.name,self.health,self.xp,
self.potions,self.gold,self.weapons,
self.steps,self.current_weapon,
self.dragon_attack,self.basilisk_attack,
self.has_sword,self.has_pistol,self.has_rifle,
self.run_away,self.has_key,self.turns)
)
def find_gold(self):
amount = superRandint(1,25)
self.gold += amount
print "\nYou found %d gold coins, which brings you to a total of %d coins!" % (amount, self.gold)
sleep(2)
return self
def find_gold_debug(self,amount):
self.gold += amount
print "\nYou found %d gold coins, which brings you to a total of %d coins!" % (amount, self.gold)
sleep(2)
return self
def find_potions(self):
self.potions += 1
print "\nYou found a health potion! You now have %d potions in your inventory." % self.potions
sleep(2)
return self
def find_weapon(self):
#TODO: add more weapons
weapons = ["sword","pistol","rifle"]
found = superChoice(weapons)
print "\nYou found a %s!" % found
if found == "sword":
self.has_sword = True
elif found == "pistol":
self.has_pistol = True
else:
self.has_rifle = True
return self
def buy_potions(self):
print "\nGold: %d" % self.gold
print "Each potion costs 20 gold pieces and restores 25 HP."
amount = raw_input("\nHow many would you like to purchase? ")
cost = int(amount) * 20
if self.gold >= int(cost):
self.gold = self.gold - int(cost)
self.potions += int(amount)
print "\n%d potions have been added to your inventory." % int(amount)
sleep(2)
return self
else:
print "\nSorry you don't have enough gold for %d potions!" % int(amount)
sleep(2)
actions.visit_shop(self)
def use_potion(self):
if self.potions > 0 and self.potions < 2:
self.potions -= 1
self.health += 25
print "\nYour health is now at %d" % self.health
elif self.potions > 1:
print "\nYou currently have %d potions" % self.potions
amount = int(raw_input("\nHow many? "))
raise_health = amount * 25
self.health += raise_health
self.potions -= amount
print "\nYour health is now at %d" % self.health
else:
print "\nSorry you don't have any more potions!"
sleep(2)
return self
def list_inventory(self):
actions.clearscreen()
print ("\nName: %s\n"
"Exp. Points: %d\n"
"Potions Held: %d\n"
"Gold: %d pieces\n"
"Current Weapon: %s" %(self.name, self.xp,
self.potions, self.gold, self.current_weapon)
)
        # use independent checks so every owned weapon is listed, not just the first match
        if self.has_pistol is True and "pistol" not in self.weapons:
            self.weapons.append("pistol")
        if self.has_rifle is True and "rifle" not in self.weapons:
            self.weapons.append("rifle")
        if self.has_sword is True and "sword" not in self.weapons:
            self.weapons.append("sword")
print "Weapons: %s" % ", ".join(str(weapon) for weapon in self.weapons)
sleep(4)
def low_health(self):
if self.health <= 60 and self.potions > 0:
print "\n*****DANGER*****\n"
choice = raw_input("\nYour health is currently at %d, a"
"nd you currently have %d potions in your inven"
"tory. \nWould you like to use one? " % (self.health,self.potions)
)
choice.lower()
if choice == 'y' or choice == 'yes':
self.use_potion()
return self
else:
print "\nOk tough guy."
sleep(2)
return self
def set_health(self, newHealth):
self.health = newHealth
print "\nHealth set to %d" % self.health
sleep(2)
return self
def take_damage(self, damage):
self.health -= damage
print "\nYour health is now at %d" % self.health
if self.health < 0:
print "\nYou were slain! Maybe you should carry more health potions with you next time!\n"
exit(0)
sleep(2)
return self
def deal_damage(self,Monster):
if self.current_weapon == "sword":
damage_dealt = superRandint(13,30)
elif self.current_weapon == "pistol":
damage_dealt = superRandint(31,60)
elif self.current_weapon == "rifle":
damage_dealt = superRandint(61,120)
else:
damage_dealt = superRandint(1,12)
Monster.take_damage(damage_dealt,self)
def gain_xp(self,monster_name):
if monster_name == "Dragon":
gained = superRandint(40,150)
elif monster_name == "Gremlin":
gained = superRandint(1,35)
elif monster_name == "Demon":
gained = superRandint(15,50)
elif monster_name == "Zombie":
gained = superRandint(16,75)
else:
gained = superRandint(1,30)
self.xp += gained
print "\nYou gained %d XP!" % gained
return self
def buy_weapon(self):
print "\nS)word: 25 Gold"
print "P)istol: 60 Gold"
print "R)ifle: 120 Gold"
choice = raw_input("\nWhich one would you like to purchase? ").lower()
if choice == 's'and self.gold >= 25:
self.gold -= 25
self.has_sword = True
print "\nA sword has been added to your inventory."
sleep(2)
elif choice == 'p' and self.gold >= 60:
self.gold -= 60
self.has_pistol = True
print "\nA pistol has been added to your inventory."
sleep(2)
elif choice == 'r' and self.gold >= 120:
self.gold -= 120
self.has_rifle = True
print "\nA rifle has been added to your inventory."
sleep(2)
else:
print "\nSorry you don't have enough gold for that purchase."
sleep(2)
actions.visit_shop(self)
return (self)
def set_current_weapon(self):
print "\nCurrent Weapon: " + self.current_weapon
#doesn't show correct weapons after a new weapon is found
#even if weapon is in inventory, method itself works perfectly.
print "Available Weapons: %s" % ", ".join(str(weapon) for weapon in self.weapons)
choice = raw_input("\nUse weapon: ").lower()
if choice == "sword" and self.has_sword is True:
self.current_weapon = "sword"
elif choice == "pistol" and self.has_pistol is True:
self.current_weapon = "pistol"
elif choice == "rifle" and self.has_rifle is True:
self.current_weapon = "rifle"
elif choice == "dagger":
self.current_weapon = "dagger"
else:
print "\nSorry you don't currently have that weapon in your inventory."
print "\nCurrent weapon has been changed to: %s" % self.current_weapon
sleep(2)
return self
| gpl-3.0 | 426,067,084,616,806,100 | 36.017316 | 105 | 0.535376 | false | 3.603456 | false | false | false |
adelina-t/compute-hyperv | hyperv/nova/serialproxy.py | 1 | 3962 | # Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import patcher
import functools
import socket
from nova.i18n import _
from hyperv.nova import constants
from hyperv.nova import vmutils
threading = patcher.original('threading')
def handle_socket_errors(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except socket.error:
self._client_connected.clear()
return wrapper
class SerialProxy(threading.Thread):
def __init__(self, instance_name, addr, port, input_queue,
output_queue, client_connected):
super(SerialProxy, self).__init__()
self.setDaemon(True)
self._instance_name = instance_name
self._addr = addr
self._port = port
self._conn = None
self._input_queue = input_queue
self._output_queue = output_queue
self._client_connected = client_connected
self._stopped = threading.Event()
def _setup_socket(self):
try:
self._sock = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
self._sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR,
1)
self._sock.bind((self._addr, self._port))
self._sock.listen(1)
except socket.error as err:
self._sock.close()
msg = (_('Failed to initialize serial proxy on'
'%(addr)s:%(port)s, handling connections '
'to instance %(instance_name)s. Error: %(error)s') %
{'addr': self._addr, 'port': self._port,
'instance_name': self._instance_name,
'error': err})
raise vmutils.HyperVException(msg)
def stop(self):
self._stopped.set()
self._client_connected.clear()
if self._conn:
self._conn.shutdown(socket.SHUT_RDWR)
self._conn.close()
self._sock.close()
def run(self):
self._setup_socket()
while not self._stopped.isSet():
self._accept_conn()
@handle_socket_errors
def _accept_conn(self):
self._conn, client_addr = self._sock.accept()
self._client_connected.set()
workers = []
for job in [self._get_data, self._send_data]:
worker = threading.Thread(target=job)
worker.setDaemon(True)
worker.start()
workers.append(worker)
for worker in workers:
worker_running = (worker.is_alive() and
worker is not threading.current_thread())
if worker_running:
worker.join()
self._conn.close()
self._conn = None
@handle_socket_errors
def _get_data(self):
while self._client_connected.isSet():
data = self._conn.recv(constants.SERIAL_CONSOLE_BUFFER_SIZE)
if not data:
self._client_connected.clear()
return
self._input_queue.put(data)
@handle_socket_errors
def _send_data(self):
while self._client_connected.isSet():
data = self._output_queue.get_burst()
if data:
self._conn.sendall(data)
| apache-2.0 | -2,187,697,925,085,981,000 | 31.47541 | 78 | 0.572438 | false | 4.232906 | false | false | false |
Auzzy/pyinq | pyinq/tests/test_results.py | 1 | 9565 | """
Copyright (c) 2012-2013, Austin Noto-Moniz ([email protected])
Permission to use, copy, modify, and/or distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright notice
and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
"""
from pyinq.results import *
##### TEST ASSERT RESULTS #####
LINENO = 12
CALL = "assert_true(True)"
FAIL = "FAIL"
TRACE = "TRACE"
EXPECTED = IOError
def test_Result_true():
check_Result(True)
def test_Result_false():
check_Result(False)
def test_AssertResult_true():
check_AssertResult(True)
def test_AssertResult_false():
check_AssertResult(False)
def test_AssertEqualsResult_true():
check_AssertEqualsResult(True,4,4)
def test_AssertEqualsResult_false():
check_AssertEqualsResult(False,4,5)
def test_AssertInResult_true():
check_AssertInResult(True,4,[1,2,4,8,16,32,64])
def test_AssertInResult_false():
check_AssertInResult(False,4,[1,1,2,3,5,8,13])
def test_AssertInstanceResult_true():
check_AssertInstanceResult(True,IOError,Exception)
def test_AssertInstanceResult_false():
check_AssertInstanceResult(False,IOError,WindowsError)
def test_AssertRaisesResult_true():
check_AssertRaisesResult(True,TRACE)
def test_AssertRaisesResult_false():
check_AssertRaisesResult(False,"")
def test_ExpectedErrorResult_true():
check_ExpectedErrorResult(True,LINENO)
def test_ExpectedErrorResult_false():
check_ExpectedErrorResult(False,None)
def test_FailResult():
result = FailResult(LINENO,FAIL)
assert result.lineno == LINENO
assert result.mess == FAIL
assert result.result == False
def test_AssertError():
result = AssertError(TRACE)
assert result.trace == TRACE
assert result.result is None
##### TEST RESULTS #####
NAME = "FOO"
def test_TestResult():
test_result = TestResult(NAME)
assert test_result.name == NAME
assert not test_result.before
assert not test_result.after
def test_TestResult_true():
test_result = TestResult(NAME)
test_result.extend(make_AssertResult_list(True,True,True))
assert test_result.get_status() == True
def test_TestResult_false():
test_result = TestResult(NAME)
test_result.extend(make_AssertResult_list(True,True,False))
assert test_result.get_status() == False
def test_TestClassResult():
cls_result = TestClassResult(NAME)
assert cls_result.name == NAME
assert not cls_result.before
assert not cls_result.after
def test_TestClassResult_true():
cls_result = TestClassResult(NAME)
cls_result.extend(make_TestResult_list(True,True,True))
assert cls_result.get_status() == True
def test_TestClassResult_false():
cls_result = TestClassResult(NAME)
cls_result.extend(make_TestResult_list(True,True,False))
assert cls_result.get_status() == False
def test_TestModuleResult():
mod_result = TestModuleResult(NAME)
assert mod_result.name == NAME
assert not mod_result.before
assert not mod_result.after
def test_TestModuleResult_true():
mod_result = TestModuleResult(NAME)
mod_result.extend(make_TestClassResult_list(True,True,True))
assert mod_result.get_status() == True
def test_TestModuleResult_false():
mod_result = TestModuleResult(NAME)
mod_result.extend(make_TestClassResult_list(True,True,False))
assert mod_result.get_status() == False
def test_TestSuiteResult():
suite_result = TestSuiteResult(NAME)
assert suite_result.name == NAME
assert not suite_result.before
assert not suite_result.after
def test_TestSuiteResult_true():
suite_result = TestSuiteResult(NAME)
suite_result.extend(make_TestModuleResult_list(True,True,True))
assert suite_result.get_status() == True
def test_TestSuiteResult_false():
suite_result = TestModuleResult(NAME)
suite_result.extend(make_TestModuleResult_list(True,True,False))
assert suite_result.get_status() == False
##### TEST ERRORS #####
def construct_call_str(name, args):
arg_str = ",".join([str(arg) for arg in args])
return "{name}({arg_str})".format(name=name,arg_str=arg_str)
def check_PyInqError(func_name, arg_dict, error_cls, result_cls, check_func):
call = construct_call_str(func_name,arg_dict.values())
error = error_cls(LINENO,call,**arg_dict)
result = error.result()
assert error.lineno == LINENO
assert error.call == call
for arg_name in arg_dict:
assert getattr(error,arg_name) == arg_dict[arg_name]
assert type(result) is result_cls
check_func(state=False,lineno=LINENO,call=call,result=result,**arg_dict)
def test_PyInqAssertError():
arg_dict = {}
check_PyInqError("assert_true",arg_dict,PyInqAssertError,AssertResult,check_AssertResult)
def test_PyInqAssertEqualsError():
arg_dict = {"actual":4, "expected":42}
check_PyInqError("assert_equal",arg_dict,PyInqAssertEqualsError,AssertEqualsResult,check_AssertEqualsResult)
def test_PyInqAssertInError():
arg_dict = {"item":4, "collection":[1,1,2,3,5,8,13,21]}
check_PyInqError("assert_in",arg_dict,PyInqAssertInError,AssertInResult,check_AssertInResult)
def test_PyInqAssertInstanceError():
arg_dict = {"obj":IOError, "cls":IndexError}
check_PyInqError("assert_is_instance",arg_dict,PyInqAssertInstanceError,AssertInstanceResult,check_AssertInstanceResult)
def test_PyInqAssertRaisesError():
arg_dict = {"expected":IOError, "trace":""}
check_PyInqError("assert_raises",arg_dict,PyInqAssertRaisesError,AssertRaisesResult,check_AssertRaisesResult)
def test_PyInqFailError():
arg_dict = {"mess":"This is a failure message."}
error = PyInqFailError(LINENO,**arg_dict)
result = error.result()
assert error.lineno == LINENO
assert error.mess == arg_dict["mess"]
assert type(result) is FailResult
assert result.lineno == LINENO
assert result.mess == arg_dict["mess"]
assert result.result == False
##### TEST HELPERS #####
def check_Result(state, result=None):
if not result:
result = Result(state)
assert result.result == state
def check_AssertResult(state, lineno=LINENO, call=CALL, result=None):
if not result:
result = AssertResult(lineno,call,state)
assert result.lineno == lineno
assert result.call == call
assert result.result == state
def check_AssertEqualsResult(state, actual, expected, lineno=LINENO, call=CALL, result=None):
if not result:
result = AssertEqualsResult(lineno,call,state,actual,expected)
assert result.lineno == lineno
assert result.call == call
assert result.result == state
assert result.actual == actual
assert result.expected == expected
def check_AssertInResult(state, item, collection, lineno=LINENO, call=CALL, result=None):
if not result:
result = AssertInResult(lineno,call,state,item,collection)
assert result.lineno == lineno
assert result.call == call
assert result.result == state
assert result.item == item
assert result.collection == collection
def check_AssertInstanceResult(state, obj, cls, lineno=LINENO, call=CALL, result=None):
if not result:
result = AssertInstanceResult(lineno,call,state,obj,cls)
assert result.lineno == lineno
assert result.call == call
assert result.result == state
assert result.obj_name == obj.__class__.__name__
assert result.class_name == cls.__name__
def check_AssertRaisesResult(state, trace, lineno=LINENO, call=CALL, expected=EXPECTED, result=None):
if not result:
result = AssertRaisesResult(lineno,call,state,trace,expected)
assert result.lineno == lineno
assert result.call == call
assert result.result == state
assert remove_whitespace(result.trace) == remove_whitespace(trace)
assert result.expected == expected.__name__
def check_ExpectedErrorResult(state, lineno, expected=EXPECTED, result=None):
if not result:
result = ExpectedErrorResult(state,expected,lineno)
assert result.expected == expected.__name__
assert result.lineno == lineno
assert result.call is None
assert result.result == state
def make_AssertResult_list(*state_list):
return [AssertResult(LINENO,CALL,state) for state in state_list]
def make_TestResult_list(*state_list):
result_list = []
for state in state_list:
result = TestResult(NAME)
result.extend(make_AssertResult_list(state))
result_list.append(result)
return result_list
def make_TestClassResult_list(*state_list):
result_list = []
for state in state_list:
result = TestClassResult(NAME)
result.extend(make_TestResult_list(state))
result_list.append(result)
return result_list
def make_TestModuleResult_list(*state_list):
result_list = []
for state in state_list:
result = TestModuleResult(NAME)
result.extend(make_TestClassResult_list(state))
result_list.append(result)
return result_list
##### UTIL #####
def remove_whitespace(string):
return ''.join([line.strip() for line in string.splitlines()])
| isc | 8,524,622,938,886,892,000 | 30.883333 | 124 | 0.716675 | false | 3.599925 | true | false | false |
neogi/machine-learning | clustering_and_retrieval/gaussian_mixture_model/em-gmm.py | 1 | 11332 | # Imports
import sframe
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
import copy
from PIL import Image
from io import BytesIO
import matplotlib.mlab as mlab
import colorsys
def generate_MoG_data(num_data, means, covariances, weights):
data = []
for i in range(num_data):
# Use np.random.choice and weights to pick a cluster id greater than or equal to 0 and less than num_clusters.
k = np.random.choice(len(weights), 1, p=weights)[0]
# Use np.random.multivariate_normal to create data from this cluster
x = np.random.multivariate_normal(means[k], covariances[k])
data.append(x)
return data
def log_sum_exp(Z):
""" Compute log(\sum_i exp(Z_i)) for some array Z."""
return np.max(Z) + np.log(np.sum(np.exp(Z - np.max(Z))))
def loglikelihood(data, weights, means, covs):
""" Compute the loglikelihood of the data for a Gaussian mixture model with the given parameters. """
num_clusters = len(means)
num_dim = len(data[0])
ll = 0
for d in data:
Z = np.zeros(num_clusters)
for k in range(num_clusters):
# Compute (x-mu)^T * Sigma^{-1} * (x-mu)
delta = np.array(d) - means[k]
exponent_term = np.dot(delta.T, np.dot(np.linalg.inv(covs[k]), delta))
# Compute loglikelihood contribution for this data point and this cluster
Z[k] += np.log(weights[k])
Z[k] -= 1/2. * (num_dim * np.log(2*np.pi) + np.log(np.linalg.det(covs[k])) + exponent_term)
# Increment loglikelihood contribution of this data point across all clusters
ll += log_sum_exp(Z)
return ll
def EM(data, init_means, init_covariances, init_weights, maxiter=1000, thresh=1e-4):
# Make copies of initial parameters, which we will update during each iteration
means = copy.deepcopy(init_means)
covariances = copy.deepcopy(init_covariances)
weights = copy.deepcopy(init_weights)
# Infer dimensions of dataset and the number of clusters
num_data = len(data)
num_dim = len(data[0])
num_clusters = len(means)
# Initialize some useful variables
resp = np.zeros((num_data, num_clusters))
ll = loglikelihood(data, weights, means, covariances)
ll_trace = [ll]
for i in range(maxiter):
if i % 5 == 0:
print("Iteration %s" % i)
# E-step: compute responsibilities
# Update resp matrix so that resp[j, k] is the responsibility of cluster k for data point j.
# Hint: To compute likelihood of seeing data point j given cluster k, use multivariate_normal.pdf.
for j in range(num_data):
for k in range(num_clusters):
# YOUR CODE HERE
resp[j, k] = weights[k] * multivariate_normal.pdf(x=data[j],
mean=means[k],
cov=covariances[k])
row_sums = resp.sum(axis=1)[:, np.newaxis]
resp = resp / row_sums # normalize over all possible cluster assignments
# M-step
# Compute the total responsibility assigned to each cluster, which will be useful when
# implementing M-steps below. In the lectures this is called N^{soft}
counts = np.sum(resp, axis=0)
for k in range(num_clusters):
# Update the weight for cluster k using the M-step update rule for the cluster weight, \hat{\pi}_k.
# YOUR CODE HERE
Nsoft_k = counts[k]
weights[k] = float(Nsoft_k)/float(num_data)
# Update means for cluster k using the M-step update rule for the mean variables.
# This will assign the variable means[k] to be our estimate for \hat{\mu}_k.
weighted_sum = 0
for j in range(num_data):
# YOUR CODE HERE
weighted_sum += resp[j, k] * data[j]
# YOUR CODE HERE
means[k] = weighted_sum/Nsoft_k
# Update covariances for cluster k using the M-step update rule for covariance variables.
# This will assign the variable covariances[k] to be the estimate for \hat{Sigma}_k.
weighted_sum = np.zeros((num_dim, num_dim))
for j in range(num_data):
# YOUR CODE HERE (Hint: Use np.outer on the data[j] and this cluster's mean)
weighted_sum += resp[j, k] * np.outer(data[j] - means[k], data[j] - means[k])
# YOUR CODE HERE
covariances[k] = weighted_sum/Nsoft_k
# Compute the loglikelihood at this iteration
# YOUR CODE HERE
ll_latest = loglikelihood(data, weights, means, covariances)
ll_trace.append(ll_latest)
# Check for convergence in log-likelihood and store
if (ll_latest - ll) < thresh and ll_latest > -np.inf:
break
ll = ll_latest
if i % 5 != 0:
print("Iteration %s" % i)
out = {'weights': weights, 'means': means, 'covs': covariances, 'loglik': ll_trace, 'resp': resp}
return out
def plot_contours(data, means, covs, title):
plt.figure()
plt.plot([x[0] for x in data], [y[1] for y in data],'ko') # data
delta = 0.025
k = len(means)
x = np.arange(-2.0, 7.0, delta)
y = np.arange(-2.0, 7.0, delta)
X, Y = np.meshgrid(x, y)
col = ['green', 'red', 'indigo']
for i in range(k):
mean = means[i]
cov = covs[i]
sigmax = np.sqrt(cov[0][0])
sigmay = np.sqrt(cov[1][1])
sigmaxy = cov[0][1]/(sigmax*sigmay)
Z = mlab.bivariate_normal(X, Y, sigmax, sigmay, mean[0], mean[1], sigmaxy)
plt.contour(X, Y, Z, colors = col[i])
plt.title(title)
plt.rcParams.update({'font.size':16})
plt.tight_layout()
def plot_responsibilities_in_RB(img, resp, title):
N, K = resp.shape
HSV_tuples = [(x*1.0/K, 0.5, 0.9) for x in range(K)]
RGB_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)
R = img['red']
B = img['blue']
resp_by_img_int = [[resp[n][k] for k in range(K)] for n in range(N)]
cols = [tuple(np.dot(resp_by_img_int[n], np.array(RGB_tuples))) for n in range(N)]
plt.figure()
for n in range(len(R)):
plt.plot(R[n], B[n], 'o', c=cols[n])
plt.title(title)
plt.xlabel('R value')
plt.ylabel('B value')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
def get_top_images(assignments, cluster, k=5):
# YOUR CODE HERE
images_in_cluster = assignments[assignments['assignments']==cluster]
print images_in_cluster
top_images = images_in_cluster.topk('probs', k)
return top_images['image']
def save_images(images, prefix):
for i, image in enumerate(images):
Image.open(BytesIO(image._image_data)).save(prefix % i)
# Model parameters
init_means = [
[5, 0], # mean of cluster 1
[1, 1], # mean of cluster 2
[0, 5] # mean of cluster 3
]
init_covariances = [
[[.5, 0.], [0, .5]], # covariance of cluster 1
[[.92, .38], [.38, .91]], # covariance of cluster 2
[[.5, 0.], [0, .5]] # covariance of cluster 3
]
init_weights = [1/4., 1/2., 1/4.] # weights of each cluster
# Generate data
np.random.seed(4)
data = generate_MoG_data(100, init_means, init_covariances, init_weights)
# Plot clusters
plt.figure()
d = np.vstack(data)
plt.plot(d[:,0], d[:,1],'ko')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
# Test EM algorithm
np.random.seed(4)
# Initialization of parameters
chosen = np.random.choice(len(data), 3, replace=False)
initial_means = [data[x] for x in chosen]
initial_covs = [np.cov(data, rowvar=0)] * 3
initial_weights = [1/3.] * 3
# Run EM
results = EM(data, initial_means, initial_covs, initial_weights)
# Parameters after initialization
plot_contours(data, initial_means, initial_covs, 'Initial clusters')
# Parameters after 12 iterations
results = EM(data, initial_means, initial_covs, initial_weights, maxiter=12)
plot_contours(data, results['means'], results['covs'], 'Clusters after 12 iterations')
# Parameters after running EM to convergence
results = EM(data, initial_means, initial_covs, initial_weights)
plot_contours(data, results['means'], results['covs'], 'Final clusters')
# Log-likelihood plot
loglikelihoods = results['loglik']
plt.plot(range(len(loglikelihoods)), loglikelihoods, linewidth=4)
plt.xlabel('Iteration')
plt.ylabel('Log-likelihood')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
# Load image data
images = sframe.SFrame('../data/Week04/images.sf/')
images['rgb'] = images.pack_columns(['red', 'green', 'blue'])['X4']
# Run EM on image data
np.random.seed(1)
# Initalize parameters
init_means = [images['rgb'][x] for x in np.random.choice(len(images), 4, replace=False)]
cov = np.diag([images['red'].var(), images['green'].var(), images['blue'].var()])
init_covariances = [cov, cov, cov, cov]
init_weights = [1/4., 1/4., 1/4., 1/4.]
# Convert rgb data to numpy arrays
img_data = [np.array(i) for i in images['rgb']]
# Run our EM algorithm on the image data using the above initializations.
# This should converge in about 125 iterations
out = EM(img_data, init_means, init_covariances, init_weights)
# Log-likelihood plot
ll = out['loglik']
plt.plot(range(len(ll)),ll,linewidth=4)
plt.xlabel('Iteration')
plt.ylabel('Log-likelihood')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
plt.figure()
plt.plot(range(10,len(ll)),ll[10:],linewidth=4)
plt.xlabel('Iteration')
plt.ylabel('Log-likelihood')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
# Visualize evolution of responsibility
N, K = out['resp'].shape
random_resp = np.random.dirichlet(np.ones(K), N)
plot_responsibilities_in_RB(images, random_resp, 'Random responsibilities')
out = EM(img_data, init_means, init_covariances, init_weights, maxiter=1)
plot_responsibilities_in_RB(images, out['resp'], 'After 1 iteration')
out = EM(img_data, init_means, init_covariances, init_weights, maxiter=20)
plot_responsibilities_in_RB(images, out['resp'], 'After 20 iterations')
# Interpreting clusters
weights = out['weights']
means = out['means']
covariances = out['covs']
rgb = images['rgb']
N = len(images) # number of images
K = len(means) # number of clusters
assignments = [0]*N
probs = [0]*N
for i in range(N):
# Compute the score of data point i under each Gaussian component:
p = np.zeros(K)
for k in range(K):
p[k] = weights[k]*multivariate_normal.pdf(rgb[i], mean=means[k], cov=covariances[k])
# Compute assignments of each data point to a given cluster based on the above scores:
assignments[i] = np.argmax(p)
# For data point i, store the corresponding score under this cluster assignment:
probs[i] = np.max(p)
assignments = sframe.SFrame({'assignments':assignments, 'probs':probs, 'image': images['image']})
for idx in range(4):
get_top_images(assignments, idx)
for component_id in range(4):
print 'Component {0:d}'.format(component_id)
images = get_top_images(assignments, component_id)
save_images(images, 'component_{0:d}_%d.jpg'.format(component_id))
print '\n'
| gpl-3.0 | 2,065,051,852,382,707,500 | 33.867692 | 119 | 0.623544 | false | 3.323167 | true | false | false |
xorpaul/check_mk | modules/automation.py | 1 | 45601 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
class MKAutomationError(Exception):
def __init__(self, reason):
self.reason = reason
def __str__(self):
return self.reason
def do_automation(cmd, args):
try:
if cmd == "get-configuration":
read_config_files(with_conf_d=False)
result = automation_get_configuration()
elif cmd == "get-check-information":
result = automation_get_check_information()
elif cmd == "get-check-manpage":
result = automation_get_check_manpage(args)
elif cmd == "get-check-catalog":
result = automation_get_check_catalog(args)
elif cmd == "notification-get-bulks":
result = automation_get_bulks(args)
else:
read_config_files()
if cmd == "try-inventory":
result = automation_try_discovery(args)
elif cmd == "inventory":
result = automation_discovery(args)
elif cmd == "analyse-service":
result = automation_analyse_service(args)
elif cmd == "active-check":
result = automation_active_check(args)
elif cmd == "get-autochecks":
result = automation_get_autochecks(args)
elif cmd == "set-autochecks":
result = automation_set_autochecks(args)
elif cmd == "reload":
result = automation_restart("reload")
elif cmd == "restart":
result = automation_restart("restart")
elif cmd == "scan-parents":
result = automation_scan_parents(args)
elif cmd == "diag-host":
result = automation_diag_host(args)
elif cmd == "delete-host":
result = automation_delete_host(args)
elif cmd == "rename-host":
result = automation_rename_host(args)
elif cmd == "create-snapshot":
result = automation_create_snapshot(args)
elif cmd == "notification-replay":
result = automation_notification_replay(args)
elif cmd == "notification-analyse":
result = automation_notification_analyse(args)
elif cmd == "update-dns-cache":
result = automation_update_dns_cache()
elif cmd == "bake-agents":
result = automation_bake_agents()
else:
raise MKAutomationError("Automation command '%s' is not implemented." % cmd)
except MKAutomationError, e:
sys.stderr.write("%s\n" % e)
if opt_debug:
raise
output_profile()
sys.exit(1)
except Exception, e:
if opt_debug:
raise
else:
sys.stderr.write("%s\n" % e)
output_profile()
sys.exit(2)
if opt_debug:
import pprint
sys.stdout.write(pprint.pformat(result)+"\n")
else:
sys.stdout.write("%r\n" % (result,))
output_profile()
sys.exit(0)
# Does inventory for *one* host. Possible values for how:
# "new" - find only new services (like -I)
# "remove" - remove exceeding services
# "fixall" - find new, remove exceeding
# "refresh" - drop all services and reinventorize
def automation_discovery(args):
# Error sensivity
if args[0] == "@raiseerrors":
args = args[1:]
on_error = "raise"
os.dup2(os.open("/dev/null", os.O_WRONLY), 2)
else:
on_error = "ignore"
# perform full SNMP scan on SNMP devices?
if args[0] == "@scan":
do_snmp_scan = True
args = args[1:]
else:
do_snmp_scan = False
# use cache files if present?
if args[0] == "@cache":
args = args[1:]
use_caches = True
else:
use_caches = False
if len(args) < 2:
raise MKAutomationError("Need two arguments: new|remove|fixall|refresh HOSTNAME")
how = args[0]
hostnames = args[1:]
counts = {}
failed_hosts = {}
for hostname in hostnames:
counts.setdefault(hostname, [0, 0, 0, 0]) # added, removed, kept, total
try:
# in "refresh" mode we first need to remove all previously discovered
# checks of the host, so that get_host_services() does show us the
# new discovered check parameters.
if how == "refresh":
counts[hostname][1] += remove_autochecks_of(hostname) # this is cluster-aware!
# Compute current state of new and existing checks
services = get_host_services(hostname, use_caches=use_caches,
do_snmp_scan=do_snmp_scan, on_error=on_error)
# Create new list of checks
new_items = {}
for (check_type, item), (check_source, paramstring) in services.items():
if check_source in ("custom", "legacy", "active", "manual"):
continue # this is not an autocheck or ignored and currently not checked
# Note discovered checks that are shadowed by manual checks will vanish
# that way.
if check_source in ("new"):
if how in ("new", "fixall", "refresh"):
counts[hostname][0] += 1 # added
counts[hostname][3] += 1 # total
new_items[(check_type, item)] = paramstring
elif check_source in ("old", "ignored"):
# keep currently existing valid services in any case
new_items[(check_type, item)] = paramstring
counts[hostname][2] += 1 # kept
counts[hostname][3] += 1 # total
elif check_source in ("obsolete", "vanished"):
# keep item, if we are currently only looking for new services
# otherwise fix it: remove ignored and non-longer existing services
if how not in ("fixall", "remove"):
new_items[(check_type, item)] = paramstring
counts[hostname][2] += 1 # kept
counts[hostname][3] += 1 # total
else:
counts[hostname][1] += 1 # removed
# Silently keep clustered services
elif check_source.startswith("clustered_"):
new_items[(check_type, item)] = paramstring
else:
raise MKGeneralException("Unknown check source '%s'" % check_source)
set_autochecks_of(hostname, new_items)
except Exception, e:
if opt_debug:
raise
failed_hosts[hostname] = str(e)
return counts, failed_hosts
def automation_try_discovery(args):
use_caches = False
do_snmp_scan = False
if args[0] == '@noscan':
args = args[1:]
do_snmp_scan = False
use_caches = True
elif args[0] == '@scan':
args = args[1:]
do_snmp_scan = True
use_caches = False
if args[0] == '@raiseerrors':
on_error = "raise"
args = args[1:]
else:
on_error = "ignore"
# TODO: Remove this unlucky option opt_use_cachefile. At least do not
# handle this option so deep in the code. It should only be handled
# by top-level functions.
global opt_use_cachefile, check_max_cachefile_age
opt_use_cachefile = use_caches
if use_caches:
check_max_cachefile_age = inventory_max_cachefile_age
hostname = args[0]
table = get_check_preview(hostname, use_caches=use_caches,
do_snmp_scan=do_snmp_scan, on_error=on_error)
return table
# Set the new list of autochecks. This list is specified by a
# table of (checktype, item). No parameters are specified. Those
# are either (1) kept from existing autochecks or (2) computed
# from a new inventory. Note: we must never convert check parameters
# from python source code to actual values.
def automation_set_autochecks(args):
hostname = args[0]
new_items = eval(sys.stdin.read())
set_autochecks_of(hostname, new_items)
def set_autochecks_of(hostname, new_items):
# A Cluster does not have an autochecks file
# All of its services are located in the nodes instead
# So we cycle through all nodes remove all clustered service
# and add the ones we've got from stdin
if is_cluster(hostname):
for node in nodes_of(hostname):
new_autochecks = []
existing = parse_autochecks_file(node)
for check_type, item, paramstring in existing:
descr = service_description(check_type, item)
if hostname != host_of_clustered_service(node, descr):
new_autochecks.append((check_type, item, paramstring))
for (check_type, item), paramstring in new_items.items():
new_autochecks.append((check_type, item, paramstring))
# write new autochecks file for that host
automation_write_autochecks_file(node, new_autochecks)
else:
existing = parse_autochecks_file(hostname)
# write new autochecks file, but take paramstrings from existing ones
# for those checks which are kept
new_autochecks = []
for ct, item, paramstring in existing:
if (ct, item) in new_items:
new_autochecks.append((ct, item, paramstring))
del new_items[(ct, item)]
for (ct, item), paramstring in new_items.items():
new_autochecks.append((ct, item, paramstring))
# write new autochecks file for that host
automation_write_autochecks_file(hostname, new_autochecks)
def automation_write_autochecks_file(hostname, table):
if not os.path.exists(autochecksdir):
os.makedirs(autochecksdir)
path = "%s/%s.mk" % (autochecksdir, hostname)
f = file(path, "w")
f.write("[\n")
for check_type, item, paramstring in table:
f.write(" (%r, %r, %s),\n" % (check_type, item, paramstring))
f.write("]\n")
if inventory_check_autotrigger and inventory_check_interval:
schedule_inventory_check(hostname)
def automation_get_autochecks(args):
hostname = args[0]
result = []
for ct, item, paramstring in parse_autochecks_file(hostname):
result.append((ct, item, eval(paramstring), paramstring))
return result
def schedule_inventory_check(hostname):
try:
import socket
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(livestatus_unix_socket)
now = int(time.time())
if 'cmk-inventory' in use_new_descriptions_for:
command = "SCHEDULE_FORCED_SVC_CHECK;%s;Check_MK Discovery;%d" % (hostname, now)
else:
# FIXME: Remove this old name handling one day
command = "SCHEDULE_FORCED_SVC_CHECK;%s;Check_MK inventory;%d" % (hostname, now)
s.send("COMMAND [%d] %s\n" % (now, command))
except Exception, e:
if opt_debug:
raise
# Determine the type of the check, and how the parameters are being
# constructed
def automation_analyse_service(args):
global g_hostname
hostname = args[0]
servicedesc = args[1]
g_hostname = hostname # To be sure for all subfunctions
# We just consider types of checks that are managed via WATO.
# We have the following possible types of services:
# 1. manual checks (static_checks) (currently overriding inventorized checks)
# 2. inventorized check
# 3. classical checks
# 4. active checks
# Compute effective check table, in order to remove SNMP duplicates
check_table = get_check_table(hostname, remove_duplicates = True)
# 1. Manual checks
for nr, (checkgroup, entries) in enumerate(static_checks.items()):
for entry in entries:
entry, rule_options = get_rule_options(entry)
if rule_options.get("disabled"):
continue
# Parameters are optional
if len(entry[0]) == 2:
checktype, item = entry[0]
params = None
else:
checktype, item, params = entry[0]
if len(entry) == 3:
taglist, hostlist = entry[1:3]
else:
hostlist = entry[1]
taglist = []
if hosttags_match_taglist(tags_of_host(hostname), taglist) and \
in_extraconf_hostlist(hostlist, hostname):
descr = service_description(checktype, item)
if descr == servicedesc:
return {
"origin" : "static",
"checkgroup" : checkgroup,
"checktype" : checktype,
"item" : item,
"rule_nr" : nr,
"parameters" : params,
}
# 2. Load all autochecks of the host in question and try to find
# our service there
try:
path = "%s/%s.mk" % (autochecksdir, hostname)
for entry in eval(file(path).read()):
if len(entry) == 4: # old format
hn, ct, item, params = entry
else:
ct, item, params = entry # new format without host name
hn = hostname
if (ct, item) not in check_table:
continue # this is a removed duplicate or clustered service
descr = service_description(ct, item)
if hn == hostname and descr == servicedesc:
dlv = check_info[ct].get("default_levels_variable")
if dlv:
fs = factory_settings.get(dlv, None)
else:
fs = None
return {
"origin" : "auto",
"checktype" : ct,
"checkgroup" : check_info[ct].get("group"),
"item" : item,
"inv_parameters" : params,
"factory_settings" : fs,
"parameters" : compute_check_parameters(hostname, ct, item, params),
}
except:
if opt_debug:
raise
# 3. Classical checks
custchecks = host_extra_conf(hostname, custom_checks)
for nr, entry in enumerate(custchecks):
desc = entry["service_description"]
if desc == servicedesc:
result = {
"origin" : "classic",
"rule_nr" : nr,
}
if "command_line" in entry: # Only active checks have a command line
result["command_line"] = entry["command_line"]
return result
# 4. Active checks
for acttype, rules in active_checks.items():
entries = host_extra_conf(hostname, rules)
if entries:
act_info = active_check_info[acttype]
for params in entries:
description = act_info["service_description"](params)
if description == servicedesc:
return {
"origin" : "active",
"checktype" : acttype,
"parameters" : params,
}
return {} # not found
# TODO: Was ist mit Clustern???
# TODO: Klappt das mit automatischen verschatten von SNMP-Checks (bei dual Monitoring)
def automation_delete_host(args):
hostname = args[0]
for path in [
"%s/%s" % (precompiled_hostchecks_dir, hostname),
"%s/%s.py" % (precompiled_hostchecks_dir, hostname),
"%s/%s.mk" % (autochecksdir, hostname),
"%s/%s" % (logwatch_dir, hostname),
"%s/%s" % (counters_directory, hostname),
"%s/%s" % (tcp_cache_dir, hostname),
"%s/%s.*" % (tcp_cache_dir, hostname)]:
os.system("rm -rf '%s'" % path)
def automation_restart(job = "restart", use_rushd = True):
# make sure, Nagios does not inherit any open
# filedescriptors. This really happens, e.g. if
# check_mk is called by WATO via Apache. Nagios inherits
# the open file where Apache is listening for incoming
# HTTP connections. Really.
if monitoring_core == "nagios":
objects_file = nagios_objects_file
for fd in range(3, 256):
try:
os.close(fd)
except:
pass
else:
objects_file = var_dir + "/core/config"
if job == "restart":
job = "reload" # force reload for CMC
# os.closerange(3, 256) --> not available in older Python versions
class null_file:
def write(self, stuff):
pass
def flush(self):
pass
# Deactivate stdout by introducing fake file without filedescriptor
old_stdout = sys.stdout
sys.stdout = null_file()
try:
backup_path = None
if not lock_objects_file():
raise MKAutomationError("Cannot activate changes. "
"Another activation process is currently in progresss")
if os.path.exists(objects_file):
backup_path = objects_file + ".save"
os.rename(objects_file, backup_path)
else:
backup_path = None
try:
if monitoring_core == "nagios":
create_nagios_config(file(objects_file, "w"))
else:
do_create_cmc_config(opt_cmc_relfilename, use_rushd = use_rushd)
if "do_bake_agents" in globals() and bake_agents_on_restart:
do_bake_agents()
except Exception, e:
if backup_path:
os.rename(backup_path, objects_file)
if opt_debug:
raise
raise MKAutomationError("Error creating configuration: %s" % e)
if do_check_nagiosconfig():
if backup_path:
os.remove(backup_path)
if monitoring_core == "cmc":
do_pack_config()
else:
do_precompile_hostchecks()
do_core_action(job)
else:
if backup_path:
os.rename(backup_path, objects_file)
else:
os.remove(objects_file)
raise MKAutomationError("Configuration for monitoring core is invalid. Rolling back.")
except Exception, e:
if backup_path and os.path.exists(backup_path):
os.remove(backup_path)
if opt_debug:
raise
raise MKAutomationError(str(e))
sys.stdout = old_stdout
def automation_get_configuration():
# We read the list of variable names from stdin since
# that could be too much for the command line
variable_names = eval(sys.stdin.read())
result = {}
for varname in variable_names:
if varname in globals():
if not hasattr(globals()[varname], '__call__'):
result[varname] = globals()[varname]
return result
def automation_get_check_catalog(args):
def path_prefix_matches(p, op):
if op and not p:
return False
elif not op:
return True
else:
return p[0] == op[0] and path_prefix_matches(p[1:], op[1:])
read_manpage_catalog()
tree = {}
if len(args) > 0:
only_path = tuple(args)
else:
only_path = ()
for path, entries in g_manpage_catalog.items():
if not path_prefix_matches(path, only_path):
continue
subtree = tree
for component in path[:-1]:
subtree = subtree.setdefault(component, {})
subtree[path[-1]] = map(strip_manpage_entry, entries)
for p in only_path:
tree = tree[p]
return tree, manpage_catalog_titles
def strip_manpage_entry(entry):
return dict([ (k,v) for (k,v) in entry.items() if k in [
"name", "agents", "title"
]])
def automation_get_check_information():
manuals = all_manuals()
checks = {}
for check_type, check in check_info.items():
manfile = manuals.get(check_type)
if manfile:
title = file(manfile).readline().strip().split(":", 1)[1].strip()
else:
title = check_type
checks[check_type] = { "title" : title }
if check["group"]:
checks[check_type]["group"] = check["group"]
checks[check_type]["service_description"] = check.get("service_description","%s")
checks[check_type]["snmp"] = check_uses_snmp(check_type)
return checks
def automation_get_check_manpage(args):
if len(args) != 1:
raise MKAutomationError("Need exactly one argument.")
check_type = args[0]
manpage = load_manpage(args[0])
# Add a few informations from check_info. Note: active checks do not
# have an entry in check_info
if check_type in check_info:
manpage["type"] = "check_mk"
info = check_info[check_type]
for key in [ "snmp_info", "has_perfdata", "service_description" ]:
if key in info:
manpage[key] = info[key]
if "." in check_type:
section = check_type.split(".")[0]
if section in check_info and "snmp_info" in check_info[section]:
manpage["snmp_info"] = check_info[section]["snmp_info"]
if "group" in info:
manpage["group"] = info["group"]
# Assume active check
elif check_type.startswith("check_"):
manpage["type"] = "active"
return manpage
def automation_scan_parents(args):
settings = {
"timeout" : int(args[0]),
"probes" : int(args[1]),
"max_ttl" : int(args[2]),
"ping_probes" : int(args[3]),
}
hostnames = args[4:]
traceroute_prog = find_bin_in_path('traceroute')
if not traceroute_prog:
raise MKAutomationError("Cannot find binary <tt>traceroute</tt> in search path.")
try:
gateways = scan_parents_of(hostnames, silent=True, settings=settings)
return gateways
except Exception, e:
raise MKAutomationError(str(e))
def automation_diag_host(args):
import subprocess
hostname, test, ipaddress, snmp_community = args[:4]
agent_port, snmp_timeout, snmp_retries = map(int, args[4:7])
cmd = args[7]
if not ipaddress:
try:
ipaddress = lookup_ipaddress(hostname)
except:
raise MKGeneralException("Cannot resolve hostname %s into IP address" % hostname)
try:
if test == 'ping':
p = subprocess.Popen('ping -A -i 0.2 -c 2 -W 5 %s 2>&1' % ipaddress, shell = True, stdout = subprocess.PIPE)
response = p.stdout.read()
return (p.wait(), response)
elif test == 'agent':
if not cmd:
cmd = get_datasource_program(hostname, ipaddress)
if cmd:
return 0, get_agent_info_program(cmd)
else:
return 0, get_agent_info_tcp(hostname, ipaddress, agent_port or None)
elif test == 'traceroute':
traceroute_prog = find_bin_in_path('traceroute')
if not traceroute_prog:
return 1, "Cannot find binary <tt>traceroute</tt>."
else:
p = subprocess.Popen('traceroute -n %s 2>&1' % ipaddress, shell = True, stdout = subprocess.PIPE)
response = p.stdout.read()
return (p.wait(), response)
elif test.startswith('snmp'):
if snmp_community:
explicit_snmp_communities[hostname] = snmp_community
# override timing settings if provided
if snmp_timeout or snmp_retries:
timing = {}
if snmp_timeout:
timing['timeout'] = snmp_timeout
if snmp_retries:
timing['retries'] = snmp_retries
snmp_timing.insert(0, (timing, [], [hostname]))
# SNMP versions
global bulkwalk_hosts, snmpv2c_hosts
if test == 'snmpv2':
bulkwalk_hosts = [hostname]
elif test == 'snmpv2_nobulk':
bulkwalk_hosts = []
snmpv2c_hosts = [hostname]
elif test == 'snmpv1':
bulkwalk_hosts = []
snmpv2c_hosts = []
else:
return 1, "SNMP command not implemented"
data = get_snmp_table(hostname, ipaddress, None, ('.1.3.6.1.2.1.1', ['1.0', '4.0', '5.0', '6.0']))
if data:
return 0, 'sysDescr:\t%s\nsysContact:\t%s\nsysName:\t%s\nsysLocation:\t%s\n' % tuple(data[0])
else:
return 1, 'Got empty SNMP response'
else:
return 1, "Command not implemented"
except Exception, e:
if opt_debug:
raise
return 1, str(e)
# WATO calls this automation when a host has been renamed. We need to change
# several file and directory names.
# HIRN: Hier auch das neue Format berücksichtigen! Andererseits sollte
# eigentlich auch nix Schlimmes passieren, wenn der Hostname *nicht* in
# der Datei steht.
def automation_rename_host(args):
oldname = args[0]
newname = args[1]
actions = []
# Autochecks: simply read and write out the file again. We do
# not store a host name here anymore - but old versions did.
# by rewriting we get rid of the host name.
acpath = autochecksdir + "/" + oldname + ".mk"
if os.path.exists(acpath):
old_autochecks = parse_autochecks_file(oldname)
out = file(autochecksdir + "/" + newname + ".mk", "w")
out.write("[\n")
for ct, item, paramstring in old_autochecks:
out.write(" (%r, %r, %s),\n" % (ct, item, paramstring))
out.write("]\n")
out.close()
os.remove(acpath) # Remove old file
actions.append("autochecks")
# At this place WATO already has changed it's configuration. All further
# data might be changed by the still running core. So we need to stop
# it now.
core_was_running = core_is_running()
if core_was_running:
do_core_action("stop", quiet=True)
# Rename temporary files of the host
for d in [ "cache", "counters" ]:
if rename_host_file(tmp_dir + "/" + d + "/", oldname, newname):
actions.append(d)
if rename_host_dir(tmp_dir + "/piggyback/", oldname, newname):
actions.append("piggyback-load")
# Rename piggy files *created* by the host
piggybase = tmp_dir + "/piggyback/"
if os.path.exists(piggybase):
for piggydir in os.listdir(piggybase):
if rename_host_file(piggybase + piggydir, oldname, newname):
actions.append("piggyback-pig")
# Logwatch
if rename_host_dir(logwatch_dir, oldname, newname):
actions.append("logwatch")
# SNMP walks
if rename_host_file(snmpwalks_dir, oldname, newname):
actions.append("snmpwalk")
# OMD-Stuff. Note: The question really is whether this should be
# included in Check_MK. The point is - however - that all these
# actions need to take place while the core is stopped.
if omd_root:
actions += omd_rename_host(oldname, newname)
# Start monitoring again. In case of CMC we need to ignore
# any configuration created by the CMC Rushahead daemon
if core_was_running:
global ignore_ip_lookup_failures
ignore_ip_lookup_failures = True # force config generation to succeed. The core *must* start.
automation_restart("start", use_rushd = False)
if monitoring_core == "cmc":
try:
os.remove(var_dir + "/core/config.rush")
os.remove(var_dir + "/core/config.rush.id")
except:
pass
if failed_ip_lookups:
actions.append("ipfail")
return actions
def rename_host_dir(basedir, oldname, newname):
import shutil
if os.path.exists(basedir + "/" + oldname):
if os.path.exists(basedir + "/" + newname):
shutil.rmtree(basedir + "/" + newname)
os.rename(basedir + "/" + oldname, basedir + "/" + newname)
return 1
return 0
def rename_host_file(basedir, oldname, newname):
if os.path.exists(basedir + "/" + oldname):
if os.path.exists(basedir + "/" + newname):
os.remove(basedir + "/" + newname)
os.rename(basedir + "/" + oldname, basedir + "/" + newname)
return 1
return 0
# This functions could be moved out of Check_MK.
def omd_rename_host(oldname, newname):
oldregex = oldname.replace(".", "[.]")
newregex = newname.replace(".", "[.]")
actions = []
# Temporarily stop processing of performance data
npcd_running = os.path.exists(omd_root + "/tmp/pnp4nagios/run/npcd.pid")
if npcd_running:
os.system("omd stop npcd >/dev/null 2>&1 </dev/null")
rrdcache_running = os.path.exists(omd_root + "/tmp/run/rrdcached.sock")
if rrdcache_running:
os.system("omd stop rrdcached >/dev/null 2>&1 </dev/null")
# Fix pathnames in XML files
dirpath = omd_root + "/var/pnp4nagios/perfdata/" + oldname
os.system("sed -i 's@/perfdata/%s/@/perfdata/%s/@' %s/*.xml 2>/dev/null" % (oldname, newname, dirpath))
# RRD files
if rename_host_dir(rrd_path, oldname, newname):
actions.append("rrd")
# entries of rrdcached journal
dirpath = omd_root + "/var/rrdcached/"
if not os.system("sed -i 's@/perfdata/%s/@/perfdata/%s/@' "
"%s/var/rrdcached/rrd.journal.* 2>/dev/null" % ( oldregex, newregex, omd_root)):
actions.append("rrdcached")
# Spoolfiles of NPCD
if not os.system("sed -i 's/HOSTNAME::%s /HOSTNAME::%s /' "
"%s/var/pnp4nagios/perfdata.dump %s/var/pnp4nagios/spool/perfdata.* 2>/dev/null" % (
oldregex, newregex, omd_root, omd_root)):
actions.append("pnpspool")
if rrdcache_running:
os.system("omd start rrdcached >/dev/null 2>&1 </dev/null")
if npcd_running:
os.system("omd start npcd >/dev/null 2>&1 </dev/null")
# Logfiles and history files of CMC and Nagios. Problem
# here: the exact place of the hostname varies between the
# various log entry lines
sed_commands = r'''
s/(INITIAL|CURRENT) (HOST|SERVICE) STATE: %(old)s;/\1 \2 STATE: %(new)s;/
s/(HOST|SERVICE) (DOWNTIME |FLAPPING |)ALERT: %(old)s;/\1 \2ALERT: %(new)s;/
s/PASSIVE (HOST|SERVICE) CHECK: %(old)s;/PASSIVE \1 CHECK: %(new)s;/
s/(HOST|SERVICE) NOTIFICATION: ([^;]+);%(old)s;/\1 NOTIFICATION: \2;%(new)s;/
''' % { "old" : oldregex, "new" : newregex }
patterns = [
"var/check_mk/core/history",
"var/check_mk/core/archive/*",
"var/nagios/nagios.log",
"var/nagios/archive/*",
]
one_matched = False
for pattern in patterns:
command = "sed -ri --file=/dev/fd/0 %s/%s >/dev/null 2>&1" % (omd_root, pattern)
p = os.popen(command, "w")
p.write(sed_commands)
if not p.close():
one_matched = True
if one_matched:
actions.append("history")
# State retention (important for Downtimes, Acknowledgements, etc.)
if monitoring_core == "nagios":
if not os.system("sed -ri 's/^host_name=%s$/host_name=%s/' %s/var/nagios/retention.dat" % (
oldregex, newregex, omd_root)):
actions.append("retention")
else: # CMC
# Create a file "renamed_hosts" with the information about the
# renaming of the hosts. The core will honor this file when it
# reads the status file with the saved state.
file(var_dir + "/core/renamed_hosts", "w").write("%s\n%s\n" % (oldname, newname))
actions.append("retention")
# NagVis maps
if not os.system("sed -i 's/^[[:space:]]*host_name=%s[[:space:]]*$/host_name=%s/' "
"%s/etc/nagvis/maps/*.cfg 2>/dev/null" % (
oldregex, newregex, omd_root)):
actions.append("nagvis")
return actions
def automation_create_snapshot(args):
try:
import tarfile, time, cStringIO, shutil, subprocess, thread, traceback, threading
from hashlib import sha256
the_data = sys.stdin.read()
data = eval(the_data)
snapshot_name = data["snapshot_name"]
snapshot_dir = var_dir + "/wato/snapshots"
work_dir = snapshot_dir + "/workdir/%s" % snapshot_name
if not os.path.exists(work_dir):
os.makedirs(work_dir)
# Open / initialize files
filename_target = "%s/%s" % (snapshot_dir, snapshot_name)
filename_work = "%s/%s.work" % (work_dir, snapshot_name)
filename_status = "%s/%s.status" % (work_dir, snapshot_name)
filename_pid = "%s/%s.pid" % (work_dir, snapshot_name)
filename_subtar = ""
current_domain = ""
file(filename_target, "w").close()
file(filename_status, "w").close()
def wipe_directory(path):
for entry in os.listdir(path):
if entry not in [ '.', '..' ]:
p = path + "/" + entry
if os.path.isdir(p):
shutil.rmtree(p)
else:
os.remove(p)
lock_status_file = threading.Lock()
def update_status_file(domain = None, infotext = None):
lock_status_file.acquire()
if os.path.exists(filename_status):
if domain:
statusinfo[domain] = infotext
statusfile = file(filename_status, "w")
statusfile.write("comment:%s\n" % data.get("comment"," ").encode("utf-8"))
status_list = list(statusinfo.items())
status_list.sort()
for status in status_list:
statusfile.write("%s.tar.gz:%s\n" % status)
lock_status_file.release()
# Set initial status info
statusinfo = {}
for name in data.get("domains", {}).keys():
statusinfo[name] = "TODO:0"
update_status_file()
# Now fork into our own process to have an asynchronous backup creation
try:
pid = os.fork()
if pid > 0:
# Exit parent process
return
# Decouple from parent environment
os.chdir("/")
os.umask(0)
os.setsid()
# Close all fd except stdin,out,err
for fd in range(3, 256):
try:
os.close(fd)
except OSError:
pass
sys.stdout.flush()
sys.stderr.flush()
si = os.open("/dev/null", os.O_RDONLY)
so = os.open("/dev/null", os.O_WRONLY)
os.dup2(si, 0)
os.dup2(so, 1)
os.dup2(so, 2)
os.close(si)
os.close(so)
except OSError, e:
raise MKAutomationError(str(e))
# Save pid of working process.
file(filename_pid, "w").write("%d" % os.getpid())
def cleanup():
wipe_directory(work_dir)
os.rmdir(work_dir)
def check_should_abort():
if not os.path.exists(filename_target):
cleanup()
sys.exit(0)
def get_basic_tarinfo(name):
tarinfo = tarfile.TarInfo(name)
tarinfo.mtime = time.time()
tarinfo.uid = 0
tarinfo.gid = 0
tarinfo.mode = 0644
tarinfo.type = tarfile.REGTYPE
return tarinfo
def update_subtar_size(seconds):
while current_domain != None:
try:
if current_domain:
if os.path.exists(path_subtar):
update_status_file(current_domain, "Processing:%d" % os.stat(path_subtar).st_size)
except:
pass
time.sleep(seconds)
def snapshot_secret():
path = default_config_dir + '/snapshot.secret'
try:
return file(path).read()
except IOError:
# create a secret during first use
try:
s = os.urandom(256)
except NotImplementedError:
s = sha256(time.time())
file(path, 'w').write(s)
return s
#
# Initialize the snapshot tar file and populate with initial information
#
tar_in_progress = tarfile.open(filename_work, "w")
# Add comment to tar file
if data.get("comment"):
tarinfo = get_basic_tarinfo("comment")
tarinfo.size = len(data.get("comment").encode("utf-8"))
tar_in_progress.addfile(tarinfo, cStringIO.StringIO(data.get("comment").encode("utf-8")))
if data.get("created_by"):
tarinfo = get_basic_tarinfo("created_by")
tarinfo.size = len(data.get("created_by"))
tar_in_progress.addfile(tarinfo, cStringIO.StringIO(data.get("created_by")))
# Add snapshot type
snapshot_type = data.get("type")
tarinfo = get_basic_tarinfo("type")
tarinfo.size = len(snapshot_type)
tar_in_progress.addfile(tarinfo, cStringIO.StringIO(snapshot_type))
# Close tar in progress, all other files are included via command line tar
tar_in_progress.close()
#
# Process domains (sorted)
#
subtar_update_thread = thread.start_new_thread(update_subtar_size, (1,))
domains = map(lambda x: x, data.get("domains").items())
domains.sort()
subtar_info = {}
for name, info in domains:
current_domain = name # Set name for update size thread
prefix = info.get("prefix","")
exclude_options = ""
for entry in info.get("exclude", []):
exclude_options += "--exclude=%s " % entry
check_should_abort()
filename_subtar = "%s.tar.gz" % name
path_subtar = "%s/%s" % (work_dir, filename_subtar)
if info.get("backup_command"):
command = info.get("backup_command") % {
"prefix" : prefix,
"path_subtar" : path_subtar,
"work_dir" : work_dir
}
else:
paths = map(lambda x: x[1] == "" and "." or x[1], info.get("paths", []))
command = "tar czf %s --ignore-failed-read --force-local %s -C %s %s" % \
(path_subtar, exclude_options, prefix, " ".join(paths))
proc = subprocess.Popen(command, shell=True, stdin=None, close_fds=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=prefix)
stdout, stderr = proc.communicate()
exit_code = proc.wait()
# Allow exit codes 0 and 1 (files changed during backup)
if exit_code not in [0, 1]:
raise MKAutomationError("Error while creating backup of %s (Exit Code %d) - %s.\n%s" %
(current_domain, exit_code, stderr, command))
subtar_size = os.stat(path_subtar).st_size
subtar_hash = sha256(file(path_subtar).read()).hexdigest()
subtar_signed = sha256(subtar_hash + snapshot_secret()).hexdigest()
subtar_info[filename_subtar] = (subtar_hash, subtar_signed)
# Append tar.gz subtar to snapshot
command = "tar --append --file=%s %s ; rm %s" % \
(filename_work, filename_subtar, filename_subtar)
proc = subprocess.Popen(command, shell=True, cwd = work_dir)
proc.communicate()
exit_code = proc.wait()
if exit_code != 0:
raise MKAutomationError("Error on adding backup domain %s to tarfile" % current_domain)
current_domain = ""
update_status_file(name, "Finished:%d" % subtar_size)
# Now add the info file which contains hashes and signed hashes for
# each of the subtars
info = ''.join([ '%s %s %s\n' % (k, v[0], v[1]) for k, v in subtar_info.items() ]) + '\n'
tar_in_progress = tarfile.open(filename_work, "a")
tarinfo = get_basic_tarinfo("checksums")
tarinfo.size = len(info)
tar_in_progress.addfile(tarinfo, cStringIO.StringIO(info))
tar_in_progress.close()
current_domain = None
shutil.move(filename_work, filename_target)
cleanup()
except Exception, e:
cleanup()
raise MKAutomationError(str(e))
def automation_notification_replay(args):
nr = args[0]
return notification_replay_backlog(int(nr))
def automation_notification_analyse(args):
nr = args[0]
return notification_analyse_backlog(int(nr))
def automation_get_bulks(args):
only_ripe = args[0] == "1"
return find_bulks(only_ripe)
def automation_active_check(args):
hostname, plugin, item = args
actchecks = []
needed_commands = []
if plugin == "custom":
custchecks = host_extra_conf(hostname, custom_checks)
for entry in custchecks:
if entry["service_description"] == item:
command_line = replace_core_macros(hostname, entry.get("command_line", ""))
if command_line:
command_line = autodetect_plugin(command_line)
return execute_check_plugin(command_line)
else:
return -1, "Passive check - cannot be executed"
else:
rules = active_checks.get(plugin)
if rules:
entries = host_extra_conf(hostname, rules)
if entries:
act_info = active_check_info[plugin]
for params in entries:
description = act_info["service_description"](params).replace('$HOSTNAME$', hostname)
if description == item:
args = act_info["argument_function"](params)
command_line = replace_core_macros(hostname, act_info["command_line"].replace("$ARG1$", args))
return execute_check_plugin(command_line)
def load_resource_file(macros):
try:
for line in file(omd_root + "/etc/nagios/resource.cfg"):
line = line.strip()
if not line or line[0] == '#':
continue
varname, value = line.split('=', 1)
macros[varname] = value
except:
if opt_debug:
raise
# Simulate replacing some of the more important macros of hosts. We
# cannot use dynamic macros, of course. Note: this will not work
# without OMD, since we do not know the value of $USER1$ and $USER2$
# here. We could read the Nagios resource.cfg file, but we do not
# know for sure the place of that either.
def replace_core_macros(hostname, commandline):
macros = {
"$HOSTNAME$" : hostname,
"$HOSTADDRESS$" : lookup_ipaddress(hostname),
}
load_resource_file(macros)
for varname, value in macros.items():
commandline = commandline.replace(varname, value)
return commandline
def execute_check_plugin(commandline):
try:
p = os.popen(commandline + " 2>&1")
output = p.read().strip()
ret = p.close()
if not ret:
status = 0
else:
if ret & 0xff == 0:
status = ret / 256
else:
status = 3
if status < 0 or status > 3:
status = 3
output = output.split("|",1)[0] # Drop performance data
return status, output
except Exception, e:
if opt_debug:
raise
return 3, "UNKNOWN - Cannot execute command: %s" % e
def automation_update_dns_cache():
return do_update_dns_cache()
def automation_bake_agents():
if "do_bake_agents" in globals():
return do_bake_agents()
| gpl-2.0 | -6,314,698,494,620,364,000 | 35.774194 | 120 | 0.551162 | false | 3.909465 | false | false | false |
nilmtk/nilmtk | nilmtk/dataset_converters/greend/convert_greend.py | 1 | 6684 | from os import listdir, getcwd
from os.path import join, isdir, isfile, dirname, abspath
import pandas as pd
import numpy as np
import datetime
import time
from nilmtk.datastore import Key
from nilmtk.measurement import LEVEL_NAMES
from nilm_metadata import convert_yaml_to_hdf5
import warnings
import numpy as np
from io import StringIO
from multiprocessing import Pool
from nilmtk.utils import get_module_directory
def _get_blocks(filename):
'''
Return a list of dataframes from a GREEND CSV file
GREEND files can be interpreted as multiple CSV blocks concatenated into
a single file per date. Since the columns of the individual blocks can
vary in a single file, they need to be read separately.
There are some issues we need to handle in the converter:
- the headers from the multiple blocks
- corrupted data (lines with null chars, broken lines)
- more fields than specified in header
'''
block_data = None
dfs = []
previous_header = None
print(filename)
# Use float64 for timestamps and float32 for the rest of the columns
dtypes = {}
dtypes['timestamp'] = np.float64
def _process_block():
if block_data is None:
return
block_data.seek(0)
try:
# ignore extra fields for some files
error_bad_lines = not (
('building5' in filename and 'dataset_2014-02-04.csv' in filename)
)
df = pd.read_csv(block_data, index_col='timestamp', dtype=dtypes, error_bad_lines=error_bad_lines)
except: #(pd.errors.ParserError, ValueError, TypeError):
print("ERROR", filename)
raise
df.index = pd.to_datetime(df.index, unit='s')
df = df.tz_localize("UTC").tz_convert("CET").sort_index()
dfs.append(df)
block_data.close()
special_check = (
('dataset_2014-01-28.csv' in filename and 'building5' in filename) or
('dataset_2014-09-02.csv' in filename and 'building6' in filename)
)
with open(filename, 'r') as f:
for line in f:
# At least one file have a bunch of nulls present, let's clean the data
line = line.strip('\0')
if 'time' in line:
# Found a new block
if not line.startswith('time'):
# Some lines are corrupted, e.g. 1415605814.541311,0.0,NULL,NUtimestamp,000D6F00029C2918...
line = line[line.find('time'):]
if previous_header == line.strip():
# Same exact header, we can treat it as the same block
# print('Skipping split')
continue
# Using a defaultdict for the dtypes didn't work with read_csv,
# so we fill a normal dict when we find the columns
cols = line.strip().split(',')[1:]
for col in cols:
dtypes[col] = np.float32
# print('Found new block')
_process_block()
block_data = StringIO()
previous_header = line.strip()
if special_check:
if ('0.072.172091508705606' in line or
'1409660828.0753369,NULL,NUL' == line):
continue
block_data.write(line)
# Process the remaining block
_process_block()
return (filename, dfs)
def _get_houses(greend_path):
house_list = listdir(greend_path)
return [h for h in house_list if isdir(join(greend_path,h))]
def convert_greend(greend_path, hdf_filename, use_mp=True):
"""
Parameters
----------
greend_path : str
The root path of the greend dataset.
hdf_filename : str
The destination HDF5 filename (including path and suffix).
use_mp : bool
Defaults to True. Use multiprocessing to load the files for
each building.
"""
store = pd.HDFStore(hdf_filename, 'w', complevel=5, complib='zlib')
houses = sorted(_get_houses(greend_path))
print('Houses found:', houses)
if use_mp:
pool = Pool()
h = 1 # nilmtk counts buildings from 1 not from 0 as we do, so everything is shifted by 1
for house in houses:
print('Loading', house)
abs_house = join(greend_path, house)
dates = [d for d in listdir(abs_house) if d.startswith('dataset')]
target_filenames = [join(abs_house, date) for date in dates]
if use_mp:
house_data = pool.map(_get_blocks, target_filenames)
# Ensure the blocks are sorted by date and make a plain list
house_data_dfs = []
for date, data in sorted(house_data, key=lambda x: x[0]):
house_data_dfs.extend(data)
else:
house_data_dfs = []
for fn in target_filenames:
house_data_dfs.extend(_get_blocks(fn)[1])
overall_df = pd.concat(house_data_dfs, sort=False).sort_index()
dups_in_index = overall_df.index.duplicated(keep='first')
if dups_in_index.any():
print("Found duplicated values in index, dropping them.")
overall_df = overall_df[~dups_in_index]
m = 1
for column in overall_df.columns:
print("meter {}: {}".format(m, column))
key = Key(building=h, meter=m)
print("Putting into store...")
df = overall_df[column].to_frame() #.dropna(axis=0)
# if drop_duplicates:
# print("Dropping duplicated values in data...")
# df = df.drop_duplicates()
df.columns = pd.MultiIndex.from_tuples([('power', 'active')])
df.columns.set_names(LEVEL_NAMES, inplace=True)
store.put(str(key), df, format = 'table')
m += 1
# print('Flushing store...')
# store.flush()
h += 1
store.close()
# retrieve the dataset metadata in the metadata subfolder
metadata_dir = join(get_module_directory(), 'dataset_converters', 'greend', 'metadata')
convert_yaml_to_hdf5(metadata_dir, hdf_filename)
#is only called when this file is the main file... only test purpose
if __name__ == '__main__':
t1 = time.time()
convert_greend('GREEND_0-2_300615',
'GREEND_0-2_300615.h5')
dt = time.time() - t1
print()
print()
print('Time passed: {}:{}'.format(int(dt/60), int(dt%60)))
| apache-2.0 | 5,599,120,602,864,636,000 | 34.553191 | 111 | 0.565679 | false | 4.024082 | false | false | false |
CIGIHub/greyjay | greyjay/content_notes/apps.py | 1 | 1139 | from django.apps import AppConfig
class EndNotesAppConfig(AppConfig):
name = 'greyjay.content_notes'
label = 'content_notes'
verbose_name = "Wagtail end notes"
def ready(self):
from greyjay.articles.models import ArticlePage
from wagtail.wagtailadmin.edit_handlers import (
MultiFieldPanel,
FieldPanel,
InlinePanel,
ObjectList
)
notes_panel = [
MultiFieldPanel(
[
FieldPanel('endnotes_heading'),
FieldPanel('endnote_identifier_style'),
InlinePanel('endnote_links', label="End Notes"),
],
heading="End Notes Section"
),
MultiFieldPanel(
[
FieldPanel('citations_heading'),
InlinePanel('citation_links', label="Citations"),
],
heading="Citations Section"
),
]
ArticlePage.edit_handler.children.insert(
-1,
ObjectList(notes_panel, heading="Notes")
)
| mit | 8,242,842,528,254,381,000 | 28.205128 | 69 | 0.507463 | false | 4.973799 | false | false | false |
rew4332/tensorflow | tensorflow/python/ops/array_ops.py | 1 | 94317 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Casting
TensorFlow provides several operations that you can use to cast tensor data
types in your graph.
@@string_to_number
@@to_double
@@to_float
@@to_bfloat16
@@to_int32
@@to_int64
@@cast
@@saturate_cast
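For example (an informal sketch; `tf` denotes the imported TensorFlow module):
```python
x = tf.constant([1.8, 2.2], dtype=tf.float32)
tf.cast(x, tf.int32)  # ==> [1, 2], the fractional part is truncated
```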
## Shapes and Shaping
TensorFlow provides several operations that you can use to determine the shape
of a tensor and change the shape of a tensor.
@@shape
@@size
@@rank
@@reshape
@@squeeze
@@expand_dims
@@meshgrid
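A quick illustration (an informal sketch assuming `tf` is the imported TensorFlow module):
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.shape(t)            # ==> [2, 3]
tf.reshape(t, [3, 2])  # ==> [[1, 2], [3, 4], [5, 6]]
tf.expand_dims(t, 0)   # result has shape [1, 2, 3]
```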
## Slicing and Joining
TensorFlow provides several operations to slice or extract parts of a tensor,
or join multiple tensors together.
@@slice
@@strided_slice
@@split
@@tile
@@pad
@@concat
@@pack
@@unpack
@@reverse_sequence
@@reverse
@@transpose
@@extract_image_patches
@@space_to_batch
@@batch_to_space
@@space_to_depth
@@depth_to_space
@@gather
@@gather_nd
@@dynamic_partition
@@dynamic_stitch
@@boolean_mask
@@one_hot
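As a small illustration (an informal sketch assuming `tf` is the imported TensorFlow module):
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.slice(t, [0, 1], [2, 2])  # ==> [[2, 3], [5, 6]]
tf.tile(t, [2, 1])           # ==> [[1, 2, 3], [4, 5, 6], [1, 2, 3], [4, 5, 6]]
```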
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import logging_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
# pylint: enable=wildcard-import
# Used for slicing to specify a new dimension of size 1
newaxis = None
# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_baseslice = slice
# Aliases for some automatically-generated names.
listdiff = gen_array_ops.list_diff
def shape(input, name=None):
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```python
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
shape(t) ==> [2, 2, 3]
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
return shape_internal(input, name, optimize=True)
def shape_internal(input, name=None, optimize=True):
"""Returns the shape of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the shape as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Shape", [input]) as name:
if isinstance(input, ops.SparseTensor):
return gen_math_ops.cast(input.shape, dtypes.int32)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
# Static shape inference can be incorrect when loops are involved: disable
# shape optimization in this case to avoid generating invalid constants.
optimize &= input_tensor.graph._get_control_flow_context() is None
if optimize and input_shape.is_fully_defined():
return constant(input_shape.as_list(), dtypes.int32, name=name)
return gen_array_ops.shape(input, name=name)
def size(input, name=None):
"""Returns the size of a tensor.
This operation returns an integer representing the number of elements in
`input`.
For example:
```python
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
size(t) ==> 12
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
return size_internal(input, name, optimize=True)
def size_internal(input, name=None, optimize=True):
"""Returns the size of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the size as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Size", [input]) as name:
if isinstance(input, ops.SparseTensor):
return gen_math_ops._prod(gen_math_ops.cast(input.shape, dtypes.int32), 0,
name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
# Static shape inference can be incorrect when loops are involved: disable
# shape optimization in this case to avoid generating invalid constants.
optimize &= input_tensor.graph._get_control_flow_context() is None
if optimize and input_shape.is_fully_defined():
return constant(input_shape.num_elements(), dtypes.int32, name=name)
return gen_array_ops.size(input, name=name)
def rank(input, name=None):
"""Returns the rank of a tensor.
This operation returns an integer representing the rank of `input`.
For example:
```python
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
# shape of tensor 't' is [2, 2, 3]
rank(t) ==> 3
```
**Note**: The rank of a tensor is not the same as the rank of a matrix. The
rank of a tensor is the number of indices required to uniquely select each
element of the tensor. Rank is also known as "order", "degree", or "ndims."
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
return rank_internal(input, name, optimize=True)
def rank_internal(input, name=None, optimize=True):
"""Returns the rank of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the rank as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Rank", [input]) as name:
if isinstance(input, ops.SparseTensor):
return gen_array_ops.size(input.shape, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
# Static shape inference can be incorrect when loops are involved: disable
# shape optimization in this case to avoid generating invalid constants.
optimize &= input_tensor.graph._get_control_flow_context() is None
if optimize and input_shape.ndims is not None:
return constant(input_shape.ndims, dtypes.int32, name=name)
return gen_array_ops.rank(input, name=name)
# DEPRECATED use init_ops.zeros_initializer
# TODO(irving) Move it to init_ops.py
def zeros_initializer(shape, dtype=dtypes.float32):
"""An adaptor for zeros() to match the Initializer spec."""
return zeros(shape, dtype)
def _SliceHelper(tensor, slice_spec):
"""Overload for Tensor.__getitem__.
This operation extracts the specified region from the tensor.
  The notation is similar to NumPy, with the restriction that
  it currently only supports basic indexing. That means that
  using a tensor as a slice index is not currently allowed.
Args:
tensor: An ops.Tensor object.
slice_spec: The arguments to Tensor.__getitem__.
Returns:
The appropriate slice of "tensor", based on "slice_spec".
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, or Ellipsis.
"""
if not isinstance(slice_spec, (list, tuple)):
slice_spec = [slice_spec]
begin, end, strides = [], [], []
index = 0
new_axis_mask, shrink_axis_mask = 0, 0
begin_mask, end_mask = 0, 0
ellipsis_mask = 0
for s in slice_spec:
if isinstance(s, _baseslice):
strides.append(s.step if s.step is not None else 1)
# python doesn't always use None when constructing ranges
# for example a[:] gives slice(None,sys.maxsize,None)
# whereas a[::1] gives slice(None,None,None)
if s.start is not None and s.start is not sys.maxsize:
begin.append(s.start)
else:
begin.append(0)
begin_mask |= (1 << index)
if s.stop is not None and s.stop != sys.maxsize:
end.append(s.stop)
else:
end.append(0)
end_mask |= (1 << index)
elif s is Ellipsis:
begin.append(0)
end.append(0)
strides.append(1)
ellipsis_mask |= (1 << index)
elif s is newaxis:
begin.append(0)
end.append(0)
strides.append(1)
new_axis_mask |= (1 << index)
else:
try:
s = int(s)
except TypeError:
raise TypeError("Bad slice index %s of type %s" % (s, type(s)))
begin.append(s)
end.append(s + 1)
strides.append(1)
shrink_axis_mask |= (1 << index)
index += 1
  # pack may involve no tensors, so we must use name_scope to make sure we
  # operate on the correct graph.
with ops.name_scope(None, "strided_slice",
[tensor] + begin + end + strides) as name:
begin_pack, end_pack, strides_pack = pack(begin), pack(end), pack(strides)
return strided_slice(tensor,
begin_pack,
end_pack,
strides_pack,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
name=name)
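# A rough, illustrative mapping of how the helper above translates
# __getitem__ specs into strided_slice arguments (assuming a tensor `t` of
# shape [4, 5]; values shown are not exhaustive):
#   t[1:3, ::-1]  ~  strided_slice(t, [1, 0], [3, 0], [1, -1],
#                                  begin_mask=0b10, end_mask=0b10)
#   t[..., 2]     ~  strided_slice(t, [0, 2], [0, 3], [1, 1],
#                                  ellipsis_mask=0b01, shrink_axis_mask=0b10)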
# pylint: disable=undefined-variable,protected-access
def slice(input_, begin, size, name=None):
"""Extracts a slice from a tensor.
This operation extracts a slice of size `size` from a tensor `input` starting
at the location specified by `begin`. The slice `size` is represented as a
tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
of `input` that you want to slice. The starting location (`begin`) for the
slice is represented as an offset in each dimension of `input`. In other
words, `begin[i]` is the offset into the 'i'th dimension of `input` that you
want to slice from.
`begin` is zero-based; `size` is one-based. If `size[i]` is -1,
all remaining elements in dimension i are included in the
slice. In other words, this is equivalent to setting:
`size[i] = input.dim_size(i) - begin[i]`
This operation requires that:
`0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`
For example:
```
# 'input' is [[[1, 1, 1], [2, 2, 2]],
# [[3, 3, 3], [4, 4, 4]],
# [[5, 5, 5], [6, 6, 6]]]
tf.slice(input, [1, 0, 0], [1, 1, 3]) ==> [[[3, 3, 3]]]
tf.slice(input, [1, 0, 0], [1, 2, 3]) ==> [[[3, 3, 3],
[4, 4, 4]]]
tf.slice(input, [1, 0, 0], [2, 1, 3]) ==> [[[3, 3, 3]],
[[5, 5, 5]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
size: An `int32` or `int64` `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
return gen_array_ops._slice(input_, begin, size, name=name)
# pylint: disable=invalid-name
def strided_slice(input_,
begin,
end,
strides,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
name=None):
"""Extracts a strided slice from a tensor.
To a first order, this operation extracts a slice of size `end - begin`
from a tensor `input`
starting at the location specified by `begin`. The slice continues by adding
`stride` to the `begin` index until all dimensions are not less than `end`.
Note that components of stride can be negative, which causes a reverse
slice.
  This operation can be thought of as an encoding of a numpy style sliced
range. Given a python slice input[<spec0>, <spec1>, ..., <specn>]
this function will be called as follows.
  `begin`, `end`, and `strides` will all be of length n. In general, n is
  not the same as the rank of `input`.
For the ith spec,
`begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask`,
  and `shrink_axis_mask` will have the ith bit corresponding to
the ith spec.
If the ith bit of `begin_mask` is non-zero, `begin[i]` is ignored and
the fullest possible range in that dimension is used instead.
`end_mask` works analogously, except with the end range.
`foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
`foo[::-1]` reverses a tensor with shape 8.
  If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions
as needed will be inserted between other dimensions. Only one
non-zero bit is allowed in `ellipsis_mask`.
For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
equivalent to `foo[3:5,:,:,4:5]` and
`foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.
  If the ith bit of `new_axis_mask` is one, then `begin[i]`,
  `end[i]`, and `strides[i]` are ignored and a new length 1 dimension is
added at this point in the output tensor.
  For example `foo[3:5,4]` on a 10x8 tensor produces a shape 2 tensor
  (with `shrink_axis_mask` being 1<<1 == 2), whereas `foo[3:5,4:5]`
  produces a shape 2x1 tensor.
  If the ith bit of `shrink_axis_mask` is one, then `begin[i]`,
  `end[i]`, and `strides[i]` are used to do a slice in the appropriate
  dimension, but the output tensor will be reduced in dimensionality
  by one. This is only valid when the ith slice selects exactly one element.
  NOTE: `begin` and `end` are zero-indexed.
`strides` entries must be non-zero.
```
# 'input' is [[[1, 1, 1], [2, 2, 2]],
# [[3, 3, 3], [4, 4, 4]],
# [[5, 5, 5], [6, 6, 6]]]
  tf.strided_slice(input, [1, 0, 0], [2, 1, 3], [1, 1, 1]) ==> [[[3, 3, 3]]]
  tf.strided_slice(input, [1, 0, 0], [2, 2, 3], [1, 1, 1]) ==> [[[3, 3, 3],
                                                                 [4, 4, 4]]]
  tf.strided_slice(input, [1, 1, 0], [2, -1, 3], [1, -1, 1]) ==> [[[4, 4, 4],
                                                                   [3, 3, 3]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
end: An `int32` or `int64` `Tensor`.
strides: An `int32` or `int64` `Tensor`.
begin_mask: An `int32` mask.
end_mask: An `int32` mask.
ellipsis_mask: An `int32` mask.
new_axis_mask: An `int32` mask.
shrink_axis_mask: An `int32` mask.
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
return gen_array_ops.strided_slice(input_,
begin,
end,
strides,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
ops.Tensor._override_operator("__getitem__", _SliceHelper)
def pack(values, axis=0, name="pack"):
"""Packs a list of rank-`R` tensors into one rank-`(R+1)` tensor.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the `axis` dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`;
if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
Etc.
For example:
```prettyprint
# 'x' is [1, 4]
# 'y' is [2, 5]
# 'z' is [3, 6]
pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
```
This is the opposite of unpack. The numpy equivalent is
tf.pack([x, y, z]) = np.asarray([x, y, z])
Args:
values: A list of `Tensor` objects with the same shape and type.
axis: An `int`. The axis to pack along. Defaults to the first dimension.
Supports negative indexes.
name: A name for this operation (optional).
Returns:
output: A packed `Tensor` with the same type as `values`.
Raises:
ValueError: If `axis` is out of the range [-(R+1), R+1).
"""
if axis == 0:
try:
# If the input is a constant list, it can be converted to a constant op
return ops.convert_to_tensor(values, name=name)
except (TypeError, ValueError):
pass # Input list contains non-constant tensors
value_shape = ops.convert_to_tensor(values[0], name=name).get_shape()
if value_shape.ndims is not None:
expanded_num_dims = value_shape.ndims + 1
if axis < -expanded_num_dims or axis >= expanded_num_dims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -expanded_num_dims, expanded_num_dims))
return gen_array_ops._pack(values, axis=axis, name=name)
# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
"""Converts the given list or tuple to a tensor by packing.
Args:
list_or_tuple: A (possibly nested) list or tuple containing a tensor.
dtype: The element type of the returned tensor.
name: A name for the returned tensor.
Returns:
A `tf.Tensor` with value equivalent to `list_or_tuple`.
"""
must_pack = False
converted_elems = []
with ops.name_scope(name) as scope:
for i, elem in enumerate(list_or_tuple):
if ops.is_dense_tensor_like(elem):
if dtype is not None and elem.dtype.base_dtype != dtype:
raise TypeError(
"Cannot convert a list containing a tensor of dtype "
"%s to %s (Tensor is: %r)" % (elem.dtype, dtype, elem))
converted_elems.append(elem)
must_pack = True
elif isinstance(elem, (list, tuple)):
converted_elem = _autopacking_helper(elem, dtype, str(i))
if ops.is_dense_tensor_like(converted_elem):
must_pack = True
converted_elems.append(converted_elem)
else:
converted_elems.append(elem)
if must_pack:
elems_as_tensors = []
for i, elem in enumerate(converted_elems):
if ops.is_dense_tensor_like(elem):
elems_as_tensors.append(elem)
else:
# NOTE(mrry): This is inefficient, but it enables us to
# handle the case where the list arguments are other
# convertible-to-tensor types, such as numpy arrays.
elems_as_tensors.append(
constant_op.constant(elem, dtype=dtype, name=str(i)))
return gen_array_ops._pack(elems_as_tensors, name=scope)
else:
return converted_elems
def _get_dtype_from_nested_lists(list_or_tuple):
"""Returns the dtype of any tensor-like object in `list_or_tuple`, if found.
Args:
list_or_tuple: A list or tuple representing an object that can be
converted to a `tf.Tensor`.
Returns:
The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
such object exists.
"""
for elem in list_or_tuple:
if ops.is_dense_tensor_like(elem):
return elem.dtype.base_dtype
elif isinstance(elem, (list, tuple)):
maybe_dtype = _get_dtype_from_nested_lists(elem)
if maybe_dtype is not None:
return maybe_dtype
return None
def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
"""Tensor conversion function that automatically packs arguments."""
if as_ref:
return NotImplemented
inferred_dtype = _get_dtype_from_nested_lists(v)
if inferred_dtype is None:
# We did not find any tensor-like objects in the nested lists, so defer to
# other conversion functions.
return NotImplemented
if dtype is not None and dtype != inferred_dtype:
return NotImplemented
return _autopacking_helper(v, inferred_dtype, name or "packed")
# pylint: enable=invalid-name
# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value.
ops.register_tensor_conversion_function(
(list, tuple), _autopacking_conversion_function, 99)
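# For illustration (assuming `t` is a scalar float32 Tensor): with the
# conversion function above registered, ops.convert_to_tensor([t, 2.0])
# packs the list into a rank-1 float32 tensor, roughly equivalent to
# pack([t, constant_op.constant(2.0, dtype=dtypes.float32)]).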
def unpack(value, num=None, axis=0, name="unpack"):
"""Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
If `num` is not specified (the default), it is inferred from `value`'s shape.
If `value.shape[axis]` is not known, `ValueError` is raised.
For example, given a tensor of shape `(A, B, C, D)`;
If `axis == 0` then the i'th tensor in `output` is the slice
`value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.
(Note that the dimension unpacked along is gone, unlike `split`).
If `axis == 1` then the i'th tensor in `output` is the slice
`value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.
Etc.
This is the opposite of pack. The numpy equivalent is
tf.unpack(x, n) = list(x)
Args:
value: A rank `R > 0` `Tensor` to be unpacked.
num: An `int`. The length of the dimension `axis`. Automatically inferred
if `None` (the default).
axis: An `int`. The axis to unpack along. Defaults to the first
dimension. Supports negative indexes.
name: A name for the operation (optional).
Returns:
The list of `Tensor` objects unpacked from `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
ValueError: If `axis` is out of the range [-R, R).
"""
if num is None:
value = ops.convert_to_tensor(value)
value_shape = value.get_shape()
if value_shape.ndims is not None:
if axis < -value_shape.ndims or axis >= value_shape.ndims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -value_shape.ndims, value_shape.ndims))
num = value_shape[axis].value
if num is None:
raise ValueError("Cannot infer num from shape %s" % value_shape)
return gen_array_ops._unpack(value, num=num, axis=axis, name=name)
def concat(concat_dim, values, name="concat"):
"""Concatenates tensors along one dimension.
Concatenates the list of tensors `values` along dimension `concat_dim`. If
`values[i].shape = [D0, D1, ... Dconcat_dim(i), ...Dn]`, the concatenated
result has shape
[D0, D1, ... Rconcat_dim, ...Dn]
where
Rconcat_dim = sum(Dconcat_dim(i))
That is, the data from the input tensors is joined along the `concat_dim`
dimension.
The number of dimensions of the input tensors must match, and all dimensions
except `concat_dim` must be equal.
For example:
```python
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat(0, [t1, t2]) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat(1, [t1, t2]) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat(0, [t3, t4])) ==> [4, 3]
tf.shape(tf.concat(1, [t3, t4])) ==> [2, 6]
```
Note: If you are concatenating along a new axis consider using pack.
E.g.
```python
tf.concat(axis, [tf.expand_dims(t, axis) for t in tensors])
```
can be rewritten as
```python
tf.pack(tensors, axis=axis)
```
Args:
concat_dim: 0-D `int32` `Tensor`. Dimension along which to concatenate.
values: A list of `Tensor` objects or a single `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` resulting from concatenation of the input tensors.
"""
if not isinstance(values, (list, tuple)):
values = [values]
# TODO(mrry): Change to return values?
if len(values) == 1: # Degenerate case of one tensor.
# Make a throwaway call to convert_to_tensor to make sure
# that concat_dim is of the correct type, and make sure that
# the returned tensor is a scalar.
# TODO(keveman): Implement a standalone type and shape checker.
with ops.name_scope(name) as scope:
ops.convert_to_tensor(concat_dim,
name="concat_dim",
dtype=dtypes.int32).get_shape(
).assert_is_compatible_with(tensor_shape.scalar())
return identity(values[0], name=scope)
return gen_array_ops._concat(concat_dim=concat_dim,
values=values,
name=name)
@ops.RegisterShape("Pack")
def _PackShape(op):
input_shape = op.inputs[0].get_shape()
if input_shape.ndims is None:
return [tensor_shape.unknown_shape()]
for inp in op.inputs[1:]:
input_shape = input_shape.merge_with(inp.get_shape())
input_shape = input_shape.as_list()
input_shape.insert(op.get_attr("axis"), len(op.inputs))
return [tensor_shape.TensorShape(input_shape)]
@ops.RegisterShape("Unpack")
def _UnpackShape(op):
input_shape = op.inputs[0].get_shape()
if input_shape.ndims is None:
return [tensor_shape.unknown_shape()] * op.get_attr("num")
input_shape = input_shape.as_list()
del input_shape[op.get_attr("axis")]
return [tensor_shape.TensorShape(input_shape)] * op.get_attr("num")
@ops.RegisterShape("Concat")
def _ConcatShape(op):
concat_dim = tensor_util.constant_value(op.inputs[0])
if concat_dim is None:
# Return an unknown shape with the same rank as the inputs, or an
# unknown rank if no input's rank is known.
rank = None
for value in op.inputs[1:]:
if rank is not None:
value.get_shape().assert_has_rank(rank)
else:
rank = value.get_shape().ndims
if rank == 0:
raise ValueError("Can't concatenate scalars (use tf.pack instead)")
return [tensor_shape.unknown_shape(ndims=rank)]
else:
# Merge all the non-concat dims, and sum the concat dim to make an
# output shape.
concat_dim = int(concat_dim)
if concat_dim < 0:
raise ValueError("Expected concat_dim >= 0, but got %d" % concat_dim)
output_shape = op.inputs[1].get_shape()
for value in op.inputs[2:]:
value_shape = value.get_shape()
if value_shape.ndims is not None and concat_dim >= value_shape.ndims:
raise ValueError("Expected concat_dim in range [0, %d), but got %d" %
(value_shape.ndims, concat_dim))
before = output_shape[:concat_dim].merge_with(value_shape[:concat_dim])
at = output_shape[concat_dim] + value_shape[concat_dim]
after = output_shape[
concat_dim + 1:].merge_with(value_shape[concat_dim + 1:])
output_shape = before.concatenate(at).concatenate(after)
return [output_shape]
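# Worked example for the shape function above (illustrative): with
# concat_dim == 1 and input shapes [2, 3] and [2, 5], the non-concat
# dimensions are merged ([2]) and the concat dimensions are summed
# (3 + 5 = 8), giving an output shape of [2, 8].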
@ops.RegisterShape("ConcatOffset")
def _ConcatOffsetShape(op):
return [x.get_shape() for x in op.inputs[1:]]
def boolean_mask(tensor, mask, name="boolean_mask"):
"""Apply boolean mask to tensor. Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = [True, False, True, False]
boolean_mask(tensor, mask) ==> [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
name: A name for this operation (optional).
Returns:
Tensor populated by entries in `tensor` corresponding to `True` values in
`mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = [True, False, True]
boolean_mask(tensor, mask) ==> [[1, 2], [5, 6]]
```
"""
def _apply_mask_1d(reshaped_tensor, mask):
"""Mask tensor along dimension 0 with a 1-D mask."""
indices = squeeze(where(mask), squeeze_dims=[1])
return gather(reshaped_tensor, indices)
with ops.name_scope(name, values=[tensor, mask]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
mask = ops.convert_to_tensor(mask, name="mask")
shape_mask = mask.get_shape()
ndims_mask = shape_mask.ndims
shape_tensor = tensor.get_shape()
if ndims_mask == 0:
raise ValueError("mask cannot be scalar.")
if ndims_mask is None:
raise ValueError(
"mask dimensions must be specified, even if some dimensions are None"
". E.g. shape=[None] is ok, but shape=None is not.")
shape_tensor[:ndims_mask].assert_is_compatible_with(shape_mask)
tensor = reshape(tensor, concat(0, [[-1], shape(tensor)[ndims_mask:]]))
first_dim = shape_tensor[:ndims_mask].num_elements()
tensor.set_shape(
tensor_shape.as_shape([first_dim])
.concatenate(shape_tensor[ndims_mask:]))
mask = reshape(mask, [-1])
return _apply_mask_1d(tensor, mask)
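# Sketch of what the implementation above does (illustrative): for a tensor of
# shape [2, 3, 4] and a mask of shape [2, 3], the leading K = 2 dimensions are
# flattened so the tensor becomes [6, 4] and the mask becomes [6]; the rows
# whose mask entry is True are then gathered, yielding shape [num_true, 4].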
def sparse_mask(a, mask_indices, name=None):
"""Masks elements of `IndexedSlices`.
Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
contains a subset of the slices of `a`. Only the slices at indices not
specified in `mask_indices` are returned.
This is useful when you need to extract a subset of slices in an
`IndexedSlices` object.
For example:
```python
# `a` contains slices at indices [12, 26, 37, 45] from a large tensor
# with shape [1000, 10]
a.indices => [12, 26, 37, 45]
tf.shape(a.values) => [4, 10]
# `b` will be the subset of `a` slices at its second and third indices, so
# we want to mask its first and last indices (which are at absolute
# indices 12, 45)
b = tf.sparse_mask(a, [12, 45])
b.indices => [26, 37]
tf.shape(b.values) => [2, 10]
```
Args:
    a: An `IndexedSlices` instance.
    mask_indices: Indices of elements to mask.
    name: A name for the operation (optional).
Returns:
The masked `IndexedSlices` instance.
"""
with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
indices = a.indices
out_indices, to_gather = listdiff(indices, mask_indices)
out_values = gather(a.values, to_gather, name=name)
return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
def split(split_dim, num_split, value, name="split"):
"""Splits a tensor into `num_split` tensors along one dimension.
Splits `value` along dimension `split_dim` into `num_split` smaller tensors.
Requires that `num_split` evenly divide `value.shape[split_dim]`.
For example:
```python
# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(1, 3, value)
tf.shape(split0) ==> [5, 10]
```
Note: If you are splitting along an axis by the length of that axis, consider
using unpack, e.g.
```python
num_items = t.get_shape()[axis].value
[tf.squeeze(s, [axis]) for s in tf.split(axis, num_items, t)]
```
can be rewritten as
```python
tf.unpack(t, axis=axis)
```
Args:
split_dim: A 0-D `int32` `Tensor`. The dimension along which to split.
Must be in the range `[0, rank(value))`.
num_split: A Python integer. The number of ways to split.
value: The `Tensor` to split.
name: A name for the operation (optional).
Returns:
`num_split` `Tensor` objects resulting from splitting `value`.
"""
return gen_array_ops._split(split_dim=split_dim,
num_split=num_split,
value=value,
name=name)
@ops.RegisterShape("Reverse")
def _ReverseShape(op):
input_shape = op.inputs[0].get_shape()
dims_shape = op.inputs[1].get_shape().with_rank(1)
if dims_shape[0].value is not None:
input_shape = input_shape.with_rank(dims_shape[0])
if input_shape.ndims is not None and input_shape.ndims > 8:
raise ValueError(
"tf.reverse() does not work on tensors with more than 8 dimensions")
return [input_shape]
def transpose(a, perm=None, name="transpose"):
"""Transposes `a`. Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors.
For example:
```python
# 'x' is [[1 2 3]
# [4 5 6]]
tf.transpose(x) ==> [[1 4]
[2 5]
[3 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) ==> [[1 4]
[2 5]
[3 6]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
# 'x' is [[[1 2 3]
# [4 5 6]]
# [[7 8 9]
# [10 11 12]]]
# Take the transpose of the matrices in dimension-0
tf.transpose(x, perm=[0, 2, 1]) ==> [[[1 4]
[2 5]
[3 6]]
[[7 10]
[8 11]
[9 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
Returns:
A transposed `Tensor`.
"""
with ops.name_scope(name, "transpose", [a]) as name:
if perm is None:
rank = gen_array_ops.rank(a)
perm = (rank - 1) - gen_math_ops._range(0, rank, 1)
ret = gen_array_ops.transpose(a, perm, name=name)
# NOTE(mrry): Setting the shape explicitly because
# reverse is not handled by the shape function.
input_shape = ret.op.inputs[0].get_shape().dims
if input_shape is not None:
ret.set_shape(input_shape[::-1])
else:
ret = gen_array_ops.transpose(a, perm, name=name)
return ret
# pylint: disable=invalid-name
def batch_matrix_transpose(a, name="batch_matrix_transpose"):
"""Transposes last two dimensions of batch matrix `a`.
For example:
```python
# Matrix with no batch dimension.
# 'x' is [[1 2 3]
# [4 5 6]]
  tf.batch_matrix_transpose(x) ==> [[1 4]
                                    [2 5]
                                    [3 6]]
# Matrix with two batch dimensions.
# x.shape is [1, 2, 3, 4]
# tf.batch_matrix_transpose(x) is shape [1, 2, 4, 3]
```
Args:
a: A `Tensor` with `rank >= 2`.
name: A name for the operation (optional).
Returns:
A transposed batch matrix `Tensor`.
Raises:
ValueError: If `a` is determined statically to have `rank < 2`.
"""
with ops.name_scope(name, values=[a]):
a = ops.convert_to_tensor(a, name="a")
# If we know the number of dimensions (statically), we can do two things:
# 1. Check that `a` is a (batch) matrix.
# 2. Use a python list for perm. This preserves static shape information
# and avoids extra computations.
a_shape = a.get_shape()
ndims = a_shape.ndims
if ndims is not None:
if ndims < 2:
raise ValueError(
"Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
"%s" % a_shape)
perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
else:
a_rank = rank(a)
perm = concat(
0, (gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]))
return transpose(a, perm=perm)
# pylint: enable=invalid-name
def zeros(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to zero.
This operation returns a tensor of type `dtype` with shape `shape` and
all elements set to zero.
For example:
```python
tf.zeros([3, 4], int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
```
Args:
shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
with ops.name_scope(name, "zeros", [shape]) as name:
try:
shape = tensor_shape.as_shape(shape)
output = constant(0, shape=shape, dtype=dtype, name=name)
except (TypeError, ValueError):
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
output = fill(shape, constant(0, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtypes.as_dtype(dtype).base_dtype
return output
def zeros_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
# 'tensor' is [[1, 2, 3], [4, 5, 6]]
tf.zeros_like(tensor) ==> [[0, 0, 0], [0, 0, 0]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, or `complex128`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to zero.
"""
with ops.name_scope(name, "zeros_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
if dtype is not None and tensor.dtype != dtype:
ret = zeros(shape_internal(tensor, optimize=optimize), dtype, name=name)
ret.set_shape(tensor.get_shape())
return ret
else:
return gen_array_ops._zeros_like(tensor, name=name)
def ones_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to 1.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to 1. Optionally, you can
specify a new type (`dtype`) for the returned tensor.
For example:
```python
# 'tensor' is [[1, 2, 3], [4, 5, 6]]
tf.ones_like(tensor) ==> [[1, 1, 1], [1, 1, 1]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, or `complex128`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to 1.
"""
with ops.name_scope(name, "ones_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
ones_shape = shape_internal(tensor, optimize=optimize)
if dtype is None:
dtype = tensor.dtype
ret = ones(ones_shape, dtype=dtype, name=name)
ret.set_shape(tensor.get_shape())
return ret
def ones(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to 1.
This operation returns a tensor of type `dtype` with shape `shape` and all
elements set to 1.
For example:
```python
tf.ones([2, 3], int32) ==> [[1, 1, 1], [1, 1, 1]]
```
Args:
shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to 1.
"""
with ops.name_scope(name, "ones", [shape]) as name:
try:
shape = tensor_shape.as_shape(shape)
output = constant(1, shape=shape, dtype=dtype, name=name)
except (TypeError, ValueError):
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
output = fill(shape, constant(1, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtypes.as_dtype(dtype).base_dtype
return output
def placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a tensor that will be always fed.
**Important**: This tensor will produce an error if evaluated. Its value must
be fed using the `feed_dict` optional argument to `Session.run()`,
`Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
rand_array = np.random.rand(1024, 1024)
print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.
```
Args:
dtype: The type of elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a tensor of any shape.
name: A name for the operation (optional).
Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
"""
shape = tensor_shape.as_shape(shape)
if shape.is_fully_defined():
dim_list = shape.as_list()
else:
dim_list = []
ret = gen_array_ops._placeholder(
dtype=dtype,
shape=dim_list,
name=name)
ret.set_shape(shape)
return ret
def sparse_placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a sparse tensor that will be always fed.
**Important**: This sparse tensor will produce an error if evaluated.
Its value must be fed using the `feed_dict` optional argument to
`Session.run()`, `Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.sparse_placeholder(tf.float32)
y = tf.sparse_reduce_sum(x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
print(sess.run(y, feed_dict={
x: tf.SparseTensorValue(indices, values, shape)})) # Will succeed.
print(sess.run(y, feed_dict={
x: (indices, values, shape)})) # Will succeed.
sp = tf.SparseTensor(indices=indices, values=values, shape=shape)
  sp_value = sp.eval(session=sess)
print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.
```
Args:
dtype: The type of `values` elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a sparse tensor of any shape.
name: A name for prefixing the operations (optional).
Returns:
A `SparseTensor` that may be used as a handle for feeding a value, but not
evaluated directly.
"""
if shape is None:
shape = placeholder(
dtypes.int64, name=(name + "/shape") if name is not None else None)
else:
shape = ops.convert_to_tensor(
shape, name=(name + "/shape") if name is not None else None)
return ops.SparseTensor(
values=placeholder(
dtype, name=(name + "/values") if name is not None else None),
indices=placeholder(
dtypes.int64,
name=(name + "/indices") if name is not None else None),
shape=shape
)
def pad(tensor, paddings, mode="CONSTANT", name=None): # pylint: disable=invalid-name
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
# 't' is [[1, 2, 3], [4, 5, 6]].
# 'paddings' is [[1, 1,], [2, 2]].
# rank of 't' is 2.
pad(t, paddings, "CONSTANT") ==> [[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 2, 3, 0, 0],
[0, 0, 4, 5, 6, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]
pad(t, paddings, "REFLECT") ==> [[6, 5, 4, 5, 6, 5, 4],
[3, 2, 1, 2, 3, 2, 1],
[6, 5, 4, 5, 6, 5, 4],
[3, 2, 1, 2, 3, 2, 1]]
pad(t, paddings, "SYMMETRIC") ==> [[2, 1, 1, 2, 3, 3, 2],
[2, 1, 1, 2, 3, 3, 2],
[5, 4, 4, 5, 6, 6, 5],
[5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
if mode == "CONSTANT":
return gen_array_ops._pad(tensor, paddings, name=name)
if mode == "REFLECT":
return gen_array_ops._mirror_pad(tensor,
paddings,
mode="REFLECT",
name=name)
if mode == "SYMMETRIC":
return gen_array_ops._mirror_pad(tensor,
paddings,
mode="SYMMETRIC",
name=name)
raise ValueError("Unknown padding mode: %s" % mode)
def meshgrid(*args, **kwargs):
"""Broadcasts parameters for evaluation on an N-D grid.
Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
of N-D coordinate arrays for evaluating expressions on an N-D grid.
Notes:
`meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
When the `indexing` argument is set to 'xy' (the default), the broadcasting
instructions for the first two dimensions are swapped.
Examples:
Calling `X, Y = meshgrid(x, y)` with the tensors
```prettyprint
x = [1, 2, 3]
y = [4, 5, 6]
```
results in
```prettyprint
X = [[1, 1, 1],
[2, 2, 2],
[3, 3, 3]]
Y = [[4, 5, 6],
[4, 5, 6],
[4, 5, 6]]
```
Args:
*args: `Tensor`s with rank 1
indexing: Either 'xy' or 'ij' (optional, default: 'xy')
name: A name for the operation (optional).
Returns:
outputs: A list of N `Tensor`s with rank N
"""
indexing = kwargs.pop("indexing", "xy")
name = kwargs.pop("name", "meshgrid")
if len(kwargs) > 0:
key = list(kwargs.keys())[0]
raise TypeError("'{}' is an invalid keyword argument "
"for this function".format(key))
if indexing not in ("xy", "ij"):
raise ValueError("indexing parameter must be either 'xy' or 'ij'")
with ops.name_scope(name, "meshgrid", args) as name:
num_inputs = len(args)
ones = (1,) * num_inputs
asserts = [logging_ops.Assert(
gen_math_ops.equal(rank(x), 1),
["Input %d needs to have rank 1: " % i, rank(x)],
) for i, x in enumerate(args)]
# Prepare reshape by inserting dimensions with size 1 where needed
shapes = [ones[:i] + (-1,) + ones[i + 1:] for i in range(num_inputs)]
# Create parameters for broadcasting each tensor to the full size
sizes = [size(x) for x in args]
bcast = [sizes[:i] + [1] + sizes[i + 1:] for i in range(num_inputs)]
# By default, the numpy version swaps the instructions
# for the first and second dimension
if indexing == "xy" and num_inputs > 1:
shapes[0], shapes[1] = shapes[1], shapes[0]
bcast[0], bcast[1] = bcast[1], bcast[0]
results = []
with ops.control_dependencies(asserts):
for a, r, e in zip(args, shapes, bcast):
results.append(tile(reshape(a, r), e))
return results
@ops.RegisterShape("Placeholder")
def _PlaceholderShape(op):
given_shape = tensor_util.TensorShapeProtoToList(op.get_attr("shape"))
if given_shape:
return [tensor_shape.TensorShape(given_shape)]
else:
return [tensor_shape.unknown_shape()]
@ops.RegisterShape("CheckNumerics")
@ops.RegisterShape("Identity")
@ops.RegisterShape("RefIdentity")
@ops.RegisterShape("StopGradient")
@ops.RegisterShape("BatchMatrixBandPart")
@ops.RegisterShape("QuantizeAndDequantize")
def _UnchangedShape(op):
return [op.inputs[0].get_shape()]
@ops.RegisterShape("Rank")
@ops.RegisterShape("Size")
def _ScalarShape(unused_op):
return [tensor_shape.scalar()]
@ops.RegisterShape("Slice")
def _SliceShape(op):
"""Shape function for array_ops.slice."""
input_shape = op.inputs[0].get_shape()
begin_shape = op.inputs[1].get_shape().with_rank(1)
sizes_shape = op.inputs[2].get_shape().with_rank(1)
ndims = begin_shape.merge_with(sizes_shape)[0].value
if ndims is not None:
input_shape.assert_has_rank(ndims)
# NOTE(mrry): Use `constant_value_as_shape()` to handle
# partially-known values.
begin_value = tensor_util.constant_value_as_shape(
op.inputs[1]).with_rank(ndims)
# NOTE(mrry): We can't use `constant_value_as_shape()` for `sizes`
# because it might contain -1, which can't be represented as a
# `TensorShape`.
sizes_value = tensor_util.constant_value(op.inputs[2])
if sizes_value is not None:
returned_dims = []
for i, (slice_size, begin_dim) in enumerate(zip(sizes_value.ravel(),
begin_value.dims)):
if slice_size != -1:
returned_dims.append(slice_size)
else:
returned_dims.append(input_shape[i] - begin_dim)
return [tensor_shape.TensorShape(returned_dims)]
else:
if input_shape.ndims is not None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
elif ndims is not None:
return [tensor_shape.unknown_shape(ndims=ndims)]
else:
return [tensor_shape.unknown_shape()]
NEW_AXIS = -1
SHRINK_AXIS = -2
# PEP-8 naming
# pylint: disable=invalid-name
def _compute_size_of_strided_dim(shrink, spec, size):
"""Computes the size of a single strided slice dimension."""
  unknown = None  # Sentinel: the size cannot be determined statically.
  use_full_range = None  # Sentinel: omitted slice bound, use the full valid range.
# if this is a shrink axis (i.e. a non-range index)
# it either will produce an error or return 1
if shrink:
return 1
if size is unknown or size.value is unknown:
return unknown
size = size.value
stride = spec.step
if stride is not unknown:
if stride == 0:
return unknown
valid_range = [0, size] if stride > 0 else [-1, size - 1]
# PEP-8 naming
# pylint: disable=invalid-name
def canonical(x, c):
if x is use_full_range:
return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
else:
x_fwd = size + x if x < 0 else x # make negative indices positive
return max(valid_range[0], min(valid_range[1], x_fwd))
begin = canonical(spec.start, 0)
end = canonical(spec.stop, 1)
interval_length = end - begin
if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
return 0
else:
remainder = 1 if interval_length % stride != 0 else 0
return interval_length // stride + remainder
else:
return unknown # unknown because stride is unknown
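# Illustrative evaluations of the helper above (assuming tensor_shape.Dimension
# for the size argument and Python's built-in slice for the spec):
#   _compute_size_of_strided_dim(False, slice(1, 7, 2), Dimension(10)) -> 3
#   _compute_size_of_strided_dim(False, slice(None, None, -1), Dimension(4)) -> 4
#   _compute_size_of_strided_dim(True, slice(4, 5, 1), Dimension(10)) -> 1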
@ops.RegisterShape("StridedSliceGrad")
def _StridedSliceGradShape(op):
"""Shape function for gradient of array_ops.slice."""
return [tensor_util.constant_value(op.inputs[0])]
@ops.RegisterShape("StridedSlice")
def _StridedSliceShape(op):
"""Shape function for array_ops.slice."""
input_shape = op.inputs[0].get_shape()
if input_shape.ndims is None:
return [tensor_shape.unknown_shape()]
ndims = len(input_shape)
begin_shape = op.inputs[1].get_shape().with_rank(1)
end_shape = op.inputs[2].get_shape().with_rank(1)
strides_shape = op.inputs[3].get_shape().with_rank(1)
# get constant values if available
begin_value = tensor_util.constant_value(op.inputs[1])
end_value = tensor_util.constant_value(op.inputs[2])
strides_value = tensor_util.constant_value(op.inputs[3])
sparse_dims = begin_shape.merge_with(end_shape).merge_with(strides_shape)[
0].value
if (sparse_dims is None or begin_value is None or end_value is None or
strides_value is None):
return [tensor_shape.unknown_shape()]
begin_mask = op.get_attr("begin_mask")
end_mask = op.get_attr("end_mask")
ellipsis_mask = op.get_attr("ellipsis_mask")
new_axis_mask = op.get_attr("new_axis_mask")
shrink_axis_mask = op.get_attr("shrink_axis_mask")
# find the ellipsis
ellipsis_index = -1
# look for ellipses
num_add_axis_after_ellipsis = 0
for i in range(sparse_dims):
if ellipsis_index != -1 and ((1 << i) & new_axis_mask) != 0:
num_add_axis_after_ellipsis += 1
if (1 << i) & ellipsis_mask:
if ellipsis_index != -1:
raise ValueError("Multiple ellipses not allowed")
ellipsis_index = i
# insert a virtual ellipsis if not seen
if ellipsis_index == -1:
ellipsis_mask |= (1 << sparse_dims)
sparse_dims += 1
# build the dense specification
dense_dims = ndims # not accounting for newaxis and shrink
final_shape_gather = []
full_index = 0
dense_shrink_axis = 0
dense_specs = []
for dim in range(sparse_dims):
bit = 1 << dim
if bit & ellipsis_mask:
next_index = min(dense_dims -
(sparse_dims - dim) + 1 + num_add_axis_after_ellipsis,
dense_dims)
while full_index < next_index:
dense_specs.append(_baseslice(None, None, 1))
final_shape_gather.append(full_index)
full_index += 1
elif bit & new_axis_mask:
final_shape_gather.append(NEW_AXIS)
else:
dense_specs.append(_baseslice(
None if (begin_mask & bit) else begin_value[dim], None if (
end_mask & bit) else end_value[dim], strides_value[dim]))
if shrink_axis_mask & bit:
dense_shrink_axis |= (1 << full_index)
final_shape_gather.append(SHRINK_AXIS)
else:
final_shape_gather.append(full_index)
full_index += 1
  # Compute each dimension's contribution to the "processing" shape.
final_dims = []
for dim in range(dense_dims):
shrink = (dense_shrink_axis & (1 << dim)) != 0
final_dims.append(
_compute_size_of_strided_dim(shrink, dense_specs[dim], input_shape.dims[
dim]))
# Gather the final shape from the processing shape
final_shape = []
for index in final_shape_gather:
if index == NEW_AXIS:
final_shape.append(1)
elif index == SHRINK_AXIS:
pass
else:
final_shape.append(final_dims[index])
return [tensor_shape.TensorShape(final_shape)]
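# Rough worked example for the shape function above: for an input of shape
# [10, 8] sliced as foo[3:5, 4] (begin=[3, 4], end=[5, 5], strides=[1, 1],
# shrink_axis_mask=0b10), dimension 0 contributes 2 elements and dimension 1
# is shrunk away, so the inferred output shape is [2].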
@ops.RegisterShape("Gather")
def _GatherShape(op):
"""Shape function for array_ops.gather."""
params_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape()
return [indices_shape.concatenate(params_shape[1:])]
@ops.RegisterShape("GatherNd")
def _GatherNdShape(op):
"""Shape function for array_ops.gather_nd."""
params_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape().with_rank_at_least(1)
indices_rank = indices_shape.ndims
indices_lookup_rank = (
None if indices_rank is None else indices_shape[-1].value)
if params_shape.ndims is None or indices_lookup_rank is None:
return [tensor_shape.unknown_shape()]
else:
if indices_lookup_rank > params_shape.ndims:
raise ValueError(
"indices.shape[-1] must be <= params.rank, but saw indices shape: %s "
" and params shape: %s" % (indices_shape, params_shape))
indices_lookup_shape = indices_shape[:-1]
params_slices_shape = params_shape[indices_lookup_rank:]
return [indices_lookup_shape.concatenate(params_slices_shape)]
@ops.RegisterShape("Unique")
def _UniqueShape(op):
"""Shape function for array_ops.Unique."""
# The output is a vector with data-dependent length.
input_shape = op.inputs[0].get_shape()
input_shape.assert_has_rank(1)
return [tensor_shape.vector(None), input_shape]
@ops.RegisterShape("UniqueWithCounts")
def _UniqueWithCountsShape(op):
"""Shape function for array_ops.Unique."""
# The output is a vector with data-dependent length.
input_shape = op.inputs[0].get_shape()
input_shape.assert_has_rank(1)
return [tensor_shape.vector(None), input_shape, tensor_shape.vector(None)]
@ops.RegisterShape("BatchMatrixDiag")
def _BatchMatrixDiagShape(op):
"""Shape function for array_ops.batch_matrix_diag."""
diag_shape = op.inputs[0].get_shape().with_rank_at_least(1)
return [diag_shape.concatenate(diag_shape[-1])]
@ops.RegisterShape("BatchMatrixSetDiag")
def _BatchMatrixSetDiagShape(op):
"""Shape function for array_ops.batch_matrix_set_diag."""
input_shape = op.inputs[0].get_shape().with_rank_at_least(2)
diag_shape = op.inputs[1].get_shape().with_rank_at_least(1)
output_shape = diag_shape.concatenate(diag_shape[-1])
output_shape = output_shape.merge_with(input_shape)
return [output_shape]
@ops.RegisterShape("BatchMatrixDiagPart")
def _BatchMatrixDiagPartShape(op):
"""Shape function for array_ops.batch_matrix_diag_part."""
input_shape = op.inputs[0].get_shape().with_rank_at_least(2)
# Last two dims must match
input_shape[-1].assert_is_compatible_with(input_shape[-2])
return [input_shape[:-1]]
@ops.RegisterShape("Diag")
def _DiagShape(op):
"""Shape function for array_ops.diag.
This op has one input (of rank k <= 3), and one output (of rank 2k),
where the shape of the output is the concatenation of the input
shape with itself.
Args:
op: A Diag Operation.
Returns:
A single-element list containing the shape of the output.
"""
input_shape = op.inputs[0].get_shape().with_rank_at_most(3)
return [input_shape.concatenate(input_shape)]
@ops.RegisterShape("DiagPart")
def _DiagPartShape(op):
"""Shape function for array_ops.diag_part.
This op has one input (of rank k = 2, 4, or 6), and one output (of rank k/2),
where the shape of the output is the diagonal of the input shape.
Args:
op: A DiagPart Operation.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If input has odd rank or greater than 6, or the first and
second halves of the shape are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank_at_most(6)
rank = input_shape.ndims
if rank is None:
return [tensor_shape.unknown_shape()]
if rank % 2:
raise ValueError("Input must be even rank, got rank = " + str(rank) + ".")
mid = rank // 2
return [input_shape[:mid].merge_with(input_shape[mid:])]
@ops.RegisterShape("ExpandDims")
def _ExpandDimsShape(op):
"""Determine shape for expand op's output tensor.
Args:
op: Operation for which to determine shape.
op.inputs[0] is the input tensor.
op.inputs[1] is the dimension in which to expand.
Returns:
Shape of op's output tensor.
Raises:
ValueError: If dim is outside of [-rank - 1, rank], where rank is the number
of dimensions in the input tensor.
"""
input_shape = op.inputs[0].get_shape()
if input_shape.dims is None:
return [tensor_shape.unknown_shape()]
dim = tensor_util.constant_value(op.inputs[1])
input_ndims = input_shape.ndims
if dim < -input_ndims - 1 or dim > input_ndims:
raise ValueError(
"dim %d not in [%d, %d]." % (dim, -input_ndims, input_ndims))
if dim < 0:
dim += (input_ndims + 1)
result_shape = list(input_shape.dims)
result_shape.insert(dim, 1)
return [tensor_shape.TensorShape(result_shape)]
@ops.RegisterShape("Squeeze")
def _SqueezeShape(op):
"""Determine shape for squeeze op's output tensor.
Args:
op: Operation for which to determine shape.
Returns:
Shape of op's output tensor.
Raises:
ValueError: if squeeze_dims includes a dimension outside of [-rank, rank),
where rank is the number of dimensions in the input tensor. Or, if
squeeze_dims includes a dimension for which input shape has a value
not equal to 1.
"""
input_shape = op.inputs[0].get_shape()
if input_shape.dims is None:
return [tensor_shape.unknown_shape()]
squeeze_dims = op.get_attr("squeeze_dims") or []
wrapped_squeeze_dims = []
input_ndims = input_shape.ndims
for i, squeeze_dim in enumerate(squeeze_dims):
if squeeze_dim < -input_ndims or squeeze_dim >= input_ndims:
raise ValueError(
"squeeze_dims[%d]=%d not in [%d, %d)." % (
i, squeeze_dim, -input_ndims, input_ndims))
if squeeze_dim < 0:
squeeze_dim += input_ndims
wrapped_squeeze_dims.append(squeeze_dim)
result_shape = []
for i, dim in enumerate([d.value for d in input_shape.dims]):
is_explicit_match = i in wrapped_squeeze_dims
if dim is None:
if is_explicit_match:
# Assume that the squeezed dimension will be 1 at runtime.
continue
if not wrapped_squeeze_dims:
# If squeezing all 1 dimensions and we see a None, give up.
return [tensor_shape.unknown_shape()]
elif dim == 1:
if is_explicit_match or not wrapped_squeeze_dims:
continue
elif is_explicit_match:
raise ValueError(
"Can not squeeze dim[%d], expected a dimension of 1, got %d." % (
i, dim))
result_shape.append(dim)
return [tensor_shape.TensorShape(result_shape)]
@ops.RegisterShape("Bitcast")
def _BitcastShape(op):
"""Shape function for Bitcast op."""
input_shape = op.inputs[0].get_shape()
if input_shape == tensor_shape.unknown_shape():
return [tensor_shape.unknown_shape()]
input_type = op.inputs[0].dtype
size_of_input = input_type.size
output = dtypes.as_dtype(op.get_attr("type"))
size_of_output = output.size
if size_of_input == size_of_output:
return [input_shape]
else:
if size_of_output > size_of_input:
new_shape = input_shape.with_rank_at_least(1).as_list()
last_val = new_shape[-1]
if last_val is None or last_val == (size_of_output // size_of_input):
new_shape = new_shape[:-1]
else:
raise ValueError(
"Cannot bitcast due to shape. %d is not evenly divisible by %d." %
            (new_shape[-1], size_of_output // size_of_input))
else:
new_shape = input_shape
new_shape = new_shape.concatenate([size_of_input // size_of_output])
return [tensor_shape.TensorShape(new_shape)]
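# Illustrative behaviour of the shape function above: bitcasting a float32
# tensor of shape [2, 3] to uint8 appends a trailing dimension of
# 4 (= sizeof(float32) / sizeof(uint8)), giving [2, 3, 4]; bitcasting that
# uint8 tensor back to float32 drops the trailing 4, giving [2, 3].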
@ops.RegisterShape("Reshape")
def _ReshapeShape(op):
"""Shape function for Reshape op."""
input_shape = op.inputs[0].get_shape()
if input_shape.ndims is not None:
num_elements = tensor_shape.Dimension(1)
for dim in input_shape.dims:
num_elements *= dim
else:
num_elements = tensor_shape.Dimension(None)
new_shape = tensor_util.constant_value_as_shape(op.inputs[1])
if new_shape.ndims is None:
# We have no information about the shape of the output.
return [new_shape]
if None not in new_shape.as_list():
# The new shape is fully defined.
if (num_elements.value is not None
and num_elements.value != np.prod(new_shape)):
raise ValueError(
"Cannot reshape a tensor with %d elements to shape %s (%d elements)"
% (num_elements.value, new_shape, np.prod(new_shape)))
elif num_elements.value is not None:
# We know the number of elements, so we can calculate the missing
# dimension in the new_shape.
known_elements = 1
unknown_indices = []
for i, dim in enumerate(new_shape):
if dim.value is None:
unknown_indices.append(i)
else:
known_elements *= dim.value
if known_elements != 0:
if num_elements % known_elements != 0:
raise ValueError("input has %s elements, which isn't divisible by %d" %
(num_elements, known_elements))
if len(unknown_indices) == 1:
unknown_index = unknown_indices[0]
new_shape = new_shape.merge_with(
new_shape[:unknown_index].concatenate(
[num_elements // known_elements]).concatenate(
new_shape[unknown_index+1:]))
return [new_shape]
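# Worked example for the shape function above (illustrative): reshaping a
# tensor with 12 known elements to [3, -1] yields [3, None] from the constant
# shape value, and the single unknown dimension is then inferred as
# 12 // 3 = 4, so the output shape is [3, 4].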
@ops.RegisterShape("BroadcastGradientArgs")
def _BroadcastGradientArgsShape(op):
"""Shape function for the BroadcastGradientArgs op."""
# TODO(mrry): Implement constant_value for BroadcastGradientArgs?
op.inputs[0].get_shape().assert_has_rank(1)
op.inputs[1].get_shape().assert_has_rank(1)
return [tensor_shape.vector(None), tensor_shape.vector(None)]
@ops.RegisterShape("Fill")
def _FillShape(op):
"""Shape function for the Fill op.
This op takes a vector of dimensions and a scalar, and produces a
tensor with the given dimensions.
Args:
op: A Fill Operation.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the shapes or arguments are known to be invalid.
"""
op.inputs[0].get_shape().assert_has_rank(1)
op.inputs[1].get_shape().assert_has_rank(0)
fill_dims = tensor_util.constant_value(op.inputs[0])
if fill_dims is not None and any(d < 0 for d in fill_dims):
raise ValueError("Fill dimensions must be >= 0")
return [tensor_util.constant_value_as_shape(op.inputs[0])]
@ops.RegisterShape("InvertPermutation")
def _InvertPermutationShape(op):
"""Shape function for the InvertPermutation op."""
return [op.inputs[0].get_shape().with_rank(1)]
@ops.RegisterShape("ListDiff")
def _ListDiffShape(op):
"""Shape function for the ListDiff op."""
op.inputs[0].get_shape().assert_has_rank(1)
op.inputs[1].get_shape().assert_has_rank(1)
# TODO(mrry): Indicate that the length falls within an interval?
return [tensor_shape.vector(None)] * 2
@ops.RegisterShape("Pad")
@ops.RegisterShape("MirrorPad")
def _PadShape(op):
"""Shape function for the Pad op.
This op has two inputs:
* input: A rank-N tensor.
* paddings: An N-by-2 matrix, in which the i^th row contains the
number of padding elements to add before and after `input` in the
i^th dimension.
It has one output, which has the same rank as input, and additional
elements according to the values in paddings.
Args:
op: A Pad Operation.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the input shapes are incompatible.
"""
paddings_shape = op.inputs[1].get_shape().with_rank(2)
input_shape = op.inputs[0].get_shape()
input_shape = input_shape.with_rank(paddings_shape[0].value)
paddings_shape = paddings_shape.merge_with(
tensor_shape.matrix(input_shape.ndims, 2))
paddings = tensor_util.constant_value(op.inputs[1])
if paddings is None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
else:
output_dims = []
for i, dim in enumerate(input_shape.dims):
if paddings[i, 0] < 0 or paddings[i, 1] < 0:
raise ValueError("paddings must be non-negative")
output_dims.append(dim + paddings[i, 0] + paddings[i, 1])
return [tensor_shape.TensorShape(output_dims)]
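# Illustrative sketch, not referenced elsewhere in this module: each output
# dimension of Pad is dim + pad_before + pad_after, as computed above.
def _pad_shape_example():
  input_shape = [2, 3]
  paddings = [[1, 1], [0, 2]]    # [pad_before, pad_after] per dimension
  output_dims = [d + before + after
                 for d, (before, after) in zip(input_shape, paddings)]
  assert output_dims == [4, 5]
  return output_dims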
@ops.RegisterShape("MirrorPadGrad")
def _MirrorPadGradShape(op):
"""Shape function for the MirrorPadGrad op."""
paddings_shape = op.inputs[1].get_shape().with_rank(2)
input_shape = op.inputs[0].get_shape().with_rank(paddings_shape[0].value)
paddings_shape = paddings_shape.merge_with(tensor_shape.matrix(
input_shape.ndims, 2))
paddings = tensor_util.constant_value(op.inputs[1])
if paddings is None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
output_dims = []
for i, dim in enumerate(input_shape.dims):
if paddings[i, 0] < 0 or paddings[i, 1] < 0:
raise ValueError("Paddings must be non-negative.")
if dim < paddings[i, 0] + paddings[i, 1]:
raise ValueError("Output dimension is negative.")
output_dims.append(dim - paddings[i, 0] - paddings[i, 1])
return [tensor_shape.TensorShape(output_dims)]
@ops.RegisterShape("ReverseSequence")
def _ReverseSequenceShape(op):
"""Shape function for the ReverseSequence op.
This op has two inputs:
* input: A rank-N tensor with size B in the 0th dimension.
* seq_lens: A vector of length B.
It has one output, with the same size as input.
Args:
op: A ReverseSequence Operation.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the input shapes are incompatible or seq_dim == batch_dim.
"""
input_shape = op.inputs[0].get_shape()
seq_lens_shape = op.inputs[1].get_shape().with_rank(1)
if input_shape.ndims is None:
return [None]
seq_dim = op.get_attr("seq_dim")
batch_dim = op.get_attr("batch_dim")
if input_shape.ndims is not None:
if batch_dim >= input_shape.ndims:
raise ValueError("batch_dim must be < input.dims() (%d vs %d)" %
(batch_dim, input_shape.ndims))
if seq_dim >= input_shape.ndims:
raise ValueError("seq_dim must be < input.dims() (%d vs %d)" %
(seq_dim, input_shape.ndims))
batch_size = input_shape[batch_dim].merge_with(seq_lens_shape[0])
input_shape = tensor_shape.TensorShape([
value if ix != batch_dim else batch_size
for ix, value in enumerate(input_shape)])
return [input_shape]
@ops.RegisterShape("Shape")
@ops.RegisterShape("ShapeN")
def _ShapeNShape(op):
"""Shape function for the Shape/ShapeN op."""
return [tensor_shape.vector(x.get_shape().ndims) for x in op.inputs]
@ops.RegisterShape("Transpose")
def _TransposeShape(op):
"""Shape function for the Transpose op.
This op takes two inputs:
* input: a rank-N tensor of arbitrary shape.
* shuffle: a length-N vector.
Its output is the rank-N tensor computed by permuting the dimensions
of input according to shuffle.
Args:
op: A Transpose op.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the shapes of input and shuffle are incompatible.
IndexError: If shuffle contains an index that is >= the rank of input.
"""
input_shape = op.inputs[0].get_shape()
transpose_shape = op.inputs[1].get_shape().merge_with(tensor_shape.vector(
input_shape.ndims))
transpose_vec = tensor_util.constant_value(op.inputs[1])
if transpose_vec is None:
return [tensor_shape.unknown_shape(ndims=transpose_shape[0].value)]
else:
return [tensor_shape.TensorShape([input_shape[i]
for i in transpose_vec.tolist()])]
@ops.RegisterShape("Split")
def _SplitShape(op):
"""Shape function for the Split op."""
split_dim = tensor_util.constant_value(op.inputs[0])
num_split = len(op.outputs)
input_shape = op.inputs[1].get_shape()
if split_dim is None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)] * num_split
else:
split_dim = int(split_dim)
input_shape = input_shape.with_rank_at_least(split_dim + 1)
if not (input_shape[split_dim] % num_split).is_compatible_with(0):
raise ValueError(
"Number of ways to split should evenly divide the split "
"dimension but got split_dim %d (size = %d) and num_split %d" %
(split_dim, input_shape[split_dim].value, num_split))
prefix = input_shape[:split_dim]
size_in_split_dim = input_shape[split_dim] // num_split
suffix = input_shape[split_dim + 1:]
output_shape = prefix.concatenate(size_in_split_dim).concatenate(suffix)
return [output_shape] * num_split
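# Illustrative sketch, not referenced elsewhere in this module: Split divides
# the split dimension evenly among num_split outputs, as computed above.
def _split_shape_example():
  input_shape = [4, 6, 5]
  split_dim, num_split = 1, 3
  size_in_split_dim = input_shape[split_dim] // num_split
  output_shape = (input_shape[:split_dim] + [size_in_split_dim] +
                  input_shape[split_dim + 1:])
  assert output_shape == [4, 2, 5]
  return [output_shape] * num_split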
@ops.RegisterShape("Tile")
def _TileShape(op):
"""Shape function for the Tile op.
This op has two inputs:
* input: A rank-N tensor.
* multiples: A length-N vector, in which the i^th element contains
the factor by which `input` will be tiled in the i^th dimension.
It has one output, which has the same rank as input, and additional
elements according to the values in multiples
Args:
op: A Tile Operation.
Returns:
A single-element list containing the shape of the output.
"""
multiples_shape = op.inputs[1].get_shape().with_rank(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0].value)
# NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
# it is a vector of non-negative integers, and (ii) doing so allows
# us to handle partially-known multiples.
multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
input_shape.ndims)
if multiples.ndims is None:
return [tensor_shape.unknown_shape()]
else:
output_dims = []
for dim, multiple in zip(input_shape.dims, multiples.dims):
output_dims.append(dim * multiple)
return [tensor_shape.TensorShape(output_dims)]
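# Illustrative sketch, not referenced elsewhere in this module: each output
# dimension of Tile is dim * multiple, as computed above.
def _tile_shape_example():
  input_shape = [2, 3]
  multiples = [3, 2]
  output_dims = [dim * multiple
                 for dim, multiple in zip(input_shape, multiples)]
  assert output_dims == [6, 6]
  return output_dims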
@ops.RegisterShape("TileGrad")
def _TileGradShape(op):
"""Shape function for the TileGrad op."""
multiples_shape = op.inputs[1].get_shape().with_rank(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
# NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
# it is a vector of non-negative integers, and (ii) doing so allows
# us to handle partially-known multiples.
multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
input_shape.ndims)
if multiples.ndims is None:
return [tensor_shape.unknown_shape()]
else:
output_dims = []
for dim, multiple in zip(input_shape.dims, multiples.dims):
output_dims.append(dim // multiple)
return [tensor_shape.TensorShape(output_dims)]
@ops.RegisterShape("Where")
def _WhereShape(op):
"""Shape function for the Where op."""
input_shape = op.inputs[0].get_shape()
return [tensor_shape.matrix(None, input_shape.ndims)]
@ops.RegisterShape("ZerosLike")
def _ZerosLikeShape(op):
"""Shape function for the ZerosLike op."""
return [op.inputs[0].get_shape()]
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
"""Computes the Levenshtein distance between sequences.
This operation takes variable-length sequences (`hypothesis` and `truth`),
each provided as a `SparseTensor`, and computes the Levenshtein distance.
You can normalize the edit distance by length of `truth` by setting
`normalize` to true.
For example, given the following input:
```python
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
# (0,0) = ["a"]
# (1,0) = ["b"]
hypothesis = tf.SparseTensor(
[[0, 0, 0],
[1, 0, 0]],
["a", "b"]
(2, 1, 1))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
# (0,0) = []
# (0,1) = ["a"]
# (1,0) = ["b", "c"]
# (1,1) = ["a"]
truth = tf.SparseTensor(
[[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
       [1, 1, 0]],
["a", "b", "c", "a"],
(2, 2, 2))
normalize = True
```
This operation would return the following:
```python
# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
# by 'truth' lengths.
output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
```
Args:
hypothesis: A `SparseTensor` containing hypothesis sequences.
truth: A `SparseTensor` containing truth sequences.
normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
length of `truth.`
name: A name for the operation (optional).
Returns:
A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(hypothesis, ops.SparseTensor):
raise TypeError("Hypothesis must be a SparseTensor")
if not isinstance(truth, ops.SparseTensor):
raise TypeError("Truth must be a SparseTensor")
return gen_array_ops._edit_distance(hypothesis.indices,
hypothesis.values,
hypothesis.shape,
truth.indices,
truth.values,
truth.shape,
normalize=normalize,
name=name)
@ops.RegisterShape("EditDistance")
def _EditDistanceShape(op):
"""Shape function for the EditDistance op."""
hypothesis_shape = tensor_util.constant_value(op.inputs[2])
truth_shape = tensor_util.constant_value(op.inputs[5])
if hypothesis_shape is not None and truth_shape is not None:
if len(hypothesis_shape) != len(truth_shape):
raise ValueError(
"Inconsistent ranks in hypothesis and truth. Saw shapes: %s and %s" %
(str(hypothesis_shape), str(truth_shape)))
return [tensor_shape.TensorShape(
[max(h, t) for h, t in zip(hypothesis_shape[:-1], truth_shape[:-1])])]
return [tensor_shape.unknown_shape()]
# The remaining ops do not change the shape of their inputs.
@ops.RegisterShape("Quantize")
@ops.RegisterShape("Dequantize")
def _QuantizeDequantizeShape(op):
unused_min_range = op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
unused_max_range = op.inputs[2].get_shape().merge_with(tensor_shape.scalar())
return common_shapes.unchanged_shape(op)
@ops.RegisterShape("ExtractImagePatches")
def _ExtractImagePatchesShape(op):
"""Shape function for the ExtractImagePatches op.
Args:
op: An ExtractImagePatches op.
Raises:
ValueError: If the strides or padding are invalid.
Returns:
The shape of the op output.
"""
images_shape = op.inputs[0].get_shape().with_rank(4)
batch = images_shape[0]
in_rows = images_shape[1]
in_cols = images_shape[2]
in_depth = images_shape[3]
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksizes")
if ksize_b != 1 or ksize_d != 1:
raise ValueError("Current implementation does not yet support "
"ksizes in the batch and depth dimensions.")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
rate_b, rate_r, rate_c, rate_d = op.get_attr("rates")
if rate_b != 1 or rate_d != 1:
raise ValueError("Current implementation does not yet support "
"rates in the batch and depth dimensions.")
# Effective patch size, taking into account filter upsampling by rates.
ksize_r_eff = ksize_r + (ksize_r - 1) * (rate_r - 1)
ksize_c_eff = ksize_c + (ksize_c - 1) * (rate_c - 1)
padding = op.get_attr("padding")
out_rows, out_cols = common_shapes.get2d_conv_output_size(in_rows, in_cols,
ksize_r_eff,
ksize_c_eff,
stride_r, stride_c,
padding)
out_depth = None if in_depth is None else ksize_r * ksize_c * int(in_depth)
output_shape = [batch, out_rows, out_cols, out_depth]
return [tensor_shape.TensorShape(output_shape)]
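# Illustrative sketch, not referenced elsewhere in this module: the effective
# (dilated) patch size used above, ksize_eff = k + (k - 1) * (rate - 1).
def _effective_patch_size_example():
  ksize, rate = 3, 2
  ksize_eff = ksize + (ksize - 1) * (rate - 1)
  assert ksize_eff == 5
  return ksize_eff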
@ops.RegisterShape("SpaceToBatch")
def _SpaceToBatchShape(op):
"""Shape function for the SpaceToBatch op.
  The output shape is determined by the following inputs/attributes:
* input: A rank-4 tensor with shape [B, H, W, D]
* paddings: A 2-by-2 matrix, specified as follows:
paddings = [[pad_top, pad_bottom], [pad_left, pad_right]],
implying effective padded spatial dimensions:
Hp = pad_top + H + pad_bottom
Wp = pad_left + W + pad_right
Both Hp and Wp must be multiples of block_size.
* block_size: an int.
Its output is also a rank-4 tensor with shape:
[B*block_size*block_size, Hp/block_size, Wp/block_size, D]
Args:
op: A SpaceToBatch op.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the shapes of inputs are not as expected.
IndexError: If block_size does not divide Wp or Hp.
"""
# Check that the input tensor is 4-D.
try:
input_shape = op.inputs[0].get_shape().with_rank(4)
except ValueError:
raise ValueError(
"tf.space_to_batch() requires 4-D input tensor.")
# Check that the paddings tensor is a matrix with shape [2, 2].
try:
paddings_shape = op.inputs[1].get_shape().with_rank(2)
except ValueError:
raise ValueError(
"tf.space_to_batch() requires 2-D paddings tensor.")
if paddings_shape[0] != 2 or paddings_shape[1] != 2:
raise ValueError(
"tf.space_to_batch() requires input paddings with shape [2, 2].")
block_size = op.get_attr("block_size")
if block_size <= 1:
raise ValueError("Attribute block_size has to be > 1.")
paddings = tensor_util.constant_value(op.inputs[1])
if paddings is not None:
if (paddings[0, 0] < 0 or paddings[0, 1] < 0 or
paddings[1, 0] < 0 or paddings[1, 1] < 0):
raise ValueError("paddings cannot be negative.")
input_height = input_shape[1] + paddings[0, 0] + paddings[0, 1]
input_width = input_shape[2] + paddings[1, 0] + paddings[1, 1]
if input_height % block_size > 0 or input_width % block_size > 0:
raise IndexError("block_size needs to divide both width and height.")
else:
input_height = tensor_shape.Dimension(None)
input_width = tensor_shape.Dimension(None)
batch = input_shape[0] * block_size * block_size
height = input_height // block_size
width = input_width // block_size
depth = input_shape[3]
return [tensor_shape.TensorShape([batch, height, width, depth])]
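# Illustrative sketch, not referenced elsewhere in this module: SpaceToBatch
# with block_size 2 on a padded 4x4 spatial grid multiplies the batch by
# block_size**2 and divides both spatial dimensions by block_size.
def _space_to_batch_shape_example():
  batch, padded_height, padded_width, depth = 1, 4, 4, 3
  block_size = 2
  output_shape = [batch * block_size * block_size,
                  padded_height // block_size,
                  padded_width // block_size,
                  depth]
  assert output_shape == [4, 2, 2, 3]
  return output_shape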
@ops.RegisterShape("BatchToSpace")
def _BatchToSpaceShape(op):
"""Shape function for the BatchToSpace op.
  The output shape is determined by the following inputs/attributes:
* input: A rank-4 tensor with shape
[B*block_size*block_size, Hp/block_size, Wp/block_size, D]
Note that the batch size of the input tensor must be divisible by
`block_size * block_size`.
* crops: A 2-by-2 matrix, specified as follows:
crops = [[crop_top, crop_bottom], [crop_left, crop_right]].
* block_size: an int.
Its output is also a rank-4 tensor with shape [B, H, W, D], where:
H = Hp - crop_top - crop_bottom
W = Wp - crop_left - crop_right
Args:
op: A BatchToSpace op.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the shapes of the inputs are not as expected.
IndexError: If block_size*block_size does not divide the input batch size.
"""
# Check that the input tensor is 4-D.
try:
input_shape = op.inputs[0].get_shape().with_rank(4)
except ValueError:
raise ValueError("tf.batch_to_space() requires 4-D input tensor.")
# Check that the crops tensor is a matrix with shape [2, 2].
try:
crops_shape = op.inputs[1].get_shape().with_rank(2)
except ValueError:
raise ValueError(
"tf.space_to_batch() requires 2-D crops tensor.")
if crops_shape[0] != 2 or crops_shape[1] != 2:
raise ValueError(
"tf.space_to_batch() requires input crops with shape [2, 2].")
crops = tensor_util.constant_value(op.inputs[1])
if (crops is not None and
(crops[0, 0] < 0 or crops[0, 1] < 0 or
crops[1, 0] < 0 or crops[1, 1] < 0)):
raise ValueError("crops cannot be negative.")
block_size = op.get_attr("block_size")
if block_size <= 1:
raise ValueError("Attribute block_size has to be > 1.")
input_batch = input_shape[0]
if input_batch % (block_size * block_size) > 0:
raise IndexError("input batch must be divisible by block_size*block_size.")
batch = input_batch // (block_size * block_size)
if crops is not None:
height = input_shape[1] * block_size - crops[0, 0] - crops[0, 1]
width = input_shape[2] * block_size - crops[1, 0] - crops[1, 1]
if height <= 0 or width <= 0:
raise ValueError("Output height or width is not positive.")
else:
height = tensor_shape.Dimension(None)
width = tensor_shape.Dimension(None)
depth = input_shape[3]
return [tensor_shape.TensorShape([batch, height, width, depth])]
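# Illustrative sketch, not referenced elsewhere in this module: BatchToSpace
# inverts SpaceToBatch, dividing the batch by block_size**2 and upscaling the
# spatial dimensions before removing the crops.
def _batch_to_space_shape_example():
  input_batch, height, width, depth = 4, 2, 2, 3
  block_size = 2
  crops = [[0, 0], [0, 1]]
  output_shape = [input_batch // (block_size * block_size),
                  height * block_size - crops[0][0] - crops[0][1],
                  width * block_size - crops[1][0] - crops[1][1],
                  depth]
  assert output_shape == [1, 4, 3, 3]
  return output_shape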
@ops.RegisterShape("SpaceToDepth")
def _SpaceToDepthShape(op):
"""Shape function for the SpaceToDepth op.
This op takes two inputs:
  * input: a tensor of shape [B, H, W, D]
  * block_size: an int.
  Its output is a tensor of the same rank, with dimensions
  [B, H/block_size, W/block_size, D*block_size*block_size].
Args:
op: A SpaceToDepth op.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the shapes of input are not as expected.
IndexError: If block_size does not divide W or H.
"""
# Check that the input tensor is of 4 dimensions.
try:
input_shape = op.inputs[0].get_shape().with_rank(4)
except ValueError:
raise ValueError(
"tf.space_to_depth() requires tensors with exactly 4 dimensions.")
block_size = op.get_attr("block_size")
if block_size <= 1:
raise ValueError("Attribute block_size has to be > 1.")
input_height = input_shape[1]
input_width = input_shape[2]
if (input_width % block_size > 0) or (input_height % block_size > 0):
raise IndexError(
"block_size needs to divide both width and height.")
width = input_width // block_size
height = input_height // block_size
new_depth = input_shape[3] * block_size * block_size
return [tensor_shape.TensorShape(
[input_shape[0], height, width, new_depth])]
@ops.RegisterShape("DepthToSpace")
def _DepthToSpaceShape(op):
"""Shape function for the DepthToSpace op.
This op takes two inputs:
  * input: a tensor of shape [B, H, W, D]
  * block_size: an int.
  Its output is a tensor of the same rank, with dimensions
  [B, H*block_size, W*block_size, D/(block_size*block_size)].
Args:
op: A DepthToSpace op.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the shapes of input are not as expected.
IndexError: If block_size*block_size does not divide D.
"""
# Check that the input tensor is of 4 dimensions.
try:
input_shape = op.inputs[0].get_shape().with_rank(4)
except ValueError:
raise ValueError(
"tf.depth_to_space() requires tensors with exactly 4 dimensions.")
block_size = op.get_attr("block_size")
if block_size <= 1:
raise ValueError("Attribute block_size has to be > 1.")
input_height = input_shape[1]
input_width = input_shape[2]
input_depth = input_shape[3]
width = input_width * block_size
height = input_height * block_size
if input_depth % (block_size * block_size) > 0:
raise IndexError(
"block_size*block_size needs to divide the input depth.")
new_depth = input_depth // (block_size * block_size)
return [tensor_shape.TensorShape(
[input_shape[0], height, width, new_depth])]
def one_hot(indices, depth, on_value=None, off_value=None,
axis=None, dtype=None, name=None):
"""Returns a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`,
while all other locations take value `off_value`.
`on_value` and `off_value` must have matching data types. If `dtype` is also
provided, they must be the same data type as specified by `dtype`.
If `on_value` is not provided, it will default to the value `1` with type
`dtype`
If `off_value` is not provided, it will default to the value `0` with type
`dtype`
If the input `indices` is rank `N`, the output will have rank `N+1`. The
new axis is created at dimension `axis` (default: the new axis is appended
at the end).
If `indices` is a scalar the output shape will be a vector of length `depth`
If `indices` is a vector of length `features`, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If `indices` is a matrix (batch) with shape `[batch, features]`, the output
shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
If `dtype` is not provided, it will attempt to assume the data type of
`on_value` or `off_value`, if one or both are passed in. If none of
`on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the
value `tf.float32`
Note: If a non-numeric data type output is desired (tf.string, tf.bool, etc.),
both `on_value` and `off_value` _must_ be provided to `one_hot`
Examples
=========
Suppose that
```
indices = [0, 2, -1, 1]
depth = 3
on_value = 5.0
off_value = 0.0
axis = -1
```
Then output is `[4 x 3]`:
```
output =
[5.0 0.0 0.0] // one_hot(0)
[0.0 0.0 5.0] // one_hot(2)
[0.0 0.0 0.0] // one_hot(-1)
[0.0 5.0 0.0] // one_hot(1)
```
Suppose that
```
indices = [[0, 2], [1, -1]]
depth = 3
on_value = 1.0
off_value = 0.0
axis = -1
```
Then output is `[2 x 2 x 3]`:
```
output =
[
[1.0, 0.0, 0.0] // one_hot(0)
[0.0, 0.0, 1.0] // one_hot(2)
][
[0.0, 1.0, 0.0] // one_hot(1)
[0.0, 0.0, 0.0] // one_hot(-1)
]
```
Using default values for `on_value` and `off_value`:
```
indices = [0, 1, 2]
depth = 3
```
The output will be
```
output =
[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]]
```
Args:
indices: A `Tensor` of indices.
depth: A scalar defining the depth of the one hot dimension.
on_value: A scalar defining the value to fill in output when `indices[j]
= i`. (default: 1)
off_value: A scalar defining the value to fill in output when `indices[j]
!= i`. (default: 0)
axis: The axis to fill (default: -1, a new inner-most axis).
dtype: The data type of the output tensor.
Returns:
output: The one-hot tensor.
Raises:
TypeError: If dtype of either `on_value` or `off_value` don't match `dtype`
TypeError: If dtype of `on_value` and `off_value` don't match one another
"""
with ops.name_scope(name, "one_hot", [indices, depth, on_value, off_value,
axis, dtype]) as name:
on_exists = on_value is not None
off_exists = off_value is not None
on_dtype = ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists \
else None
off_dtype = ops.convert_to_tensor(off_value).dtype.base_dtype if off_exists\
else None
if on_exists or off_exists:
if dtype is not None:
# Ensure provided on_value and/or off_value match dtype
if (on_exists and on_dtype != dtype):
raise TypeError("dtype {0} of on_value does not match " \
"dtype parameter {1}".format(on_dtype, dtype))
if (off_exists and off_dtype != dtype):
raise TypeError("dtype {0} of off_value does not match " \
"dtype parameter {1}".format(off_dtype, dtype))
else:
# dtype not provided: automatically assign it
dtype = on_dtype if on_exists else off_dtype
elif dtype is None:
# None of on_value, off_value, or dtype provided. Default dtype to float32
dtype = dtypes.float32
if not on_exists:
# on_value not provided: assign to value 1 of type dtype
on_value = ops.convert_to_tensor(1, dtype, name="on_value")
on_dtype = dtype
if not off_exists:
# off_value not provided: assign to value 0 of type dtype
off_value = ops.convert_to_tensor(0, dtype, name="off_value")
off_dtype = dtype
if on_dtype != off_dtype:
raise TypeError("dtype {0} of on_value does not match " \
"dtype {1} of off_value".format(on_dtype, off_dtype))
return gen_array_ops._one_hot(indices, depth, on_value, off_value, axis,
name)
@ops.RegisterShape("OneHot")
def _OneHotShape(op):
"""Shape function for the OneHot op.
It closely follows the code in the .cc implementation.
Args:
op: A OneHot Operation.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: if axis < -1.
"""
indices_shape = op.inputs[0].get_shape()
indices_dims = indices_shape.ndims
depth = tensor_util.constant_value(op.inputs[1])
axis = op.get_attr("axis")
if axis < -1:
raise ValueError("axis must be >= -1")
new_shape = None
if indices_dims is not None:
new_shape = indices_shape.as_list()
new_shape.insert(axis % (indices_dims + 1), depth)
return [tensor_shape.TensorShape(new_shape)]
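# Illustrative sketch, not referenced elsewhere in this module: where the
# depth dimension lands for a [batch, features] indices tensor; axis == -1
# appends it as the new inner-most dimension.
def _one_hot_shape_example():
  indices_shape, depth, axis = [2, 4], 3, -1
  new_shape = list(indices_shape)
  new_shape.insert(axis % (len(indices_shape) + 1), depth)
  assert new_shape == [2, 4, 3]
  return new_shape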
@ops.RegisterShape("PlaceholderWithDefault")
def _PlaceholderWithDefaultShape(op):
"""Shape function for the PlaceholderWithDefault op.
This op acts as an identity when it is not fed (passing through a
default value), but allows the user to feed it with tensors of a
possibly less precise shape than its default value.
Args:
op: A PlaceholderWithDefault `Operation`.
Returns:
A single-element list containing the shape of the output.
"""
input_shape = op.inputs[0].get_shape()
output_shape = tensor_shape.TensorShape(op.get_attr("shape"))
# NOTE(mrry): We don't merge these shapes, because `output_shape`
# may be *less* precise than `input_shape`.
input_shape.assert_is_compatible_with(output_shape)
return [output_shape]
| apache-2.0 | 7,509,335,684,078,603,000 | 32.012601 | 86 | 0.633067 | false | 3.451674 | false | false | false |
kubeflow/pipelines | samples/core/parallel_join/parallel_join.py | 1 | 1802 | #!/usr/bin/env python3
# Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp
from kfp import dsl
def gcs_download_op(url):
return dsl.ContainerOp(
name='GCS - Download',
image='google/cloud-sdk:279.0.0',
command=['sh', '-c'],
arguments=['gsutil cat $0 | tee $1', url, '/tmp/results.txt'],
file_outputs={
'data': '/tmp/results.txt',
}
)
def echo2_op(text1, text2):
return dsl.ContainerOp(
name='echo',
image='library/bash:4.4.23',
command=['sh', '-c'],
arguments=['echo "Text 1: $0"; echo "Text 2: $1"', text1, text2]
)
@dsl.pipeline(
name='parallel-pipeline',
description='Download two messages in parallel and prints the concatenated result.'
)
def download_and_join(
url1='gs://ml-pipeline/sample-data/shakespeare/shakespeare1.txt',
url2='gs://ml-pipeline/sample-data/shakespeare/shakespeare2.txt'
):
"""A three-step pipeline with first two running in parallel."""
download1_task = gcs_download_op(url1)
download2_task = gcs_download_op(url2)
echo_task = echo2_op(download1_task.output, download2_task.output)
if __name__ == '__main__':
kfp.compiler.Compiler().compile(download_and_join, __file__ + '.yaml')
| apache-2.0 | 2,698,934,252,417,545,000 | 30.614035 | 85 | 0.667037 | false | 3.412879 | false | false | false |
openstack/networking-odl | networking_odl/journal/journal.py | 1 | 10979 | # Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from datetime import datetime
import threading
import time
from neutron_lib.callbacks import registry
from neutron_lib import context as nl_context
from neutron_lib.db import api as db_api
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception
from oslo_log import log as logging
from requests import exceptions
from networking_odl.common import client
from networking_odl.common import constants as odl_const
from networking_odl.common import filters
from networking_odl.common import utils
from networking_odl.db import db
from networking_odl.journal import dependency_validations
LOG = logging.getLogger(__name__)
MAKE_URL = {}
LOG_ENTRY_TEMPLATE = ("%(log_type)s (Entry ID: %(entry_id)s) - %(op)s "
"%(obj_type)s %(obj_id)s (Time stamp: %(timestamp)s)")
LOG_RECORDED = 'Recorded'
LOG_PROCESSING = 'Processing'
LOG_COMPLETED = 'Completed'
LOG_ERROR_PROCESSING = 'Error while processing'
def call_thread_on_end(func):
def new_func(obj, *args, **kwargs):
return_value = func(obj, *args, **kwargs)
obj.journal.set_sync_event()
return return_value
return new_func
def _enrich_port(plugin_context, ml2_context, object_type, operation, data):
"""Enrich the port with additional information needed by ODL"""
# NOTE(yamahata): work around of ODL neutron northbound
# It passes security groups in port as list of dict for historical reasons.
# keep its format for compatibility.
# TODO(yamahata): drop this format conversion.
if data[odl_const.ODL_SGS]:
groups = [{'id': id_} for id_ in data['security_groups']]
else:
groups = []
new_data = copy.deepcopy(data)
new_data[odl_const.ODL_SGS] = groups
# NOTE(yamahata): work around for port creation for router
# tenant_id=''(empty string) is passed when port is created
# by l3 plugin internally for router.
# On the other hand, ODL doesn't accept empty string for tenant_id.
# In that case, deduce tenant_id from network_id for now.
# Right fix: modify Neutron so that don't allow empty string
# for tenant_id even for port for internal use.
# TODO(yamahata): eliminate this work around when neutron side
# is fixed
# assert port['tenant_id'] != ''
if ('tenant_id' not in new_data or new_data['tenant_id'] == ''):
if ml2_context:
network = ml2_context._network_context._network
else:
plugin = directory.get_plugin()
network = plugin.get_network(plugin_context,
new_data['network_id'])
new_data['tenant_id'] = network['tenant_id']
return new_data
def _log_entry(log_type, entry, log_level=logging.INFO, **kwargs):
delta = datetime.now() - datetime.min
timestamp = delta.total_seconds()
log_dict = {'log_type': log_type, 'op': entry.operation,
'obj_type': entry.object_type, 'obj_id': entry.object_uuid,
'entry_id': entry.seqnum, 'timestamp': timestamp}
LOG.log(log_level, LOG_ENTRY_TEMPLATE, log_dict, **kwargs)
def record(plugin_context, object_type, object_uuid, operation, data,
ml2_context=None):
if (object_type == odl_const.ODL_PORT and
operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE)):
data = _enrich_port(
plugin_context, ml2_context, object_type, operation, data)
# Calculate depending_on on other journal entries
depending_on = dependency_validations.calculate(
plugin_context, operation, object_type, object_uuid, data)
# NOTE(mpeterson): Between the moment that a dependency is calculated and
    # the new entry is recorded in the journal, an operation can occur that
# would make the dependency irrelevant. In that case we request a retry.
# For more details, read the commit message that introduced this comment.
try:
entry = db.create_pending_row(
plugin_context, object_type, object_uuid, operation, data,
depending_on=depending_on)
except exception.DBReferenceError as e:
raise exception.RetryRequest(e)
_log_entry(LOG_RECORDED, entry)
LOG.debug('Entry with ID %(entry_id)s depends on these entries: '
'%(depending_on)s',
{'entry_id': entry.seqnum,
'depending_on': [d.seqnum for d in depending_on]})
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def entry_complete(context, entry):
if cfg.CONF.ml2_odl.completed_rows_retention == 0:
db.delete_row(context, entry)
else:
db.update_db_row_state(context, entry, odl_const.COMPLETED)
db.delete_dependency(context, entry)
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def entry_reset(context, entry):
db.update_db_row_state(context, entry, odl_const.PENDING)
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def entry_update_state_by_retry_count(context, entry, retry_count):
db.update_pending_db_row_retry(context, entry, retry_count)
def _make_url(row):
url_object = utils.make_url_object(row.object_type)
urlpath = ''
if row.operation == odl_const.ODL_CREATE:
urlpath = url_object
else:
urlpath = url_object + '/' + row.object_uuid
return urlpath
def register_url_builder(object_type, method):
MAKE_URL[object_type] = method
def _build_url(row):
return MAKE_URL.get(row.object_type, _make_url)(row)
class OpenDaylightJournalThread(object):
"""Thread worker for the OpenDaylight Journal Database."""
# make those parameter configurable?
_RETRY_SLEEP_MIN = 0.1
_RETRY_SLEEP_MAX = 60
def __init__(self, start_thread=True):
self.client = client.OpenDaylightRestClient.create_client()
self._max_retry_count = cfg.CONF.ml2_odl.retry_count
self._sleep_time = self._RETRY_SLEEP_MIN
self.event = threading.Event()
self._odl_sync_thread = self._create_odl_sync_thread()
self._odl_sync_thread_stop = threading.Event()
if start_thread:
self.start()
def _create_odl_sync_thread(self):
return threading.Thread(name='sync', target=self.run_sync_thread)
def start(self):
# Start the sync thread
LOG.debug("Starting a new sync thread")
if self._odl_sync_thread_stop.is_set():
self._odl_sync_thread_stop.clear()
self._odl_sync_thread = self._create_odl_sync_thread()
if not self._odl_sync_thread.is_alive():
self._odl_sync_thread.start()
def stop(self, timeout=None):
"""Allows to stop the sync thread.
Args:
timeout (float): Time in seconds to wait for joining or None for
no timeout.
"""
# Stop the sync thread
LOG.debug("Stopping the sync thread")
if self._odl_sync_thread.is_alive():
self._odl_sync_thread_stop.set()
# Process the journal one last time before stopping.
self.set_sync_event()
self._odl_sync_thread.join(timeout)
def set_sync_event(self):
self.event.set()
@staticmethod
def _json_data(row):
data = copy.deepcopy(row.data)
filters.filter_for_odl(row.object_type, row.operation, data)
if row.operation == odl_const.ODL_CREATE:
method = 'post'
to_send = {row.object_type: data}
elif row.operation == odl_const.ODL_UPDATE:
method = 'put'
to_send = {row.object_type: data}
elif row.operation == odl_const.ODL_DELETE:
method = 'delete'
to_send = None
return method, _build_url(row), to_send
def run_sync_thread(self):
while not self._odl_sync_thread_stop.is_set():
try:
self.event.wait()
self.event.clear()
self.sync_pending_entries()
except Exception:
# Catch exceptions to protect the thread while running
LOG.exception("Error on run_sync_thread")
def sync_pending_entries(self):
LOG.debug("Start processing journal entries")
context = nl_context.get_admin_context()
entry = db.get_oldest_pending_db_row_with_lock(context)
if entry is None:
LOG.debug("No journal entries to process")
return
while entry is not None:
stop_processing = self._sync_entry(context, entry)
if stop_processing:
break
entry = db.get_oldest_pending_db_row_with_lock(context)
LOG.debug("Finished processing journal entries")
def _retry_sleep(self):
# When something happened in the connection to ODL, don't busy loop
# because it's likely to hit same issue.
# Wait for a while for recovery
time.sleep(self._sleep_time)
self._sleep_time = min(self._sleep_time * 2, self._RETRY_SLEEP_MAX)
def _retry_reset(self):
self._sleep_time = self._RETRY_SLEEP_MIN
def _sync_entry(self, context, entry):
_log_entry(LOG_PROCESSING, entry)
method, urlpath, to_send = self._json_data(entry)
# TODO(mkolesni): This logic is weirdly written, need to refactor it.
try:
self.client.sendjson(method, urlpath, to_send)
registry.notify(entry.object_type, odl_const.BEFORE_COMPLETE,
self, context=context, operation=entry.operation,
row=entry)
entry_complete(context, entry)
self._retry_reset()
_log_entry(LOG_COMPLETED, entry)
except exceptions.ConnectionError:
# Don't raise the retry count, just log an error & break
entry_reset(context, entry)
LOG.error("Cannot connect to the OpenDaylight Controller,"
" will not process additional entries")
self._retry_sleep()
return True
except Exception:
_log_entry(LOG_ERROR_PROCESSING, entry,
log_level=logging.ERROR, exc_info=True)
entry_update_state_by_retry_count(
context, entry, self._max_retry_count)
return False
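# Illustrative sketch, not used by the module itself: the intended lifecycle
# of the journal thread above. It assumes the usual [ml2_odl] configuration
# (ODL URL, credentials, retry_count) has already been registered and loaded.
def _journal_thread_lifecycle_example():
    journal = OpenDaylightJournalThread(start_thread=False)
    journal.start()            # spawn the background sync thread
    journal.set_sync_event()   # wake it up to process pending journal rows
    journal.stop(timeout=5.0)  # process once more, then join the thread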
| apache-2.0 | 8,686,914,835,769,035,000 | 35.596667 | 79 | 0.637034 | false | 3.726748 | false | false | false |
tgbugs/pyontutils | ilxutils/ilxutils/nltklib.py | 1 | 6030 | '''
Run nltk.download(['wordnet', 'stopwords', 'punkt']) if those corpora are not
already downloaded. Add entries to wordnet if you need more reference words to
compare against.
'''
from nltk import word_tokenize, pos_tag
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords
from fuzzywuzzy import fuzz, process
stop_words = stopwords.words('english')
states = {
'ak': 'alaska',
'al': 'alabama',
'ar': 'arkansas',
'as': 'american samoa',
'az': 'arizona',
'ca': 'california',
'co': 'colorado',
'ct': 'connecticut',
'dc': 'district of columbia',
'de': 'delaware',
'fl': 'florida',
'ga': 'georgia',
'gu': 'guam',
'hi': 'hawaii',
'ia': 'iowa',
'id': 'idaho',
'il': 'illinois',
'in': 'indiana',
'ks': 'kansas',
'ky': 'kentucky',
'la': 'louisiana',
'ma': 'massachusetts',
'md': 'maryland',
'me': 'maine',
'mi': 'michigan',
'mn': 'minnesota',
'mo': 'missouri',
'mp': 'northern mariana islands',
'ms': 'mississippi',
'mt': 'montana',
'na': 'national',
'nc': 'north carolina',
'nd': 'north dakota',
'ne': 'nebraska',
'nh': 'new hampshire',
'nj': 'new jersey',
'nm': 'new mexico',
'nv': 'nevada',
'ny': 'new york',
'oh': 'ohio',
'ok': 'oklahoma',
'or': 'oregon',
'pa': 'pennsylvania',
'pr': 'puerto rico',
'ri': 'rhode island',
'sc': 'south carolina',
'sd': 'south dakota',
'tn': 'tennessee',
'tx': 'texas',
'ut': 'utah',
'va': 'virginia',
'vi': 'virgin islands',
'vt': 'vermont',
'wa': 'washington',
'wi': 'wisconsin',
'wv': 'west virginia',
'wy': 'wyoming'
}
def penn_to_wn(tag):
""" Convert between a Penn Treebank tag to a simplified Wordnet tag """
if tag.startswith('N'):
return 'n'
if tag.startswith('V'):
return 'v'
if tag.startswith('J'):
return 'a'
if tag.startswith('R'):
return 'r'
return None
def tagged_to_synset(word, tag):
wn_tag = penn_to_wn(tag)
# wn_tag is None if no definition is found
if wn_tag is None:
return word
    try:
        # most probable english word
        return wn.synsets(word, wn_tag)[0]
    except IndexError:
        # wordnet has no synset for this word (e.g. abbreviations or numbers)
        return word
def fix_state_abbrev(tokens):
token = [
states[token] if states.get(token) else token
for token in tokens
]
return token
def clean_tokens(tokens, ignore_integers=False):
punctuations = ['(',')',';',':','[',']',',','.','/']
keywords = [
word for word in tokens
if not word in stop_words and not word in punctuations
]
keywords = fix_state_abbrev(keywords)
if ignore_integers:
keywords = [word for word in keywords if not is_possible_integer(word)]
return keywords
def clean(word):
word = str(word).lower().strip()
punctuations = ['(',')',';',':','[',']',',','.','/']
for punctuation in punctuations:
word = word.replace(punctuation, '')
return word
def is_possible_integer(word):
try:
int(word)
return True
except:
return False
def sentence_similarity(sentence1, sentence2, ignore_integers=False):
""" compute the sentence similarity using Wordnet """
# Tokenize and tag
sentence1 = ' '.join([clean(word) for word in sentence1.split()])
sentence2 = ' '.join([clean(word) for word in sentence2.split()])
tokens1 = word_tokenize(sentence1)
tokens2 = word_tokenize(sentence2)
tokens1 = clean_tokens(tokens1, ignore_integers)
tokens2 = clean_tokens(tokens2, ignore_integers)
# tag
sentence1 = pos_tag(tokens1)
sentence2 = pos_tag(tokens2)
# Get the synsets for the tagged words
synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]
synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]
# Filter out the Nones
synsets1 = [ss for ss in synsets1 if ss]
synsets2 = [ss for ss in synsets2 if ss]
score, count = 0.0, 0.0
# For each word in the first sentence
for synset1 in synsets1:
# Get the similarity value of the most similar word in the other sentence
        best_score = [
wn.path_similarity(synset1, synset2)
if not isinstance(synset1, str) and not isinstance(synset2, str)
# just in case there are scientific words wordnet does not have
else fuzz.ratio(str(synset1), str(synset2)) / 100
for synset2 in synsets2
]
        best_score = [s if s else 0 for s in best_score]
# print(synsets1, synsets2)
# Check that the similarity could have been computed
if best_score:
score += max(best_score)
count += 1
# Average the values
if count > 0:
score /= count
else:
score = 0
return score
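# Illustrative sketch, not used by the module itself: a minimal call to
# sentence_similarity. The score is an average of per-word similarities in
# [0, 1]; the corpora listed in the module docstring must be downloaded first.
def _similarity_example():
    return sentence_similarity(
        'life is good in pa', 'life is good in pennsylvania')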
def get_tokenized_sentence(sentence):
# Tokenize and tag
sentence = pos_tag(word_tokenize(sentence))
# Get the synsets for the tagged words
synsets = []
for tagged_word in sentence:
synset = tagged_to_synset(*tagged_word)
if synset:
synsets.append(synset)
else:
synsets.append(tagged_word[0])
return synsets # str(sorted(synsets))
def main():
sentences = [
"life is good in pa 92092",
"life is good in pa",
"life is good within pa 92092/2",
"life is good pa 92092/2",
"life is good in pa 92092/2",
"testing for difference"
]
focus_sentence = "life is good in pennsylvania"
for sentence in sentences:
# print ("Similarity(\"%s\", \"%s\") = %s" % (focus_sentence, sentence, sentence_similarity(focus_sentence, sentence)))
print ("Similarity(\"%s\", \"%s\") = %s" % (focus_sentence, sentence, sentence_similarity(focus_sentence, sentence, ignore_integers=True)))
# print(sentence_similarity(focus_sentence, sentences[2], ignore_integers=True))
if __name__ == '__main__':
main()
| mit | -8,744,749,470,656,482,000 | 27.443396 | 147 | 0.587231 | false | 3.268293 | false | false | false |