code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2–1.05M)
---|---|---|---|---|---|
"""Test the csv/json export functionality."""
import binascii
import textwrap
import dnstwister.tools
import patches
from dnstwister.core.domain import Domain
def test_csv_export(webapp, monkeypatch):
"""Test CSV export"""
monkeypatch.setattr(
'dnstwister.tools.resolve', lambda domain: ('999.999.999.999', False)
)
domain = Domain('a.com')
hexdomain = domain.to_hex()
response = webapp.get('/search/{}/csv'.format(hexdomain))
assert response.headers['Content-Disposition'] == 'attachment; filename=dnstwister_report_a.com.csv'
assert '\n'.join(sorted(response.text.strip().split('\n'))) == textwrap.dedent("""
Domain,Type,Tweak,IP,Error
a.com,Addition,aa.com,999.999.999.999,False
a.com,Addition,ab.com,999.999.999.999,False
a.com,Addition,ac.com,999.999.999.999,False
a.com,Addition,ad.com,999.999.999.999,False
a.com,Addition,ae.com,999.999.999.999,False
a.com,Addition,af.com,999.999.999.999,False
a.com,Addition,ag.com,999.999.999.999,False
a.com,Addition,ah.com,999.999.999.999,False
a.com,Addition,ai.com,999.999.999.999,False
a.com,Addition,aj.com,999.999.999.999,False
a.com,Addition,ak.com,999.999.999.999,False
a.com,Addition,al.com,999.999.999.999,False
a.com,Addition,am.com,999.999.999.999,False
a.com,Addition,an.com,999.999.999.999,False
a.com,Addition,ao.com,999.999.999.999,False
a.com,Addition,ap.com,999.999.999.999,False
a.com,Addition,aq.com,999.999.999.999,False
a.com,Addition,ar.com,999.999.999.999,False
a.com,Addition,as.com,999.999.999.999,False
a.com,Addition,at.com,999.999.999.999,False
a.com,Addition,au.com,999.999.999.999,False
a.com,Addition,av.com,999.999.999.999,False
a.com,Addition,aw.com,999.999.999.999,False
a.com,Addition,ax.com,999.999.999.999,False
a.com,Addition,ay.com,999.999.999.999,False
a.com,Addition,az.com,999.999.999.999,False
a.com,Bitsquatting,c.com,999.999.999.999,False
a.com,Bitsquatting,e.com,999.999.999.999,False
a.com,Bitsquatting,i.com,999.999.999.999,False
a.com,Bitsquatting,q.com,999.999.999.999,False
a.com,Original*,a.com,999.999.999.999,False
a.com,Replacement,1.com,999.999.999.999,False
a.com,Replacement,2.com,999.999.999.999,False
a.com,Replacement,s.com,999.999.999.999,False
a.com,Replacement,w.com,999.999.999.999,False
a.com,Replacement,y.com,999.999.999.999,False
a.com,Replacement,z.com,999.999.999.999,False
a.com,Various,acom.com,999.999.999.999,False
a.com,Various,wwa.com,999.999.999.999,False
a.com,Various,www-a.com,999.999.999.999,False
a.com,Various,wwwa.com,999.999.999.999,False
a.com,Vowel swap,o.com,999.999.999.999,False
a.com,Vowel swap,u.com,999.999.999.999,False
""").strip()
def test_json_export(webapp, monkeypatch):
"""Test JSON export"""
monkeypatch.setattr(
'dnstwister.tools.dnstwist.DomainFuzzer', patches.SimpleFuzzer
)
monkeypatch.setattr(
'dnstwister.tools.resolve', lambda domain: ('999.999.999.999', False)
)
domain = Domain('a.com')
path = domain.to_hex()
response = webapp.get('/search/{}/json'.format(path))
assert response.headers['Content-Disposition'] == 'attachment; filename=dnstwister_report_a.com.json'
assert response.json == {
u'a.com': {
u'fuzzy_domains': [
{
u'domain-name': u'a.com',
u'fuzzer': u'Original*',
u'hex': u'612e636f6d',
u'resolution': {
u'error': False,
u'ip': u'999.999.999.999'
}
},
{
u'domain-name': u'a.co',
u'fuzzer': u'Pretend',
u'hex': u'612e636f',
u'resolution': {
u'error': False,
u'ip': u'999.999.999.999'
}
}
]
}
}
def test_json_export_one_domain(webapp, monkeypatch):
"""Test JSON export when no reports"""
monkeypatch.setattr(
'dnstwister.tools.dnstwist.DomainFuzzer', patches.SimpleFuzzer
)
monkeypatch.setattr(
'dnstwister.tools.resolve', lambda domain: ('999.999.999.999', False)
)
domains = ('a.com',)
path = ','.join([Domain(d).to_hex() for d in domains])
response = webapp.get('/search/{}/json'.format(path))
assert response.headers['Content-Disposition'] == 'attachment; filename=dnstwister_report_a.com.json'
assert response.json == {
u'a.com': {
u'fuzzy_domains': [
{
u'domain-name': u'a.com',
u'fuzzer': u'Original*',
u'hex': u'612e636f6d',
u'resolution': {
u'error': False,
u'ip': u'999.999.999.999'
}
},
{
u'domain-name': u'a.co',
u'fuzzer': u'Pretend',
u'hex': u'612e636f',
u'resolution': {
u'error': False,
u'ip': u'999.999.999.999'
}
}
]
}
}
def test_json_export_no_fuzzy(webapp, monkeypatch):
"""Test JSON export when no fuzzy domains."""
monkeypatch.setattr(
'dnstwister.tools.dnstwist.DomainFuzzer', patches.NoFuzzer
)
monkeypatch.setattr(
'dnstwister.tools.resolve', lambda domain: ('999.999.999.999', False)
)
domains = ('a.com',)
path = ','.join([Domain(d).to_hex() for d in domains])
response = webapp.get('/search/{}/json'.format(path))
assert response.headers['Content-Disposition'] == 'attachment; filename=dnstwister_report_a.com.json'
assert response.json == {
u'a.com': {
u'fuzzy_domains': [
{
u'domain-name': u'a.com',
u'fuzzer': u'Original*',
u'hex': u'612e636f6d',
u'resolution': {
u'error': False,
u'ip': u'999.999.999.999'
}
}
]
}
}
def test_json_export_formatting(webapp, monkeypatch):
"""Test JSON export looks nice :)"""
monkeypatch.setattr(
'dnstwister.tools.dnstwist.DomainFuzzer', patches.SimpleFuzzer
)
monkeypatch.setattr(
'dnstwister.tools.resolve', lambda domain: ('999.999.999.999', False)
)
domain = 'a.com'
path = Domain(domain).to_hex()
response = webapp.get('/search/{}/json'.format(path))
assert response.headers['Content-Disposition'] == 'attachment; filename=dnstwister_report_a.com.json'
assert response.text.strip() == textwrap.dedent("""
{
"a.com": {
"fuzzy_domains": [
{
"domain-name": "a.com",
"fuzzer": "Original*",
"hex": "612e636f6d",
"resolution": {
"error": false,
"ip": "999.999.999.999"
}
},
{
"domain-name": "a.co",
"fuzzer": "Pretend",
"hex": "612e636f",
"resolution": {
"error": false,
"ip": "999.999.999.999"
}
}
]
}
}
""").strip()
def test_failed_export(webapp):
"""Test unknown-format export"""
domain = 'a.com'
hexdomain = Domain(domain).to_hex()
response = webapp.get('/search/{}/xlsx'.format(hexdomain), expect_errors=True)
assert response.status_code == 400
def test_links_on_report(webapp):
"""Make sure the export links are working."""
domain = Domain('a.com')
hexdomain = domain.to_hex()
page_html = webapp.get('/search/{}'.format(hexdomain)).text
assert '/search/{}/csv'.format(hexdomain) in page_html
assert '/search/{}/json'.format(hexdomain) in page_html
def test_json_export_unicode_domain(webapp, monkeypatch):
"""Test JSON export when no reports"""
monkeypatch.setattr(
'dnstwister.tools.dnstwist.DomainFuzzer', patches.SimpleFuzzer
)
monkeypatch.setattr(
'dnstwister.tools.resolve', lambda domain: ('999.999.999.999', False)
)
domain = u'a\u00E0.com' # almost 'aa.com'
hexdomain = Domain(domain).to_hex()
response = webapp.get('/search/{}/json'.format(hexdomain))
assert response.headers['Content-Disposition'] == 'attachment; filename=dnstwister_report_xn--a-sfa.com.json'
assert response.json == {
u'xn--a-sfa.com': {
u'fuzzy_domains': [
{
u'domain-name': u'xn--a-sfa.com',
u'fuzzer': u'Original*',
u'hex': u'786e2d2d612d7366612e636f6d',
u'resolution': {
u'error': False,
u'ip': u'999.999.999.999'
}
},
{
u'domain-name': u'xn--a-sfa.co',
u'fuzzer': u'Pretend',
u'hex': u'786e2d2d612d7366612e636f',
u'resolution': {
u'error': False,
u'ip': u'999.999.999.999'
}
}
]
}
}
def test_unicode_csv_export(webapp, monkeypatch):
"""Test CSV export with Unicode"""
monkeypatch.setattr(
'dnstwister.tools.resolve', lambda domain: ('999.999.999.999', False)
)
domain = u'a\u00E0.com' # almost 'aa.com'
hexdomain = Domain(domain).to_hex()
response = webapp.get('/search/{}/csv'.format(hexdomain))
assert response.headers['Content-Disposition'] == 'attachment; filename=dnstwister_report_xn--a-sfa.com.csv'
assert '\n'.join(sorted(response.text.strip().split('\n'))) == textwrap.dedent("""
Domain,Type,Tweak,IP,Error
xn--a-sfa.com,Addition,xn--aa-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--ab-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--ac-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--ad-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--ae-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--af-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--ag-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--ah-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--ai-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--aj-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--ak-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--al-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--am-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--an-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--ao-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--ap-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--aq-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--ar-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--as-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--at-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--au-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--av-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--aw-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--ax-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--ay-jia.com,999.999.999.999,False
xn--a-sfa.com,Addition,xn--az-jia.com,999.999.999.999,False
xn--a-sfa.com,Bitsquatting,xn--c-sfa.com,999.999.999.999,False
xn--a-sfa.com,Bitsquatting,xn--e-sfa.com,999.999.999.999,False
xn--a-sfa.com,Bitsquatting,xn--i-sfa.com,999.999.999.999,False
xn--a-sfa.com,Bitsquatting,xn--q-sfa.com,999.999.999.999,False
xn--a-sfa.com,Homoglyph,xn--0ca15e.com,999.999.999.999,False
xn--a-sfa.com,Homoglyph,xn--0ca3e.com,999.999.999.999,False
xn--a-sfa.com,Homoglyph,xn--0ca743m.com,999.999.999.999,False
xn--a-sfa.com,Homoglyph,xn--0ca76d.com,999.999.999.999,False
xn--a-sfa.com,Homoglyph,xn--0ca7e.com,999.999.999.999,False
xn--a-sfa.com,Homoglyph,xn--0ca98b.com,999.999.999.999,False
xn--a-sfa.com,Homoglyph,xn--0caa.com,999.999.999.999,False
xn--a-sfa.com,Homoglyph,xn--0cab.com,999.999.999.999,False
xn--a-sfa.com,Homoglyph,xn--0cad.com,999.999.999.999,False
xn--a-sfa.com,Homoglyph,xn--0caf.com,999.999.999.999,False
xn--a-sfa.com,Homoglyph,xn--0cah.com,999.999.999.999,False
xn--a-sfa.com,Homoglyph,xn--0caj.com,999.999.999.999,False
xn--a-sfa.com,Hyphenation,xn--a--kia.com,999.999.999.999,False
xn--a-sfa.com,Omission,a.com,999.999.999.999,False
xn--a-sfa.com,Omission,xn--0ca.com,999.999.999.999,False
xn--a-sfa.com,Original*,xn--a-sfa.com,999.999.999.999,False
xn--a-sfa.com,Repetition,xn--a-sfaa.com,999.999.999.999,False
xn--a-sfa.com,Repetition,xn--aa-kia.com,999.999.999.999,False
xn--a-sfa.com,Replacement,xn--1-sfa.com,999.999.999.999,False
xn--a-sfa.com,Replacement,xn--2-sfa.com,999.999.999.999,False
xn--a-sfa.com,Replacement,xn--s-sfa.com,999.999.999.999,False
xn--a-sfa.com,Replacement,xn--w-sfa.com,999.999.999.999,False
xn--a-sfa.com,Replacement,xn--y-sfa.com,999.999.999.999,False
xn--a-sfa.com,Replacement,xn--z-sfa.com,999.999.999.999,False
xn--a-sfa.com,Subdomain,a.xn--0ca.com,999.999.999.999,False
xn--a-sfa.com,Transposition,xn--a-rfa.com,999.999.999.999,False
xn--a-sfa.com,Various,xn--acom-0na.com,999.999.999.999,False
xn--a-sfa.com,Various,xn--wwa-cla.com,999.999.999.999,False
xn--a-sfa.com,Various,xn--www-a-vqa.com,999.999.999.999,False
xn--a-sfa.com,Various,xn--wwwa-3na.com,999.999.999.999,False
xn--a-sfa.com,Vowel swap,xn--o-sfa.com,999.999.999.999,False
xn--a-sfa.com,Vowel swap,xn--u-sfa.com,999.999.999.999,False
""").strip()
| thisismyrobot/dnstwister | tests/test_exports.py | Python | unlicense | 14,760 |
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""ML Fairness gym location-based attention allocation environment.
This environment is meant to be a general but simple location-based
attention allocation environment.
Situations that could be modeled by this environment are pest-control, or
allocation of social interventions like mobile STD testing clinics.
This is not a good environment to model predictive policing in part due to the
concerns raised in "Dirty Data, Bad Predictions: How Civil Rights Violations
Impact Police Data, Predictive Policing Systems, and Justice", Richardson et al.
(https://www.nyulawreview.org/wp-content/uploads/2019/04/NYULawReview-94-Richardson-Schultz-Crawford.pdf)
The environment contains k locations. At each timestep, some number of incidents
occur (such as rat infestations) in each location, and the agent has to allocate
some of its attention to each location. Each attention unit may then "see" or
discover some number of the incidents.
Incidents are drawn from a Poisson centered on an underlying incident rate.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from typing import List, Optional, Tuple
import attr
import core
from spaces import multinomial
from gym import spaces
import numpy as np
from six.moves import range
@attr.s
class Params(core.Params):
"""Attention Allocation parameters."""
# Number of locations represented in the environment.
n_locations = attr.ib(default=2) # type: int
# Prior counts of incidents for each location.
prior_incident_counts = attr.ib(default=(650, 250)) # type: Tuple[int, ...]
# Incident rates for each location.
incident_rates = attr.ib(factory=lambda: [4., 3.]) # type: List[float]
  # Discovered incidents weight - weights how many incidents are discovered,
  # i.e. incidents for which attention needs to be allocated in order to be
  # discovered.
discovered_incident_weight = attr.ib(default=1.0) # type: float
  # Reported incidents weight - weights how many incidents are reported, which
# are observed from a location without attention being allocated there.
reported_incident_weight = attr.ib(default=0.0) # type: float
# Number of attention units that are available to be allocated.
n_attention_units = attr.ib(default=1) # type: int
# If True, each unit of attention can discover more than one incident.
# If False, each unit of attention can discover at most one incident.
attention_replacement = attr.ib(default=False) # type: bool
  # Probability an incident is missed by an attention unit for each location.
miss_incident_prob = attr.ib(default=(0.2, 0.2)) # type: Tuple[float, ...]
# Probability an incident is falsely reported without being discovered.
extra_incident_prob = attr.ib(default=(0.0, 0.0)) # type: Tuple[float, ...]
# The rate at which the incident_rates change in response to allocation
# of attention units.
dynamic_rate = attr.ib(default=0.0) # type: float
# Location feature parameters.
# Means and covariances of the multivariate gaussians for the features.
feature_means = attr.ib(factory=lambda: [1., 1.])
feature_covariances = attr.ib(factory=lambda: [[0.8, 0.0], [0.0, 0.7]])
# Vector with coefficients to control the correlation between features and
# underlying incident rates.
feature_coefficients = attr.ib(default=(0, 1))
@attr.s(cmp=False)
class State(core.State):
"""Attention Allocation state."""
# Parameters.
params = attr.ib() # type: Params
# A ndarray of integers representing the incidents seen at each location
incidents_seen = attr.ib() # type: np.ndarray
# A ndarray of integers representing the incidents reported for each location.
incidents_reported = attr.ib() # type: np.ndarray
  # A ndarray of integers representing the incidents that occurred at each location.
incidents_occurred = attr.ib() # type: np.ndarray
# A ndarray of floats representing features for each location.
location_features = attr.ib() # type: np.ndarray
# Random state.
rng = attr.ib(factory=np.random.RandomState) # type: np.random.RandomState
def _sample_incidents(rng, params):
"""Generates new crimeincident occurrences across locations.
Args:
rng: A numpy RandomState() object acting as a random number generator.
params: A Params instance for this environment.
  Returns:
    incidents_occurred: a list of integers of the number of incidents that
      occurred for each location, which could be discovered by attention.
    reported_incidents: a list of integers of the number of incidents reported
      directly for each location.
"""
# pylint: disable=g-complex-comprehension
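  # The comprehension below draws, for each location i, a length-2 Poisson
  # sample whose means are the location's incident rate scaled by
  # discovered_incident_weight and reported_incident_weight respectively. For
  # example, with the default rates [4., 3.] and weights (1.0, 0.0), location 0
  # draws occurred incidents from Poisson(4.0) and reported incidents from
  # Poisson(0.0), i.e. none are reported directly.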
crimes = [
rng.poisson([
params.incident_rates[i] * params.discovered_incident_weight,
params.incident_rates[i] * params.reported_incident_weight
]) for i in range(params.n_locations)
]
incidents_occurred, reported_incidents = np.hsplit(np.asarray(crimes), 2)
return incidents_occurred.flatten(), reported_incidents.flatten()
def _get_location_features(params, rng, incidents_occurred):
"""Returns a matrix of float features for each location.
Calculates new feature means based on incidents occurred and draws features
from a multivariate gaussian distribution using the parameter defined means
and covariances.
Args:
params: A Params instance for this environment.
rng: A numpy RandomState() object acting as a random number generator.
    incidents_occurred: A list of integers of the number of incidents that
      occurred at each location.
Returns:
A numpy array of n_locations by number of features.
"""
  # Shift feature means based on the incidents that occurred to make an
  # (n_locations x n_features) matrix where each row holds the feature means
  # for one location at this step.
shifted_feature_means = params.feature_means + np.outer(
incidents_occurred, params.feature_coefficients)
feature_noise = rng.multivariate_normal(
np.zeros_like(params.feature_means),
params.feature_covariances,
size=params.n_locations)
return shifted_feature_means + feature_noise
def _update_state(state, incidents_occurred, incidents_reported, action):
"""Updates the state given the agents' action.
This function simulates attention discovering incidents in order to determine
and populate the number of seen incidents in the state.
Args:
state: a 'State' object with the state to be updated.
    incidents_occurred: a vector of length equal to n_locations in state.params
      that contains integer counts of incidents that occurred for each location.
    incidents_reported: a vector of length equal to n_locations in state.params
      that contains integer counts of incidents that are reported for each
location.
action: an action in the action space of LocationAllocationEnv that is a
vector of integer counts of attention allocated to each location.
"""
params = state.params
if params.attention_replacement:
discover_probability = 1 - (np.power(params.miss_incident_prob, action))
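    # Worked example: with miss_incident_prob=(0.2, 0.2) and action=(2, 0),
    # each incident at location 0 is discovered with probability
    # 1 - 0.2**2 = 0.96, while location 1 (no attention) has probability 0.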
incidents_seen = [
state.rng.binomial(incidents_occurred[i], discover_probability[i])
for i in range(params.n_locations)
]
else:
    # Attention units are without replacement, so each unit can only catch one
    # crime.
incidents_seen = [0] * params.n_locations
for location_ind in range(params.n_locations):
unused_attention = action[location_ind]
# Iterate over crime incidents and determine if each one is "caught".
for _ in range(incidents_occurred[location_ind]):
incidents_discovered = state.rng.binomial(
1, 1 - (np.power(params.miss_incident_prob[location_ind],
unused_attention)))
unused_attention -= incidents_discovered
incidents_seen[location_ind] += incidents_discovered
if unused_attention <= 0:
          # Terminate the for loop early because there is no attention left.
break
      # If there are unused attention units, have them generate false incidents.
for _ in range(unused_attention):
incidents_seen[location_ind] += state.rng.binomial(
1, params.extra_incident_prob[location_ind])
# Handle dynamics.
for location_ind in range(params.n_locations):
attention = action[location_ind]
if attention == 0:
params.incident_rates[location_ind] += params.dynamic_rate
else:
params.incident_rates[location_ind] = max(
0.0, params.incident_rates[location_ind] -
(params.dynamic_rate * attention))
state.location_features = _get_location_features(params, state.rng,
incidents_occurred)
state.incidents_occurred = np.asarray(incidents_occurred)
state.incidents_seen = np.asarray(incidents_seen)
state.incidents_reported = np.asarray(incidents_reported)
class LocationAllocationEnv(core.FairnessEnv):
"""Location based allocation environment.
In each step, agent allocates attention across locations. Environment then
simulates seen incidents based on incidents that occurred and attention
distribution.
Incidents are generated from a poisson distribution of underlying incidents
rates for each location.
"""
def __init__(self, params = None):
if params is None:
params = Params()
self.action_space = multinomial.Multinomial(params.n_locations,
params.n_attention_units)
assert (params.n_locations == len(params.prior_incident_counts) and
params.n_locations == len(params.incident_rates))
# Define the observation space.
# Crimes seen is multidiscrete because it may not sum to n_attention_units.
# MultiDiscrete uses dtype=np.int32.
if params.attention_replacement:
      # If there is attention replacement, the number of attention units does
      # not bound the incidents_seen.
incidents_seen_space = spaces.MultiDiscrete([np.iinfo(np.int32).max] *
params.n_locations)
else:
incidents_seen_space = spaces.MultiDiscrete(
[params.n_attention_units + 1] * params.n_locations)
incidents_reported_space = spaces.MultiDiscrete([np.iinfo(np.int32).max] *
params.n_locations)
n_features = len(params.feature_means)
location_features_space = spaces.Box(
low=-np.inf,
high=np.inf,
shape=(params.n_locations, n_features),
dtype=np.float32)
# The first observation from this state is not necessarily contained by this
# observation space. It conveys a prior of the initial incident counts.
self.observable_state_vars = {
'incidents_seen': incidents_seen_space,
'incidents_reported': incidents_reported_space,
'location_features': location_features_space
}
super(LocationAllocationEnv, self).__init__(params)
self._state_init()
def _state_init(self, rng=None):
n_locations = self.initial_params.n_locations
self.state = State(
rng=rng or np.random.RandomState(),
params=copy.deepcopy(self.initial_params),
incidents_seen=np.zeros(n_locations, dtype='int64'),
incidents_reported=np.zeros(n_locations, dtype='int64'),
incidents_occurred=np.zeros(n_locations, dtype='int64'),
location_features=np.zeros(
(n_locations, len(self.initial_params.feature_means))))
def reset(self):
"""Resets the environment."""
self._state_init(self.state.rng)
return super(LocationAllocationEnv, self).reset()
def _is_done(self):
"""Never returns true because there is no end case to this environment."""
return False
def _step_impl(self, state, action):
"""Run one timestep of the environment's dynamics.
    In a step, the agent allocates attention across districts. The environment
    then returns incidents seen as an observation based on the actual hidden
incident occurrences and attention allocation.
Args:
state: A 'State' object containing the current state.
action: An action in 'action space'.
Returns:
A 'State' object containing the updated state.
"""
incidents_occurred, reported_incidents = _sample_incidents(
state.rng, state.params)
_update_state(state, incidents_occurred, reported_incidents, action)
return state
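# ---------------------------------------------------------------------------
# Illustrative usage (a minimal sketch, not part of the original module). It
# assumes core.FairnessEnv follows the usual gym-style reset()/step() API,
# returning (observation, reward, done, info) from step(), and that the
# Multinomial action space implements sample(); adapt the unpacking if the
# actual signatures differ.
if __name__ == '__main__':
  env = LocationAllocationEnv()  # default Params: 2 locations, 1 attention unit
  observation = env.reset()
  for _ in range(5):
    action = env.action_space.sample()  # random allocation of attention units
    observation, _, _, _ = env.step(action)
    print(action, observation)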
| google/ml-fairness-gym | environments/attention_allocation.py | Python | apache-2.0 | 13,033 |
import os
import pytest
import sdk_install
import sdk_networks
import sdk_utils
from tests import config
overlay_nostrict = pytest.mark.skipif(os.environ.get("SECURITY") == "strict",
reason="overlay tests currently broken in strict")
@pytest.fixture(scope='module', autouse=True)
def configure_package(configure_security):
try:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
sdk_install.install(
config.PACKAGE_NAME,
config.SERVICE_NAME,
config.DEFAULT_TASK_COUNT,
additional_options=sdk_networks.ENABLE_VIRTUAL_NETWORKS_OPTIONS)
yield # let the test session execute
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.mark.sanity
@pytest.mark.smoke
@pytest.mark.overlay
@overlay_nostrict
@pytest.mark.dcos_min_version('1.9')
def test_install():
sdk_networks.check_task_network("template-0-node")
| vishnu2kmohan/dcos-commons | frameworks/template/tests/test_overlay.py | Python | apache-2.0 | 945 |
from django.urls import re_path
from .views import PrivateStorageView
urlpatterns = [
re_path(r'^(?P<path>.*)$', PrivateStorageView.as_view(), name='serve_private_file'),
]
| edoburu/django-private-storage | private_storage/urls.py | Python | apache-2.0 | 179 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "boot.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| micbuz/project2 | boot/manage.py | Python | apache-2.0 | 802 |
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Exception for errors raised while interpreting nodes.
"""
class NodeException(Exception):
"""Base class for errors raised while interpreting nodes."""
def __init__(self, *msg):
"""Set the error message."""
self.msg = ' '.join(msg)
def __str__(self):
"""Return the message."""
return repr(self.msg)
| ChristopheVuillot/qiskit-sdk-py | qiskit/qasm/_node/_nodeexception.py | Python | apache-2.0 | 1,054 |
import os
from jenkins_jobs import cmd
from tests.base import mock
from tests.cmd.test_cmd import CmdTestsBase
@mock.patch('jenkins_jobs.builder.Jenkins.get_plugins_info', mock.MagicMock)
class DeleteTests(CmdTestsBase):
@mock.patch('jenkins_jobs.cmd.Builder.delete_job')
def test_delete_single_job(self, delete_job_mock):
"""
Test handling the deletion of a single Jenkins job.
"""
args = self.parser.parse_args(['delete', 'test_job'])
cmd.execute(args, self.config) # passes if executed without error
@mock.patch('jenkins_jobs.cmd.Builder.delete_job')
def test_delete_multiple_jobs(self, delete_job_mock):
"""
Test handling the deletion of multiple Jenkins jobs.
"""
args = self.parser.parse_args(['delete', 'test_job1', 'test_job2'])
cmd.execute(args, self.config) # passes if executed without error
@mock.patch('jenkins_jobs.builder.Jenkins.delete_job')
def test_delete_using_glob_params(self, delete_job_mock):
"""
Test handling the deletion of multiple Jenkins jobs using the glob
parameters feature.
"""
args = self.parser.parse_args(['delete',
'--path',
os.path.join(self.fixtures_path,
'cmd-002.yaml'),
'*bar*'])
cmd.execute(args, self.config)
calls = [mock.call('bar001'), mock.call('bar002')]
delete_job_mock.assert_has_calls(calls, any_order=True)
self.assertEqual(delete_job_mock.call_count, len(calls),
"Jenkins.delete_job() was called '%s' times when "
"expected '%s'" % (delete_job_mock.call_count,
len(calls)))
| lukas-bednar/jenkins-job-builder | tests/cmd/subcommands/test_delete.py | Python | apache-2.0 | 1,878 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from pecan.testing import load_test_app
from tuskar.db.sqlalchemy import models as db_models
from tuskar.tests import base
URL_ROLES = '/v1/overcloud_roles'
class OvercloudRolesTests(base.TestCase):
def setUp(self):
super(OvercloudRolesTests, self).setUp()
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'..', '..', '..', '..', 'api', 'config.py')
self.app = load_test_app(config_file)
@mock.patch('tuskar.db.sqlalchemy.api.Connection.get_overcloud_roles')
def test_get_all(self, mock_db_get):
# Setup
fake_results = [db_models.OvercloudRole(name='foo')]
mock_db_get.return_value = fake_results
# Test
response = self.app.get(URL_ROLES)
result = response.json
# Verify
self.assertEqual(response.status_int, 200)
self.assertTrue(isinstance(result, list))
self.assertEqual(1, len(result))
self.assertEqual(result[0]['name'], 'foo')
mock_db_get.assert_called_once()
@mock.patch('tuskar.db.sqlalchemy.api.'
'Connection.get_overcloud_role_by_id')
def test_get_one(self, mock_db_get):
# Setup
fake_result = db_models.OvercloudRole(name='foo')
mock_db_get.return_value = fake_result
# Test
url = URL_ROLES + '/' + '12345'
response = self.app.get(url)
result = response.json
# Verify
self.assertEqual(response.status_int, 200)
self.assertEqual(result['name'], 'foo')
mock_db_get.assert_called_once_with(12345)
@mock.patch('tuskar.db.sqlalchemy.api.Connection.create_overcloud_role')
def test_post(self, mock_db_create):
# Setup
create_me = {'name': 'new'}
fake_created = db_models.OvercloudRole(name='created')
mock_db_create.return_value = fake_created
# Test
response = self.app.post_json(URL_ROLES, params=create_me)
result = response.json
# Verify
self.assertEqual(response.status_int, 201)
self.assertEqual(result['name'], fake_created.name)
self.assertEqual(1, mock_db_create.call_count)
db_create_model = mock_db_create.call_args[0][0]
self.assertTrue(isinstance(db_create_model,
db_models.OvercloudRole))
self.assertEqual(db_create_model.name, create_me['name'])
@mock.patch('tuskar.db.sqlalchemy.api.Connection.update_overcloud_role')
def test_put(self, mock_db_update):
# Setup
changes = {'name': 'updated'}
fake_updated = db_models.OvercloudRole(name='after-update')
mock_db_update.return_value = fake_updated
# Test
url = URL_ROLES + '/' + '12345'
response = self.app.put_json(url, params=changes)
result = response.json
# Verify
self.assertEqual(response.status_int, 200)
self.assertEqual(result['name'], fake_updated.name)
self.assertEqual(1, mock_db_update.call_count)
db_update_model = mock_db_update.call_args[0][0]
self.assertTrue(isinstance(db_update_model,
db_models.OvercloudRole))
self.assertEqual(db_update_model.id, 12345)
self.assertEqual(db_update_model.name, changes['name'])
@mock.patch('tuskar.db.sqlalchemy.api.'
'Connection.delete_overcloud_role_by_id')
def test_delete(self, mock_db_delete):
# Test
url = URL_ROLES + '/' + '12345'
response = self.app.delete(url)
# Verify
self.assertEqual(response.status_int, 204)
mock_db_delete.assert_called_once_with(12345)
| rdo-management/tuskar | tuskar/tests/api/controllers/v1/test_overcloud_roles.py | Python | apache-2.0 | 4,280 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the Compute client."""
import unittest
import mock
from tests.unittest_utils import ForsetiTestCase
from google.cloud.security.common.gcp_api import _base_client
from google.cloud.security.common.gcp_api import compute
from tests.common.gcp_api.test_data import fake_firewall_rules
class ComputeTest(ForsetiTestCase):
"""Test the Compute client."""
@mock.patch.object(_base_client.BaseClient, '__init__', autospec=True)
def setUp(self, mock_base_client):
"""Set up."""
self.client = compute.ComputeClient()
def test_get_firewall_rules(self):
self.client.service = mock.MagicMock()
self.client.rate_limiter = mock.MagicMock()
self.client._build_paged_result = mock.MagicMock()
self.client._build_paged_result.return_value = (
fake_firewall_rules.PAGED_RESULTS)
firewall_rules = self.client.get_firewall_rules('aaaaa')
self.assertTrue(self.client.service.firewalls.called)
self.assertTrue(
mock.call().list(project='aaaaa')
in self.client.service.firewalls.mock_calls)
self.assertEquals(fake_firewall_rules.EXPECTED_RESULTS,
firewall_rules)
if __name__ == '__main__':
unittest.main()
| thenenadx/forseti-security | tests/common/gcp_api/compute_test.py | Python | apache-2.0 | 1,822 |
#!/usr/bin/env python
import os.path
import tornado.escape
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", MainHandler),
]
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
debug=True,
autoescape=None
)
tornado.web.Application.__init__(self, handlers, **settings)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render(
"index.html",
page_title = "Burt's Books | Home",
header_text = "Welcome to Burt's Books!",
footer_text = "For more information, please email us at <a href=\"mailto:[email protected]\">[email protected]</a>.",
)
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| iamaris/xtornado | test/Introduction-to-Tornado-master/template_basics/bookstore/main.py | Python | apache-2.0 | 1,173 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def load_table_uri_parquet(table_id):
# [START bigquery_load_table_gcs_parquet]
from google.cloud import bigquery
# Construct a BigQuery client object.
client = bigquery.Client()
# TODO(developer): Set table_id to the ID of the table to create.
# table_id = "your-project.your_dataset.your_table_name"
job_config = bigquery.LoadJobConfig(source_format=bigquery.SourceFormat.PARQUET,)
uri = "gs://cloud-samples-data/bigquery/us-states/us-states.parquet"
load_job = client.load_table_from_uri(
uri, table_id, job_config=job_config
) # Make an API request.
load_job.result() # Waits for the job to complete.
destination_table = client.get_table(table_id)
print("Loaded {} rows.".format(destination_table.num_rows))
# [END bigquery_load_table_gcs_parquet]
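# Example invocation (a sketch reusing the placeholder table id from the TODO
# above; substitute a real table id before running):
# load_table_uri_parquet("your-project.your_dataset.your_table_name")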
| googleapis/python-bigquery | samples/load_table_uri_parquet.py | Python | apache-2.0 | 1,400 |
import django_filters
from django_filters import rest_framework as filters
from django_rv_apps.apps.believe_his_prophets.models.spirit_prophecy_chapter import SpiritProphecyChapter, SpiritProphecyChapterLanguage
from django_rv_apps.apps.believe_his_prophets.models.spirit_prophecy import SpiritProphecy
from django_rv_apps.apps.believe_his_prophets.models.language import Language
from django.utils import timezone
class SpiritProphecyChapterLanguageFilter(django_filters.FilterSet):
code_iso = filters.ModelMultipleChoiceFilter(
queryset=Language.objects.all(),
field_name='language__code_iso',
to_field_name='code_iso'
)
start_date = filters.CharFilter(method='filter_date')
class Meta:
model = SpiritProphecyChapterLanguage
        fields = ('id', 'code_iso', 'start_date')
def filter_date(self, queryset, name, value):
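        # Note: the incoming ``name`` and ``value`` are ignored; the queryset is
        # restricted to chapters whose start_date falls on the current local date.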
t = timezone.localtime(timezone.now())
        return queryset.filter(
            spirit_prophecy_chapter__start_date__year=t.year,
            spirit_prophecy_chapter__start_date__month=t.month,
            spirit_prophecy_chapter__start_date__day=t.day,
        )
| davrv93/creed-en-sus-profetas-backend | django_rv_apps/apps/believe_his_prophets_api/views/spirit_prophecy_chapter_language/filters.py | Python | apache-2.0 | 1,155 |
import logging
from django.core import management
from django.core.management.base import BaseCommand
from awx.main.models import OAuth2AccessToken
from oauth2_provider.models import RefreshToken
class Command(BaseCommand):
def init_logging(self):
log_levels = dict(enumerate([logging.ERROR, logging.INFO,
logging.DEBUG, 0]))
self.logger = logging.getLogger('awx.main.commands.cleanup_tokens')
self.logger.setLevel(log_levels.get(self.verbosity, 0))
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(message)s'))
self.logger.addHandler(handler)
self.logger.propagate = False
def execute(self, *args, **options):
self.verbosity = int(options.get('verbosity', 1))
self.init_logging()
total_accesstokens = OAuth2AccessToken.objects.all().count()
total_refreshtokens = RefreshToken.objects.all().count()
management.call_command('cleartokens')
self.logger.info("Expired OAuth 2 Access Tokens deleted: {}".format(total_accesstokens - OAuth2AccessToken.objects.all().count()))
self.logger.info("Expired OAuth 2 Refresh Tokens deleted: {}".format(total_refreshtokens - RefreshToken.objects.all().count()))
| GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awx/main/management/commands/cleanup_tokens.py | Python | apache-2.0 | 1,286 |
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from toil.common import Config
from toil.job import CheckpointJobDescription, JobDescription
from toil.jobStores.fileJobStore import FileJobStore
from toil.test import ToilTest, travis_test
from toil.worker import nextChainable
class WorkerTests(ToilTest):
"""Test miscellaneous units of the worker."""
def setUp(self):
super(WorkerTests, self).setUp()
path = self._getTestJobStorePath()
self.jobStore = FileJobStore(path)
self.config = Config()
self.config.jobStore = 'file:%s' % path
self.jobStore.initialize(self.config)
self.jobNumber = 0
@travis_test
def testNextChainable(self):
"""Make sure chainable/non-chainable jobs are identified correctly."""
def createTestJobDesc(memory, cores, disk, preemptable, checkpoint):
"""
Create a JobDescription with no command (representing a Job that
has already run) and return the JobDescription.
"""
name = 'job%d' % self.jobNumber
self.jobNumber += 1
descClass = CheckpointJobDescription if checkpoint else JobDescription
jobDesc = descClass(requirements={'memory': memory, 'cores': cores, 'disk': disk, 'preemptable': preemptable}, jobName=name)
# Assign an ID
self.jobStore.assignID(jobDesc)
# Save and return the JobDescription
return self.jobStore.create(jobDesc)
for successorType in ['addChild', 'addFollowOn']:
# Try with the branch point at both child and follow-on stages
# Identical non-checkpoint jobs should be chainable.
jobDesc1 = createTestJobDesc(1, 2, 3, True, False)
jobDesc2 = createTestJobDesc(1, 2, 3, True, False)
getattr(jobDesc1, successorType)(jobDesc2.jobStoreID)
chainable = nextChainable(jobDesc1, self.jobStore, self.config)
self.assertNotEqual(chainable, None)
self.assertEqual(jobDesc2.jobStoreID, chainable.jobStoreID)
# Identical checkpoint jobs should not be chainable.
jobDesc1 = createTestJobDesc(1, 2, 3, True, False)
jobDesc2 = createTestJobDesc(1, 2, 3, True, True)
getattr(jobDesc1, successorType)(jobDesc2.jobStoreID)
self.assertEqual(None, nextChainable(jobDesc1, self.jobStore, self.config))
# If there is no child we should get nothing to chain.
jobDesc1 = createTestJobDesc(1, 2, 3, True, False)
self.assertEqual(None, nextChainable(jobDesc1, self.jobStore, self.config))
# If there are 2 or more children we should get nothing to chain.
jobDesc1 = createTestJobDesc(1, 2, 3, True, False)
jobDesc2 = createTestJobDesc(1, 2, 3, True, False)
jobDesc3 = createTestJobDesc(1, 2, 3, True, False)
getattr(jobDesc1, successorType)(jobDesc2.jobStoreID)
getattr(jobDesc1, successorType)(jobDesc3.jobStoreID)
self.assertEqual(None, nextChainable(jobDesc1, self.jobStore, self.config))
# If there is an increase in resource requirements we should get nothing to chain.
reqs = {'memory': 1, 'cores': 2, 'disk': 3, 'preemptable': True, 'checkpoint': False}
for increased_attribute in ('memory', 'cores', 'disk'):
jobDesc1 = createTestJobDesc(**reqs)
reqs[increased_attribute] += 1
jobDesc2 = createTestJobDesc(**reqs)
getattr(jobDesc1, successorType)(jobDesc2.jobStoreID)
self.assertEqual(None, nextChainable(jobDesc1, self.jobStore, self.config))
# A change in preemptability from True to False should be disallowed.
jobDesc1 = createTestJobDesc(1, 2, 3, True, False)
jobDesc2 = createTestJobDesc(1, 2, 3, False, True)
getattr(jobDesc1, successorType)(jobDesc2.jobStoreID)
self.assertEqual(None, nextChainable(jobDesc1, self.jobStore, self.config))
| BD2KGenomics/slugflow | src/toil/test/src/workerTest.py | Python | apache-2.0 | 4,647 |
#!/usr/bin/env python3
import unittest
from unittest.mock import MagicMock
import logging
import nat_monitor
import utils
class NatInstanceTest(unittest.TestCase):
def setUp(self):
self.vpc_conn = MagicMock()
self.ec2_conn = MagicMock()
self.instance_id = 'i-abc123'
self.subnet = MagicMock()
self.subnet.id = 'subnetid'
self.route_table = MagicMock()
self.route_table.id = 'rt-123'
self.vpc_conn.get_all_subnets = MagicMock(return_value=[self.subnet])
self.vpc_conn.get_all_route_tables = MagicMock(
return_value=[self.route_table])
self.vpc_conn.create_route = MagicMock()
self.vpc_id = 'vpc123'
self.az = 'us-east-1a'
self.instance = MagicMock()
self.role = 'nat'
self.instance.tags = {
'Role': self.role, 'Name': NatInstanceTest.__name__}
self.instance_tags = MagicMock()
self.name = 'name'
self.instance_tags.get_name = MagicMock(return_value=self.name)
self.instances = [self.instance]
self.ec2_conn.get_only_instances = MagicMock(
return_value=self.instances)
self.ec2_conn.modify_instance_attribute = MagicMock()
self.instance_metadata = {
'instance-id': self.instance_id,
'network': {
'interfaces': {
'macs': {
'0e:bf:0c:a1:f6:59': {
'vpc-id': self.vpc_id
}
}
}
},
'placement': {
'availability-zone': self.az
}
}
self.nat_instance = nat_monitor.NatInstance(
self.vpc_conn, self.ec2_conn, self.instance_tags, self.instance_metadata)
def test_init(self):
self.assertEqual(self.nat_instance.vpc_conn, self.vpc_conn)
self.assertEqual(self.nat_instance.ec2_conn, self.ec2_conn)
self.assertEqual(self.nat_instance.vpc_id, self.vpc_id)
self.assertEqual(self.nat_instance.az, self.az)
self.assertEqual(self.nat_instance.instance_id, self.instance_id)
self.assertEqual(
self.nat_instance.my_route_table_id, self.route_table.id)
self.assertEqual(self.nat_instance.name_tag, self.name)
def test_disable_source_dest_check(self):
self.nat_instance.disable_source_dest_check()
self.ec2_conn.modify_instance_attribute.assert_called_with(
self.instance_id, 'sourceDestCheck', False)
def test_set_route(self):
self.nat_instance.set_route()
self.vpc_conn.create_route.assert_called_with(
self.nat_instance.my_route_table_id, '0.0.0.0/0',
instance_id=self.nat_instance.instance_id)
if __name__ == '__main__':
unittest.main()
| ridecharge/aws-startup-utils-docker | scripts/nat_monitor_test.py | Python | apache-2.0 | 2,843 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script for RetinaNet segmentation model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow.compat.v1 as tf
import dataloader
import retinanet_segmentation_model
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
from tensorflow.contrib import training as contrib_training
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'tpu', default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url.')
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
    help='GCE zone where the Cloud TPU is located. If not specified, we '
    'will attempt to automatically detect the zone from metadata.')
# Model specific parameters
flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than CPUs')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string('resnet_checkpoint', None,
'Location of the ResNet50 checkpoint to use for model '
'initialization.')
flags.DEFINE_string('hparams', '',
'Comma separated k=v pairs of hyperparameters.')
flags.DEFINE_integer(
'num_shards', default=8, help='Number of shards (TPU cores)')
flags.DEFINE_integer('train_batch_size', 64, 'training batch size')
flags.DEFINE_integer('eval_batch_size', 8, 'evaluation batch size')
flags.DEFINE_integer('eval_samples', 1449, 'The number of samples for '
'evaluation.')
flags.DEFINE_integer(
'iterations_per_loop', 100, 'Number of iterations per TPU training loop')
flags.DEFINE_string(
'training_file_pattern', None,
'Glob for training data files (e.g., Pascal VOC train set)')
flags.DEFINE_string(
'validation_file_pattern', None,
'Glob for evaluation tfrecords (e.g., Pascal VOC validation set)')
flags.DEFINE_integer('num_examples_per_epoch', 10582,
'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', 45, 'Number of epochs for training')
flags.DEFINE_string('mode', 'train_and_eval',
'Mode to run: train or eval (default: train)')
flags.DEFINE_bool('eval_after_training', False, 'Run one eval after the '
'training finishes.')
# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
FLAGS = flags.FLAGS
def main(argv):
del argv # Unused.
if FLAGS.use_tpu:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
tpu_grpc_url = tpu_cluster_resolver.get_master()
tf.Session.reset(tpu_grpc_url)
if FLAGS.mode in ('train',
'train_and_eval') and FLAGS.training_file_pattern is None:
raise RuntimeError('You must specify --training_file_pattern for training.')
if FLAGS.mode in ('eval', 'train_and_eval'):
if FLAGS.validation_file_pattern is None:
raise RuntimeError('You must specify'
'--validation_file_pattern for evaluation.')
# Parse hparams
hparams = retinanet_segmentation_model.default_hparams()
hparams.parse(FLAGS.hparams)
params = dict(
hparams.values(),
num_shards=FLAGS.num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
use_tpu=FLAGS.use_tpu,
resnet_checkpoint=FLAGS.resnet_checkpoint,
mode=FLAGS.mode,
)
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
evaluation_master='',
model_dir=FLAGS.model_dir,
keep_checkpoint_max=3,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False),
tpu_config=contrib_tpu.TPUConfig(
FLAGS.iterations_per_loop,
FLAGS.num_shards,
per_host_input_for_training=(
contrib_tpu.InputPipelineConfig.PER_HOST_V2)))
model_fn = retinanet_segmentation_model.segmentation_model_fn
# TPU Estimator
eval_params = dict(
params,
use_tpu=FLAGS.use_tpu,
input_rand_hflip=False,
resnet_checkpoint=None,
is_training_bn=False,
)
if FLAGS.mode == 'train':
train_estimator = contrib_tpu.TPUEstimator(
model_fn=model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
config=run_config,
params=params)
train_estimator.train(
input_fn=dataloader.SegmentationInputReader(
FLAGS.training_file_pattern, is_training=True),
max_steps=int((FLAGS.num_epochs * FLAGS.num_examples_per_epoch) /
FLAGS.train_batch_size),
)
if FLAGS.eval_after_training:
# Run evaluation on CPU after training finishes.
eval_estimator = contrib_tpu.TPUEstimator(
model_fn=retinanet_segmentation_model.segmentation_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=eval_params)
eval_results = eval_estimator.evaluate(
input_fn=dataloader.SegmentationInputReader(
FLAGS.validation_file_pattern, is_training=False),
steps=FLAGS.eval_samples//FLAGS.eval_batch_size)
tf.logging.info('Eval results: %s' % eval_results)
elif FLAGS.mode == 'eval':
eval_estimator = contrib_tpu.TPUEstimator(
model_fn=retinanet_segmentation_model.segmentation_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=eval_params)
def terminate_eval():
tf.logging.info('Terminating eval after %d seconds of no checkpoints' %
FLAGS.eval_timeout)
return True
# Run evaluation when there's a new checkpoint
for ckpt in contrib_training.checkpoints_iterator(
FLAGS.model_dir,
min_interval_secs=FLAGS.min_eval_interval,
timeout=FLAGS.eval_timeout,
timeout_fn=terminate_eval):
tf.logging.info('Starting to evaluate.')
try:
        # Note that if the eval_samples size is not evenly divisible by the
        # eval_batch_size, the remainder will be dropped and result in
        # different evaluation performance than validating on the full set.
eval_results = eval_estimator.evaluate(
input_fn=dataloader.SegmentationInputReader(
FLAGS.validation_file_pattern, is_training=False),
steps=FLAGS.eval_samples//FLAGS.eval_batch_size)
tf.logging.info('Eval results: %s' % eval_results)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
total_step = int((FLAGS.num_epochs * FLAGS.num_examples_per_epoch) /
FLAGS.train_batch_size)
if current_step >= total_step:
tf.logging.info('Evaluation finished after training step %d' %
current_step)
break
except tf.errors.NotFoundError:
# Since the coordinator is on a different job than the TPU worker,
# sometimes the TPU worker does not finish initializing until long after
# the CPU job tells it to start evaluating. In this case, the checkpoint
# file could have been deleted already.
tf.logging.info('Checkpoint %s no longer exists, skipping checkpoint' %
ckpt)
elif FLAGS.mode == 'train_and_eval':
train_estimator = contrib_tpu.TPUEstimator(
model_fn=retinanet_segmentation_model.segmentation_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
config=run_config,
params=params)
eval_estimator = contrib_tpu.TPUEstimator(
model_fn=retinanet_segmentation_model.segmentation_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=eval_params)
for cycle in range(0, FLAGS.num_epochs):
tf.logging.info('Starting training cycle, epoch: %d.' % cycle)
train_estimator.train(
input_fn=dataloader.SegmentationInputReader(
FLAGS.training_file_pattern, is_training=True),
steps=int(FLAGS.num_examples_per_epoch / FLAGS.train_batch_size))
tf.logging.info('Starting evaluation cycle, epoch: {:d}.'.format(
cycle + 1))
# Run evaluation after training finishes.
eval_results = eval_estimator.evaluate(
input_fn=dataloader.SegmentationInputReader(
FLAGS.validation_file_pattern, is_training=False),
steps=FLAGS.eval_samples//FLAGS.eval_batch_size)
tf.logging.info('Evaluation results: %s' % eval_results)
else:
tf.logging.info('Mode not found.')
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
| tensorflow/tpu | models/official/retinanet/retinanet_segmentation_main.py | Python | apache-2.0 | 10,349 |
# -*- coding: utf-8 -*-
from troubleshooting.framework.modules.manager import ManagerFactory
from troubleshooting.framework.variable.variable import *
from troubleshooting.framework.libraries.baseList import list2stringAndFormat
from troubleshooting.framework.libraries.system import createDir
from troubleshooting.framework.modules.configuration import ConfigManagerInstance
import time
import os,sys
from htmltemplate import *
import re
class html(object):
def __init__(self):
super(html,self).__init__()
self.caseResult = ManagerFactory().getManager(LAYER.Case).case_record
self.currenttime = time.strftime("%Y-%m-%d %X %Z",time.localtime())
def write(self):
data = ""
data += HTML_BEFORE
data += HTML_HEAD
data +="""
<body bgcolor = "#E9EAEE">
<h1 align="center">TroubleShooting Framework Report</h1>
<p><i>%s</i></p>
<table width="100%%" border="2" class="bordered">
<thead>
<tr ><th width="15%%">CaseName</th><th width="5%%" >Status</th><th width="80%%">Attribute</th></tr>
</thead>
<tbody>
"""%(self.currenttime,)
recovery_id = 1
for i,caseName in enumerate(self.caseResult):
i += 1
caseStatus = self.caseResult[caseName]["STATUS"]
DESCRIPTION = self.caseResult[caseName]["DESCRIPTION"]
REFERENCE = self.caseResult[caseName]["REFERENCE"]
REFERENCEHtml = '<a href="%s">reference document</>'%REFERENCE if REFERENCE else '<font color="#d0d0d0">NA</font>'
TAGS = self.caseResult[caseName]["TAGS"]
TESTPOINT = self.caseResult[caseName]["TESTPOINT"]
parent_pass = """
<tr bgcolor="#53C579" class="parent" id="row_0%s"><td colspan="1">%s</td><td>PASS</td><td colspan="1"></td></tr>"""%(i,caseName,)
parent_fail = """
<tr bgcolor="#FF3030" class="parent" id="row_0%s"><td colspan="1">%s</td><td>FAIL</td><td colspan="1"></td></tr>"""%(i,caseName,)
parent_warn = """
<tr bgcolor="#FF7F00" class="parent" id="row_0%s"><td colspan="1">%s</td><td>WARN</td><td colspan="1"></td></tr>"""%(i,caseName,)
if caseStatus:
data += parent_pass
else:
_level = self.caseResult[caseName]["LEVEL"]
if _level is LEVEL.CRITICAL:
data += parent_fail
else:
data += parent_warn
data += """
<tr class="child_row_0%s" style="display:none"><td>Description</td><td></td><td>%s</td></tr>
<tr class="child_row_0%s" style="display:none"><td>Reference</td><td></td><td>%s</td></tr>
<tr class="child_row_0%s" style="display:none"><td>Tags</td><td></td><td>%s</td></tr>
"""%(i,DESCRIPTION,i,REFERENCEHtml,i,TAGS)
data += """
<tr class="child_row_0%s" style="display:none">
<td colspan="3" >
<table border="1" width="100%%" style="margin:0px">
"""%i
data += """
<tr>
<th width="5%%">
<b>TestPoint</b>
</th>
<th width="5%%">
<b>Status</b>
</th>
<th width="5%%">
<b>Level</b>
</th>
<th width="15%%" name="nolog">
<b>Impact</b>
</th>
<th width="35%%" name="nolog">
<b>Root Cause</b>
</th>
<th width="15%%" name="nolog">
<b>Fix Method</b>
</th>
<th width="20%%" name="nolog">
<b>Auto Fix Method</b>
</th>
<th style="display:none;" width="85%%" name="log">
<b>LOG</b>
</th>
</tr>
"""
for testpoint in TESTPOINT:
testpointStatus = TESTPOINT[testpoint]["STATUS"]
testpointStatusHtml = '<font color="green"><b><i>%s</i></b></font>' % STATUS.PASS.value.lower() if testpointStatus else '<font color="red"><b><i>%s</i></b></font>' % STATUS.FAIL.value.lower()
testpointImpact = TESTPOINT[testpoint]["IMPACT"]
testpointImpact = list2stringAndFormat(testpointImpact)
if not testpointImpact:
testpointImpact = '<font color="#d0d0d0">NA</font>'
testpointImpactHtml = testpointImpact.replace("\n","</br>")
testpointLevel = TESTPOINT[testpoint]["LEVEL"]
testpointLevelHtml = testpointLevel.value
testpointDescribe = TESTPOINT[testpoint]["DESCRIBE"]
testpointRCA = TESTPOINT[testpoint]["RCA"]
testpointRCA = list2stringAndFormat(testpointRCA)
if not testpointRCA:
testpointRCA = '<font color="#d0d0d0">NA</font>'
testpointRCAHtml = testpointRCA.replace("\n","</br>")
testpointFIXSTEP = TESTPOINT[testpoint]["FIXSTEP"]
testpointFIXSTEP = list2stringAndFormat(testpointFIXSTEP)
if not testpointFIXSTEP:
testpointFIXSTEP = '<font color="#d0d0d0">NA</font>'
testpointFIXSTEPHtml = testpointFIXSTEP.replace("\n","</br>")
testpointAutoFixStep = TESTPOINT[testpoint]["AUTOFIXSTEP"]
if not testpointAutoFixStep:
testpointAutoFixStep = '<font color="#d0d0d0">NA</font>'
else:
if ConfigManagerInstance.config["Host"]:
reportHash = ConfigManagerInstance.config["__ReportHash__"]
reportName = ConfigManagerInstance.config["__ReportName__"]
host = ConfigManagerInstance.config["Host"]
port = ConfigManagerInstance.config["Port"]
user = ConfigManagerInstance.config["User"]
password = ConfigManagerInstance.config["Password"]
cwd =ConfigManagerInstance.config["__ProjectCWD__"]
recovery = {"ProjectDir":cwd,"Host":host,"Port":port,"User":user,"Password":password,"Recovery":",".join(testpointAutoFixStep)}
testpointAutoFixStep = """
<iframe scrolling="no" src="/www/iframe/growl-genie.html?recovery=%s&reportHash=%s&reportName=%s"></iframe>
"""%(recovery,reportHash,reportName)
testpointAutoFixStepHtml = testpointAutoFixStep
testpointLog = TESTPOINT[testpoint]["LOG"]
testpointLogHtml = testpointLog
pattern = re.compile(r"\<.+\>")
match = pattern.finditer(testpointLog)
if match:
for m in match:
className = m.group()
testpointLogHtml = testpointLogHtml.replace(className,'<font color="#FFB90F">%s</font>'%className)
testpointLogHtml = testpointLogHtml.replace("\n", "</br>")
testpointTimeout = TESTPOINT[testpoint]["TIMEOUT"]
testpointCost = TESTPOINT[testpoint]["COST"]
                testpointHtml = '<i title="Timeout: %s\nCostTime: %s">%s</i>' % (testpointTimeout, testpointCost, testpoint.strip("{}"))
attribute = """
<tr>
<td>
<i>%s</i>
</td>
<td>
<i>%s</i>
</td>
<td>
<i>%s</i>
</td>
<td name="nolog">
<i>%s</i>
</td>
<td name="nolog">
<i>%s</i>
</td>
<td name="nolog">
<i>%s</i>
</td>
<td name="nolog">
<i>%s</i>
</td>
<td style="display:none" name="log">
<i>%s</i>
</td>
</tr>
"""%(testpointHtml,testpointStatusHtml,testpointLevelHtml,testpointImpactHtml,testpointRCAHtml,testpointFIXSTEPHtml,testpointAutoFixStepHtml,testpointLogHtml)
data += attribute
data += """
</table>
</td>
</tr>
"""
data += """
</tbody>
</table>
"""
data += BUTTON
# data += HTML_LOG
data += BODY_AFTER
data += HTML_AFTER
reportDir = os.path.dirname(ConfigManagerInstance.config["Report"])
createDir(reportDir)
reportPath = ConfigManagerInstance.config["Report"]
with open(reportPath,"w") as f:
f.write(data)
| gaoxiaofeng/troubleShooting | src/troubleshooting/framework/output/writehtml.py | Python | apache-2.0 | 9,570 |
# Copyright 2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Wrapper control suite environments that adds Gaussian noise to actions."""
import dm_env
import numpy as np
_BOUNDS_MUST_BE_FINITE = (
'All bounds in `env.action_spec()` must be finite, got: {action_spec}')
class Wrapper(dm_env.Environment):
"""Wraps a control environment and adds Gaussian noise to actions."""
def __init__(self, env, scale=0.01):
"""Initializes a new action noise Wrapper.
Args:
env: The control suite environment to wrap.
scale: The standard deviation of the noise, expressed as a fraction
of the max-min range for each action dimension.
Raises:
ValueError: If any of the action dimensions of the wrapped environment are
unbounded.
"""
action_spec = env.action_spec()
if not (np.all(np.isfinite(action_spec.minimum)) and
np.all(np.isfinite(action_spec.maximum))):
raise ValueError(_BOUNDS_MUST_BE_FINITE.format(action_spec=action_spec))
self._minimum = action_spec.minimum
self._maximum = action_spec.maximum
self._noise_std = scale * (action_spec.maximum - action_spec.minimum)
self._env = env
def step(self, action):
noisy_action = action + self._env.task.random.normal(scale=self._noise_std)
# Clip the noisy actions in place so that they fall within the bounds
# specified by the `action_spec`. Note that MuJoCo implicitly clips out-of-
# bounds control inputs, but we also clip here in case the actions do not
# correspond directly to MuJoCo actuators, or if there are other wrapper
# layers that expect the actions to be within bounds.
np.clip(noisy_action, self._minimum, self._maximum, out=noisy_action)
return self._env.step(noisy_action)
def reset(self):
return self._env.reset()
def observation_spec(self):
return self._env.observation_spec()
def action_spec(self):
return self._env.action_spec()
def __getattr__(self, name):
return getattr(self._env, name)
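# Illustrative usage sketch (not part of the original module). It assumes the
# dm_control suite is installed; the domain/task names below are arbitrary
# examples, not requirements of this wrapper.
#
#   from dm_control import suite
#   from dm_control.suite.wrappers import action_noise
#
#   env = action_noise.Wrapper(suite.load('cartpole', 'swingup'), scale=0.01)
#   timestep = env.reset()
#   timestep = env.step(env.action_spec().generate_value())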
| deepmind/dm_control | dm_control/suite/wrappers/action_noise.py | Python | apache-2.0 | 2,630 |
import boto3
import botocore
import tarfile
import os
import shutil
class Persistor(object):
def __init__(self, data_dir, aws_region, bucket_name):
self.data_dir = data_dir
self.s3 = boto3.resource('s3', region_name=aws_region)
self.bucket_name = bucket_name
try:
self.s3.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': aws_region})
        except botocore.exceptions.ClientError:
pass # bucket already exists
self.bucket = self.s3.Bucket(bucket_name)
def send_tar_to_s3(self, target_dir):
if not os.path.isdir(target_dir):
raise ValueError('target_dir %r not found.' % target_dir)
base_name = os.path.basename(target_dir)
base_dir = os.path.dirname(target_dir)
tarname = shutil.make_archive(base_name, 'gztar', root_dir=base_dir, base_dir=base_name)
filekey = os.path.basename(tarname)
self.s3.Object(self.bucket_name, filekey).put(Body=open(tarname, 'rb'))
def fetch_and_extract(self, filename):
with open(filename, 'wb') as f:
self.bucket.download_fileobj(filename, f)
with tarfile.open(filename, "r:gz") as tar:
tar.extractall(self.data_dir)
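# Illustrative usage sketch; the bucket, region and paths are hypothetical
# examples, not values used by this module.
#
#   persistor = Persistor(data_dir='/tmp/models', aws_region='us-east-1',
#                         bucket_name='my-model-bucket')
#   persistor.send_tar_to_s3('/tmp/models/model_20170101')    # upload as .tar.gz
#   persistor.fetch_and_extract('model_20170101.tar.gz')      # download + untar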
| kreuks/liven | nlp/persistor.py | Python | apache-2.0 | 1,267 |
# 3rd party imports
from bidi.algorithm import get_display as apply_bidi
# Django imports
from django.conf import settings
# Project imports
from .base import TestGeneratePdfBase
from .factories import create_voters
from .utils_for_tests import extract_pdf_page, extract_textlines, clean_textlines, unwrap_lines
from ..arabic_reshaper import reshape
from ..generate_pdf import generate_pdf
from ..utils import truncate_center_name, format_name
from libya_elections.constants import ARABIC_COMMA, MALE, FEMALE, UNISEX
class TestGeneratePdfNoRegistrants(TestGeneratePdfBase):
"""Compare the word-by-word content of the PDF with expected content when there are no voters"""
def setUp(self):
super(TestGeneratePdfNoRegistrants, self).setUp()
self.voter_roll = []
def test_blank_page_content_male(self):
"""tests that the "blank" page explains why it is blank (no voters)"""
generate_pdf(self.filename, self.center, self.voter_roll, MALE)
# Build a list of the lines I expect to see.
expected_lines = []
expected_lines.append(self.STRINGS['center_header_prefix'])
expected_lines.append(self.STRINGS['center_list_header'])
expected_lines.append(self.STRINGS['no_male_registrants'])
expected_lines.append("1 / 1")
# Now see what was actually in the PDF and compare to expected.
xml = extract_pdf_page(self.filename, 1)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
def test_blank_page_content_female(self):
"""tests that the "blank" page explains why it is blank (no voters)"""
# Build a list of the lines I expect to see.
generate_pdf(self.filename, self.center, self.voter_roll, FEMALE)
expected_lines = []
expected_lines.append(self.STRINGS['center_header_prefix'])
expected_lines.append(self.STRINGS['center_list_header'])
expected_lines.append(self.STRINGS['no_female_registrants'])
expected_lines.append("1 / 1")
# Now see what was actually in the PDF and compare to expected.
xml = extract_pdf_page(self.filename, 1)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
class TestGeneratePdfGenderParam(TestGeneratePdfBase):
"""Ensure that passing UNISEX to generate_pdf() raises an error.
This is a small test that didn't seem to fit elsewhere.
"""
def test_gender_param(self):
self.voter_roll = create_voters(1, self.gender)
with self.assertRaises(ValueError):
generate_pdf(self.filename, self.center, self.voter_roll, UNISEX)
class GeneratePdfContentTestMixin(object):
    """Mixin that provides the main methods tested in several test cases in this file (below).
    These methods compare the actual word-by-word content of the PDF with expected content.
    There are no unisex tests needed here because that concept only matters when dealing with
    polling stations.
"""
def test_cover_content(self):
"""tests that the cover page contains the expected text"""
# Build a list of the lines I expect to see.
expected_lines = []
key = 'center_book_cover' if self.center_book else 'center_list_cover'
expected_lines += self.STRINGS[key]
# These are constructed "backwards" relative to how the actual code does it. It's
# necessary to do so because the text is laid out RtoL in the PDF.
expected_lines.append('{} :{}'.format(self.STRINGS['female'], self.STRINGS['gender']))
expected_lines.append('{} :{}'.format(self.center.center_id, self.STRINGS['center_number']))
center_name = apply_bidi(reshape(self.center.name))
expected_lines.append('{} :{}'.format(center_name, self.STRINGS['center_name']))
copied_by = self.center.copied_by.all()
if self.center.copy_of:
expected_lines.append('{} :{}'.format(self.center.copy_of.center_id,
self.STRINGS['copy_of']))
elif copied_by:
copied_by = [center.center_id for center in copied_by]
copied_by = (' ' + ARABIC_COMMA).join(map(str, reversed(copied_by)))
expected_lines.append('{} :{}'.format(copied_by, self.STRINGS['copied_by_plural']))
subconstituency_id = self.center.subconstituency.id
subconstituency_name = reshape(self.center.subconstituency.name_arabic)
subconstituency_name = apply_bidi(subconstituency_name)
subconstituency = '{} / {} :{}'.format(subconstituency_name, subconstituency_id,
self.STRINGS['subconstituency_name'])
expected_lines.append(subconstituency)
# Now see what was actually in the PDF and compare to expected.
xml = extract_pdf_page(self.filename, 0)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
# Did center name wrap? If so, unwrap.
if expected_lines[4].startswith(actual_lines[5]):
actual_lines = unwrap_lines(actual_lines, 4)
has_copy_info = (self.center.copy_of or self.center.copied_by)
if has_copy_info:
# Did center name wrap? If so, unwrap.
if expected_lines[5].startswith(actual_lines[6]):
actual_lines = unwrap_lines(actual_lines, 5)
# Did subcon name wrap? If so, unwrap.
offset = 1 if has_copy_info else 0
if len(actual_lines) >= 7 + offset:
if expected_lines[5 + offset].startswith(actual_lines[6 + offset]):
actual_lines = unwrap_lines(actual_lines, 5 + offset)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
def test_inner_page_content(self):
"""tests that the non-cover pages of a multipage PDF contain the expected text"""
        # 3 pages = cover + 2 pages of names
self.assertEqual(self.n_pages, 3)
# Build a list of the lines I expect to see. I don't care about the cover page, just
# the inner pages.
expected_lines = []
page_header = []
page_header.append(self.STRINGS['center_header_prefix'])
key = 'center_book_header' if self.center_book else 'center_list_header'
page_header.append(self.STRINGS[key])
mf_string = self.STRINGS['female']
page_header.append('{} :{}'.format(mf_string, self.STRINGS['gender']))
page_header.append('{} :{}'.format(self.center.center_id, self.STRINGS['center_number']))
center_name = apply_bidi(truncate_center_name(reshape(self.center.name)))
page_header.append('{} :{}'.format(center_name, self.STRINGS['center_name']))
expected_lines += page_header
expected_lines.append(self.STRINGS['the_names'])
for voter in self.voter_roll[:self.n_voters - 1]:
expected_lines.append(apply_bidi(reshape(format_name(voter))))
expected_lines.append(mf_string)
# '2 / 1' is the 'page N/n_pages' from the footer
expected_lines.append('2 / 1')
# Now see what was actually in the PDF and compare to expected.
xml = extract_pdf_page(self.filename, 1)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
# OK now test the second (final) inner page. It only has one voter on it.
expected_lines = page_header
expected_lines.append(self.STRINGS['the_names'])
for voter in self.voter_roll[-1:]:
expected_lines.append(apply_bidi(reshape(format_name(voter))))
expected_lines.append(mf_string)
# '2 / 2' is the 'page N/n_pages' from the footer
expected_lines.append('2 / 2')
# Now see what was actually in the PDF and compare to expected.
xml = extract_pdf_page(self.filename, 2)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
class TestGeneratePdfContentCenterList(TestGeneratePdfBase, GeneratePdfContentTestMixin):
"""Invoke GeneratePdfContentTestMixin for center lists.
Center lists are only used during the in-person phase.
"""
def setUp(self):
super(TestGeneratePdfContentCenterList, self).setUp()
self.center_book = False
# Create a PDF that will spill to multiple pages.
self.n_voters = settings.ROLLGEN_REGISTRATIONS_PER_PAGE_REGISTRATION + 1
self.voter_roll = create_voters(self.n_voters, FEMALE)
self.n_pages = generate_pdf(self.filename, self.center, self.voter_roll, FEMALE)
class TestGeneratePdfContentCenterBook(TestGeneratePdfBase):
"""Invoke GeneratePdfContentTestMixin for center books.
Center books are only used during the exhibitions phase.
"""
def setUp(self):
super(TestGeneratePdfContentCenterBook, self).setUp()
self.center_book = True
self.n_pages = generate_pdf(self.filename, self.center, self.voter_roll, FEMALE)
class TestCopyOfCenter(TestGeneratePdfBase, GeneratePdfContentTestMixin):
"""Invoke GeneratePdfContentTestMixin for a copy center.
This class uses a center that is a copy in order to exercise the copy_of code branch.
"""
def setUp(self):
super(TestCopyOfCenter, self).setUp()
self.center_book = False
self.n_voters = 5
self.voter_roll = create_voters(self.n_voters, FEMALE)
# Any of the copy centers will do.
self.center = self.copy_centers[2]
self.n_pages = generate_pdf(self.filename, self.center, self.voter_roll, FEMALE)
def test_inner_page_content(self):
# This doesn't need to be re-tested for copy centers; they only affect the cover page.
self.assertTrue(True)
class TestCopiedByCenter(TestGeneratePdfBase, GeneratePdfContentTestMixin):
"""Invoke GeneratePdfContentTestMixin for a copied center.
This class uses a center that is copied by other centers in order to exercise the copied_by
code branch.
"""
def setUp(self):
super(TestCopiedByCenter, self).setUp()
self.center_book = False
self.n_voters = 5
self.voter_roll = create_voters(self.n_voters, FEMALE)
self.center = self.original_center
self.n_pages = generate_pdf(self.filename, self.center, self.voter_roll, FEMALE)
def test_inner_page_content(self):
# This doesn't need to be re-tested for copy centers; they only affect the cover page.
self.assertTrue(True)
| SmartElect/SmartElect | rollgen/tests/test_generate_pdf.py | Python | apache-2.0 | 11,178 |
"""
This example demonstrates how status works
"""
from juju import jasyncio
from juju import loop
import logging
import sys
from logging import getLogger
from juju.model import Model
from juju.status import formatted_status
LOG = getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
async def main():
model = Model()
await model.connect_current()
application = await model.deploy(
'cs:ubuntu-10',
application_name='ubuntu',
series='trusty',
channel='stable',
)
await jasyncio.sleep(10)
# Print the status to observe the evolution
# during a minute
for i in range(12):
try:
# By setting raw to True, the returned
# entry contains a FullStatus object with
# all the available status data.
# status = await model.status(raw=True)
status = await formatted_status(model)
print(status)
except Exception as e:
print(e)
await jasyncio.sleep(5)
await application.remove()
await model.disconnect()
if __name__ == '__main__':
loop.run(main())
| juju/python-libjuju | examples/status.py | Python | apache-2.0 | 1,149 |
#!/usr/bin/env python
# Cloudsnake Application server
# Licensed under Apache License, see license.txt
# Author: Markus Gronholm <[email protected]> Alshain Oy
class Luokka( object ):
def __init__( self, N ):
self.luku = N
def test( self ):
return self.luku
def test_001( data ):
#print >> cloudSnake.output, "Moi kaikki"
#print >> cloudSnake.output, cloudSnake.call( 'mean', [ [1,2,3,4] ] )
print >> cloudSnake.output, "Luokkakoe nro 1"
otus = cloudSnake.call( 'Luokka', [7] )
print >> cloudSnake.output, otus.test()
| Alshain-Oy/Cloudsnake-Application-Server | code_examples/class_test_01.py | Python | apache-2.0 | 546 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Automatically generate Nagios configurations.
Copyright (C) 2015 Canux CHENG
All rights reserved
Name: __init__.py
Author: Canux [email protected]
Version: V1.0
Time: Wed 09 Sep 2015 09:20:51 PM EDT
Example:
./nagios -h
"""
__version__ = "3.1.0.0"
__description__ = """Configure Nagios automatically. For any questions, contact the author Canux CHENG. Email: [email protected]."""
__author__ = "Canux CHENG"
| crazy-canux/xnagios | nagios/__init__.py | Python | apache-2.0 | 451 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Program tree representation."""
# pylint: skip-file
import numpy as np
from aloe.rfill.utils.rfill_consts import RFILL_EDGE_TYPES, RFILL_NODE_TYPES
class ProgNode(object):
"""Token as node in program tree/graph."""
def __init__(self, syntax, value=None, subtrees=None):
"""Initializer.
Args:
syntax: string representation of syntax
value: string representation of actual value
subtrees: list of tuple(edge_type, subtree nodes or single node)
"""
self.syntax = syntax
self.value = value
self.children = []
if subtrees is not None:
for e_type, children in subtrees:
if isinstance(children, list):
for c in children:
add_edge(parent_node=self, child_node=c, edge_type=e_type)
else:
add_edge(parent_node=self, child_node=children, edge_type=e_type)
def get_name(self):
if self.value is None:
return self.syntax
return self.syntax + '-' + str(self.value)
def add_child(self, edge_type, child_node):
self.children.append((edge_type, child_node))
def pprint(self, tab_cnt=0):
st = ' ' * tab_cnt + self.get_name()
print(st)
for _, c in self.children:
c.pprint(tab_cnt=tab_cnt + 1)
def __str__(self):
st = '(' + self.get_name()
for _, c in self.children:
st += c.__str__()
st += ')'
return st
class AbsRFillNode(ProgNode):
"""abstract Subclass of RFillNode."""
def pprint(self, tab_cnt=0):
if self.syntax == 'RegexTok' or self.syntax == 'ConstTok':
st = ' ' * tab_cnt + self.syntax + '('
_, p1 = self.children[0]
_, p2 = self.children[1]
_, direct = self.children[2]
name = p1.value
st += '%s, %d, %s)' % (name, p2.value, direct.value)
print(st)
return
st = ' ' * tab_cnt + self.get_name()
print(st)
for _, c in self.children:
c.pprint(tab_cnt=tab_cnt + 1)
def filter_tree_nodes(root_node, key_set, out_list=None):
if out_list is None:
out_list = []
if root_node.syntax in key_set:
out_list.append(root_node)
for _, c in root_node.children:
filter_tree_nodes(c, key_set, out_list=out_list)
return out_list
def add_edge(parent_node, child_node, edge_type):
parent_node.add_child(edge_type, child_node)
class ProgGraph(object):
"""Program graph"""
def __init__(self, tree_root, node_types=RFILL_NODE_TYPES, edge_types=RFILL_EDGE_TYPES, add_rev_edge=True):
"""Initializer.
Args:
tree_root: ProgNode type; the root of tree representation
node_types: dict of nodetype to index
edge_types: dict of edgetype to index
add_rev_edge: whether add reversed edge
"""
self.tree_root = tree_root
self.add_rev_edge = add_rev_edge
self.node_types = node_types
self.edge_types = edge_types
# list of tree nodes
self.node_list = []
# node feature index
self.node_feats = []
# list of (from_idx, to_idx, etype_int) tuples
self.edge_list = []
self.last_terminal = None # used for linking terminals
self.build_graph(self.tree_root)
self.num_nodes = len(self.node_list)
self.num_edges = len(self.edge_list)
# unzipped version of edge list
# self.from_list, self.to_list, self.edge_feats = \
# [np.array(x, dtype=np.int32) for x in zip(*self.edge_list)]
self.node_feats = np.array(self.node_feats, dtype=np.int32)
self.subexpr_ids = []
for _, c in self.tree_root.children:
self.subexpr_ids.append(c.index)
def render(self, render_path):
"""Render the program graph to specified path."""
import pygraphviz as pgv
ag = pgv.AGraph(directed=True)
e_idx2name = {}
for key in self.edge_types:
e_idx2name[self.edge_types[key]] = key
for i, node in enumerate(self.node_list):
ag.add_node(str(i) + '-' + node.get_name())
for e in self.edge_list:
x, y, et = e
ename = e_idx2name[et]
if ename.startswith('rev-'):
continue
x = str(x) + '-' + self.node_list[x].get_name()
y = str(y) + '-' + self.node_list[y].get_name()
ag.add_edge(x, y)
ag.layout(prog='dot')
ag.draw(render_path)
def add_bidir_edge(self, from_idx, to_idx, etype_str):
assert etype_str in self.edge_types
self.edge_list.append((from_idx, to_idx, self.edge_types[etype_str]))
if self.add_rev_edge:
# add reversed edge
rev_etype_str = 'rev-' + etype_str
assert rev_etype_str in self.edge_types
self.edge_list.append((to_idx, from_idx, self.edge_types[rev_etype_str]))
def build_graph(self, cur_root):
"""recursively build program graph from program tree.
Args:
cur_root: current root of (sub)program
Returns:
index: index of this cur_root node
"""
cur_root.index = len(self.node_list)
self.node_list.append(cur_root)
name = cur_root.get_name()
if name not in self.node_types:
raise NotImplementedError
type_idx = self.node_types[name]
cur_root.node_type = type_idx
self.node_feats.append(type_idx)
if len(cur_root.children): # pylint: disable=g-explicit-length-test
for e_type, c in cur_root.children:
child_idx = self.build_graph(c)
self.add_bidir_edge(cur_root.index, child_idx, e_type)
else: # add possible links between adjacent terminals
if self.last_terminal is not None:
self.add_bidir_edge(self.last_terminal.index, cur_root.index, 'succ')
self.last_terminal = cur_root
return cur_root.index
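# Illustrative construction sketch (not part of the original module). The syntax
# strings are arbitrary examples; building a full ProgGraph additionally requires
# every node/edge name to be present in RFILL_NODE_TYPES / RFILL_EDGE_TYPES.
#
#   leaf = ProgNode('ConstStr', value='-')
#   root = ProgNode('expr', subtrees=[('subexpr', [leaf])])
#   root.pprint()       # indented tree dump
#   print(str(root))    # '(expr(ConstStr--))'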
| google-research/google-research | aloe/aloe/rfill/utils/program_struct.py | Python | apache-2.0 | 6,121 |
# Call vendor to add the dependencies to the classpath
import vendor
vendor.add('lib')
# Import the Flask Framework
from flask import Flask, render_template, url_for, request, jsonify
app = Flask(__name__)
import translate
# Root directory
@app.route('/')
def index_route():
phrase = request.args.get("q")
if not phrase:
return render_template("index.html", phrase="")
return render_template("index.html", phrase=phrase)
@app.route("/translate")
def translate_route():
phrase = request.args.get("text")
fro = request.args.get("from")
to = request.args.get("to")
translated_text = translate.get_translation(phrase, lang=fro + "-" + to)
    if translated_text is None:
return "Failed to translate", 404
return translated_text
if __name__ == '__main__':
#app.run(host="0.0.0.0") # For development
    app.run() # For production
| PiJoules/translation | __init__.py | Python | apache-2.0 | 847 |
import contextlib
from time import time
from .meter import Meter
from .stats import Stat
from .histogram import Histogram
class Timer(Stat):
def __init__(self):
self.count = 0
self.meter = Meter()
self.histogram = Histogram()
super(Timer, self).__init__()
@contextlib.contextmanager
def time(self):
start_time = time()
try:
yield
finally:
self.update(time() - start_time)
def update(self, value):
self.meter.mark()
self.histogram.update(value)
def get_values(self):
values = self.meter.get_values()
values.update(self.histogram.get_values())
return values
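# Illustrative usage sketch (`do_work` is a hypothetical callable):
#
#   timer = Timer()
#   with timer.time():
#       do_work()
#   timer.get_values()   # merged meter (rate) and histogram (latency) statistics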
| emilssolmanis/tapes | tapes/local/timer.py | Python | apache-2.0 | 702 |
from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import DataCodec
# hex: 0x011300
_REQUEST_MESSAGE_TYPE = 70400
# hex: 0x011301
_RESPONSE_MESSAGE_TYPE = 70401
_REQUEST_THREAD_ID_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_REFERENCE_ID_OFFSET = _REQUEST_THREAD_ID_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_REFERENCE_ID_OFFSET + LONG_SIZE_IN_BYTES
def encode_request(name, key, thread_id, reference_id):
buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
FixSizedTypesCodec.encode_long(buf, _REQUEST_THREAD_ID_OFFSET, thread_id)
FixSizedTypesCodec.encode_long(buf, _REQUEST_REFERENCE_ID_OFFSET, reference_id)
StringCodec.encode(buf, name)
DataCodec.encode(buf, key, True)
return OutboundMessage(buf, True)
| hazelcast/hazelcast-python-client | hazelcast/protocol/codec/map_unlock_codec.py | Python | apache-2.0 | 1,021 |
__all__ = [
'fixed_value',
'coalesce',
]
try:
from itertools import ifilter as filter
except ImportError:
pass
class _FixedValue(object):
def __init__(self, value):
self._value = value
def __call__(self, *args, **kwargs):
return self._value
def fixed_value(value):
return _FixedValue(value)
class _Coalesce(object):
def _filter(self, x):
return x is not None
def __init__(self, callbacks, else_=None):
self._callbacks = callbacks
self._else = else_
def __call__(self, invoice):
results = (
callback(invoice)
for callback in self._callbacks
)
try:
return next(filter(
self._filter, results
))
except StopIteration:
return self._else
def coalesce(callbacks, else_=None):
return _Coalesce(callbacks, else_=else_)
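# Illustrative usage sketch; the invoice dicts and keys are hypothetical examples:
#
#   get_rate = coalesce(
#       [lambda invoice: invoice.get('special_rate'),
#        lambda invoice: invoice.get('general_rate')],
#       else_=0.0)
#   get_rate({'general_rate': 21.0})   # -> 21.0
#   get_rate({})                       # -> 0.0 (every callback returned None)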
| calidae/python-aeat_sii | src/pyAEATsii/callback_utils.py | Python | apache-2.0 | 918 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import textwrap
import unittest
# The sorted one.
from SortKeys.ttypes import SortedStruct, NegativeId
from SortSets.ttypes import SortedSetStruct
from thrift.protocol import TSimpleJSONProtocol
from thrift.transport.TTransport import TMemoryBuffer
def writeToJSON(obj):
trans = TMemoryBuffer()
proto = TSimpleJSONProtocol.TSimpleJSONProtocol(trans)
obj.write(proto)
return trans.getvalue()
def readStructFromJSON(jstr, struct_type):
stuff = struct_type()
trans = TMemoryBuffer(jstr)
proto = TSimpleJSONProtocol.TSimpleJSONProtocol(trans, struct_type.thrift_spec)
stuff.read(proto)
return stuff
class TestSortKeys(unittest.TestCase):
def testSorted(self):
static_struct = SortedStruct(aMap={"b": 1.0, "a": 1.0})
unsorted_blob = b'{\n "aMap": {\n "b": 1.0,\n "a": 1.0\n }\n}'
sorted_blob = b'{\n "aMap": {\n "a": 1.0,\n "b": 1.0\n }\n}'
sorted_struct = readStructFromJSON(unsorted_blob, SortedStruct)
blob = writeToJSON(sorted_struct)
self.assertNotEqual(blob, unsorted_blob)
self.assertEqual(blob, sorted_blob)
self.assertEqual(static_struct, sorted_struct)
def testSetSorted(self):
unsorted_set = set(["5", "4", "3", "2", "1", "0"])
static_struct = SortedSetStruct(aSet=unsorted_set)
unsorted_blob = (
textwrap.dedent(
"""\
{{
"aSet": [
"{}"
]
}}"""
)
.format('",\n "'.join(unsorted_set))
.encode()
)
sorted_blob = (
textwrap.dedent(
"""\
{{
"aSet": [
"{}"
]
}}"""
)
.format('",\n "'.join(sorted(unsorted_set)))
.encode()
)
sorted_struct = readStructFromJSON(unsorted_blob, SortedSetStruct)
blob = writeToJSON(sorted_struct)
self.assertNotEqual(blob, unsorted_blob)
self.assertEqual(blob, sorted_blob)
self.assertEqual(static_struct, sorted_struct)
def testNegativeId(self):
obj = NegativeId()
self.assertEqual(obj.field1, 1)
self.assertEqual(obj.field2, 2)
self.assertEqual(obj.field3, 3)
| facebook/fbthrift | thrift/test/py/TestSerializationSorted.py | Python | apache-2.0 | 3,066 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from .TProtocol import TType, TProtocolBase, TProtocolException
from struct import pack, unpack
class TBinaryProtocol(TProtocolBase):
"""Binary implementation of the Thrift protocol driver."""
# NastyHaxx. Python 2.4+ on 32-bit machines forces hex constants to be
# positive, converting this into a long. If we hardcode the int value
# instead it'll stay in 32 bit-land.
# VERSION_MASK = 0xffff0000
VERSION_MASK = -65536
# VERSION_1 = 0x80010000
VERSION_1 = -2147418112
TYPE_MASK = 0x000000ff
def __init__(self, trans, strictRead=False, strictWrite=True, **kwargs):
TProtocolBase.__init__(self, trans)
self.strictRead = strictRead
self.strictWrite = strictWrite
self.string_length_limit = kwargs.get('string_length_limit', None)
self.container_length_limit = kwargs.get('container_length_limit', None)
def _check_string_length(self, length):
self._check_length(self.string_length_limit, length)
def _check_container_length(self, length):
self._check_length(self.container_length_limit, length)
def writeMessageBegin(self, name, type, seqid):
if self.strictWrite:
self.writeI32(TBinaryProtocol.VERSION_1 | type)
self.writeString(name)
self.writeI32(seqid)
else:
self.writeString(name)
self.writeByte(type)
self.writeI32(seqid)
def writeMessageEnd(self):
pass
def writeStructBegin(self, name):
pass
def writeStructEnd(self):
pass
def writeFieldBegin(self, name, type, id):
self.writeByte(type)
self.writeI16(id)
def writeFieldEnd(self):
pass
def writeFieldStop(self):
self.writeByte(TType.STOP)
def writeMapBegin(self, ktype, vtype, size):
self.writeByte(ktype)
self.writeByte(vtype)
self.writeI32(size)
def writeMapEnd(self):
pass
def writeListBegin(self, etype, size):
self.writeByte(etype)
self.writeI32(size)
def writeListEnd(self):
pass
def writeSetBegin(self, etype, size):
self.writeByte(etype)
self.writeI32(size)
def writeSetEnd(self):
pass
def writeBool(self, bool):
if bool:
self.writeByte(1)
else:
self.writeByte(0)
def writeByte(self, byte):
buff = pack("!b", byte)
self.trans.write(buff)
def writeI16(self, i16):
buff = pack("!h", i16)
self.trans.write(buff)
def writeI32(self, i32):
buff = pack("!i", i32)
self.trans.write(buff)
def writeI64(self, i64):
buff = pack("!q", i64)
self.trans.write(buff)
def writeDouble(self, dub):
buff = pack("!d", dub)
self.trans.write(buff)
def writeBinary(self, str):
self.writeI32(len(str))
self.trans.write(str)
def readMessageBegin(self):
sz = self.readI32()
if sz < 0:
version = sz & TBinaryProtocol.VERSION_MASK
if version != TBinaryProtocol.VERSION_1:
raise TProtocolException(
type=TProtocolException.BAD_VERSION,
message='Bad version in readMessageBegin: %d' % (sz))
type = sz & TBinaryProtocol.TYPE_MASK
name = self.readString()
seqid = self.readI32()
else:
if self.strictRead:
raise TProtocolException(type=TProtocolException.BAD_VERSION,
message='No protocol version header')
name = self.trans.readAll(sz)
type = self.readByte()
seqid = self.readI32()
return (name, type, seqid)
def readMessageEnd(self):
pass
def readStructBegin(self):
pass
def readStructEnd(self):
pass
def readFieldBegin(self):
type = self.readByte()
if type == TType.STOP:
return (None, type, 0)
id = self.readI16()
return (None, type, id)
def readFieldEnd(self):
pass
def readMapBegin(self):
ktype = self.readByte()
vtype = self.readByte()
size = self.readI32()
self._check_container_length(size)
return (ktype, vtype, size)
def readMapEnd(self):
pass
def readListBegin(self):
etype = self.readByte()
size = self.readI32()
self._check_container_length(size)
return (etype, size)
def readListEnd(self):
pass
def readSetBegin(self):
etype = self.readByte()
size = self.readI32()
self._check_container_length(size)
return (etype, size)
def readSetEnd(self):
pass
def readBool(self):
byte = self.readByte()
if byte == 0:
return False
return True
def readByte(self):
buff = self.trans.readAll(1)
val, = unpack('!b', buff)
return val
def readI16(self):
buff = self.trans.readAll(2)
val, = unpack('!h', buff)
return val
def readI32(self):
buff = self.trans.readAll(4)
val, = unpack('!i', buff)
return val
def readI64(self):
buff = self.trans.readAll(8)
val, = unpack('!q', buff)
return val
def readDouble(self):
buff = self.trans.readAll(8)
val, = unpack('!d', buff)
return val
def readBinary(self):
size = self.readI32()
self._check_string_length(size)
s = self.trans.readAll(size)
return s
class TBinaryProtocolFactory(object):
def __init__(self, strictRead=False, strictWrite=True, **kwargs):
self.strictRead = strictRead
self.strictWrite = strictWrite
self.string_length_limit = kwargs.get('string_length_limit', None)
self.container_length_limit = kwargs.get('container_length_limit', None)
def getProtocol(self, trans):
prot = TBinaryProtocol(trans, self.strictRead, self.strictWrite,
string_length_limit=self.string_length_limit,
container_length_limit=self.container_length_limit)
return prot
class TBinaryProtocolAccelerated(TBinaryProtocol):
"""C-Accelerated version of TBinaryProtocol.
This class does not override any of TBinaryProtocol's methods,
but the generated code recognizes it directly and will call into
our C module to do the encoding, bypassing this object entirely.
We inherit from TBinaryProtocol so that the normal TBinaryProtocol
encoding can happen if the fastbinary module doesn't work for some
reason. (TODO(dreiss): Make this happen sanely in more cases.)
In order to take advantage of the C module, just use
TBinaryProtocolAccelerated instead of TBinaryProtocol.
NOTE: This code was contributed by an external developer.
The internal Thrift team has reviewed and tested it,
but we cannot guarantee that it is production-ready.
Please feel free to report bugs and/or success stories
to the public mailing list.
"""
pass
class TBinaryProtocolAcceleratedFactory(object):
def __init__(self,
string_length_limit=None,
container_length_limit=None):
self.string_length_limit = string_length_limit
self.container_length_limit = container_length_limit
def getProtocol(self, trans):
return TBinaryProtocolAccelerated(
trans,
string_length_limit=self.string_length_limit,
container_length_limit=self.container_length_limit)
| reTXT/thrift | lib/py/src/protocol/TBinaryProtocol.py | Python | apache-2.0 | 7,772 |
"""cmput404project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from rest_framework import routers
from service import views
urlpatterns = [
url(r'^', include('service.urls')),
url(r'^docs/', include('rest_framework_docs.urls')),
url(r'^admin/', admin.site.urls),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
| CMPUT404Team/CMPUT404-project-socialdistribution | cmput404project/cmput404project/urls.py | Python | apache-2.0 | 1,075 |
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cloud-Custodian AWS Lambda Entry Point
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import logging
import json
from c7n.config import Config
from c7n.structure import StructureParser
from c7n.resources import load_resources
from c7n.policy import PolicyCollection
from c7n.utils import format_event, get_account_id_from_sts, local_session
import boto3
logging.root.setLevel(logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
log = logging.getLogger('custodian.lambda')
##########################################
#
# Env var AWS Lambda specific configuration options, these are part of
# our "public" interface and hence are subject to compatibility constraints.
#
# Control whether custodian lambda policy skips events that represent errors.
# We default to skipping events which denote they have errors.
# Set with `export C7N_SKIP_ERR_EVENT=no` to process error events
C7N_SKIP_EVTERR = True
# Control whether the triggering event is logged.
# Set with `export C7N_DEBUG_EVENT=no` to disable event logging.
C7N_DEBUG_EVENT = True
# Control whether a policy failure will result in a lambda execution failure.
# Lambda on error will report error metrics and depending on event source
# automatically retry.
# Set with `export C7N_CATCH_ERR=yes`
C7N_CATCH_ERR = False
##########################################
#
# Internal global variables
#
# config.json policy data dict
policy_data = None
# execution options for the policy
policy_config = None
def init_env_globals():
"""Set module level values from environment variables.
Encapsulated here to enable better testing.
"""
global C7N_SKIP_EVTERR, C7N_DEBUG_EVENT, C7N_CATCH_ERR
C7N_SKIP_EVTERR = os.environ.get(
'C7N_SKIP_ERR_EVENT', 'yes') == 'yes' and True or False
C7N_DEBUG_EVENT = os.environ.get(
'C7N_DEBUG_EVENT', 'yes') == 'yes' and True or False
C7N_CATCH_ERR = os.environ.get(
'C7N_CATCH_ERR', 'no').strip().lower() == 'yes' and True or False
def init_config(policy_config):
"""Get policy lambda execution configuration.
cli parameters are serialized into the policy lambda config,
we merge those with any policy specific execution options.
--assume role and -s output directory get special handling, as
to disambiguate any cli context.
account id is sourced from the config options or from api call
and cached as a global.
Todo: this should get refactored out to mu.py as part of the
write out of configuration, instead of runtime processed.
"""
exec_options = policy_config.get('execution-options', {})
# Remove some configuration options that don't make sense to translate from
# cli to lambda automatically.
# - assume role on cli doesn't translate, it is the default lambda role and
# used to provision the lambda.
    # - profile doesn't translate to lambda; it's `home` dir setup dependent
# - dryrun doesn't translate (and shouldn't be present)
# - region doesn't translate from cli (the lambda is bound to a region), and
# on the cli represents the region the lambda is provisioned in.
for k in ('assume_role', 'profile', 'region', 'dryrun', 'cache'):
exec_options.pop(k, None)
# a cli local directory doesn't translate to lambda
if not exec_options.get('output_dir', '').startswith('s3'):
exec_options['output_dir'] = '/tmp'
account_id = None
# we can source account id from the cli parameters to avoid the sts call
if exec_options.get('account_id'):
account_id = exec_options['account_id']
# merge with policy specific configuration
exec_options.update(
policy_config['policies'][0].get('mode', {}).get('execution-options', {}))
# if using assume role in lambda ensure that the correct
# execution account is captured in options.
if 'assume_role' in exec_options:
account_id = exec_options['assume_role'].split(':')[4]
elif account_id is None:
session = local_session(boto3.Session)
account_id = get_account_id_from_sts(session)
exec_options['account_id'] = account_id
# Historical compatibility with manually set execution options
# previously this was a boolean, its now a string value with the
# boolean flag triggering a string value of 'aws'
if 'metrics_enabled' in exec_options \
and isinstance(exec_options['metrics_enabled'], bool) \
and exec_options['metrics_enabled']:
exec_options['metrics_enabled'] = 'aws'
return Config.empty(**exec_options)
# One-time initialization of global environment settings
init_env_globals()
def dispatch_event(event, context):
error = event.get('detail', {}).get('errorCode')
if error and C7N_SKIP_EVTERR:
log.debug("Skipping failed operation: %s" % error)
return
# one time initialization for cold starts.
global policy_config, policy_data
if policy_config is None:
with open('config.json') as f:
policy_data = json.load(f)
policy_config = init_config(policy_data)
load_resources(StructureParser().get_resource_types(policy_data))
if C7N_DEBUG_EVENT:
event['debug'] = True
log.info("Processing event\n %s", format_event(event))
if not policy_data or not policy_data.get('policies'):
return False
policies = PolicyCollection.from_data(policy_data, policy_config)
for p in policies:
try:
# validation provides for an initialization point for
# some filters/actions.
p.validate()
p.push(event, context)
except Exception:
log.exception("error during policy execution")
if C7N_CATCH_ERR:
continue
raise
return True
| kapilt/cloud-custodian | c7n/handler.py | Python | apache-2.0 | 6,491 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="grafeas.v1",
manifest={
"Recipe",
"Completeness",
"Metadata",
"BuilderConfig",
"InTotoProvenance",
},
)
class Recipe(proto.Message):
r"""Steps taken to build the artifact.
For a TaskRun, typically each container corresponds to one step
in the recipe.
Attributes:
type_ (str):
URI indicating what type of recipe was
performed. It determines the meaning of
recipe.entryPoint, recipe.arguments,
recipe.environment, and materials.
defined_in_material (int):
Index in materials containing the recipe
steps that are not implied by recipe.type. For
example, if the recipe type were "make", then
this would point to the source containing the
Makefile, not the make program itself. Set to -1
if the recipe doesn't come from a material, as
zero is default unset value for int64.
entry_point (str):
String identifying the entry point into the
build. This is often a path to a configuration
file and/or a target label within that file. The
syntax and meaning are defined by recipe.type.
For example, if the recipe type were "make",
then this would reference the directory in which
to run make as well as which target to use.
arguments (Sequence[google.protobuf.any_pb2.Any]):
Collection of all external inputs that
influenced the build on top of
recipe.definedInMaterial and recipe.entryPoint.
For example, if the recipe type were "make",
then this might be the flags passed to make
aside from the target, which is captured in
recipe.entryPoint. Since the arguments field can
greatly vary in structure, depending on the
builder and recipe type, this is of form "Any".
environment (Sequence[google.protobuf.any_pb2.Any]):
Any other builder-controlled inputs necessary
for correctly evaluating the recipe. Usually
only needed for reproducing the build but not
evaluated as part of policy. Since the
environment field can greatly vary in structure,
depending on the builder and recipe type, this
is of form "Any".
"""
type_ = proto.Field(proto.STRING, number=1,)
defined_in_material = proto.Field(proto.INT64, number=2,)
entry_point = proto.Field(proto.STRING, number=3,)
arguments = proto.RepeatedField(proto.MESSAGE, number=4, message=any_pb2.Any,)
environment = proto.RepeatedField(proto.MESSAGE, number=5, message=any_pb2.Any,)
class Completeness(proto.Message):
r"""Indicates that the builder claims certain fields in this
message to be complete.
Attributes:
arguments (bool):
If true, the builder claims that
recipe.arguments is complete, meaning that all
external inputs are properly captured in the
recipe.
environment (bool):
If true, the builder claims that
recipe.environment is claimed to be complete.
materials (bool):
If true, the builder claims that materials
are complete, usually through some controls to
prevent network access. Sometimes called
"hermetic".
"""
arguments = proto.Field(proto.BOOL, number=1,)
environment = proto.Field(proto.BOOL, number=2,)
materials = proto.Field(proto.BOOL, number=3,)
class Metadata(proto.Message):
r"""Other properties of the build.
Attributes:
build_invocation_id (str):
Identifies the particular build invocation,
which can be useful for finding associated logs
or other ad-hoc analysis. The value SHOULD be
globally unique, per in-toto Provenance spec.
build_started_on (google.protobuf.timestamp_pb2.Timestamp):
The timestamp of when the build started.
build_finished_on (google.protobuf.timestamp_pb2.Timestamp):
The timestamp of when the build completed.
completeness (grafeas.grafeas_v1.types.Completeness):
Indicates that the builder claims certain
fields in this message to be complete.
reproducible (bool):
If true, the builder claims that running the
recipe on materials will produce bit-for-bit
identical output.
"""
build_invocation_id = proto.Field(proto.STRING, number=1,)
build_started_on = proto.Field(
proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,
)
build_finished_on = proto.Field(
proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,
)
completeness = proto.Field(proto.MESSAGE, number=4, message="Completeness",)
reproducible = proto.Field(proto.BOOL, number=5,)
class BuilderConfig(proto.Message):
r"""
Attributes:
id (str):
"""
id = proto.Field(proto.STRING, number=1,)
class InTotoProvenance(proto.Message):
r"""
Attributes:
builder_config (grafeas.grafeas_v1.types.BuilderConfig):
required
recipe (grafeas.grafeas_v1.types.Recipe):
Identifies the configuration used for the
build. When combined with materials, this SHOULD
fully describe the build, such that re-running
this recipe results in bit-for-bit identical
output (if the build is reproducible).
metadata (grafeas.grafeas_v1.types.Metadata):
materials (Sequence[str]):
The collection of artifacts that influenced
the build including sources, dependencies, build
tools, base images, and so on. This is
considered to be incomplete unless
metadata.completeness.materials is true. Unset
or null is equivalent to empty.
"""
builder_config = proto.Field(proto.MESSAGE, number=1, message="BuilderConfig",)
recipe = proto.Field(proto.MESSAGE, number=2, message="Recipe",)
metadata = proto.Field(proto.MESSAGE, number=3, message="Metadata",)
materials = proto.RepeatedField(proto.STRING, number=4,)
__all__ = tuple(sorted(__protobuf__.manifest))
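# Illustrative construction sketch; all field values below are arbitrary examples:
#
#   provenance = InTotoProvenance(
#       builder_config=BuilderConfig(id='https://example.com/builders/1'),
#       recipe=Recipe(type_='make', entry_point='Makefile', defined_in_material=0),
#       metadata=Metadata(build_invocation_id='build-42', reproducible=False),
#       materials=['git+https://example.com/repo@deadbeef'],
#   )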
| googleapis/python-grafeas | grafeas/grafeas_v1/types/intoto_provenance.py | Python | apache-2.0 | 7,148 |
# https://djangosnippets.org/snippets/690/
import re
from django.template.defaultfilters import slugify
def unique_slugify(instance, value, slug_field_name='slug', queryset=None,
slug_separator='-'):
"""
Calculates and stores a unique slug of ``value`` for an instance.
``slug_field_name`` should be a string matching the name of the field to
store the slug in (and the field to check against for uniqueness).
``queryset`` usually doesn't need to be explicitly provided - it'll default
to using the ``.all()`` queryset from the model's default manager.
"""
slug_field = instance._meta.get_field(slug_field_name)
slug = getattr(instance, slug_field.attname)
slug_len = slug_field.max_length
# Sort out the initial slug, limiting its length if necessary.
slug = slugify(value)
if slug_len:
slug = slug[:slug_len]
slug = _slug_strip(slug, slug_separator)
original_slug = slug
# Create the queryset if one wasn't explicitly provided and exclude the
# current instance from the queryset.
if queryset is None:
queryset = instance.__class__._default_manager.all()
if instance.pk:
queryset = queryset.exclude(pk=instance.pk)
    # Find a unique slug. If one matches, add '-2' to the end and try again
# (then '-3', etc).
next = 2
while not slug or queryset.filter(**{slug_field_name: slug}):
slug = original_slug
end = '%s%s' % (slug_separator, next)
if slug_len and len(slug) + len(end) > slug_len:
slug = slug[:slug_len-len(end)]
slug = _slug_strip(slug, slug_separator)
slug = '%s%s' % (slug, end)
next += 1
setattr(instance, slug_field.attname, slug)
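# Illustrative usage sketch (hypothetical model); typically called from save():
#
#   class Post(models.Model):
#       title = models.CharField(max_length=255)
#       slug = models.SlugField(max_length=255, unique=True)
#
#       def save(self, *args, **kwargs):
#           unique_slugify(self, self.title)
#           super(Post, self).save(*args, **kwargs)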
def _slug_strip(value, separator='-'):
"""
Cleans up a slug by removing slug separator characters that occur at the
beginning or end of a slug.
If an alternate separator is used, it will also replace any instances of
the default '-' separator with the new separator.
"""
separator = separator or ''
if separator == '-' or not separator:
re_sep = '-'
else:
re_sep = '(?:-|%s)' % re.escape(separator)
# Remove multiple instances and if an alternate separator is provided,
# replace the default '-' separator.
if separator != re_sep:
value = re.sub('%s+' % re_sep, separator, value)
# Remove separator from the beginning and end of the slug.
if separator:
if separator != '-':
re_sep = re.escape(separator)
value = re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)
    return value
| thelabnyc/wagtail_blog | blog/utils.py | Python | apache-2.0 | 2,644 |
'''Trains a simple convnet on the Fashion MNIST dataset.
Gets to % test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
'''
from __future__ import print_function
import keras
from keras.datasets import fashion_mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
| erramuzpe/seattle-perceptual-learning | perclearn/scripts/fashion_mnist_cnn.py | Python | apache-2.0 | 2,248 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import torch
import torch.utils.data
import ray
from ray.experimental.sgd.pytorch import pytorch_utils
from ray.experimental.sgd import utils
logger = logging.getLogger(__name__)
class PyTorchRunner(object):
"""Manages a PyTorch model for training."""
def __init__(self,
model_creator,
data_creator,
optimizer_creator,
config=None,
batch_size=16):
"""Initializes the runner.
Args:
model_creator (dict -> torch.nn.Module): see pytorch_trainer.py.
data_creator (dict -> Dataset, Dataset): see pytorch_trainer.py.
optimizer_creator (torch.nn.Module, dict -> loss, optimizer):
see pytorch_trainer.py.
config (dict): see pytorch_trainer.py.
batch_size (int): see pytorch_trainer.py.
"""
self.model_creator = model_creator
self.data_creator = data_creator
self.optimizer_creator = optimizer_creator
self.config = {} if config is None else config
self.batch_size = batch_size
self.verbose = True
self.epoch = 0
self._timers = {
k: utils.TimerStat(window_size=1)
for k in [
"setup_proc", "setup_model", "get_state", "set_state",
"validation", "training"
]
}
def setup(self):
"""Initializes the model."""
logger.debug("Creating model")
self.model = self.model_creator(self.config)
if torch.cuda.is_available():
self.model = self.model.cuda()
logger.debug("Creating optimizer")
self.criterion, self.optimizer = self.optimizer_creator(
self.model, self.config)
if torch.cuda.is_available():
self.criterion = self.criterion.cuda()
logger.debug("Creating dataset")
self.training_set, self.validation_set = self.data_creator(self.config)
self.train_loader = torch.utils.data.DataLoader(
self.training_set,
batch_size=self.batch_size,
shuffle=True,
num_workers=2,
pin_memory=False)
self.validation_loader = torch.utils.data.DataLoader(
self.validation_set,
batch_size=self.batch_size,
shuffle=True,
num_workers=2,
pin_memory=False)
def get_node_ip(self):
"""Returns the IP address of the current node."""
return ray.services.get_node_ip_address()
def find_free_port(self):
"""Finds a free port on the current node."""
return utils.find_free_port()
def step(self):
"""Runs a training epoch and updates the model parameters."""
logger.debug("Begin Training Epoch {}".format(self.epoch + 1))
with self._timers["training"]:
train_stats = pytorch_utils.train(self.train_loader, self.model,
self.criterion, self.optimizer)
train_stats["epoch"] = self.epoch
self.epoch += 1
train_stats.update(self.stats())
return train_stats
def validate(self):
"""Evaluates the model on the validation data set."""
with self._timers["validation"]:
validation_stats = pytorch_utils.validate(
self.validation_loader, self.model, self.criterion)
validation_stats.update(self.stats())
return validation_stats
def stats(self):
"""Returns a dictionary of statistics collected."""
stats = {"epoch": self.epoch}
for k, t in self._timers.items():
stats[k + "_time_mean"] = t.mean
stats[k + "_time_total"] = t.sum
t.reset()
return stats
def get_state(self):
"""Returns the state of the runner."""
return {
"epoch": self.epoch,
"model": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
"stats": self.stats()
}
def set_state(self, state):
"""Sets the state of the model."""
# TODO: restore timer stats
self.model.load_state_dict(state["model"])
self.optimizer.load_state_dict(state["optimizer"])
self.epoch = state["stats"]["epoch"]
def shutdown(self):
"""Attempts to shut down the worker."""
del self.validation_loader
del self.validation_set
del self.train_loader
del self.training_set
del self.criterion
del self.optimizer
del self.model
if torch.cuda.is_available():
torch.cuda.empty_cache()
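# --- Illustrative usage sketch (not part of the original module) ---
# Shows the creator-function contract documented in __init__ above. The toy
# model, dataset and optimizer below are assumptions chosen only for
# demonstration; real code would pass domain-specific creators.
def _example_model_creator(config):
    return torch.nn.Linear(1, 1)
def _example_data_creator(config):
    features = torch.randn(64, 1)
    targets = 2 * features
    dataset = torch.utils.data.TensorDataset(features, targets)
    return dataset, dataset
def _example_optimizer_creator(model, config):
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=config.get("lr", 0.01))
    return criterion, optimizer
# runner = PyTorchRunner(_example_model_creator, _example_data_creator,
#                        _example_optimizer_creator, config={"lr": 0.01})
# runner.setup()
# print(runner.step())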
| ujvl/ray-ng | python/ray/experimental/sgd/pytorch/pytorch_runner.py | Python | apache-2.0 | 4,800 |
from django.db import models
from django.contrib.auth.models import User
from datetime import date
# Create your models here.
class Genre(models.Model):
"""
Model representing a book genre (e.g. Science Fiction, Non Fiction).
"""
name = models.CharField(max_length=200, help_text="Enter a book genre (e.g. Science Fiction, French Poetry etc.)")
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
from django.urls import reverse #Used to generate URLs by reversing the URL patterns
class Book(models.Model):
    """
    Model representing a book (but not a specific copy of a book).
    """
    class Meta:
        permissions = (("can_edit_book", "Edit book"),)
title = models.CharField(max_length=200)
author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)
# Foreign Key used because book can only have one author, but authors can have multiple books
# Author as a string rather than object because it hasn't been declared yet in the file.
summary = models.TextField(max_length=1000, help_text="Enter a brief description of the book")
isbn = models.CharField('ISBN',max_length=13, help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>')
genre = models.ManyToManyField(Genre, help_text="Select a genre for this book")
# ManyToManyField used because genre can contain many books. Books can cover many genres.
# Genre class has already been defined so we can specify the object above.
language = models.ForeignKey('Language', on_delete=models.SET_NULL, null=True)
def __str__(self):
"""
String for representing the Model object.
"""
return self.title
def get_absolute_url(self):
"""
Returns the url to access a particular book instance.
"""
return reverse('book-detail', args=[str(self.id)])
def display_genre(self):
"""
Creates a string for the Genre. This is required to display genre in Admin.
"""
return ', '.join([ genre.name for genre in self.genre.all()[:3] ])
display_genre.short_description = 'Genre'
import uuid # Required for unique book instances
class BookInstance(models.Model):
    """
    Model representing a specific copy of a book (i.e. that can be borrowed from the library).
    """
    class Meta:
        permissions = (("can_mark_returned", "Set book as returned"),)
        ordering = ["due_back"]
id = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text="Unique ID for this particular book across whole library")
book = models.ForeignKey('Book', on_delete=models.SET_NULL, null=True)
imprint = models.CharField(max_length=200)
due_back = models.DateField(null=True, blank=True)
borrower = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)
LOAN_STATUS = (
('m', 'Maintenance'),
('o', 'On loan'),
('a', 'Available'),
('r', 'Reserved'),
)
    status = models.CharField(max_length=1, choices=LOAN_STATUS, blank=True, default='m', help_text='Book availability')
def __str__(self):
"""
String for representing the Model object
"""
return '%s (%s)' % (self.id,self.book.title)
@property
def is_overdue(self):
if self.due_back and date.today() > self.due_back:
return True
return False
class Author(models.Model):
"""
Model representing an author.
"""
class Meta:
permissions = (("can_edit_author", "Edit author"),)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
date_of_birth = models.DateField(null=True, blank=True)
date_of_death = models.DateField('Died', null=True, blank=True)
def get_absolute_url(self):
"""
Returns the url to access a particular author instance.
"""
return reverse('author-detail', args=[str(self.id)])
def __str__(self):
"""
String for representing the Model object.
"""
return '%s, %s' % (self.last_name, self.first_name)
class Language(models.Model):
"""
Model representing a Language (e.g. English, French, Japanese, etc.)
"""
name = models.CharField(max_length=200, help_text="Enter a the book's natural language (e.g. English, French, Japanese etc.)")
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
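# --- Illustrative helper (not part of the original tutorial app) ---
# Sketches how the models above are typically combined; the literal values
# are assumptions for demonstration only, and nothing in the app calls this.
def example_catalog_usage():
    author = Author.objects.create(first_name='Jane', last_name='Austen')
    fiction = Genre.objects.create(name='Fiction')
    book = Book.objects.create(title='Emma', author=author,
                               summary='A novel of manners.',
                               isbn='9781234567897')
    book.genre.add(fiction)
    BookInstance.objects.create(book=book, imprint='First edition', status='a')
    return BookInstance.objects.filter(status='a')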
| byronlin92/django_local_library | catalog/models.py | Python | apache-2.0 | 4,639 |
def benchmark_hash_data():
"""
CommandLine:
python ~/code/ubelt/dev/bench_hash.py --convert=True --show
python ~/code/ubelt/dev/bench_hash.py --convert=False --show
"""
import ubelt as ub
#ITEM = 'JUST A STRING' * 100
ITEM = [0, 1, 'a', 'b', ['JUST A STRING'] * 4]
HASHERS = ['sha1', 'sha512', 'xxh32', 'xxh64', 'blake3']
scales = list(range(5, 13))
results = ub.AutoDict()
    # Using json conversion is faster, or at least as fast, in most cases.
    # xxhash is also significantly faster than sha512.
    convert = ub.argval('--convert', default='True').lower() == 'true'
print('convert = {!r}'.format(convert))
ti = ub.Timerit(9, bestof=3, verbose=1, unit='ms')
for s in ub.ProgIter(scales, desc='benchmark', verbose=3):
N = 2 ** s
print(' --- s={s}, N={N} --- '.format(s=s, N=N))
data = [ITEM] * N
for hasher in HASHERS:
for timer in ti.reset(hasher):
ub.hash_data(data, hasher=hasher, convert=convert)
results[hasher].update({N: ti.mean()})
col = {h: results[h][N] for h in HASHERS}
sortx = ub.argsort(col)
ranking = ub.dict_subset(col, sortx)
print('walltime: ' + ub.repr2(ranking, precision=9, nl=0))
best = next(iter(ranking))
#pairs = list(ub.iter_window( 2))
pairs = [(k, best) for k in ranking]
ratios = [ranking[k1] / ranking[k2] for k1, k2 in pairs]
nicekeys = ['{}/{}'.format(k1, k2) for k1, k2 in pairs]
relratios = ub.odict(zip(nicekeys, ratios))
print('speedup: ' + ub.repr2(relratios, precision=4, nl=0))
# xdoc +REQUIRES(--show)
# import pytest
# pytest.skip()
import pandas as pd
df = pd.DataFrame.from_dict(results)
df.columns.name = 'hasher'
df.index.name = 'N'
ratios = df.copy().drop(columns=df.columns)
for k1, k2 in [('sha512', 'xxh32'), ('sha1', 'xxh32'), ('xxh64', 'xxh32')]:
ratios['{}/{}'.format(k1, k2)] = df[k1] / df[k2]
print()
print('Seconds per iteration')
print(df.to_string(float_format='%.9f'))
print()
print('Ratios of seconds')
print(ratios.to_string(float_format='%.2f'))
print()
print('Average Ratio (over all N)')
print('convert = {!r}'.format(convert))
print(ratios.mean().sort_values())
if ub.argflag('--show'):
import kwplot
kwplot.autompl()
xdata = sorted(ub.peek(results.values()).keys())
ydata = ub.map_vals(lambda d: [d[x] for x in xdata], results)
kwplot.multi_plot(xdata, ydata, xlabel='N', ylabel='seconds', title='convert = {}'.format(convert))
kwplot.show_if_requested()
if __name__ == '__main__':
"""
CommandLine:
python ~/code/ubelt/dev/bench_hash.py
"""
benchmark_hash_data()
| Erotemic/ubelt | dev/bench/bench_hash.py | Python | apache-2.0 | 2,805 |
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from google.cloud import datacatalog
from google.protobuf import timestamp_pb2
from google.datacatalog_connectors.commons.prepare.base_entry_factory import \
BaseEntryFactory
from google.datacatalog_connectors.rdbms.common import constants
class DataCatalogEntryFactory(BaseEntryFactory):
NO_VALUE_SPECIFIED = 'UNDEFINED'
EMPTY_TOKEN = '?'
def __init__(self, project_id, location_id, entry_resource_url_prefix,
entry_group_id, metadata_definition):
self.__project_id = project_id
self.__location_id = location_id
self.__entry_resource_url_prefix = entry_resource_url_prefix
self.__entry_group_id = entry_group_id
self.__metadata_definition = metadata_definition
def make_entries_for_table_container(self, table_container):
"""Create Datacatalog entries from a table container dict.
:param table_container:
:return: entry_id, entry
"""
entry_id = self._format_id(table_container['name'])
entry = datacatalog.Entry()
entry.user_specified_type = self.__metadata_definition[
'table_container_def']['type']
entry.user_specified_system = self.__entry_group_id
entry.display_name = self._format_display_name(table_container['name'])
create_time, update_time = \
DataCatalogEntryFactory.__convert_source_system_timestamp_fields(
table_container.get('create_time'),
table_container.get('update_time'))
if create_time and update_time:
created_timestamp = timestamp_pb2.Timestamp()
created_timestamp.FromSeconds(create_time)
entry.source_system_timestamps.create_time = created_timestamp
updated_timestamp = timestamp_pb2.Timestamp()
updated_timestamp.FromSeconds(update_time)
entry.source_system_timestamps.update_time = updated_timestamp
desc = table_container.get('desc')
if pd.isna(desc):
desc = ''
entry.description = desc
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
entry_id)
entry.linked_resource = '{}/{}'.format(
self.__entry_resource_url_prefix, entry_id)
return entry_id, entry
def make_entry_for_tables(self, table, table_container_name):
"""Create Datacatalog entries from a table dict.
:param table:
:param table_container_name:
:return: entry_id, entry
"""
entry_id = self._format_id('{}__{}'.format(table_container_name,
table['name']))
entry = datacatalog.Entry()
        # Some RDBMSs store view and table definitions in the same
        # system table, and the type name is not user friendly, so we only
        # keep it if it's a VIEW type.
table_type = table.get(constants.TABLE_TYPE_KEY)
if table_type and table_type.lower() == \
constants.VIEW_TYPE_VALUE:
table_type = table_type.lower()
else:
table_type = self.__metadata_definition['table_def']['type']
entry.user_specified_type = table_type
entry.user_specified_system = self.__entry_group_id
entry.display_name = self._format_display_name(table['name'])
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
entry_id)
desc = table.get('desc')
if pd.isna(desc):
desc = ''
entry.description = desc
entry.linked_resource = '{}/{}/{}'.format(
self.__entry_resource_url_prefix, table_container_name,
self._format_id(table['name']))
create_time, update_time = \
DataCatalogEntryFactory.__convert_source_system_timestamp_fields(
table.get('create_time'),
table.get('update_time'))
if create_time and update_time:
created_timestamp = timestamp_pb2.Timestamp()
created_timestamp.FromSeconds(create_time)
entry.source_system_timestamps.create_time = created_timestamp
updated_timestamp = timestamp_pb2.Timestamp()
updated_timestamp.FromSeconds(update_time)
entry.source_system_timestamps.update_time = updated_timestamp
columns = []
for column in table['columns']:
desc = column.get('desc')
if pd.isna(desc):
desc = ''
columns.append(
datacatalog.ColumnSchema(
column=self._format_id(column['name']),
description=desc,
type=DataCatalogEntryFactory.__format_entry_column_type(
column['type'])))
entry.schema.columns.extend(columns)
return entry_id, entry
@staticmethod
def __convert_date_value_to_epoch(date_value):
if pd.notnull(date_value):
return int(date_value.timestamp())
@staticmethod
def __convert_source_system_timestamp_fields(raw_create_time,
raw_update_time):
create_time = DataCatalogEntryFactory. \
__convert_date_value_to_epoch(raw_create_time)
if not pd.isnull(raw_update_time):
update_time = DataCatalogEntryFactory. \
__convert_date_value_to_epoch(raw_update_time)
else:
update_time = create_time
return create_time, update_time
@staticmethod
def __format_entry_column_type(source_name):
if isinstance(source_name, bytes):
# We've noticed some MySQL instances use bytes-like objects
# instead of `str` to specify the column types. We are using UTF-8
# to decode such objects when it happens because UTF-8 is the
# default character set for MySQL 8.0 onwards.
#
# We didn't notice similar behavior with other RDBMS but, if so,
# we should handle encoding as a configuration option that each
# RDBMS connector would have to set up. It might be exposed as a
# CLI arg, so users could easily change that. There is also the
# option to scrape that config directly from the DB.
source_name = source_name.decode("utf-8")
formatted_name = source_name.replace('&', '_')
formatted_name = formatted_name.replace(':', '_')
formatted_name = formatted_name.replace('/', '_')
formatted_name = formatted_name.replace(' ', '_')
if formatted_name == DataCatalogEntryFactory.EMPTY_TOKEN:
formatted_name = DataCatalogEntryFactory.NO_VALUE_SPECIFIED
return formatted_name
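# --- Illustrative usage sketch (not part of the original connector) ---
# Builds one table-container entry from a hand-written metadata dict. The
# project, location, URL prefix and metadata_definition values are
# assumptions for demonstration only.
if __name__ == '__main__':
    example_factory = DataCatalogEntryFactory(
        project_id='my-project',
        location_id='us-central1',
        entry_resource_url_prefix='https://example.com/mysql',
        entry_group_id='mysql',
        metadata_definition={
            'table_container_def': {'type': 'schema'},
            'table_def': {'type': 'table'},
        })
    entry_id, entry = example_factory.make_entries_for_table_container(
        {'name': 'sales_db', 'desc': 'Example sales schema'})
    print(entry_id, entry.display_name)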
| GoogleCloudPlatform/datacatalog-connectors-rdbms | google-datacatalog-rdbms-connector/src/google/datacatalog_connectors/rdbms/prepare/datacatalog_entry_factory.py | Python | apache-2.0 | 7,500 |
#!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
############################################################################
## START OF IMPORTS SETUP
############################################################################
import sys
import os
import re
import copy
import glob
import types
try:
from . import base
from . import dag
from . import util
from . import plan
except:
s = "\nXED ERROR: mfile.py could not find mbuild." + \
" Should be a sibling of the xed2 directory.\n\n"
sys.stderr.write(s)
sys.exit(1)
###########################################################################
## DOXYGEN SUPPORT
###########################################################################
def _doxygen_version_okay(s, want_major, want_minor, want_fix):
values = s.split('.')
    maj = int(values[0])
minor = int(values[1])
fix = 0
if len(values) > 2:
# remove everything after the dash for things like: 'Doxygen
# 1.5.1-p1'
values[2] = re.sub(r'-.*$','',values[2])
try:
fix = int(values[2])
except ValueError as v:
pass
if (maj > 1) or \
(maj == want_major and minor > want_minor) or \
(maj == want_major and minor == want_minor and fix >= want_fix):
return True
return False
def _find_doxygen(env):
"""Find the right version of doxygen. Return a tuple of the
command name and a boolean indicating whether or not the version
checked out."""
if env['doxygen_cmd'] == '':
doxygen_cmd_intel = "/usr/intel/bin/doxygen"
doxygen_cmd_cygwin = "C:/cygwin/bin/doxygen"
doxygen_cmd_mac = \
"/Applications/Doxygen.app/Contents/Resources/doxygen"
doxygen_cmd = "doxygen"
if env['build_os'] == 'win':
if os.path.exists(doxygen_cmd_cygwin):
doxygen_cmd = doxygen_cmd_cygwin
else:
base.msgb('DOXYGEN',"Could not find cygwin's doxygen," +
"trying doxygen from PATH")
elif env['build_os'] == 'lin':
if base.verbose(2):
base.msgb("CHECKING FOR", doxygen_cmd_intel)
if os.path.exists(doxygen_cmd_intel):
doxygen_cmd = doxygen_cmd_intel
elif env['build_os'] == 'mac':
if base.verbose(2):
base.msgb("CHECKING FOR", doxygen_cmd_mac)
if os.path.exists(doxygen_cmd_mac):
doxygen_cmd = doxygen_cmd_mac
else:
doxygen_cmd = env['doxygen_cmd']
doxygen_cmd = env.escape_string(doxygen_cmd)
doxygen_okay = False
if base.verbose(2):
base.msgb('Checking doxygen version','...')
if base.check_python_version(2,4):
try:
(retval, output, error_output) = \
util.run_command(doxygen_cmd + " --version")
if retval==0:
if len(output) > 0:
first_line = output[0].strip()
if base.verbose(2):
base.msgb("Doxygen version", first_line)
doxygen_okay = _doxygen_version_okay(first_line, 1,4,6)
else:
for o in output:
base.msgb("Doxygen-version-check STDOUT", o)
if error_output:
for line in error_output:
base.msgb("STDERR ",line.rstrip())
except:
base.die("Doxygen required by the command line options " +
"but no doxygen found")
return (doxygen_cmd, doxygen_okay)
def _replace_match(istring, mtch, newstring, group_name):
"""This is a lame way of avoiding regular expression backslashing
issues"""
x1= mtch.start(group_name)
x2= mtch.end(group_name)
ostring = istring[0:x1] + newstring + istring[x2:]
return ostring
def _customize_doxygen_file(env, subs):
"""Change the $(*) strings to the proper value in the config file.
Returns True on success"""
# doxygen wants quotes around paths with spaces
for k,s in iter(subs.items()):
if re.search(' ',s):
if not re.search('^".*"$',s):
base.die("Doxygen requires quotes around strings with spaces: [%s]->[%s]" %
( k,s))
return False
# input and output files
try:
lines = open(env['doxygen_config']).readlines()
except:
base.msgb("Could not open input file: " + env['doxygen_config'])
return False
env['doxygen_config_customized'] = \
env.build_dir_join(os.path.basename(env['doxygen_config']) + '.customized')
try:
ofile = open(env['doxygen_config_customized'],'w')
except:
base.msgb("Could not open output file: " + env['doxygen_config_customized'])
return False
# compile the patterns
rsubs = {}
for k,v in iter(subs.items()):
rsubs[k]=re.compile(r'(?P<tag>[$][(]' + k + '[)])')
olines = []
for line in lines:
oline = line
for k,p in iter(rsubs.items()):
#print ('searching for', k, 'to replace it with', subs[k])
m = p.search(oline)
while m:
#print ('replacing', k, 'with', subs[k])
oline = _replace_match(oline, m, subs[k], 'tag')
m = p.search(oline)
olines.append(oline)
try:
for line in olines:
ofile.write(line)
except:
ofile.close()
base.msgb("Could not write output file: " + env['doxygen_config_customized'])
return False
ofile.close()
return True
def _build_doxygen_main(args, env):
"""Customize the doxygen input file. Run the doxygen command, copy
in any images, and put the output in the right place."""
if isinstance(args, list):
if len(args) < 2:
base.die("Need subs dictionary and dummy file arg for the doxygen command " +
"to indicate its processing")
else:
base.die("Need a list for _build_doxygen_main with the subs " +
"dictionary and the dummy file name")
(subs,dummy_file) = args
(doxygen_cmd, doxygen_okay) = _find_doxygen(env)
if not doxygen_okay:
msg = 'No good doxygen available on this system; ' + \
'Your command line arguments\n\trequire it to be present. ' + \
'Consider dropping the "doc" and "doc-build" options\n\t or ' + \
'specify a path to doxygen with the --doxygen knob.\n\n\n'
return (1, [msg]) # failure
else:
env['DOXYGEN'] = doxygen_cmd
try:
okay = _customize_doxygen_file(env, subs)
except:
base.die("CUSTOMIZE DOXYGEN INPUT FILE FAILED")
if not okay:
return (1, ['Doxygen customization failed'])
cmd = env['DOXYGEN'] + ' ' + \
env.escape_string(env['doxygen_config_customized'])
if base.verbose(2):
base.msgb("RUN DOXYGEN", cmd)
(retval, output, error_output) = util.run_command(cmd)
for line in output:
base.msgb("DOX",line.rstrip())
if error_output:
for line in error_output:
base.msgb("DOX-ERROR",line.rstrip())
if retval != 0:
base.msgb("DOXYGEN FAILED")
base.die("Doxygen run failed. Retval=", str(retval))
util.touch(dummy_file)
base.msgb("DOXYGEN","succeeded")
return (0, []) # success
###########################################################################
# Doxygen build
###########################################################################
def _empty_dir(d):
"""return True if the directory d does not exist or if it contains no
files/subdirectories."""
if not os.path.exists(d):
return True
for (root, subdirs, subfiles) in os.walk(d):
if len(subfiles) or len(subdirs):
return False
return True
def _make_doxygen_reference_manual(env, doxygen_inputs, subs, work_queue,
hash_file_name='dox'):
"""Install the doxygen reference manual the doyxgen_output_dir
directory. doxygen_inputs is a list of files """
dox_dag = dag.dag_t(hash_file_name,env=env)
# so that the scanner can find them
dirs = {}
for f in doxygen_inputs:
dirs[os.path.dirname(f)]=True
for d in dirs.keys():
env.add_include_dir(d)
    # make sure the config and top file are in the inputs list
doxygen_inputs.append(env['doxygen_config'])
doxygen_inputs.append(env['doxygen_top_src'])
dummy = env.build_dir_join('dummy-doxygen-' + hash_file_name)
# Run it via the builder to make it dependence driven
run_always = False
if _empty_dir(env['doxygen_install']):
run_always = True
if run_always:
_build_doxygen_main([subs,dummy], env)
else:
c1 = plan.plan_t(command=_build_doxygen_main,
args= [subs,dummy],
env= env,
input= doxygen_inputs,
output= dummy)
dox1 = dox_dag.add(env,c1)
okay = work_queue.build(dag=dox_dag)
phase = "DOXYGEN"
if not okay:
base.die("[%s] failed. dying..." % phase)
if base.verbose(2):
base.msgb(phase, "build succeeded")
############################################################
def doxygen_env(env):
"""Add the doxygen variables to the environment"""
doxygen_defaults = dict( doxygen_config='',
doxygen_top_src='',
doxygen_install='',
doxygen_cmd='' )
env.update_dict(doxygen_defaults)
def doxygen_args(env):
"""Add the knobs to the command line knobs parser"""
env.parser.add_option("--doxygen-install",
dest="doxygen_install",
action="store",
default='',
help="Doxygen installation directory")
env.parser.add_option("--doxygen-config",
dest="doxygen_config",
action="store",
default='',
help="Doxygen config file")
env.parser.add_option("--doxygen-top-src",
dest="doxygen_top_src",
action="store",
default='',
help="Doxygen top source file")
env.parser.add_option("--doxygen-cmd",
dest="doxygen_cmd",
action="store",
default='',
help="Doxygen command name")
def doxygen_run(env, inputs, subs, work_queue, hash_file_name='dox'):
"""Run doxygen assuming certain values are in the environment env.
@type env: env_t
@param env: the environment
@type inputs: list
@param inputs: list of input files to scan for dependences
@type subs: dictionary
@param subs: replacements in the config file
@type work_queue: work_queue_t
@param work_queue: a work queue for the build
@type hash_file_name: string
@param hash_file_name: used for the dummy file and mbuild hash suffix
"""
_make_doxygen_reference_manual(env, inputs, subs, work_queue, hash_file_name)
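# --- Illustrative sketch (not part of mbuild itself) ---
# A build script might call doxygen_run() roughly like this; the keys and
# values are assumptions and must match the $(...) placeholders used in the
# Doxygen config template referenced by env['doxygen_config'].
#
#   subs = {'PROJECT_NAME': 'my-project',
#           'OUTPUT_DIRECTORY': env['doxygen_install'],
#           'INPUT': env['doxygen_top_src']}
#   doxygen_run(env, doxygen_inputs, subs, work_queue, hash_file_name='dox')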
| intelxed/mbuild | mbuild/doxygen.py | Python | apache-2.0 | 12,057 |
from invoke import task, run
#from fabric.api import local, lcd, get, env
#from fabric.operations import require, prompt
#from fabric.utils import abort
import requests
import rdflib
import getpass
import os.path
import os
import setlr
from os import listdir
from rdflib import *
import logging
CHEAR_DIR='chear.d/'
HHEAR_DIR='hhear.d/'
SETL_FILE='ontology.setl.ttl'
ontology_setl = Namespace('https://hadatac.org/setl/')
setl = Namespace('http://purl.org/twc/vocab/setl/')
prov = Namespace('http://www.w3.org/ns/prov#')
dc = Namespace('http://purl.org/dc/terms/')
pv = Namespace('http://purl.org/net/provenance/ns#')
logging_level = logging.INFO
logging.basicConfig(level=logging_level)
@task
def buildchear(ctx):
setl_graph = Graph()
setl_graph.parse(SETL_FILE,format="turtle")
cwd = os.getcwd()
formats = ['ttl','owl','json']
ontology_output_files = [setl_graph.resource(URIRef('file://'+cwd+'/chear.'+x)) for x in formats]
print (len(setl_graph))
for filename in os.listdir(CHEAR_DIR):
if not filename.endswith('.ttl') or filename.startswith('#'):
continue
print('Adding fragment', filename)
fragment = setl_graph.resource(BNode())
for ontology_output_file in ontology_output_files:
print(ontology_output_file.identifier, list(ontology_output_file[prov.wasGeneratedBy]))
ontology_output_file.value(prov.wasGeneratedBy).add(prov.used, fragment)
fragment.add(RDF.type, setlr.void.Dataset)
fragment_extract = setl_graph.resource(BNode())
fragment.add(prov.wasGeneratedBy, fragment_extract)
fragment_extract.add(RDF.type, setl.Extract)
fragment_extract.add(prov.used, URIRef('file://'+CHEAR_DIR+filename))
setlr._setl(setl_graph)
@task
def buildhhear(ctx):
setl_graph = Graph()
setl_graph.parse('hhear-ontology.setl.ttl',format="turtle")
cwd = os.getcwd()
formats = ['ttl','owl','json']
ontology_output_files = [setl_graph.resource(URIRef('file://'+cwd+'/hhear.'+x)) for x in formats]
print (len(setl_graph))
for filename in os.listdir(HHEAR_DIR):
if not filename.endswith('.ttl') or filename.startswith('#'):
continue
print('Adding fragment', filename)
fragment = setl_graph.resource(BNode())
for ontology_output_file in ontology_output_files:
print(ontology_output_file.identifier, list(ontology_output_file[prov.wasGeneratedBy]))
ontology_output_file.value(prov.wasGeneratedBy).add(prov.used, fragment)
fragment.add(RDF.type, setlr.void.Dataset)
fragment_extract = setl_graph.resource(BNode())
fragment.add(prov.wasGeneratedBy, fragment_extract)
fragment_extract.add(RDF.type, setl.Extract)
fragment_extract.add(prov.used, URIRef('file://'+HHEAR_DIR+filename))
setlr._setl(setl_graph)
@task
def chear2hhear(c, inputfile, outputfile):
import openpyxl
import re
import pandas as pd
mappings = {}
mappings.update(dict([(row['label_uri'], row['numeric_uri'])
for i, row in pd.read_csv('sio_mappings.csv').iterrows()]))
mappings.update(dict([(row['label_uri'], row['numeric_uri'])
for i, row in pd.read_csv('chear2hhear_mappings.csv').iterrows()]))
wb = openpyxl.load_workbook(inputfile)
for sheet in wb:
for row in sheet.rows:
for cell in row:
if isinstance(cell.value, str):
cellValues = []
for c in re.split('\\s*[,&]\\s*', cell.value):
if c in mappings:
print('Replacing',c,'with',mappings[c])
c = mappings[c]
cellValues.append(c)
cell.value = ', '.join(cellValues)
wb.save(outputfile)
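# --- Illustrative usage (not part of the original task file) ---
# These tasks are run through invoke; the spreadsheet names below are
# placeholders, not files shipped with the repository.
#
#   invoke buildchear
#   invoke buildhhear
#   invoke chear2hhear --inputfile codebook.xlsx --outputfile codebook_hhear.xlsx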
| tetherless-world/chear-ontology | tasks.py | Python | apache-2.0 | 3,872 |
# -*- coding: ISO-8859-1 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__reversion__ = "$Revision: 20 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2007-10-09 12:58:24 +0200 (Di, 09 Okt 2007) $"
from reportlab.lib.units import inch, cm
from reportlab.lib.styles import *
from reportlab.lib.enums import *
from reportlab.lib.colors import *
from reportlab.lib.pagesizes import *
from reportlab.pdfbase import pdfmetrics
# from reportlab.platypus import *
# from reportlab.platypus.flowables import Flowable
# from reportlab.platypus.tableofcontents import TableOfContents
# from reportlab.platypus.para import Para, PageNumberObject, UNDERLINE, HotLink
import reportlab
import copy
import types
import os
import os.path
import pprint
import sys
import string
import re
import base64
import urlparse
import mimetypes
import urllib2
import urllib
import httplib
import tempfile
import shutil
rgb_re = re.compile("^.*?rgb[(]([0-9]+).*?([0-9]+).*?([0-9]+)[)].*?[ ]*$")
_reportlab_version = tuple(map(int, reportlab.Version.split('.')))
if _reportlab_version < (2,1):
raise ImportError("Reportlab Version 2.1+ is needed!")
REPORTLAB22 = _reportlab_version >= (2, 2)
#if not(reportlab.Version[0] == "2" and reportlab.Version[2] >= "1"):
# raise ImportError("Reportlab Version 2.1+ is needed!")
#
#REPORTLAB22 = (reportlab.Version[0] == "2" and reportlab.Version[2] >= "2")
# print "***", reportlab.Version, REPORTLAB22, reportlab.__file__
import logging
log = logging.getLogger("ho.pisa")
try:
import cStringIO as StringIO
except:
import StringIO
try:
import pyPdf
except:
pyPdf = None
try:
from reportlab.graphics import renderPM
except:
renderPM = None
try:
from reportlab.graphics import renderSVG
except:
renderSVG = None
def ErrorMsg():
"""
Helper to get a nice traceback as string
"""
import traceback, sys, cgi
type = value = tb = limit = None
type, value, tb = sys.exc_info()
list = traceback.format_tb(tb, limit) + traceback.format_exception_only(type, value)
return "Traceback (innermost last):\n" + "%-20s %s" % (
string.join(list[: - 1], ""),
list[ - 1])
def toList(value):
if type(value) not in (types.ListType, types.TupleType):
return [value]
return list(value)
def flatten(x):
"""flatten(sequence) -> list
copied from http://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
result = []
for el in x:
#if isinstance(el, (list, tuple)):
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def _toColor(arg, default=None):
'''try to map an arbitrary arg to a color instance'''
if isinstance(arg, Color): return arg
tArg = type(arg)
if tArg in (types.ListType, types.TupleType):
assert 3 <= len(arg) <= 4, 'Can only convert 3 and 4 sequences to color'
assert 0 <= min(arg) and max(arg) <= 1
return len(arg) == 3 and Color(arg[0], arg[1], arg[2]) or CMYKColor(arg[0], arg[1], arg[2], arg[3])
elif tArg == types.StringType:
C = getAllNamedColors()
s = arg.lower()
if C.has_key(s): return C[s]
try:
return toColor(eval(arg))
except:
pass
try:
return HexColor(arg)
except:
if default is None:
raise ValueError('Invalid color value %r' % arg)
return default
def getColor(value, default=None):
" Convert to color value "
try:
original = value
if isinstance(value, Color):
return value
value = str(value).strip().lower()
if value == "transparent" or value == "none":
return default
if value in COLOR_BY_NAME:
return COLOR_BY_NAME[value]
if value.startswith("#") and len(value) == 4:
value = "#" + value[1] + value[1] + value[2] + value[2] + value[3] + value[3]
elif rgb_re.search(value):
# e.g., value = "<css function: rgb(153, 51, 153)>", go figure:
r, g, b = [int(x) for x in rgb_re.search(value).groups()]
value = "#%02x%02x%02x" % (r, g, b)
else:
# Shrug
pass
# XXX Throws illegal in 2.1 e.g. toColor('none'),
# therefore we have a workaround here
return _toColor(value)
except ValueError, e:
log.warn("Unknown color %r", original)
return default
def getBorderStyle(value, default=None):
# log.debug(value)
if value and (str(value).lower() not in ("none", "hidden")):
return value
return default
mm = cm / 10.0
dpi96 = (1.0 / 96.0 * inch)
_absoluteSizeTable = {
"1": 50.0 / 100.0,
"xx-small": 50.0 / 100.0,
"x-small": 50.0 / 100.0,
"2": 75.0 / 100.0,
"small": 75.0 / 100.0,
"3": 100.0 / 100.0,
"medium": 100.0 / 100.0,
"4": 125.0 / 100.0,
"large": 125.0 / 100.0,
"5": 150.0 / 100.0,
"x-large": 150.0 / 100.0,
"6": 175.0 / 100.0,
"xx-large": 175.0 / 100.0,
"7": 200.0 / 100.0,
"xxx-large": 200.0 / 100.0,
#"xx-small" : 3./5.,
#"x-small": 3./4.,
#"small": 8./9.,
#"medium": 1./1.,
#"large": 6./5.,
#"x-large": 3./2.,
#"xx-large": 2./1.,
#"xxx-large": 3./1.,
}
_relativeSizeTable = {
"larger": 1.25,
"smaller": 0.75,
"+4": 200.0 / 100.0,
"+3": 175.0 / 100.0,
"+2": 150.0 / 100.0,
"+1": 125.0 / 100.0,
"-1": 75.0 / 100.0,
"-2": 50.0 / 100.0,
"-3": 25.0 / 100.0,
}
MIN_FONT_SIZE = 1.0
def getSize(value, relative=0, base=None, default=0.0):
"""
Converts strings to standard sizes
"""
try:
original = value
if value is None:
return relative
elif type(value) is types.FloatType:
return value
elif type(value) is types.IntType:
return float(value)
elif type(value) in (types.TupleType, types.ListType):
value = "".join(value)
value = str(value).strip().lower().replace(",", ".")
if value[ - 2:] == 'cm':
return float(value[: - 2].strip()) * cm
elif value[ - 2:] == 'mm':
return (float(value[: - 2].strip()) * mm) # 1mm = 0.1cm
elif value[ - 2:] == 'in':
return float(value[: - 2].strip()) * inch # 1pt == 1/72inch
elif value[ - 2:] == 'inch':
return float(value[: - 4].strip()) * inch # 1pt == 1/72inch
elif value[ - 2:] == 'pt':
return float(value[: - 2].strip())
elif value[ - 2:] == 'pc':
return float(value[: - 2].strip()) * 12.0 # 1pc == 12pt
elif value[ - 2:] == 'px':
            return float(value[: - 2].strip()) * dpi96 # XXX W3C says, use 96dpi http://www.w3.org/TR/CSS21/syndata.html#length-units
elif value[ - 1:] == 'i': # 1pt == 1/72inch
return float(value[: - 1].strip()) * inch
elif value in ("none", "0", "auto"):
return 0.0
elif relative:
if value[ - 2:] == 'em': # XXX
return (float(value[: - 2].strip()) * relative) # 1em = 1 * fontSize
elif value[ - 2:] == 'ex': # XXX
return (float(value[: - 2].strip()) * (relative / 2.0)) # 1ex = 1/2 fontSize
elif value[ - 1:] == '%':
# print "%", value, relative, (relative * float(value[:-1].strip())) / 100.0
return (relative * float(value[: - 1].strip())) / 100.0 # 1% = (fontSize * 1) / 100
elif value in ("normal", "inherit"):
return relative
elif _relativeSizeTable.has_key(value):
if base:
return max(MIN_FONT_SIZE, base * _relativeSizeTable[value])
return max(MIN_FONT_SIZE, relative * _relativeSizeTable[value])
elif _absoluteSizeTable.has_key(value):
if base:
return max(MIN_FONT_SIZE, base * _absoluteSizeTable[value])
return max(MIN_FONT_SIZE, relative * _absoluteSizeTable[value])
try:
value = float(value)
except:
log.warn("getSize: Not a float %r", value)
return default #value = 0
return max(0, value)
except Exception:
log.warn("getSize %r %r", original, relative, exc_info=1)
# print "ERROR getSize", repr(value), repr(value), e
return default
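# Illustrative conversions (comments added for clarity, not in the original):
#   getSize("12pt") == 12.0                  # points are the native unit
#   getSize("1in") == 72.0                   # 1in == 72pt
#   getSize("2em", relative=12.0) == 24.0    # 1em == the current font size
#   getSize("50%", relative=10.0) == 5.0     # percentages scale `relative`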
def getCoords(x, y, w, h, pagesize):
"""
As a stupid programmer I like to use the upper left
corner of the document as the 0,0 coords therefore
we need to do some fancy calculations
"""
#~ print pagesize
ax, ay = pagesize
if x < 0:
x = ax + x
if y < 0:
y = ay + y
if w != None and h != None:
if w <= 0:
w = (ax - x + w)
if h <= 0:
h = (ay - y + h)
return x, (ay - y - h), w, h
return x, (ay - y)
def getBox(box, pagesize):
"""
Parse sizes by corners in the form:
<X-Left> <Y-Upper> <Width> <Height>
    The last two values, when negative, are interpreted as offsets from
    the right and lower border.
"""
box = str(box).split()
if len(box) != 4:
raise Exception, "box not defined right way"
x, y, w, h = map(getSize, box)
return getCoords(x, y, w, h, pagesize)
def getPos(position, pagesize):
"""
Pair of coordinates
"""
position = str(position).split()
if len(position) != 2:
raise Exception, "position not defined right way"
x, y = map(getSize, position)
return getCoords(x, y, None, None, pagesize)
def getBool(s):
" Is it a boolean? "
return str(s).lower() in ("y", "yes", "1", "true")
_uid = 0
def getUID():
" Unique ID "
global _uid
_uid += 1
return str(_uid)
_alignments = {
"left": TA_LEFT,
"center": TA_CENTER,
"middle": TA_CENTER,
"right": TA_RIGHT,
"justify": TA_JUSTIFY,
}
def getAlign(value, default=TA_LEFT):
return _alignments.get(str(value).lower(), default)
#def getVAlign(value):
# # Unused
# return str(value).upper()
GAE = "google.appengine" in sys.modules
if GAE:
STRATEGIES = (
StringIO.StringIO,
StringIO.StringIO)
else:
STRATEGIES = (
StringIO.StringIO,
tempfile.NamedTemporaryFile)
class pisaTempFile(object):
"""A temporary file implementation that uses memory unless
either capacity is breached or fileno is requested, at which
point a real temporary file will be created and the relevant
details returned
If capacity is -1 the second strategy will never be used.
Inspired by:
http://code.activestate.com/recipes/496744/
"""
STRATEGIES = STRATEGIES
CAPACITY = 10 * 1024
def __init__(self, buffer="", capacity=CAPACITY):
"""Creates a TempFile object containing the specified buffer.
If capacity is specified, we use a real temporary file once the
file gets larger than that size. Otherwise, the data is stored
in memory.
"""
#if hasattr(buffer, "read"):
#shutil.copyfileobj( fsrc, fdst[, length])
self.capacity = capacity
self.strategy = int(len(buffer) > self.capacity)
try:
self._delegate = self.STRATEGIES[self.strategy]()
except:
# Fallback for Google AppEnginge etc.
self._delegate = self.STRATEGIES[0]()
self.write(buffer)
def makeTempFile(self):
" Switch to next startegy. If an error occured stay with the first strategy "
if self.strategy == 0:
try:
new_delegate = self.STRATEGIES[1]()
new_delegate.write(self.getvalue())
self._delegate = new_delegate
self.strategy = 1
log.warn("Created temporary file %s", self.name)
except:
self.capacity = - 1
def getFileName(self):
" Get a named temporary file "
self.makeTempFile()
return self.name
def fileno(self):
"""Forces this buffer to use a temporary file as the underlying.
object and returns the fileno associated with it.
"""
self.makeTempFile()
return self._delegate.fileno()
def getvalue(self):
" Get value of file. Work around for second strategy "
if self.strategy == 0:
return self._delegate.getvalue()
self._delegate.flush()
self._delegate.seek(0)
return self._delegate.read()
def write(self, value):
" If capacity != -1 and length of file > capacity it is time to switch "
if self.capacity > 0 and self.strategy == 0:
len_value = len(value)
if len_value >= self.capacity:
needs_new_strategy = True
else:
self.seek(0, 2) # find end of file
needs_new_strategy = \
(self.tell() + len_value) >= self.capacity
if needs_new_strategy:
self.makeTempFile()
self._delegate.write(value)
def __getattr__(self, name):
try:
return getattr(self._delegate, name)
except AttributeError:
# hide the delegation
e = "object '%s' has no attribute '%s'" \
% (self.__class__.__name__, name)
raise AttributeError(e)
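# Illustrative usage of pisaTempFile (comments added for clarity, not part of
# the original module): data stays in memory until `capacity` is exceeded,
# after which a real named temporary file is used transparently.
#
#   buf = pisaTempFile(capacity=1024)
#   buf.write("small payload")        # kept in memory (StringIO)
#   buf.write("x" * 4096)             # exceeds capacity -> switches to a temp file
#   filename = buf.getFileName()      # path of the named temporary file
#   data = buf.getvalue()             # full contents regardless of strategy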
_rx_datauri = re.compile("^data:(?P<mime>[a-z]+/[a-z]+);base64,(?P<data>.*)$", re.M | re.DOTALL)
class pisaFileObject:
"""
XXX
"""
def __init__(self, uri, basepath=None):
self.basepath = basepath
self.mimetype = None
self.file = None
self.data = None
self.uri = None
self.local = None
self.tmp_file = None
uri = str(uri)
log.debug("FileObject %r, Basepath: %r", uri, basepath)
# Data URI
if uri.startswith("data:"):
m = _rx_datauri.match(uri)
self.mimetype = m.group("mime")
self.data = base64.decodestring(m.group("data"))
else:
# Check if we have an external scheme
if basepath and not (uri.startswith("http://") or uri.startswith("https://")):
urlParts = urlparse.urlparse(basepath)
else:
urlParts = urlparse.urlparse(uri)
log.debug("URLParts: %r", urlParts)
# Drive letters have len==1 but we are looking for things like http:
if len(urlParts[0]) > 1 :
# External data
if basepath:
uri = urlparse.urljoin(basepath, uri)
#path = urlparse.urlsplit(url)[2]
#mimetype = getMimeType(path)
# Using HTTPLIB
server, path = urllib.splithost(uri[uri.find("//"):])
if uri.startswith("https://"):
conn = httplib.HTTPSConnection(server)
else:
conn = httplib.HTTPConnection(server)
conn.request("GET", path)
r1 = conn.getresponse()
# log.debug("HTTP %r %r %r %r", server, path, uri, r1)
if (r1.status, r1.reason) == (200, "OK"):
# data = r1.read()
self.mimetype = r1.getheader("Content-Type", None).split(";")[0]
self.uri = uri
if r1.getheader("content-encoding") == "gzip":
# zbuf = cStringIO.StringIO(data)
import gzip
self.file = gzip.GzipFile(mode="rb", fileobj=r1)
#data = zfile.read()
#zfile.close()
else:
self.file = r1
# self.file = urlResponse
else:
urlResponse = urllib2.urlopen(uri)
self.mimetype = urlResponse.info().get("Content-Type", None).split(";")[0]
self.uri = urlResponse.geturl()
self.file = urlResponse
else:
# Local data
if basepath:
uri = os.path.normpath(os.path.join(basepath, uri))
if os.path.isfile(uri):
self.uri = uri
self.local = uri
self.setMimeTypeByName(uri)
self.file = open(uri, "rb")
def getFile(self):
if self.file is not None:
return self.file
if self.data is not None:
return pisaTempFile(self.data)
return None
def getNamedFile(self):
if self.notFound():
return None
if self.local:
return str(self.local)
if not self.tmp_file:
self.tmp_file = tempfile.NamedTemporaryFile()
if self.file:
shutil.copyfileobj(self.file, self.tmp_file)
else:
self.tmp_file.write(self.getData())
self.tmp_file.flush()
return self.tmp_file.name
def getData(self):
if self.data is not None:
return self.data
if self.file is not None:
self.data = self.file.read()
return self.data
return None
def notFound(self):
return (self.file is None) and (self.data is None)
def setMimeTypeByName(self, name):
" Guess the mime type "
mimetype = mimetypes.guess_type(name)[0]
if mimetype is not None:
self.mimetype = mimetypes.guess_type(name)[0].split(";")[0]
def getFile(*a , **kw):
file = pisaFileObject(*a, **kw)
if file.notFound():
return None
return file
COLOR_BY_NAME = {
'activeborder': Color(212, 208, 200),
'activecaption': Color(10, 36, 106),
'aliceblue': Color(.941176, .972549, 1),
'antiquewhite': Color(.980392, .921569, .843137),
'appworkspace': Color(128, 128, 128),
'aqua': Color(0, 1, 1),
'aquamarine': Color(.498039, 1, .831373),
'azure': Color(.941176, 1, 1),
'background': Color(58, 110, 165),
'beige': Color(.960784, .960784, .862745),
'bisque': Color(1, .894118, .768627),
'black': Color(0, 0, 0),
'blanchedalmond': Color(1, .921569, .803922),
'blue': Color(0, 0, 1),
'blueviolet': Color(.541176, .168627, .886275),
'brown': Color(.647059, .164706, .164706),
'burlywood': Color(.870588, .721569, .529412),
'buttonface': Color(212, 208, 200),
'buttonhighlight': Color(255, 255, 255),
'buttonshadow': Color(128, 128, 128),
'buttontext': Color(0, 0, 0),
'cadetblue': Color(.372549, .619608, .627451),
'captiontext': Color(255, 255, 255),
'chartreuse': Color(.498039, 1, 0),
'chocolate': Color(.823529, .411765, .117647),
'coral': Color(1, .498039, .313725),
'cornflowerblue': Color(.392157, .584314, .929412),
'cornsilk': Color(1, .972549, .862745),
'crimson': Color(.862745, .078431, .235294),
'cyan': Color(0, 1, 1),
'darkblue': Color(0, 0, .545098),
'darkcyan': Color(0, .545098, .545098),
'darkgoldenrod': Color(.721569, .52549, .043137),
'darkgray': Color(.662745, .662745, .662745),
'darkgreen': Color(0, .392157, 0),
'darkgrey': Color(.662745, .662745, .662745),
'darkkhaki': Color(.741176, .717647, .419608),
'darkmagenta': Color(.545098, 0, .545098),
'darkolivegreen': Color(.333333, .419608, .184314),
'darkorange': Color(1, .54902, 0),
'darkorchid': Color(.6, .196078, .8),
'darkred': Color(.545098, 0, 0),
'darksalmon': Color(.913725, .588235, .478431),
'darkseagreen': Color(.560784, .737255, .560784),
'darkslateblue': Color(.282353, .239216, .545098),
'darkslategray': Color(.184314, .309804, .309804),
'darkslategrey': Color(.184314, .309804, .309804),
'darkturquoise': Color(0, .807843, .819608),
'darkviolet': Color(.580392, 0, .827451),
'deeppink': Color(1, .078431, .576471),
'deepskyblue': Color(0, .74902, 1),
'dimgray': Color(.411765, .411765, .411765),
'dimgrey': Color(.411765, .411765, .411765),
'dodgerblue': Color(.117647, .564706, 1),
'firebrick': Color(.698039, .133333, .133333),
'floralwhite': Color(1, .980392, .941176),
'forestgreen': Color(.133333, .545098, .133333),
'fuchsia': Color(1, 0, 1),
'gainsboro': Color(.862745, .862745, .862745),
'ghostwhite': Color(.972549, .972549, 1),
'gold': Color(1, .843137, 0),
'goldenrod': Color(.854902, .647059, .12549),
'gray': Color(.501961, .501961, .501961),
'graytext': Color(128, 128, 128),
'green': Color(0, .501961, 0),
'greenyellow': Color(.678431, 1, .184314),
'grey': Color(.501961, .501961, .501961),
'highlight': Color(10, 36, 106),
'highlighttext': Color(255, 255, 255),
'honeydew': Color(.941176, 1, .941176),
'hotpink': Color(1, .411765, .705882),
'inactiveborder': Color(212, 208, 200),
'inactivecaption': Color(128, 128, 128),
'inactivecaptiontext': Color(212, 208, 200),
'indianred': Color(.803922, .360784, .360784),
'indigo': Color(.294118, 0, .509804),
'infobackground': Color(255, 255, 225),
'infotext': Color(0, 0, 0),
'ivory': Color(1, 1, .941176),
'khaki': Color(.941176, .901961, .54902),
'lavender': Color(.901961, .901961, .980392),
'lavenderblush': Color(1, .941176, .960784),
'lawngreen': Color(.486275, .988235, 0),
'lemonchiffon': Color(1, .980392, .803922),
'lightblue': Color(.678431, .847059, .901961),
'lightcoral': Color(.941176, .501961, .501961),
'lightcyan': Color(.878431, 1, 1),
'lightgoldenrodyellow': Color(.980392, .980392, .823529),
'lightgray': Color(.827451, .827451, .827451),
'lightgreen': Color(.564706, .933333, .564706),
'lightgrey': Color(.827451, .827451, .827451),
'lightpink': Color(1, .713725, .756863),
'lightsalmon': Color(1, .627451, .478431),
'lightseagreen': Color(.12549, .698039, .666667),
'lightskyblue': Color(.529412, .807843, .980392),
'lightslategray': Color(.466667, .533333, .6),
'lightslategrey': Color(.466667, .533333, .6),
'lightsteelblue': Color(.690196, .768627, .870588),
'lightyellow': Color(1, 1, .878431),
'lime': Color(0, 1, 0),
'limegreen': Color(.196078, .803922, .196078),
'linen': Color(.980392, .941176, .901961),
'magenta': Color(1, 0, 1),
'maroon': Color(.501961, 0, 0),
'mediumaquamarine': Color(.4, .803922, .666667),
'mediumblue': Color(0, 0, .803922),
'mediumorchid': Color(.729412, .333333, .827451),
'mediumpurple': Color(.576471, .439216, .858824),
'mediumseagreen': Color(.235294, .701961, .443137),
'mediumslateblue': Color(.482353, .407843, .933333),
'mediumspringgreen': Color(0, .980392, .603922),
'mediumturquoise': Color(.282353, .819608, .8),
'mediumvioletred': Color(.780392, .082353, .521569),
'menu': Color(212, 208, 200),
'menutext': Color(0, 0, 0),
'midnightblue': Color(.098039, .098039, .439216),
'mintcream': Color(.960784, 1, .980392),
'mistyrose': Color(1, .894118, .882353),
'moccasin': Color(1, .894118, .709804),
'navajowhite': Color(1, .870588, .678431),
'navy': Color(0, 0, .501961),
'oldlace': Color(.992157, .960784, .901961),
'olive': Color(.501961, .501961, 0),
'olivedrab': Color(.419608, .556863, .137255),
'orange': Color(1, .647059, 0),
'orangered': Color(1, .270588, 0),
'orchid': Color(.854902, .439216, .839216),
'palegoldenrod': Color(.933333, .909804, .666667),
'palegreen': Color(.596078, .984314, .596078),
'paleturquoise': Color(.686275, .933333, .933333),
'palevioletred': Color(.858824, .439216, .576471),
'papayawhip': Color(1, .937255, .835294),
'peachpuff': Color(1, .854902, .72549),
'peru': Color(.803922, .521569, .247059),
'pink': Color(1, .752941, .796078),
'plum': Color(.866667, .627451, .866667),
'powderblue': Color(.690196, .878431, .901961),
'purple': Color(.501961, 0, .501961),
'red': Color(1, 0, 0),
'rosybrown': Color(.737255, .560784, .560784),
'royalblue': Color(.254902, .411765, .882353),
'saddlebrown': Color(.545098, .270588, .07451),
'salmon': Color(.980392, .501961, .447059),
'sandybrown': Color(.956863, .643137, .376471),
'scrollbar': Color(212, 208, 200),
'seagreen': Color(.180392, .545098, .341176),
'seashell': Color(1, .960784, .933333),
'sienna': Color(.627451, .321569, .176471),
'silver': Color(.752941, .752941, .752941),
'skyblue': Color(.529412, .807843, .921569),
'slateblue': Color(.415686, .352941, .803922),
'slategray': Color(.439216, .501961, .564706),
'slategrey': Color(.439216, .501961, .564706),
'snow': Color(1, .980392, .980392),
'springgreen': Color(0, 1, .498039),
'steelblue': Color(.27451, .509804, .705882),
'tan': Color(.823529, .705882, .54902),
'teal': Color(0, .501961, .501961),
'thistle': Color(.847059, .74902, .847059),
'threeddarkshadow': Color(64, 64, 64),
'threedface': Color(212, 208, 200),
'threedhighlight': Color(255, 255, 255),
'threedlightshadow': Color(212, 208, 200),
'threedshadow': Color(128, 128, 128),
'tomato': Color(1, .388235, .278431),
'turquoise': Color(.25098, .878431, .815686),
'violet': Color(.933333, .509804, .933333),
'wheat': Color(.960784, .870588, .701961),
'white': Color(1, 1, 1),
'whitesmoke': Color(.960784, .960784, .960784),
'window': Color(255, 255, 255),
'windowframe': Color(0, 0, 0),
'windowtext': Color(0, 0, 0),
'yellow': Color(1, 1, 0),
'yellowgreen': Color(.603922, .803922, .196078)}
| rcucui/Pisa-util-fix | sx/pisa3/pisa_util.py | Python | apache-2.0 | 26,330 |
#!/usr/bin/env python
import argparse
import logging
import json
import subprocess
import sys
import os.path
import urllib2
from base64 import b64decode
from distutils.dir_util import mkpath
from tempfile import TemporaryFile
from shutil import copyfileobj
from urlparse import urlparse
from urllib2 import urlopen
from StringIO import StringIO
from boto import connect_s3
from boto.kms.layer1 import KMSConnection
def download_to_file(s3, src, f):
logging.debug('download_to_file: %s -> %s', src, f)
url = urlparse(src)
if url.scheme == 's3':
bucket = s3.get_bucket(url.netloc, validate=False)
key = bucket.get_key(url.path, validate=False)
key.get_contents_to_file(f)
else:
response = urlopen(src)
copyfileobj(response, f, 16 * 1024)
def download_to_filename(s3, src, dst, mode=None):
    dirname, basename = os.path.split(dst)
mkpath(dirname)
with open(dst, 'wb') as f:
download_to_file(s3, src, f)
if mode is not None:
os.chmod(dst, mode)
def download_to_string(s3, src):
logging.debug('download_to_string: %s', src)
f = StringIO()
download_to_file(s3, src, f)
s = f.getvalue()
logging.debug('download_to_string: %s: %s', src, s)
f.close()
return s
def download(s3, src=None, dst=None, mode=None):
assert src and dst
logging.info('download: %s -> %s', src, dst)
download_to_filename(s3, src, dst, mode=mode)
def process_parameter(kms, parameter):
logging.debug('process_parameter: %s', parameter)
t = parameter['type']
value = parameter['value']
if t == 'plain':
return value
elif t == 'kms_encrypted':
decrypted = kms.decrypt(value.decode('base64'))
return decrypted['Plaintext']
else:
raise Exception("Unexpected parameter type: '%s'" % (t, ))
def debug_parameter(kms, parameter):
t = parameter['type']
if t == 'plain':
return parameter['value']
elif t == 'kms_encrypted':
return '***'
else:
return '<unknown:%s>' % (t, )
def process_env(kms, parameters):
return dict((key, process_parameter(kms, parameter)) for key, parameter in parameters.iteritems())
def main():
parser = argparse.ArgumentParser()
parser.add_argument('config_uri')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--aws-credentials', type=file)
args = parser.parse_args()
logging.basicConfig(
format='%(asctime)s %(levelname)s %(message)s',
level=(logging.DEBUG if args.verbose else logging.INFO))
aws_credentials = {}
if args.aws_credentials:
aws_credentials = json.loads(args.aws_credentials.read())
s3 = connect_s3(host='s3.amazonaws.com', **aws_credentials)
# Fetch config json
config_filename = os.path.basename(args.config_uri)
download_to_filename(s3, args.config_uri, config_filename)
with open(config_filename, 'rb') as f:
config = json.load(f)
# Compile environment variables
env_parameters = {}
kms = KMSConnection(**aws_credentials)
env_parameters = process_env(kms, config.get('env', {}))
# Create working directory
working_directory = config['working_directory']
mkpath(working_directory)
# Download staging files
for item in config.get('download', []):
download(s3, **item)
# Execute command
env = dict(os.environ)
env.update(env_parameters)
debug_command = [debug_parameter(kms, parameter) for parameter in config['command']]
command = [process_parameter(kms, parameter) for parameter in config['command']]
logging.info('executing command: %s', debug_command)
logging.debug('Popen: command=%s, env=%s', command, env)
process = subprocess.Popen(command, env=env, cwd=working_directory)
return process.wait()
if __name__ == '__main__':
status = main()
sys.exit(status)
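# --- Illustrative config shape (added for clarity; values are made up) ---
# This is the structure of the JSON document that main() downloads and acts on:
#
# {
#   "working_directory": "/tmp/task",
#   "env": {
#     "GREETING":    {"type": "plain",         "value": "hello"},
#     "DB_PASSWORD": {"type": "kms_encrypted", "value": "<base64 ciphertext>"}
#   },
#   "download": [
#     {"src": "s3://my-bucket/scripts/run.sh", "dst": "run.sh", "mode": 493}
#   ],
#   "command": [
#     {"type": "plain", "value": "bash"},
#     {"type": "plain", "value": "run.sh"}
#   ]
# }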
| treasure-data/digdag | digdag-standards/src/main/resources/io/digdag/standards/operator/aws/runner.py | Python | apache-2.0 | 3,923 |
# Copyright 2019 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# author: Imron Alston <[email protected]>
from __future__ import absolute_import
from __future__ import print_function
__author__ = "[email protected]"
import sys
from scalyr_agent import UnsupportedSystem
from scalyr_agent.test_base import ScalyrTestCase
class MySqlMonitorTest(ScalyrTestCase):
def _import_mysql_monitor(self):
import scalyr_agent.builtin_monitors.mysql_monitor # NOQA
self.assertTrue(True)
def test_min_python_version(self):
if sys.version_info[:2] < (2, 7):
self.assertRaises(UnsupportedSystem, lambda: self._import_mysql_monitor())
else:
self._import_mysql_monitor()
def test_missing_qcache_hits(self):
if sys.version_info[:2] < (2, 7):
print(
"Skipping test 'test_missing_qcache_hits'.\n"
"This test is non-critical for pre-2.7 testing.\n"
)
return
from scalyr_agent.builtin_monitors.mysql_monitor import MysqlDB
class TestMysqlDB(MysqlDB):
def __init__(self):
# do nothing, because we don't actually want to connect to a DB
# for this test
pass
db = TestMysqlDB()
globalVars = {}
globalStatusMap = {"global.com_select": 10}
expected = 0
actual = db._derived_stat_query_cache_efficiency(globalVars, globalStatusMap)
self.assertEqual(expected, actual)
| imron/scalyr-agent-2 | tests/unit/builtin_monitors/mysql_monitor_test.py | Python | apache-2.0 | 2,104 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import os
import sys
import tempfile
from absl.testing import absltest
import numpy as np
from six.moves import cPickle
from simulation_research.traffic import file_util
class UtilTest(absltest.TestCase):
def setUp(self):
super(UtilTest, self).setUp()
self._output_dir = tempfile.mkdtemp(dir=absltest.get_default_test_tmpdir())
def test_append_line_to_file(self):
r"""Tests the output file.
The output file contains the following.
hello world
(hello) "world"
(hello) !!!!!!!!!!! @~#$%^&*()_+"world"
aaaaaaaa
bbbbbbbbbb
backslash\ backslash
backslash\ backslash
backslash\\ backslash
backslash\\\ backslash
backslash\\ backslash
"""
input_lines = ['hello world',
'(hello) "world"',
'(hello) !!!!!!!!!!! @~#$%^&*()_+"world"',
'aaaaaaaa\nbbbbbbbbbb',
r'backslash\ backslash',
'backslash\\ backslash',
r'backslash\\ backslash',
r'backslash\\\ backslash',
'backslash\\\\ backslash']
file_path = os.path.join(self._output_dir, 'test_append_line_to_file.txt')
for line in input_lines:
file_util.append_line_to_file(file_path, line)
self.assertTrue(file_util.f_exists(file_path))
# Note that the linebreak in the input_lines[3].
target_lines = ['hello world',
'(hello) "world"',
'(hello) !!!!!!!!!!! @~#$%^&*()_+"world"',
'aaaaaaaa',
'bbbbbbbbbb',
r'backslash\ backslash',
'backslash\\ backslash',
r'backslash\\ backslash',
r'backslash\\\ backslash',
'backslash\\\\ backslash']
with file_util.f_open(file_path, 'r') as actual_file:
line_counter = 0
read_lines = actual_file.readlines()
for line in read_lines:
# Linebreak is appended to the target string.
self.assertEqual(line, target_lines[line_counter] + '\n')
line_counter += 1
target_line_number = len(target_lines)
self.assertEqual(target_line_number, line_counter)
def test_save_load_variable(self):
file_path = os.path.join(self._output_dir, 'test_output_data.pkl')
# Case 1: Nested dictionary.
data = {'zz': 1, 'b': 234, 123: 'asdfa', 'dict': {'a': 123, 't': 123}}
file_util.save_variable(file_path, data)
actual_variable = file_util.load_variable(file_path)
self.assertEqual(data, actual_variable)
self.assertIsInstance(actual_variable, dict)
# Case 2: 2-level nested dictionary.
data = collections.defaultdict(
lambda: collections.defaultdict(list))
data['first']['A'] = [1, 2, 3]
data['first']['B'] = [1, 2, 3]
data['second']['B'] = [1, 2, 3]
data['second']['C'] = [1, 2, 3]
data['third']['C'] = [1, 2, 3]
data['third']['D'] = [1, 2, 3]
data['path'] = 'asdfas/asdf/asdfasdf/'
file_util.save_variable(file_path, data)
actual_variable = file_util.load_variable(file_path)
self.assertEqual(data, actual_variable)
self.assertIsInstance(actual_variable, dict)
# Case 3: Large array. If the size is too large, the test will timeout.
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] * 10000
file_util.save_variable(file_path, data)
actual_variable = file_util.load_variable(file_path)
self.assertListEqual(data, actual_variable)
self.assertIsInstance(actual_variable, list)
# Case 4: numpy array.
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] * 10
data = np.array(data)
file_util.save_variable(file_path, data)
actual_variable = file_util.load_variable(file_path)
np.testing.assert_array_equal(data, actual_variable)
self.assertIsInstance(actual_variable, np.ndarray)
# Case 5: A list of tuples.
x = [1, 2, 3]
y = ['a', 'b', 'c']
data = zip(x, y)
# Saving zip variable does not affect the iterative variable.
file_util.save_variable(file_path, data)
actual_variable = file_util.load_variable(file_path)
# python2 treats `actual_variable` as a list, however, python3 treats it as
# an iterative object.
self.assertListEqual(list(actual_variable), list(data))
# Case 6: In python2, the itertools.tee cannot be saved by cPickle. However,
# in python3, it can be saved.
x = [1, 2, 3]
y = ['a', 'b', 'c']
data = zip(x, y)
data_tee, _ = itertools.tee(data)
python_version = sys.version_info[0]
try:
file_util.save_variable(file_path, data_tee)
pickle_save_correctly = True
except cPickle.PicklingError:
pickle_save_correctly = False
self.assertTrue((pickle_save_correctly and python_version == 3) or
(not pickle_save_correctly and python_version == 2))
if __name__ == '__main__':
absltest.main()
| google-research/google-research | simulation_research/traffic/file_util_test.py | Python | apache-2.0 | 5,630 |
from django.utils import unittest
from restclients.test.uwnetid.subscription import EmailForwardingTest
from restclients.test.util.date_formator import formatorTest
from restclients.test.hfs.idcard import HfsTest
from restclients.test.library.mylibinfo import MyLibInfoTest
from restclients.test.digitlib.curric import DigitLibTest
from restclients.test.sws.compatible import SWSTest
from restclients.test.sws.financial import SWSFinance
from restclients.test.sws.notice import SWSNotice
from restclients.test.sws.term import SWSTestTerm
from restclients.test.sws.err404.dao import SWSTestDAO404
from restclients.test.sws.err500.dao import SWSTestDAO500
from restclients.test.sws.invalid_dao import SWSTestInvalidDAO
from restclients.test.sws.file_implementation.dao import SWSTestFileDAO
from restclients.test.sws.schedule_data import SWSTestScheduleData
from restclients.test.sws.enrollment import SWSTestEnrollments
from restclients.test.sws.section import SWSTestSectionData
from restclients.test.sws.section_status import SWSTestSectionStatusData
from restclients.test.sws.independent_study import SWSIndependentStudy
from restclients.test.sws.instructor_no_regid import SWSMissingRegid
from restclients.test.sws.registrations import SWSTestRegistrations
from restclients.test.sws.campus import SWSTestCampus
from restclients.test.sws.college import SWSTestCollege
from restclients.test.sws.department import SWSTestDepartment
from restclients.test.sws.curriculum import SWSTestCurriculum
from restclients.test.sws.graderoster import SWSTestGradeRoster
from restclients.test.sws.dates import SWSTestDates
from restclients.test.pws.person import PWSTestPersonData
from restclients.test.pws.entity import PWSTestEntityData
from restclients.test.pws.idcard import TestIdCardPhoto
from restclients.test.pws.err404.dao import PWSTestDAO404
from restclients.test.pws.err404.pws import PWSTest404
from restclients.test.pws.err500.dao import PWSTestDAO500
from restclients.test.pws.err500.pws import PWSTest500
from restclients.test.pws.invalid_dao import PWSTestInvalidDAO
from restclients.test.pws.file_implementation.dao import PWSTestFileDAO
from restclients.test.gws.group import GWSGroupBasics
from restclients.test.gws.course_group import GWSCourseGroupBasics
from restclients.test.gws.search import GWSGroupSearch
from restclients.test.cache.none import NoCacheTest
from restclients.test.cache.time import TimeCacheTest
from restclients.test.cache.etag import ETagCacheTest
from restclients.test.book.by_schedule import BookstoreScheduleTest
from restclients.test.amazon_sqs.queues import SQSQueue
from restclients.test.sms.send import SMS
from restclients.test.sms.invalid_phone_number import SMSInvalidNumbers
from restclients.test.nws.subscription import NWSTestSubscription
from restclients.test.nws.channel import NWSTestChannel
from restclients.test.nws.endpoint import NWSTestEndpoint
from restclients.test.nws.message import NWSTestMessage
from restclients.test.nws.person import NWSTestPerson
from restclients.test.canvas.enrollments import CanvasTestEnrollment
from restclients.test.canvas.accounts import CanvasTestAccounts
from restclients.test.canvas.admins import CanvasTestAdmins
from restclients.test.canvas.roles import CanvasTestRoles
from restclients.test.canvas.courses import CanvasTestCourses
from restclients.test.canvas.sections import CanvasTestSections
from restclients.test.canvas.bad_sis_ids import CanvasBadSISIDs
from restclients.test.canvas.terms import CanvasTestTerms
from restclients.test.canvas.users import CanvasTestUsers
from restclients.test.canvas.submissions import CanvasTestSubmissions
from restclients.test.canvas.assignments import CanvasTestAssignments
from restclients.test.canvas.quizzes import CanvasTestQuizzes
from restclients.test.catalyst.gradebook import CatalystTestGradebook
from restclients.test.trumba.accounts import TrumbaTestAccounts
from restclients.test.trumba.calendar import TestCalendarParse
from restclients.test.trumba.calendars import TrumbaTestCalendars
from restclients.test.gws.trumba_group import TestGwsTrumbaGroup
from restclients.test.r25.events import R25TestEvents
from restclients.test.r25.spaces import R25TestSpaces
from restclients.test.myplan import MyPlanTestData
from restclients.test.thread import ThreadsTest
from restclients.test.view import ViewTest
from restclients.test.dao_implementation.mock import TestMock
from restclients.test.irws import IRWSTest
from restclients.test.iasystem.evaluation import IASystemTest
| jeffFranklin/uw-restclients | restclients/tests.py | Python | apache-2.0 | 4,530 |
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Extension to scipy.linalg module developed for PBC branch.
'''
import numpy as np
import scipy.linalg
def davidson_nosymm(matvec,size,nroots,Adiag=None):
'''Davidson diagonalization method to solve A c = E c
when A is not Hermitian.
'''
# We don't pass args
def matvec_args(vec, args):
return matvec(vec)
nroots = min(nroots,size)
#if Adiag == None:
# Adiag = matvec(numpy.ones(size))
# Currently not used:
x = np.ones((size,1))
P = np.ones((size,1))
arnold = Arnoldi(matvec_args, x, P, nroots=nroots)
return arnold.solve()
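# Example usage (an illustrative sketch added here, not part of the original
# module): a small random dense matrix stands in for the operator of interest;
# davidson_nosymm only ever sees its matrix-vector product.
def _davidson_nosymm_example():
    a = np.random.rand(50, 50)  # hypothetical non-Hermitian test matrix
    def matvec(vec):
        return a.dot(vec)
    # Ask for the three lowest eigenvalues and the corresponding eigenvectors.
    return davidson_nosymm(matvec, size=50, nroots=3)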
VERBOSE = False
class Arnoldi:
def __init__(self,matr_multiply,xStart,inPreCon,nroots=1,tol=1e-6):
self.matrMultiply = matr_multiply
self.size = xStart.shape[0]
self.nEigen = min(nroots, self.size)
self.maxM = min(30, self.size)
self.maxOuterLoop = 10
self.tol = tol
#
# Creating initial guess and preconditioner
#
self.x0 = xStart.real.copy()
self.iteration = 0
self.totalIter = 0
self.converged = False
self.preCon = inPreCon.copy()
#
# Allocating other vectors
#
self.allocateVecs()
def solve(self):
while self.converged == 0:
if self.totalIter == 0:
self.guessInitial()
for i in range(self.maxM):
if self.deflated == 1:
self.currentSize = self.nEigen
if self.deflated == 0 and self.totalIter > 0:
self.hMult()
self.push_Av()
self.constructSubspace()
self.solveSubspace()
self.constructSol()
self.computeResidual()
self.checkConvergence()
self.deflated = 0
if self.converged:
break
self.updateVecs()
self.checkDeflate()
self.constructDeflatedSub()
self.totalIter += 1
self.currentSize += 1
print("")
print("Converged in %3d cycles" % self.totalIter)
self.constructAllSolV()
return self.outeigs, self.outevecs
def allocateVecs(self):
self.subH = np.zeros( shape=(self.maxM,self.maxM), dtype=complex )
self.sol = np.zeros( shape=(self.maxM), dtype=complex )
self.dgks = np.zeros( shape=(self.maxM), dtype=complex )
self.nConv = np.zeros( shape=(self.maxM), dtype=int )
self.eigs = np.zeros( shape=(self.maxM), dtype=complex )
self.evecs = np.zeros( shape=(self.maxM,self.maxM), dtype=complex )
self.oldeigs = np.zeros( shape=(self.maxM), dtype=complex )
self.deigs = np.zeros( shape=(self.maxM), dtype=complex )
self.outeigs = np.zeros( shape=(self.nEigen), dtype=complex )
self.outevecs = np.zeros( shape=(self.size,self.nEigen), dtype=complex)
self.currentSize = 0
self.Ax = np.zeros( shape=(self.size), dtype=complex )
self.res = np.zeros( shape=(self.size), dtype=complex )
self.vlist = np.zeros( shape=(self.maxM,self.size), dtype=complex )
self.cv = np.zeros( shape = (self.size), dtype = complex )
self.cAv = np.zeros( shape = (self.size), dtype = complex )
self.Avlist = np.zeros( shape=(self.maxM,self.size), dtype=complex )
self.dres = 999.9
self.resnorm = 999.9
self.cvEig = 0.1
self.ciEig = 0
self.deflated = 0
def guessInitial(self):
nrm = np.linalg.norm(self.x0)
self.x0 *= 1./nrm
self.currentSize = self.nEigen
for i in range(self.currentSize):
self.vlist[i] *= 0.0
self.vlist[i,i] = 1.0 + 0.0*1j
self.vlist[i] /= np.linalg.norm(self.vlist[i])
for i in range(self.currentSize):
self.cv = self.vlist[i].copy()
self.hMult()
self.Avlist[i] = self.cAv.copy()
self.constructSubspace()
def hMult(self):
args = 0
self.cAv = self.matrMultiply(self.cv.reshape(self.size),args)
def push_Av(self):
self.Avlist[self.currentSize-1] = self.cAv.reshape(self.size)
def constructSubspace(self):
if self.totalIter == 0 or self.deflated == 1: # construct the full block of v^*Av
for i in range(self.currentSize):
for j in range(self.currentSize):
val = np.vdot(self.vlist[i],self.Avlist[j])
self.subH[i,j] = val
else:
for j in range(self.currentSize):
if j <= (self.currentSize-1):
val = np.vdot(self.vlist[j],self.Avlist[self.currentSize-1])
self.subH[j,self.currentSize-1] = val
if j < (self.currentSize-1):
val = np.vdot(self.vlist[self.currentSize-1],self.Avlist[j])
self.subH[self.currentSize-1,j] = val
def solveSubspace(self):
w, v = scipy.linalg.eig(self.subH[:self.currentSize,:self.currentSize])
idx = w.real.argsort()
#imag_norm = np.linalg.norm(w.imag)
#if imag_norm > 1e-12:
# print " *************************************************** "
# print " WARNING IMAGINARY EIGENVALUE OF NORM %.15g " % (imag_norm)
# print " *************************************************** "
#print "Imaginary norm eigenvectors = ", np.linalg.norm(v.imag)
#print "Imaginary norm eigenvalue = ", np.linalg.norm(w.imag)
v = v[:,idx]
w = w[idx].real
self.sol[:self.currentSize] = v[:,self.ciEig]
self.evecs[:self.currentSize,:self.currentSize] = v
self.eigs[:self.currentSize] = w[:self.currentSize]
self.outeigs[:self.nEigen] = w[:self.nEigen]
self.cvEig = self.eigs[self.ciEig]
def constructAllSolV(self):
for i in range(self.nEigen):
self.sol[:] = self.evecs[:,i]
self.cv = np.dot(self.vlist[:self.currentSize].transpose(),self.sol[:self.currentSize])
self.outevecs[:,i] = self.cv
def constructSol(self):
self.constructSolV()
self.constructSolAv()
def constructSolV(self):
self.cv = np.dot(self.vlist[:self.currentSize].transpose(),self.sol[:self.currentSize])
def constructSolAv(self):
self.cAv = np.dot(self.Avlist[:self.currentSize].transpose(),self.sol[:self.currentSize])
def computeResidual(self):
self.res = self.cAv - self.cvEig * self.cv
self.dres = np.vdot(self.res,self.res)**0.5
#
# gram-schmidt for residual vector
#
for i in range(self.currentSize):
self.dgks[i] = np.vdot( self.vlist[i], self.res )
self.res -= self.dgks[i]*self.vlist[i]
#
# second gram-schmidt to make them really orthogonal
#
for i in range(self.currentSize):
self.dgks[i] = np.vdot( self.vlist[i], self.res )
self.res -= self.dgks[i]*self.vlist[i]
self.resnorm = np.linalg.norm(self.res)
self.res /= self.resnorm
orthog = 0.0
for i in range(self.currentSize):
orthog += np.vdot(self.res,self.vlist[i])**2.0
orthog = orthog ** 0.5
if not self.deflated:
if VERBOSE:
print("%3d %20.14f %20.14f %10.4g" % (self.ciEig, self.cvEig.real, self.resnorm.real, orthog.real))
#else:
# print "%3d %20.14f %20.14f %20.14f (deflated)" % (self.ciEig, self.cvEig,
# self.resnorm, orthog)
self.iteration += 1
def updateVecs(self):
self.vlist[self.currentSize] = self.res.copy()
self.cv = self.vlist[self.currentSize]
def checkConvergence(self):
if self.resnorm < self.tol:
if VERBOSE:
print("Eigenvalue %3d converged! (res = %.15g)" % (self.ciEig, self.resnorm))
self.ciEig += 1
if self.ciEig == self.nEigen:
self.converged = True
if self.resnorm < self.tol and not self.converged:
if VERBOSE:
print("")
print("")
print("%-3s %-20s %-20s %-8s" % ("#", " Eigenvalue", " Res. Norm.", " Ortho. (should be ~0)"))
def gramSchmidtCurrentVec(self,northo):
for i in range(northo):
self.dgks[i] = np.vdot( self.vlist[i], self.cv )
self.cv -= self.dgks[i]*self.vlist[i] #/ np.vdot(self.vlist[i],self.vlist[i])
self.cv /= np.linalg.norm(self.cv)
def checkDeflate(self):
if self.currentSize == self.maxM-1:
self.deflated = 1
#print "deflating..."
for i in range(self.nEigen):
self.sol[:self.currentSize] = self.evecs[:self.currentSize,i]
# Finds the "best" eigenvector for this eigenvalue
self.constructSolV()
# Puts this guess in self.Avlist rather than self.vlist for now...
# since this would mess up self.constructSolV()'s solution
self.Avlist[i] = self.cv.copy()
for i in range(self.nEigen):
# This is actually the "best" eigenvector v, not A*v (see above)
self.cv = self.Avlist[i].copy()
self.gramSchmidtCurrentVec(i)
self.vlist[i] = self.cv.copy()
for i in range(self.nEigen):
# This is actually the "best" eigenvector v, not A*v (see above)
self.cv = self.vlist[i].copy()
# Use current vector cv to create cAv
self.hMult()
self.Avlist[i] = self.cAv.copy()
def constructDeflatedSub(self):
if self.deflated == 1:
self.currentSize = self.nEigen
self.constructSubspace()
| sunqm/pyscf | pyscf/pbc/lib/arnoldi.py | Python | apache-2.0 | 10,549 |
#!/usr/bin/env python
import os
import glob
import unittest
import pysmile
import json
__author__ = 'Jonathan Hosmer'
class PySmileTestDecode(unittest.TestCase):
def setUp(self):
curdir = os.path.dirname(os.path.abspath(__file__))
self.smile_dir = os.path.join(curdir, 'data', 'smile')
self.json_dir = os.path.join(curdir, 'data', 'json')
def test_json_org_sample1(self):
s = os.path.join(self.smile_dir, 'json-org-sample1.smile')
j = os.path.join(self.json_dir, 'json-org-sample1.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_json_org_sample2(self):
s = os.path.join(self.smile_dir, 'json-org-sample2.smile')
j = os.path.join(self.json_dir, 'json-org-sample2.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_json_org_sample3(self):
s = os.path.join(self.smile_dir, 'json-org-sample3.smile')
j = os.path.join(self.json_dir, 'json-org-sample3.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_json_org_sample4(self):
s = os.path.join(self.smile_dir, 'json-org-sample4.smile')
j = os.path.join(self.json_dir, 'json-org-sample4.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_json_org_sample5(self):
s = os.path.join(self.smile_dir, 'json-org-sample5.smile')
j = os.path.join(self.json_dir, 'json-org-sample5.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_numbers_int_4k(self):
s = os.path.join(self.smile_dir, 'numbers-int-4k.smile')
j = os.path.join(self.json_dir, 'numbers-int-4k.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_numbers_int_64k(self):
s = os.path.join(self.smile_dir, 'numbers-int-64k.smile')
j = os.path.join(self.json_dir, 'numbers-int-64k.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_test1(self):
s = os.path.join(self.smile_dir, 'test1.smile')
j = os.path.join(self.json_dir, 'test1.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
def test_test2(self):
s = os.path.join(self.smile_dir, 'test2.smile')
j = os.path.join(self.json_dir, 'test2.jsn')
b = json.load(open(j, 'rb'))
try:
a = pysmile.decode(open(s, 'rb').read())
except pysmile.SMILEDecodeError, e:
self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1]))
else:
if isinstance(a, list):
self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
elif isinstance(a, dict):
self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
else:
self.fail('Unexpected Type: {!r}'.format(type(a)))
class PySmileTestEncode(unittest.TestCase):
def setUp(self):
curdir = os.path.dirname(os.path.abspath(__file__))
self.smile_dir = os.path.join(curdir, 'data', 'smile')
self.json_dir = os.path.join(curdir, 'data', 'json')
def test_json_org_sample1(self):
s = os.path.join(self.smile_dir, 'json-org-sample1.smile')
j = os.path.join(self.json_dir, 'json-org-sample1.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_json_org_sample2(self):
s = os.path.join(self.smile_dir, 'json-org-sample2.smile')
j = os.path.join(self.json_dir, 'json-org-sample2.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_json_org_sample3(self):
s = os.path.join(self.smile_dir, 'json-org-sample3.smile')
j = os.path.join(self.json_dir, 'json-org-sample3.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_json_org_sample4(self):
s = os.path.join(self.smile_dir, 'json-org-sample4.smile')
j = os.path.join(self.json_dir, 'json-org-sample4.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_json_org_sample5(self):
s = os.path.join(self.smile_dir, 'json-org-sample5.smile')
j = os.path.join(self.json_dir, 'json-org-sample5.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_numbers_int_4k(self):
s = os.path.join(self.smile_dir, 'numbers-int-4k.smile')
j = os.path.join(self.json_dir, 'numbers-int-4k.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_numbers_int_64k(self):
s = os.path.join(self.smile_dir, 'numbers-int-64k.smile')
j = os.path.join(self.json_dir, 'numbers-int-64k.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_test1(self):
s = os.path.join(self.smile_dir, 'test1.smile')
j = os.path.join(self.json_dir, 'test1.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
def test_test2(self):
s = os.path.join(self.smile_dir, 'test2.smile')
j = os.path.join(self.json_dir, 'test2.jsn')
a = pysmile.encode(json.load(open(j, 'rb')))
b = open(s, 'rb').read()
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
class PySmileTestMisc(unittest.TestCase):
def test_1(self):
a = [1]
b = pysmile.decode(':)\n\x03\xf8\xc2\xf9')
self.assertListEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b))
def test_2(self):
a = [1, 2]
b = pysmile.decode(':)\n\x03\xf8\xc2\xc4\xf9')
self.assertListEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b))
def test_3(self):
a = [1, 2, {'c': 3}]
b = pysmile.decode(':)\n\x03\xf8\xc2\xc4\xfa\x80c\xc6\xfb\xf9')
self.assertListEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b))
def test_4(self):
a = {'a': 1}
b = pysmile.decode(':)\n\x03\xfa\x80a\xc2\xfb')
self.assertDictEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b))
def test_5(self):
a = {'a': '1', 'b': 2, 'c': [3], 'd': -1, 'e': 4.20}
b = pysmile.decode(
':)\n\x03\xfa\x80a@1\x80c\xf8\xc6\xf9\x80b\xc4\x80e(fL\x19\x04\x04\x80d\xc1\xfb')
self.assertDictEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b))
def test_6(self):
a = {'a': {'b': {'c': {'d': ['e']}}}}
b = pysmile.decode(
':)\n\x03\xfa\x80a\xfa\x80b\xfa\x80c\xfa\x80d\xf8@e\xf9\xfb\xfb\xfb\xfb')
self.assertDictEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b))
| jhosmer/PySmile | tests/pysmile_tests.py | Python | apache-2.0 | 11,679 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from quantum.openstack.common import log as logging
from quantum.plugins.services.agent_loadbalancer.drivers.vedge.vmware.vshield.vseapi import VseAPI
from quantum.plugins.services.agent_loadbalancer.drivers.vedge.lbapi import LoadBalancerAPI
from quantum.plugins.services.agent_loadbalancer.drivers.vedge import (
cfg as hacfg
)
from oslo.config import cfg
LOG = logging.getLogger(__name__)
edgeUri = 'https://10.117.5.245'
edgeId = 'edge-7'
edgeUser = 'admin'
edgePasswd = 'default'
OPTS = [
cfg.StrOpt('pool_vseid',
help='this is a vseid of pool'),
cfg.StrOpt('vip_vseid',
help='this is a vseid of vip')
]
class VShieldEdgeLB():
supported_extension_aliases = ["lbaas"]
def __init__(self):
# Hard coded for now
vseapi = VseAPI(edgeUri, edgeUser, edgePasswd, edgeId)
self.vselbapi = LoadBalancerAPI(vseapi)
self.conf = cfg.CONF
self._max_monitors = 255
count = 0
while count < self._max_monitors:
monitorMap = "monitorMap_%d" % count
OPTS.append(cfg.ListOpt(monitorMap))
count = count + 1
self.conf.register_opts(OPTS)
def ini_update(self, ini_path):
argv = ["--config-file", ini_path]
self.conf(argv)
def ini2vseid(self, ini_path):
pool_vseid = self.conf.pool_vseid
vip_vseid = self.conf.vip_vseid
return (pool_vseid, vip_vseid)
def extract_monitorids(self, monitors):
monitor_ids = []
for monitor in monitors:
monitor_ids.append(monitor['id'])
return monitor_ids
def extract_vsemonitor_maps(self):
monitor_maps = {}
count = 0
while count < self._max_monitors:
monitorMap = "monitorMap_%d" % count
opt = "self.conf.{}".format(monitorMap)
monitorMap = eval(opt)
if monitorMap is not None:
monitor_id = monitorMap[0]
monitor_vseid = monitorMap[1]
monitor_maps[monitor_id] = monitor_vseid
else:
return monitor_maps
count = count + 1
return monitor_maps
def ini2monitorvseids(self, monitor_ids, monitor_maps):
monitor_vseids = {}
monitor_vseids_delete = {}
for k,v in monitor_maps.items():
if k in monitor_ids:
monitor_vseids[k] = v
else:
monitor_vseids_delete[k] = v
return (monitor_vseids,monitor_vseids_delete)
# def ini2monitorvseids2(self, ini_path):
# monitor_vseids = {}
# except_opts = ("config_file", "config_dir", "pool_vseid", "vip_vseid")
# opts = self.conf._opts()
# print "opts: %s" % opts
# for index in opts.keys():
# if index not in except_opts:
# opt = "self.conf.{}".format(index)
# index = eval(opt)
# if index is not None:
# monitor_id = index[0]
# monitor_vseid = index[1]
# monitor_vseids[monitor_id] = monitor_vseid
# return monitor_vseids
def create(self, logical_config, ini_path, conf_path):
monitors = logical_config['healthmonitors']
members = logical_config['members']
pool = logical_config['pool']
vip = logical_config['vip']
if monitors is not None:
#try:
monitor_vseids,monitors_request = self.vselbapi.create_monitors(monitors)
#except Exception:
# LOG.error(_("monitors create error %s") % monitors)
# exit(1)
#try:
pool_vseid,pool_request = self.vselbapi.create_pool(pool, members, monitor_vseids)
if vip is not None:
vip_vseid,vip_request = self.vselbapi.create_vip(vip, pool_vseid)
#except Exception:
# hacfg.save_ini(ini_path, pool_vseid, None, monitor_vseids)
# self.vselbapi.delete_monitors(ini_path)
# self.vselbapi.delete_pool(ini_path)
# print "pool or vip create error!"
# exit(1)
hacfg.save_ini(ini_path, pool_vseid, vip_vseid, monitor_vseids)
hacfg.save_conf(conf_path, pool_request, vip_request)
def update(self, logical_config, ini_path, conf_path):
self.ini_update(ini_path)
monitors = logical_config['healthmonitors']
members = logical_config['members']
pool = logical_config['pool']
vip = logical_config['vip']
pool_vseid,vip_vseid = self.ini2vseid(ini_path)
monitor_ids = self.extract_monitorids(monitors)
old_vsemonitor_maps = self.extract_vsemonitor_maps()
monitor_vseids_update,monitor_vseids_delete = self.ini2monitorvseids(monitor_ids, old_vsemonitor_maps)
#try:
if monitors is not None:
monitor_vseids,monitors_request = self.vselbapi.update_monitors(monitors, old_vsemonitor_maps,
monitor_ids, monitor_vseids_update,
monitor_vseids_delete, pool_vseid)
pool_vseid,pool_request = self.vselbapi.update_pool(pool, pool_vseid, members, monitor_vseids)
if vip is not None:
vip_vseid,vip_request = self.vselbapi.update_vip(vip, pool_vseid, vip_vseid)
#except Exception:
# print "pool or vip update error!"
# exit(1)
hacfg.save_ini(ini_path, pool_vseid, vip_vseid, monitor_vseids)
hacfg.save_conf(conf_path, pool_request, vip_request)
def destroy(self, pool_id, ini_path, conf_path):
self.ini_update(ini_path)
pool_vseid,vip_vseid = self.ini2vseid(ini_path)
monitor_vseids = self.extract_vsemonitor_maps()
# monitor_vseids = self.ini2monitorvseids2(ini_path)
if vip_vseid is not None:
self.vselbapi.delete_vip(vip_vseid)
self.vselbapi.delete_pool(pool_vseid, monitor_vseids)
if monitor_vseids is not None:
self.vselbapi.delete_monitors(monitor_vseids, pool_vseid)
    def get_stats(self, pool_id, ini_path, conf_path):
# self.vselbapi.get_stats()
self.vselbapi.get_config()
| linvictor88/vse-lbaas-driver | quantum/plugins/services/agent_loadbalancer/drivers/vedge/vselb.py | Python | apache-2.0 | 7,013 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Facilities for creating multiple test combinations.
Here is an example of testing various optimizers in Eager and Graph mode:
class AdditionExample(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(mode=["graph", "eager"],
optimizer=[AdamOptimizer(),
GradientDescentOptimizer()]))
def testOptimizer(self, optimizer):
... f(optimizer)...
This will run `testOptimizer` 4 times with the specified optimizers: 2 in
Eager and 2 in Graph mode.
The test will be provided with arguments that match the arguments of combine
by name. It is necessary to request all arguments, except for `mode`, which is
optional.
`combine()` function is available for creating a cross product of various
options. `times()` function exists for creating a product of N `combine()`-ed
results. See below.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import sys
import types
import unittest
from absl.testing import parameterized
import six
from tensorflow.contrib.cluster_resolver import TPUClusterResolver
from tensorflow.contrib.distribute.python import mirrored_strategy as mirrored_lib
from tensorflow.contrib.distribute.python import one_device_strategy as one_device_lib
from tensorflow.contrib.distribute.python import tpu_strategy as tpu_lib
from tensorflow.contrib.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.contrib.optimizer_v2 import adam as adam_v2
from tensorflow.contrib.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
from tensorflow.python.util import tf_inspect
GPU_TEST = "test_gpu" in sys.argv[0]
TPU_TEST = "test_tpu" in sys.argv[0]
def generate(combinations):
"""A decorator for generating test cases of a test method or a test class.
Args:
combinations: a list of dictionaries created using combine() and times().
Restrictions:
-- the "mode" argument can be either "eager" or "graph". It's "graph" by
default.
-- arguments of the test method must match by name to get the corresponding
value of the combination. Tests must accept all arguments except the
"mode", "required_tpu" and "required_gpus".
-- "distribution" argument is special and optional. It is meant for passing
instances of DistributionStrategy. Each instance is to be passed as via
`NamedDistribution`. If using "distribution", "required_gpus" and
"required_tpu" should be specified via the NamedDistribution instance,
rather than as separate arguments.
-- "required_tpu" argument is special and optional. If not `None`, then the
test will be skipped if TPUs aren't available.
-- "required_gpus" argument is special and optional. If not `None`, then the
test will be skipped if the specified number of GPUs aren't available.
Returns:
a decorator that will cause the test method or the test class to be run
under the specified conditions.
Raises:
ValueError - if "mode" argument wasn't either "eager" or "graph" or if other
arguments were not accepted by the test method.
"""
def decorator(test_method_or_class):
"""The decorator to be returned."""
# Generate good test names that can be used with --test_filter.
named_combinations = []
for combination in combinations:
# We use OrderedDicts in `combine()` and `times()` to ensure stable
# order of keys in each dictionary.
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format(
"".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) + [("testcase_name",
"_test{}".format(name))]))
if isinstance(test_method_or_class, type):
class_object = test_method_or_class
class_object._test_method_ids = test_method_ids = {}
for name, test_method in six.iteritems(class_object.__dict__.copy()):
if (name.startswith(unittest.TestLoader.testMethodPrefix) and
isinstance(test_method, types.FunctionType)):
delattr(class_object, name)
methods = {}
parameterized._update_class_dict_for_param_test_case(
class_object.__name__, methods, test_method_ids, name,
parameterized._ParameterizedTestIter(
_augment_with_special_arguments(test_method),
named_combinations, parameterized._NAMED, name))
for method_name, method in six.iteritems(methods):
setattr(class_object, method_name, method)
return class_object
else:
test_method = _augment_with_special_arguments(test_method_or_class)
return parameterized.named_parameters(*named_combinations)(test_method)
return decorator
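# Illustrative sketch (added, not part of the original module): a test combined
# over a distribution strategy, using one of the NamedDistribution instances
# defined near the end of this file.
#
#   @combinations.generate(combinations.combine(
#       distribution=[combinations.one_device_strategy], mode=["graph"]))
#   def testStep(self, distribution):
#     ...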
def _augment_with_special_arguments(test_method):
def decorated(self, **kwargs):
"""A wrapped test method that treats some arguments in a special way."""
mode = kwargs.pop("mode", "graph")
distribution = kwargs.get("distribution", None)
required_tpu = kwargs.pop("required_tpu", False)
required_gpus = kwargs.pop("required_gpus", None)
if distribution:
assert required_gpus is None, (
"Do not use `required_gpus` and `distribution` together.")
assert required_tpu is False, (
"Do not use `required_tpu` and `distribution` together.")
required_gpus = distribution.required_gpus
required_tpu = distribution.required_tpu
if required_tpu and not TPU_TEST:
self.skipTest("Test requires a TPU, but it's not available.")
if not required_tpu and TPU_TEST:
self.skipTest("Test that doesn't require a TPU.")
if not required_gpus:
if GPU_TEST:
self.skipTest("Test that doesn't require GPUs.")
elif context.num_gpus() < required_gpus:
# TODO(priyag): Consider allowing tests in graph mode using soft
# placement.
self.skipTest(
"{} GPUs are not available for this test. {} GPUs are available".
format(required_gpus, context.num_gpus()))
# At this point, `kwargs` doesn't have `required_gpus` or `required_tpu`
# that the user might have specified. `kwargs` still has `mode`, which
# the test is allowed to accept or ignore.
requested_arguments = tf_inspect.getfullargspec(test_method).args
missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
set(requested_arguments + ["mode"]))
if missing_arguments:
raise ValueError("The test is missing arguments {} .".format(
missing_arguments))
kwargs_to_pass = {}
for arg in requested_arguments:
if arg == "self":
kwargs_to_pass[arg] = self
else:
kwargs_to_pass[arg] = kwargs[arg]
if mode == "eager":
with context.eager_mode():
if distribution:
kwargs_to_pass["distribution"] = distribution.strategy
test_method(**kwargs_to_pass)
elif mode == "graph":
with ops.Graph().as_default(), context.graph_mode():
if distribution:
kwargs_to_pass["distribution"] = distribution.strategy
test_method(**kwargs_to_pass)
else:
raise ValueError(
"'mode' has to be either 'eager' or 'graph' and not {}".format(
mode))
return decorated
def combine(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]`
or `option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = combine(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
return [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
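# Illustrative example (added, not part of the original module):
#   combine(a=[1, 2], b=5)
# returns one OrderedDict per point of the cross product,
#   [OrderedDict([("a", 1), ("b", 5)]), OrderedDict([("a", 2), ("b", 5)])]
# with keys kept in a deterministic order so generated test names are stable.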
def times(*combined):
"""Generate a product of N sets of combinations.
times(combine(a=[1,2]), combine(b=[3,4])) == combine(a=[1,2], b=[3,4])
Args:
*combined: N lists of dictionaries that specify combinations.
Returns:
a list of dictionaries for each combination.
Raises:
ValueError: if some of the inputs have overlapping keys.
"""
assert combined
if len(combined) == 1:
return combined[0]
first = combined[0]
rest_combined = times(*combined[1:])
combined_results = []
for a in first:
for b in rest_combined:
if set(a.keys()).intersection(set(b.keys())):
raise ValueError("Keys need to not overlap: {} vs {}".format(
a.keys(), b.keys()))
combined_results.append(OrderedDict(list(a.items()) + list(b.items())))
return combined_results
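# Illustrative example (added, not part of the original module):
#   times(combine(a=[1, 2]), combine(b=[3, 4]))
# yields the same four OrderedDicts as combine(a=[1, 2], b=[3, 4]):
#   (a=1, b=3), (a=1, b=4), (a=2, b=3), (a=2, b=4).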
class NamedObject(object):
"""A class that translates an object into a good test name."""
def __init__(self, name, obj):
self._name = name
self._obj = obj
def __getattr__(self, name):
return getattr(self._obj, name)
def __call__(self, *args, **kwargs):
return self._obj(*args, **kwargs)
def __repr__(self):
return self._name
class NamedDistribution(object):
"""Translates DistributionStrategy and its data into a good name."""
def __init__(self, name, distribution_fn, required_gpus=None,
required_tpu=False):
self._distribution_fn = distribution_fn
self._name = name
self._required_gpus = required_gpus
self._required_tpu = required_tpu
def __repr__(self):
return self._name
@property
def strategy(self):
return self._distribution_fn()
@property
def required_gpus(self):
return self._required_gpus
@property
def required_tpu(self):
return self._required_tpu
# pylint: disable=g-long-lambda
default_strategy = NamedDistribution(
"Default",
distribution_strategy_context._get_default_distribution_strategy, # pylint: disable=protected-access
required_gpus=None)
one_device_strategy = NamedDistribution(
"OneDeviceCPU", lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
required_gpus=None)
tpu_strategy = NamedDistribution(
"TPU", lambda: tpu_lib.TPUStrategy(
TPUClusterResolver(""), steps_per_run=2),
required_tpu=True)
tpu_strategy_one_step = NamedDistribution(
"TPUOneStep", lambda: tpu_lib.TPUStrategy(
TPUClusterResolver(""), steps_per_run=1),
required_tpu=True)
mirrored_strategy_with_one_cpu = NamedDistribution(
"Mirrored1CPU",
lambda: mirrored_lib.MirroredStrategy(["/cpu:0"]))
mirrored_strategy_with_one_gpu = NamedDistribution(
"Mirrored1GPU",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0"]),
required_gpus=1)
mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
"MirroredCPUAndGPU",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
mirrored_strategy_with_two_gpus = NamedDistribution(
"Mirrored2GPUs",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
core_mirrored_strategy_with_one_cpu = NamedDistribution(
"CoreMirrored1CPU",
lambda: mirrored_lib.CoreMirroredStrategy(["/cpu:0"]))
core_mirrored_strategy_with_one_gpu = NamedDistribution(
"CoreMirrored1GPU",
lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0"]),
required_gpus=1)
core_mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
"CoreMirroredCPUAndGPU",
lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
core_mirrored_strategy_with_two_gpus = NamedDistribution(
"CoreMirrored2GPUs",
lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
gradient_descent_optimizer_v1_fn = NamedObject(
"GradientDescentV1", lambda: gradient_descent.GradientDescentOptimizer(0.2))
adagrad_optimizer_v1_fn = NamedObject(
"AdagradV1", lambda: adagrad.AdagradOptimizer(0.001))
adam_optimizer_v1_fn = NamedObject("AdamV1",
lambda: adam.AdamOptimizer(0.001, epsilon=1))
rmsprop_optimizer_v1_fn = NamedObject(
"RmsPropV1", lambda: rmsprop.RMSPropOptimizer(0.001))
optimizers_v1 = [gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn]
gradient_descent_optimizer_v2_fn = NamedObject(
"GradientDescentV2",
lambda: gradient_descent_v2.GradientDescentOptimizer(0.2))
adagrad_optimizer_v2_fn = NamedObject(
"AdagradV2", lambda: adagrad_v2.AdagradOptimizer(0.001))
adam_optimizer_v2_fn = NamedObject(
"AdamV2", lambda: adam_v2.AdamOptimizer(0.001, epsilon=1))
optimizers_v2 = [gradient_descent_optimizer_v2_fn, adagrad_optimizer_v2_fn]
graph_and_eager_modes = ["graph", "eager"]
def distributions_and_v1_optimizers():
"""A common set of combination with DistributionStrategies and Optimizers."""
return combine(
distribution=[
one_device_strategy,
mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus,
core_mirrored_strategy_with_gpu_and_cpu,
core_mirrored_strategy_with_two_gpus,
],
optimizer_fn=optimizers_v1)
def distributions_and_v2_optimizers():
"""DistributionStrategies and V2 Optimizers."""
return combine(
distribution=[
one_device_strategy,
mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus,
core_mirrored_strategy_with_gpu_and_cpu,
core_mirrored_strategy_with_two_gpus,
],
optimizer_fn=optimizers_v2)
| asimshankar/tensorflow | tensorflow/contrib/distribute/python/combinations.py | Python | apache-2.0 | 15,137 |
#!/usr/bin/env python
"""
Rules
for *.py files
* if the changed file is __init__.py, and there is a side-band test/ dir, then test the entire test/functional directory
the reason for this is that the init files are usually organizing collections
and those can affect many different apis if they break
* if the filename is test_*.py then include it
* if the filename is *.py, then check to see if it has an associated test_FILENAME file
and if so, include it in the test
* summarize all of the above so that a test_FILENAME that is a subpath of the first bullet
is not tested twice
for non-*.py files
* if the file is in a test/functional directory, test the whole directory
"""
import subprocess
import os
import shutil
import argparse
def cleanup_tox_directory():
if os.path.exists('.tox'):
shutil.rmtree('.tox')
def examine_python_rules(line):
fname, fext = os.path.splitext(line)
filename = os.path.basename(line)
dirname = os.path.dirname(line)
test_filename = 'test_' + filename
functional_test_file = '{0}/test/functional/{1}'.format(dirname, test_filename)
functional_test_dir = '{0}/test/functional/'.format(dirname)
if filename == '__init__.py' and os.path.exists(functional_test_dir):
return functional_test_dir
elif filename.startswith('test_') and filename.endswith('.py'):
return line
elif fext == '.py' and os.path.exists(functional_test_file):
return functional_test_file
elif 'test/functional' in line and filename == '__init__.py':
print(" * Skipping {0} because it is not a test file".format(line))
elif filename == '__init__.py' and not os.path.exists(functional_test_dir):
print(" * {0} does not have a side-band test directory!".format(line))
else:
print(" * {0} did not match any rules!".format(line))
def examine_non_python_rules(line):
if 'test/functional' in line:
return os.path.dirname(line)
def determine_files_to_test(product, commit):
results = []
build_all = [
'setup.py', 'f5/bigip/contexts.py', 'f5/bigip/mixins.py',
'f5/bigip/resource.py', 'f5sdk_plugins/fixtures.py',
'f5/bigip/__init__.py'
]
output_file = "pytest.{0}.jenkins.txt".format(product)
p1 = subprocess.Popen(
['git', '--no-pager', 'diff', '--name-only', 'origin/development', commit],
stdout=subprocess.PIPE,
)
p2 = subprocess.Popen(
        ['egrep', '-v', r'(^requirements\.|^setup.py)'],
stdin=p1.stdout,
stdout=subprocess.PIPE,
)
p3 = subprocess.Popen(
        ['egrep', r'(^f5\/{0}\/)'.format(product)],
stdin=p2.stdout,
stdout=subprocess.PIPE,
)
out, err = p3.communicate()
out = out.splitlines()
out = filter(None, out)
if not out:
return
for line in out:
fname, fext = os.path.splitext(line)
if not os.path.exists(line):
print "{0} was not found. Maybe this is a rename?".format(line)
continue
if line in build_all:
cleanup_tox_directory()
results.append('f5/{0}'.format(product))
elif fext == '.py':
result = examine_python_rules(line)
if result:
results.append(result)
else:
result = examine_non_python_rules(line)
if result:
results.append(result)
if results:
results = set(results)
results = compress_testable_files(results)
fh = open(output_file, 'w')
fh.writelines("%s\n" % l for l in results)
fh.close()
def compress_testable_files(files):
    # Drop entries that live under a directory that is already selected,
    # so the same code path is not tested twice.
lines = sorted(files)
for idx, item in enumerate(lines):
file, ext = os.path.splitext(item)
if not ext and not file.endswith('/'):
item += '/'
tmp = [x for x in lines if item in x and item != x]
for _ in tmp:
lines.remove(_)
return lines
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c','--commit', help='Git commit to check', required=True)
args = parser.parse_args()
for product in ['iworkflow', 'bigip', 'bigiq']:
determine_files_to_test(product, args.commit)
| F5Networks/f5-common-python | devtools/bin/create-test-list.py | Python | apache-2.0 | 4,268 |
__author__ = 'thauser'
from argh import arg
import logging
from pnc_cli import utils
from pnc_cli.swagger_client.apis import BuildconfigurationsetsApi
from pnc_cli.swagger_client.apis import BuildconfigsetrecordsApi
sets_api = BuildconfigurationsetsApi(utils.get_api_client())
bcsr_api = BuildconfigsetrecordsApi(utils.get_api_client())
@arg("-p", "--page-size", help="Limit the amount of build records returned")
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_build_configuration_set_records(page_size=200, sort="", q=""):
"""
List all build configuration set records.
"""
response = utils.checked_api_call(bcsr_api, 'get_all', page_size=page_size, sort=sort, q=q)
if response:
return response.content
@arg("id", help="ID of build configuration set record to retrieve.")
def get_build_configuration_set_record(id):
"""
Get a specific BuildConfigSetRecord
"""
response = utils.checked_api_call(bcsr_api, 'get_specific', id=id)
if not response:
logging.error("A BuildConfigurationSetRecord with ID {} does not exist.".format(id))
return
return response.content
@arg("id", help="ID of BuildConfigSetRecord to retrieve build records from.")
@arg("-p", "--page-size", help="Limit the amount of build records returned")
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_records_for_build_config_set(id, page_size=200, sort="", q=""):
"""
Get a list of BuildRecords for the given BuildConfigSetRecord
"""
bcrs_check = utils.checked_api_call(sets_api, 'get_all', q="id==" + id)
if not bcrs_check:
logging.error("A BuildConfigurationSet with ID {} does not exist.".format(id))
return
response = utils.checked_api_call(bcsr_api, 'get_build_records', id=id, page_size=page_size, sort=sort, q=q)
if response:
return response.content
| jianajavier/pnc-cli | pnc_cli/buildconfigsetrecords.py | Python | apache-2.0 | 1,916 |
class Solution:
def maxProfit(self, prices, fee):
        # dp[0][i]: max cash at the end of day i while holding a share
        # dp[1][i]: max cash at the end of day i while holding no share
        dp = [[-prices[0]], [0]]
        for i in range(1, len(prices)):
            # Keep holding, or buy today with yesterday's free cash.
            dp[0].append(max(dp[0][i-1], dp[1][i-1] - prices[i]))
            # Stay out, or sell today and pay the transaction fee.
            dp[1].append(max(dp[0][i-1] + prices[i] - fee, dp[1][i-1]))
        return dp[1][-1]
print(Solution().maxProfit([1, 3, 2, 8, 4, 9], 2))
| zuun77/givemegoogletshirts | leetcode/python/714_best-time-to-buy-and-sell-stock-with-transaction-fee.py | Python | apache-2.0 | 335 |
# Copyright (C) 2017,2019 Rodrigo Jose Hernandez Cordoba
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bl_info = {
"name": "AeonGames Skeleton Format (.skl)",
"author": "Rodrigo Hernandez",
"version": (1, 0, 0),
"blender": (2, 80, 0),
"location": "File > Export > Export AeonGames Skeleton",
"description": "Exports an armature to an AeonGames Skeleton (SKL) file",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Import-Export"}
import bpy
from . import export
def skl_menu_func(self, context):
self.layout.operator(
export.SKL_OT_exporter.bl_idname,
text="AeonGames Skeleton (.skl)")
def register():
bpy.utils.register_class(export.SKL_OT_exporter)
bpy.types.TOPBAR_MT_file_export.append(skl_menu_func)
if __name__ == "__main__":
register()
| AeonGames/AeonEngine | tools/blender/addons/io_skeleton_skl/__init__.py | Python | apache-2.0 | 1,334 |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import time
# ============= standard library imports ========================
from threading import Thread
from pyface.tasks.action.schema import SToolBar
from pyface.tasks.task_layout import TaskLayout, PaneItem, Splitter, VSplitter
from pyface.ui.qt4.tasks.advanced_editor_area_pane import EditorWidget
from traits.api import Any, Instance, on_trait_change
# ============= local library imports ==========================
from pychron.core.ui.gui import invoke_in_main_thread
from pychron.envisage.tasks.editor_task import EditorTask
from pychron.spectrometer.tasks.editor import PeakCenterEditor, ScanEditor, CoincidenceEditor, ScannerEditor
from pychron.spectrometer.tasks.spectrometer_actions import StopScanAction
from pychron.spectrometer.tasks.spectrometer_panes import ControlsPane, \
ReadoutPane, IntensitiesPane, RecordControlsPane, DACScannerPane, MassScannerPane
class SpectrometerTask(EditorTask):
scan_manager = Any
name = 'Spectrometer'
id = 'pychron.spectrometer'
_scan_editor = Instance(ScanEditor)
tool_bars = [SToolBar(StopScanAction(), )]
def info(self, msg, *args, **kw):
super(SpectrometerTask, self).info(msg)
def spy_position_magnet(self, *args, **kw):
self.scan_manager.position_magnet(*args, **kw)
def spy_peak_center(self, name):
peak_kw = dict(confirm_save=False, warn=True,
new_thread=False,
message='spectrometer script peakcenter',
on_end=self._on_peak_center_end)
setup_kw = dict(config_name=name)
return self._peak_center(setup_kw=setup_kw, peak_kw=peak_kw)
def populate_mftable(self):
sm = self.scan_manager
cfg = sm.setup_populate_mftable()
if cfg:
def func():
refiso = cfg.isotope
ion = sm.ion_optics_manager
ion.backup_mftable()
odefl = []
dets = cfg.get_detectors()
self.debug('setting deflections')
for det, defl in dets:
odefl.append((det, sm.spectrometer.get_deflection(det)))
sm.spectrometer.set_deflection(det, defl)
for di in dets:
ion.setup_peak_center(detector=[di.name], isotope=refiso,
config_name=cfg.peak_center_config.active_item.name,
standalone_graph=False,
new=True,
show_label=True, use_configuration_dac=False)
ion.peak_center.update_others = False
name = 'Pop MFTable {}-{}'.format(di.name, refiso)
invoke_in_main_thread(self._open_editor, PeakCenterEditor(model=ion.peak_center,
name=name))
self._on_peak_center_start()
ion.do_peak_center(new_thread=False, save=True, warn=True)
self._on_peak_center_end()
if not ion.peak_center.isAlive():
break
self.debug('unset deflections')
for det, defl in odefl:
sm.spectrometer.set_deflection(det, defl)
fp = cfg.get_finish_position()
self.debug('move to end position={}'.format(fp))
if fp:
iso, det = fp
if iso and det:
ion.position(iso, det)
t = Thread(target=func)
t.start()
def stop_scan(self):
self.debug('stop scan fired')
editor = self.active_editor
self.debug('active editor {}'.format(editor))
if editor:
if isinstance(editor, (ScanEditor, PeakCenterEditor, CoincidenceEditor)):
self.debug('editor stop')
editor.stop()
def do_coincidence(self):
es = [int(e.name.split(' ')[-1])
for e in self.editor_area.editors
if isinstance(e, CoincidenceEditor)]
i = max(es) + 1 if es else 1
man = self.scan_manager.ion_optics_manager
name = 'Coincidence {:02d}'.format(i)
if man.setup_coincidence():
self._open_editor(CoincidenceEditor(model=man.coincidence, name=name))
man.do_coincidence_scan()
def do_peak_center(self):
peak_kw = dict(confirm_save=True, warn=True,
message='manual peakcenter',
on_end=self._on_peak_center_end)
self._peak_center(peak_kw=peak_kw)
def define_peak_center(self):
from pychron.spectrometer.ion_optics.define_peak_center_view import DefinePeakCenterView
man = self.scan_manager.ion_optics_manager
spec = man.spectrometer
dets = spec.detector_names
isos = spec.isotopes
dpc = DefinePeakCenterView(detectors=dets,
isotopes=isos,
detector=dets[0],
isotope=isos[0])
info = dpc.edit_traits()
if info.result:
det = dpc.detector
isotope = dpc.isotope
dac = dpc.dac
self.debug('manually setting mftable to {}:{}:{}'.format(det, isotope, dac))
message = 'manually define peak center {}:{}:{}'.format(det, isotope, dac)
man.spectrometer.magnet.update_field_table(det, isotope, dac, message)
def _on_peak_center_start(self):
self.scan_manager.log_events_enabled = False
self.scan_manager.scan_enabled = False
def _on_peak_center_end(self):
self.scan_manager.log_events_enabled = True
self.scan_manager.scan_enabled = True
def send_configuration(self):
self.scan_manager.spectrometer.send_configuration()
def prepare_destroy(self):
for e in self.editor_area.editors:
if hasattr(e, 'stop'):
e.stop()
self.scan_manager.prepare_destroy()
super(SpectrometerTask, self).prepare_destroy()
# def activated(self):
# self.scan_manager.activate()
# self._scan_factory()
# super(SpectrometerTask, self).activated()
def create_dock_panes(self):
panes = [
ControlsPane(model=self.scan_manager),
RecordControlsPane(model=self.scan_manager),
MassScannerPane(model=self.scan_manager),
DACScannerPane(model=self.scan_manager),
ReadoutPane(model=self.scan_manager),
IntensitiesPane(model=self.scan_manager)]
panes = self._add_canvas_pane(panes)
return panes
# def _active_editor_changed(self, new):
# if not new:
# try:
# self._scan_factory()
# except AttributeError:
# pass
# private
def _peak_center(self, setup_kw=None, peak_kw=None):
if setup_kw is None:
setup_kw = {}
if peak_kw is None:
peak_kw = {}
es = []
for e in self.editor_area.editors:
if isinstance(e, PeakCenterEditor):
try:
es.append(int(e.name.split(' ')[-1]))
except ValueError:
pass
i = max(es) + 1 if es else 1
ret = -1
ion = self.scan_manager.ion_optics_manager
self._peak_center_start_hook()
time.sleep(2)
name = 'Peak Center {:02d}'.format(i)
if ion.setup_peak_center(new=True, **setup_kw):
self._on_peak_center_start()
invoke_in_main_thread(self._open_editor, PeakCenterEditor(model=ion.peak_center, name=name))
ion.do_peak_center(**peak_kw)
ret = ion.peak_center_result
self._peak_center_stop_hook()
return ret
def _peak_center_start_hook(self):
pass
def _peak_center_stop_hook(self):
pass
def _scan_factory(self):
sim = self.scan_manager.spectrometer.simulation
name = 'Scan (Simulation)' if sim else 'Scan'
# self._open_editor(ScanEditor(model=self.scan_manager, name=name))
# print 'asdfas', self.editor_area.control
# print [e for e in self.editor_area.control.children() if isinstance(e, EditorWidget)]
# super(SpectrometerTask, self).activated()
se = ScanEditor(model=self.scan_manager, name=name)
self._open_editor(se)
def _default_layout_default(self):
return TaskLayout(
left=Splitter(
PaneItem('pychron.spectrometer.controls'),
orientation='vertical'),
right=VSplitter(PaneItem('pychron.spectrometer.intensities'),
PaneItem('pychron.spectrometer.readout')))
# def create_central_pane(self):
# g = ScanPane(model=self.scan_manager)
# return g
@on_trait_change('scan_manager:mass_scanner:new_scanner')
def _handle_mass_scan_event(self):
self._scan_event(self.scan_manager.mass_scanner)
@on_trait_change('scan_manager:dac_scanner:new_scanner')
def _handle_dac_scan_event(self):
self._scan_event(self.scan_manager.dac_scanner)
def _scan_event(self, scanner):
sim = self.scan_manager.spectrometer.simulation
name = 'Magnet Scan (Simulation)' if sim else 'Magnet Scan'
editor = next((e for e in self.editor_area.editors if e.id == 'pychron.scanner'), None)
if editor is not None:
scanner.reset()
else:
editor = ScannerEditor(model=scanner, name=name, id='pychron.scanner')
self._open_editor(editor, activate=False)
self.split_editors(0, 1, h2=300, orientation='vertical')
self.activate_editor(editor)
@on_trait_change('window:opened')
def _opened(self):
self.scan_manager.activate()
self._scan_factory()
ee = [e for e in self.editor_area.control.children() if isinstance(e, EditorWidget)][0]
# print int(ee.features())
# ee.setFeatures(QtGui.QDockWidget.NoDockWidgetFeatures)
# print int(ee.features())
# ee.update_title()
# ============= EOF =============================================
| UManPychron/pychron | pychron/spectrometer/tasks/spectrometer_task.py | Python | apache-2.0 | 11,184 |
import basepage
class NavigationBars(basepage.BasePage):
def expand_project_panel(self):
elm = self.driver.find_element_by_css_selector(
'a[data-target="#sidebar-accordion-project"]')
state = elm.get_attribute('class')
if 'collapsed' in state:
elm.click()
else:
pass
def expand_admin_panel(self):
elm = self.driver.find_element_by_css_selector(
'a[data-target="#sidebar-accordion-admin"]')
state = elm.get_attribute('class')
if 'collapsed' in state:
elm.click()
else:
pass
def expand_identity_panel(self):
elm = self.driver.find_element_by_css_selector(
'a[data-target="#sidebar-accordion-identity"]')
state = elm.get_attribute('class')
if 'collapsed' in state:
elm.click()
else:
pass
def expand_developer_panel(self):
elm = self.driver.find_element_by_css_selector(
'a[data-target="#sidebar-accordion-developer"]')
state = elm.get_attribute('class')
if 'collapsed' in state:
elm.click()
else:
pass
"""
Project > Compute > Resource
"""
def expand_project_compute(self):
NavigationBars.expand_project_panel(self)
elm = self.driver.find_element_by_css_selector(
'a[data-target="#sidebar-accordion-project-compute"]')
state = elm.get_attribute('class')
if 'collapsed' in state:
elm.click()
else:
pass
def click_project_compute_overview(self):
NavigationBars.expand_project_compute(self)
self.driver.find_element_by_css_selector(
'a[href="/project/"]').click()
def click_project_compute_instance(self):
NavigationBars.expand_project_compute(self)
self.driver.find_element_by_css_selector(
'a[href="/project/instances/"]').click()
def click_project_compute_volumes(self):
NavigationBars.expand_project_compute(self)
self.driver.find_element_by_css_selector(
'a[href="/project/volumes/"]').click()
def click_project_compute_images(self):
NavigationBars.expand_project_compute(self)
self.driver.find_element_by_css_selector(
'a[href="/project/images/"]').click()
def click_project_compute_access_and_security(self):
NavigationBars.expand_project_compute(self)
self.driver.find_element_by_css_selector(
'a[href="/project/access_and_security/"]').click()
"""
Project > Network > Resource
"""
def expand_project_network(self):
NavigationBars.expand_project_panel(self)
elm = self.driver.find_element_by_css_selector(
'a[data-target="#sidebar-accordion-project-network"]')
state = elm.get_attribute('class')
if 'collapsed' in state:
elm.click()
else:
pass
def click_project_network_network_topology(self):
NavigationBars.expand_project_network(self)
self.driver.find_element_by_css_selector(
'a[href="/project/network_topology/"]').click()
def click_project_network_networks(self):
NavigationBars.expand_project_network(self)
self.driver.find_element_by_css_selector(
'a[href="/project/networks/"]').click()
def click_project_network_routers(self):
NavigationBars.expand_project_network(self)
self.driver.find_element_by_css_selector(
'a[href="/project/routers/"]').click()
def click_project_network_loadbalancers(self):
NavigationBars.expand_project_network(self)
self.driver.find_element_by_css_selector(
'a[href="/project/ngloadbalancersv2/"]').click()
"""
Project > Orchestration > Resource
"""
def expand_project_orchestration(self):
NavigationBars.expand_project_panel(self)
elm = self.driver.find_element_by_css_selector(
'a[data-target="#sidebar-accordion-project-orchestration"]')
state = elm.get_attribute('class')
if 'collapsed' in state:
elm.click()
else:
pass
def click_project_orchestration_stacks(self):
NavigationBars.expand_project_orchestration(self)
self.driver.find_element_by_css_selector(
'a[href="/project/stacks/"]').click()
def click_project_orchestration_resource_types(self):
NavigationBars.expand_project_orchestration(self)
self.driver.find_element_by_css_selector(
'a[href="/project/stacks/resource_types/"]').click()
def click_project_orchestration_template_versions(self):
NavigationBars.expand_project_orchestration(self)
self.driver.find_element_by_css_selector(
'a[href="/project/stacks/template_versions/"]').click()
"""
Project > Object Store > Resource
"""
def expand_project_object_store(self):
NavigationBars.expand_project_panel(self)
elm = self.driver.find_element_by_css_selector(
'a[data-target="#sidebar-accordion-project-object_store"]')
state = elm.get_attribute('class')
if 'collapsed' in state:
elm.click()
else:
pass
def click_project_object_store_containers(self):
NavigationBars.expand_project_object_store(self)
self.driver.find_element_by_css_selector(
'a[href="/project/containers/"]').click()
"""
Admin > System > Resource
"""
def expand_admin_system(self):
NavigationBars.expand_admin_panel(self)
elm = self.driver.find_element_by_css_selector(
'a[data-target="#sidebar-accordion-admin-admin"]')
state = elm.get_attribute('class')
if 'collapsed' in state:
elm.click()
else:
pass
def click_admin_system_overview(self):
NavigationBars.expand_admin_system(self)
self.driver.find_element_by_css_selector(
'a[href="/admin/"]').click()
def click_admin_system_hypervisors(self):
NavigationBars.expand_admin_system(self)
self.driver.find_element_by_css_selector(
'a[href="/admin/hypervisors/"]').click()
def click_admin_system_host_aggregates(self):
NavigationBars.expand_admin_system(self)
self.driver.find_element_by_css_selector(
'a[href="/admin/aggregates/"]').click()
def click_admin_system_instances(self):
NavigationBars.expand_admin_system(self)
self.driver.find_element_by_css_selector(
'a[href="/admin/instances/"]').click()
def click_admin_system_volumes(self):
NavigationBars.expand_admin_system(self)
self.driver.find_element_by_css_selector(
'a[href="/admin/volumes/"]').click()
def click_admin_system_flavors(self):
NavigationBars.expand_admin_system(self)
self.driver.find_element_by_css_selector(
'a[href="/admin/flavors/"]').click()
def click_admin_system_images(self):
NavigationBars.expand_admin_system(self)
self.driver.find_element_by_css_selector(
'a[href="/admin/images/"]').click()
def click_admin_system_networks(self):
NavigationBars.expand_admin_system(self)
self.driver.find_element_by_css_selector(
'a[href="/admin/networks/"]').click()
def click_admin_system_routers(self):
NavigationBars.expand_admin_system(self)
self.driver.find_element_by_css_selector(
'a[href="/admin/routers/"]').click()
def click_admin_system_floating_ips(self):
NavigationBars.expand_admin_system(self)
self.driver.find_element_by_css_selector(
'a[href="/admin/floating_ips/"]').click()
def click_admin_system_defaults(self):
NavigationBars.expand_admin_system(self)
self.driver.find_element_by_css_selector(
'a[href="/admin/defaults/"]').click()
def click_admin_system_metadata_definitions(self):
NavigationBars.expand_admin_system(self)
self.driver.find_element_by_css_selector(
'a[href="/admin/metadata_defs/"]').click()
def click_admin_system_info(self):
NavigationBars.expand_admin_system(self)
self.driver.find_element_by_css_selector(
'a[href="/admin/info/"]').click()
"""
Identity > Resource
"""
def click_identity_projects(self):
NavigationBars.expand_identity_panel(self)
self.driver.find_element_by_css_selector(
'a[href="/identity/"]').click()
def click_identity_users(self):
NavigationBars.expand_identity_panel(self)
self.driver.find_element_by_css_selector(
'a[href="/identity/users/"]').click()
def click_identity_groups(self):
NavigationBars.expand_identity_panel(self)
self.driver.find_element_by_css_selector(
'a[href="/identity/groups/"]').click()
def click_identity_roles(self):
NavigationBars.expand_identity_panel(self)
self.driver.find_element_by_css_selector(
'a[href="/identity/roles/"]').click()
| rcbops-qe/horizon-selenium | pages/navigation_bars.py | Python | apache-2.0 | 9,234 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import yaml_util
from run import check_run_quick
class Processor(object):
def __init__(self, config, environ_path, yml_path, aws_path):
with open(environ_path, 'r') as f:
self.__environ_content = f.read()
if not self.__environ_content.endswith('\n'):
self.__environ_content += '\n'
with open(yml_path, 'r') as f:
self.__output = f.read()
self.__bindings = yaml_util.YamlBindings()
self.__bindings.import_string(config)
self.__write_yml_path = yml_path
self.__write_aws_path = aws_path
self.__write_environ_path = environ_path
self.__environ_keys = set()
def update_environ(self, key, name):
value = self.lookup(key)
if value is None:
return
self.__environ_keys.add(key)
assignment = '{name}={value}'.format(name=name, value=value)
match = re.search('^{name}=.*'.format(name=name),
self.__environ_content,
re.MULTILINE)
if match:
self.__environ_content = ''.join([
self.__environ_content[0:match.start(0)],
assignment,
self.__environ_content[match.end(0):]
])
else:
self.__environ_content += assignment + '\n'
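  # Illustrative note (added, not from the original source): update_environ
  # either rewrites an existing NAME=value line in place or appends a new one.
  # For example, assuming the environ file already contains a line starting
  # with "SPINNAKER_AWS_ENABLED=", calling
  #   self.update_environ('providers.aws.enabled', 'SPINNAKER_AWS_ENABLED')
  # replaces that whole line with the value looked up from the imported
  # config; otherwise the new assignment is appended to the end of the file.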
def update_in_place(self, key):
self.__output = self.__bindings.transform_yaml_source(self.__output, key)
def lookup(self, key):
try:
return self.__bindings.get(key)
except KeyError:
return None
def update_remaining_keys(self):
stack = [('', self.__bindings.map)]
while stack:
prefix, root = stack.pop()
for name, value in root.items():
key = '{prefix}{child}'.format(prefix=prefix, child=name)
if isinstance(value, dict):
stack.append((key + '.', value))
elif not key in self.__environ_keys:
try:
self.update_in_place(key)
except ValueError:
pass
def process(self):
self.update_environ('providers.aws.enabled', 'SPINNAKER_AWS_ENABLED')
self.update_environ('providers.aws.defaultRegion',
'SPINNAKER_AWS_DEFAULT_REGION')
self.update_environ('providers.google.enabled',
'SPINNAKER_GOOGLE_ENABLED')
self.update_environ('providers.google.primaryCredentials.project',
'SPINNAKER_GOOGLE_PROJECT_ID')
self.update_environ('providers.google.defaultRegion',
'SPINNAKER_GOOGLE_DEFAULT_REGION')
self.update_environ('providers.google.defaultZone',
'SPINNAKER_GOOGLE_DEFAULT_ZONE')
self.update_in_place('providers.aws.primaryCredentials.name')
aws_name = self.lookup('providers.aws.primaryCredentials.name')
aws_key = self.lookup('providers.aws.primaryCredentials.access_key_id')
aws_secret = self.lookup('providers.aws.primaryCredentials.secret_key')
if aws_key and aws_secret:
with open(self.__write_aws_path, 'w') as f:
f.write("""
[default]
aws_secret_access_key = {secret}
aws_access_key_id = {key}
""".format(name=aws_name, secret=aws_secret, key=aws_key))
self.update_remaining_keys()
with open(self.__write_environ_path, 'w') as f:
f.write(self.__environ_content)
with open(self.__write_yml_path, 'w') as f:
f.write(self.__output)
if __name__ == '__main__':
if len(sys.argv) != 5:
sys.stderr.write('Usage: <content> <environ-path> <local-yml-path> <aws-cred-path>\n')
sys.exit(-1)
content = sys.argv[1]
environ_path = sys.argv[2]
local_yml_path = sys.argv[3]
aws_credentials_path = sys.argv[4]
processor = Processor(content,
environ_path, local_yml_path, aws_credentials_path)
processor.process()
sys.exit(0)
| tgracchus/spinnaker | pylib/spinnaker/transform_old_config.py | Python | apache-2.0 | 4,498 |
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the single_task_evaluator."""
from mint.ctl import single_task_evaluator
from mint.ctl import single_task_trainer
from third_party.tf_models import orbit
import tensorflow as tf
import tensorflow_datasets as tfds
class SingleTaskEvaluatorTest(tf.test.TestCase):
def test_single_task_evaluation(self):
iris = tfds.load('iris')
train_ds = iris['train'].batch(32)
model = tf.keras.Sequential([
tf.keras.Input(shape=(4,), name='features'),
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(3)
])
trainer = single_task_trainer.SingleTaskTrainer(
train_ds,
label_key='label',
model=model,
loss_fn=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.SGD(
learning_rate=tf.keras.optimizers.schedules.PiecewiseConstantDecay(
[0], [0.01, 0.01])))
evaluator = single_task_evaluator.SingleTaskEvaluator(
train_ds,
label_key='label',
model=model,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
controller = orbit.Controller(
trainer=trainer,
evaluator=evaluator,
steps_per_loop=100,
global_step=trainer.optimizer.iterations)
controller.train(train_ds.cardinality().numpy())
controller.evaluate()
accuracy = evaluator.metrics[0].result().numpy()
self.assertGreater(0.925, accuracy)
if __name__ == '__main__':
tf.test.main()
| google-research/mint | mint/ctl/single_task_evaluator_test.py | Python | apache-2.0 | 2,149 |
class User(object):
def __init__(self, username=None, password=None, email=None):
self.username = username
self.password = password
self.email = email
@classmethod
def admin(cls):
return cls(username="admin", password="admin")
#random values for username and password
@classmethod
def random_data(cls):
from random import randint
return cls(username="user" + str(randint(0, 1000)), password="pass" + str(randint(0, 1000)))
| ArtemVavilov88/php4dvd_tests | php4dvd/model/user.py | Python | apache-2.0 | 512 |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.template import defaultfilters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon_lib import tables
from openstack_horizon import api
from openstack_horizon.dashboards.identity.groups import constants
LOG = logging.getLogger(__name__)
LOGOUT_URL = 'logout'
STATUS_CHOICES = (
("true", True),
("false", False)
)
class CreateGroupLink(tables.LinkAction):
name = "create"
verbose_name = _("Create Group")
url = constants.GROUPS_CREATE_URL
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("identity", "identity:create_group"),)
def allowed(self, request, group):
return api.keystone.keystone_can_edit_group()
class EditGroupLink(tables.LinkAction):
name = "edit"
verbose_name = _("Edit Group")
url = constants.GROUPS_UPDATE_URL
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:update_group"),)
def allowed(self, request, group):
return api.keystone.keystone_can_edit_group()
class DeleteGroupsAction(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Group",
u"Delete Groups",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Group",
u"Deleted Groups",
count
)
name = "delete"
policy_rules = (("identity", "identity:delete_group"),)
def allowed(self, request, datum):
return api.keystone.keystone_can_edit_group()
def delete(self, request, obj_id):
LOG.info('Deleting group "%s".' % obj_id)
api.keystone.group_delete(request, obj_id)
class ManageUsersLink(tables.LinkAction):
name = "users"
verbose_name = _("Modify Users")
url = constants.GROUPS_MANAGE_URL
icon = "pencil"
policy_rules = (("identity", "identity:get_group"),
("identity", "identity:list_users"),)
def allowed(self, request, datum):
return api.keystone.keystone_can_edit_group()
class GroupFilterAction(tables.FilterAction):
def filter(self, table, groups, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
def comp(group):
if q in group.name.lower():
return True
return False
return filter(comp, groups)
class GroupsTable(tables.DataTable):
name = tables.Column('name', verbose_name=_('Name'))
description = tables.Column(lambda obj: getattr(obj, 'description', None),
verbose_name=_('Description'))
id = tables.Column('id', verbose_name=_('Group ID'))
class Meta:
name = "groups"
verbose_name = _("Groups")
row_actions = (ManageUsersLink, EditGroupLink, DeleteGroupsAction)
table_actions = (GroupFilterAction, CreateGroupLink,
DeleteGroupsAction)
class UserFilterAction(tables.FilterAction):
def filter(self, table, users, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [user for user in users
if q in user.name.lower()
or q in getattr(user, 'email', '').lower()]
class RemoveMembers(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Remove User",
u"Remove Users",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Removed User",
u"Removed Users",
count
)
name = "removeGroupMember"
policy_rules = (("identity", "identity:remove_user_from_group"),)
def allowed(self, request, user=None):
return api.keystone.keystone_can_edit_group()
def action(self, request, obj_id):
user_obj = self.table.get_object_by_id(obj_id)
group_id = self.table.kwargs['group_id']
LOG.info('Removing user %s from group %s.' % (user_obj.id,
group_id))
api.keystone.remove_group_user(request,
group_id=group_id,
user_id=user_obj.id)
# TODO(lin-hua-cheng): Fix the bug when removing current user
# Keystone revokes the token of the user removed from the group.
# If the logon user was removed, redirect the user to logout.
class AddMembersLink(tables.LinkAction):
name = "add_user_link"
verbose_name = _("Add...")
classes = ("ajax-modal",)
icon = "plus"
url = constants.GROUPS_ADD_MEMBER_URL
policy_rules = (("identity", "identity:list_users"),
("identity", "identity:add_user_to_group"),)
def allowed(self, request, user=None):
return api.keystone.keystone_can_edit_group()
def get_link_url(self, datum=None):
return reverse(self.url, kwargs=self.table.kwargs)
class UsersTable(tables.DataTable):
name = tables.Column('name', verbose_name=_('User Name'))
email = tables.Column('email', verbose_name=_('Email'),
filters=[defaultfilters.escape,
defaultfilters.urlize])
id = tables.Column('id', verbose_name=_('User ID'))
enabled = tables.Column('enabled', verbose_name=_('Enabled'),
status=True,
status_choices=STATUS_CHOICES,
empty_value="False")
class GroupMembersTable(UsersTable):
class Meta:
name = "group_members"
verbose_name = _("Group Members")
table_actions = (UserFilterAction, AddMembersLink, RemoveMembers)
class AddMembers(tables.BatchAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Add User",
u"Add Users",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Added User",
u"Added Users",
count
)
name = "addMember"
icon = "plus"
requires_input = True
success_url = constants.GROUPS_MANAGE_URL
policy_rules = (("identity", "identity:add_user_to_group"),)
def allowed(self, request, user=None):
return api.keystone.keystone_can_edit_group()
def action(self, request, obj_id):
user_obj = self.table.get_object_by_id(obj_id)
group_id = self.table.kwargs['group_id']
LOG.info('Adding user %s to group %s.' % (user_obj.id,
group_id))
api.keystone.add_group_user(request,
group_id=group_id,
user_id=user_obj.id)
# TODO(lin-hua-cheng): Fix the bug when adding current user
# Keystone revokes the token of the user added to the group.
# If the logon user was added, redirect the user to logout.
def get_success_url(self, request=None):
group_id = self.table.kwargs.get('group_id', None)
return reverse(self.success_url, args=[group_id])
class GroupNonMembersTable(UsersTable):
class Meta:
name = "group_non_members"
verbose_name = _("Non-Members")
table_actions = (UserFilterAction, AddMembers)
| mrunge/openstack_horizon | openstack_horizon/dashboards/identity/groups/tables.py | Python | apache-2.0 | 8,157 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 12 22:25:38 2017
@author: sitibanc
"""
import math
import numpy as np
def entropy(p1, n1):  # positive, negative
if p1 == 0 and n1 == 0:
return 1
value = 0
pp = p1 / (p1 + n1)
pn = n1 / (p1 + n1)
if pp > 0:
value -= pp * math.log2(pp)
if pn > 0:
value -= pn * math.log2(pn)
return value
def infoGain(p1, n1, p2, n2):
total = p1 + n1 + p2 + n2
s1 = p1 + n1
s2 = p2 + n2
return entropy(p1 + p2, n1 + n2) - s1 / total * entropy(p1, n1) - s2 / total * entropy(p2, n2)
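# Worked example (added for clarity; the numbers are the classic PlayTennis
# figures, not computed by this script): with 9 positive and 5 negative
# samples,
#   entropy(9, 5) = -(9/14)*log2(9/14) - (5/14)*log2(5/14) ~= 0.940
# and a split into groups (6+, 2-) and (3+, 3-) gives
#   infoGain(6, 2, 3, 3) = 0.940 - (8/14)*entropy(6, 2) - (6/14)*entropy(3, 3)
#                        ~= 0.940 - (8/14)*0.811 - (6/14)*1.0 ~= 0.048
# buildDT below picks, at every node, the feature/threshold pair maximizing
# this quantity.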
def buildDT(feature, target, positive, negative):
### node structure (dictionary)
# node.leaf = 0/1
# node.selectf = feature index
    # node.threshold = split threshold on the selected feature's value
    # node.child = indices of the two child nodes (left, right)
###
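    # Illustrative example of the resulting entries (hypothetical values):
    # an internal node looks like
    #   {'data': [...], 'leaf': 0, 'selectf': 2, 'threshold': 1.5, 'child': [3, 4]}
    # while a leaf node stores a class decision instead:
    #   {'data': [...], 'leaf': 1, 'decision': 1}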
# root node
node = dict()
node['data'] = range(len(target))
### tree structure (list)
tree = []
tree.append(node)
###
i = 0
while i < len(tree):
idx = tree[i]['data']
        # Check whether every sample in this node already has the same label
        if sum(target[idx] == negative) == len(idx):  # all negative
tree[i]['leaf'] = 1 # is leaf node
tree[i]['decision'] = negative
        elif sum(target[idx] == positive) == len(idx):  # all positive
tree[i]['leaf'] = 1
tree[i]['decision'] = positive
        # Otherwise, try to find the best split
else:
bestIG = 0
            # Enumerate candidate thresholds from the feature values present in this node
            for j in range(feature.shape[1]):  # feature.shape returns a (num_rows, num_columns) tuple
                pool = list(set(feature[idx, j]))  # use a set to drop duplicate values
for k in range(len(pool) - 1):
threshold = (pool[k] + pool[k + 1]) / 2
                    G1 = []  # left child (feature value <= threshold)
                    G2 = []  # right child (feature value > threshold)
for t in idx:
if feature[t, j] <= threshold:
G1.append(t)
else:
G2.append(t)
# Calculate infoGain
thisIG = infoGain(sum(target[G1] == positive), sum(target[G1] == negative), sum(target[G2] == positive), sum(target[G2] == negative))
# Update bestIG
if thisIG > bestIG:
bestIG = thisIG
bestG1 = G1
bestG2 = G2
bestThreshold = threshold
bestf = j
if bestIG > 0:
tree[i]['leaf'] = 0
tree[i]['selectf'] = bestf
tree[i]['threshold'] = bestThreshold
tree[i]['child'] = [len(tree),len(tree) + 1]
                # append the left child first
node = dict()
node['data'] = bestG1
tree.append(node)
                # then the right child
node = dict()
node['data'] = bestG2
tree.append(node)
            # No split improves the information gain
else:
tree[i]['leaf'] = 1
                # Decide by majority vote
if sum(target[idx] == positive) > sum(target[idx] == negative):
tree[i]['decision'] = positive
else:
tree[i]['decision'] = negative
i += 1
return tree
data = np.loadtxt('PlayTennis.txt',usecols=range(5),dtype=int)
feature = data[:,0:4]
target = data[:,4]-1
def DT(feature, target):
node = dict()
node['data'] = range(len(target))
Tree = [];
Tree.append(node)
t = 0
while(t<len(Tree)):
idx = Tree[t]['data']
if(sum(target[idx])==0):
print(idx)
Tree[t]['leaf']=1
Tree[t]['decision']=0
elif(sum(target[idx])==len(idx)):
print(idx)
Tree[t]['leaf']=1
Tree[t]['decision']=1
else:
bestIG = 0
for i in range(feature.shape[1]):
pool = list(set(feature[idx,i]))
for j in range(len(pool)-1):
thres = (pool[j]+pool[j+1])/2
G1 = []
G2 = []
for k in idx:
if(feature[k,i]<=thres):
G1.append(k)
else:
G2.append(k)
thisIG = infoGain(sum(target[G1]==1),sum(target[G1]==0),sum(target[G2]==1),sum(target[G2]==0))
if(thisIG>bestIG):
bestIG = thisIG
bestG1 = G1
bestG2 = G2
bestthres = thres
bestf = i
if(bestIG>0):
Tree[t]['leaf']=0
Tree[t]['selectf']=bestf
Tree[t]['threshold']=bestthres
Tree[t]['child']=[len(Tree),len(Tree)+1]
node = dict()
node['data']=bestG1
Tree.append(node)
node = dict()
node['data']=bestG2
Tree.append(node)
else:
Tree[t]['leaf']=1
                if(sum(target[idx]==1)>sum(target[idx]==0)):
Tree[t]['decision']=1
else:
Tree[t]['decision']=0
t+=1
return Tree
Tree = buildDT(feature, target, 1, 0)
#Tree = DT(feature, target)
for i in range(len(target)):
test_feature = feature[i,:]
now = 0
while(Tree[now]['leaf']==0):
bestf = Tree[now]['selectf']
thres = Tree[now]['threshold']
if(test_feature[bestf]<=thres):
now = Tree[now]['child'][0]
else:
now = Tree[now]['child'][1]
print(target[i],Tree[now]['decision'])
| SitiBanc/1061_NCTU_IOMDS | 1011/Course Material/temp.py | Python | apache-2.0 | 5,952 |
# adapted from zmq_server_example.py in tinyrpc
import time, sys
import zmq
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports.zmq import ZmqServerTransport
from tinyrpc.server import RPCServer
from tinyrpc.dispatch import RPCDispatcher
class Server(object):
def __init__(self, req_callback):
# print 'initializing Rpc'
self.ctx = zmq.Context()
self.dispatcher = RPCDispatcher()
self.transport = ZmqServerTransport.create(self.ctx, 'tcp://127.0.0.1:8000')
self.req_callback = req_callback
self.rpc_server = RPCServer(
self.transport,
JSONRPCProtocol(),
self.dispatcher
)
self.dispatcher.public(self.request) # register this function (replacing the decorator)
# print 'READYc: '+str(time.clock())
# sys.exit(0)
self.rpc_server.serve_forever()
# def start(self):
# self.rpc_server.serve_forever()
def request(self, req):
return self.req_callback(req)
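# Minimal client sketch (added for illustration; it follows tinyrpc's
# zmq_client_example.py and assumes the endpoint used above,
# tcp://127.0.0.1:8000):
#
#   import zmq
#   from tinyrpc import RPCClient
#   from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
#   from tinyrpc.transports.zmq import ZmqClientTransport
#
#   ctx = zmq.Context()
#   client = RPCClient(JSONRPCProtocol(),
#                      ZmqClientTransport.create(ctx, 'tcp://127.0.0.1:8000'))
#   proxy = client.get_proxy()
#   reply = proxy.request('some request')  # dispatched to Server.request()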
| dongting/sdnac | sdnac/api/rpc.py | Python | apache-2.0 | 1,086 |
# Copyright (c) 2014 Alcatel-Lucent Enterprise
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nw.providers.Provider import Provider
import subprocess
import re
from logging import getLogger
# /!\ Warning: this Provider uses the ping system command and has been designed for Linux (Debian Wheezy).
# List of data the Ping Provider can return (set in Provider's config field 'requested_data').
# If the Provider is configured with another requested_data, an exception is raised.
# If no requested_data is configured for Ping Provider, status is used by default.
_data_available = [
'status', # returns the status code (integer) of ping command execution: 0 = success, other = error occurred
'ping_response', # returns the whole std output of ping command (string)
'pkt_transmitted', # returns the number of packets transmitted (integer) (extracted from stdout of ping command using a regex)
'pkt_received', # returns the number of packets received (integer) (extracted from stdout of ping command using a regex)
    'pkt_loss',         # returns the packet loss percentage (float) (extracted from stdout of ping command using a regex)
'ping_avg', # returns the average ping time (in ms) (float) (extracted from stdout of ping command using a regex)
'ping_min', # returns the min ping time (in ms) (float) (extracted from stdout of ping command using a regex)
'ping_max' # returns the max ping time (in ms) (float) (extracted from stdout of ping command using a regex)
]
class Ping(Provider):
# Overload _mandatory_parameters and _optional_parameters to list the parameters required by HttpRequest provider
_mandatory_parameters = [
'ping_addr' # IP address or hostname of the machine to ping
]
_optional_parameters = [
'requested_data', # (string) Requested data (default is 'status' which returns the status code of ping command execution). See _data_available for available options.
'count', # (integer) -c option of ping: Stop after sending (and receiving) count ECHO_RESPONSE packets. If not defined, default value is 1.
        'timeout'           # (integer) -W option of ping: time to wait for a response, in seconds. This affects only the timeout in the absence of any responses; otherwise ping waits for two RTTs.
]
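    # Illustrative configuration (hypothetical values, not from the project's
    # docs): a provider configured with something like
    #   {'ping_addr': '192.0.2.10', 'count': 3, 'timeout': 2,
    #    'requested_data': 'ping_avg'}
    # builds the command "ping -c 3 -W 2 192.0.2.10" and process() returns the
    # average round-trip time in milliseconds.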
def __init__(self, options):
Provider.__init__(self, options)
# Build ping command
self.ping_cmd = "ping"
# Add -c option
if not self._config.get('count'):
getLogger(__name__).info('Option "count" is not provided to provider Ping, use default value (1)')
self.count = 1
else:
self.count = self._config.get('count')
self.ping_cmd += " -c " + str(self.count)
# Add -W option if requested
if self._config.get('timeout'):
self.ping_cmd += " -W " + str(self._config.get('timeout'))
# Add ping address
self.ping_cmd += " " + self._config.get('ping_addr')
# Load requested data (default is 'status')
self.requested_data = self._config.get('requested_data') or "status"
def process(self):
if (self.requested_data == "status"):
return self._getPingStatus()
else:
# TODO: better management of ping errors
try:
ping_data = self._performPing()
except:
return None # Ping error
# Return the requested data
if (self.requested_data == "ping_response"):
return ping_data.ping_response
if (self.requested_data == "pkt_transmitted"):
return ping_data.pkt_transmitted
if (self.requested_data == "pkt_received"):
return ping_data.pkt_received
elif (self.requested_data == "pkt_loss"):
return ping_data.pkt_loss
if (self.requested_data == "ping_avg"):
return ping_data.ping_avg
if (self.requested_data == "ping_min"):
return ping_data.ping_min
if (self.requested_data == "ping_max"):
return ping_data.ping_max
# Simply execute ping command to retrieve the command's returned code
def _getPingStatus(self):
getLogger(__name__).debug('Call ping command with the following options: ' + self.ping_cmd)
returncode = subprocess.call(self.ping_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
getLogger(__name__).debug('Ping command returned status code: ' + str(returncode))
return returncode
    # Execute ping command and return a PingData object in case of success
def _performPing(self):
getLogger(__name__).debug('Call ping command with the following options: ' + self.ping_cmd)
(output, error) = subprocess.Popen(self.ping_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True).communicate()
if output:
getLogger(__name__).debug('Ping command returned: ' + output)
return PingData(output)
else:
getLogger(__name__).debug('Ping error: ' + error)
raise Exception(error)
    # This function is called by __init__ of the abstract Provider class; it verifies during object initialization that the Provider's configuration is valid.
def _isConfigValid(self):
Provider._isConfigValid(self)
# If requested_data is provided, check if it is managed by Ping provider
if self._config.get('requested_data') and not (self._config.get('requested_data') in _data_available):
getLogger(__name__).error('Parameter requested_data "' + self._config.get('requested_data') + '" provided to provider Ping is not allowed. Allowed conditions are: ' + str(_data_available))
return False
return True
class PingData:
"""
Class extracting ping statistics data using regexps on ping command response.
/!\ Warning: regexp used to extract information applies on string returned by ping command on Linux (tested on Debian Wheezy).
Extracted data are:
- ping_response = the whole output of ping command
- pkt_transmitted = number of packets transmitted (integer)
- pkt_received = number of packets received (integer)
- pkt_loss = packet loss rate in percentage (float)
- ping_min = ping minimum response time in milliseconds (float)
- ping_avg = ping average response time in milliseconds (float)
- ping_max = ping maximum response time in milliseconds (float)
- ping_stdev = standard deviation of ping response time in milliseconds (float)
"""
def __init__(self, ping_response):
if not ping_response:
raise Exception("Can't create PingData object without ping response data")
self.ping_response = ping_response
# Extract packets data from statistics section of Ping response
result = re.search('(?P<pkt_transmitted>\d)\spackets\stransmitted,\s(?P<pkt_received>\d)?\s?\w*\sreceived,\s(?P<pkt_loss>[\d]*?\.?[\d]*)\%\spacket\sloss', self.ping_response)
self.pkt_transmitted = int(result.group('pkt_transmitted'))
self.pkt_received = int(result.group('pkt_received'))
self.pkt_loss = float(result.group('pkt_loss'))
# Extract time stats from statistics section of Ping response
result = re.search('min\/avg\/max\/\w*\s=\s(?P<ping_min>[\d]*\.[\d]*)\/(?P<ping_avg>[\d]*\.[\d]*)\/(?P<ping_max>[\d]*\.[\d]*)\/(?P<ping_stddev>[\d]*\.[\d]*)', self.ping_response)
self.ping_min = float(result.group('ping_min'))
self.ping_avg = float(result.group('ping_avg'))
self.ping_max = float(result.group('ping_max'))
self.ping_stddev = float(result.group('ping_stddev'))
| OpenTouch/night-watch | src/nw/providers/Ping.py | Python | apache-2.0 | 8,878 |
import lean
import lang.expr as expr
# =========================================================
# Declaration Views
class DeclView(lean.declaration):
def __init__(self, decl):
self.decl = decl
def destruct(self):
# type: DeclView -> (lean.name, ?, ?, lean.expr, lean.expr)
return (self.decl.get_name(),
self.decl.get_univ_params(),
self.decl.get_num_univ_params(),
self.decl.get_type(),
self.decl.get_value())
def mentions(self, d_thm):
v = self.decl.get_value()
return expr.gather_theorem(d_thm, v)
# =========================================================
# Environment Views
class EnvView(lean.environment):
def __init__(self, env):
# type: lean.environment -> None
self.env = env
def get_decls(self, f=None):
# type: (lean.declaration -> bool) -> [lean.declaration]
decls = []
self.env.for_each_declaration(lambda decl: decls.append(decl))
if f:
decls = filter(lambda decl: f(decl), decls)
return decls
def get_theorems(self):
# type: (lean.declaration -> bool) -> [lean.declaration]
return self.get_decls(lambda decl: decl.is_theorem())
def thm_dict_of_decls(self, decls):
# type: [lean.declaration] -> dict<lean.name, lean.expr>
d_thm = {}
for decl in decls:
if decl.is_theorem():
n, up, nup, t, v = DeclView(decl).destruct()
d_thm[n] = v
return d_thm
| dselsam/lean-python-bindings | lean/lang/env.py | Python | apache-2.0 | 1,567 |
import logging
import os
import sys
import time
import json
import jsonschema
import pprint
import pytest
import requests
from ray._private.test_utils import (
format_web_url,
wait_for_condition,
wait_until_server_available,
)
from ray.dashboard import dashboard
from ray.dashboard.tests.conftest import * # noqa
from ray.job_submission import JobSubmissionClient
logger = logging.getLogger(__name__)
def _get_snapshot(address: str):
response = requests.get(f"{address}/api/snapshot")
response.raise_for_status()
data = response.json()
schema_path = os.path.join(
os.path.dirname(dashboard.__file__), "modules/snapshot/snapshot_schema.json"
)
pprint.pprint(data)
jsonschema.validate(instance=data, schema=json.load(open(schema_path)))
return data
def test_successful_job_status(
ray_start_with_dashboard, disable_aiohttp_cache, enable_test_module
):
address = ray_start_with_dashboard.address_info["webui_url"]
assert wait_until_server_available(address)
address = format_web_url(address)
job_sleep_time_s = 5
entrypoint_cmd = (
'python -c"'
"import ray;"
"ray.init();"
"import time;"
f"time.sleep({job_sleep_time_s});"
'"'
)
client = JobSubmissionClient(address)
start_time_s = int(time.time())
runtime_env = {"env_vars": {"RAY_TEST_123": "123"}}
metadata = {"ray_test_456": "456"}
job_id = client.submit_job(
entrypoint=entrypoint_cmd, metadata=metadata, runtime_env=runtime_env
)
def wait_for_job_to_succeed():
data = _get_snapshot(address)
legacy_job_succeeded = False
job_succeeded = False
# Test legacy job snapshot (one driver per job).
for job_entry in data["data"]["snapshot"]["jobs"].values():
if job_entry["status"] is not None:
assert job_entry["config"]["metadata"]["jobSubmissionId"] == job_id
assert job_entry["status"] in {"PENDING", "RUNNING", "SUCCEEDED"}
assert job_entry["statusMessage"] is not None
legacy_job_succeeded = job_entry["status"] == "SUCCEEDED"
# Test new jobs snapshot (0 to N drivers per job).
for job_submission_id, entry in data["data"]["snapshot"][
"jobSubmission"
].items():
if entry["status"] is not None:
assert entry["status"] in {"PENDING", "RUNNING", "SUCCEEDED"}
assert entry["message"] is not None
# TODO(architkulkarni): Disable automatic camelcase.
assert entry["runtimeEnv"] == {"envVars": {"RAYTest123": "123"}}
assert entry["metadata"] == {"rayTest456": "456"}
assert entry["errorType"] is None
assert abs(entry["startTime"] - start_time_s) <= 2
if entry["status"] == "SUCCEEDED":
job_succeeded = True
assert entry["endTime"] >= entry["startTime"] + job_sleep_time_s
return legacy_job_succeeded and job_succeeded
wait_for_condition(wait_for_job_to_succeed, timeout=30)
def test_failed_job_status(
ray_start_with_dashboard, disable_aiohttp_cache, enable_test_module
):
address = ray_start_with_dashboard.address_info["webui_url"]
assert wait_until_server_available(address)
address = format_web_url(address)
job_sleep_time_s = 5
entrypoint_cmd = (
'python -c"'
"import ray;"
"ray.init();"
"import time;"
f"time.sleep({job_sleep_time_s});"
"import sys;"
"sys.exit(1);"
'"'
)
start_time_s = int(time.time())
client = JobSubmissionClient(address)
runtime_env = {"env_vars": {"RAY_TEST_456": "456"}}
metadata = {"ray_test_789": "789"}
job_id = client.submit_job(
entrypoint=entrypoint_cmd, metadata=metadata, runtime_env=runtime_env
)
def wait_for_job_to_fail():
data = _get_snapshot(address)
legacy_job_failed = False
job_failed = False
# Test legacy job snapshot (one driver per job).
for job_entry in data["data"]["snapshot"]["jobs"].values():
if job_entry["status"] is not None:
assert job_entry["config"]["metadata"]["jobSubmissionId"] == job_id
assert job_entry["status"] in {"PENDING", "RUNNING", "FAILED"}
assert job_entry["statusMessage"] is not None
legacy_job_failed = job_entry["status"] == "FAILED"
# Test new jobs snapshot (0 to N drivers per job).
for job_submission_id, entry in data["data"]["snapshot"][
"jobSubmission"
].items():
if entry["status"] is not None:
assert entry["status"] in {"PENDING", "RUNNING", "FAILED"}
assert entry["message"] is not None
# TODO(architkulkarni): Disable automatic camelcase.
assert entry["runtimeEnv"] == {"envVars": {"RAYTest456": "456"}}
assert entry["metadata"] == {"rayTest789": "789"}
assert entry["errorType"] is None
assert abs(entry["startTime"] - start_time_s) <= 2
if entry["status"] == "FAILED":
job_failed = True
assert entry["endTime"] >= entry["startTime"] + job_sleep_time_s
return legacy_job_failed and job_failed
wait_for_condition(wait_for_job_to_fail, timeout=25)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| ray-project/ray | dashboard/modules/snapshot/tests/test_job_submission.py | Python | apache-2.0 | 5,558 |
#!/usr/bin/env python3
import argparse
import datetime
import getpass
import json
import logging
import logging.config
import os
import re
import sys
import tabulate
import uuid
from critsapi.critsapi import CRITsAPI
from critsapi.critsdbapi import CRITsDBAPI
from lib.pt.common.config import Config
from lib.pt.common.constants import PT_HOME
from lib.pt.core.database import Database
from lib.pt.ptapi import PTAPI
from lib.crits.vocabulary.indicators import IndicatorTypes as it
from operator import itemgetter
from configparser import ConfigParser
log = logging.getLogger()
VERSION = "0.1337"
# Check configuration directory
local_config_dir = os.path.join(PT_HOME, 'etc', 'local')
if not os.path.exists(local_config_dir):
os.makedirs(local_config_dir)
sys.exit('No etc/local/ directory. See README to create.')
config = Config()
# Check local data directory
if config.core.cache_enabled:
if not os.path.exists(config.core.cache_dir):
log.info('Creating Cache directory in '
'{}'.format(config.core.cache_dir))
os.makedirs(config.core.cache_dir)
# Initialize logging
log_path = os.path.join(PT_HOME, 'etc', 'local', 'logging.ini')
try:
logging.config.fileConfig(log_path)
except Exception as e:
sys.exit('unable to load logging configuration file {}: '
'{}'.format(log_path, str(e)))
pt = PTAPI(username=config.core.pt_username, apikey=config.core.pt_apikey)
pt.set_proxy(http=config.proxy.http, https=config.proxy.https)
argparser = argparse.ArgumentParser()
argparser.add_argument('QUERY', action='store', help='A value to send as a'
' query to PT. Email, phone, name, etc.')
argparser.add_argument('--dev', dest='dev', action='store_true', default=False)
argparser.add_argument('--crits', dest='crits', action='store_true',
default=False, help='Write the results to CRITs with'
' appropriate relationships.')
argparser.add_argument('--test', dest='test', action='store_true',
default=False, help='Run with test data. (Save PT '
'queries)')
argparser.add_argument('-f', dest='force', action='store_true', default=False,
help='Force a new API query (do not used cached '
'results.')
argparser.add_argument('-t', action='append', dest='tags', default=[],
help='Bucket list tags for crits. Multiple -t options '
'are allowed.')
# Add our mutually exclusive items
meg = argparser.add_mutually_exclusive_group()
meg.add_argument('-n', dest='name', action='store_true', default=False,
help='The query is a name and pt_query will not try to '
'determine the type automatically.')
meg.add_argument('-a', dest='address', action='store_true', default=False,
help='The query is an address and pt_query will not '
'try to determine the type automatically.')
args = argparser.parse_args()
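# Hypothetical invocation (for illustration only; the query value and tags are
# made up):
#   python3 pt_query.py registrant@example.com --crits -t my_campaign
# looks up the email address in PassiveTotal WHOIS data and, with --crits,
# pushes the results into CRITs tagged with the extra bucket-list entries.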
# Patterns for determining which type of lookup to do
# Some items cannot be differentiated via regex (name vs address), so we use
# a flag to specify these
# Load patterns for regexes
pattern_config = ConfigParser()
patterns = {}
with open(os.path.join(PT_HOME, 'etc', 'patterns.ini')) as fp:
pattern_config.readfp(fp)
email_address_pattern = re.compile(pattern_config.get('email', 'pattern'))
phone_pattern = re.compile(pattern_config.get('phone', 'pattern'))
domain_pattern = re.compile(pattern_config.get('domain', 'pattern'))
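# The real expressions live in etc/patterns.ini; a minimal hypothetical file
# (illustrative only, not the project's actual patterns) could look like:
#   [email]
#   pattern = ^[^@\s]+@[^@\s]+\.[^@\s]+$
#   [phone]
#   pattern = ^\+?[\d .()-]{7,}$
#   [domain]
#   pattern = ^(?:[a-zA-Z0-9-]+\.)+[a-zA-Z]{2,}$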
database = None
if config.core.cache_enabled:
database = Database()
if args.crits:
HOME = os.path.expanduser("~")
if not os.path.exists(os.path.join(HOME, '.crits_api')):
print('''Please create a file with the following contents:
[crits]
user = lolnate
[keys]
prod = keyhere
dev = keyhere
''')
raise SystemExit('~/.crits_api was not found or was not accessible.')
crits_config = ConfigParser()
crits_config.read(os.path.join(HOME, '.crits_api'))
if crits_config.has_option("keys", "prod"):
crits_api_prod = crits_config.get("keys", "prod")
if crits_config.has_option("keys", "dev"):
crits_api_dev = crits_config.get("keys", "dev")
if crits_config.has_option("crits", "user"):
crits_username = crits_config.get("crits", "user")
if args.dev:
crits_url = config.crits.crits_dev_api_url
crits_api_key = crits_api_dev
if len(crits_api_key) != 40:
print("Dev API key in ~/.crits_api is the wrong length! Must be 40\
characters.")
else:
crits_url = config.crits.crits_prod_api_url
crits_api_key = crits_api_prod
if len(crits_api_key) != 40:
print("Prod API key in ~/.crits_api is the wrong length! Must be 40\
characters.")
crits_proxy = {
'http': config.crits.crits_proxy_url,
'https': config.crits.crits_proxy_url,
}
# Build our mongo connection
if args.dev:
crits_mongo = CRITsDBAPI(mongo_uri=config.crits.mongo_uri_dev,
db_name=config.crits.database)
else:
crits_mongo = CRITsDBAPI(mongo_uri=config.crits.mongo_uri,
db_name=config.crits.database)
crits_mongo.connect()
# Connect to the CRITs API
crits = CRITsAPI(
api_url=crits_url,
api_key=crits_api_key,
username=crits_username,
proxies=crits_proxy,
verify=config.crits.crits_verify
)
query = args.QUERY.rstrip()
# Get the user launching all this
user = getpass.getuser()
# Used to store the type of indicator in CRITs for the query object.
crits_indicator_type = ''
# Used to store the cache file location
cache_file = None
if database and not args.force and config.core.cache_enabled:
cache_file = database.get_cache_file(query)
if cache_file:
log.info('Using cache file for query {}'.format(query))
with open(cache_file) as fp:
results = json.loads(fp.read())
bucket_list = ['whois', 'pt:query']
for t in args.tags:
bucket_list.append(t)
if args.name or args.address:
if args.name:
field_str = 'name'
if args.address:
field_str = 'address'
if args.test:
results = pt.get_test_results(field=field_str)
else:
results = pt.whois_search(query=query, field=field_str)
if database and not cache_file and config.core.cache_enabled:
filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))
log.debug('Filepath is {}'.format(filepath))
database.add_results_to_cache(query, user, results, filepath)
base_reference = 'https://www.passivetotal.org/search/whois/'\
'{}'.format(field_str)
    # Use our config defined indicator type for whois name/address objects
if args.name:
crits_indicator_type = it.WHOIS_NAME
if args.address:
crits_indicator_type = it.WHOIS_ADDR1
bucket_list.append('registrant')
elif re.match(email_address_pattern, query):
if args.test:
results = pt.get_test_results(field='email')
else:
results = pt.whois_search(query=query, field='email')
# Now add the results to the db if we have it
if database and not cache_file and config.core.cache_enabled:
filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))
log.debug('Filepath is {}'.format(filepath))
database.add_results_to_cache(query, user, results, filepath)
base_reference = 'https://www.passivetotal.org/search/whois/email'
# Use our config defined indicator type of whois email objects
crits_indicator_type = it.WHOIS_REGISTRANT_EMAIL_ADDRESS
bucket_list.append('registrant')
elif re.match(phone_pattern, query):
if args.test:
results = pt.get_test_results(field='phone')
else:
results = pt.whois_search(query=query, field='phone')
# Now add the results to the db if we have it
if database and not cache_file and config.core.cache_enabled:
filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))
log.debug('Filepath is {}'.format(filepath))
database.add_results_to_cache(query, user, results, filepath)
base_reference = 'https://www.passivetotal.org/search/whois/phone'
crits_indicator_type = it.WHOIS_TELEPHONE
bucket_list.append('registrant')
elif re.match(domain_pattern, query):
if args.test:
results = pt.get_test_results(field='domain')
else:
results = pt.whois_search(query=query, field='domain')
# Now add the results to the db if we have it
if database and not cache_file and config.core.cache_enabled:
filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))
log.debug('Filepath is {}'.format(filepath))
database.add_results_to_cache(query, user, results, filepath)
base_reference = 'https://www.passivetotal.org/search/whois/domain'
crits_indicator_type = it.DOMAIN
else:
raise SystemExit("Your query didn't match a known pattern.")
# Add the query to CRITs regardless of the number of results
# TODO: Add campaigns
if args.crits:
found = False
# Search for it with raw mongo because API is slow
crits_result = crits_mongo.find('indicators', {'value': query, 'type':
crits_indicator_type})
if crits_result.count() > 0:
for r in crits_result:
if r['value'] == query:
indicator = r
found = True
if not found:
indicator = crits.add_indicator(
value=query,
itype=crits_indicator_type,
source=config.crits.default_source,
reference='Added via pt_query.py',
method='pt_query.py',
bucket_list=bucket_list,
indicator_confidence='low',
indicator_impact='low',
description='Queried with pt_query.py',
)
# This is pretty hacky - Since we use both the raw DB and the API, we might
# receive either an '_id' or an 'id' back. We are going to standardize on
# 'id', rather than '_id'
if 'id' not in indicator:
if '_id' not in indicator:
print(repr(indicator))
raise SystemExit('id and _id not found for query: '
'{} in new indicator'.format(query))
else:
indicator['id'] = indicator['_id']
# Iterate through all results and print/add to CRITs (if args provided)
formatted_results = []
for result in results['results']:
if 'domain' in result:
crits_indicators_to_add = []
        # Row contains:
        # Domain, Registrant Email, Registrant Name, Registrant Telephone,
        # Registrant Date, Expiration Date
row = ['', '', '', '', '', '']
row[0] = result['domain']
# Email address used to register
if 'registrant' in result:
# Append the registrant email
if 'email' in result['registrant']:
row[1] = result['registrant']['email']
email_obj = {
'value': result['registrant']['email'],
'type': it.WHOIS_REGISTRANT_EMAIL_ADDRESS,
'related_to': result['domain']
}
crits_indicators_to_add.append(email_obj)
if 'name' in result['registrant']:
row[2] = result['registrant']['name']
name_obj = {
'value': result['registrant']['name'],
'type': it.WHOIS_NAME,
'related_to': result['domain']
}
crits_indicators_to_add.append(name_obj)
if 'telephone' in result['registrant']:
row[3] = result['registrant']['telephone']
phone_obj = {
'value': result['registrant']['telephone'],
'type': it.WHOIS_TELEPHONE,
'related_to': result['domain']
}
crits_indicators_to_add.append(phone_obj)
if 'street' in result['registrant']:
addr1_obj = {
'value': result['registrant']['street'],
'type': it.WHOIS_ADDR1,
'related_to': result['domain']
}
crits_indicators_to_add.append(addr1_obj)
# Date the domain was registered
if 'registered' in result:
row[4] = result['registered']
if 'expiresAt' in result:
row[5] = result['expiresAt']
formatted_results.append(row)
# TODO: Tags. They appear to be an extra API query which is annoying
reference = '{0}/{1}'.format(base_reference, query)
if args.crits:
# Let's try getting the confidence and impact from the parent whois
# indicator
confidence = 'low'
impact = 'low'
if 'confidence' in indicator:
if 'rating' in indicator['confidence']:
confidence = indicator['confidence']['rating']
if 'impact' in indicator:
if 'rating' in indicator['impact']:
impact = indicator['impact']['rating']
# If not in CRITs, add all the associated indicators
bucket_list = ['whois pivoting', 'pt:found']
for t in args.tags:
bucket_list.append(t)
new_ind = crits.add_indicator(
value=result['domain'],
itype=it.DOMAIN,
source=config.crits.default_source,
reference=reference,
method='pt_query.py',
bucket_list=bucket_list,
indicator_confidence=confidence,
indicator_impact=impact,
description='Discovered through PT whois pivots'
)
# The CRITs API allows us to add a campaign to the indicator, but
# not multiple campaigns at one time,
# so we will do it directly with the DB.
# We want to replicate the campaigns of the WHOIS indicator (if
# a campaign exists) to the new indicator.
if 'campaign' in indicator:
for campaign in indicator['campaign']:
crits_mongo.add_embedded_campaign(
new_ind['id'],
'indicators',
campaign['name'],
campaign['confidence'],
campaign['analyst'],
datetime.datetime.now(),
campaign['description']
)
# If the new indicator and the indicator are not related,
# relate them.
if not crits.has_relationship(indicator['id'], 'Indicator',
new_ind['id'], 'Indicator',
rel_type='Registered'):
crits.forge_relationship(indicator['id'], 'Indicator',
new_ind['id'], 'Indicator',
rel_type='Registered')
# Now we can add the rest of the WHOIS indicators (if necessary)
for ind in crits_indicators_to_add:
# If the indicator exists, just get the id and use it to build
# relationships. We will look for one with the same source.
# If not in CRITs, add it and relate it.
whois_indicator = crits_mongo.find_one(
'indicators',
{
'value': ind['value'],
'type': ind['type'],
'source.name':
config.crits.default_source,
})
if not whois_indicator:
bucket_list = ['whois pivoting', 'pt:found']
for t in args.tags:
bucket_list.append(t)
whois_indicator = crits.add_indicator(
value=ind['value'],
itype=ind['type'],
source=config.crits.default_source,
reference=reference,
method='pt_query.py',
bucket_list=bucket_list,
indicator_confidence=confidence,
indicator_impact=impact,
description='Discovered through PT whois pivots'
)
# This is pretty hacky - Since we use both the raw DB and the
# API, we might receive either an '_id' or an 'id' back. We
# are going to standardize on 'id', rather than '_id'
if 'id' not in whois_indicator:
if '_id' not in whois_indicator:
print(repr(whois_indicator))
raise SystemExit('id and _id not found for query: '
'{} in whois indicator'.format(query))
whois_indicator['id'] = whois_indicator['_id']
# Not a huge deal, but make sure we don't waste time adding
# a relationship to itself
if whois_indicator['id'] == new_ind['id']:
continue
# The CRITs API allows us to add a campaign to the indicator,
# but not multiple campaigns at one time,
# so we will do it directly with the DB.
# We want to replicate the campaigns of the WHOIS indicator (if
# a campaign exists) to the new indicator.
# Continue with the same campaign
if 'campaign' in indicator:
for campaign in indicator['campaign']:
crits_mongo.add_embedded_campaign(
whois_indicator['id'],
'indicators',
campaign['name'],
campaign['confidence'],
campaign['analyst'],
datetime.datetime.now(),
campaign['description']
)
# If the new indicator and the indicator are not related,
# relate them.
if not crits.has_relationship(whois_indicator['id'],
'Indicator',
new_ind['id'],
'Indicator',
rel_type='Registered'):
crits.forge_relationship(whois_indicator['id'],
'Indicator',
new_ind['id'],
'Indicator',
rel_type='Registered')
# Add a bucket_list item to track that we searched for this whois indicator
if args.crits:
crits_mongo.add_bucket_list_item(indicator['id'], 'indicators',
'pt:whois_search_completed')
# Sort by registrant date (row index 4), newest first
formatted_results = sorted(formatted_results, key=itemgetter(4), reverse=True)
# Row contains:
# Domain, Registrant Email, Registrant Name, Registrant Telephone,
# Registrant Date, Expiration Date, Tags
headers = ['Domain', 'Registrant Email', 'Registrant Name',
'Registrant Telephone', 'Registrant Date', 'Expiration Date',
'Tags']
print(tabulate.tabulate(formatted_results, headers))
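# Roughly what the tabulate call above prints, with made-up values (the Tags
# column stays empty until the TODO above is addressed):
#
# Domain       Registrant Email   Registrant Name  Registrant Telephone  Registrant Date  Expiration Date  Tags
# -----------  -----------------  ---------------  --------------------  ---------------  ---------------  ------
# example.com  actor@example.org  John Doe         +1.5555550100         2017-03-01       2018-03-01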
| IntegralDefense/ptauto | bin/pt_query.py | Python | apache-2.0 | 19,839 |
# -*- coding: utf-8 -*-
#
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from pecan import rest
from pecan import abort
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from mistral import exceptions as ex
from mistral.api.controllers.v1 import task
from mistral.openstack.common import log as logging
from mistral.api.controllers import resource
from mistral.db import api as db_api
from mistral.engine import engine
LOG = logging.getLogger(__name__)
class Execution(resource.Resource):
"""Execution resource."""
id = wtypes.text
workbook_name = wtypes.text
task = wtypes.text
state = wtypes.text
# Context is a JSON object but since WSME doesn't support arbitrary
    # dictionaries, we have to use the text type and convert to/from JSON manually.
context = wtypes.text
def to_dict(self):
d = super(Execution, self).to_dict()
if d.get('context'):
d['context'] = json.loads(d['context'])
return d
@classmethod
def from_dict(cls, d):
e = cls()
for key, val in d.items():
if hasattr(e, key):
if key == 'context' and val:
val = json.dumps(val)
setattr(e, key, val)
return e
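    # Note on the context round-trip above (illustrative values): from_dict()
    # serialises a dict context to text, e.g. {'foo': 'bar'} becomes '{"foo": "bar"}',
    # and to_dict() parses it back into a dict, so API clients always exchange a
    # JSON object even though WSME only sees text.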
class Executions(resource.Resource):
"""A collection of Execution resources."""
executions = [Execution]
class ExecutionsController(rest.RestController):
tasks = task.TasksController()
@wsme_pecan.wsexpose(Execution, wtypes.text, wtypes.text)
def get(self, workbook_name, id):
LOG.debug("Fetch execution [workbook_name=%s, id=%s]" %
(workbook_name, id))
values = db_api.execution_get(workbook_name, id)
if not values:
abort(404)
else:
return Execution.from_dict(values)
@wsme_pecan.wsexpose(Execution, wtypes.text, wtypes.text, body=Execution)
def put(self, workbook_name, id, execution):
LOG.debug("Update execution [workbook_name=%s, id=%s, execution=%s]" %
(workbook_name, id, execution))
values = db_api.execution_update(workbook_name,
id,
execution.to_dict())
return Execution.from_dict(values)
@wsme_pecan.wsexpose(Execution, wtypes.text, body=Execution,
status_code=201)
def post(self, workbook_name, execution):
LOG.debug("Create execution [workbook_name=%s, execution=%s]" %
(workbook_name, execution))
try:
context = None
if execution.context:
context = json.loads(execution.context)
values = engine.start_workflow_execution(execution.workbook_name,
execution.task,
context)
except ex.MistralException as e:
            # TODO(nmakhotkin): we should use something such as a decorator here
abort(400, e.message)
return Execution.from_dict(values)
@wsme_pecan.wsexpose(None, wtypes.text, wtypes.text, status_code=204)
def delete(self, workbook_name, id):
LOG.debug("Delete execution [workbook_name=%s, id=%s]" %
(workbook_name, id))
db_api.execution_delete(workbook_name, id)
@wsme_pecan.wsexpose(Executions, wtypes.text)
def get_all(self, workbook_name):
LOG.debug("Fetch executions [workbook_name=%s]" % workbook_name)
executions = [Execution.from_dict(values)
for values in db_api.executions_get(workbook_name)]
return Executions(executions=executions)
| dzimine/mistral | mistral/api/controllers/v1/execution.py | Python | apache-2.0 | 4,285 |
import logging
from sys import exc_info
from socket import socket, AF_INET, SOCK_STREAM, SOCK_DGRAM, SHUT_RD, SOL_SOCKET, SO_REUSEADDR, error as socketError
from traceback import format_exception
from struct import Struct
from .common import *
from .models import *
from .exceptions import *
class ClientHost():
def __init__(self, host):
self.host = host
self.active = 0
self.udpsocks = set()
class ZeallectProxy(ZeallectSocketEntity):
def __init__(self, c):
self.config = c
self.remotedets = (c.host, int(c.port))
def run(self):
binding = (self.config.bindhost, int(self.config.bindport))
# create tcp socket/server
tcpsock = socket(AF_INET, SOCK_STREAM)
tcpsock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
tcpsock.bind(binding)
tcpsock.listen(5)
# create udp socket/server
udpsock = socket(AF_INET, SOCK_DGRAM)
udpsock.bind(binding)
# maps client UDP bindings to socket objects
# destined for the server
self.udpmap = {}
self.sockets = {
tcpsock: None,
udpsock: None,
}
# maps tcp sockets and addresses to ClientHost objects
self.hostmap = {}
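        # Illustrative contents (addresses made up) once a single client at
        # 10.0.0.5 has one TCP relay and one UDP flow established:
        #   self.hostmap == {<client tcp sock>: ch, <server tcp sock>: ch, '10.0.0.5': ch}
        #   self.udpmap  == {('10.0.0.5', 50000): <udp sock towards the server>}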
# shortcuts
remotedets = self.remotedets
socketdict = self.sockets
nextsocket = self.selectReadable()
ignore_exc = (None, socketError, EndOfStreamException)
self.alive = True
for sock in nextsocket:
# client -> us
if sock is udpsock:
data, details = sock.recvfrom(Z_SOCKET_RECV_SIZE)
host = details[0]
if host in self.hostmap:
if details not in self.udpmap:
newsock = socket(AF_INET, SOCK_DGRAM)
self.udpmap[details] = newsock
socketdict[newsock] = details
self.hostmap[host].udpsocks.add(newsock)
self.udpmap[details].sendto(data, remotedets)
# server -> us
elif SOCK_DGRAM == sock.type:
data, details = sock.recvfrom(Z_SOCKET_RECV_SIZE)
if details == remotedets:
udpsock.sendto(data, socketdict[sock])
# client -> us (connect())
elif sock is tcpsock:
newsock, details = sock.accept()
# create connection to server
newservsock = socket(AF_INET, SOCK_STREAM)
newservsock.settimeout(Z_SOCKET_SERVER_TIMEOUT)
try:
newservsock.connect(remotedets)
socketdict[newsock] = newservsock
socketdict[newservsock] = newsock
host = details[0]
if host not in self.hostmap:
clienthost = ClientHost(host)
else:
clienthost = self.hostmap[host]
clienthost.active += 1
self.hostmap[newsock] = clienthost
self.hostmap[newservsock] = clienthost
self.hostmap[details[0]] = clienthost
logging.info('Client connected from [/%s:%d]', *details)
except:
logging.warning('Failed to create tcp relay for [/%s:%d]', *details)
exception = exc_info()
logging.warning(''.join(format_exception(*exception)))
# tcp relay
else:
try:
data = sock.recv(Z_SOCKET_RECV_SIZE)
if data:
socketdict[sock].send(data)
else:
self.removeRelay(sock)
except:
exception = exc_info()
if exception[0] not in ignore_exc:
logging.warning('An exception occurred while processing a socket!')
logging.warning(''.join(format_exception(*exception)))
self.removeRelay(sock)
def removeRelay(self, sock):
sockpair = self.sockets[sock]
for _sock in (sockpair, sock):
try:
_sock.shutdown(SHUT_RD)
_sock.close()
except socketError:
pass
self.sockets.pop(_sock)
clienthost = self.hostmap[sock]
if clienthost.active == 1:
logging.debug('Freeing up all UDP sockets to %s', clienthost.host)
# pop all udp socks that the host has connected
dets = map(self.sockets.pop, clienthost.udpsocks)
# pop all udp socks from the udpmap
map(self.udpmap.pop, dets)
            # remove all references to the clienthost
self.hostmap.pop(sockpair)
self.hostmap.pop(sock)
self.hostmap.pop(clienthost.host)
else:
clienthost.active -= 1
logging.info('Client from %s disconnected', clienthost.host)
def disconnect(self):
socks = [s for s in self.sockets.keys() if SOCK_STREAM == s.type]
for sock in socks:
try:
sock.shutdown(SHUT_RD)
sock.close()
except socketError:
pass
| jm-/zeallect | zeallect/proxy.py | Python | apache-2.0 | 5,351 |
import logging
from event_consumer.conf import settings
from event_consumer.errors import PermanentFailure
from event_consumer.handlers import message_handler
_logger = logging.getLogger(__name__)
class IntegrationTestHandlers(object):
"""
Basic message handlers that log or raise known exceptions to allow
interactive testing of the RabbitMQ config.
"""
@staticmethod
def py_integration_ok(body):
"""
Should always succeed, never retry, never archive.
"""
msg = 'py_integration_ok, {}'.format(body)
_logger.info(msg)
@staticmethod
def py_integration_raise(body):
"""
Should retry until there are no attempts left, then archive.
"""
msg = 'py_integration_raise, {}'.format(body)
_logger.info(msg)
raise Exception(msg)
@staticmethod
def py_integration_raise_permanent(body):
"""
Should cause the message to be archived on first go.
"""
msg = 'py_integration_raise_permanent, {}'.format(body)
_logger.info(msg)
raise PermanentFailure(msg)
if settings.TEST_ENABLED:
# Add tasks for interactive testing (call decorators directly)
message_handler('py.integration.ok')(
IntegrationTestHandlers.py_integration_ok)
message_handler('py.integration.raise')(
IntegrationTestHandlers.py_integration_raise)
message_handler('py.integration.raise.permanent')(
IntegrationTestHandlers.py_integration_raise_permanent)
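    # The three registrations above are equivalent to decorating the handlers
    # directly, e.g. (sketch, not part of the module):
    #
    #   @message_handler('py.integration.ok')
    #   def py_integration_ok(body):
    #       ...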
| depop/celery-message-consumer | event_consumer/test_utils/handlers.py | Python | apache-2.0 | 1,527 |
#!/usr/bin/env python
# -*- coding: iso-8859-2 -*-
import string
import copy
import os
import gzip
import gtk
import commands
# Expose the LZMAFile class as `lzma` (from the backports package if present,
# otherwise from whatever lzma module is installed) so hdlists can be opened
# uniformly with lzma(path).
try:
    from backports.lzma import LZMAFile as lzma
except ImportError:
    from lzma import LZMAFile as lzma
import singletons
from common import *
import common; _ = common._
from Source import *
from Package import *
from Category import *
def czfind(istr):
l = len(istr)
i = 0
word = ''
flag = False
while i < l:
if istr[i] == '\n':
if flag:
flag = False
else:
break
word = ''
elif istr[i] == ' ':
flag = True
else:
word += istr[i]
i += 1
return word
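# czfind() returns the first line of its input that contains no spaces (lines
# containing a space are skipped). Illustrative call with a made-up path:
#   czfind('find: warning\n/var/lib/urpmi/Main/hdlist.cz\n') -> '/var/lib/urpmi/Main/hdlist.cz'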
class PackagePool:
"""This class retrieves and structures every packages that are accessible
from the system."""
HASHED_LEN = 2 # used by GetUpgradeableState
def __init__(self):
self.initialized = False
def Init(self):
"""Reinitialize the inner state of the package pool. Must be called
in case of manipulating the package registry."""
self.initialized = False
self.all_packages = []
self.package_name_pool = {}
self.installed_package_names = {}
singletons.application.DisplayProgress(_('Reading package sources'))
self.RegisterUpgradablePackages()
self.all_sources = self.GetSources()
singletons.application.DisplayProgress(_('Querying installed packages'))
self.RegisterInstalledPackages()
self.RegisterInstallablePackages()
self.initialized = True
def GetSources(self):
"""Retrieve and return the Source objects containted in urpmi.cfg."""
def get_source_name(line):
"""Extract the source name from the line of the file."""
prev_chr = line[0]
name = line[0]
for c in line[1:-1]:
if c == ' ':
if prev_chr == '\\':
name += ' '
else:
break
elif c != '\\':
name += c
prev_chr = c
return name
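        # get_source_name() above unescapes backslash-escaped spaces in the medium
        # name; e.g. (made-up line) 'Main\ Updates ftp://host/media {' yields
        # 'Main Updates'.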
file = open('/etc/urpmi/urpmi.cfg')
sources = []
word = ''
flag0 = False
flag1 = False
name_flag = False
while 1:
c = file.read(1)
if c == '':
break
elif c == '{':
if flag0 == False:
flag0 = True
else:
name_flag = False
name = get_source_name(word)
source = Source(name)
source.hdlist = czfind(commands.getoutput('find /var/lib/urpmi/ | grep cz | grep ' + name))
print 'HL:', source.hdlist
if source.hdlist != '':
sources.append(source)
word = ''
elif c == '}':
if flag1 == False:
flag1 = True
name_flag = True
else:
name_flag = True
elif name_flag == True and c not in ['\\', '\n']:
word += c
return sources
def GetActiveSources(self, new=False):
"""Return the active Source objects."""
if new:
all_sources = self.GetSources()
else:
all_sources = self.all_sources
return [source for source in all_sources if not source.ignore]
def RegisterInstalledPackages(self):
"""Retrieve a dictionary containing every installed packages on the system."""
file = os.popen('rpmquery --all "--queryformat=%{name}-%{version}-%{release}.%{arch}:%{size}:%{group}:%{installtime}\n"')
for line in file:
fields = line.strip().split(':')
name = fields[0]
size = int(fields[1])
category = fields[2]
btime = int(fields[3])
self.AddPackage(name, size, category, time=btime)
def RegisterInstallablePackages(self):
"""Get the list of every packages that are installable on the system."""
for source in self.GetActiveSources():
            # disabled: file = gzip.open(source.hdlist)
# print "DEBUG " + lzma.open(source.hdlist).read()
file = lzma(source.hdlist)
for line in file:
if line[:6] != '@info@':
continue
fields = line.strip()[7:].split('@')
longname = fields[0]
size = int(fields[2])
category = fields[3]
self.AddPackage(longname, size, category, source)
def RegisterUpgradablePackages(self):
upl = commands.getoutput('urpmq --auto-select --whatrequires').split()
l = len (upl)
i = 0
self.upgradable_packages = []
self.upgradable_packages_long = []
while i < l:
self.upgradable_packages.append(self.generate_shortname(upl[i]))
self.upgradable_packages_long.append(upl[i])
i += 1
def generate_shortname(self, longname):
"""Generate shortname from a longname. This is a workaround if association failed."""
print("LONGN:", longname)
pos1 = longname.rfind("-")
if pos1 > 0:
pos2 = longname[:pos1].rfind("-")
return longname[:pos2]
else:
return ""
def RegisterCategory(self, category_str, package):
"""Register category 'category' in the category tree."""
category_path = category_str.split('/')
current_category = self.category_tree
for subcategory_name in category_path:
current_category = current_category.GetSubCategory(subcategory_name)
current_category.AddPackage(package)
def AddPackage(self, longname, size, category, source=None, time=-1):
"""Add package to the registry."""
if self.package_name_pool.has_key(longname):
self.package_name_pool[longname].AddSource(source)
return
package = Package()
package.longname = longname
        package.shortname = self.generate_shortname(longname)  # I added this (orig. comment in Hungarian: "Ezt raktam be !!!")
package.size = size
package.category = category
if source:
package.AddSource(source)
package.is_installed = False
else:
package.is_installed = True
package.time = time
if len(package.longname) >= 3:
if package.longname.lower().find('lib') != -1:
package.is_library = True
else:
package.is_library = False
else:
package.is_library = False
if package.shortname in self.upgradable_packages and package.is_installed:
package.is_upgradable = True
else:
package.is_upgradable = False
self.package_name_pool[longname] = package
self.all_packages.append(package)
def GetPackagesContainingDescription(self, text):
"""Get the list of every packages that are installable on the system."""
active_sources = self.GetActiveSources() #[source for source in self.all_sources if not source.ignore]
containing_longnames = {}
for source in active_sources:
            file = lzma(source.hdlist)
for line in file:
if line[:9] == '@summary@':
fields = line.strip().split('@')
description = fields[2]
elif line[:6] == '@info@':
fields = line.strip().split('@')
longname = fields[2]
if description.lower().find(text) != -1:
containing_longnames[longname] = True
return containing_longnames
FILTER_PACKAGENAME = 0
FILTER_DESCRIPTION = 1
FILTER_FILENAME = 2
def GetPackagesContainingFiles(self, search_text):
pass
# active_sources = self.GetActiveSources()
# active_source_paths = ''
# containing_longnames = {}
# for source in active_sources:
# active_source_paths += escape(source.hdlist) + ' '
# command = 'parsehdlist --fileswinfo ' + active_source_paths + ' | grep ".*:files:.*'+escape(search_text)+'.*"'
# file = os.popen(command)
# for line in file:
# containing_longnames[ line.split(':')[0] ] = True
# return containing_longnames
def Filter(self, application, library, installed, noninstalled, search_mode, search_text):
"""Filter packages."""
        # reset package registry
self.packages = []
self.category_tree = Category('root')
search_text = search_text.lower()
if search_mode == self.FILTER_DESCRIPTION:
containing_longnames = self.GetPackagesContainingDescription(search_text)
elif search_mode == self.FILTER_FILENAME:
containing_longnames = self.GetPackagesContainingFiles(search_text)
for source in self.all_sources:
source.packages = []
for package in self.all_packages:
inst = (package.is_installed and installed) or (not package.is_installed and noninstalled)
ptype = (package.is_library and library) or (not package.is_library and application)
if search_mode == self.FILTER_PACKAGENAME:
search_inc = package.longname.lower().find(search_text)!=-1
elif search_mode == self.FILTER_DESCRIPTION:
search_inc = containing_longnames.has_key(package.longname)
elif search_mode == self.FILTER_FILENAME:
search_inc = containing_longnames.has_key(package.shortname)
else:
search_inc = True
included = inst and ptype and search_inc
if included:
for source in package.sources:
source.AddPackage(package)
self.RegisterCategory(package.category, package)
self.packages.append(package)
| blackPantherOS/packagemanagement | rpmanager/PackagePool.py | Python | apache-2.0 | 10,079 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import os
import pkgutil
import threading
import xml.etree.ElementTree as ET
from abc import abstractmethod
from builtins import object, open, str
from collections import defaultdict, namedtuple
from functools import total_ordering
import six
from future.utils import PY3
from twitter.common.collections import OrderedSet
from pants.backend.jvm.subsystems.jar_dependency_management import (JarDependencyManagement,
PinnedJarArtifactSet)
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.base.generator import Generator, TemplateData
from pants.base.revision import Revision
from pants.build_graph.target import Target
from pants.ivy.bootstrapper import Bootstrapper
from pants.java.jar.exclude import Exclude
from pants.java.jar.jar_dependency import JarDependency
from pants.java.jar.jar_dependency_utils import M2Coordinate, ResolvedJar
from pants.java.util import execute_runner
from pants.util.collections_abc_backport import OrderedDict
from pants.util.dirutil import safe_concurrent_creation, safe_mkdir, safe_open
from pants.util.fileutil import atomic_copy, safe_hardlink_or_copy
class IvyResolutionStep(object):
"""Ivy specific class for describing steps of performing resolution."""
# NB(nh): This class is the base class for the ivy resolve and fetch steps.
# It also specifies the abstract methods that define the components of resolution steps.
def __init__(self, confs, hash_name, pinned_artifacts, soft_excludes, ivy_resolution_cache_dir,
ivy_repository_cache_dir, ivy_workdir):
"""
:param confs: A tuple of string ivy confs to resolve for.
:param hash_name: A unique string name for this resolve.
:param pinned_artifacts: A tuple of "artifact-alikes" to force the versions of.
:param soft_excludes: A flag marking whether to pass excludes to Ivy or to apply them after the
fact.
:param ivy_repository_cache_dir: The cache directory used by Ivy for repository cache data.
:param ivy_resolution_cache_dir: The cache directory used by Ivy for resolution cache data.
:param ivy_workdir: A task-specific workdir that all ivy outputs live in.
"""
self.confs = confs
self.hash_name = hash_name
self.pinned_artifacts = pinned_artifacts
self.soft_excludes = soft_excludes
self.ivy_repository_cache_dir = ivy_repository_cache_dir
self.ivy_resolution_cache_dir = ivy_resolution_cache_dir
self.ivy_workdir = ivy_workdir
self.workdir_reports_by_conf = {c: self.resolve_report_path(c) for c in confs}
@abstractmethod
def required_load_files_exist(self):
"""The files required to load a previous resolve exist."""
@abstractmethod
def required_exec_files_exist(self):
"""The files to do a resolve exist."""
@abstractmethod
def load(self, targets):
"""Loads the result of a resolve or fetch."""
@abstractmethod
def exec_and_load(self, executor, extra_args, targets, jvm_options, workunit_name,
workunit_factory):
"""Runs the resolve or fetch and loads the result, returning it."""
@property
def workdir(self):
return os.path.join(self.ivy_workdir, self.hash_name)
@property
def hardlink_classpath_filename(self):
return os.path.join(self.workdir, 'classpath')
@property
def ivy_cache_classpath_filename(self):
return '{}.raw'.format(self.hardlink_classpath_filename)
@property
def frozen_resolve_file(self):
return os.path.join(self.workdir, 'resolution.json')
@property
def hardlink_dir(self):
return os.path.join(self.ivy_workdir, 'jars')
@abstractmethod
def ivy_xml_path(self):
"""Ivy xml location."""
@abstractmethod
def resolve_report_path(self, conf):
"""Location of the resolve report in the workdir."""
def _construct_and_load_hardlink_map(self):
artifact_paths, hardlink_map = IvyUtils.construct_and_load_hardlink_map(
self.hardlink_dir,
self.ivy_repository_cache_dir,
self.ivy_cache_classpath_filename,
self.hardlink_classpath_filename)
return artifact_paths, hardlink_map
def _call_ivy(self, executor, extra_args, ivyxml, jvm_options, hash_name_for_report,
workunit_factory, workunit_name):
IvyUtils.do_resolve(executor,
extra_args,
ivyxml,
jvm_options,
self.workdir_reports_by_conf,
self.confs,
self.ivy_resolution_cache_dir,
self.ivy_cache_classpath_filename,
hash_name_for_report,
workunit_factory,
workunit_name)
class IvyFetchStep(IvyResolutionStep):
"""Resolves ivy artifacts using the coordinates from a previous resolve."""
def required_load_files_exist(self):
return (all(os.path.isfile(report) for report in self.workdir_reports_by_conf.values()) and
os.path.isfile(self.ivy_cache_classpath_filename) and
os.path.isfile(self.frozen_resolve_file))
def resolve_report_path(self, conf):
return os.path.join(self.workdir, 'fetch-report-{}.xml'.format(conf))
@property
def ivy_xml_path(self):
return os.path.join(self.workdir, 'fetch-ivy.xml')
def required_exec_files_exist(self):
return os.path.isfile(self.frozen_resolve_file)
def load(self, targets):
try:
frozen_resolutions = FrozenResolution.load_from_file(self.frozen_resolve_file,
targets)
except Exception as e:
logger.debug('Failed to load {}: {}'.format(self.frozen_resolve_file, e))
return NO_RESOLVE_RUN_RESULT
return self._load_from_fetch(frozen_resolutions)
def exec_and_load(self, executor, extra_args, targets, jvm_options, workunit_name,
workunit_factory):
try:
frozen_resolutions = FrozenResolution.load_from_file(self.frozen_resolve_file,
targets)
except Exception as e:
logger.debug('Failed to load {}: {}'.format(self.frozen_resolve_file, e))
return NO_RESOLVE_RUN_RESULT
self._do_fetch(executor, extra_args, frozen_resolutions, jvm_options,
workunit_name, workunit_factory)
result = self._load_from_fetch(frozen_resolutions)
if not result.all_linked_artifacts_exist():
raise IvyResolveMappingError(
'Some artifacts were not linked to {} for {}'.format(self.ivy_workdir,
result))
return result
def _load_from_fetch(self, frozen_resolutions):
artifact_paths, hardlink_map = self._construct_and_load_hardlink_map()
return IvyFetchResolveResult(artifact_paths,
hardlink_map,
self.hash_name,
self.workdir_reports_by_conf,
frozen_resolutions)
def _do_fetch(self, executor, extra_args, frozen_resolution, jvm_options, workunit_name,
workunit_factory):
# It's important for fetches to have a different ivy report from resolves as their
# contents differ.
hash_name_for_report = '{}-fetch'.format(self.hash_name)
ivyxml = self.ivy_xml_path
self._prepare_ivy_xml(frozen_resolution, ivyxml, hash_name_for_report)
self._call_ivy(executor, extra_args, ivyxml, jvm_options, hash_name_for_report,
workunit_factory, workunit_name)
def _prepare_ivy_xml(self, frozen_resolution, ivyxml, resolve_hash_name_for_report):
# NB(nh): Our ivy.xml ensures that we always get the default configuration, even if it's not
# part of the requested confs.
default_resolution = frozen_resolution.get('default')
if default_resolution is None:
raise IvyUtils.IvyError("Couldn't find the frozen resolution for the 'default' ivy conf.")
try:
jars = default_resolution.jar_dependencies
IvyUtils.generate_fetch_ivy(jars, ivyxml, self.confs, resolve_hash_name_for_report)
except Exception as e:
raise IvyUtils.IvyError('Failed to prepare ivy resolve: {}'.format(e))
class IvyResolveStep(IvyResolutionStep):
"""Resolves ivy artifacts and produces a cacheable file containing the resulting coordinates."""
def required_load_files_exist(self):
return (all(os.path.isfile(report) for report in self.workdir_reports_by_conf.values()) and
os.path.isfile(self.ivy_cache_classpath_filename))
def resolve_report_path(self, conf):
return os.path.join(self.workdir, 'resolve-report-{}.xml'.format(conf))
@property
def ivy_xml_path(self):
return os.path.join(self.workdir, 'resolve-ivy.xml')
def load(self, targets):
artifact_paths, hardlink_map = self._construct_and_load_hardlink_map()
return IvyResolveResult(artifact_paths,
hardlink_map,
self.hash_name,
self.workdir_reports_by_conf)
def exec_and_load(self, executor, extra_args, targets, jvm_options,
workunit_name, workunit_factory):
self._do_resolve(executor, extra_args, targets, jvm_options, workunit_name, workunit_factory)
result = self.load(targets)
if not result.all_linked_artifacts_exist():
raise IvyResolveMappingError(
'Some artifacts were not linked to {} for {}'.format(self.ivy_workdir,
result))
frozen_resolutions_by_conf = result.get_frozen_resolutions_by_conf(targets)
FrozenResolution.dump_to_file(self.frozen_resolve_file, frozen_resolutions_by_conf)
return result
def _do_resolve(self, executor, extra_args, targets, jvm_options, workunit_name, workunit_factory):
ivyxml = self.ivy_xml_path
hash_name = '{}-resolve'.format(self.hash_name)
self._prepare_ivy_xml(targets, ivyxml, hash_name)
self._call_ivy(executor, extra_args, ivyxml, jvm_options, hash_name,
workunit_factory, workunit_name)
def _prepare_ivy_xml(self, targets, ivyxml, hash_name):
# TODO(John Sirois): merge the code below into IvyUtils or up here; either way, better
# diagnostics can be had in `IvyUtils.generate_ivy` if this is done.
# See: https://github.com/pantsbuild/pants/issues/2239
jars, global_excludes = IvyUtils.calculate_classpath(targets)
# Don't pass global excludes to ivy when using soft excludes.
if self.soft_excludes:
global_excludes = []
IvyUtils.generate_ivy(targets, jars, global_excludes, ivyxml, self.confs,
hash_name, self.pinned_artifacts)
class FrozenResolution(object):
"""Contains the abstracted results of a resolve.
With this we can do a simple fetch.
"""
# TODO(nh): include full dependency graph in here.
# So that we can inject it into the build graph if we want to.
class MissingTarget(Exception):
"""Thrown when a loaded resolution has a target spec for a target that doesn't exist."""
def __init__(self):
self.target_to_resolved_coordinates = defaultdict(OrderedSet)
self.all_resolved_coordinates = OrderedSet()
self.coordinate_to_attributes = OrderedDict()
@property
def jar_dependencies(self):
return [
JarDependency(c.org, c.name, c.rev, classifier=c.classifier, ext=c.ext,
**self.coordinate_to_attributes.get(c, {}))
for c in self.all_resolved_coordinates]
def add_resolved_jars(self, target, resolved_jars):
coords = [j.coordinate for j in resolved_jars]
self.add_resolution_coords(target, coords)
# Assuming target is a jar library.
for j in target.jar_dependencies:
url = j.get_url(relative=True)
if url:
self.coordinate_to_attributes[j.coordinate] = {'url': url, 'base_path': j.base_path}
else:
self.coordinate_to_attributes[j.coordinate] = {}
def add_resolution_coords(self, target, coords):
for c in coords:
self.target_to_resolved_coordinates[target].add(c)
self.all_resolved_coordinates.add(c)
def target_spec_to_coordinate_strings(self):
return {t.address.spec: [str(c) for c in coordinates]
for t, coordinates in self.target_to_resolved_coordinates.items()}
def __repr__(self):
return 'FrozenResolution(\n target_to_resolved_coordinates\n {}\n all\n {}'.format(
'\n '.join(': '.join([t.address.spec,
'\n '.join(str(c) for c in cs)])
for t,cs in self.target_to_resolved_coordinates.items()),
'\n '.join(str(c) for c in self.coordinate_to_attributes.keys())
)
def __eq__(self, other):
return (type(self) == type(other) and
self.all_resolved_coordinates == other.all_resolved_coordinates and
self.target_to_resolved_coordinates == other.target_to_resolved_coordinates)
def __ne__(self, other):
return not self == other
@classmethod
def load_from_file(cls, filename, targets):
if not os.path.exists(filename):
return None
with open(filename, 'r') as f:
# Using OrderedDict here to maintain insertion order of dict entries.
from_file = json.load(f, object_pairs_hook=OrderedDict)
result = {}
target_lookup = {t.address.spec: t for t in targets}
for conf, serialized_resolution in from_file.items():
resolution = FrozenResolution()
def m2_for(c):
return M2Coordinate.from_string(c)
for coord, attr_dict in serialized_resolution['coord_to_attrs'].items():
m2 = m2_for(coord)
resolution.coordinate_to_attributes[m2] = attr_dict
for spec, coord_strs in serialized_resolution['target_to_coords'].items():
t = target_lookup.get(spec, None)
if t is None:
raise cls.MissingTarget('Cannot find target for address {} in frozen resolution'
.format(spec))
resolution.add_resolution_coords(t, [m2_for(c) for c in coord_strs])
result[conf] = resolution
return result
@classmethod
def dump_to_file(cls, filename, resolutions_by_conf):
res = {}
for conf, resolution in resolutions_by_conf.items():
res[conf] = OrderedDict([
['target_to_coords',resolution.target_spec_to_coordinate_strings()],
['coord_to_attrs', OrderedDict([str(c), attrs]
for c, attrs in resolution.coordinate_to_attributes.items())]
])
with safe_concurrent_creation(filename) as tmp_filename:
mode = 'w' if PY3 else 'wb'
with open(tmp_filename, mode) as f:
json.dump(res, f)
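  # Illustrative shape of the JSON written by dump_to_file() above (spec and
  # coordinate strings are made up; real coordinate strings are whatever
  # str(M2Coordinate) produces):
  #   {"default": {"target_to_coords": {"3rdparty:guava": ["<coord>"]},
  #                "coord_to_attrs": {"<coord>": {"url": "...", "base_path": "..."}}}}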
class IvyResolveResult(object):
"""The result of an Ivy resolution.
The result data includes the list of resolved artifacts, the relationships between those artifacts
and the targets that requested them and the hash name of the resolve.
"""
def __init__(self, resolved_artifact_paths, hardlink_map, resolve_hash_name, reports_by_conf):
self._reports_by_conf = reports_by_conf
self.resolved_artifact_paths = resolved_artifact_paths
self.resolve_hash_name = resolve_hash_name
self._hardlink_map = hardlink_map
@property
def has_resolved_artifacts(self):
"""The requested targets have a resolution associated with them."""
return self.resolve_hash_name is not None
def all_linked_artifacts_exist(self):
"""All of the artifact paths for this resolve point to existing files."""
if not self.has_resolved_artifacts:
return False
for path in self.resolved_artifact_paths:
if not os.path.isfile(path):
return False
else:
return True
def report_for_conf(self, conf):
"""Returns the path to the ivy report for the provided conf.
Returns None if there is no path.
"""
return self._reports_by_conf.get(conf)
def get_frozen_resolutions_by_conf(self, targets):
frozen_resolutions_by_conf = OrderedDict()
for conf in self._reports_by_conf:
frozen_resolution = FrozenResolution()
for target, resolved_jars in self.resolved_jars_for_each_target(conf, targets):
frozen_resolution.add_resolved_jars(target, resolved_jars)
frozen_resolutions_by_conf[conf] = frozen_resolution
return frozen_resolutions_by_conf
def resolved_jars_for_each_target(self, conf, targets):
"""Yields the resolved jars for each passed JarLibrary.
If there is no report for the requested conf, yields nothing.
:param conf: The ivy conf to load jars for.
:param targets: The collection of JarLibrary targets to find resolved jars for.
:yield: target, resolved_jars
:raises IvyTaskMixin.UnresolvedJarError
"""
ivy_info = self._ivy_info_for(conf)
if not ivy_info:
return
jar_library_targets = [t for t in targets if isinstance(t, JarLibrary)]
ivy_jar_memo = {}
for target in jar_library_targets:
# Add the artifacts from each dependency module.
resolved_jars = self._resolved_jars_with_hardlinks(conf, ivy_info, ivy_jar_memo,
self._jar_dependencies_for_target(conf,
target),
target)
yield target, resolved_jars
def _jar_dependencies_for_target(self, conf, target):
return target.jar_dependencies
def _ivy_info_for(self, conf):
report_path = self._reports_by_conf.get(conf)
return IvyUtils.parse_xml_report(conf, report_path)
def _new_resolved_jar_with_hardlink_path(self, conf, target, resolved_jar_without_hardlink):
def candidate_cache_paths():
# There is a focus on being lazy here to avoid `os.path.realpath` when we can.
yield resolved_jar_without_hardlink.cache_path
yield os.path.realpath(resolved_jar_without_hardlink.cache_path)
for cache_path in candidate_cache_paths():
pants_path = self._hardlink_map.get(cache_path)
if pants_path:
break
else:
raise IvyResolveMappingError(
'Jar {resolved_jar} in {spec} not resolved to the ivy '
'hardlink map in conf {conf}.'
.format(spec=target.address.spec,
resolved_jar=resolved_jar_without_hardlink.cache_path,
conf=conf))
return ResolvedJar(coordinate=resolved_jar_without_hardlink.coordinate,
pants_path=pants_path,
cache_path=resolved_jar_without_hardlink.cache_path)
def _resolved_jars_with_hardlinks(self, conf, ivy_info, ivy_jar_memo, coordinates, target):
raw_resolved_jars = ivy_info.get_resolved_jars_for_coordinates(coordinates,
memo=ivy_jar_memo)
resolved_jars = [self._new_resolved_jar_with_hardlink_path(conf, target, raw_resolved_jar)
for raw_resolved_jar in raw_resolved_jars]
return resolved_jars
class IvyFetchResolveResult(IvyResolveResult):
"""A resolve result that uses the frozen resolution to look up dependencies."""
def __init__(self, resolved_artifact_paths, hardlink_map, resolve_hash_name, reports_by_conf,
frozen_resolutions):
super(IvyFetchResolveResult, self).__init__(resolved_artifact_paths, hardlink_map,
resolve_hash_name, reports_by_conf)
self._frozen_resolutions = frozen_resolutions
def _jar_dependencies_for_target(self, conf, target):
return self._frozen_resolutions[conf].target_to_resolved_coordinates.get(target, ())
NO_RESOLVE_RUN_RESULT = IvyResolveResult([], {}, None, {})
IvyModule = namedtuple('IvyModule', ['ref', 'artifact', 'callers'])
Dependency = namedtuple('DependencyAttributes',
['org', 'name', 'rev', 'mutable', 'force', 'transitive'])
Artifact = namedtuple('Artifact', ['name', 'type_', 'ext', 'url', 'classifier'])
logger = logging.getLogger(__name__)
class IvyResolveMappingError(Exception):
"""Raised when there is a failure mapping the ivy resolve results to pants objects."""
@total_ordering
class IvyModuleRef(object):
"""
:API: public
"""
# latest.integration is ivy magic meaning "just get the latest version"
_ANY_REV = 'latest.integration'
def __init__(self, org, name, rev, classifier=None, ext=None):
self.org = org
self.name = name
self.rev = rev
self.classifier = classifier
self.ext = ext or 'jar'
self._id = (self.org, self.name, self.rev, self.classifier, self.ext)
def __eq__(self, other):
return isinstance(other, IvyModuleRef) and self._id == other._id
# TODO(#6071): Return NotImplemented if other does not have attributes
def __lt__(self, other):
    # We can't just re-use __repr__ or __str__ because we want to order rev last
return ((self.org, self.name, self.classifier or '', self.ext, self.rev) <
(other.org, other.name, other.classifier or '', other.ext, other.rev))
def __hash__(self):
return hash(self._id)
def __str__(self):
return 'IvyModuleRef({})'.format(':'.join((x or '') for x in self._id))
def __repr__(self):
return ('IvyModuleRef(org={!r}, name={!r}, rev={!r}, classifier={!r}, ext={!r})'
.format(*self._id))
@property
def caller_key(self):
"""This returns an identifier for an IvyModuleRef that only retains the caller org and name.
Ivy represents dependees as `<caller/>`'s with just org and name and rev information.
This method returns a `<caller/>` representation of the current ref.
"""
return IvyModuleRef(name=self.name, org=self.org, rev=self._ANY_REV)
@property
def unversioned(self):
"""This returns an identifier for an IvyModuleRef without version information.
It's useful because ivy might return information about a different version of a dependency than
the one we request, and we want to ensure that all requesters of any version of that dependency
are able to learn about it.
"""
return IvyModuleRef(name=self.name, org=self.org, rev=self._ANY_REV, classifier=self.classifier,
ext=self.ext)
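  # For example (coordinates made up), IvyModuleRef('org', 'lib', '1.0').unversioned
  # compares equal to IvyModuleRef('org', 'lib', '2.0').unversioned, since both carry
  # the 'latest.integration' sentinel rev; caller_key keeps only org and name, also
  # with the sentinel rev.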
class IvyInfo(object):
"""
:API: public
"""
def __init__(self, conf):
self._conf = conf
self.modules_by_ref = {} # Map from ref to referenced module.
self.refs_by_unversioned_refs = {} # Map from unversioned ref to the resolved versioned ref
# Map from ref of caller to refs of modules required by that caller.
self._deps_by_caller = defaultdict(OrderedSet)
# Map from _unversioned_ ref to OrderedSet of IvyArtifact instances.
self._artifacts_by_ref = defaultdict(OrderedSet)
def add_module(self, module):
if not module.artifact:
# Module was evicted, so do not record information about it
return
ref_unversioned = module.ref.unversioned
if ref_unversioned in self.refs_by_unversioned_refs:
raise IvyResolveMappingError('Already defined module {}, as rev {}!'
.format(ref_unversioned, module.ref.rev))
if module.ref in self.modules_by_ref:
raise IvyResolveMappingError('Already defined module {}, would be overwritten!'
.format(module.ref))
self.refs_by_unversioned_refs[ref_unversioned] = module.ref
self.modules_by_ref[module.ref] = module
for caller in module.callers:
self._deps_by_caller[caller.caller_key].add(module.ref)
self._artifacts_by_ref[ref_unversioned].add(module.artifact)
def _do_traverse_dependency_graph(self, ref, collector, memo, visited):
memoized_value = memo.get(ref)
if memoized_value:
return memoized_value
if ref in visited:
# Ivy allows for circular dependencies
# If we're here, that means we're resolving something that
# transitively depends on itself
return set()
visited.add(ref)
acc = collector(ref)
# NB(zundel): ivy does not return deps in a consistent order for the same module for
# different resolves. Sort them to get consistency and prevent cache invalidation.
# See https://github.com/pantsbuild/pants/issues/2607
deps = sorted(self._deps_by_caller.get(ref.caller_key, ()))
for dep in deps:
acc.update(self._do_traverse_dependency_graph(dep, collector, memo, visited))
memo[ref] = acc
return acc
def traverse_dependency_graph(self, ref, collector, memo=None):
"""Traverses module graph, starting with ref, collecting values for each ref into the sets
created by the collector function.
:param ref an IvyModuleRef to start traversing the ivy dependency graph
:param collector a function that takes a ref and returns a new set of values to collect for
that ref, which will also be updated with all the dependencies accumulated values
:param memo is a dict of ref -> set that memoizes the results of each node in the graph.
If provided, allows for retaining cache across calls.
:returns the accumulated set for ref
"""
resolved_ref = self.refs_by_unversioned_refs.get(ref.unversioned)
if resolved_ref:
ref = resolved_ref
if memo is None:
memo = dict()
visited = set()
return self._do_traverse_dependency_graph(ref, collector, memo, visited)
def get_resolved_jars_for_coordinates(self, coordinates, memo=None):
"""Collects jars for the passed coordinates.
Because artifacts are only fetched for the "winning" version of a module, the artifacts
will not always represent the version originally declared by the library.
This method is transitive within the passed coordinates dependencies.
:param coordinates collections.Iterable: Collection of coordinates to collect transitive
resolved jars for.
:param memo: See `traverse_dependency_graph`.
:returns: All the artifacts for all of the jars for the provided coordinates,
including transitive dependencies.
:rtype: list of :class:`pants.java.jar.ResolvedJar`
"""
def to_resolved_jar(jar_ref, jar_path):
return ResolvedJar(coordinate=M2Coordinate(org=jar_ref.org,
name=jar_ref.name,
rev=jar_ref.rev,
classifier=jar_ref.classifier,
ext=jar_ref.ext),
cache_path=jar_path)
resolved_jars = OrderedSet()
def create_collection(dep):
return OrderedSet([dep])
for jar in coordinates:
classifier = jar.classifier if self._conf == 'default' else self._conf
jar_module_ref = IvyModuleRef(jar.org, jar.name, jar.rev, classifier, jar.ext)
for module_ref in self.traverse_dependency_graph(jar_module_ref, create_collection, memo):
for artifact_path in self._artifacts_by_ref[module_ref.unversioned]:
resolved_jars.add(to_resolved_jar(module_ref, artifact_path))
return resolved_jars
def __repr__(self):
return 'IvyInfo(conf={}, refs={})'.format(self._conf, self.modules_by_ref.keys())
class IvyUtils(object):
"""Useful methods related to interaction with ivy.
:API: public
"""
# Protects ivy executions.
_ivy_lock = threading.RLock()
# Protect writes to the global map of jar path -> hardlinks to that jar.
_hardlink_map_lock = threading.Lock()
INTERNAL_ORG_NAME = 'internal'
class IvyError(Exception):
"""Indicates an error preparing an ivy operation."""
class IvyResolveReportError(IvyError):
"""Indicates that an ivy report cannot be found."""
class IvyResolveConflictingDepsError(IvyError):
"""Indicates two or more locally declared dependencies conflict."""
class BadRevisionError(IvyError):
"""Indicates an unparseable version number."""
@staticmethod
def _generate_exclude_template(exclude):
return TemplateData(org=exclude.org, name=exclude.name)
@staticmethod
def _generate_override_template(jar):
return TemplateData(org=jar.org, module=jar.name, version=jar.rev)
@staticmethod
def _load_classpath_from_cachepath(path):
if not os.path.exists(path):
return []
else:
with safe_open(path, 'r') as cp:
return [_f for _f in (path.strip() for path in cp.read().split(os.pathsep)) if _f]
@classmethod
def do_resolve(cls, executor, extra_args, ivyxml, jvm_options, workdir_report_paths_by_conf,
confs, ivy_resolution_cache_dir, ivy_cache_classpath_filename, resolve_hash_name,
workunit_factory, workunit_name):
"""Execute Ivy with the given ivy.xml and copies all relevant files into the workdir.
This method does an Ivy resolve, which may be either a Pants resolve or a Pants fetch depending
on whether there is an existing frozen resolution.
After it is run, the Ivy reports are copied into the workdir at the paths specified by
workdir_report_paths_by_conf along with a file containing a list of all the requested artifacts
and their transitive dependencies.
:param executor: A JVM executor to use to invoke ivy.
:param extra_args: Extra arguments to pass to ivy.
:param ivyxml: The input ivy.xml containing the dependencies to resolve.
:param jvm_options: A list of jvm option strings to use for the ivy invoke, or None.
:param workdir_report_paths_by_conf: A dict mapping confs to report paths in the workdir.
:param confs: The confs used in the resolve.
:param resolve_hash_name: The hash to use as the module name for finding the ivy report file.
:param workunit_factory: A workunit factory for the ivy invoke, or None.
:param workunit_name: A workunit name for the ivy invoke, or None.
"""
ivy = Bootstrapper.default_ivy(bootstrap_workunit_factory=workunit_factory)
with safe_concurrent_creation(ivy_cache_classpath_filename) as raw_target_classpath_file_tmp:
extra_args = extra_args or []
args = ['-cachepath', raw_target_classpath_file_tmp] + extra_args
with cls._ivy_lock:
cls._exec_ivy(ivy, confs, ivyxml, args,
jvm_options=jvm_options,
executor=executor,
workunit_name=workunit_name,
workunit_factory=workunit_factory)
if not os.path.exists(raw_target_classpath_file_tmp):
raise cls.IvyError('Ivy failed to create classpath file at {}'
.format(raw_target_classpath_file_tmp))
cls._copy_ivy_reports(workdir_report_paths_by_conf, confs, ivy_resolution_cache_dir, resolve_hash_name)
logger.debug('Moved ivy classfile file to {dest}'
.format(dest=ivy_cache_classpath_filename))
@classmethod
def _copy_ivy_reports(cls, workdir_report_paths_by_conf, confs, ivy_resolution_cache_dir, resolve_hash_name):
for conf in confs:
ivy_cache_report_path = IvyUtils.xml_report_path(ivy_resolution_cache_dir, resolve_hash_name,
conf)
workdir_report_path = workdir_report_paths_by_conf[conf]
try:
atomic_copy(ivy_cache_report_path,
workdir_report_path)
except IOError as e:
raise cls.IvyError('Failed to copy report into workdir from {} to {}: {}'
.format(ivy_cache_report_path, workdir_report_path, e))
@classmethod
def _exec_ivy(cls, ivy, confs, ivyxml, args, jvm_options, executor,
workunit_name, workunit_factory):
ivy = ivy or Bootstrapper.default_ivy()
ivy_args = ['-ivy', ivyxml]
ivy_args.append('-confs')
ivy_args.extend(confs)
ivy_args.extend(args)
ivy_jvm_options = list(jvm_options)
# Disable cache in File.getCanonicalPath(), makes Ivy work with -symlink option properly on ng.
ivy_jvm_options.append('-Dsun.io.useCanonCaches=false')
runner = ivy.runner(jvm_options=ivy_jvm_options, args=ivy_args, executor=executor)
try:
with ivy.resolution_lock:
result = execute_runner(runner, workunit_factory=workunit_factory,
workunit_name=workunit_name)
if result != 0:
raise IvyUtils.IvyError('Ivy returned {result}. cmd={cmd}'.format(result=result,
cmd=runner.cmd))
except runner.executor.Error as e:
raise IvyUtils.IvyError(e)
@classmethod
def construct_and_load_hardlink_map(cls, hardlink_dir, ivy_repository_cache_dir,
ivy_cache_classpath_filename, hardlink_classpath_filename):
# Make our actual classpath be hardlinks, so that the paths are uniform across systems.
# Note that we must do this even if we read the raw_target_classpath_file from the artifact
# cache. If we cache the target_classpath_file we won't know how to create the hardlinks.
with IvyUtils._hardlink_map_lock:
# A common dir for hardlinks into the ivy2 cache. This ensures that paths to jars
# in artifact-cached analysis files are consistent across systems.
# Note that we have one global, well-known hardlink dir, again so that paths are
# consistent across builds.
hardlink_map = cls._hardlink_cachepath(ivy_repository_cache_dir,
ivy_cache_classpath_filename,
hardlink_dir,
hardlink_classpath_filename)
classpath = cls._load_classpath_from_cachepath(hardlink_classpath_filename)
return classpath, hardlink_map
@classmethod
def _hardlink_cachepath(cls, ivy_repository_cache_dir, inpath, hardlink_dir, outpath):
"""hardlinks all paths listed in inpath that are under ivy_repository_cache_dir into hardlink_dir.
If there is an existing hardlink for a file under inpath, it is used rather than creating
a new hardlink. Preserves all other paths. Writes the resulting paths to outpath.
Returns a map of path -> hardlink to that path.
"""
safe_mkdir(hardlink_dir)
# The ivy_repository_cache_dir might itself be a hardlink. In this case, ivy may return paths that
# reference the realpath of the .jar file after it is resolved in the cache dir. To handle
# this case, add both the hardlink'ed path and the realpath to the jar to the hardlink map.
real_ivy_cache_dir = os.path.realpath(ivy_repository_cache_dir)
hardlink_map = OrderedDict()
inpaths = cls._load_classpath_from_cachepath(inpath)
paths = OrderedSet([os.path.realpath(path) for path in inpaths])
for path in paths:
if path.startswith(real_ivy_cache_dir):
hardlink_map[path] = os.path.join(hardlink_dir, os.path.relpath(path, real_ivy_cache_dir))
else:
# This path is outside the cache. We won't hardlink it.
hardlink_map[path] = path
# Create hardlinks for paths in the ivy cache dir.
for path, hardlink in six.iteritems(hardlink_map):
if path == hardlink:
# Skip paths that aren't going to be hardlinked.
continue
safe_mkdir(os.path.dirname(hardlink))
safe_hardlink_or_copy(path, hardlink)
# (re)create the classpath with all of the paths
with safe_open(outpath, 'w') as outfile:
outfile.write(':'.join(OrderedSet(hardlink_map.values())))
return dict(hardlink_map)
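  # Illustrative mapping (paths made up): a jar resolved under the ivy cache, e.g.
  # ~/.ivy2/pants/org.foo/bar/jars/bar-1.0.jar, is linked to
  # <hardlink_dir>/org.foo/bar/jars/bar-1.0.jar, while a path outside the cache
  # maps to itself and is left unlinked.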
@classmethod
def xml_report_path(cls, resolution_cache_dir, resolve_hash_name, conf):
"""The path to the xml report ivy creates after a retrieve.
:API: public
:param string cache_dir: The path of the ivy cache dir used for resolves.
:param string resolve_hash_name: Hash from the Cache key from the VersionedTargetSet used for
resolution.
:param string conf: The ivy conf name (e.g. "default").
:returns: The report path.
:rtype: string
"""
return os.path.join(resolution_cache_dir, '{}-{}-{}.xml'.format(IvyUtils.INTERNAL_ORG_NAME,
resolve_hash_name, conf))
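  # Example (directory and hash made up):
  #   xml_report_path('/home/user/.ivy2/pants', 'deadbeef', 'default')
  #   -> '/home/user/.ivy2/pants/internal-deadbeef-default.xml'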
@classmethod
def parse_xml_report(cls, conf, path):
"""Parse the ivy xml report corresponding to the name passed to ivy.
:API: public
:param string conf: the ivy conf name (e.g. "default")
:param string path: The path to the ivy report file.
:returns: The info in the xml report.
:rtype: :class:`IvyInfo`
:raises: :class:`IvyResolveMappingError` if no report exists.
"""
if not os.path.exists(path):
raise cls.IvyResolveReportError('Missing expected ivy output file {}'.format(path))
logger.debug("Parsing ivy report {}".format(path))
ret = IvyInfo(conf)
etree = ET.parse(path)
doc = etree.getroot()
for module in doc.findall('dependencies/module'):
org = module.get('organisation')
name = module.get('name')
for revision in module.findall('revision'):
rev = revision.get('name')
callers = []
for caller in revision.findall('caller'):
callers.append(IvyModuleRef(caller.get('organisation'),
caller.get('name'),
caller.get('callerrev')))
for artifact in revision.findall('artifacts/artifact'):
classifier = artifact.get('extra-classifier')
ext = artifact.get('ext')
ivy_module_ref = IvyModuleRef(org=org, name=name, rev=rev,
classifier=classifier, ext=ext)
artifact_cache_path = artifact.get('location')
ivy_module = IvyModule(ivy_module_ref, artifact_cache_path, tuple(callers))
ret.add_module(ivy_module)
return ret
@classmethod
def generate_ivy(cls, targets, jars, excludes, ivyxml, confs, resolve_hash_name=None,
pinned_artifacts=None, jar_dep_manager=None):
if not resolve_hash_name:
resolve_hash_name = Target.maybe_readable_identify(targets)
return cls._generate_resolve_ivy(jars, excludes, ivyxml, confs, resolve_hash_name, pinned_artifacts,
jar_dep_manager)
@classmethod
def _generate_resolve_ivy(cls, jars, excludes, ivyxml, confs, resolve_hash_name, pinned_artifacts=None,
jar_dep_manager=None):
org = IvyUtils.INTERNAL_ORG_NAME
name = resolve_hash_name
extra_configurations = [conf for conf in confs if conf and conf != 'default']
jars_by_key = OrderedDict()
for jar in jars:
jars = jars_by_key.setdefault((jar.org, jar.name), [])
jars.append(jar)
manager = jar_dep_manager or JarDependencyManagement.global_instance()
artifact_set = PinnedJarArtifactSet(pinned_artifacts) # Copy, because we're modifying it.
for jars in jars_by_key.values():
for i, dep in enumerate(jars):
direct_coord = M2Coordinate.create(dep)
managed_coord = artifact_set[direct_coord]
if direct_coord.rev != managed_coord.rev:
# It may be necessary to actually change the version number of the jar we want to resolve
# here, because overrides do not apply directly (they are exclusively transitive). This is
# actually a good thing, because it gives us more control over what happens.
coord = manager.resolve_version_conflict(managed_coord, direct_coord, force=dep.force)
jars[i] = dep.copy(rev=coord.rev)
elif dep.force:
# If this dependency is marked as 'force' and there is no version conflict, use the normal
# pants behavior for 'force'.
artifact_set.put(direct_coord)
dependencies = [cls._generate_jar_template(jars) for jars in jars_by_key.values()]
# As it turns out force is not transitive - it only works for dependencies pants knows about
# directly (declared in BUILD files - present in generated ivy.xml). The user-level ivy docs
# don't make this clear [1], but the source code docs do (see isForce docs) [2]. I was able to
# edit the generated ivy.xml and use the override feature [3] though and that does work
# transitively as you'd hope.
#
# [1] http://ant.apache.org/ivy/history/2.3.0/settings/conflict-managers.html
# [2] https://svn.apache.org/repos/asf/ant/ivy/core/branches/2.3.0/
# src/java/org/apache/ivy/core/module/descriptor/DependencyDescriptor.java
# [3] http://ant.apache.org/ivy/history/2.3.0/ivyfile/override.html
overrides = [cls._generate_override_template(_coord) for _coord in artifact_set]
excludes = [cls._generate_exclude_template(exclude) for exclude in excludes]
template_data = TemplateData(
org=org,
module=name,
extra_configurations=extra_configurations,
dependencies=dependencies,
excludes=excludes,
overrides=overrides)
template_relpath = os.path.join('templates', 'ivy_utils', 'ivy.xml.mustache')
cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
@classmethod
def generate_fetch_ivy(cls, jars, ivyxml, confs, resolve_hash_name):
"""Generates an ivy xml with all jars marked as intransitive using the all conflict manager."""
org = IvyUtils.INTERNAL_ORG_NAME
name = resolve_hash_name
extra_configurations = [conf for conf in confs if conf and conf != 'default']
# Use org name _and_ rev so that we can have dependencies with different versions. This will
# allow for batching fetching if we want to do that.
jars_by_key = OrderedDict()
for jar in jars:
jars_by_key.setdefault((jar.org, jar.name, jar.rev), []).append(jar)
dependencies = [cls._generate_fetch_jar_template(_jars) for _jars in jars_by_key.values()]
template_data = TemplateData(org=org,
module=name,
extra_configurations=extra_configurations,
dependencies=dependencies)
template_relpath = os.path.join('templates', 'ivy_utils', 'ivy_fetch.xml.mustache')
cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
@classmethod
def _write_ivy_xml_file(cls, ivyxml, template_data, template_relpath):
template_text = pkgutil.get_data(__name__, template_relpath).decode('utf-8')
generator = Generator(template_text, lib=template_data)
with safe_open(ivyxml, 'w') as output:
generator.write(output)
@classmethod
def calculate_classpath(cls, targets):
"""Creates a consistent classpath and list of excludes for the passed targets.
It also modifies the JarDependency objects' excludes to contain all the jars excluded by
provides.
:param iterable targets: List of targets to collect JarDependencies and excludes from.
:returns: A pair of a list of JarDependencies, and a set of excludes to apply globally.
"""
jars = OrderedDict()
global_excludes = set()
provide_excludes = set()
targets_processed = set()
# Support the ivy force concept when we sanely can for internal dep conflicts.
# TODO(John Sirois): Consider supporting / implementing the configured ivy revision picking
# strategy generally.
def add_jar(jar):
# TODO(John Sirois): Maven allows for depending on an artifact at one rev and one of its
      # attachments (classified artifacts) at another. Ivy does not allow this; the dependency
# can carry only 1 rev and that hosts multiple artifacts for that rev. This conflict
# resolution happens at the classifier level, allowing skew in a
# multi-artifact/multi-classifier dependency. We only find out about the skew later in
# `_generate_jar_template` below which will blow up with a conflict. Move this logic closer
# together to get a more clear validate, then emit ivy.xml then resolve flow instead of the
# spread-out validations happening here.
# See: https://github.com/pantsbuild/pants/issues/2239
coordinate = (jar.org, jar.name, jar.classifier)
existing = jars.get(coordinate)
jars[coordinate] = jar if not existing else cls._resolve_conflict(existing=existing,
proposed=jar)
def collect_jars(target):
if isinstance(target, JarLibrary):
for jar in target.jar_dependencies:
add_jar(jar)
def collect_excludes(target):
target_excludes = target.payload.get_field_value('excludes')
if target_excludes:
global_excludes.update(target_excludes)
def collect_provide_excludes(target):
if not (isinstance(target, ExportableJvmLibrary) and target.provides):
return
logger.debug('Automatically excluding jar {}.{}, which is provided by {}'.format(
target.provides.org, target.provides.name, target))
provide_excludes.add(Exclude(org=target.provides.org, name=target.provides.name))
def collect_elements(target):
targets_processed.add(target)
collect_jars(target)
collect_excludes(target)
collect_provide_excludes(target)
for target in targets:
target.walk(collect_elements, predicate=lambda target: target not in targets_processed)
# If a source dep is exported (ie, has a provides clause), it should always override
# remote/binary versions of itself, ie "round trip" dependencies.
# TODO: Move back to applying provides excludes as target-level excludes when they are no
# longer global.
if provide_excludes:
additional_excludes = tuple(provide_excludes)
new_jars = OrderedDict()
for coordinate, jar in jars.items():
new_jars[coordinate] = jar.copy(excludes=jar.excludes + additional_excludes)
jars = new_jars
return list(jars.values()), global_excludes
@classmethod
def _resolve_conflict(cls, existing, proposed):
if existing.rev is None:
return proposed
if proposed.rev is None:
return existing
if proposed == existing:
if proposed.force:
return proposed
return existing
elif existing.force and proposed.force:
raise cls.IvyResolveConflictingDepsError('Cannot force {}#{};{} to both rev {} and {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
))
elif existing.force:
logger.debug('Ignoring rev {} for {}#{};{} already forced to {}'.format(
proposed.rev, proposed.org, proposed.name, proposed.classifier or '', existing.rev
))
return existing
elif proposed.force:
logger.debug('Forcing {}#{};{} from {} to {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
))
return proposed
else:
if Revision.lenient(proposed.rev) > Revision.lenient(existing.rev):
logger.debug('Upgrading {}#{};{} from rev {} to {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev,
))
return proposed
else:
return existing
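  # Illustrative behavior of _resolve_conflict (derived from the code above): a forced rev wins
  # over an unforced proposal, two different forced revs raise IvyResolveConflictingDepsError,
  # and with no force flags the higher rev wins (e.g. existing 1.0 vs proposed 2.0 keeps 2.0).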
@classmethod
def _generate_jar_template(cls, jars):
global_dep_attributes = set(Dependency(org=jar.org,
name=jar.name,
rev=jar.rev,
mutable=jar.mutable,
force=jar.force,
transitive=jar.transitive)
for jar in jars)
if len(global_dep_attributes) != 1:
# TODO: Need to provide information about where these came from - could be
# far-flung JarLibrary targets. The jars here were collected from targets via
# `calculate_classpath` above so executing this step there instead may make more
# sense.
conflicting_dependencies = sorted(str(g) for g in global_dep_attributes)
raise cls.IvyResolveConflictingDepsError('Found conflicting dependencies:\n\t{}'
.format('\n\t'.join(conflicting_dependencies)))
jar_attributes = global_dep_attributes.pop()
excludes = set()
for jar in jars:
excludes.update(jar.excludes)
any_have_url = False
artifacts = OrderedDict()
for jar in jars:
ext = jar.ext
url = jar.get_url()
if url:
any_have_url = True
classifier = jar.classifier
artifact = Artifact(name=jar.name,
type_=ext or 'jar',
ext=ext,
url=url,
classifier=classifier)
artifacts[(ext, url, classifier)] = artifact
template = TemplateData(
org=jar_attributes.org,
module=jar_attributes.name,
version=jar_attributes.rev,
mutable=jar_attributes.mutable,
force=jar_attributes.force,
transitive=jar_attributes.transitive,
artifacts=list(artifacts.values()),
any_have_url=any_have_url,
excludes=[cls._generate_exclude_template(exclude) for exclude in excludes])
return template
@classmethod
def _generate_fetch_jar_template(cls, jars):
global_dep_attributes = set(Dependency(org=jar.org,
name=jar.name,
rev=jar.rev,
transitive=False,
mutable=jar.mutable,
force=True)
for jar in jars)
if len(global_dep_attributes) != 1:
      # If we batch fetches and assume the 'all' conflict manager, we could ignore these.
      # Leaving this check here for now.
conflicting_dependencies = sorted(str(g) for g in global_dep_attributes)
raise cls.IvyResolveConflictingDepsError('Found conflicting dependencies:\n\t{}'
.format('\n\t'.join(conflicting_dependencies)))
jar_attributes = global_dep_attributes.pop()
any_have_url = False
artifacts = OrderedDict()
for jar in jars:
ext = jar.ext
url = jar.get_url()
if url:
any_have_url = True
classifier = jar.classifier
artifact = Artifact(name=jar.name,
type_=ext or 'jar',
ext=ext,
url=url,
classifier=classifier)
artifacts[(ext, url, classifier)] = artifact
template = TemplateData(
org=jar_attributes.org,
module=jar_attributes.name,
version=jar_attributes.rev,
mutable=jar_attributes.mutable,
artifacts=list(artifacts.values()),
any_have_url=any_have_url,
excludes=[])
return template
| twitter/pants | src/python/pants/backend/jvm/ivy_utils.py | Python | apache-2.0 | 51,293 |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
OPTS = {"benchmark": [
# prepoll delay, timeout, poll interval
# "start": (0, 300, 1)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "start",
default=float(0),
help="Time to sleep after %s before polling"
" for status" % "start"),
cfg.FloatOpt("nova_server_%s_timeout" % "start",
default=float(300),
help="Server %s timeout" % "start"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "start",
default=float(1),
help="Server %s poll interval" % "start"),
# "stop": (0, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "stop",
default=float(0),
help="Time to sleep after %s before polling"
" for status" % "stop"),
cfg.FloatOpt("nova_server_%s_timeout" % "stop",
default=float(300),
help="Server %s timeout" % "stop"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "stop",
default=float(2),
help="Server %s poll interval" % "stop"),
# "boot": (1, 300, 1)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "boot",
default=float(1),
help="Time to sleep after %s before polling"
" for status" % "boot"),
cfg.FloatOpt("nova_server_%s_timeout" % "boot",
default=float(300),
help="Server %s timeout" % "boot"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "boot",
default=float(2),
help="Server %s poll interval" % "boot"),
# "delete": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "delete",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "delete"),
cfg.FloatOpt("nova_server_%s_timeout" % "delete",
default=float(300),
help="Server %s timeout" % "delete"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "delete",
default=float(2),
help="Server %s poll interval" % "delete"),
# "reboot": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "reboot",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "reboot"),
cfg.FloatOpt("nova_server_%s_timeout" % "reboot",
default=float(300),
help="Server %s timeout" % "reboot"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "reboot",
default=float(2),
help="Server %s poll interval" % "reboot"),
# "rebuild": (1, 300, 1)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "rebuild",
default=float(1),
help="Time to sleep after %s before polling"
" for status" % "rebuild"),
cfg.FloatOpt("nova_server_%s_timeout" % "rebuild",
default=float(300),
help="Server %s timeout" % "rebuild"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "rebuild",
default=float(1),
help="Server %s poll interval" % "rebuild"),
# "rescue": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "rescue",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "rescue"),
cfg.FloatOpt("nova_server_%s_timeout" % "rescue",
default=float(300),
help="Server %s timeout" % "rescue"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "rescue",
default=float(2),
help="Server %s poll interval" % "rescue"),
# "unrescue": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "unrescue",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "unrescue"),
cfg.FloatOpt("nova_server_%s_timeout" % "unrescue",
default=float(300),
help="Server %s timeout" % "unrescue"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "unrescue",
default=float(2),
help="Server %s poll interval" % "unrescue"),
# "suspend": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "suspend",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "suspend"),
cfg.FloatOpt("nova_server_%s_timeout" % "suspend",
default=float(300),
help="Server %s timeout" % "suspend"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "suspend",
default=float(2),
help="Server %s poll interval" % "suspend"),
# "resume": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "resume",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "resume"),
cfg.FloatOpt("nova_server_%s_timeout" % "resume",
default=float(300),
help="Server %s timeout" % "resume"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "resume",
default=float(2),
help="Server %s poll interval" % "resume"),
# "pause": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "pause",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "pause"),
cfg.FloatOpt("nova_server_%s_timeout" % "pause",
default=float(300),
help="Server %s timeout" % "pause"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "pause",
default=float(2),
help="Server %s poll interval" % "pause"),
# "unpause": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "unpause",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "unpause"),
cfg.FloatOpt("nova_server_%s_timeout" % "unpause",
default=float(300),
help="Server %s timeout" % "unpause"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "unpause",
default=float(2),
help="Server %s poll interval" % "unpause"),
# "shelve": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "shelve",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "shelve"),
cfg.FloatOpt("nova_server_%s_timeout" % "shelve",
default=float(300),
help="Server %s timeout" % "shelve"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "shelve",
default=float(2),
help="Server %s poll interval" % "shelve"),
# "unshelve": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "unshelve",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "unshelve"),
cfg.FloatOpt("nova_server_%s_timeout" % "unshelve",
default=float(300),
help="Server %s timeout" % "unshelve"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "unshelve",
default=float(2),
help="Server %s poll interval" % "unshelve"),
# "image_create": (0, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "image_create",
default=float(0),
help="Time to sleep after %s before polling"
" for status" % "image_create"),
cfg.FloatOpt("nova_server_%s_timeout" % "image_create",
default=float(300),
help="Server %s timeout" % "image_create"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "image_create",
default=float(2),
help="Server %s poll interval" % "image_create"),
# "image_delete": (0, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "image_delete",
default=float(0),
help="Time to sleep after %s before polling"
" for status" % "image_delete"),
cfg.FloatOpt("nova_server_%s_timeout" % "image_delete",
default=float(300),
help="Server %s timeout" % "image_delete"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "image_delete",
default=float(2),
help="Server %s poll interval" % "image_delete"),
# "resize": (2, 400, 5)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "resize",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "resize"),
cfg.FloatOpt("nova_server_%s_timeout" % "resize",
default=float(400),
help="Server %s timeout" % "resize"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "resize",
default=float(5),
help="Server %s poll interval" % "resize"),
# "resize_confirm": (0, 200, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "resize_confirm",
default=float(0),
help="Time to sleep after %s before polling"
" for status" % "resize_confirm"),
cfg.FloatOpt("nova_server_%s_timeout" % "resize_confirm",
default=float(200),
help="Server %s timeout" % "resize_confirm"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "resize_confirm",
default=float(2),
help="Server %s poll interval" % "resize_confirm"),
# "resize_revert": (0, 200, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "resize_revert",
default=float(0),
help="Time to sleep after %s before polling"
" for status" % "resize_revert"),
cfg.FloatOpt("nova_server_%s_timeout" % "resize_revert",
default=float(200),
help="Server %s timeout" % "resize_revert"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "resize_revert",
default=float(2),
help="Server %s poll interval" % "resize_revert"),
# "live_migrate": (1, 400, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "live_migrate",
default=float(1),
help="Time to sleep after %s before polling"
" for status" % "live_migrate"),
cfg.FloatOpt("nova_server_%s_timeout" % "live_migrate",
default=float(400),
help="Server %s timeout" % "live_migrate"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "live_migrate",
default=float(2),
help="Server %s poll interval" % "live_migrate"),
# "migrate": (1, 400, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "migrate",
default=float(1),
help="Time to sleep after %s before polling"
" for status" % "migrate"),
cfg.FloatOpt("nova_server_%s_timeout" % "migrate",
default=float(400),
help="Server %s timeout" % "migrate"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "migrate",
default=float(2),
help="Server %s poll interval" % "migrate"),
# "detach":
cfg.FloatOpt("nova_detach_volume_timeout",
default=float(200),
help="Nova volume detach timeout"),
cfg.FloatOpt("nova_detach_volume_poll_interval",
default=float(2),
help="Nova volume detach poll interval")
]}
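# Illustrative usage sketch (assumption, not part of this module): these options are typically
# registered on an oslo.config ConfigOpts object and read back by group and name, e.g.
#   from oslo_config import cfg
#   CONF = cfg.CONF
#   CONF.register_opts(OPTS["benchmark"], group="benchmark")
#   CONF.benchmark.nova_server_boot_timeout  # -> 300.0 by default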
| yeming233/rally | rally/plugins/openstack/cfg/nova.py | Python | apache-2.0 | 12,529 |
# -*- coding: utf-8 -*-
'''
Created on Mar 12, 2012
@author: moloch
Copyright 2012
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import re
from uuid import uuid4
from datetime import datetime
from sqlalchemy import Column
from sqlalchemy.types import DateTime, Integer, String
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.declarative import declarative_base
generate_uuid = lambda: str(uuid4())
class _DatabaseObject(object):
''' All game objects inherit from this object '''
@declared_attr
def __tablename__(self):
''' Converts name from camel case to snake case '''
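        # e.g. a class named "PasswordToken" (hypothetical) maps to table "password_token"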
name = self.__name__
return (
name[0].lower() +
re.sub(r'([A-Z])',
lambda letter: "_" + letter.group(0).lower(), name[1:]
)
)
id = Column(Integer, unique=True, primary_key=True) # lint:ok
uuid = Column(String(36), unique=True, default=generate_uuid)
created = Column(DateTime, default=datetime.now)
# Create an instance called "BaseObject"
DatabaseObject = declarative_base(cls=_DatabaseObject)
| lunarca/fngrpt | models/BaseModels.py | Python | apache-2.0 | 1,637 |
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import logging.handlers
log = logging.getLogger('imc')
console = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
def enable_file_logging(filename="imcsdk.log"):
file_handler = logging.handlers.RotatingFileHandler(
filename, maxBytes=10*1024*1024, backupCount=5)
log.addHandler(file_handler)
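# Illustrative usage sketch (assumption): combine file logging with the console handler
# configured in this module, e.g.
#   from imcsdk import enable_file_logging, set_log_level
#   enable_file_logging("/tmp/imcsdk.log")
#   set_log_level(logging.INFO)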
def set_log_level(level=logging.DEBUG):
"""
Allows setting log level
Args:
        level: logging level - import logging and pass enums from it (INFO/DEBUG/ERROR/etc.)
Returns:
None
Example:
from imcsdk import set_log_level
import logging
set_log_level(logging.INFO)
"""
log.setLevel(level)
console.setLevel(level)
set_log_level(logging.DEBUG)
log.addHandler(console)
if os.path.exists('/tmp/imcsdk_debug'):
enable_file_logging()
__author__ = 'Cisco Systems'
__email__ = '[email protected]'
__version__ = '0.9.3.1'
| ragupta-git/ImcSdk | imcsdk/__init__.py | Python | apache-2.0 | 1,616 |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
# Configure the baseURL
baseUrl = "https://www.expedia.es"
# Create a webDriver instance and maximize window
driver = webdriver.Firefox()
driver.maximize_window()
# Navigate to the URL and set a 10-second implicit wait
driver.get(baseUrl)
driver.implicitly_wait(10)
# Find and click on element "Flights"
# Find departure textbox and type "Barcelona"
# Find destination textbox and type "Madrid"
# Find departure date field and type "23/11/2017"
# Close Calendar
# Find the "Find" button and click on
# Quit driver
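# Possible implementation sketch for the steps above (assumption: the element locators below are
# illustrative and may not match Expedia's current markup):
# driver.find_element(By.ID, "tab-flight-tab-hp").click()
# driver.find_element(By.ID, "flight-origin-hp-flight").send_keys("Barcelona")
# driver.find_element(By.ID, "flight-destination-hp-flight").send_keys("Madrid")
# driver.find_element(By.ID, "flight-departing-hp-flight").send_keys("23/11/2017")
# driver.find_element(By.CLASS_NAME, "datepicker-close").click()
# driver.find_element(By.CSS_SELECTOR, "#gcw-flights-form-hp-flight button[type='submit']").click()
# driver.quit()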
| twiindan/selenium_lessons | 04_Selenium/exercices/expedia.py | Python | apache-2.0 | 654 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""This library provides a set of classes and functions that helps train models.
## Optimizers
The Optimizer base class provides methods to compute gradients for a loss and
apply gradients to variables. A collection of subclasses implement classic
optimization algorithms such as GradientDescent and Adagrad.
You never instantiate the Optimizer class itself, but instead instantiate one
of the subclasses.
@@Optimizer
@@GradientDescentOptimizer
@@AdadeltaOptimizer
@@AdagradOptimizer
@@MomentumOptimizer
@@AdamOptimizer
@@FtrlOptimizer
@@RMSPropOptimizer
## Gradient Computation
TensorFlow provides functions to compute the derivatives for a given
TensorFlow computation graph, adding operations to the graph. The
optimizer classes automatically compute derivatives on your graph, but
creators of new Optimizers or expert users can call the lower-level
functions below.
@@gradients
@@AggregationMethod
@@stop_gradient
## Gradient Clipping
TensorFlow provides several operations that you can use to add clipping
functions to your graph. You can use these functions to perform general data
clipping, but they're particularly useful for handling exploding or vanishing
gradients.
@@clip_by_value
@@clip_by_norm
@@clip_by_average_norm
@@clip_by_global_norm
@@global_norm
## Decaying the learning rate
@@exponential_decay
## Moving Averages
Some training algorithms, such as GradientDescent and Momentum often benefit
from maintaining a moving average of variables during optimization. Using the
moving averages for evaluations often improve results significantly.
@@ExponentialMovingAverage
## Coordinator and QueueRunner
See [Threading and Queues](../../how_tos/threading_and_queues/index.md)
for how to use threads and queues. For documentation on the Queue API,
see [Queues](../../api_docs/python/io_ops.md#queues).
@@Coordinator
@@QueueRunner
@@add_queue_runner
@@start_queue_runners
## Distributed execution
See [Distributed TensorFlow](../../how_tos/distributed/index.md) for
more information about how to configure a distributed TensorFlow program.
@@Server
@@Supervisor
@@SessionManager
@@ClusterSpec
@@replica_device_setter
## Summary Operations
The following ops output
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffers as serialized string tensors.
You can fetch the output of a summary op in a session, and pass it to
a [SummaryWriter](../../api_docs/python/train.md#SummaryWriter) to append it
to an event file. Event files contain
[`Event`](https://www.tensorflow.org/code/tensorflow/core/util/event.proto)
protos that can contain `Summary` protos along with the timestamp and
step. You can then use TensorBoard to visualize the contents of the
event files. See [TensorBoard and
Summaries](../../how_tos/summaries_and_tensorboard/index.md) for more
details.
@@scalar_summary
@@image_summary
@@audio_summary
@@histogram_summary
@@zero_fraction
@@merge_summary
@@merge_all_summaries
## Adding Summaries to Event Files
See [Summaries and
TensorBoard](../../how_tos/summaries_and_tensorboard/index.md) for an
overview of summaries, event files, and visualization in TensorBoard.
@@SummaryWriter
@@summary_iterator
## Training utilities
@@global_step
@@write_graph
"""
# pylint: enable=line-too-long
# Optimizers.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# pylint: disable=g-bad-import-order,unused-import
from tensorflow.python.ops import gradients
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training.adadelta import AdadeltaOptimizer
from tensorflow.python.training.adagrad import AdagradOptimizer
from tensorflow.python.training.proximal_adagrad import ProximalAdagradOptimizer
from tensorflow.python.training.adam import AdamOptimizer
from tensorflow.python.training.ftrl import FtrlOptimizer
from tensorflow.python.training.momentum import MomentumOptimizer
from tensorflow.python.training.moving_averages import ExponentialMovingAverage
from tensorflow.python.training.optimizer import Optimizer
from tensorflow.python.training.rmsprop import RMSPropOptimizer
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
from tensorflow.python.training.proximal_gradient_descent import ProximalGradientDescentOptimizer
from tensorflow.python.training.sync_replicas_optimizer import SyncReplicasOptimizer
# Utility classes for training.
from tensorflow.python.training.coordinator import Coordinator
from tensorflow.python.training.coordinator import LooperThread
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.training.queue_runner import *
# For the module level doc.
from tensorflow.python.training import input as _input
from tensorflow.python.training.input import *
from tensorflow.python.training.device_setter import replica_device_setter
from tensorflow.python.training.saver import generate_checkpoint_state_proto
from tensorflow.python.training.saver import get_checkpoint_state
from tensorflow.python.training.saver import latest_checkpoint
from tensorflow.python.training.saver import Saver
from tensorflow.python.training.saver import update_checkpoint_state
from tensorflow.python.training.saver import export_meta_graph
from tensorflow.python.training.saver import import_meta_graph
from tensorflow.python.training.session_manager import SessionManager
from tensorflow.python.training.summary_io import summary_iterator
from tensorflow.python.training.summary_io import SummaryWriter
from tensorflow.python.training.supervisor import Supervisor
from tensorflow.python.training.training_util import write_graph
from tensorflow.python.training.training_util import global_step
from tensorflow.python.pywrap_tensorflow import NewCheckpointReader
# Training data protos.
from tensorflow.core.example.example_pb2 import *
from tensorflow.core.example.feature_pb2 import *
from tensorflow.core.protobuf.saver_pb2 import *
# Utility op. Open Source. TODO(touts): move to nn?
from tensorflow.python.training.learning_rate_decay import exponential_decay
# Distributed computing support
from tensorflow.core.protobuf.tensorflow_server_pb2 import ClusterDef
from tensorflow.core.protobuf.tensorflow_server_pb2 import JobDef
from tensorflow.core.protobuf.tensorflow_server_pb2 import ServerDef
from tensorflow.python.training.server_lib import ClusterSpec
from tensorflow.python.training.server_lib import Server
from tensorflow.python.util.all_util import make_all
# Include extra modules for docstrings because:
# * Input methods in tf.train are documented in io_ops.
# * Saver methods in tf.train are documented in state_ops.
__all__ = make_all(__name__, [sys.modules[__name__], io_ops, state_ops])
# Symbols whitelisted for export without documentation.
# TODO(cwhipkey): review these and move to contrib or expose through
# documentation.
__all__.extend([
"BytesList",
"Example",
"Feature",
"FeatureList",
"FeatureLists",
"Features",
"FloatList",
"Int64List",
"LooperThread",
"SaverDef",
"SequenceExample",
"export_meta_graph",
"generate_checkpoint_state_proto",
"import_meta_graph",
"queue_runner",
])
| TakayukiSakai/tensorflow | tensorflow/python/training/training.py | Python | apache-2.0 | 8,011 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
author: sanja7s
---------------
plot the distribution
"""
import os
import datetime as dt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from collections import defaultdict
from matplotlib import colors
from pylab import MaxNLocator
import pylab as pl
from mpl_toolkits.axes_grid import inset_locator
matplotlib.style.use('ggplot')
IN_DIR = "../../data/timelines"
os.chdir(IN_DIR)
font = {'family' : 'sans-serif',
'variant' : 'normal',
'weight' : 'light',
'size' : 14}
grid = {'color' : 'gray',
'alpha' : 0.5,
'linestyle' : '-.'}
lines = {'color' : 'gray'}
#xticks = {'color' : 'gray'}
matplotlib.rc('font', **font)
matplotlib.rc('grid', **grid)
matplotlib.rc('lines', **lines)
#matplotlib.rc('ticks', **ticks)
def read_in_plug_data(node):
f_in = 'node_' + node +'_plug.csv'
distr = defaultdict(int)
with open(f_in, 'r') as f:
for line in f:
# n are irrelevant
n, n, n, t, n, plug, n, n, n = line.strip().split('"')
t = dt.datetime.fromtimestamp(int(t))
plug = float(plug)
distr[t] = plug
return distr
def read_in_num_jobs_data(node):
f_in = 'node_' + node +'_plug.csv'
distr = defaultdict(int)
with open(f_in, 'r') as f:
for line in f:
n, n, n, t, n, n, n, jobs_list, n6 = line.strip().split('"')
t = dt.datetime.fromtimestamp(int(t))
jobs = jobs_list.split(',')
if jobs_list == "":
distr[t] = 0
else:
distr[t] = len(jobs)
return distr
def read_in_CPU_data(node):
f_in = 'node_' + node +'_CPUMEM.csv'
distr = defaultdict(int)
with open(f_in, 'r') as f:
for line in f:
n, n, n, t, n, n, n, CPU1, n, CPU2, n, n, n, n, n = line.strip().split('"')
t = dt.datetime.fromtimestamp(int(t))
CPU1 = float(CPU1)
CPU2 = float(CPU2)
distr[t] = (CPU1, CPU2)
return distr
def read_in_MEM_data(node):
f_in = 'node_' + node +'_CPUMEM.csv'
distr = defaultdict(int)
with open(f_in, 'r') as f:
for line in f:
n, n, n, t, n, n, n, n, n, n, n, MEM1, n, MEM2, n = line.strip().split('"')
t = dt.datetime.fromtimestamp(int(t))
MEM1 = float(MEM1)
MEM2 = float(MEM2)
distr[t] = (MEM1, MEM2)
return distr
def read_in_rb_data(node):
f_in = 'node_' + node +'_rb.csv'
distr = defaultdict(int)
with open(f_in, 'r') as f:
for line in f:
n, n, n, t, n, n, n, r, n, b, n = line.strip().split('"')
t = dt.datetime.fromtimestamp(int(t))
r = int(r)
b = int(b)
distr[t] = (r, b)
return distr
def plot_plug_timeline(node):
print 'Plotting plug values'
d = read_in_plug_data(node)
dates = d.keys()
X = pd.to_datetime(dates)
values = [v if v > 0 else 0 for v in d.values()]
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print min(values), max(values)
fig, ax = plt.subplots()
ax.scatter(X, values, marker='s', s=1)
#ax.plot(X, values)
fig.autofmt_xdate()
ax.set_xlabel('time')
ax.set_ylabel('plug value')
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
#plt.show()
plt.savefig('plug_timeline_node_' + node + '.png')
return fig, ax, plt
def plot_plug_timeline_v2(node):
print 'Plotting plug values'
d = read_in_plug_data(node)
dates = d.keys()
X = pd.to_datetime(dates)
values = [v if v > 0 else 0 for v in d.values()]
ts = pd.Series(values, index = X)
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print min(values), max(values)
fig, ax = plt.subplots()
ts.plot(color = 'darkblue')
for tl in ax.get_yticklabels():
tl.set_color('darkblue')
fig.autofmt_xdate()
ax.set_xlabel('time')
ax.set_ylabel('plug value', color='darkblue')
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
ymin = 240
ymax = 280
if min(values) < 160:
ymin = min(values) - 10
if max(values) > 250:
ymax = max(values) + 10
plt.ylim(ymin, ymax)
#plt.savefig(cwd + '/multiple_v2/plug_only/plug_timeline_node_' + node + '_v2.png')
return fig, ax, plt
def plot_plug_and_num_jobs_timeline(node):
print 'Plotting num of jobs values'
d = read_in_num_jobs_data(node)
dates = d.keys()
X = pd.to_datetime(dates)
values = d.values()
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print min(values), max(values)
fig, ax1, plt = plot_plug_timeline_v2(node)
ax2 = ax1.twinx()
ax2.scatter(X, values,
marker='s', color='red', s=7)
ax2.set_ylabel('# of jobs', color='red')
ya = ax2.get_yaxis()
ya.set_major_locator(MaxNLocator(integer=True))
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
for tl in ax2.get_yticklabels():
tl.set_color('r')
cwd = os.getcwd()
print cwd
plt.savefig(cwd + '/lowest_norm_stdev/SandyBridge/num_jobs_and_plug_timeline_node_' + node + '_v2.png')
def plot_plug_and_CPUs_timeline(node):
print 'Plotting CPUs values'
d = read_in_CPU_data(node)
dates = d.keys()
X = pd.to_datetime(dates)
values1 = []
values2 = []
for el in d.values():
if el[0] > 0:
v1 = el[0]
else:
v1 = 0
values1.append(v1)
if el[1] > 0:
v2 = el[1]
else:
v2 = 0
values2.append(v2)
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print 'Min and max CPU1 ', min(values1), max(values1)
print 'Min and max CPU2 ', min(values2), max(values2)
fig, ax1, plt = plot_plug_timeline_v2(node)
ax2 = ax1.twinx()
ts1 = pd.Series(values1, index = X)
ax2.scatter(X, values1, marker='s', color='red', s=4, label = 'CPU1')
#ts1.plot(color='red', label = 'CPU1')
ts2 = pd.Series(values2, index = X)
ax2.scatter(X, values2, marker='s', color='magenta', s=4, label = 'CPU2')
#ts2.plot(color='magenta', label = 'CPU2')
ax2.set_ylabel('CPU values', color='red')
ya = ax2.get_yaxis()
ya.set_major_locator(MaxNLocator(integer=True))
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
for tl in ax2.get_yticklabels():
tl.set_color('r')
handles, labels = ax2.get_legend_handles_labels()
l = ax2.legend(handles, labels, loc=3)
for text in l.get_texts():
text.set_color('gray')
plt.savefig('lowest_norm_stdev/SandyBridge/CPUs_plug_timeline_node_' + node + '.png')
def plot_plug_and_MEM_timeline(node):
print 'Plotting DRAM values'
d = read_in_MEM_data(node)
dates = d.keys()
X = pd.to_datetime(dates)
values1 = [v[0] if v[0] > -1 else -1 for v in d.values()]
values2 = [v[1] if v[1] > -1 else -1 for v in d.values()]
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print 'Min and max MEM1 ', min(values1), max(values1)
print 'Min and max MEM2 ', min(values2), max(values2)
fig, ax1, plt = plot_plug_timeline(node)
ax2 = ax1.twinx()
ax2.scatter(X, values1,
marker='s', color='darkgreen', s=4, label = 'DRAM1')
ax2.scatter(X, values2,
marker='s', color='olive', s=4, label = 'DRAM2')
ax2.set_ylabel('DRAM values', color='olive')
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
for tl in ax2.get_yticklabels():
tl.set_color('olive')
handles, labels = ax2.get_legend_handles_labels()
l = ax2.legend(handles, labels, loc=1)
for text in l.get_texts():
text.set_color('gray')
plt.savefig('MEM_plug_timeline_node_' + node + '.png')
def plot_plug_and_rb_timeline(node):
print 'Plotting r b values'
d = read_in_rb_data(node)
dates = d.keys()
X = pd.to_datetime(dates)
values1 = [v[0] if v[0] > 0 else 0 for v in d.values()]
values2 = [v[1] if v[1] > 0 else 0 for v in d.values()]
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print 'Min and max MEM1 ', min(values1), max(values1)
print 'Min and max MEM2 ', min(values2), max(values2)
fig, ax1, plt = plot_plug_timeline(node)
ax2 = ax1.twinx()
ax2.scatter(X, values1,
marker='s', color='tomato', s=3, label = 'r')
ax2.scatter(X, values2,
marker='s', color='sage', s=3, label = 'b')
ax2.set_ylabel('r and b values', color='sage')
ya = ax2.get_yaxis()
ya.set_major_locator(MaxNLocator(integer=True))
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
for tl in ax2.get_yticklabels():
tl.set_color('sage')
handles, labels = ax2.get_legend_handles_labels()
l = ax2.legend(handles, labels, loc=1)
for text in l.get_texts():
text.set_color('gray')
plt.savefig('rb_plug_timeline_node_' + node + '.png')
#plot_plug_and_num_jobs_timeline('c48')
#plot_plug_and_num_jobs_timeline('c578')
#plot_plug_and_num_jobs_timeline('c578')
#plot_plug_and_CPUs_timeline('c577')
"""
plot_plug_and_MEM_timeline('c48')
plot_plug_and_MEM_timeline('c577')
plot_plug_and_MEM_timeline('c31')
plot_plug_and_MEM_timeline('c63')
plot_plug_and_MEM_timeline('c750')
plot_plug_and_MEM_timeline('c34')
"""
"""
plot_plug_and_rb_timeline('c48')
plot_plug_and_rb_timeline('c577')
plot_plug_and_rb_timeline('c31')
plot_plug_and_rb_timeline('c63')
plot_plug_and_rb_timeline('c750')
plot_plug_and_rb_timeline('c34')
"""
"""
# for the nodes running only one job
plot_plug_timeline('c424')
plot_plug_and_num_jobs_timeline('c424')
"""
# this is for the only node that did not run any jobs
#plot_plug_and_num_jobs_timeline('c42')
#plot_plug_timeline_v2('c42')
"""
# for random nodes
for node in [ 'c31', 'c34', 'c42', 'c48', 'c63', 'c329', 'c424', \
'c577', 'c578', 'c604', 'c672', 'c735', 'c750']:
#plot_plug_timeline_v2(node)
plot_plug_and_num_jobs_timeline(node)
"""
# for the nodes running only one unique (same) job all the time
#plot_plug_timeline('c7')
#plot_plug_and_num_jobs_timeline('c7')
"""
for node in ['c9', 'c10', 'c11', 'c12', 'c13', 'c16', 'c18', 'c19', 'c20']:
#plot_plug_timeline_v2(node)
#plot_plug_and_num_jobs_timeline(node)
plot_plug_and_CPUs_timeline(node)
"""
#plot_plug_and_CPUs_timeline('c4')
"""
# these nodes have the highest normalized stdev of plug
for node in ['c849', 'c666', 'c747', 'c908', 'c658', 'c620', 'c85', 'c364']:
#plot_plug_timeline_v2(node)
plot_plug_and_num_jobs_timeline(node)
plot_plug_and_CPUs_timeline(node)
"""
"""
# these are some of the nodes that have the smallest normalized stdev of plug
# SandyBridge
for node in ['c423']:
#plot_plug_timeline_v2(node)
plot_plug_and_num_jobs_timeline(node)
plot_plug_and_CPUs_timeline(node)
"""
| sanja7s/EEDC | src/timelines/node_plug_timeline.py | Python | apache-2.0 | 10,157 |
"""
lexical chain module for text tiling
"""
from tile_reader import TileReader
from scoring import boundarize, depth_scoring, window_diff
# ======================================================================================================================
# Main
# ======================================================================================================================
class LexicalChains(object):
def __init__(self):
self.sentences = []
self.actives = {}
self.gap_scores = []
self.boundary_vector = []
def analyze(self, sents, window=4, pos_filter=('PUNCT', 'SYM', 'SPACE', 'DET'), boundary_type='liberal'):
"""
Set attributes
:param sents: (list) spacy-analyzed sentences
:param window: (int) distance threshold within which chains are considered active
:param boundary_type: (str) 'liberal' or 'conservative' boundary scoring
:param pos_filter: (tuple) spacy pos_ labels to exclude (i.e. a pos-based stoplist)
:return: void
"""
self.sentences = self._preproc(sents, pos_filter)
self.actives = self._get_actives(self.sentences, window)
self.gap_scores = [len(self.actives[k]) for k in self.actives.keys()]
self.boundary_vector = self._get_boundaries(self.gap_scores, boundary_type)
@staticmethod
def _preproc(sentences, pos_filter):
"""
Filters out stop POSs and lemmatizes sentences
:param sentences: list of tokenized sentences in doc
:param pos_filter: tuple of spacy pos_ labels to filter out
:return: list
"""
filtered = [[tok for tok in sent if tok.pos_ not in pos_filter] for sent in sentences]
lemmatized = [[tok.lemma_ for tok in sent] for sent in filtered]
return lemmatized
@staticmethod
def _get_actives(sents, window):
"""
Get active lexical chains for each gap between sentences
:param sents: list of tokenized sentences
:param window: difference threshold over which lexical chains are considered active
:return: dictionary containing active lexical chains for each sentence transition
"""
# initialize active chains dictionary
actives = {}
for i in xrange(len(sents)-1):
actives[i] = set()
# loop over all sentences
for sent in sents:
# get index and unique tokens from current sentence
i = sents.index(sent)
uniques_i = set(sent)
# loop over all sentences within dist thresh of current
for diff in xrange(window, 0, -1):
# back off diff when there are less sentences left than dist thresh
while not i + diff < len(sents):
diff -= 1
# find shared tokens between current sent[i] and sent[i+diff]
n = i + diff
uniques_n = set(sents[n])
intersection = uniques_i.intersection(uniques_n)
# add the intersections to all affected transitions between sent[i] and sent[i+diff]
for k in list(xrange(diff)):
[actives[i+k].add(word) for word in intersection]
return actives
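    # Worked example (illustrative): for sents = [["a", "b"], ["b", "c"], ["a"]] and window=2,
    # sentence 0 shares "a" with sentence 2 and "b" with sentence 1, so this returns
    # {0: {"a", "b"}, 1: {"a"}} and the resulting gap scores in analyze() are [2, 1].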
@staticmethod
def _get_boundaries(scores, boundary_type):
"""
Calculate boundaries from gap scores
:param scores: list containing # of active chains across each sentence gap in doc
:param boundary_type: string indicating 'liberal' or 'conservative' boundary scoring
:return: list indicating which sentences in doc constitute beginnings of new topic tiles
"""
d_scores = depth_scoring(scores)
boundaries = boundarize(d_scores, type=boundary_type)
boundary_vector = [1] + [0 if i not in boundaries else 1 for i in xrange(len(scores))]
return boundary_vector
# ======================================================================================================================
# Test if invoked directly
# ======================================================================================================================
if __name__ == "__main__":
from decimal import Decimal
import matplotlib.pyplot as plt
import sys
import os
# set doc
try:
doc = sys.argv[1]
except IndexError:
sys.exit("ERROR: Expected 1 arg, got {}\nUsage: (python) lexical_chains.py <docname> <docpath>".format(
len(sys.argv)-1))
# get doc path
path = os.path.dirname(__file__)
if doc in ('coron','athens','chatham','cuba','merida'):
doc_path = os.path.join(path, os.path.join("data", "GUM_voyage_{}_noheads.txt".format(doc)))
else:
raise ValueError("unrecognized document: {}".format(doc))
# get gold
gold_file = os.path.join(path, os.path.join("data", "GUM_5_gold_tiles.txt"))
with open(gold_file) as f:
boundaries = [[int(x) for x in line.split(",")] for line in f.read().split()]
texts = ["athens", "chatham", "coron", "cuba", "merida"]
gold_dict = dict(zip(texts, boundaries))
gold = gold_dict[doc]
# Instantiate TileReader
reader = TileReader()
reader.read(doc_path, newline_tokenization=True)
sents = reader.sentences
# Instantiate Lexical Chains
chains = LexicalChains()
chains.analyze(sents)
# compare gold and predicted boundaries
print "GOLD: {}".format(gold)
print "MINE: {}".format(chains.boundary_vector)
# get window_diff
window_size = len(gold)/4
wdiff = window_diff(chains.boundary_vector, gold, window_size)
print "Window Diff: {}".format(wdiff)
# Plot scores
scores = [0] + chains.gap_scores
plt.plot([x for x in xrange(len(scores))], scores)
for index, grp in enumerate(zip(gold, chains.boundary_vector)):
if 1 == grp[0] == grp[1]:
plt.axvline(x=index, color = 'green', linewidth='2.0')
elif 1 == grp[0] != grp[1]:
plt.axvline(x=index, color = 'red')
elif 1 == grp[1] != grp[0]:
plt.axvline(x=index, color = 'gray')
ymin, ymax = plt.ylim()
xmin, xmax = plt.xlim()
wdiff_rounded = round(Decimal(wdiff), 3)
plt.text(xmax-(xmax-xmin)/4,ymax+0.5, "window diff: {}".format(wdiff_rounded))
plt.show()
| cligu/compdisc | lexical_chains.py | Python | apache-2.0 | 6,335 |
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Actions related to task commands."""
import time
from drydock_provisioner.cli.action import CliAction
from drydock_provisioner.cli.const import TaskStatus
class TaskList(CliAction): # pylint: disable=too-few-public-methods
"""Action to list tasks."""
def __init__(self, api_client):
"""Object initializer.
:param DrydockClient api_client: The api client used for invocation.
"""
super().__init__(api_client)
self.logger.debug('TaskList action initialized')
def invoke(self):
"""Invoke execution of this action."""
return self.api_client.get_tasks()
class TaskCreate(CliAction): # pylint: disable=too-few-public-methods
"""Action to create tasks against a design."""
def __init__(self,
api_client,
design_ref,
action_name=None,
node_names=None,
rack_names=None,
node_tags=None,
block=False,
poll_interval=15):
"""Object initializer.
:param DrydockClient api_client: The api client used for invocation.
:param string design_ref: The URI reference to design documents
:param string action_name: The name of the action being performed for this task
:param List node_names: The list of node names to restrict action application
:param List rack_names: The list of rack names to restrict action application
:param List node_tags: The list of node tags to restrict action application
:param bool block: Whether to block CLI exit until task completes
:param integer poll_interval: Polling interval to query task status
"""
super().__init__(api_client)
self.design_ref = design_ref
self.action_name = action_name
self.logger.debug('TaskCreate action initialized for design=%s',
design_ref)
self.logger.debug('Action is %s', action_name)
self.logger.debug("Node names = %s", node_names)
self.logger.debug("Rack names = %s", rack_names)
self.logger.debug("Node tags = %s", node_tags)
self.block = block
self.poll_interval = poll_interval
if any([node_names, rack_names, node_tags]):
filter_items = {'filter_type': 'union'}
if node_names is not None:
filter_items['node_names'] = node_names
if rack_names is not None:
filter_items['rack_names'] = rack_names
            if node_tags is not None:
filter_items['node_tags'] = node_tags
self.node_filter = {
'filter_set_type': 'intersection',
'filter_set': [filter_items]
}
else:
self.node_filter = None
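        # Illustrative example (assumption): for node_names=['n1'] and rack_names=['r1'] the
        # resulting node_filter is
        #   {'filter_set_type': 'intersection',
        #    'filter_set': [{'filter_type': 'union',
        #                    'node_names': ['n1'], 'rack_names': ['r1']}]}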
def invoke(self):
"""Invoke execution of this action."""
task = self.api_client.create_task(
design_ref=self.design_ref,
task_action=self.action_name,
node_filter=self.node_filter)
if not self.block:
return task
task_id = task.get('task_id')
while True:
time.sleep(self.poll_interval)
task = self.api_client.get_task(task_id=task_id)
if task.get('status',
'') in [TaskStatus.Complete, TaskStatus.Terminated]:
return task
class TaskShow(CliAction): # pylint: disable=too-few-public-methods
"""Action to show a task's detial."""
def __init__(self, api_client, task_id, block=False, poll_interval=15):
"""Object initializer.
:param DrydockClient api_client: The api client used for invocation.
:param string task_id: the UUID of the task to retrieve
:param bool block: Whether to block CLI exit until task completes
:param integer poll_interval: Polling interval to query task status
"""
super().__init__(api_client)
self.task_id = task_id
self.logger.debug('TaskShow action initialized for task_id=%s,',
task_id)
self.block = block
self.poll_interval = poll_interval
def invoke(self):
"""Invoke execution of this action."""
task = self.api_client.get_task(task_id=self.task_id)
if not self.block:
return task
task_id = task.get('task_id')
while True:
time.sleep(self.poll_interval)
task = self.api_client.get_task(task_id=task_id)
            if task.get('status', '') in [TaskStatus.Complete, TaskStatus.Terminated]:
return task
| att-comdev/drydock | drydock_provisioner/cli/task/actions.py | Python | apache-2.0 | 5,257 |
"""
Helper functions for use by mac modules
.. versionadded:: 2016.3.0
"""
import logging
import os
import plistlib
import subprocess
import time
import xml.parsers.expat
import salt.grains.extra
import salt.modules.cmdmod
import salt.utils.args
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.timed_subprocess
from salt.exceptions import (
CommandExecutionError,
SaltInvocationError,
TimedProcTimeoutError,
)
try:
import pwd
except ImportError:
# The pwd module is not available on all platforms
pass
DEFAULT_SHELL = salt.grains.extra.shell()["shell"]
# Set up logging
log = logging.getLogger(__name__)
__virtualname__ = "mac_utils"
__salt__ = {
"cmd.run_all": salt.modules.cmdmod._run_all_quiet,
"cmd.run": salt.modules.cmdmod._run_quiet,
}
def __virtual__():
"""
Load only on Mac OS
"""
if not salt.utils.platform.is_darwin():
return (
False,
"The mac_utils utility could not be loaded: "
"utility only works on MacOS systems.",
)
return __virtualname__
def _run_all(cmd):
"""
Args:
cmd:
Returns:
"""
if not isinstance(cmd, list):
cmd = salt.utils.args.shlex_split(cmd, posix=False)
for idx, item in enumerate(cmd):
if not isinstance(cmd[idx], str):
cmd[idx] = str(cmd[idx])
cmd = " ".join(cmd)
run_env = os.environ.copy()
kwargs = {
"cwd": None,
"shell": DEFAULT_SHELL,
"env": run_env,
"stdin": None,
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
"with_communicate": True,
"timeout": None,
"bg": False,
}
try:
proc = salt.utils.timed_subprocess.TimedProc(cmd, **kwargs)
except OSError as exc:
raise CommandExecutionError(
"Unable to run command '{}' with the context '{}', reason: {}".format(
cmd, kwargs, exc
)
)
ret = {}
try:
proc.run()
except TimedProcTimeoutError as exc:
ret["stdout"] = str(exc)
ret["stderr"] = ""
ret["retcode"] = 1
ret["pid"] = proc.process.pid
return ret
out, err = proc.stdout, proc.stderr
if out is not None:
out = salt.utils.stringutils.to_str(out).rstrip()
if err is not None:
err = salt.utils.stringutils.to_str(err).rstrip()
ret["pid"] = proc.process.pid
ret["retcode"] = proc.process.returncode
ret["stdout"] = out
ret["stderr"] = err
return ret
def _check_launchctl_stderr(ret):
"""
helper class to check the launchctl stderr.
launchctl does not always return bad exit code
if there is a failure
"""
err = ret["stderr"].lower()
if "service is disabled" in err:
return True
return False
def execute_return_success(cmd):
"""
Executes the passed command. Returns True if successful
:param str cmd: The command to run
:return: True if successful, otherwise False
:rtype: bool
:raises: Error if command fails or is not supported
"""
ret = _run_all(cmd)
log.debug("Execute return success %s: %r", cmd, ret)
if ret["retcode"] != 0 or "not supported" in ret["stdout"].lower():
msg = "Command Failed: {}\n".format(cmd)
msg += "Return Code: {}\n".format(ret["retcode"])
msg += "Output: {}\n".format(ret["stdout"])
msg += "Error: {}\n".format(ret["stderr"])
raise CommandExecutionError(msg)
return True
def execute_return_result(cmd):
"""
Executes the passed command. Returns the standard out if successful
:param str cmd: The command to run
:return: The standard out of the command if successful, otherwise returns
an error
:rtype: str
:raises: Error if command fails or is not supported
"""
ret = _run_all(cmd)
if ret["retcode"] != 0 or "not supported" in ret["stdout"].lower():
msg = "Command Failed: {}\n".format(cmd)
msg += "Return Code: {}\n".format(ret["retcode"])
msg += "Output: {}\n".format(ret["stdout"])
msg += "Error: {}\n".format(ret["stderr"])
raise CommandExecutionError(msg)
return ret["stdout"]
def parse_return(data):
"""
Returns the data portion of a string that is colon separated.
:param str data: The string that contains the data to be parsed. Usually the
standard out from a command
For example:
``Time Zone: America/Denver``
will return:
``America/Denver``
"""
if ": " in data:
return data.split(": ")[1]
if ":\n" in data:
return data.split(":\n")[1]
else:
return data
def validate_enabled(enabled):
"""
Helper function to validate the enabled parameter. Boolean values are
converted to "on" and "off". String values are checked to make sure they are
either "on" or "off"/"yes" or "no". Integer ``0`` will return "off". All
other integers will return "on"
:param enabled: Enabled can be boolean True or False, Integers, or string
values "on" and "off"/"yes" and "no".
:type: str, int, bool
:return: "on" or "off" or errors
:rtype: str
"""
if isinstance(enabled, str):
if enabled.lower() not in ["on", "off", "yes", "no"]:
msg = (
"\nMac Power: Invalid String Value for Enabled.\n"
"String values must be 'on' or 'off'/'yes' or 'no'.\n"
"Passed: {}".format(enabled)
)
raise SaltInvocationError(msg)
return "on" if enabled.lower() in ["on", "yes"] else "off"
return "on" if bool(enabled) else "off"
def confirm_updated(value, check_fun, normalize_ret=False, wait=5):
"""
Wait up to ``wait`` seconds for a system parameter to be changed before
deciding it hasn't changed.
:param str value: The value indicating a successful change
:param function check_fun: The function whose return is compared with
``value``
:param bool normalize_ret: Whether to normalize the return from
``check_fun`` with ``validate_enabled``
:param int wait: The maximum amount of seconds to wait for a system
parameter to change
"""
for i in range(wait):
state = validate_enabled(check_fun()) if normalize_ret else check_fun()
log.debug(
"Confirm update try: %d func:%r state:%s value:%s",
i,
check_fun,
state,
value,
)
if value in state:
return True
time.sleep(1)
return False
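# Illustrative usage sketch (assumption): poll a hypothetical getter until it reports "on",
# normalizing its return through validate_enabled, e.g.
#   confirm_updated("on", some_get_state_function, normalize_ret=True, wait=10)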
def launchctl(sub_cmd, *args, **kwargs):
"""
Run a launchctl command and raise an error if it fails
Args: additional args are passed to launchctl
sub_cmd (str): Sub command supplied to launchctl
Kwargs: passed to ``cmd.run_all``
return_stdout (bool): A keyword argument. If true return the stdout of
the launchctl command
Returns:
bool: ``True`` if successful
str: The stdout of the launchctl command if requested
Raises:
CommandExecutionError: If command fails
CLI Example:
.. code-block:: bash
import salt.utils.mac_service
salt.utils.mac_service.launchctl('debug', 'org.cups.cupsd')
"""
# Get return type
return_stdout = kwargs.pop("return_stdout", False)
# Construct command
cmd = ["launchctl", sub_cmd]
cmd.extend(args)
# fix for https://github.com/saltstack/salt/issues/57436
if sub_cmd == "bootout":
kwargs["success_retcodes"] = [
36,
]
# Run command
kwargs["python_shell"] = False
kwargs = salt.utils.args.clean_kwargs(**kwargs)
ret = __salt__["cmd.run_all"](cmd, **kwargs)
error = _check_launchctl_stderr(ret)
# Raise an error or return successful result
if ret["retcode"] or error:
out = "Failed to {} service:\n".format(sub_cmd)
out += "stdout: {}\n".format(ret["stdout"])
out += "stderr: {}\n".format(ret["stderr"])
out += "retcode: {}".format(ret["retcode"])
raise CommandExecutionError(out)
else:
return ret["stdout"] if return_stdout else True
def _read_plist_file(root, file_name):
"""
:param root: The root path of the plist file
:param file_name: The name of the plist file
:return: An empty dictionary if the plist file was invalid, otherwise, a dictionary with plist data
"""
file_path = os.path.join(root, file_name)
log.debug("read_plist: Gathering service info for %s", file_path)
# Must be a plist file
if not file_path.lower().endswith(".plist"):
log.debug("read_plist: Not a plist file: %s", file_path)
return {}
# ignore broken symlinks
if not os.path.exists(os.path.realpath(file_path)):
log.warning("read_plist: Ignoring broken symlink: %s", file_path)
return {}
try:
with salt.utils.files.fopen(file_path, "rb") as handle:
plist = plistlib.load(handle)
except plistlib.InvalidFileException:
# Raised in python3 if the file is not XML.
# There's nothing we can do; move on to the next one.
log.warning(
'read_plist: Unable to parse "%s" as it is invalid XML: InvalidFileException.',
file_path,
)
return {}
except ValueError as err:
# fixes https://github.com/saltstack/salt/issues/58143
# choosing not to log a Warning as this would happen on BigSur+ machines.
log.debug(
"Caught ValueError: '%s', while trying to parse '%s'.", err, file_path
)
return {}
except xml.parsers.expat.ExpatError:
# Raised by py3 if the file is XML, but with errors.
log.warning(
'read_plist: Unable to parse "%s" as it is invalid XML: xml.parsers.expat.ExpatError.',
file_path,
)
return {}
if "Label" not in plist:
# not all launchd plists contain a Label key
log.debug(
"read_plist: Service does not contain a Label key. Skipping %s.", file_path
)
return {}
return {
"file_name": file_name,
"file_path": file_path,
"plist": plist,
}
def _available_services(refresh=False):
"""
This is a helper function for getting the available macOS services.
The strategy is to look through the known system locations for
launchd plist files, parse them, and use their information for
populating the list of services. Services can run without a plist
file present, but normally services which have an automated startup
will have a plist file, so this is a minor compromise.
"""
if "available_services" in __context__ and not refresh:
log.debug("Found context for available services.")
__context__["using_cached_services"] = True
return __context__["available_services"]
launchd_paths = {
"/Library/LaunchAgents",
"/Library/LaunchDaemons",
"/System/Library/LaunchAgents",
"/System/Library/LaunchDaemons",
}
agent_path = "/Users/{}/Library/LaunchAgents"
launchd_paths.update(
{
agent_path.format(user)
for user in os.listdir("/Users/")
if os.path.isdir(agent_path.format(user))
}
)
result = {}
for launch_dir in launchd_paths:
for root, dirs, files in salt.utils.path.os_walk(launch_dir):
for file_name in files:
data = _read_plist_file(root, file_name)
if data:
result[data["plist"]["Label"].lower()] = data
# put this in __context__ as this is a time consuming function.
# a fix for this issue. https://github.com/saltstack/salt/issues/48414
__context__["available_services"] = result
# this is a fresh gathering of services, set cached to false
__context__["using_cached_services"] = False
return result
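# Illustrative shape of the returned mapping (assumption, values are hypothetical):
#   {"com.example.agent": {"file_name": "com.example.agent.plist",
#                          "file_path": "/Library/LaunchAgents/com.example.agent.plist",
#                          "plist": {...}}}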
def available_services(refresh=False):
"""
Return a dictionary of all available services on the system
:param bool refresh: If you wish to refresh the available services
as this data is cached on the first run.
Returns:
dict: All available services
CLI Example:
.. code-block:: bash
import salt.utils.mac_service
salt.utils.mac_service.available_services()
"""
log.debug("Loading available services")
return _available_services(refresh)
def console_user(username=False):
"""
Gets the UID or Username of the current console user.
:return: The uid or username of the console user.
:param bool username: Whether to return the username of the console
user instead of the UID. Defaults to False
    :rtype: Integer of the UID, or a string of the username.
Raises:
CommandExecutionError: If we fail to get the UID.
CLI Example:
    .. code-block:: python
        import salt.utils.mac_utils
        salt.utils.mac_utils.console_user()
"""
try:
# returns the 'st_uid' stat from the /dev/console file.
uid = os.stat("/dev/console")[4]
except (OSError, IndexError):
# we should never get here but raise an error if so
raise CommandExecutionError("Failed to get a UID for the console user.")
if username:
return pwd.getpwuid(uid)[0]
return uid
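# Hedged usage sketch for console_user(); the example values are assumptions:
#     console_user()      # -> 501, the UID owning /dev/console
#     console_user(True)  # -> 'admin', the same user resolved via pwd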
def git_is_stub():
"""
Return whether macOS git is the standard OS stub or a real binary.
"""
# On a fresh macOS install, /usr/bin/git is a stub, which if
# accessed, triggers a UI dialog box prompting the user to install
# the developer command line tools. We don't want that! So instead,
# running the below command will return a path to the installed dev
# tools and retcode 0, or print a bunch of info to stderr and
# retcode 2.
try:
cmd = ["/usr/bin/xcode-select", "-p"]
_ = subprocess.check_call(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=1
)
log.debug("Xcode command line tools present")
return False
except subprocess.CalledProcessError:
log.debug("Xcode command line tools not present")
return True
| saltstack/salt | salt/utils/mac_utils.py | Python | apache-2.0 | 14,345 |
#!/usr/bin/env python3
import logging
from src import util
from src import etherscan
from src import messages
from crypto.prices import *
logger = logging.getLogger("node")
######
# Telegram command handler for adding nodes for the user who fired the command.
#
# Command: /add :address0;name0 ... :addressN;nameN
#
# Command parameter: :address0 - Address of the first node to add
# :name0 - Name of the first node
# :addressN - Address of the last node to add
# :nameN - Name of the last node
#
# Gets only called by the telegram bot api
######
def nodeAdd(bot, update, args):
response = "*Add*\n\n"
chatId = update.message.chat_id
logger.warning("add - args " + " ".join(args))
logger.warning("add - user: {}".format(update.message.from_user.id))
pool = bot.database.getPoolById(chatId)
if pool == None:
bot.create(bot,update,[])
if len(args) == 0:
response += ("Arguments required: address_0;name_0 ... address_n;name_n\n\n"
"Example: /add 0xFf2ED74286a5686Bc4F4896761718DE031680000;Node1 0xFf2ED74286a5686Bc4F4896761718DE031681111;Node2\n")
valid = False
else:
for arg in args:
valid = True
newNode = arg.split(";")
if len(newNode) != 2:
response += messages.invalidParameterError.format(arg)
valid = False
else:
if not util.validateTntAddress( newNode[0] ):
response += messages.invalidTntAddressError.format(newNode[0])
valid = False
if not util.validateName( newNode[1] ):
response += messages.invalidNameError.format(newNode[1])
valid = False
if valid:
address = newNode[0]
name = newNode[1]
if bot.database.addNode(update.message.chat_id, address, name, update.message.from_user.id,update.message.from_user.username):
response += "Added node {}!\n".format(address)
else:
response += messages.nodeExistsError.format(address)
bot.sendMessage(update.message.chat_id, response )
######
# Telegram command handler for updating nodes for the user who fired the command.
#
# Command: /update :address :newname
#
# Command parameter: :address - Address of the node to update
# :newname - New name for the node
#
# Gets only called by the telegram bot api
######
def nodeUpdate(bot, update, args):
response = "*Update*\n\n"
chatId = update.message.chat_id
logger.warning("update - args " + " ".join(args))
logger.warning("update - user: {}".format(update.message.from_user.id))
pool = bot.database.getPoolById(chatId)
user = bot.database.getUser(chatId, update.message.from_user.id)
if pool == None:
response+= messages.noPoolError
elif user == None:
response += messages.notActiveInPoolError
elif len(args) != 2:
response += ("Exactly 2 arguments required: :address :newname\n"
"Where :address is the address of the node to update and :newname the"
"new name of the node.\n\n"
"Example: /update 0xFf2ED74286a5686Bc4F4896761718DE031680000 MyNewNodeName\n")
else:
valid = True
if not util.validateTntAddress( args[0] ):
response += messages.invalidTntAddressError.format(args[0])
valid = False
elif not util.validateName( args[1] ):
response += messages.invalidNameError.format(args[1])
valid = False
if valid:
address = args[0]
name = args[1]
logger.info("update - {} {}".format(address, user['id']))
if bot.database.getNode(address, user['id']) == None:
response += messages.nodeNotExistsError.format(address)
else:
bot.database.updateNode(address,user['id'], name)
response += "Node successfully updated. {}\n".format(address)
bot.sendMessage(chatId, response )
######
# Telegram command handler for removing nodes for the user who fired the command.
#
# Command: /remove :address
#
# Command parameter: :address - Address of the node to remove
#
#
# Gets only called by the telegram bot api
######
def nodeRemove(bot, update, args):
response = "*Remove*\n\n"
chatId = update.message.chat_id
logger.warning("remove - " + " ".join(args))
logger.warning("remove - user: {}".format(update.message.from_user.id))
pool = bot.database.getPoolById(chatId)
user = bot.database.getUser(chatId, update.message.from_user.id)
if pool == None:
response+= messages.noPoolError
elif user == None:
response += messages.notActiveInPoolError
elif len(args) != 1:
response += ("Argument required: :address\n\n"
"Example: /remove 0xFf2ED74286a5686Bc4F4896761718DE031680000\n")
else:
address = args[0]
if not util.validateTntAddress( address ):
response += "ERROR: Invalid TNT-Address: {}\n".format(address)
else:
logger.info("remove - valid {}".format(address, ))
if bot.database.getNode(address, user['id']) == None:
response += "ERROR: Node {} does not exist!\n".format(address)
else:
bot.database.deleteNode(address,user['id'])
response += "Node {} successfully deleted.\n".format(address)
bot.sendMessage(chatId, response )
######
# Telegram command handler for reading the amounts of each node of the users
# in the pool
#
# Command: /nodes
#
# Gets only called by the telegram bot api
######
def nodes(bot, update):
response = ""
chatId = update.message.chat_id
nodesFound = False
pool = bot.database.getPoolById(chatId)
logger.warning("nodes - {}".format(chatId))
if pool == None:
response = "*Nodes*\n\n"
response += ("You need to create a pool with nodes first. "
"Type /help to show the list of commands.")
else:
tntPrice = liqui(Cryptos.TNT)
addresses = []
for user in bot.database.getUsers(pool['id']):
nodes = bot.database.getNodes(user['id'])
if len(nodes) == 0:
continue
for node in nodes:
addresses.append(node["address"])
amounts = etherscan.getTNTAmount(addresses, pool['api_key'])
for user in bot.database.getUsers(pool['id']):
nodes = bot.database.getNodes(user['id'])
if len(nodes) == 0:
continue
nodesFound = True
response += "*" + user['name'] + "*\n"
total = 0
for node in nodes:
tnt = amounts[node["address"]]
if tnt == -1:
response += node['name'] + " -> Sorry, there was an error.\n".format(tnt)
else:
total += int(tnt)
response += node['name'] + " -> {} TNT\n".format(tnt)
if tntPrice != None:
response += '\n*Total:\n TNT: {}\n USD: {}*\n\n'.format(total,int(total*tntPrice.usd))
else:
response += '\n*Total TNT: {}*\n\n'.format(total)
response += "\n\n"
if not nodesFound and pool:
response = "*Nodes*\n\n"
response += ("There are currently no nodes in this pool. You can create "
"nodes with /add.")
bot.sendMessage(chatId, response )
######
# Telegram command handler for reading the total amounts of all nodes of the users
# in the pool
#
# Command: /total
#
# Gets only called by the telegram bot api
######
def total(bot, update):
response = ""
chatId = update.message.chat_id
nodesFound = False
pool = bot.database.getPoolById(chatId)
logger.warning("total - {}".format(chatId))
if pool == None:
response = "*Total*\n\n"
response += ("You need to create a pool with nodes first. "
"Type /help to show the list of commands.")
else:
tntPrice = liqui(Cryptos.TNT)
addresses = []
for user in bot.database.getUsers(pool['id']):
nodes = bot.database.getNodes(user['id'])
if len(nodes) == 0:
continue
for node in nodes:
addresses.append(node["address"])
amounts = etherscan.getTNTAmount(addresses, pool['api_key'])
        for user in bot.database.getUsers(pool['id']):
nodes = bot.database.getNodes(user['id'])
total = 0
if len(nodes) == 0:
continue
nodesFound = True
for node in bot.database.getNodes(user['id']):
total += amounts[node['address']]
if tntPrice != None:
response += '{} -> {} TNT | {} USD\n'.format(user['name'],total,int(total * tntPrice.usd))
else:
                response += '{} -> {} TNT\n'.format(user['name'], total)
if not nodesFound:
response = "*Total*\n\n"
response += ("There are currently no nodes in this pool. You can create "
"nodes with /add.")
bot.sendMessage(chatId, response )
######
# Telegram command handler for reading the addresses of all nodes of the users
# in the pool
#
# Command: /addresses
#
# Gets only called by the telegram bot api
######
def addresses(bot, update):
response = ""
chatId = update.message.chat_id
nodesFound = False
pool = bot.database.getPoolById(chatId)
logger.warning("addresses - {}".format(chatId))
if pool == None:
response = "*Addresses*\n\n"
response += ("You need to create a pool with nodes first. "
"Type /help to show the list of commands.")
else:
for user in bot.database.getUsers(pool['id']):
nodes = bot.database.getNodes(user['id'])
if len(nodes) == 0:
continue
response += "*" + user['name'] + "*\n"
nodesFound = True
for node in nodes:
response += node['name'] + " -> " + node['address'] + "\n"
response += "\n\n"
if not nodesFound:
response = "*Addresses*\n\n"
response += ("There are currently no nodes in this pool. You can create "
"nodes with /add.")
bot.sendMessage(update.message.chat_id, response )
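# Hedged wiring sketch: it assumes these handlers are registered through
# python-telegram-bot style CommandHandlers (pre-v12 callback signature with
# pass_args) and the project's own bot wrapper; the dispatcher object and
# command names below are assumptions, only the handler functions come from
# this module:
#     updater.dispatcher.add_handler(CommandHandler('add', nodeAdd, pass_args=True))
#     updater.dispatcher.add_handler(CommandHandler('update', nodeUpdate, pass_args=True))
#     updater.dispatcher.add_handler(CommandHandler('remove', nodeRemove, pass_args=True))
#     updater.dispatcher.add_handler(CommandHandler('nodes', nodes))
#     updater.dispatcher.add_handler(CommandHandler('total', total))
#     updater.dispatcher.add_handler(CommandHandler('addresses', addresses))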
| kevinrombach/TNTNodeMonitorBot | src/commandhandler/node.py | Python | apache-2.0 | 10,737 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metadata utils."""
import os
import tensorflow.compat.v1 as tf
from explainable_ai_sdk.metadata.tf.v1 import utils
class UtilsTest(tf.test.TestCase):
def test_save_graph_model_explicit_session(self):
sess = tf.Session(graph=tf.Graph())
with sess.graph.as_default():
x = tf.placeholder(shape=[None, 10], dtype=tf.float32, name='inp')
weights = tf.constant(1., shape=(10, 2), name='weights')
model_path = os.path.join(tf.test.get_temp_dir(), 'explicit')
utils.save_graph_model(sess, model_path, {'x': x}, {'w': weights}, {'tag'})
self.assertTrue(os.path.isfile(os.path.join(model_path, 'saved_model.pb')))
tf.reset_default_graph()
loading_session = tf.Session(graph=tf.Graph())
with loading_session.graph.as_default():
tf.saved_model.loader.load(loading_session, ['tag'], model_path)
self.assertIn(x.op.name,
[n.name for n in loading_session.graph.as_graph_def().node])
self.assertIn(weights.op.name,
[n.name for n in loading_session.graph.as_graph_def().node])
def test_save_graph_model_default_session(self):
x = tf.placeholder(shape=[None, 10], dtype=tf.float32, name='inp')
weights = tf.constant(1., shape=(10, 2), name='weights')
model_path = os.path.join(tf.test.get_temp_dir(), 'default')
utils.save_graph_model(
tf.Session(), model_path, {'x': x}, {'w': weights}, {'tag'})
self.assertTrue(os.path.isfile(os.path.join(model_path, 'saved_model.pb')))
def test_save_graph_model_kwargs(self):
x = tf.placeholder(shape=[None, 10], dtype=tf.float32, name='inp')
weights = tf.constant(1., shape=(10, 2), name='weights')
model_path = os.path.join(tf.test.get_temp_dir(), 'kwargs')
utils.save_graph_model(
tf.Session(),
model_path, {'x': x}, {'w': weights}, {'tag'},
main_op=tf.tables_initializer(),
strip_default_attrs=False)
self.assertTrue(os.path.isfile(os.path.join(model_path, 'saved_model.pb')))
if __name__ == '__main__':
tf.test.main()
| GoogleCloudPlatform/explainable_ai_sdk | explainable_ai_sdk/metadata/tf/v1/utils_test.py | Python | apache-2.0 | 2,629 |
#!/home/mjwtom/install/python/bin/python
# -*- coding: utf-8 -*-
import os
import subprocess
from nodes import storage_nodes as ips
def generate_rings():
print (os.environ["PATH"])
os.environ["PATH"] = '/home/mjwtom/install/python/bin' + ":" + os.environ["PATH"]
print (os.environ["PATH"])
dev = 'sdb1'
ETC_SWIFT='/etc/swift'
if not os.path.exists(ETC_SWIFT):
os.makedirs(ETC_SWIFT)
if os.path.exists(ETC_SWIFT+'/backups'):
cmd = ['rm',
'-rf',
'%s/backups' % ETC_SWIFT]
subprocess.call(cmd)
    print('current work path:%s' % os.getcwd())
os.chdir(ETC_SWIFT)
    print('change work path to:%s' % os.getcwd())
files = os.listdir(ETC_SWIFT)
for file in files:
path = ETC_SWIFT + '/' + file
if os.path.isdir(path):
continue
        shortname, extension = os.path.splitext(file)
        if (extension == '.builder') or (extension == '.gz'):
try:
os.remove(path)
except Exception as e:
                print(e)
for builder, port in [('object.builder', 6000),
('object-1.builder', 6000),
('object-2.builder', 6000),
('container.builder', 6001),
('account.builder', 6002)]:
cmd = ['swift-ring-builder',
'%s' % builder,
'create',
'10',
'3',
'1']
subprocess.call(cmd)
i = 1
for ip in ips:
cmd = ['swift-ring-builder',
'%s' % builder,
'add',
'r%dz%d-%s:%d/%s' % (i, i, ip, port, dev),
'1']
subprocess.call(cmd)
i += 1
cmd = ['swift-ring-builder',
'%s' % builder,
'rebalance']
subprocess.call(cmd)
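# Example of the add command this script generates for the first storage node
# (illustrative; assumes ips[0] == '192.168.0.1' and the defaults above):
#     swift-ring-builder object.builder add r1z1-192.168.0.1:6000/sdb1 1
# Each builder file is then rebalanced once all devices have been added.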
if __name__ == '__main__':
generate_rings() | mjwtom/swift | test/dedupe/bin/remakerings.py | Python | apache-2.0 | 1,973 |
from __future__ import absolute_import
from __future__ import print_function
from contextlib import contextmanager
from typing import (cast, Any, Callable, Dict, Iterable, Iterator, List, Mapping, Optional,
Sized, Tuple, Union, Text)
from django.core.urlresolvers import resolve
from django.conf import settings
from django.test import TestCase
from django.test.client import (
BOUNDARY, MULTIPART_CONTENT, encode_multipart,
)
from django.template import loader
from django.test.testcases import SerializeMixin
from django.http import HttpResponse
from django.db.utils import IntegrityError
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib.str_utils import force_text
from zerver.lib.utils import is_remote_server
from zerver.lib import cache
from zerver.tornado.handlers import allocate_handler_id
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, bulk_add_subscriptions,
get_display_recipient, bulk_remove_subscriptions
)
from zerver.lib.test_helpers import (
instrument_url, find_key_by_email,
)
from zerver.models import (
get_stream,
get_user,
get_user_profile_by_email,
get_realm,
get_realm_by_email_domain,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
UserProfile,
)
from zerver.lib.request import JsonableError
from zilencer.models import get_remote_server_by_uuid
import base64
import mock
import os
import re
import time
import ujson
import unittest
from six.moves import urllib
from six import binary_type
from zerver.lib.str_utils import NonBinaryStr
from contextlib import contextmanager
import six
API_KEYS = {} # type: Dict[Text, Text]
def flush_caches_for_testing():
# type: () -> None
global API_KEYS
API_KEYS = {}
class UploadSerializeMixin(SerializeMixin):
"""
We cannot use override_settings to change upload directory because
    settings.LOCAL_UPLOADS_DIR is used in the url pattern and urls
are compiled only once. Otherwise using a different upload directory
for conflicting test cases would have provided better performance
while providing the required isolation.
"""
lockfile = 'var/upload_lock'
@classmethod
def setUpClass(cls, *args, **kwargs):
# type: (*Any, **Any) -> None
if not os.path.exists(cls.lockfile):
with open(cls.lockfile, 'w'): # nocoverage - rare locking case
pass
super(UploadSerializeMixin, cls).setUpClass(*args, **kwargs)
class ZulipTestCase(TestCase):
# Ensure that the test system just shows us diffs
maxDiff = None # type: Optional[int]
'''
WRAPPER_COMMENT:
We wrap calls to self.client.{patch,put,get,post,delete} for various
reasons. Some of this has to do with fixing encodings before calling
into the Django code. Some of this has to do with providing a future
    path for instrumentation. Some of it is just consistency.
The linter will prevent direct calls to self.client.foo, so the wrapper
functions have to fake out the linter by using a local variable called
    django_client to fool the regex.
'''
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
# This method should be removed when we migrate to version 3 of Python
import six
if six.PY2:
self.assertRaisesRegex = self.assertRaisesRegexp
super(ZulipTestCase, self).__init__(*args, **kwargs)
DEFAULT_REALM = Realm.objects.get(string_id='zulip')
@instrument_url
def client_patch(self, url, info={}, **kwargs):
# type: (Text, Dict[str, Any], **Any) -> HttpResponse
"""
We need to urlencode, since Django's function won't do it for us.
"""
encoded = urllib.parse.urlencode(info)
django_client = self.client # see WRAPPER_COMMENT
return django_client.patch(url, encoded, **kwargs)
@instrument_url
def client_patch_multipart(self, url, info={}, **kwargs):
# type: (Text, Dict[str, Any], **Any) -> HttpResponse
"""
Use this for patch requests that have file uploads or
that need some sort of multi-part content. In the future
Django's test client may become a bit more flexible,
so we can hopefully eliminate this. (When you post
with the Django test client, it deals with MULTIPART_CONTENT
automatically, but not patch.)
"""
encoded = encode_multipart(BOUNDARY, info)
django_client = self.client # see WRAPPER_COMMENT
return django_client.patch(
url,
encoded,
content_type=MULTIPART_CONTENT,
**kwargs)
@instrument_url
def client_put(self, url, info={}, **kwargs):
# type: (Text, Dict[str, Any], **Any) -> HttpResponse
encoded = urllib.parse.urlencode(info)
django_client = self.client # see WRAPPER_COMMENT
return django_client.put(url, encoded, **kwargs)
@instrument_url
def client_delete(self, url, info={}, **kwargs):
# type: (Text, Dict[str, Any], **Any) -> HttpResponse
encoded = urllib.parse.urlencode(info)
django_client = self.client # see WRAPPER_COMMENT
return django_client.delete(url, encoded, **kwargs)
@instrument_url
def client_options(self, url, info={}, **kwargs):
# type: (Text, Dict[str, Any], **Any) -> HttpResponse
encoded = urllib.parse.urlencode(info)
django_client = self.client # see WRAPPER_COMMENT
return django_client.options(url, encoded, **kwargs)
@instrument_url
def client_post(self, url, info={}, **kwargs):
# type: (Text, Dict[str, Any], **Any) -> HttpResponse
django_client = self.client # see WRAPPER_COMMENT
return django_client.post(url, info, **kwargs)
@instrument_url
def client_post_request(self, url, req):
# type: (Text, Any) -> HttpResponse
"""
We simulate hitting an endpoint here, although we
actually resolve the URL manually and hit the view
directly. We have this helper method to allow our
instrumentation to work for /notify_tornado and
future similar methods that require doing funny
things to a request object.
"""
match = resolve(url)
return match.func(req)
@instrument_url
def client_get(self, url, info={}, **kwargs):
# type: (Text, Dict[str, Any], **Any) -> HttpResponse
django_client = self.client # see WRAPPER_COMMENT
return django_client.get(url, info, **kwargs)
example_user_map = dict(
hamlet=u'[email protected]',
cordelia=u'[email protected]',
iago=u'[email protected]',
prospero=u'[email protected]',
othello=u'[email protected]',
AARON=u'[email protected]',
aaron=u'[email protected]',
ZOE=u'[email protected]',
)
mit_user_map = dict(
sipbtest=u"[email protected]",
starnine=u"[email protected]",
espuser=u"[email protected]",
)
# Non-registered test users
nonreg_user_map = dict(
test=u'[email protected]',
test1=u'[email protected]',
alice=u'[email protected]',
newuser=u'[email protected]',
bob=u'[email protected]',
cordelia=u'[email protected]',
newguy=u'[email protected]',
me=u'[email protected]',
)
def nonreg_user(self, name):
# type: (str) -> UserProfile
email = self.nonreg_user_map[name]
return get_user(email, get_realm_by_email_domain(email))
def example_user(self, name):
# type: (str) -> UserProfile
email = self.example_user_map[name]
return get_user(email, get_realm('zulip'))
def mit_user(self, name):
# type: (str) -> UserProfile
email = self.mit_user_map[name]
return get_user(email, get_realm('zephyr'))
def nonreg_email(self, name):
# type: (str) -> Text
return self.nonreg_user_map[name]
def example_email(self, name):
# type: (str) -> Text
return self.example_user_map[name]
def mit_email(self, name):
# type: (str) -> Text
return self.mit_user_map[name]
def notification_bot(self):
# type: () -> UserProfile
return get_user('[email protected]', get_realm('zulip'))
def login_with_return(self, email, password=None):
# type: (Text, Optional[Text]) -> HttpResponse
if password is None:
password = initial_password(email)
return self.client_post('/accounts/login/',
{'username': email, 'password': password})
def login(self, email, password=None, fails=False):
# type: (Text, Optional[Text], bool) -> HttpResponse
if password is None:
password = initial_password(email)
if not fails:
self.assertTrue(self.client.login(username=email, password=password))
else:
self.assertFalse(self.client.login(username=email, password=password))
def logout(self):
# type: () -> None
self.client.logout()
def register(self, email, password):
# type: (Text, Text) -> HttpResponse
self.client_post('/accounts/home/', {'email': email})
return self.submit_reg_form_for_user(email, password)
def submit_reg_form_for_user(self, email, password, realm_name="Zulip Test",
realm_subdomain="zuliptest",
from_confirmation='', full_name=None, timezone=u'', **kwargs):
# type: (Text, Text, Optional[Text], Optional[Text], Optional[Text], Optional[Text], Optional[Text], **Any) -> HttpResponse
"""
Stage two of the two-step registration process.
If things are working correctly the account should be fully
registered after this call.
You can pass the HTTP_HOST variable for subdomains via kwargs.
"""
if full_name is None:
full_name = email.replace("@", "_")
return self.client_post('/accounts/register/',
{'full_name': full_name,
'password': password,
'realm_name': realm_name,
'realm_subdomain': realm_subdomain,
'key': find_key_by_email(email),
'timezone': timezone,
'terms': True,
'from_confirmation': from_confirmation},
**kwargs)
    def get_confirmation_url_from_outbox(self, email_address, path_pattern=r"(\S+)>"):
# type: (Text, Text) -> Text
from django.core.mail import outbox
for message in reversed(outbox):
if email_address in message.to:
return re.search(settings.EXTERNAL_HOST + path_pattern,
message.body).groups()[0]
else:
raise AssertionError("Couldn't find a confirmation email.")
def get_api_key(self, email):
# type: (Text) -> Text
if email not in API_KEYS:
API_KEYS[email] = get_user_profile_by_email(email).api_key
return API_KEYS[email]
def get_server_api_key(self, server_uuid):
# type: (Text) -> Text
if server_uuid not in API_KEYS:
API_KEYS[server_uuid] = get_remote_server_by_uuid(server_uuid).api_key
return API_KEYS[server_uuid]
def api_auth(self, identifier):
# type: (Text) -> Dict[str, Text]
"""
identifier: Can be an email or a remote server uuid.
"""
if is_remote_server(identifier):
api_key = self.get_server_api_key(identifier)
else:
api_key = self.get_api_key(identifier)
credentials = u"%s:%s" % (identifier, api_key)
return {
'HTTP_AUTHORIZATION': u'Basic ' + base64.b64encode(credentials.encode('utf-8')).decode('utf-8')
}
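    # Hedged usage sketch (the endpoint is illustrative): callers below unpack
    # the returned headers into the request kwargs, e.g.
    #     self.client_get('/api/v1/users/me/subscriptions', **self.api_auth(email))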
def get_streams(self, email, realm):
# type: (Text, Realm) -> List[Text]
"""
Helper function to get the stream names for a user
"""
user_profile = get_user(email, realm)
subs = Subscription.objects.filter(
user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM)
return [cast(Text, get_display_recipient(sub.recipient)) for sub in subs]
def send_message(self, sender_name, raw_recipients, message_type,
content=u"test content", subject=u"test", **kwargs):
# type: (Text, Union[Text, List[Text]], int, Text, Text, **Any) -> int
sender = get_user_profile_by_email(sender_name)
if message_type in [Recipient.PERSONAL, Recipient.HUDDLE]:
message_type_name = "private"
else:
message_type_name = "stream"
if isinstance(raw_recipients, six.string_types):
recipient_list = [raw_recipients]
else:
recipient_list = raw_recipients
(sending_client, _) = Client.objects.get_or_create(name="test suite")
return check_send_message(
sender, sending_client, message_type_name, recipient_list, subject,
content, forged=False, forged_timestamp=None,
forwarder_user_profile=sender, realm=sender.realm, **kwargs)
def get_messages(self, anchor=1, num_before=100, num_after=100,
use_first_unread_anchor=False):
# type: (int, int, int, bool) -> List[Dict[str, Any]]
post_params = {"anchor": anchor, "num_before": num_before,
"num_after": num_after,
"use_first_unread_anchor": ujson.dumps(use_first_unread_anchor)}
result = self.client_get("/json/messages", dict(post_params))
data = ujson.loads(result.content)
return data['messages']
def users_subscribed_to_stream(self, stream_name, realm):
# type: (Text, Realm) -> List[UserProfile]
stream = Stream.objects.get(name=stream_name, realm=realm)
recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
subscriptions = Subscription.objects.filter(recipient=recipient, active=True)
return [subscription.user_profile for subscription in subscriptions]
def assert_url_serves_contents_of_file(self, url, result):
# type: (str, bytes) -> None
response = self.client_get(url)
data = b"".join(response.streaming_content)
self.assertEqual(result, data)
def assert_json_success(self, result):
# type: (HttpResponse) -> Dict[str, Any]
"""
Successful POSTs return a 200 and JSON of the form {"result": "success",
"msg": ""}.
"""
self.assertEqual(result.status_code, 200, result)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "success")
# We have a msg key for consistency with errors, but it typically has an
# empty value.
self.assertIn("msg", json)
return json
def get_json_error(self, result, status_code=400):
# type: (HttpResponse, int) -> Dict[str, Any]
self.assertEqual(result.status_code, status_code)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "error")
return json['msg']
def assert_json_error(self, result, msg, status_code=400):
# type: (HttpResponse, Text, int) -> None
"""
Invalid POSTs return an error status code and JSON of the form
{"result": "error", "msg": "reason"}.
"""
self.assertEqual(self.get_json_error(result, status_code=status_code), msg)
def assert_length(self, queries, count):
# type: (Sized, int) -> None
actual_count = len(queries)
return self.assertTrue(actual_count == count,
"len(%s) == %s, != %s" % (queries, actual_count, count))
def assert_json_error_contains(self, result, msg_substring, status_code=400):
# type: (HttpResponse, Text, int) -> None
self.assertIn(msg_substring, self.get_json_error(result, status_code=status_code))
def assert_in_response(self, substring, response):
# type: (Text, HttpResponse) -> None
self.assertIn(substring, response.content.decode('utf-8'))
def assert_in_success_response(self, substrings, response):
# type: (List[Text], HttpResponse) -> None
self.assertEqual(response.status_code, 200)
decoded = response.content.decode('utf-8')
for substring in substrings:
self.assertIn(substring, decoded)
def assert_not_in_success_response(self, substrings, response):
# type: (List[Text], HttpResponse) -> None
self.assertEqual(response.status_code, 200)
decoded = response.content.decode('utf-8')
for substring in substrings:
self.assertNotIn(substring, decoded)
def fixture_data(self, type, action, file_type='json'):
# type: (Text, Text, Text) -> Text
return force_text(open(os.path.join(os.path.dirname(__file__),
"../webhooks/%s/fixtures/%s.%s" % (type, action, file_type))).read())
def make_stream(self, stream_name, realm=None, invite_only=False):
# type: (Text, Optional[Realm], Optional[bool]) -> Stream
if realm is None:
realm = self.DEFAULT_REALM
try:
stream = Stream.objects.create(
realm=realm,
name=stream_name,
invite_only=invite_only,
)
except IntegrityError: # nocoverage -- this is for bugs in the tests
raise Exception('''
%s already exists
Please call make_stream with a stream name
that is not already in use.''' % (stream_name,))
Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
return stream
# Subscribe to a stream directly
def subscribe_to_stream(self, email, stream_name, realm=None):
# type: (Text, Text, Optional[Realm]) -> Stream
if realm is None:
realm = get_realm_by_email_domain(email)
try:
stream = get_stream(stream_name, realm)
from_stream_creation = False
except Stream.DoesNotExist:
stream, from_stream_creation = create_stream_if_needed(realm, stream_name)
user_profile = get_user_profile_by_email(email)
bulk_add_subscriptions([stream], [user_profile], from_stream_creation=from_stream_creation)
return stream
def unsubscribe_from_stream(self, email, stream_name, realm):
# type: (Text, Text, Realm) -> None
user_profile = get_user(email, realm)
stream = get_stream(stream_name, realm)
bulk_remove_subscriptions([user_profile], [stream])
# Subscribe to a stream by making an API request
def common_subscribe_to_streams(self, email, streams, extra_post_data={}, invite_only=False):
# type: (Text, Iterable[Text], Dict[str, Any], bool) -> HttpResponse
post_data = {'subscriptions': ujson.dumps([{"name": stream} for stream in streams]),
'invite_only': ujson.dumps(invite_only)}
post_data.update(extra_post_data)
result = self.client_post("/api/v1/users/me/subscriptions", post_data, **self.api_auth(email))
return result
def send_json_payload(self, email, url, payload, stream_name=None, **post_params):
# type: (Text, Text, Union[Text, Dict[str, Any]], Optional[Text], **Any) -> Message
if stream_name is not None:
self.subscribe_to_stream(email, stream_name)
result = self.client_post(url, payload, **post_params)
self.assert_json_success(result)
# Check the correct message was sent
msg = self.get_last_message()
self.assertEqual(msg.sender.email, email)
if stream_name is not None:
self.assertEqual(get_display_recipient(msg.recipient), stream_name)
# TODO: should also validate recipient for private messages
return msg
def get_last_message(self):
# type: () -> Message
return Message.objects.latest('id')
def get_second_to_last_message(self):
# type: () -> Message
return Message.objects.all().order_by('-id')[1]
@contextmanager
def simulated_markdown_failure(self):
# type: () -> Iterator[None]
'''
This raises a failure inside of the try/except block of
bugdown.__init__.do_convert.
'''
with \
self.settings(ERROR_BOT=None), \
mock.patch('zerver.lib.bugdown.timeout', side_effect=KeyError('foo')), \
mock.patch('zerver.lib.bugdown.log_bugdown_error'):
yield
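    # Typical use in a test (sketch; the endpoint and assertion are assumptions):
    #     with self.simulated_markdown_failure():
    #         result = self.client_post(...)  # exercises bugdown's error path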
class WebhookTestCase(ZulipTestCase):
"""
Common for all webhooks tests
Override below class attributes and run send_and_test_message
If you create your url in uncommon way you can override build_webhook_url method
In case that you need modify body or create it without using fixture you can also override get_body method
"""
STREAM_NAME = None # type: Optional[Text]
TEST_USER_EMAIL = '[email protected]'
URL_TEMPLATE = None # type: Optional[Text]
FIXTURE_DIR_NAME = None # type: Optional[Text]
def setUp(self):
# type: () -> None
self.url = self.build_webhook_url()
def send_and_test_stream_message(self, fixture_name, expected_subject=None,
expected_message=None, content_type="application/json", **kwargs):
# type: (Text, Optional[Text], Optional[Text], Optional[Text], **Any) -> Message
payload = self.get_body(fixture_name)
if content_type is not None:
kwargs['content_type'] = content_type
msg = self.send_json_payload(self.TEST_USER_EMAIL, self.url, payload,
self.STREAM_NAME, **kwargs)
self.do_test_subject(msg, expected_subject)
self.do_test_message(msg, expected_message)
return msg
def send_and_test_private_message(self, fixture_name, expected_subject=None,
expected_message=None, content_type="application/json", **kwargs):
        # type: (Text, Optional[Text], Optional[Text], Optional[Text], **Any) -> Message
payload = self.get_body(fixture_name)
if content_type is not None:
kwargs['content_type'] = content_type
msg = self.send_json_payload(self.TEST_USER_EMAIL, self.url, payload,
stream_name=None, **kwargs)
self.do_test_message(msg, expected_message)
return msg
def build_webhook_url(self, *args, **kwargs):
# type: (*Any, **Any) -> Text
url = self.URL_TEMPLATE
if url.find("api_key") >= 0:
api_key = self.get_api_key(self.TEST_USER_EMAIL)
url = self.URL_TEMPLATE.format(api_key=api_key,
stream=self.STREAM_NAME)
else:
url = self.URL_TEMPLATE.format(stream=self.STREAM_NAME)
        has_arguments = kwargs or args
        if has_arguments:
            if url.find('?') == -1:
                url = "{}?".format(url)
            else:
                url = "{}&".format(url)
for key, value in kwargs.items():
url = "{}{}={}&".format(url, key, value)
for arg in args:
url = "{}{}&".format(url, arg)
return url[:-1] if has_arguments else url
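    # Illustrative result (assumed template): with
    #     URL_TEMPLATE = '/api/v1/external/example?api_key={api_key}&stream={stream}'
    # and STREAM_NAME = 'example', build_webhook_url('topic=hi') yields
    #     '/api/v1/external/example?api_key=<key>&stream=example&topic=hi'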
def get_body(self, fixture_name):
# type: (Text) -> Union[Text, Dict[str, Text]]
"""Can be implemented either as returning a dictionary containing the
post parameters or as string containing the body of the request."""
return ujson.dumps(ujson.loads(self.fixture_data(self.FIXTURE_DIR_NAME, fixture_name)))
def do_test_subject(self, msg, expected_subject):
# type: (Message, Optional[Text]) -> None
if expected_subject is not None:
self.assertEqual(msg.topic_name(), expected_subject)
def do_test_message(self, msg, expected_message):
# type: (Message, Optional[Text]) -> None
if expected_message is not None:
self.assertEqual(msg.content, expected_message)
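# Minimal concrete subclass sketch (hypothetical names; a real test would point
# URL_TEMPLATE and FIXTURE_DIR_NAME at an actual webhook integration):
#     class ExampleHookTests(WebhookTestCase):
#         STREAM_NAME = 'example'
#         URL_TEMPLATE = '/api/v1/external/example?api_key={api_key}&stream={stream}'
#         FIXTURE_DIR_NAME = 'example'
#
#         def test_ping_event(self):
#             # type: () -> None
#             self.send_and_test_stream_message('ping', u'expected subject',
#                                               u'expected message')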
| vabs22/zulip | zerver/lib/test_classes.py | Python | apache-2.0 | 24,636 |
import atexit
import sys
def all_done():
print('all_done()')
print('Registering')
atexit.register(all_done)
print('Registered')
print('Exiting...')
sys.exit()
| jasonwee/asus-rt-n14uhp-mrtg | src/lesson_application_building_blocks/atexit_sys_exit.py | Python | apache-2.0 | 172 |
# -*- coding: utf-8 -*-
#
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.db import transaction
from django.http import HttpResponse
from django.shortcuts import render, redirect, get_object_or_404 as getObj
from django.utils import timezone
from django.utils.translation import ugettext
from openpyxl import Workbook
from openpyxl.writer.excel import save_virtual_workbook
from webframe.functions import getDateTime, getDate, FMT_DATE, FMT_DATETIME, getEndOfDay
from .models import Record
import logging
logger=logging.getLogger('sugar.views')
def _getUser(req, username=None):
if username:
if req.user.is_superuser or req.user.username==username:
return getObj(get_user_model(), username=username)
else:
return req.user
raise PermissionDenied()
def index(req):
if req.user.is_authenticated():
return redirect('dashboard', username=req.user.username)
return render(req, 'webframe/empty.html')
@login_required
def dashboard(req, username=None):
user=_getUser(req, username)
if req.method=='POST':
logger.info('Saving record to user<%s>:%s...'%(user.id, user.username))
with transaction.atomic():
r=Record()
r.owner=user
r.date=getDateTime(req.POST.get('date'))
r.sugar=req.POST.get('sugar', '0')
r.sugar=float(r.sugar) if r.sugar else 0
r.pulse=req.POST.get('pulse', '0')
r.pulse=int(r.pulse) if r.pulse else 0
r.sys=req.POST.get('sys', '0')
r.sys=int(r.sys) if r.sys else 0
r.dia=req.POST.get('dia', '0')
r.dia=int(r.dia) if r.dia else 0
r.save()
return redirect('reports-user', username=username) if username else redirect('reports')
return render(req, 'sugar/dashboard.html', {})
@login_required
def reports(req, username=None):
user=_getUser(req, username)
params=dict()
params['to']=getDate(req.GET.get('to', None), timezone.now())
params['to']=getEndOfDay(params['to']) #Due to the system should include the selected date instead
params['from']=getDate(req.GET.get('from', None), params['to']-timedelta(days=30))
params['target']=Record.objects.filter(owner=user, date__range=(params['from'], params['to'])).order_by('date')
return render(req, 'sugar/reports.html', params)
@login_required
def downloads(req, username=None):
user=_getUser(req, username)
params=dict()
params['to']=getDate(req.GET.get('to', None), datetime.now())
params['from']=getDate(req.GET.get('from', None), params['to']-timedelta(days=30))
params['target']=Record.objects.filter(owner=user, date__range=(params['from'], params['to'])).order_by('date')
logger.debug(params['target'])
filename=ugettext('From %(from)s to %(to)s'%params)
wb=Workbook()
ws=wb.active
ws.merge_cells('A1:G1')
ws['A1']=filename
ws['A2']=ugettext('Record.owner')
ws['B2']=user.get_full_name() if user.get_full_name() else user.username
ws['A3']=ugettext('from')
ws['B3']=params['from'].strftime(FMT_DATE)
ws['A4']=ugettext('to')
ws['B4']=params['to'].strftime(FMT_DATE)
ws.cell(row=5, column=3, value=ugettext('Record.date'))
ws.cell(row=5, column=4, value=ugettext('Record.sugar'))
ws.cell(row=5, column=5, value=ugettext('Record.pulse'))
ws.cell(row=5, column=6, value=ugettext('Record.sys'))
ws.cell(row=5, column=7, value=ugettext('Record.dia'))
row=6
for r in params['target']:
ws.cell(row=row, column=3, value=timezone.localtime(r.date).strftime(FMT_DATETIME))
ws.cell(row=row, column=4, value=r.sugar)
ws.cell(row=row, column=5, value=r.pulse)
ws.cell(row=row, column=6, value=r.sys)
ws.cell(row=row, column=7, value=r.dia)
row+=1
rst=HttpResponse(save_virtual_workbook(wb), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
rst['Content-Disposition'] = 'attachment; filename=\"%s.xlsx\"'%filename
return rst
| kensonman/mansonsolutions.sugar | src/sugar/views.py | Python | apache-2.0 | 4,129 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import tuskarclient.tests.utils as tutils
from tuskarclient.v1 import resource_classes
class ResourceClassManagerTest(tutils.TestCase):
def setUp(self):
super(ResourceClassManagerTest, self).setUp()
self.api = mock.Mock()
self.rcm = resource_classes.ResourceClassManager(self.api)
def test_get(self):
self.rcm._get = mock.Mock(return_value='fake_resource_class')
self.assertEqual(self.rcm.get(42), 'fake_resource_class')
self.rcm._get.assert_called_with('/v1/resource_classes/42')
def test_list(self):
self.rcm._list = mock.Mock(return_value=['fake_resource_class'])
self.assertEqual(self.rcm.list(), ['fake_resource_class'])
self.rcm._list.assert_called_with('/v1/resource_classes')
def test_create(self):
self.rcm._create = mock.Mock(return_value=['fake_resource_class'])
self.assertEqual(
self.rcm.create(dummy='dummy resource class data'),
['fake_resource_class'])
self.rcm._create.assert_called_with(
'/v1/resource_classes',
{'dummy': 'dummy resource class data'})
def test_update(self):
self.rcm._update = mock.Mock(return_value=['fake_resource_class'])
self.assertEqual(
self.rcm.update(42, dummy='dummy resource class data'),
['fake_resource_class'])
self.rcm._update.assert_called_with(
'/v1/resource_classes/42',
{'dummy': 'dummy resource class data'})
def test_delete(self):
self.rcm._delete = mock.Mock(return_value=None)
self.assertEqual(self.rcm.delete(42), None)
self.rcm._delete.assert_called_with('/v1/resource_classes/42')
| ccrouch/python-tuskarclient | tuskarclient/tests/v1/test_resource_class.py | Python | apache-2.0 | 2,273 |
# -*- coding: utf-8 -*-
#
# Baobab documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 7 00:44:28 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo','jsonext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates/sphinxdoc']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Baobab'
copyright = u'2010, Riccardo Attilio Galli'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3.1'
# The full version, including alpha/beta/rc tags.
release = '1.3.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "logo_baobab_200.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'favicon.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'*': ['globaltoc.html', 'sourcelink.html', 'searchbox.html'],
'index': ['download.html','globaltoc.html', 'sourcelink.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {'example_animals':'animals.html','example_forum':'forum.html'}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Baobabdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Baobab.tex', u'Baobab Documentation',
u'Riccardo Attilio Galli', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'baobab', u'Baobab Documentation',
[u'Riccardo Attilio Galli'], 1)
]
| riquito/Baobab | doc/source/conf.py | Python | apache-2.0 | 7,266 |
# TODO: Yes need to fix this violation of visibility
from functools import partial
from jarvis_cli.client.common import _get_jarvis_resource, _post_jarvis_resource, \
_put_jarvis_resource, query
def _construct_log_entry_endpoint(event_id):
return "events/{0}/logentries".format(event_id)
def get_log_entry(event_id, conn, log_entry_id):
return _get_jarvis_resource(_construct_log_entry_endpoint(event_id), conn,
log_entry_id)
def post_log_entry(event_id, conn, log_entry_request, quiet=False,
skip_tags_check=False):
return _post_jarvis_resource(_construct_log_entry_endpoint(event_id), conn,
log_entry_request, quiet, skip_tags_check)
def put_log_entry(event_id, conn, log_entry_id, log_entry_request):
return _put_jarvis_resource(_construct_log_entry_endpoint(event_id), conn,
log_entry_id, log_entry_request)
query_log_entries = partial(query, "search/logentries")
| clb6/jarvis-cli | jarvis_cli/client/log_entry.py | Python | apache-2.0 | 938 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras initializer serialization / deserialization."""
import tensorflow.compat.v2 as tf
import threading
from tensorflow.python import tf2
from keras.initializers import initializers_v1
from keras.initializers import initializers_v2
from keras.utils import generic_utils
from keras.utils import tf_inspect as inspect
from tensorflow.python.ops import init_ops
from tensorflow.python.util.tf_export import keras_export
# LOCAL.ALL_OBJECTS is meant to be a global mutable. Hence we need to make it
# thread-local to avoid concurrent mutations.
LOCAL = threading.local()
def populate_deserializable_objects():
"""Populates dict ALL_OBJECTS with every built-in initializer.
"""
global LOCAL
if not hasattr(LOCAL, 'ALL_OBJECTS'):
LOCAL.ALL_OBJECTS = {}
LOCAL.GENERATED_WITH_V2 = None
if LOCAL.ALL_OBJECTS and LOCAL.GENERATED_WITH_V2 == tf.__internal__.tf2.enabled():
# Objects dict is already generated for the proper TF version:
# do nothing.
return
LOCAL.ALL_OBJECTS = {}
LOCAL.GENERATED_WITH_V2 = tf.__internal__.tf2.enabled()
# Compatibility aliases (need to exist in both V1 and V2).
LOCAL.ALL_OBJECTS['ConstantV2'] = initializers_v2.Constant
LOCAL.ALL_OBJECTS['GlorotNormalV2'] = initializers_v2.GlorotNormal
LOCAL.ALL_OBJECTS['GlorotUniformV2'] = initializers_v2.GlorotUniform
LOCAL.ALL_OBJECTS['HeNormalV2'] = initializers_v2.HeNormal
LOCAL.ALL_OBJECTS['HeUniformV2'] = initializers_v2.HeUniform
LOCAL.ALL_OBJECTS['IdentityV2'] = initializers_v2.Identity
LOCAL.ALL_OBJECTS['LecunNormalV2'] = initializers_v2.LecunNormal
LOCAL.ALL_OBJECTS['LecunUniformV2'] = initializers_v2.LecunUniform
LOCAL.ALL_OBJECTS['OnesV2'] = initializers_v2.Ones
LOCAL.ALL_OBJECTS['OrthogonalV2'] = initializers_v2.Orthogonal
LOCAL.ALL_OBJECTS['RandomNormalV2'] = initializers_v2.RandomNormal
LOCAL.ALL_OBJECTS['RandomUniformV2'] = initializers_v2.RandomUniform
LOCAL.ALL_OBJECTS['TruncatedNormalV2'] = initializers_v2.TruncatedNormal
LOCAL.ALL_OBJECTS['VarianceScalingV2'] = initializers_v2.VarianceScaling
LOCAL.ALL_OBJECTS['ZerosV2'] = initializers_v2.Zeros
# Out of an abundance of caution we also include these aliases that have
# a non-zero probability of having been included in saved configs in the past.
LOCAL.ALL_OBJECTS['glorot_normalV2'] = initializers_v2.GlorotNormal
LOCAL.ALL_OBJECTS['glorot_uniformV2'] = initializers_v2.GlorotUniform
LOCAL.ALL_OBJECTS['he_normalV2'] = initializers_v2.HeNormal
LOCAL.ALL_OBJECTS['he_uniformV2'] = initializers_v2.HeUniform
LOCAL.ALL_OBJECTS['lecun_normalV2'] = initializers_v2.LecunNormal
LOCAL.ALL_OBJECTS['lecun_uniformV2'] = initializers_v2.LecunUniform
if tf.__internal__.tf2.enabled():
# For V2, entries are generated automatically based on the content of
# initializers_v2.py.
v2_objs = {}
base_cls = initializers_v2.Initializer
generic_utils.populate_dict_with_module_objects(
v2_objs,
[initializers_v2],
obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls))
for key, value in v2_objs.items():
LOCAL.ALL_OBJECTS[key] = value
# Functional aliases.
LOCAL.ALL_OBJECTS[generic_utils.to_snake_case(key)] = value
else:
# V1 initializers.
v1_objs = {
'Constant': tf.compat.v1.constant_initializer,
'GlorotNormal': tf.compat.v1.glorot_normal_initializer,
'GlorotUniform': tf.compat.v1.glorot_uniform_initializer,
'Identity': tf.compat.v1.initializers.identity,
'Ones': tf.compat.v1.ones_initializer,
'Orthogonal': tf.compat.v1.orthogonal_initializer,
'VarianceScaling': tf.compat.v1.variance_scaling_initializer,
'Zeros': tf.compat.v1.zeros_initializer,
'HeNormal': initializers_v1.HeNormal,
'HeUniform': initializers_v1.HeUniform,
'LecunNormal': initializers_v1.LecunNormal,
'LecunUniform': initializers_v1.LecunUniform,
'RandomNormal': initializers_v1.RandomNormal,
'RandomUniform': initializers_v1.RandomUniform,
'TruncatedNormal': initializers_v1.TruncatedNormal,
}
for key, value in v1_objs.items():
LOCAL.ALL_OBJECTS[key] = value
# Functional aliases.
LOCAL.ALL_OBJECTS[generic_utils.to_snake_case(key)] = value
# More compatibility aliases.
LOCAL.ALL_OBJECTS['normal'] = LOCAL.ALL_OBJECTS['random_normal']
LOCAL.ALL_OBJECTS['uniform'] = LOCAL.ALL_OBJECTS['random_uniform']
LOCAL.ALL_OBJECTS['one'] = LOCAL.ALL_OBJECTS['ones']
LOCAL.ALL_OBJECTS['zero'] = LOCAL.ALL_OBJECTS['zeros']
# For backwards compatibility, we populate this file with the objects
# from ALL_OBJECTS. We make no guarantees as to whether these objects will
# using their correct version.
populate_deserializable_objects()
globals().update(LOCAL.ALL_OBJECTS)
# Utility functions
@keras_export('keras.initializers.serialize')
def serialize(initializer):
return generic_utils.serialize_keras_object(initializer)
@keras_export('keras.initializers.deserialize')
def deserialize(config, custom_objects=None):
"""Return an `Initializer` object from its config."""
populate_deserializable_objects()
return generic_utils.deserialize_keras_object(
config,
module_objects=LOCAL.ALL_OBJECTS,
custom_objects=custom_objects,
printable_module_name='initializer')
@keras_export('keras.initializers.get')
def get(identifier):
"""Retrieve a Keras initializer by the identifier.
The `identifier` may be the string name of a initializers function or class (
case-sensitively).
>>> identifier = 'Ones'
>>> tf.keras.initializers.deserialize(identifier)
<...keras.initializers.initializers_v2.Ones...>
You can also specify `config` of the initializer to this function by passing
dict containing `class_name` and `config` as an identifier. Also note that the
`class_name` must map to a `Initializer` class.
>>> cfg = {'class_name': 'Ones', 'config': {}}
>>> tf.keras.initializers.deserialize(cfg)
<...keras.initializers.initializers_v2.Ones...>
In the case that the `identifier` is a class, this method will return a new
instance of the class by its constructor.
Args:
identifier: String or dict that contains the initializer name or
configurations.
Returns:
Initializer instance base on the input identifier.
Raises:
ValueError: If the input identifier is not a supported type or in a bad
format.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, str):
identifier = str(identifier)
return deserialize(identifier)
elif callable(identifier):
if inspect.isclass(identifier):
identifier = identifier()
return identifier
else:
raise ValueError('Could not interpret initializer identifier: ' +
str(identifier))
| keras-team/keras | keras/initializers/__init__.py | Python | apache-2.0 | 7,577 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. See accompanying LICENSE file.
#
import os
import sys
import falcon_config as fc
import subprocess
cmd = sys.argv[0]
prg, base_dir = fc.resolve_sym_link(os.path.abspath(cmd))
service_stop_cmd = os.path.join(base_dir, 'bin', 'service_stop.py')
subprocess.call(['python', service_stop_cmd, 'prism'])
| OpenPOWER-BigData/HDP-falcon | src/bin/prism_stop.py | Python | apache-2.0 | 848 |
#!/usr/bin/python2.5
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple wave robot WSGI application and forwarding middleware."""
import webob
import webob.exc
from api import robot_abstract
import logging
class RobotMiddleware(object):
"""WSGI middleware that routes /_wave/ requests to a robot wsgi app."""
def __init__(self, robot_app, main_app):
self._robot_app = robot_app
self._main_app = main_app
def __call__(self, environ, start_response):
path = environ['PATH_INFO']
if path.startswith('/_wave/'):
return self._robot_app(environ, start_response)
return self._main_app(environ, start_response)
class SimpleRobotApp(object):
"""WSGI application for serving an abstract robot.
This is just like the Robot class in the Wave api, but it uses the plain WebOb
request/response objects instead of the analogous AppEngine objects.
"""
def __init__(self, robot):
self._robot = robot
def capabilities(self):
xml = self._robot.GetCapabilitiesXml()
response = webob.Response(content_type='text/xml', body=xml)
response.cache_control = 'Private' # XXX
return response
def profile(self):
xml = self._robot.GetProfileJson()
response = webob.Response(content_type='application/json', body=xml)
response.cache_control = 'Private' # XXX
return response
def jsonrpc(self, req):
json_body = req.body
logging.info('Incoming: %s', json_body)
context, events = robot_abstract.ParseJSONBody(json_body)
for event in events:
self._robot.HandleEvent(event, context)
json_response = robot_abstract.SerializeContext(
context, self._robot.version)
logging.info('Outgoing: %s', json_response)
return webob.Response(content_type='application/json',
body=json_response)
def __call__(self, environ, start_response):
req = webob.Request(environ)
if req.path_info == '/_wave/capabilities.xml' and req.method == 'GET':
response = self.capabilities()
elif req.path_info == '/_wave/robot/profile' and req.method == 'GET':
response = self.profile()
elif req.path_info == '/_wave/robot/jsonrpc' and req.method == 'POST':
response = self.jsonrpc(req)
else:
response = webob.exc.HTTPNotFound()
return response(environ, start_response)
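# A hedged wiring sketch (not part of the original module). ``robot`` is
# assumed to be any object providing the interface SimpleRobotApp expects
# (GetCapabilitiesXml, GetProfileJson, HandleEvent and a ``version``
# attribute); ``main_app`` is whatever WSGI application serves the rest of
# the site.
def build_app(robot, main_app):
  """Route /_wave/ requests to the robot and everything else to main_app."""
  return RobotMiddleware(SimpleRobotApp(robot), main_app)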
| alexisvincent/downy | app.py | Python | apache-2.0 | 2,858 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kuryr.schemata import commons
ENDPOINT_DELETE_SCHEMA = {
u'links': [{
u'method': u'POST',
u'href': u'/NetworkDriver.DeleteEndpoint',
u'description': u'Delete an Endpoint',
u'rel': u'self',
u'title': u'Delete'
}],
u'title': u'Delete endpoint',
u'required': [u'NetworkID', u'EndpointID'],
u'definitions': {u'commons': {}},
u'$schema': u'http://json-schema.org/draft-04/hyper-schema',
u'type': u'object',
u'properties': {
u'NetworkID': {
u'description': u'Network ID',
u'$ref': u'#/definitions/commons/definitions/id'
},
u'EndpointID': {
u'description': u'Endpoint ID',
u'$ref': u'#/definitions/commons/definitions/id'
}
}
}
ENDPOINT_DELETE_SCHEMA[u'definitions'][u'commons'] = commons.COMMONS
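# A hedged usage sketch (not part of the original module): checking a payload
# against the schema with the third-party ``jsonschema`` package, matching the
# draft-04 dialect declared in ``$schema``. The payload values are made up.
if __name__ == '__main__':
    import jsonschema
    payload = {u'NetworkID': u'a' * 64, u'EndpointID': u'b' * 64}
    validator = jsonschema.Draft4Validator(ENDPOINT_DELETE_SCHEMA)
    for error in validator.iter_errors(payload):
        print(error.message)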
| midonet/kuryr | kuryr/schemata/endpoint_delete.py | Python | apache-2.0 | 1,400 |
"""Tests for greeneye_monitor sensors."""
from unittest.mock import AsyncMock, MagicMock
from homeassistant.components.greeneye_monitor.sensor import (
DATA_PULSES,
DATA_WATT_SECONDS,
)
from homeassistant.const import STATE_UNKNOWN
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_registry import async_get as get_entity_registry
from .common import (
SINGLE_MONITOR_CONFIG_POWER_SENSORS,
SINGLE_MONITOR_CONFIG_PULSE_COUNTERS,
SINGLE_MONITOR_CONFIG_TEMPERATURE_SENSORS,
SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS,
SINGLE_MONITOR_SERIAL_NUMBER,
mock_monitor,
setup_greeneye_monitor_component_with_config,
)
from .conftest import assert_sensor_state
async def test_disable_sensor_before_monitor_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that a sensor disabled before its monitor connected stops listening for new monitors."""
# The sensor base class handles connecting the monitor, so we test this with a single voltage sensor for ease
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
assert len(monitors.listeners) == 1
await disable_entity(hass, "sensor.voltage_1")
assert len(monitors.listeners) == 0 # Make sure we cleaned up the listener
async def test_updates_state_when_monitor_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that a sensor updates its state when its monitor first connects."""
# The sensor base class handles updating the state on connection, so we test this with a single voltage sensor for ease
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
assert_sensor_state(hass, "sensor.voltage_1", STATE_UNKNOWN)
assert len(monitors.listeners) == 1
connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert len(monitors.listeners) == 0 # Make sure we cleaned up the listener
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
async def test_disable_sensor_after_monitor_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that a sensor disabled after its monitor connected stops listening for sensor changes."""
# The sensor base class handles connecting the monitor, so we test this with a single voltage sensor for ease
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
monitor = connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert len(monitor.listeners) == 1
await disable_entity(hass, "sensor.voltage_1")
assert len(monitor.listeners) == 0
async def test_updates_state_when_sensor_pushes(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that a sensor entity updates its state when the underlying sensor pushes an update."""
# The sensor base class handles triggering state updates, so we test this with a single voltage sensor for ease
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
monitor = connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
monitor.voltage = 119.8
monitor.notify_all_listeners()
assert_sensor_state(hass, "sensor.voltage_1", "119.8")
async def test_power_sensor_initially_unknown(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that the power sensor can handle its initial state being unknown (since the GEM API needs at least two packets to arrive before it can compute watts)."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_POWER_SENSORS
)
connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(
hass, "sensor.channel_1", STATE_UNKNOWN, {DATA_WATT_SECONDS: 1000}
)
# This sensor was configured with net metering on, so we should be taking the
# polarized value
assert_sensor_state(
hass, "sensor.channel_two", STATE_UNKNOWN, {DATA_WATT_SECONDS: -400}
)
async def test_power_sensor(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a power sensor reports its values correctly, including handling net metering."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_POWER_SENSORS
)
monitor = connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
monitor.channels[0].watts = 120.0
monitor.channels[1].watts = 120.0
monitor.channels[0].notify_all_listeners()
monitor.channels[1].notify_all_listeners()
assert_sensor_state(hass, "sensor.channel_1", "120.0", {DATA_WATT_SECONDS: 1000})
# This sensor was configured with net metering on, so we should be taking the
# polarized value
assert_sensor_state(hass, "sensor.channel_two", "120.0", {DATA_WATT_SECONDS: -400})
async def test_pulse_counter(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a pulse counter sensor reports its values properly, including calculating different units."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_PULSE_COUNTERS
)
connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(hass, "sensor.pulse_a", "10.0", {DATA_PULSES: 1000})
# This counter was configured with each pulse meaning 0.5 gallons and
# wanting to show gallons per minute, so 10 pulses per second -> 300 gal/min
assert_sensor_state(hass, "sensor.pulse_2", "300.0", {DATA_PULSES: 1000})
# This counter was configured with each pulse meaning 0.5 gallons and
# wanting to show gallons per hour, so 10 pulses per second -> 18000 gal/hr
assert_sensor_state(hass, "sensor.pulse_3", "18000.0", {DATA_PULSES: 1000})
async def test_temperature_sensor(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a temperature sensor reports its values properly, including proper handling of when its native unit is different from that configured in hass."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_TEMPERATURE_SENSORS
)
connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
# The config says that the sensor is reporting in Fahrenheit; if we set that up
# properly, HA will have converted that to Celsius by default.
assert_sensor_state(hass, "sensor.temp_a", "0.0")
async def test_voltage_sensor(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a voltage sensor reports its values properly."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
connect_monitor(monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
def connect_monitor(monitors: AsyncMock, serial_number: int) -> MagicMock:
"""Simulate a monitor connecting to Home Assistant. Returns the mock monitor API object."""
monitor = mock_monitor(serial_number)
monitors.add_monitor(monitor)
return monitor
async def disable_entity(hass: HomeAssistant, entity_id: str) -> None:
"""Disable the given entity."""
entity_registry = get_entity_registry(hass)
entity_registry.async_update_entity(entity_id, disabled_by="user")
await hass.async_block_till_done()
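# A hedged sketch (not part of the original suite) of what the imported
# assert_sensor_state helper is assumed to do; the real implementation lives
# in .conftest and may differ in details.
def _assert_sensor_state_sketch(
    hass: HomeAssistant, entity_id: str, expected_state: str, attributes=None
) -> None:
    """Assert an entity's state and, optionally, a subset of its attributes."""
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.state == expected_state
    for key, value in (attributes or {}).items():
        assert state.attributes[key] == value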
| jawilson/home-assistant | tests/components/greeneye_monitor/test_sensor.py | Python | apache-2.0 | 7,421 |
from migrate.changeset import UniqueConstraint
from migrate import ForeignKeyConstraint
from sqlalchemy import Boolean, BigInteger, Column, DateTime, Enum, Float
from sqlalchemy import dialects
from sqlalchemy import ForeignKey, Index, Integer, MetaData, String, Table
from sqlalchemy import Text
from sqlalchemy.types import NullType
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
    node_info = Table('node_info', meta,
                      Column('created_at', DateTime),
                      Column('updated_at', DateTime),
                      Column('deleted_at', DateTime),
                      Column('deleted', Integer),
                      Column('id', Integer, primary_key=True, nullable=False),
                      Column('node_id', Integer, nullable=False),
                      Column('name', String(length=30), nullable=False),
                      Column('ip_addr', String(length=20)),
                      Column('hostname', String(length=255)),
                      mysql_engine='InnoDB',
                      mysql_charset='utf8'
                      )
    edge_info = Table('edge_info', meta,
                      Column('created_at', DateTime),
                      Column('updated_at', DateTime),
                      Column('deleted_at', DateTime),
                      Column('deleted', Integer),
                      Column('id', Integer, primary_key=True, nullable=False),
                      Column('start', Integer, nullable=False),
                      Column('end', Integer, nullable=False),
                      mysql_engine='InnoDB',
                      mysql_charset='utf8'
                      )
try:
node_info.create()
except Exception:
LOG.info(repr(node_info))
LOG.exception(_('Exception while creating table node_info.'))
raise
try:
edge_info.create()
except Exception:
LOG.info(repr(edge_info))
LOG.exception(_('Exception while creating table edge_info.'))
raise
    # TODO: create indices (a hedged sketch follows below)
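    # A hedged, commented-out sketch (not part of the original migration) of
    # what the index creation could look like; the column choices are
    # assumptions, so it is left inert on purpose:
    # Index('node_info_node_id_idx', node_info.c.node_id).create(migrate_engine)
    # Index('edge_info_start_idx', edge_info.c.start).create(migrate_engine)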
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
    node_info = Table('node_info', meta)
try:
node_info.drop()
except Exception:
LOG.info("Table node_info doesn't exist")
#LOG.info(repr(node_info))
#LOG.exception(_('Exception while deleting table node_info.'))
    edge_info = Table('edge_info', meta)
try:
edge_info.drop()
except Exception:
LOG.info("Table edge_info doesn't exist")
#LOG.info(repr(edge_info))
#LOG.exception(_('Exception while deleting table edge_info.'))
| ashepelev/TopologyWeigher | source/migrate_versions/243_topology_tables.py | Python | apache-2.0 | 2,616 |
import tornado.ioloop
from functools import partial
from tornado.testing import AsyncTestCase
from elasticsearch_tornado import ClusterClient
try:
# python 2.6
from unittest2 import TestCase, SkipTest
except ImportError:
from unittest import TestCase, SkipTest
class ClusterClientTest(AsyncTestCase):
def handle_cb(self, req, **kwargs):
if kwargs.get('codes'):
cl = [200, 201] + kwargs.get('codes')
self.assertTrue(req.code in cl)
else:
self.assertTrue(req.code in (200, 201, ))
self.stop()
def test_health(self):
c = ClusterClient()
c.cluster_health(callback=self.handle_cb)
self.wait()
def test_pending_tasks(self):
c = ClusterClient()
c.cluster_pending_tasks(callback=self.handle_cb)
self.wait()
def test_state(self):
c = ClusterClient()
c.cluster_state(callback=self.handle_cb)
self.wait()
def test_stats(self):
c = ClusterClient()
c.cluster_stats(callback=self.handle_cb)
self.wait()
def test_reroute(self):
c = ClusterClient()
h_cb = partial(
self.handle_cb,
**{'codes':[400, 404]}
)
body = """
{
"commands" : [ {
"move" :
{
"index" : "test", "shard" : 0,
"from_node" : "node1", "to_node" : "node2"
}
},
{
"allocate" : {
"index" : "test", "shard" : 1, "node" : "node3"
}
}
]
}
"""
c.cluster_reroute(body, callback=h_cb)
self.wait()
def test_get_settings(self):
c = ClusterClient()
c.cluster_get_settings(callback=self.handle_cb)
self.wait()
def test_put_settings(self):
c = ClusterClient()
body = """
{
"persistent" : {
"discovery.zen.minimum_master_nodes" : 1
}
}
"""
c.cluster_put_settings(body, callback=self.handle_cb)
self.wait()
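# A hedged convenience entry point (not part of the original module) so the
# suite can be run directly with the standard unittest runner; it assumes an
# Elasticsearch node is reachable at ClusterClient's default host and port.
if __name__ == '__main__':
    import unittest
    unittest.main()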
| hodgesds/elasticsearch_tornado | tests/test_cluster.py | Python | apache-2.0 | 2,219 |
# -*- coding: utf-8 -*-
# Copyright (C) 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.apidoc',
'openstackdocstheme',
]
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/oslo.privsep'
openstackdocs_bug_project = 'oslo.privsep'
openstackdocs_bug_tag = ''
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = u'2014, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'openstackdocs'
# -- sphinxcontrib.apidoc configuration --------------------------------------
apidoc_module_dir = '../../oslo_privsep'
apidoc_output_dir = 'reference/api'
apidoc_excluded_paths = [
'tests',
]
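# A hedged build sketch (not part of the original config): with Sphinx, the
# openstackdocs theme and the extensions listed above installed, the HTML
# docs are typically built from the repository root with something like
#
#   sphinx-build -W -b html doc/source doc/build/html
#
# (-W turns warnings into errors, matching the usual OpenStack docs gate).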
| openstack/oslo.privsep | doc/source/conf.py | Python | apache-2.0 | 2,044 |
from colordetection import *
topColors(992780587437103)
| PTAug/fashion-analytics | fashion-analytics/image-processing/testcolor.py | Python | apache-2.0 | 56 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import logging
import random
import time
from oslo.config import cfg
import six
from sahara.openstack.common._i18n import _, _LE, _LI
periodic_opts = [
cfg.BoolOpt('run_external_periodic_tasks',
default=True,
help='Some periodic tasks can be run in a separate process. '
'Should we run them here?'),
]
CONF = cfg.CONF
CONF.register_opts(periodic_opts)
LOG = logging.getLogger(__name__)
DEFAULT_INTERVAL = 60.0
def list_opts():
"""Entry point for oslo.config-generator."""
return [(None, copy.deepcopy(periodic_opts))]
class InvalidPeriodicTaskArg(Exception):
message = _("Unexpected argument for periodic task creation: %(arg)s.")
def periodic_task(*args, **kwargs):
"""Decorator to indicate that a method is a periodic task.
This decorator can be used in two ways:
1. Without arguments '@periodic_task', this will be run on the default
interval of 60 seconds.
2. With arguments:
    @periodic_task(spacing=N [, run_immediately=[True|False]]
    [, name=[None|"string"]])
    this will be run approximately every N seconds. If this number is
    negative the periodic task will be disabled. If the run_immediately
    argument is provided and has a value of 'True', the first run of the
    task will be shortly after the task scheduler starts. If
    run_immediately is omitted or set to 'False', the first time the
    task runs will be approximately N seconds after the task scheduler
    starts. If name is not provided, the function's __name__ is used.
    A usage sketch of both forms appears at the end of this module.
    """
def decorator(f):
# Test for old style invocation
if 'ticks_between_runs' in kwargs:
raise InvalidPeriodicTaskArg(arg='ticks_between_runs')
# Control if run at all
f._periodic_task = True
f._periodic_external_ok = kwargs.pop('external_process_ok', False)
if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
f._periodic_enabled = False
else:
f._periodic_enabled = kwargs.pop('enabled', True)
f._periodic_name = kwargs.pop('name', f.__name__)
# Control frequency
f._periodic_spacing = kwargs.pop('spacing', 0)
f._periodic_immediate = kwargs.pop('run_immediately', False)
if f._periodic_immediate:
f._periodic_last_run = None
else:
f._periodic_last_run = time.time()
return f
# NOTE(sirp): The `if` is necessary to allow the decorator to be used with
# and without parenthesis.
#
# In the 'with-parenthesis' case (with kwargs present), this function needs
# to return a decorator function since the interpreter will invoke it like:
#
# periodic_task(*args, **kwargs)(f)
#
# In the 'without-parenthesis' case, the original function will be passed
# in as the first argument, like:
#
# periodic_task(f)
if kwargs:
return decorator
else:
return decorator(args[0])
class _PeriodicTasksMeta(type):
def _add_periodic_task(cls, task):
"""Add a periodic task to the list of periodic tasks.
The task should already be decorated by @periodic_task.
:return: whether task was actually enabled
"""
name = task._periodic_name
if task._periodic_spacing < 0:
LOG.info(_LI('Skipping periodic task %(task)s because '
'its interval is negative'),
{'task': name})
return False
if not task._periodic_enabled:
LOG.info(_LI('Skipping periodic task %(task)s because '
'it is disabled'),
{'task': name})
return False
# A periodic spacing of zero indicates that this task should
# be run on the default interval to avoid running too
# frequently.
if task._periodic_spacing == 0:
task._periodic_spacing = DEFAULT_INTERVAL
cls._periodic_tasks.append((name, task))
cls._periodic_spacing[name] = task._periodic_spacing
return True
def __init__(cls, names, bases, dict_):
"""Metaclass that allows us to collect decorated periodic tasks."""
super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
# NOTE(sirp): if the attribute is not present then we must be the base
        # class, so go ahead and initialize it. If the attribute is present,
# then we're a subclass so make a copy of it so we don't step on our
# parent's toes.
try:
cls._periodic_tasks = cls._periodic_tasks[:]
except AttributeError:
cls._periodic_tasks = []
try:
cls._periodic_spacing = cls._periodic_spacing.copy()
except AttributeError:
cls._periodic_spacing = {}
for value in cls.__dict__.values():
if getattr(value, '_periodic_task', False):
cls._add_periodic_task(value)
def _nearest_boundary(last_run, spacing):
"""Find nearest boundary which is in the past, which is a multiple of the
spacing with the last run as an offset.
Eg if last run was 10 and spacing was 7, the new last run could be: 17, 24,
31, 38...
0% to 5% of the spacing value will be added to this value to ensure tasks
do not synchronize. This jitter is rounded to the nearest second, this
means that spacings smaller than 20 seconds will not have jitter.
"""
current_time = time.time()
if last_run is None:
return current_time
delta = current_time - last_run
offset = delta % spacing
# Add up to 5% jitter
jitter = int(spacing * (random.random() / 20))
return current_time - offset + jitter
@six.add_metaclass(_PeriodicTasksMeta)
class PeriodicTasks(object):
def __init__(self):
super(PeriodicTasks, self).__init__()
self._periodic_last_run = {}
for name, task in self._periodic_tasks:
self._periodic_last_run[name] = task._periodic_last_run
def add_periodic_task(self, task):
"""Add a periodic task to the list of periodic tasks.
The task should already be decorated by @periodic_task.
"""
if self.__class__._add_periodic_task(task):
self._periodic_last_run[task._periodic_name] = (
task._periodic_last_run)
def run_periodic_tasks(self, context, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
idle_for = DEFAULT_INTERVAL
for task_name, task in self._periodic_tasks:
full_task_name = '.'.join([self.__class__.__name__, task_name])
spacing = self._periodic_spacing[task_name]
last_run = self._periodic_last_run[task_name]
# Check if due, if not skip
idle_for = min(idle_for, spacing)
if last_run is not None:
delta = last_run + spacing - time.time()
if delta > 0:
idle_for = min(idle_for, delta)
continue
LOG.debug("Running periodic task %(full_task_name)s",
{"full_task_name": full_task_name})
self._periodic_last_run[task_name] = _nearest_boundary(
last_run, spacing)
try:
task(self, context)
except Exception as e:
if raise_on_error:
raise
LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"),
{"full_task_name": full_task_name, "e": e})
time.sleep(0)
return idle_for
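# A hedged usage sketch (not part of the original module) showing both
# decorator forms described in the periodic_task docstring; the manager class
# and task bodies are hypothetical, and nothing here runs on import.
if __name__ == '__main__':
    class _ExampleManager(PeriodicTasks):
        @periodic_task
        def _default_interval_task(self, context):
            LOG.debug('runs on the default %s second interval',
                      DEFAULT_INTERVAL)
        @periodic_task(spacing=10, run_immediately=True)
        def _frequent_task(self, context):
            LOG.debug('runs roughly every 10 seconds, starting immediately')
    manager = _ExampleManager()
    # run_periodic_tasks returns how long the caller may sleep before the
    # next task is due.
    idle_for = manager.run_periodic_tasks(context=None)
    LOG.debug('next periodic task due in at most %.1f seconds', idle_for)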
| esikachev/scenario | sahara/openstack/common/periodic_task.py | Python | apache-2.0 | 8,319 |
"""Tests for the SmartThings component init module."""
from uuid import uuid4
from aiohttp import ClientConnectionError, ClientResponseError
from asynctest import Mock, patch
from pysmartthings import InstalledAppStatus, OAuthToken
import pytest
from homeassistant.components import cloud, smartthings
from homeassistant.components.smartthings.const import (
CONF_CLOUDHOOK_URL, CONF_INSTALLED_APP_ID, CONF_REFRESH_TOKEN,
DATA_BROKERS, DOMAIN, EVENT_BUTTON, SIGNAL_SMARTTHINGS_UPDATE,
SUPPORTED_PLATFORMS)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from tests.common import MockConfigEntry
async def test_migration_creates_new_flow(
hass, smartthings_mock, config_entry):
"""Test migration deletes app and creates new flow."""
config_entry.version = 1
config_entry.add_to_hass(hass)
await smartthings.async_migrate_entry(hass, config_entry)
await hass.async_block_till_done()
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
assert not hass.config_entries.async_entries(DOMAIN)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
assert flows[0]['handler'] == 'smartthings'
assert flows[0]['context'] == {'source': 'import'}
async def test_unrecoverable_api_errors_create_new_flow(
hass, config_entry, smartthings_mock):
"""
Test a new config flow is initiated when there are API errors.
401 (unauthorized): Occurs when the access token is no longer valid.
403 (forbidden/not found): Occurs when the app or installed app could
not be retrieved/found (likely deleted?)
"""
config_entry.add_to_hass(hass)
smartthings_mock.app.side_effect = \
ClientResponseError(None, None, status=401)
# Assert setup returns false
result = await smartthings.async_setup_entry(hass, config_entry)
assert not result
# Assert entry was removed and new flow created
await hass.async_block_till_done()
assert not hass.config_entries.async_entries(DOMAIN)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
assert flows[0]['handler'] == 'smartthings'
assert flows[0]['context'] == {'source': 'import'}
hass.config_entries.flow.async_abort(flows[0]['flow_id'])
async def test_recoverable_api_errors_raise_not_ready(
hass, config_entry, smartthings_mock):
"""Test config entry not ready raised for recoverable API errors."""
config_entry.add_to_hass(hass)
smartthings_mock.app.side_effect = \
ClientResponseError(None, None, status=500)
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_scenes_api_errors_raise_not_ready(
hass, config_entry, app, installed_app, smartthings_mock):
"""Test if scenes are unauthorized we continue to load platforms."""
config_entry.add_to_hass(hass)
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.scenes.side_effect = \
ClientResponseError(None, None, status=500)
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_connection_errors_raise_not_ready(
hass, config_entry, smartthings_mock):
"""Test config entry not ready raised for connection errors."""
config_entry.add_to_hass(hass)
smartthings_mock.app.side_effect = ClientConnectionError()
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_base_url_no_longer_https_does_not_load(
hass, config_entry, app, smartthings_mock):
"""Test base_url no longer valid creates a new flow."""
hass.config.api.base_url = 'http://0.0.0.0'
config_entry.add_to_hass(hass)
smartthings_mock.app.return_value = app
# Assert setup returns false
result = await smartthings.async_setup_entry(hass, config_entry)
assert not result
async def test_unauthorized_installed_app_raises_not_ready(
hass, config_entry, app, installed_app,
smartthings_mock):
"""Test config entry not ready raised when the app isn't authorized."""
config_entry.add_to_hass(hass)
installed_app.installed_app_status = InstalledAppStatus.PENDING
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_scenes_unauthorized_loads_platforms(
hass, config_entry, app, installed_app,
device, smartthings_mock, subscription_factory):
"""Test if scenes are unauthorized we continue to load platforms."""
config_entry.add_to_hass(hass)
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.devices.return_value = [device]
smartthings_mock.scenes.side_effect = \
ClientResponseError(None, None, status=403)
mock_token = Mock()
mock_token.access_token.return_value = str(uuid4())
mock_token.refresh_token.return_value = str(uuid4())
smartthings_mock.generate_tokens.return_value = mock_token
subscriptions = [subscription_factory(capability)
for capability in device.capabilities]
smartthings_mock.subscriptions.return_value = subscriptions
with patch.object(hass.config_entries,
'async_forward_entry_setup') as forward_mock:
assert await smartthings.async_setup_entry(hass, config_entry)
# Assert platforms loaded
await hass.async_block_till_done()
assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_config_entry_loads_platforms(
hass, config_entry, app, installed_app,
device, smartthings_mock, subscription_factory, scene):
"""Test config entry loads properly and proxies to platforms."""
config_entry.add_to_hass(hass)
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.devices.return_value = [device]
smartthings_mock.scenes.return_value = [scene]
mock_token = Mock()
mock_token.access_token.return_value = str(uuid4())
mock_token.refresh_token.return_value = str(uuid4())
smartthings_mock.generate_tokens.return_value = mock_token
subscriptions = [subscription_factory(capability)
for capability in device.capabilities]
smartthings_mock.subscriptions.return_value = subscriptions
with patch.object(hass.config_entries,
'async_forward_entry_setup') as forward_mock:
assert await smartthings.async_setup_entry(hass, config_entry)
# Assert platforms loaded
await hass.async_block_till_done()
assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_config_entry_loads_unconnected_cloud(
hass, config_entry, app, installed_app,
device, smartthings_mock, subscription_factory, scene):
"""Test entry loads during startup when cloud isn't connected."""
config_entry.add_to_hass(hass)
hass.data[DOMAIN][CONF_CLOUDHOOK_URL] = "https://test.cloud"
hass.config.api.base_url = 'http://0.0.0.0'
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.devices.return_value = [device]
smartthings_mock.scenes.return_value = [scene]
mock_token = Mock()
mock_token.access_token.return_value = str(uuid4())
mock_token.refresh_token.return_value = str(uuid4())
smartthings_mock.generate_tokens.return_value = mock_token
subscriptions = [subscription_factory(capability)
for capability in device.capabilities]
smartthings_mock.subscriptions.return_value = subscriptions
with patch.object(
hass.config_entries, 'async_forward_entry_setup') as forward_mock:
assert await smartthings.async_setup_entry(hass, config_entry)
await hass.async_block_till_done()
assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_unload_entry(hass, config_entry):
"""Test entries are unloaded correctly."""
connect_disconnect = Mock()
smart_app = Mock()
smart_app.connect_event.return_value = connect_disconnect
broker = smartthings.DeviceBroker(
hass, config_entry, Mock(), smart_app, [], [])
broker.connect()
hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id] = broker
with patch.object(hass.config_entries, 'async_forward_entry_unload',
return_value=True) as forward_mock:
assert await smartthings.async_unload_entry(hass, config_entry)
assert connect_disconnect.call_count == 1
assert config_entry.entry_id not in hass.data[DOMAIN][DATA_BROKERS]
# Assert platforms unloaded
await hass.async_block_till_done()
assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_remove_entry(hass, config_entry, smartthings_mock):
"""Test that the installed app and app are removed up."""
# Act
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_remove_entry_cloudhook(hass, config_entry, smartthings_mock):
"""Test that the installed app, app, and cloudhook are removed up."""
# Arrange
config_entry.add_to_hass(hass)
hass.data[DOMAIN][CONF_CLOUDHOOK_URL] = "https://test.cloud"
# Act
with patch.object(cloud, 'async_is_logged_in',
return_value=True) as mock_async_is_logged_in, \
patch.object(cloud, 'async_delete_cloudhook') \
as mock_async_delete_cloudhook:
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
assert mock_async_is_logged_in.call_count == 1
assert mock_async_delete_cloudhook.call_count == 1
async def test_remove_entry_app_in_use(hass, config_entry, smartthings_mock):
"""Test app is not removed if in use by another config entry."""
# Arrange
config_entry.add_to_hass(hass)
data = config_entry.data.copy()
data[CONF_INSTALLED_APP_ID] = str(uuid4())
entry2 = MockConfigEntry(version=2, domain=DOMAIN, data=data)
entry2.add_to_hass(hass)
# Act
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 0
async def test_remove_entry_already_deleted(
hass, config_entry, smartthings_mock):
"""Test handles when the apps have already been removed."""
# Arrange
smartthings_mock.delete_installed_app.side_effect = ClientResponseError(
None, None, status=403)
smartthings_mock.delete_app.side_effect = ClientResponseError(
None, None, status=403)
# Act
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_remove_entry_installedapp_api_error(
hass, config_entry, smartthings_mock):
"""Test raises exceptions removing the installed app."""
# Arrange
smartthings_mock.delete_installed_app.side_effect = \
ClientResponseError(None, None, status=500)
# Act
with pytest.raises(ClientResponseError):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 0
async def test_remove_entry_installedapp_unknown_error(
hass, config_entry, smartthings_mock):
"""Test raises exceptions removing the installed app."""
# Arrange
smartthings_mock.delete_installed_app.side_effect = Exception
# Act
with pytest.raises(Exception):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 0
async def test_remove_entry_app_api_error(
hass, config_entry, smartthings_mock):
"""Test raises exceptions removing the app."""
# Arrange
smartthings_mock.delete_app.side_effect = \
ClientResponseError(None, None, status=500)
# Act
with pytest.raises(ClientResponseError):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_remove_entry_app_unknown_error(
hass, config_entry, smartthings_mock):
"""Test raises exceptions removing the app."""
# Arrange
smartthings_mock.delete_app.side_effect = Exception
# Act
with pytest.raises(Exception):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_broker_regenerates_token(
hass, config_entry):
"""Test the device broker regenerates the refresh token."""
token = Mock(OAuthToken)
token.refresh_token = str(uuid4())
stored_action = None
def async_track_time_interval(hass, action, interval):
nonlocal stored_action
stored_action = action
with patch('homeassistant.components.smartthings'
'.async_track_time_interval',
new=async_track_time_interval):
broker = smartthings.DeviceBroker(
hass, config_entry, token, Mock(), [], [])
broker.connect()
assert stored_action
await stored_action(None) # pylint:disable=not-callable
assert token.refresh.call_count == 1
assert config_entry.data[CONF_REFRESH_TOKEN] == token.refresh_token
async def test_event_handler_dispatches_updated_devices(
hass, config_entry, device_factory, event_request_factory,
event_factory):
"""Test the event handler dispatches updated devices."""
devices = [
device_factory('Bedroom 1 Switch', ['switch']),
device_factory('Bathroom 1', ['switch']),
device_factory('Sensor', ['motionSensor']),
device_factory('Lock', ['lock'])
]
device_ids = [devices[0].device_id, devices[1].device_id,
devices[2].device_id, devices[3].device_id]
event = event_factory(devices[3].device_id, capability='lock',
attribute='lock', value='locked',
data={'codeId': '1'})
request = event_request_factory(device_ids=device_ids, events=[event])
config_entry.data[CONF_INSTALLED_APP_ID] = request.installed_app_id
called = False
def signal(ids):
nonlocal called
called = True
assert device_ids == ids
async_dispatcher_connect(hass, SIGNAL_SMARTTHINGS_UPDATE, signal)
broker = smartthings.DeviceBroker(
hass, config_entry, Mock(), Mock(), devices, [])
broker.connect()
# pylint:disable=protected-access
await broker._event_handler(request, None, None)
await hass.async_block_till_done()
assert called
for device in devices:
assert device.status.values['Updated'] == 'Value'
assert devices[3].status.attributes['lock'].value == 'locked'
assert devices[3].status.attributes['lock'].data == {'codeId': '1'}
async def test_event_handler_ignores_other_installed_app(
hass, config_entry, device_factory, event_request_factory):
"""Test the event handler dispatches updated devices."""
device = device_factory('Bedroom 1 Switch', ['switch'])
request = event_request_factory([device.device_id])
called = False
def signal(ids):
nonlocal called
called = True
async_dispatcher_connect(hass, SIGNAL_SMARTTHINGS_UPDATE, signal)
broker = smartthings.DeviceBroker(
hass, config_entry, Mock(), Mock(), [device], [])
broker.connect()
# pylint:disable=protected-access
await broker._event_handler(request, None, None)
await hass.async_block_till_done()
assert not called
async def test_event_handler_fires_button_events(
hass, config_entry, device_factory, event_factory,
event_request_factory):
"""Test the event handler fires button events."""
device = device_factory('Button 1', ['button'])
event = event_factory(device.device_id, capability='button',
attribute='button', value='pushed')
request = event_request_factory(events=[event])
config_entry.data[CONF_INSTALLED_APP_ID] = request.installed_app_id
called = False
def handler(evt):
nonlocal called
called = True
assert evt.data == {
'component_id': 'main',
'device_id': device.device_id,
'location_id': event.location_id,
'value': 'pushed',
'name': device.label,
'data': None
}
hass.bus.async_listen(EVENT_BUTTON, handler)
broker = smartthings.DeviceBroker(
hass, config_entry, Mock(), Mock(), [device], [])
broker.connect()
# pylint:disable=protected-access
await broker._event_handler(request, None, None)
await hass.async_block_till_done()
assert called
| jabesq/home-assistant | tests/components/smartthings/test_init.py | Python | apache-2.0 | 17,803 |