Dataset schema (⌀ = column may contain null values):

| Column | Type | Lengths / range |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3 to 239 |
| max_stars_repo_name | string | lengths 5 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | sequence | lengths 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | lengths 24 to 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | lengths 24 to 24 |
| max_issues_repo_path | string | lengths 3 to 239 |
| max_issues_repo_name | string | lengths 5 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | sequence | lengths 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | lengths 24 to 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | lengths 24 to 24 |
| max_forks_repo_path | string | lengths 3 to 239 |
| max_forks_repo_name | string | lengths 5 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | sequence | lengths 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | lengths 24 to 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | lengths 24 to 24 |
| content | string | lengths 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
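Each row below bundles one source file (the `content` column) with its repository metadata and per-file statistics. As a minimal sketch, rows with this schema could be iterated with the Hugging Face `datasets` library; the dataset path used here is a hypothetical placeholder, since this dump does not name the dataset:

```python
from datasets import load_dataset

# Hypothetical placeholder path; the dump does not name the dataset.
ds = load_dataset("example-org/python-code-dump", split="train", streaming=True)

for row in ds:
    # One row per source file: repo metadata plus the raw file text.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:80])
    break
```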
hexsha: 79447be28526dcdd7e5a596d76eb4b32887b0abb | size: 423 | ext: py | lang: Python
repo (stars/issues/forks): JakubOnderka/misp-warninglists | path: tools/make_list_unique.py | head: 78590f35269658cceb009d0fe282d5274e74ba89 | licenses: ["CC0-1.0"]
stars: 18 (2020-10-05T11:15:37.000Z to 2022-01-06T08:01:59.000Z) | issues: 1 (2021-05-04T12:40:30.000Z to 2021-05-04T12:40:30.000Z) | forks: 8 (2020-09-08T12:04:12.000Z to 2022-01-29T12:18:54.000Z)
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from pathlib import Path
import json

for p in Path('../lists/').glob('*/*.json'):
    with p.open() as _f:
        warninglist = json.load(_f, encoding="utf-8")

    warninglist['list'] = sorted(list(set(warninglist['list'])))

    with p.open('w') as _f:
        warninglist = json.dump(warninglist, _f, indent=2, sort_keys=True, ensure_ascii=False)
        _f.write('\n')
avg_line_length: 26.4375 | max_line_length: 94 | alphanum_fraction: 0.614657
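The `avg_line_length`, `max_line_length` and `alphanum_fraction` values attached to each row summarize that row's `content` field. A minimal sketch of how such per-row statistics are plausibly computed (the exact definitions used by the dataset, e.g. how trailing newlines or whitespace are counted, are an assumption here):

```python
def row_stats(content: str):
    """Recompute the per-row statistics columns from a file's text.

    Assumed definitions: average and maximum line length over
    newline-split lines, and the fraction of characters that are
    alphanumeric.
    """
    lines = content.splitlines()
    avg_line_length = sum(len(line) for line in lines) / max(len(lines), 1)
    max_line_length = max((len(line) for line in lines), default=0)
    alphanum_fraction = sum(c.isalnum() for c in content) / max(len(content), 1)
    return avg_line_length, max_line_length, alphanum_fraction
```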
hexsha: 79447d4baf44e046698b3dc13e6c4f32a604d924 | size: 9,722 | ext: py | lang: Python
repo (stars/issues/forks): mashal02/graphs-dict | path: graphtheory/structures/tests/test_graphs.py | head: 39917d8a7f3bdcd5d95f3549ca054d16ba535e90 | licenses: ["BSD-3-Clause"]
stars: 36 (2015-09-20T20:55:39.000Z to 2021-09-20T05:49:03.000Z) | issues: 6 (2016-03-25T21:41:46.000Z to 2020-02-12T03:18:59.000Z) | forks: 9 (2016-09-12T07:57:27.000Z to 2022-03-21T16:15:39.000Z)
content:
#!/usr/bin/python
import unittest
from graphtheory.structures.edges import Edge
from graphtheory.structures.graphs import Graph
# A --o B
# o / o
# | / |
# | o |
# C --o D
class TestGraphDirected(unittest.TestCase):
def setUp(self):
self.N = 4 # number of nodes
self.G = Graph(self.N, directed=True)
self.nodes = ["A", "B", "C", "D"]
self.edges = [
Edge("A", "B", 2), Edge("B", "C", 4), Edge("C", "A", 6),
Edge("C", "D", 3), Edge("D", "B", 5)]
for node in self.nodes:
self.G.add_node(node)
for edge in self.edges:
self.G.add_edge(edge)
#self.G.show()
def test_directed(self):
self.assertTrue(self.G.is_directed())
self.assertEqual(self.G.v(), self.N)
self.assertEqual(self.G.e(), 5)
self.G.del_node("B")
self.assertEqual(self.G.v(), 3)
self.assertEqual(self.G.e(), 2)
self.assertTrue(self.G.has_edge(("C", "A")))
self.assertEqual(self.G.weight(("C", "A")), 6)
self.G.del_edge(("C", "A"))
self.assertFalse(self.G.has_edge(("C", "A")))
def test_cmp(self):
T = Graph(self.N)
self.assertFalse(T == self.G, "directed and undirected graphs")
T = Graph(self.N, directed=True)
for node in ["A", "B", "C", "X"]:
T.add_node(node)
self.assertFalse(T == self.G, "nodes are different")
T.del_node("X")
self.assertFalse(T == self.G, "numbers of nodes are different")
T.add_node("D")
T.add_edge(Edge("A", "B", 2))
T.add_edge(Edge("B", "C", 4))
T.add_edge(Edge("C", "A", 6))
T.add_edge(Edge("C", "D", 3))
self.assertFalse(T == self.G, "edge numbers are different")
T.add_edge(Edge("D", "B", 7))
self.assertFalse(T == self.G, "edge weights are different")
T.del_edge(Edge("D", "B", 7))
T.add_edge(Edge("B", "D", 5))
self.assertFalse(T == self.G, "edge directions are different")
T.del_edge(Edge("B", "D", 5))
T.add_edge(Edge("D", "B", 5))
self.assertTrue(T == self.G, "graphs are the same")
def test_iteredges(self):
inedges_B = list(self.G.iterinedges("B"))
outedges_B = list(self.G.iteroutedges("B"))
#print inedges_B, outedges_B
self.assertEqual(len(inedges_B), 2)
self.assertEqual(len(outedges_B), 1)
def test_copy(self):
T = self.G.copy()
self.assertEqual(T.v(), self.G.v())
self.assertEqual(T.e(), self.G.e())
for node in T.iternodes():
self.assertTrue(self.G.has_node(node))
for edge in T.iteredges():
self.assertTrue(self.G.has_edge(edge))
def test_transpose(self):
T = self.G.transpose()
self.assertEqual(T.v(), self.G.v())
self.assertEqual(T.e(), self.G.e())
for node in T.iternodes():
self.assertTrue(self.G.has_node(node))
for edge in T.iteredges():
self.assertTrue(self.G.has_edge(~edge))
def test_complement(self):
T = self.G.complement()
self.assertEqual(T.v(), self.G.v())
self.assertEqual(T.e(), self.N*(self.N-1) - self.G.e())
for node in T.iternodes():
self.assertTrue(self.G.has_node(node))
for edge in T.iteredges():
self.assertFalse(self.G.has_edge(edge))
for edge in self.G.iteredges():
self.assertFalse(T.has_edge(edge))
def test_subgraph(self):
T = self.G.subgraph(["A", "B", "C"])
self.assertEqual(T.v(), 3)
self.assertEqual(T.e(), 3)
for edge in T.iteredges():
self.assertTrue(self.G.has_edge(edge))
def test_add_graph_directed(self):
T = Graph(self.N, directed=True)
for node in self.nodes:
T.add_node(node)
T.add_edge(Edge("A", "D", 9))
self.assertEqual(T.v(), self.N)
self.assertEqual(T.e(), 1)
self.G.add_graph(T)
self.assertEqual(self.G.v(), self.N)
self.assertEqual(self.G.e(), 6)
def test_degree(self):
self.assertEqual(self.G.indegree("A"), 1)
self.assertEqual(self.G.indegree("B"), 2)
self.assertEqual(self.G.indegree("C"), 1)
self.assertEqual(self.G.indegree("D"), 1)
self.assertEqual(self.G.outdegree("A"), 1)
self.assertEqual(self.G.outdegree("B"), 1)
self.assertEqual(self.G.outdegree("C"), 2)
self.assertEqual(self.G.outdegree("D"), 1)
def test_exceptions(self):
self.assertRaises(ValueError, self.G.add_edge, Edge("A", "A", 1))
self.assertRaises(ValueError, self.G.add_edge, Edge("A", "B", 2))
self.assertRaises(ValueError, self.G.degree, "A")
def tearDown(self): pass
# A --- B
# | / |
# | / |
# C --- D
class TestGraphUndirected(unittest.TestCase):
def setUp(self):
self.N = 4 # number of nodes
self.G = Graph(self.N)
self.nodes = ["A", "B", "C", "D"]
self.edges = [
Edge("A", "B", 2), Edge("B", "C", 4), Edge("C", "A", 6),
Edge("C", "D", 3), Edge("D", "B", 5)]
for node in self.nodes:
self.G.add_node(node)
for edge in self.edges:
self.G.add_edge(edge)
#self.G.show()
def test_undirected(self):
self.assertFalse(self.G.is_directed())
self.assertEqual(self.G.v(), self.N)
self.assertEqual(self.G.e(), 5)
self.G.del_node("B")
self.assertEqual(self.G.v(), 3)
self.assertEqual(self.G.e(), 2)
self.assertTrue(self.G.has_edge(("C", "A")))
self.assertEqual(self.G.weight(("C", "A")), 6)
self.G.del_edge(("C", "A"))
self.assertFalse(self.G.has_edge(("C", "A")))
def test_iteredges(self):
inedges_B = list(self.G.iterinedges("B"))
outedges_B = list(self.G.iteroutedges("B"))
#print inedges_B, outedges_B
self.assertEqual(len(inedges_B), 3)
self.assertEqual(len(outedges_B), 3)
def test_iteredges_connected(self):
start_edge = next(self.G.iteredges())
A = set([start_edge.source, start_edge.target])
for edge in self.G.iteredges_connected(start_edge):
B = set([edge.source, edge.target])
self.assertTrue(len(A & B) > 0)
A.update(B)
#print ( A )
def test_copy(self):
T = self.G.copy()
self.assertEqual(T.v(), self.G.v())
self.assertEqual(T.e(), self.G.e())
for node in T.iternodes():
self.assertTrue(self.G.has_node(node))
for edge in T.iteredges():
self.assertTrue(self.G.has_edge(edge))
def test_transpose(self):
T = self.G.transpose()
self.assertEqual(T.v(), self.G.v())
self.assertEqual(T.e(), self.G.e())
for node in T.iternodes():
self.assertTrue(self.G.has_node(node))
for edge in T.iteredges():
self.assertTrue(self.G.has_edge(~edge))
def test_complement(self):
T = self.G.complement()
self.assertEqual(T.v(), self.G.v())
self.assertEqual(T.e(), self.N*(self.N-1)/2 - self.G.e())
for node in T.iternodes():
self.assertTrue(self.G.has_node(node))
for edge in T.iteredges():
self.assertFalse(self.G.has_edge(edge))
for edge in self.G.iteredges():
self.assertFalse(T.has_edge(edge))
def test_subgraph(self):
T = self.G.subgraph(["A", "B", "C"])
self.assertEqual(T.v(), 3)
self.assertEqual(T.e(), 3)
for edge in T.iteredges():
self.assertTrue(self.G.has_edge(edge))
def test_degree(self):
self.assertEqual(self.G.degree("A"), 2)
self.assertEqual(self.G.degree("B"), 3)
self.assertEqual(self.G.degree("C"), 3)
self.assertEqual(self.G.degree("D"), 2)
def test_add_graph_undirected(self):
T = Graph(self.N)
for node in self.nodes:
T.add_node(node)
T.add_edge(Edge("A", "D", 9))
self.assertEqual(T.v(), self.N)
self.assertEqual(T.e(), 1)
self.G.add_graph(T)
self.assertEqual(self.G.v(), self.N)
self.assertEqual(self.G.e(), 6)
def tearDown(self): pass
# 0-2-4-6
# | | | | ladder
# 1-3-5-7
class TestGraphLadder(unittest.TestCase):
def setUp(self):
self.N = 8 # number of nodes
self.G = Graph(self.N)
self.edges = [
Edge(0, 1, 2), Edge(0, 2, 1), Edge(2, 3, 5),
Edge(1, 3, 3), Edge(2, 4, 4), Edge(3, 5, 6), Edge(4, 6, 7),
Edge(4, 5, 8), Edge(5, 7, 9), Edge(6, 7, 10)]
for edge in self.edges:
self.G.add_edge(edge)
def test_basic(self):
self.assertFalse(self.G.is_directed())
self.assertEqual(self.G.v(), self.N)
self.assertEqual(self.G.e(), len(self.edges))
def test_edges(self):
for edge in self.edges:
self.assertTrue(self.G.has_edge(edge))
self.assertEqual(self.G.weight(edge), edge.weight)
self.assertFalse(self.G.has_edge(Edge(0, 3)))
self.assertEqual(self.G.weight(Edge(0, 3)), 0) # no edge
def test_del(self):
self.assertEqual(self.G.e(), 10)
self.G.del_node(7)
self.assertEqual(self.G.e(), 8)
self.G.del_node(2)
self.assertEqual(self.G.e(), 5)
def test_adjacent(self):
for node in self.G.iteradjacent(0):
self.assertTrue(node in [1, 2])
for node in self.G.iteradjacent(2):
self.assertTrue(node in [0, 3, 4])
def tearDown(self): pass
if __name__ == "__main__":
unittest.main()
# EOF
avg_line_length: 33.993007 | max_line_length: 73 | alphanum_fraction: 0.55215
hexsha: 79447d975c6669f51bb15d7a9140a2045ccd9224 | size: 36,756 | ext: py | lang: Python
repo (stars/issues/forks): Acidburn0zzz/luci | path: recipe_engine/recipe_api.py | head: d8993f4684839b58f5f966dd6273d1d8fd001eae | licenses: ["Apache-2.0"]
stars: 1 (2021-04-24T04:03:01.000Z to 2021-04-24T04:03:01.000Z) | issues: null | forks: null
content:
# Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from __future__ import absolute_import
import bisect
import contextlib
import copy
import hashlib
import inspect
import json
import keyword
import os
import re
import types
from functools import wraps
import attr
from six import iteritems
from google.protobuf import message
from .config_types import Path
from .internal import engine_step
from .internal.attr_util import attr_dict_type
from .internal.warn import escape
from .recipe_test_api import DisabledTestData, ModuleTestData
from .third_party import luci_context
from .third_party.logdog import streamname
from .third_party.logdog.bootstrap import ButlerBootstrap, NotBootstrappedError
from .types import StepPresentation, freeze, FrozenDict
from .util import ModuleInjectionSite
# TODO(iannucci): Rationalize the use of this in downstream scripts.
from .util import Placeholder # pylint: disable=unused-import
class UnknownRequirementError(object):
"""Raised by a requirement function when the referenced requirement is
unknown.
"""
def __init__(self, req):
super(UnknownRequirementError, self).__init__(
'Unknown requirement [%s]' % (req,))
self.typ = req._typ
self.name = req._name
class _UnresolvedRequirement(object):
"""Internal placeholder type for an unresolved module/recipe requirement."""
def __init__(self, typ, name):
self._typ = typ
self._name = name
def __str__(self):
return '%s:%s' % (self._typ, self._name)
def __getattr__(self, key):
raise AttributeError(
'Cannot reference [%s] in unresolved requirement [%s]' % (
key, str(self,)))
def __call__(self, *args, **kwargs):
raise AttributeError('Cannot call unresolved requirement [%s]' % (
str(self,)))
def RequireClient(name):
"""Returns: A dependency injection placeholder for a recipe engine client.
Recipes and Recipe APIs can call this function to install a placeholder for
the dependency injection of a recipe engine client. This dependency will be
noted by the recipe engine and resolved prior to recipe execution.
Clients are intended to be used to interface between the recipe engine and
low-level modules (e.g., "step"). As a general rule of thumb, higher-level
modules should not use clients and interface with the low-level modules
instead.
Recipe engine clients are referenced by name and resolved directly by the
recipe engine. Modules must require them as class member variables in their
recipe API subclass, and recipes must require them as top-level variables.
For example:
class MyCollRecipeApi(recipe_api.RecipeApi):
step_client = recipe_api.RequireClient('step')
def do_something(self):
self.step_client.whatever()
Args:
name (str): the name of the recipe engine client to install.
"""
return _UnresolvedRequirement('client', name)
@attr.s(frozen=True, slots=True)
class LUCIContextClient(object):
"""A recipe engine client which reads/writes the LUCI_CONTEXT."""
IDENT = 'lucictx'
ENV_KEY = luci_context.ENV_KEY
initial_context = attr.ib(validator=attr_dict_type(str, (dict, FrozenDict)),
factory=dict, converter=freeze)
class PathsClient(object):
"""A recipe engine client which exposes all known base paths.
In particular, you can use this client to discover all known:
* recipe resource path
* loaded module resource paths
* loaded recipe repo paths
"""
IDENT = 'paths'
def __init__(self, start_dir):
self.paths = []
self.path_strings = []
self._start_dir = start_dir
def _initialize_with_recipe_api(self, root_api):
"""This method is called once before the start of every recipe.
It is passed the recipe's `api` object. This method crawls the api object
and extracts every resource base path it can find."""
paths_found = {}
def add_found(path):
if path is not None:
paths_found[str(path)] = path
search_set = [root_api]
found_api_id_set = {id(root_api)}
while search_set:
api = search_set.pop()
add_found(api.resource())
add_found(api.repo_resource())
for name in dir(api.m):
sub_api = getattr(api.m, name)
if not isinstance(sub_api, RecipeApiPlain):
continue
if id(sub_api) not in found_api_id_set:
found_api_id_set.add(id(api))
search_set.append(sub_api)
# transpose
# [(path_string, path), ...]
# into
# ([path_string, ...], [path, ...])
self.path_strings, self.paths = zip(*sorted(paths_found.items()))
def find_longest_prefix(self, target, sep):
"""Identifies a known resource path which would contain the `target` path.
sep must be the current path separator (can vary from os.path.sep when
running under simulation).
Returns (str(Path), Path) if the prefix path is found, or (None, None) if no
such prefix exists.
"""
idx = bisect.bisect_left(self.path_strings, target)
if idx == len(self.paths):
return (None, None) # off the end
sPath, path = self.path_strings[idx], self.paths[idx]
if target == sPath :
return sPath, path
if idx > 0:
sPath, path = self.path_strings[idx-1], self.paths[idx-1]
if target.startswith(sPath+sep):
return sPath, path
return (None, None)
@property
def start_dir(self):
"""Returns the START_DIR for this recipe execution."""
return self._start_dir
class PropertiesClient(object):
"""A recipe engine client representing the recipe engine properties."""
IDENT = 'properties'
def __init__(self, properties):
self._properties = properties
def get_properties(self):
return copy.deepcopy(self._properties)
class StepClient(object):
"""A recipe engine client representing step running and introspection."""
IDENT = 'step'
StepConfig = engine_step.StepConfig
EnvAffix = engine_step.EnvAffix
def __init__(self, engine):
self._engine = engine
def previous_step_result(self):
"""Allows api.step to get the active result from any context.
This always returns the innermost nested step that is still open --
presumably the one that just failed if we are in an exception handler."""
active_step_data = self._engine.active_step
if not active_step_data:
raise ValueError(
'No steps have been run yet, and you are asking for a previous step '
'result.')
return active_step_data
def parent_step(self, name_tokens):
"""Opens a parent step.
Returns a contextmanager object yielding (StepPresentation, List[StepData]).
Refer to RecipeEngine.parent_step for details.
"""
return self._engine.parent_step(name_tokens)
def run_step(self, step):
"""
Runs a step from a StepConfig.
Args:
* step (StepConfig) - The step to run.
Returns:
A StepData object containing the result of finished the step.
"""
assert isinstance(step, engine_step.StepConfig)
return self._engine.run_step(step)
def close_non_parent_step(self):
"""Closes the currently active non-parent step, if any."""
return self._engine.close_non_parent_step()
@attr.s(frozen=True, slots=True)
class ConcurrencyClient(object):
IDENT = 'concurrency'
supports_concurrency = attr.ib() # type: bool
_spawn_impl = attr.ib() # type: f(func, args, kwargs) -> Greenlet
def spawn(self, func, args, kwargs, greenlet_name):
return self._spawn_impl(func, args, kwargs, greenlet_name)
class WarningClient(object):
IDENT = 'warning'
def __init__(self, recorder, recipe_deps):
from .internal.warn import record # Avoid early proto import
if recorder != record.NULL_WARNING_RECORDER and (
not isinstance(recorder, record.WarningRecorder)):
raise ValueError('Expected either an instance of WarningRecorder '
'or NULL_WARNING_RECORDER sentinel. Got type '
'(%s): %r' % (type(recorder), recorder))
self._recorder = recorder
# A repo may locate inside another repo (e.g. generally, deps repos are
# inside main repo). So we should start with the repo with the longest
# path to decide which repo contains the issuer file.
self._repo_paths = sorted(
((repo_name, repo.path)
for repo_name, repo in iteritems(recipe_deps.repos)),
key=lambda r: r[1],
reverse=True,
)
@escape.escape_all_warnings
def record_execution_warning(self, name):
"""Captures the current stack and records an execution warning."""
self._recorder.record_execution_warning(
name,
[frame_tup[0] for frame_tup in inspect.stack()],
)
def record_import_warning(self, name, importer):
"""Records import warning during DEPS resolution."""
self._recorder.record_import_warning(name, importer)
def resolve_warning(self, name, issuer_file):
"""Returns the fully-qualified warning name for the given warning.
The repo that contains the issuer_file is considered as where the
warning is defined.
Args:
* name (str): the warning name to be resolved. If fully-qualified name
is provided, returns as it is.
* issuer_file (str): The file path where warning is issued.
Raise ValueError if none of the repo contains the issuer_file.
"""
if '/' in name:
return name
abs_issuer_path = os.path.abspath(issuer_file)
for _, (repo_name, repo_path) in enumerate(self._repo_paths):
if abs_issuer_path.startswith(repo_path):
return '/'.join((repo_name, name))
raise ValueError('Failed to resolve warning: %r issued in %s. To '
'disambiguate, please provide fully-qualified warning name '
'(i.e. $repo_name/WARNING_NAME)' % (name, abs_issuer_path))
def escape_frame_function(self, warning, frame):
"""Escapes the function the given frame executes from warning attribution.
"""
loc = escape.FuncLoc.from_code_obj(frame.f_code)
if '/' in warning:
pattern = re.compile('^%s$' % warning)
else:
pattern = re.compile('^.+/%s$' % warning)
escaped_warnings = escape.WARNING_ESCAPE_REGISTRY.get(loc, ())
if pattern not in escaped_warnings:
escaped_warnings = (pattern,) + escaped_warnings
escape.WARNING_ESCAPE_REGISTRY[loc] = escaped_warnings
# Exports warning escape decorators
escape_warnings = escape.escape_warnings
escape_all_warnings = escape.escape_all_warnings
class StepFailure(Exception):
"""
This is the base class for all step failures.
Raising a StepFailure counts as 'running a step' for the purpose of
infer_composite_step's logic.
FIXME: This class is as a general way to fail, but it should be split up.
See crbug.com/892792 for more information.
FIXME: These exceptions should be made into more-normal exceptions (e.g.
the way reason_message is overridden by subclasses is very strange).
"""
def __init__(self, name_or_reason, result=None):
# Raising a StepFailure counts as running a step.
_DEFER_CONTEXT.mark_ran_step()
self.exc_result = None # default to None
if result:
self.name = name_or_reason
self.result = result
self.reason = self.reason_message()
# TODO(iannucci): This hasattr stuff is pretty bogus. This is attempting
# to detect when 'result' was a StepData. However AggregatedStepFailure
# passes in something else.
if hasattr(result, 'exc_result'):
self.exc_result = result.exc_result
if self.exc_result.had_timeout:
self.reason += ' (timeout)'
if self.exc_result.was_cancelled:
self.reason += ' (canceled)'
self.reason += ' (retcode: {!r})'.format(self.exc_result.retcode)
else:
self.name = None
self.result = None
self.reason = name_or_reason
super(StepFailure, self).__init__(self.reason)
def reason_message(self):
return 'Step({!r})'.format(self.name)
@property
def was_cancelled(self):
"""
Returns True if this exception was caused by a cancelation event
(see ExecutionResult.was_cancelled).
If this was a manual failure, returns None.
"""
if not self.exc_result:
return None
return self.exc_result.was_cancelled
@property
def had_timeout(self):
"""
Returns True if this exception was caused by a timeout. If this was a manual
failure, returns None.
"""
if not self.exc_result:
return None
return self.exc_result.had_timeout
@property
def retcode(self):
"""
Returns the retcode of the step which failed. If this was a manual
failure, returns None
"""
if not self.exc_result:
return None
return self.exc_result.retcode
class StepWarning(StepFailure):
"""
A subclass of StepFailure, which still fails the build, but which is
a warning. Need to figure out how exactly this will be useful.
"""
def reason_message(self): # pragma: no cover
return "Warning: Step({!r})".format(self.name)
class InfraFailure(StepFailure):
"""
A subclass of StepFailure.
Raised for any non-failure, non-success cases, e.g.
* Step failed to start due to missing executable
* Step timed out
* Step was canceled
* Step was marked as `infra_step`, or run in a context with `infra_steps`
set and returned a not-ok retcode.
"""
def reason_message(self):
return "Infra Failure: Step({!r})".format(self.name)
class AggregatedStepFailure(StepFailure):
def __init__(self, result):
super(AggregatedStepFailure, self).__init__(
"Aggregate step failure.", result=result)
def reason_message(self):
msg = "{!r} out of {!r} aggregated steps failed: ".format(
len(self.result.failures), len(self.result.all_results))
msg += ', '.join((f.reason or f.name) for f in self.result.failures)
return msg
class AggregatedResult(object):
"""Holds the result of an aggregated run of steps.
Currently this is only used internally by defer_results, but it may be exposed
to the consumer of defer_results at some point in the future. For now it's
expected to be easier for defer_results consumers to do their own result
aggregation, as they may need to pick and chose (or label) which results they
really care about.
"""
def __init__(self):
self.successes = []
self.failures = []
self.contains_infra_failure = False
# Needs to be here to be able to treat this as a step result
self.retcode = None
@property
def all_results(self):
"""
Return a list of two item tuples (x, y), where
x is whether or not the step succeeded, and
y is the result of the run
"""
res = [(True, result) for result in self.successes]
res.extend([(False, result) for result in self.failures])
return res
def add_success(self, result):
self.successes.append(result)
return DeferredResult(result, None)
def add_failure(self, exception):
if isinstance(exception, InfraFailure):
self.contains_infra_failure = True
self.failures.append(exception)
return DeferredResult(None, exception)
class DeferredResult(object):
def __init__(self, result, failure):
self._result = result
self._failure = failure
@property
def is_ok(self):
return self._failure is None
def get_result(self):
if not self.is_ok:
raise self.get_error()
return self._result
def get_error(self):
return self._failure
class _DEFER_CONTEXT_OBJ(object):
"""This object keeps track of state pertaining to the behavior of
defer_results and composite_step.
"""
def __init__(self):
"""The object starts in a state where no steps have been run, and there's no
current aggregated_result."""
self._ran_step = [False]
self._aggregated_result = [None]
@property
def ran_step(self):
"""Returns True if a step has run within this defer_results context."""
return self._ran_step[-1]
def mark_ran_step(self):
"""Marks that a step has run within this defer_results context."""
self._ran_step[-1] = True
@property
def aggregated_result(self):
"""Returns the current AggregatedResult() or None, if we're not currently
deferring results."""
return self._aggregated_result[-1]
@contextlib.contextmanager
def begin_aggregate(self):
"""Begins aggregating new results. Use with a with statement:
with _DEFER_CONTEXT.begin_aggregate() as agg:
...
Where `agg` is the AggregatedResult() for that with section.
"""
try:
yield self._enter(AggregatedResult())
finally:
self._exit()
@contextlib.contextmanager
def begin_normal(self):
"""Returns the context to normal (stop aggregating results).
with _DEFER_CONTEXT.begin_normal():
...
"""
try:
yield self._enter(None)
finally:
self._exit()
def _enter(self, agg):
self._ran_step.append(False)
self._aggregated_result.append(agg)
return agg
def _exit(self):
self._ran_step.pop()
self._aggregated_result.pop()
_DEFER_CONTEXT = _DEFER_CONTEXT_OBJ()
def non_step(func):
"""A decorator which prevents a method from automatically being wrapped as
a infer_composite_step by RecipeApiMeta.
This is needed for utility methods which don't run any steps, but which are
invoked within the context of a defer_results().
@see infer_composite_step, defer_results, RecipeApiMeta
"""
assert not hasattr(func, "_skip_inference"), \
"Double-wrapped method %r?" % func
func._skip_inference = True # pylint: disable=protected-access
return func
_skip_inference = non_step
def infer_composite_step(func):
"""A decorator which possibly makes this step act as a single step, for the
purposes of the defer_results function.
Behaves as if this function were wrapped by composite_step, unless this
function:
* is already wrapped by non_step
* returns a result without calling api.step
* raises an exception which is not derived from StepFailure
In any of these cases, this function will behave like a normal function.
This decorator is automatically applied by RecipeApiMeta (or by inheriting
from RecipeApi). If you want to decalare a method's behavior explicitly, you
may decorate it with either composite_step or with non_step.
"""
if getattr(func, "_skip_inference", False):
return func
@_skip_inference # to prevent double-wraps
@wraps(func)
@escape.escape_all_warnings
def _inner(*a, **kw):
agg = _DEFER_CONTEXT.aggregated_result
# We're not deferring results, so run the function normally.
if agg is None:
return func(*a, **kw)
# Stop deferring results within this function; the ultimate result of the
# function will be added to our parent context's aggregated results and
# we'll return a DeferredResult.
with _DEFER_CONTEXT.begin_normal():
try:
ret = func(*a, **kw)
# This is how we differ from composite_step; if we didn't actually run
# a step or throw a StepFailure, return normally.
if not _DEFER_CONTEXT.ran_step:
return ret
return agg.add_success(ret)
except StepFailure as ex:
return agg.add_failure(ex)
_inner.__original = func
return _inner
def composite_step(func):
"""A decorator which makes this step act as a single step, for the purposes of
the defer_results function.
This means that this function will not quit during the middle of its execution
because of a StepFailure, if there is an aggregator active.
You may use this decorator explicitly if infer_composite_step is detecting
the behavior of your method incorrectly to force it to behave as a step. You
may also need to use this if your Api class inherits from RecipeApiPlain and
so doesn't have its methods automatically wrapped by infer_composite_step.
"""
@_skip_inference # to avoid double-wraps
@wraps(func)
@escape.escape_all_warnings
def _inner(*a, **kw):
# composite_steps always count as running a step.
_DEFER_CONTEXT.mark_ran_step()
agg = _DEFER_CONTEXT.aggregated_result
# If we're not aggregating
if agg is None:
return func(*a, **kw)
# Stop deferring results within this function; the ultimate result of the
# function will be added to our parent context's aggregated results and
# we'll return a DeferredResult.
with _DEFER_CONTEXT.begin_normal():
try:
return agg.add_success(func(*a, **kw))
except StepFailure as ex:
return agg.add_failure(ex)
_inner.__original = func
return _inner
@contextlib.contextmanager
def defer_results():
"""
Use this to defer step results in your code. All steps which would previously
return a result or throw an exception will instead return a DeferredResult.
Any exceptions which were thrown during execution will be thrown when either:
a. You call get_result() on the step's result.
b. You exit the lexical scope inside of the with statement
Example:
with defer_results():
api.step('a', ..)
api.step('b', ..)
result = api.m.module.im_a_composite_step(...)
api.m.echo('the data is', result.get_result())
If 'a' fails, 'b' and 'im a composite step' will still run.
If 'im a composite step' fails, then the get_result() call will raise
an exception.
If you don't try to use the result (don't call get_result()), an aggregate
failure will still be raised once you exit the lexical scope inside
the with statement.
"""
assert _DEFER_CONTEXT.aggregated_result is None, (
"may not call defer_results in an active defer_results context")
with _DEFER_CONTEXT.begin_aggregate() as agg:
yield
if agg.failures:
raise AggregatedStepFailure(agg)
class RecipeApiMeta(type):
WHITELIST = ('__init__',)
def __new__(mcs, name, bases, attrs):
"""Automatically wraps all methods of subclasses of RecipeApi with
@infer_composite_step. This allows defer_results to work as intended without
manually decorating every method.
"""
wrap = lambda f: infer_composite_step(f) if f else f
for attr in attrs:
if attr in RecipeApiMeta.WHITELIST:
continue
val = attrs[attr]
if isinstance(val, types.FunctionType):
attrs[attr] = wrap(val)
elif isinstance(val, property):
attrs[attr] = property(
wrap(val.fget),
wrap(val.fset),
wrap(val.fdel),
val.__doc__)
return super(RecipeApiMeta, mcs).__new__(mcs, name, bases, attrs)
class RecipeApiPlain(object):
"""
Framework class for handling recipe_modules.
Inherit from this in your recipe_modules/<name>/api.py . This class provides
wiring for your config context (in self.c and methods, and for dependency
injection (in self.m).
Dependency injection takes place in load_recipe_modules() in loader.py.
USE RecipeApi INSTEAD, UNLESS your RecipeApi subclass derives from something
which defines its own __metaclass__. Deriving from RecipeApi instead of
RecipeApiPlain allows your RecipeApi subclass to automatically work with
defer_results without needing to decorate every methods with
@infer_composite_step.
"""
def __init__(self, module=None, test_data=DisabledTestData(), **_kwargs):
"""Note: Injected dependencies are NOT available in __init__()."""
super(RecipeApiPlain, self).__init__()
self._module = module
assert isinstance(test_data, (ModuleTestData, DisabledTestData))
self._test_data = test_data
# If we're the 'root' api, inject directly into 'self'.
# Otherwise inject into 'self.m'
if not isinstance(module, types.ModuleType):
self.m = self
else:
self.m = ModuleInjectionSite(self)
# If our module has a test api, it gets injected here.
self.test_api = None
# Config goes here.
self.c = None
def initialize(self):
"""
Initializes the recipe module after it has been instantiated with all
dependencies injected and available.
"""
pass
def get_config_defaults(self): # pylint: disable=R0201
"""
Allows your api to dynamically determine static default values for configs.
"""
return {}
def make_config(self, config_name=None, optional=False, **CONFIG_VARS):
"""Returns a 'config blob' for the current API."""
return self.make_config_params(config_name, optional, **CONFIG_VARS)[0]
def _get_config_item(self, config_name, optional=False):
"""Get the config item for a given name.
If `config_name` does not refer to a config item for the current module,
the behavior is determined by the value of `optional`:
* if optional is True, then None will be returned
* else a KeyError will be raised with an error message containing
`config_name`, the name of the api's module and the list of the api's
module's config names.
"""
ctx = self._module.CONFIG_CTX
try:
return ctx.CONFIG_ITEMS[config_name]
except KeyError:
if optional:
return None
raise KeyError(
'%s is not the name of a configuration for module %s: %s' % (
config_name, self._module.__name__, sorted(ctx.CONFIG_ITEMS)))
def make_config_params(self, config_name, optional=False, **CONFIG_VARS):
"""Returns a 'config blob' for the current API, and the computed params
for all dependent configurations.
The params have the following order of precendence. Each subsequent param
is dict.update'd into the final parameters, so the order is from lowest to
higest precedence on a per-key basis:
* if config_name in CONFIG_CTX
* get_config_defaults()
* CONFIG_CTX[config_name].DEFAULT_CONFIG_VARS()
* CONFIG_VARS
* else
* get_config_defaults()
* CONFIG_VARS
"""
generic_params = self.get_config_defaults() # generic defaults
generic_params.update(CONFIG_VARS) # per-invocation values
ctx = self._module.CONFIG_CTX
if optional and not ctx:
return None, generic_params
assert ctx, '%s has no config context' % self
params = self.get_config_defaults() # generic defaults
itm = None
if config_name:
itm = self._get_config_item(config_name, optional)
if not itm:
return None, generic_params
if itm:
params.update(itm.DEFAULT_CONFIG_VARS()) # per-item defaults
params.update(CONFIG_VARS) # per-invocation values
base = ctx.CONFIG_SCHEMA(**params)
if config_name is None:
return base, params
else:
return itm(base), params
def set_config(self, config_name=None, optional=False, **CONFIG_VARS):
"""Sets the modules and its dependencies to the named configuration."""
assert self._module
config, _ = self.make_config_params(config_name, optional, **CONFIG_VARS)
if config:
self.c = config
def apply_config(self, config_name, config_object=None, optional=False):
"""Apply a named configuration to the provided config object or self."""
itm = self._get_config_item(config_name)
itm(config_object or self.c, optional=optional)
def resource(self, *path):
"""Returns path to a file under <recipe module>/resources/ directory.
Args:
path: path relative to module's resources/ directory.
"""
# TODO(vadimsh): Verify that file exists. Including a case like:
# module.resource('dir').join('subdir', 'file.py')
return self._module.RESOURCE_DIRECTORY.join(*path)
def repo_resource(self, *path):
"""Returns a resource path, where path is relative to the root of
the recipe repo where this module is defined.
"""
return self._module.REPO_ROOT.join(*path)
@property
def name(self):
return self._module.NAME
class RecipeApi(RecipeApiPlain):
__metaclass__ = RecipeApiMeta
class RecipeScriptApi(RecipeApiPlain, ModuleInjectionSite):
# TODO(dnj): Delete this and make recipe scripts use standard recipe APIs.
pass
# This is a sentinel object for the Property system. This allows users to
# specify a default of None that will actually be respected.
PROPERTY_SENTINEL = object()
class BoundProperty(object):
"""
A bound, named version of a Property.
A BoundProperty is different than a Property, in that it requires a name,
as well as all of the arguments to be provided. It's intended to be
the declaration of the Property, with no mutation, so the logic about
what a property does is very clear.
The reason there is a distinction between this and a Property is because
we want the user interface for defining properties to be
PROPERTIES = {
'prop_name': Property(),
}
We don't want to have to duplicate the name in both the key of the dictionary
and then Property constructor call, so we need to modify this dictionary
before we actually use it, and inject knowledge into it about its name. We
don't want to actually mutate this though, since we're striving for immutable,
declarative code, so instead we generate a new BoundProperty object from the
defined Property object.
"""
MODULE_PROPERTY = 'module'
RECIPE_PROPERTY = 'recipe'
@staticmethod
def legal_module_property_name(name, full_decl_name):
"""
If this is a special $repo_name/module name.
"""
repo_name, module = full_decl_name.split('::', 1)
return name == '$%s/%s' % (repo_name, module)
@staticmethod
def legal_name(name, is_param_name=False):
"""
If this name is a legal property name.
is_param_name determines if this name in the name of a property, or a
param_name. See the constructor documentation for more information.
The rules are as follows:
* Cannot start with an underscore.
This is for internal arguments, namely _engine (for the step module).
* Cannot be 'self'
This is to avoid conflict with recipe modules, which use the name self.
* Cannot be a python keyword
"""
if name.startswith('_'):
return False
if name in ('self',):
return False
if keyword.iskeyword(name):
return False
regex = r'^[a-zA-Z][a-zA-Z0-9_]*$' if is_param_name else (
r'^[a-zA-Z][.\w-]*$')
return bool(re.match(regex, name))
def __init__(self, default, from_environ, help, kind, name, property_type,
full_decl_name, param_name=None):
"""
Constructor for BoundProperty.
Args:
default (jsonish): The default value for this Property. Must be
JSON-encodable or PROPERTY_SENTINEL.
from_environ (str|None): If given, specifies an environment variable to
grab the default property value from before falling back to the
hardcoded default. If the property value is explicitly passed to the
recipe, it still takes precedence over the environment. If you rely on
this, 'kind' must be string-compatible (since environ contains strings).
help (str): The help text for this Property.
kind (type|ConfigBase): The type of this Property. You can either pass in
a raw python type, or a Config Type, using the recipe engine config
system.
name (str): The name of this Property.
property_type (str): One of RECIPE_PROPERTY or MODULE_PROPERTY.
full_decl_name (str): The fully qualified name of the recipe or module
where this property is defined. This has the form of:
repo_name::module_name
repo_name::path/to/recipe
param_name (str|None): The name of the python function parameter this
property should be stored in. Can be used to allow for dotted property
names, e.g.
PROPERTIES = {
'foo.bar.bam': Property(param_name="bizbaz")
}
"""
assert property_type in (self.RECIPE_PROPERTY, self.MODULE_PROPERTY), \
property_type
# first, check if this is a special '$repo_name/module' property type
# declaration.
is_module_property = (
property_type is self.MODULE_PROPERTY and
self.legal_module_property_name(name, full_decl_name))
if not (is_module_property or BoundProperty.legal_name(name)):
raise ValueError("Illegal name '{}'.".format(name))
param_name = param_name or name
if not BoundProperty.legal_name(param_name, is_param_name=True):
raise ValueError("Illegal param_name '{}'.".format(param_name))
if default is not PROPERTY_SENTINEL:
try:
json.dumps(default)
except:
raise TypeError('default=%r is not json-encodable' % (default,))
self.__default = default
self.__from_environ = from_environ
self.__help = help
self.__kind = kind
self.__name = name
self.__property_type = property_type
self.__param_name = param_name
self.__full_decl_name = full_decl_name
@property
def name(self):
return self.__name
@property
def param_name(self):
return self.__param_name
@property
def default(self):
if self.__default is PROPERTY_SENTINEL:
return self.__default
return copy.deepcopy(self.__default)
@property
def from_environ(self):
return self.__from_environ
@property
def kind(self):
return self.__kind
@property
def help(self):
return self.__help
@property
def full_decl_name(self):
return self.__full_decl_name
def interpret(self, value, environ):
"""
Interprets the value for this Property.
Args:
value: The value to interpret. May be None, which means no explicit value
is provided and we should grab a default.
environ: An environment dict to use for grabbing values for properties
that use 'from_environ'.
Returns:
The value to use for this property. Raises an error if
this property has no valid interpretation.
"""
# Pick from environment if not given explicitly.
if value is PROPERTY_SENTINEL and self.__from_environ:
value = environ.get(self.__from_environ, PROPERTY_SENTINEL)
# If have a value (passed explicitly or through environ), check its type.
if value is not PROPERTY_SENTINEL:
if self.kind is not None:
# The config system handles type checking for us here.
self.kind.set_val(value)
return value
if self.__default is not PROPERTY_SENTINEL:
return self.default
raise ValueError(
"No default specified and no value provided for '{}' from {} '{}'".format(
self.name, self.__property_type, self.full_decl_name))
class Property(object):
def __init__(self, default=PROPERTY_SENTINEL, from_environ=None, help="",
kind=None, param_name=None):
"""
Constructor for Property.
Args:
default: The default value for this Property. Note: A default
value of None is allowed. To have no default value, omit
this argument. This must be a valid JSON-encodable object.
from_environ: If given, specifies an environment variable to grab the
default property value from before falling back to the
hardcoded default. If the property value is explicitly
passed to the recipe, it still takes precedence over the
environment. If you rely on this, 'kind' must be
string-compatible (since environ contains strings).
help: The help text for this Property.
kind: The type of this Property. You can either pass in a raw python
type, or a Config Type, using the recipe engine config system.
"""
if default is not PROPERTY_SENTINEL:
try:
json.dumps(default)
except:
raise TypeError('default=%r is not json-encodable' % (default,))
if from_environ is not None:
if not isinstance(from_environ, basestring):
raise TypeError('from_environ=%r must be a string' % (from_environ,))
self._default = default
self._from_environ = from_environ
self.help = help
self.param_name = param_name
# NOTE: late import to avoid early protobuf import
from .config import Single
if isinstance(kind, type):
if kind in (str, unicode):
kind = basestring
kind = Single(kind)
self.kind = kind
def bind(self, name, property_type, full_decl_name):
"""
Gets the BoundProperty version of this Property. Requires a name.
"""
return BoundProperty(
self._default, self._from_environ, self.help, self.kind, name,
property_type, full_decl_name, self.param_name)
class UndefinedPropertyException(TypeError):
pass
avg_line_length: 32.672 | max_line_length: 80 | alphanum_fraction: 0.69605
hexsha: 79447e67bdc5b9ee90f3d51f7c9e1acdd1cc4600 | size: 1,264 | ext: py | lang: Python
repo (stars/issues/forks): itzdan/Azure-Sentinel-Notebooks | path: SentinelUtilities/setup.py | head: 8798b27d8c721ad51cd48e376a24d43e59564ee9 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
"""
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License. See License.txt in the project root for
license information.
"""
import setuptools

INSTALL_REQUIRES = ['azure>=4.0.0']

#with open("README.rst", "r") as f:
#    long_description_text = f.read()

with open("LICENSE.txt", "r") as fh:
    LICENSE_TXT = fh.read()

setuptools.setup(
    name="Azure-Sentinel-Utilities",
    version="0.5.12",
    author="Azure Sentinel Notebooks Devs",
    author_email="[email protected]",
    description="AZURE SENTINEL NOTEBOOKS PYTHON TOOLS: \
    This package is developed to support Azure Sentinel Notebooks. \
    It is in an early preview stage so please provide feedback, \
    report bugs, and suggets for new features.",
    #long_description='',
    #long_description_content_type="text/x-rst",
    license=LICENSE_TXT,
    url="https://github.com/Azure/Azure-Sentinel",
    python_requires='>=3.6',
    packages=setuptools.find_packages(),
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Operating System :: OS Independent",
    ],
    install_requires=INSTALL_REQUIRES,
    keywords=['security', 'azure', 'sentinel'],
    zip_safe=False,
)
avg_line_length: 31.6 | max_line_length: 72 | alphanum_fraction: 0.684335
hexsha: 79447fcdb8f1064021152620bb180e2fb9552ea6 | size: 108 | ext: py | lang: Python
repo (stars/issues/forks): CodedLadiesInnovateTech/-python-challenge-solutions | path: Darlington/phase1/python Basic 1/day 6 solution/qtn1.py | head: 430cd3eb84a2905a286819eef384ee484d8eb9e7 | licenses: ["MIT"]
stars: 6 (2020-05-23T19:53:25.000Z to 2021-05-08T20:21:30.000Z) | issues: 8 (2020-05-14T18:53:12.000Z to 2020-07-03T00:06:20.000Z) | forks: 39 (2020-05-10T20:55:02.000Z to 2020-09-12T17:40:59.000Z)
content:
#program to check whether a file exists
import os.path
open('abc.txt', 'w')
print(os.path.isfile('abc.txt'))
avg_line_length: 27 | max_line_length: 39 | alphanum_fraction: 0.722222
hexsha: 79447ffd5c267fa7508114fbb2d220cbda740c45 | size: 75,856 | ext: py | lang: Python
repo (stars/issues/forks): toshihidetagami/Pytition | path: pytition/petition/views.py | head: 3eedc40160d483b1d64a2a0afdbe3a0f6f7784c5 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
import csv
from datetime import timedelta
import os
import urllib.parse
import random
from time import time
from django.shortcuts import render, redirect
from django.http import Http404, HttpResponse, HttpResponseForbidden, JsonResponse
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.contrib.messages import get_messages
from django.utils.html import format_html
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from django.db import transaction, IntegrityError
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from django.contrib.auth.forms import PasswordChangeForm
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.timezone import now
from django.core.files.storage import FileSystemStorage
from django.core.paginator import Paginator
from django.views.generic.edit import CreateView
from formtools.wizard.views import SessionWizardView
from .models import Petition, Signature, Organization, PytitionUser, PetitionTemplate, Permission
from .models import SlugModel
from .forms import SignatureForm, ContentFormPetition, EmailForm, NewsletterForm, SocialNetworkForm, ContentFormTemplate
from .forms import StyleForm, PetitionCreationStep1, PetitionCreationStep2, PetitionCreationStep3, UpdateInfoForm
from .forms import DeleteAccountForm, OrgCreationForm
from .helpers import get_client_ip, get_session_user, petition_from_id
from .helpers import check_petition_is_accessible
from .helpers import send_confirmation_email, subscribe_to_newsletter, send_welcome_mail
from .helpers import get_update_form, petition_detail_meta
from .helpers import sanitize_html
#------------------------------------ Views -----------------------------------
# Path : /
# Depending on the settings.INDEX_PAGE, show a list of petitions or
# redirect to an user/org profile page
def index(request):
if not hasattr(settings, 'INDEX_PAGE'):
raise Http404(_("You must set an INDEX_PAGE config in your settings"))
if settings.INDEX_PAGE == 'USER_PROFILE':
try:
user_name = settings.INDEX_PAGE_USER
except:
raise Http404(_("You must set an INDEX_PAGE_USER config in your settings"))
elif settings.INDEX_PAGE == 'ORGA_PROFILE':
try:
org_name = settings.INDEX_PAGE_ORGA
except:
raise Http404(_("You must set an INDEX_PAGE_ORGA config in your settings"))
if settings.INDEX_PAGE == 'ORGA_PROFILE':
org = Organization.objects.get(name=org_name)
return redirect("org_profile", org.slugname)
elif settings.INDEX_PAGE == 'USER_PROFILE':
return redirect("user_profile", user_name)
elif settings.INDEX_PAGE == 'LOGIN_REGISTER':
if request.user.is_authenticated:
return redirect("user_dashboard")
else:
return redirect("login")
else:
authenticated = request.user.is_authenticated
if authenticated:
user = get_session_user(request)
else:
user = request.user
sort = request.GET.get('sort', 'desc')
creation_date = '-creation_date' if sort == 'desc' else 'creation_date'
all_petitions = Petition.objects.filter(published=True).order_by(creation_date)
paginator = Paginator(all_petitions, settings.PAGINATOR_COUNT)
page = request.GET.get('page')
petitions = paginator.get_page(page)
return render(request, 'petition/index.html',
{
'user': user,
'petitions': petitions,
'sort': sort
}
)
# <int:petition_id>/show_sympa_subscribe_bloc
# Show sympa subscribe bloc to mass subscribe people to newsletter
@login_required
def show_sympa_subscribe_bloc(request, petition_id):
try:
pytitionuser = get_session_user(request)
except:
pytitionuser = None
if not pytitionuser:
return redirect('index')
petition = petition_from_id(petition_id)
if petition.owner_type == "org" and not petition.org.is_allowed_to(pytitionuser, "can_view_signatures"):
return redirect("index")
elif petition.owner_type == "user" and petition.owner != pytitionuser:
return redirect("index")
text_bloc = ""
signatures = petition.signature_set.filter(subscribed_to_mailinglist=True)
if not signatures:
return HttpResponse(_("No newsletter subscription yet!"))
for signature in signatures:
text_bloc = text_bloc + "{email} {firstname} {lastname}<br/>\n".format(email=signature.email,
firstname=signature.first_name,
lastname=signature.last_name)
return HttpResponse(sanitize_html(text_bloc))
# /search?q=QUERY
# Show results of a search query
def search(request):
q = request.GET.get('q', '')
if q != "":
petitions = Petition.objects.filter(Q(title__icontains=q) | Q(text__icontains=q)).filter(published=True)[:15]
orgs = Organization.objects.filter(name__icontains=q)
else:
petitions = Petition.objects.filter(published=True).order_by('-id')
paginator = Paginator(petitions, settings.PAGINATOR_COUNT)
page = request.GET.get('page')
petitions = paginator.get_page(page)
orgs = []
return render(
request, 'petition/search.html',
{
'petitions': petitions,
'orgs': orgs,
'q': q
}
)
def hide_sign_form_if_user_just_signed(request, ctx):
storage = get_messages(request)
for message in storage:
if message.level == messages.SUCCESS:
just_confirmed = request.session.get('just_confirmed', False)
if just_confirmed:
ctx.update({'signature_is_confirmed': True})
request.session['just_confirmed'] = False
else:
ctx.update({'petition_is_signed': True})
# /<int:petition_id>/
# Show information on a petition
def detail(request, petition_id):
petition = petition_from_id(petition_id)
check_petition_is_accessible(request, petition)
try:
pytitionuser = get_session_user(request)
except:
pytitionuser = None
sign_form = SignatureForm(petition=petition)
ctx = {"user": pytitionuser, 'petition': petition, 'form': sign_form,
'meta': petition_detail_meta(request, petition_id)}
# If we've just signed successfully the petition, do not show the sign form
hide_sign_form_if_user_just_signed(request, ctx)
if "application/json" in request.META.get('HTTP_ACCEPT', []):
response = JsonResponse(petition.to_json)
response["Access-Control-Allow-Origin"] = "*"
response["Access-Control-Allow-Methods"] = "GET, OPTIONS"
return response
else:
return render(request, 'petition/petition_detail.html', ctx)
# /<int:petition_id>/confirm/<confirmation_hash>
# Confirm signature to a petition
def confirm(request, petition_id, confirmation_hash):
petition = petition_from_id(petition_id)
check_petition_is_accessible(request, petition)
try:
successmsg = petition.confirm_signature(confirmation_hash)
if successmsg is None:
messages.error(request, _("Error: This confirmation code is invalid. Maybe you\'ve already confirmed?"))
else:
messages.success(request, successmsg)
request.session['just_confirmed'] = True
except ValidationError as e:
messages.error(request, _(e.message))
except Signature.DoesNotExist:
messages.error(request, _("Error: This confirmation code is invalid."))
return redirect(petition.url)
# <int:petition_id>/get_csv_signature
# <int:petition_id>/get_csv_confirmed_signature
# returns the CSV files of the list of signatures
@login_required
def get_csv_signature(request, petition_id, only_confirmed):
user = get_session_user(request)
try:
petition = Petition.objects.get(pk=petition_id)
except Petition.DoesNotExist:
return JsonResponse({}, status=404)
if petition.owner_type == "org" and not petition.org.is_allowed_to(user, "can_view_signatures"):
return JsonResponse({}, status=403)
elif petition.owner_type == "user" and petition.owner != user:
return JsonResponse({}, status=403)
filename = '{}.csv'.format(petition)
signatures = Signature.objects.filter(petition = petition)
if only_confirmed:
signatures = signatures.filter(confirmed = True)
else:
signatures = signatures.all()
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment;filename={}'.format(filename).replace('\r\n', '').replace(' ', '%20')
writer = csv.writer(response)
attrs = ['first_name', 'last_name', 'phone', 'email', 'subscribed_to_mailinglist', 'confirmed']
writer.writerow(attrs)
for signature in signatures:
values = [getattr(signature, field) for field in attrs]
writer.writerow(values)
return response
# resend/<int:signature_id>
# resend the signature confirmation email
@login_required
def go_send_confirmation_email(request, signature_id):
app_label = Signature._meta.app_label
signature = Signature.objects.filter(pk=signature_id).get()
send_confirmation_email(request, signature)
return redirect('admin:{}_signature_change'.format(app_label), signature_id)
# <int:petition_id>/sign
# Sign a petition
def create_signature(request, petition_id):
petition = petition_from_id(petition_id)
check_petition_is_accessible(request, petition)
if request.method == "POST":
form = SignatureForm(petition=petition, data=request.POST)
if not form.is_valid():
return render(request, 'petition/petition_detail.html', {'petition': petition, 'form': form, 'meta': petition_detail_meta(request, petition_id)})
ipaddr = make_password(
get_client_ip(request),
salt=petition.salt.encode('utf-8'))
since = now() - timedelta(seconds=settings.SIGNATURE_THROTTLE_TIMING)
signatures = Signature.objects.filter(
petition=petition,
ipaddress=ipaddr,
date__gt=since)
if signatures.count() > settings.SIGNATURE_THROTTLE:
messages.error(request, _("Too many signatures from your IP address, please try again later."))
return render(request, 'petition/petition_detail.html', {'petition': petition, 'form': form, 'meta': petition_detail_meta(request, petition_id)})
else:
signature = form.save()
signature.ipaddress = ipaddr
signature.save()
send_confirmation_email(request, signature)
messages.success(request,
format_html(_("Thank you for signing this petition, an email has just been sent to you at your address \'{}\'" \
" in order to confirm your signature.<br>" \
"You will need to click on the confirmation link in the email.<br>" \
"If you cannot find the email in your Inbox, please have a look in your Spam box.")\
, signature.email))
if petition.has_newsletter and signature.subscribed_to_mailinglist:
subscribe_to_newsletter(petition, signature.email)
return redirect(petition.url)
# /org/<slug:orgslugname>/dashboard
# Show the dashboard of an organization
@login_required
def org_dashboard(request, orgslugname):
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
messages.error(request, _("This organization does not exist: '{}'".format(orgslugname)))
return redirect("user_dashboard")
pytitionuser = get_session_user(request)
if pytitionuser not in org.members.all():
messages.error(request, _("You are not part of this organization: '{}'".format(org.name)))
return redirect("user_dashboard")
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
messages.error(request,
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=org.name)))
return redirect("user_dashboard")
can_create_petition = org.is_allowed_to(pytitionuser, "can_create_petitions")
petitions = org.petition_set.all()
other_orgs = pytitionuser.organization_set.filter(~Q(name=org.name)).all()
return render(request, 'petition/org_dashboard.html',
{'org': org, 'user': pytitionuser, "other_orgs": other_orgs,
'petitions': petitions, 'user_permissions': permissions,
'can_create_petition': can_create_petition})
# /user/dashboard
# Dashboard of the logged in user
@login_required
def user_dashboard(request):
user = get_session_user(request)
petitions = user.petition_set.all()
return render(
request,
'petition/user_dashboard.html',
{'user': user, 'petitions': petitions, 'can_create_petition': True}
)
# /user/<user_name>
# Show the user profile
def user_profile(request, user_name):
try:
user = PytitionUser.objects.get(user__username=user_name)
except PytitionUser.DoesNotExist:
raise Http404(_("not found"))
sort = request.GET.get('sort', 'desc')
creation_date = '-creation_date' if sort == 'desc' else 'creation_date'
petitions = user.petition_set.filter(published=True).order_by(creation_date)
paginator = Paginator(petitions, settings.PAGINATOR_COUNT)
page = request.GET.get('page')
petitions = paginator.get_page(page)
return render(
request,
'petition/user_profile.html',
{'user': user, 'petitions': petitions, 'sort': sort }
)
# /org/<slug:orgslugname>/leave_org
# User is leaving the organisation
@login_required
def leave_org(request, orgslugname):
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
pytitionuser = get_session_user(request)
if pytitionuser not in org.members.all():
raise Http404(_("not found"))
with transaction.atomic():
if org.is_last_admin(pytitionuser):
messages.error(request, _('Impossible to leave this organisation, you are the last administrator'))
return redirect(reverse('account_settings') + '#a_org_form')
elif org.members.count() == 1:
messages.error(request, _('Impossible to leave this organisation, you are the last member'))
return redirect(reverse('account_settings') + '#a_org_form')
else:
org.members.remove(pytitionuser)
return redirect('account_settings')
# /org/<slug:orgslugname>
# Show the profile of an organization
def org_profile(request, orgslugname):
try:
user = get_session_user(request)
except:
user = None
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
sort = request.GET.get('sort', 'desc')
creation_date = '-creation_date' if sort == 'desc' else 'creation_date'
petitions = org.petition_set.filter(published=True).order_by(creation_date)
paginator = Paginator(petitions, settings.PAGINATOR_COUNT)
page = request.GET.get('page')
petitions = paginator.get_page(page)
ctx = {'org': org,
'petitions': petitions,
'sort': sort}
# if a user is logged-in, put it in the context, it will feed the navbar dropdown
if user is not None:
ctx['user'] = user
return render(request, "petition/org_profile.html", ctx)
# /get_user_list
# get the list of users
@login_required
def get_user_list(request):
q = request.GET.get('q', '')
if q != "":
users = PytitionUser.objects.filter(Q(user__username__contains=q) | Q(user__first_name__icontains=q) |
Q(user__last_name__icontains=q)).all()
else:
users = []
userdict = {
"values": [user.user.username for user in users],
}
return JsonResponse(userdict)
# PATH : org/<slug:orgslugname>/add_user
# Add an user to an organization
@login_required
def org_add_user(request, orgslugname):
adduser = request.GET.get('user', '')
try:
adduser = PytitionUser.objects.get(user__username=adduser)
except PytitionUser.DoesNotExist:
message = _("This user does not exist (anylonger?)")
return JsonResponse({"message": message}, status=404)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
message = _("This organization does not exist (anylonger?)")
return JsonResponse({"message": message}, status=404)
pytitionuser = get_session_user(request)
if org not in pytitionuser.organization_set.all():
message = _("You are not part of this organization.")
return JsonResponse({"message": message}, status=403)
if org in adduser.organization_set.all():
message = _("User is already member of {orgname} organization".format(orgname=org.name))
return JsonResponse({"message": message}, status=500)
if not org.is_allowed_to(pytitionuser, "can_add_members"):
message = _("You are not allowed to invite new members into this organization.")
return JsonResponse({"message": message}, status=403)
try:
adduser.invitations.add(org)
adduser.save()
except:
message = _("An error occured")
return JsonResponse({"message": message}, status=500)
message = _("You invited {username} to join {orgname}".format(username=adduser.name, orgname=org.name))
return JsonResponse({"message": message})
# /org/<slug:orgslugname>/invite_accept
# Accept an invitation to an organisation
# Called from /user/dashboard
@login_required
def invite_accept(request, orgslugname):
if orgslugname == "":
return HttpResponse(status=500)
pytitionuser = get_session_user(request)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
if org in pytitionuser.invitations.all():
try:
with transaction.atomic():
pytitionuser.invitations.remove(org)
org.members.add(pytitionuser)
except:
return HttpResponse(status=500)
else:
raise Http404(_("not found"))
return redirect('user_dashboard')
# /org/<slug:orgslugname>/invite_dismiss
# Dismiss the invitation to an organisation
@login_required
def invite_dismiss(request, orgslugname):
if orgslugname == "":
return JsonResponse({}, status=500)
pytitionuser = get_session_user(request)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
if org in pytitionuser.invitations.all():
try:
pytitionuser.invitations.remove(org)
except:
return JsonResponse({}, status=500)
else:
raise Http404(_("not found"))
return redirect('user_dashboard')
# /org/<slug:orgslugname>/new_template
# /user/new_template
# Create a new template
@login_required
def new_template(request, orgslugname=None):
pytitionuser = get_session_user(request)
ctx = {'user': pytitionuser}
if orgslugname:
redirection = "org_new_template"
try:
org = Organization.objects.get(slugname=orgslugname)
ctx['org'] = org
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
if org not in pytitionuser.organization_set.all():
return HttpResponseForbidden(_("You are not allowed to view this organization dashboard"))
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
ctx['user_permissions'] = permissions
except Permission.DoesNotExist:
return HttpResponse(
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=org.name)), status=500)
if not permissions.can_create_templates:
return HttpResponseForbidden(_("You don't have the permission to create a Template in this organization"))
ctx['base_template'] = 'petition/org_base.html'
else:
redirection = "user_new_template"
ctx['base_template'] = 'petition/user_base.html'
if request.method == "POST":
template_name = request.POST.get('template_name', '')
if template_name != '':
if orgslugname:
template = PetitionTemplate(name=template_name, org=org)
else:
template = PetitionTemplate(name=template_name, user=pytitionuser)
template.save()
return redirect("edit_template", template.id)
else:
messages.error(request, _("You need to provide a template name."))
return redirect(redirection)
else:
return render(request, "petition/new_template.html", ctx)
# /templates/<int:template_id>/edit
# Edit a petition template
@login_required
def edit_template(request, template_id):
id = template_id
if id == '':
return HttpResponseForbidden(_("You need to provide the template id to modify"))
try:
template = PetitionTemplate.objects.get(pk=id)
except PetitionTemplate.DoesNotExist:
raise Http404(_("This template does not exist"))
pytitionuser = get_session_user(request)
context = {'user': pytitionuser}
if template.owner_type == "org":
owner = template.org
else:
owner = template.user
if template.owner_type == "org":
try:
permissions = Permission.objects.get(organization=owner, user=pytitionuser)
except:
return HttpResponse(
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=owner.name)), status=500)
context['user_permissions'] = permissions
if owner not in pytitionuser.organization_set.all() or not permissions.can_modify_templates:
return HttpResponseForbidden(_("You are not allowed to edit this organization's templates"))
context['org'] = owner
base_template = "petition/org_base.html"
else:
if owner != pytitionuser:
return HttpResponseForbidden(_("You are not allowed to edit this user's templates"))
base_template = "petition/user_base.html"
submitted_ctx = {
'content_form_submitted': False,
'email_form_submitted': False,
'social_network_form_submitted': False,
'newsletter_form_submitted': False,
'style_form_submitted': False,
}
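    # The template editor hosts several independent sub-forms on a single page;
    # only the sub-form whose submit marker is present in the POST data is
    # validated and saved, the others are re-filled from the current values.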
if request.method == "POST":
if 'content_form_submitted' in request.POST:
content_form = ContentFormTemplate(request.POST)
submitted_ctx['content_form_submitted'] = True
if content_form.is_valid():
template.name = content_form.cleaned_data['name']
template.text = content_form.cleaned_data['text']
template.side_text = content_form.cleaned_data['side_text']
template.footer_text = content_form.cleaned_data['footer_text']
template.footer_links = content_form.cleaned_data['footer_links']
template.sign_form_footer = content_form.cleaned_data['sign_form_footer']
template.save()
else:
content_form = ContentFormTemplate({f: getattr(template, f) for f in ContentFormTemplate.base_fields})
if 'email_form_submitted' in request.POST:
email_form = EmailForm(request.POST)
submitted_ctx['email_form_submitted'] = True
if email_form.is_valid():
template.confirmation_email_reply = email_form.cleaned_data['confirmation_email_reply']
template.save()
else:
email_form = EmailForm({f: getattr(template, f) for f in EmailForm.base_fields})
if 'social_network_form_submitted' in request.POST:
social_network_form = SocialNetworkForm(request.POST, request.FILES)
submitted_ctx['social_network_form_submitted'] = True
if social_network_form.is_valid():
storage = FileSystemStorage()
file = social_network_form.cleaned_data['twitter_image']
if file:
path = os.path.join(storage.location, pytitionuser.username, file.name)
name = storage._save(path, file)
newrelpath = os.path.relpath(name, storage.location)
template.twitter_image = urllib.parse.urljoin(settings.MEDIA_URL, newrelpath)
if social_network_form.cleaned_data['remove_twitter_image']:
template.twitter_image = ""
template.twitter_description = social_network_form.cleaned_data['twitter_description']
template.org_twitter_handle = social_network_form.cleaned_data['org_twitter_handle']
template.save()
else:
remove_fields = ["twitter_image", "remove_twitter_image"]
fields = dict((k, v) for k,v in SocialNetworkForm.base_fields.items() if k not in remove_fields)
social_network_form = SocialNetworkForm({f: getattr(template, f) for f in fields})
if 'newsletter_form_submitted' in request.POST:
newsletter_form = NewsletterForm(request.POST)
submitted_ctx['newsletter_form_submitted'] = True
if newsletter_form.is_valid():
template.has_newsletter = newsletter_form.cleaned_data['has_newsletter']
template.newsletter_text = newsletter_form.cleaned_data['newsletter_text']
template.newsletter_subscribe_http_data = newsletter_form.cleaned_data['newsletter_subscribe_http_data']
template.newsletter_subscribe_http_mailfield = newsletter_form.cleaned_data['newsletter_subscribe_http_mailfield']
template.newsletter_subscribe_http_url = newsletter_form.cleaned_data['newsletter_subscribe_http_url']
template.newsletter_subscribe_mail_subject = newsletter_form.cleaned_data['newsletter_subscribe_mail_subject']
template.newsletter_subscribe_mail_from = newsletter_form.cleaned_data['newsletter_subscribe_mail_from']
template.newsletter_subscribe_mail_to = newsletter_form.cleaned_data['newsletter_subscribe_mail_to']
template.newsletter_subscribe_method = newsletter_form.cleaned_data['newsletter_subscribe_method']
template.newsletter_subscribe_mail_smtp_host = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_host']
template.newsletter_subscribe_mail_smtp_port = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_port']
template.newsletter_subscribe_mail_smtp_user = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_user']
template.newsletter_subscribe_mail_smtp_password = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_password']
template.newsletter_subscribe_mail_smtp_tls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_tls']
template.newsletter_subscribe_mail_smtp_starttls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_starttls']
template.save()
else:
newsletter_form = NewsletterForm({f: getattr(template, f) for f in NewsletterForm.base_fields})
if 'style_form_submitted' in request.POST:
submitted_ctx['style_form_submitted'] = True
style_form = StyleForm(request.POST)
if style_form.is_valid():
template.bgcolor = style_form.cleaned_data['bgcolor']
template.linear_gradient_direction = style_form.cleaned_data['linear_gradient_direction']
template.gradient_from = style_form.cleaned_data['gradient_from']
template.gradient_to = style_form.cleaned_data['gradient_to']
template.save()
else:
style_form = StyleForm({f: getattr(template, f) for f in StyleForm.base_fields})
else:
remove_fields = ["twitter_image", "remove_twitter_image"]
fields = dict((k, v) for k, v in SocialNetworkForm.base_fields.items() if k not in remove_fields)
social_network_form = SocialNetworkForm({f: getattr(template, f) for f in fields})
content_form = ContentFormTemplate({f: getattr(template, f) for f in ContentFormTemplate.base_fields})
email_form = EmailForm({f: getattr(template, f) for f in EmailForm.base_fields})
newsletter_form = NewsletterForm({f: getattr(template, f) for f in NewsletterForm.base_fields})
style_form = StyleForm({f: getattr(template, f) for f in StyleForm.base_fields})
ctx = {'content_form': content_form,
'email_form': email_form,
'social_network_form': social_network_form,
'newsletter_form': newsletter_form,
'style_form': style_form,
'petition': template}
context['base_template'] = base_template
context.update(ctx)
context.update(submitted_ctx)
return render(request, "petition/edit_template.html", context)
# /templates/<int:template_id>/delete
# Delete a template
@login_required
def template_delete(request, template_id):
pytitionuser = get_session_user(request)
if template_id == '':
return JsonResponse({}, status=500)
try:
template = PetitionTemplate.objects.get(pk=template_id)
except:
return JsonResponse({}, status=404)
if template.owner_type == "org":
        if pytitionuser not in template.org.members.all():
return JsonResponse({}, status=403) # User not in organization
try:
permissions = Permission.objects.get(
organization=template.org,
user=pytitionuser)
except Permission.DoesNotExist:
return JsonResponse({}, status=500) # No permission? fatal error!
if not permissions.can_delete_templates:
return JsonResponse({}, status=403) # User does not have the permission!
else:
if pytitionuser != template.user:
return JsonResponse({}, status=403) # User cannot delete a template if it's not his
template.delete()
return JsonResponse({})
# /templates/<int:template_id>/fav
# Set a template as favourite
@login_required
def template_fav_toggle(request, template_id):
pytitionuser = get_session_user(request)
if template_id == '':
return JsonResponse({}, status=500)
try:
template = PetitionTemplate.objects.get(pk=template_id)
except PetitionTemplate.DoesNotExist:
return JsonResponse({}, status=404)
if template.owner_type == "org":
owner = template.org
else:
owner = template.user
if template.owner_type == "org":
if owner not in pytitionuser.organization_set.all():
return JsonResponse({}, status=403) # Forbidden
else:
if owner != pytitionuser:
return JsonResponse({'msg': _("You are not allowed to change this user's default template")}, status=403)
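    # Toggle behaviour: picking the current default template again clears it,
    # otherwise the chosen template becomes the owner's new default.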
if owner.default_template == template:
owner.default_template = None
else:
owner.default_template = template
owner.save()
return JsonResponse({})
# /org/<slug:orgslugname>/delete_member
# Remove a member from an organization
@login_required
def org_delete_member(request, orgslugname):
member_name = request.GET.get('member', '')
try:
member = PytitionUser.objects.get(user__username=member_name)
except PytitionUser.DoesNotExist:
raise Http404(_("User does not exist"))
pytitionuser = get_session_user(request)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
if pytitionuser not in org.members.all():
return JsonResponse({}, status=403) # Forbidden
try:
permissions = Permission.objects.get(user=pytitionuser, organization=org)
    except Permission.DoesNotExist:
return JsonResponse({}, status=500)
if permissions.can_remove_members or pytitionuser == member:
if org in member.organization_set.all():
if org.is_last_admin(member):
return JsonResponse({}, status=403) # Forbidden
member.organization_set.remove(org)
else:
return JsonResponse({}, status=404)
else:
return JsonResponse({}, status=403) # Forbidden
return JsonResponse({}, status=200)
# PATH : org/<slug:orgslugname>/edit_user_permissions/<slug:user_name>
# Show a webpage to edit permissions
@login_required
def org_edit_user_perms(request, orgslugname, user_name):
"""Shows the page which lists the user permissions."""
pytitionuser = get_session_user(request)
try:
member = PytitionUser.objects.get(user__username=user_name)
except PytitionUser.DoesNotExist:
messages.error(request, _("User '{name}' does not exist".format(name=user_name)))
return redirect("org_dashboard", orgslugname)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("Organization '{name}' does not exist".format(name=orgslugname)))
if org not in member.organization_set.all():
messages.error(request, _("The user '{username}' is not member of this organization ({orgname}).".
format(username=user_name, orgname=org.name)))
return redirect("org_dashboard", org.slugname)
try:
permissions = Permission.objects.get(organization=org, user=member)
except Permission.DoesNotExist:
messages.error(request,
_("Internal error, this member does not have permissions attached to this organization."))
return redirect("org_dashboard", org.slugname)
try:
user_permissions = Permission.objects.get(organization=org, user=pytitionuser)
except:
return HttpResponse(
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=org.name)), status=500)
return render(request, "petition/org_edit_user_perms.html",
{'org': org, 'member': member, 'user': pytitionuser,
'permissions': permissions,
'user_permissions': user_permissions})
# PATH /org/<slug:orgslugname>/set_user_permissions/<slug:user_name>
# Set a permission for an user
@login_required
def org_set_user_perms(request, orgslugname, user_name):
"""Actually do the modification of user permissions.
Data come from "org_edit_user_perms" view's form.
"""
pytitionuser = get_session_user(request)
try:
member = PytitionUser.objects.get(user__username=user_name)
except PytitionUser.DoesNotExist:
messages.error(request, _("User does not exist"))
return redirect("org_dashboard", orgslugname)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
if org not in member.organization_set.all():
messages.error(request, _("This user is not part of organization \'{orgname}\'".format(orgname=org.name)))
return redirect("org_dashboard", org.slugname)
try:
permissions = Permission.objects.get(user=member, organization=org)
except Permission.DoesNotExist:
messages.error(request, _("Fatal error, this user does not have permissions attached for this organization"))
return redirect("org_dashboard", org.slugname)
try:
userperms = Permission.objects.get(user=pytitionuser, organization=org)
except:
messages.error(request, _("Fatal error, you don't have permissions attached to you for this organization"))
return redirect("org_dashboard", org.slugname)
if pytitionuser not in org.members.all():
messages.error(request, _("You are not part of this organization"))
return redirect("user_dashboard")
if not userperms.can_modify_permissions:
messages.error(request, _("You are not allowed to modify this organization members' permissions"))
return redirect("org_edit_user_perms", orgslugname, user_name)
if request.method == "POST":
error = False
post = request.POST
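        # HTML checkboxes are only present in the POST data when ticked,
        # hence the comparisons against 'on'.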
permissions.can_remove_members = post.get('can_remove_members', '') == 'on'
permissions.can_add_members = post.get('can_add_members', '') == 'on'
permissions.can_create_petitions = post.get('can_create_petitions', '') == 'on'
permissions.can_modify_petitions = post.get('can_modify_petitions', '') == 'on'
permissions.can_delete_petitions = post.get('can_delete_petitions', '') == 'on'
permissions.can_create_templates = post.get('can_create_templates', '') == 'on'
permissions.can_modify_templates = post.get('can_modify_templates', '') == 'on'
permissions.can_delete_templates = post.get('can_delete_templates', '') == 'on'
permissions.can_view_signatures = post.get('can_view_signatures', '') == 'on'
permissions.can_modify_signatures = post.get('can_modify_signatures', '') == 'on'
permissions.can_delete_signatures = post.get('can_delete_signatures', '') == 'on'
can_modify_perms = post.get('can_modify_permissions', '') == 'on'
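        # Safeguard: an organization must never end up without at least one
        # member who is allowed to modify permissions.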
with transaction.atomic():
# if user is dropping his own permissions
if not can_modify_perms and permissions.can_modify_permissions and pytitionuser == member:
# get list of people with can_modify_permissions permission on this org
owners = org.owners
if owners.count() > 1:
permissions.can_modify_permissions = can_modify_perms
else:
if org.members.count() > 1:
error = True
messages.error(request, _("You cannot remove your ability to change permissions on this "
"Organization because you are the only one left who can do this. "
"Give the permission to someone else before removing yours."))
else:
error = True
messages.error(request, _("You cannot remove your ability to change permissions on this "
"Organization because you are the only member left."))
if not error:
permissions.can_modify_permissions = can_modify_perms
messages.success(request, _("Permissions successfully changed!"))
permissions.save()
return redirect("org_edit_user_perms", orgslugname, user_name)
WizardTemplates = {"step1": "petition/new_petition_step1.html",
"step2": "petition/new_petition_step2.html",
"step3": "petition/new_petition_step3.html"}
WizardForms = [("step1", PetitionCreationStep1),
("step2", PetitionCreationStep2),
("step3", PetitionCreationStep3)]
# Class Based Controller
# PATH : subroutes of /wizard
@method_decorator(login_required, name='dispatch')
class PetitionCreationWizard(SessionWizardView):
def dispatch(self, request, *args, **kwargs):
if settings.DISABLE_USER_PETITION and "orgslugname" not in self.kwargs:
messages.error(request, _("Users are not allowed to create their own petitions."))
return redirect("user_dashboard")
return super().dispatch(request, *args, **kwargs)
def get_template_names(self):
return [WizardTemplates[self.steps.current]]
def get_form_initial(self, step):
if step == "step2":
use_template = False
org_petition = "orgslugname" in self.kwargs
if org_petition:
orgslugname = self.kwargs['orgslugname']
org = Organization.objects.get(slugname=orgslugname)
else:
pytitionuser = get_session_user(self.request)
# Use a specific template if its id is given
if "template_id" in self.kwargs:
template = PetitionTemplate.objects.get(pk=self.kwargs['template_id'])
if org_petition:
if template in org.petitiontemplate_set.all():
return {'message': template.text}
else:
if template in pytitionuser.petitiontemplate_set.all():
return {'message': template.text}
# if no template id is given, check for default templates
if org_petition:
if org.default_template is not None:
template = org.default_template
use_template = True
elif pytitionuser.default_template is not None:
template = pytitionuser.default_template
use_template = True
if use_template:
return {'message': template.text}
return self.initial_dict.get(step, {})
def get_form_kwargs(self, step=None):
if step == "step1":
org_petition = "orgslugname" in self.kwargs
if org_petition:
orgslugname = self.kwargs['orgslugname']
kwargs = {"orgslugname": orgslugname}
else:
pytitionuser = get_session_user(self.request)
kwargs = {"user_name": pytitionuser.user.username}
return kwargs
else:
return {}
def done(self, form_list, **kwargs):
org_petition = "orgslugname" in self.kwargs
title = self.get_cleaned_data_for_step("step1")["title"]
message = self.get_cleaned_data_for_step("step2")["message"]
publish = self.get_cleaned_data_for_step("step3")["publish"]
pytitionuser = get_session_user(self.request)
_redirect = self.request.POST.get('redirect', '')
if org_petition:
orgslugname = self.kwargs['orgslugname']
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
messages.error(self.request, _("Cannot find this organization"))
return redirect("user_dashboard")
#raise Http404(_("Organization does not exist"))
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
return redirect("org_dashboard", orgslugname)
if pytitionuser in org.members.all() and permissions.can_create_petitions:
#FIXME I think new here is better than create
petition = Petition.objects.create(title=title, text=message, org=org)
if "template_id" in self.kwargs:
template = PetitionTemplate.objects.get(pk=self.kwargs['template_id'])
if template in org.petitiontemplate_set.all():
petition.prepopulate_from_template(template)
petition.save()
else:
messages.error(self.request, _("This template does not belong to your organization"))
return redirect("org_dashboard", orgslugname)
if publish:
petition.publish()
if _redirect and _redirect == '1':
return redirect("edit_petition", petition.id)
else:
return redirect("org_dashboard", orgslugname)
else:
messages.error(self.request, _("You don't have the permission to create a new petition in this Organization"))
return redirect("org_dashboard", orgslugname)
else:
petition = Petition.objects.create(title=title, text=message, user=pytitionuser)
if "template_id" in self.kwargs:
template = PetitionTemplate.objects.get(pk=self.kwargs['template_id'])
if template in pytitionuser.petitiontemplate_set.all():
petition.prepopulate_from_template(template)
petition.save()
else:
messages.error(self.request, _("This template does not belong to you"))
return redirect("user_dashboard")
if publish:
petition.publish()
if _redirect and _redirect == '1':
return redirect("edit_petition", petition.id)
else:
return redirect("user_dashboard")
def get_context_data(self, form, **kwargs):
org_petition = "orgslugname" in self.kwargs
context = super(PetitionCreationWizard, self).get_context_data(form=form, **kwargs)
if org_petition:
base_template = 'petition/org_base.html'
try:
org = Organization.objects.get(slugname=self.kwargs['orgslugname'])
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
else:
base_template = 'petition/user_base.html'
pytitionuser = get_session_user(self.request)
context.update({'user': pytitionuser,
'base_template': base_template})
if org_petition:
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
return HttpResponse(
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=org.name)), status=500)
context.update({'org': org,
'user_permissions': permissions})
else:
petitions = pytitionuser.petition_set.all()
context.update({'petitions': petitions})
if self.steps.current == "step3":
context.update(self.get_cleaned_data_for_step("step1"))
context.update(self.get_cleaned_data_for_step("step2"))
return context
# /<int:petition_id>/delete
# Delete a petition
@login_required
def petition_delete(request, petition_id):
petition = petition_from_id(petition_id)
pytitionuser = get_session_user(request)
if petition.owner_type == "user":
if petition.user == pytitionuser:
petition.delete()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
else: # an organization owns the petition
userperms = Permission.objects.get(organization=petition.org, user=pytitionuser)
if userperms.can_delete_petitions:
petition.delete()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
# /<int:petition_id>/publish
# Publish a petition
@login_required
def petition_publish(request, petition_id):
pytitionuser = get_session_user(request)
petition = petition_from_id(petition_id)
if petition.owner_type == "user":
if petition.user == pytitionuser:
petition.publish()
return JsonResponse({})
else:
# Petition owned by someone else
return JsonResponse({}, status=403)
else:
# Check if the user has permission over this org
try:
userperms = Permission.objects.get(organization=petition.org, user=pytitionuser)
if userperms.can_modify_petitions:
petition.publish()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
except Permission.DoesNotExist:
return JsonResponse({}, status=403)
# /<int:petition_id>/unpublish
# Unpublish a petition
@login_required
def petition_unpublish(request, petition_id):
pytitionuser = get_session_user(request)
petition = petition_from_id(petition_id)
if petition.owner_type == "user":
if petition.user == pytitionuser:
petition.unpublish()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
else:
# Check if the user has permission over this org
try:
userperms = Permission.objects.get(organization=petition.org, user=pytitionuser)
if userperms.can_modify_petitions:
petition.unpublish()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
except Permission.DoesNotExist:
return JsonResponse({}, status=403)
# /<int:petition_id>/edit
# Edit a petition
@login_required
def edit_petition(request, petition_id):
petition = petition_from_id(petition_id)
pytitionuser = get_session_user(request)
if not petition.is_allowed_to_edit(pytitionuser):
messages.error(request, _("You are not allowed to edit this petition"))
return redirect("user_dashboard")
submitted_ctx = {
'content_form_submitted': False,
'email_form_submitted': False,
'social_network_form_submitted': False,
'newsletter_form_submitted': False,
}
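    # Same multi-form pattern as the template editor: each sub-form is handled
    # independently depending on which submit marker is present in the POST data.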
if request.method == "POST":
if 'content_form_submitted' in request.POST:
submitted_ctx['content_form_submitted'] = True
content_form = ContentFormPetition(request.POST)
if content_form.is_valid():
petition.title = content_form.cleaned_data['title']
petition.target = content_form.cleaned_data['target']
petition.paper_signatures = content_form.cleaned_data['paper_signatures']
petition.text = content_form.cleaned_data['text']
petition.side_text = content_form.cleaned_data['side_text']
petition.footer_text = content_form.cleaned_data['footer_text']
petition.footer_links = content_form.cleaned_data['footer_links']
petition.sign_form_footer = content_form.cleaned_data['sign_form_footer']
petition.save()
else:
content_form = ContentFormPetition({f: getattr(petition, f) for f in ContentFormPetition.base_fields})
if 'email_form_submitted' in request.POST:
submitted_ctx['email_form_submitted'] = True
email_form = EmailForm(request.POST)
if email_form.is_valid():
petition.confirmation_email_reply = email_form.cleaned_data['confirmation_email_reply']
petition.save()
else:
email_form = EmailForm({f: getattr(petition, f) for f in EmailForm.base_fields})
if 'social_network_form_submitted' in request.POST:
submitted_ctx['social_network_form_submitted'] = True
social_network_form = SocialNetworkForm(request.POST, request.FILES)
if social_network_form.is_valid():
storage = FileSystemStorage()
file = social_network_form.cleaned_data['twitter_image']
if file:
path = os.path.join(storage.location, pytitionuser.username, file.name)
name = storage._save(path, file)
newrelpath = os.path.relpath(name, storage.location)
petition.twitter_image = urllib.parse.urljoin(settings.MEDIA_URL, newrelpath)
if social_network_form.cleaned_data['remove_twitter_image']:
petition.twitter_image = ""
petition.twitter_description = social_network_form.cleaned_data['twitter_description']
petition.org_twitter_handle = social_network_form.cleaned_data['org_twitter_handle']
petition.save()
else:
data = {'twitter_description': petition.twitter_description,
'org_twitter_handle': petition.org_twitter_handle}
social_network_form = SocialNetworkForm(data)
if 'newsletter_form_submitted' in request.POST:
submitted_ctx['newsletter_form_submitted'] = True
newsletter_form = NewsletterForm(request.POST)
if newsletter_form.is_valid():
petition.has_newsletter = newsletter_form.cleaned_data['has_newsletter']
petition.newsletter_text = newsletter_form.cleaned_data['newsletter_text']
petition.newsletter_subscribe_http_data = newsletter_form.cleaned_data['newsletter_subscribe_http_data']
petition.newsletter_subscribe_http_mailfield = newsletter_form.cleaned_data['newsletter_subscribe_http_mailfield']
petition.newsletter_subscribe_http_url = newsletter_form.cleaned_data['newsletter_subscribe_http_url']
petition.newsletter_subscribe_mail_subject = newsletter_form.cleaned_data['newsletter_subscribe_mail_subject']
petition.newsletter_subscribe_mail_from = newsletter_form.cleaned_data['newsletter_subscribe_mail_from']
petition.newsletter_subscribe_mail_to = newsletter_form.cleaned_data['newsletter_subscribe_mail_to']
petition.newsletter_subscribe_method = newsletter_form.cleaned_data['newsletter_subscribe_method']
petition.newsletter_subscribe_mail_smtp_host = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_host']
petition.newsletter_subscribe_mail_smtp_port = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_port']
petition.newsletter_subscribe_mail_smtp_user = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_user']
petition.newsletter_subscribe_mail_smtp_password = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_password']
petition.newsletter_subscribe_mail_smtp_tls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_tls']
petition.newsletter_subscribe_mail_smtp_starttls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_starttls']
petition.save()
else:
newsletter_form = NewsletterForm({f: getattr(petition, f) for f in NewsletterForm.base_fields})
if 'style_form_submitted' in request.POST:
submitted_ctx['style_form_submitted'] = True
style_form = StyleForm(request.POST)
if style_form.is_valid():
petition.bgcolor = style_form.cleaned_data['bgcolor']
petition.linear_gradient_direction = style_form.cleaned_data['linear_gradient_direction']
petition.gradient_from = style_form.cleaned_data['gradient_from']
petition.gradient_to = style_form.cleaned_data['gradient_to']
petition.save()
else:
style_form = StyleForm({f: getattr(petition, f) for f in StyleForm.base_fields})
else:
data = {'twitter_description': petition.twitter_description,
'org_twitter_handle': petition.org_twitter_handle}
social_network_form = SocialNetworkForm(data)
content_form = ContentFormPetition({f: getattr(petition, f) for f in ContentFormPetition.base_fields})
style_form = StyleForm({f: getattr(petition, f) for f in StyleForm.base_fields})
email_form = EmailForm({f: getattr(petition, f) for f in EmailForm.base_fields})
newsletter_form = NewsletterForm({f: getattr(petition, f) for f in NewsletterForm.base_fields})
ctx = {'user': pytitionuser,
'content_form': content_form,
'style_form': style_form,
'email_form': email_form,
'social_network_form': social_network_form,
'newsletter_form': newsletter_form,
'petition': petition}
url_prefix = request.scheme + "://" + request.get_host()
if petition.owner_type == "org":
permissions = Permission.objects.get(organization=petition.org, user=pytitionuser)
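        # Build an example public URL and the slug prefix displayed in the edit
        # form; the throw-away 'toto' slug is only used to compute the prefix.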
example_url = url_prefix + reverse("slug_show_petition",
kwargs={'orgslugname': petition.org.slugname,
'petitionname': _("save-the-kittens-from-bad-wolf")})
slug_prefix = (url_prefix + reverse("slug_show_petition",
kwargs={'orgslugname': petition.org.slugname,
'petitionname': 'toto'})).rsplit('/', 1)[0]
ctx.update({'org': petition.org,
'user_permissions': permissions,
'base_template': 'petition/org_base.html',
'example_url': example_url,
'slug_prefix': slug_prefix})
else:
example_url = url_prefix + reverse("slug_show_petition",
kwargs={'username': pytitionuser.user.username,
'petitionname': _("save-the-kittens-from-bad-wolf")})
slug_prefix = (url_prefix + reverse("slug_show_petition",
kwargs={'username': pytitionuser.user.username,
'petitionname': 'toto'})).rsplit('/', 1)[0]
ctx.update({'base_template': 'petition/user_base.html',
'example_url': example_url,
'slug_prefix': slug_prefix})
ctx.update(submitted_ctx)
return render(request, "petition/edit_petition.html", ctx)
# /<int:petition_id>/show_signatures
# Show the signatures of a petition
@login_required
def show_signatures(request, petition_id):
petition = petition_from_id(petition_id)
pytitionuser = get_session_user(request)
ctx = {}
if petition.owner_type == "user":
base_template = 'petition/user_base.html'
if petition.user != pytitionuser:
messages.error(request, _("You are not allowed to view this petition's signatures."))
return redirect("user_dashboard")
else:
org = petition.org
base_template = 'petition/org_base.html'
other_orgs = pytitionuser.organization_set.filter(~Q(name=org.name)).all()
if pytitionuser not in org.members.all():
messages.error(request, _("You are not member of the following organization: \'{}\'".format(org.name)))
return redirect("user_dashboard")
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
messages.error(request, _("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')".format(orgname=org.name)))
return redirect("user_dashboard")
if not permissions.can_view_signatures:
messages.error(request, _("You are not allowed to view signatures in this organization"))
return redirect("org_dashboard", org.slugname)
ctx.update({'org': org, 'other_orgs': other_orgs,
'user_permissions': permissions})
if request.method == "POST":
action = request.POST.get('action', '')
selected_signature_ids = request.POST.getlist('signature_id', '')
failed = False
if selected_signature_ids and action:
selected_signatures = Signature.objects.filter(pk__in=selected_signature_ids)
if action == "delete":
for s in selected_signatures:
pet = s.petition
if pet.org: # Petition is owned by an org, we check for rights
if pet.org.is_allowed_to(pytitionuser, 'can_delete_signatures'):
s.delete()
else:
failed = True
else: # Petition is owned by a user, we check it's the one asking for deletion
s.delete()
if failed:
messages.error(request, _("You don't have permission to delete some or all of selected signatures"))
else:
messages.success(request, _("You successfully deleted all selected signatures"))
if action == "re-send":
for s in selected_signatures:
try:
send_confirmation_email(request, s)
except:
failed = True
if failed:
messages.error(request, _("An error happened while trying to re-send confirmation emails"))
else:
messages.success(request, _("You successfully re-sent all selected confirmation emails"))
if action == "re-send-all":
selected_signatures = Signature.objects.filter(petition=petition)
for s in selected_signatures:
try:
send_confirmation_email(request, s)
except:
failed = True
if failed:
messages.error(request, _("An error happened while trying to re-send confirmation emails"))
else:
messages.success(request, _("You successfully re-sent all confirmation emails"))
return redirect("show_signatures", petition_id)
signatures = petition.signature_set.all()
ctx.update({'petition': petition, 'user': pytitionuser,
'base_template': base_template,
'signatures': signatures})
return render(request, "petition/signature_data.html", ctx)
# /account_settings
# Show settings for the user accounts
@login_required
def account_settings(request):
pytitionuser = get_session_user(request)
submitted_ctx = {
'update_info_form_submitted': False,
'delete_account_form_submitted': False,
'password_change_form_submitted': False
}
if request.method == "POST":
if 'update_info_form_submitted' in request.POST:
update_info_form = UpdateInfoForm(pytitionuser.user, request.POST)
submitted_ctx['update_info_form_submitted'] = True
if update_info_form.is_valid():
update_info_form.save()
else:
update_info_form = get_update_form(pytitionuser.user)
if 'delete_account_form_submitted' in request.POST:
delete_account_form = DeleteAccountForm(request.POST)
submitted_ctx['delete_account_form_submitted'] = True
if delete_account_form.is_valid():
pytitionuser.drop()
return redirect("index")
else:
delete_account_form = DeleteAccountForm()
if 'password_change_form_submitted' in request.POST:
password_change_form = PasswordChangeForm(pytitionuser.user, request.POST)
submitted_ctx['password_change_form_submitted'] = True
if password_change_form.is_valid():
password_change_form.save()
messages.success(request, _("You successfully changed your password!"))
else:
password_change_form = PasswordChangeForm(pytitionuser.user)
else:
update_info_form = get_update_form(pytitionuser.user)
delete_account_form = DeleteAccountForm()
password_change_form = PasswordChangeForm(pytitionuser.user)
orgs = pytitionuser.organization_set.all()
# Checking if the user is allowed to leave the organisation
for org in orgs:
if org.members.count() < 2:
org.leave = False
else:
# More than one user, we need to check owners
owners = org.owners.all()
if owners.count() == 1 and pytitionuser in owners:
org.leave = False
else:
org.leave = True
ctx = {'user': pytitionuser,
'update_info_form': update_info_form,
'delete_account_form': delete_account_form,
'password_change_form': password_change_form,
'base_template': 'petition/user_base.html',
'orgs': orgs}
ctx.update(submitted_ctx)
return render(request, "petition/account_settings.html", ctx)
# GET/POST /org/create
# Create a new organization
@login_required
def org_create(request):
if settings.RESTRICT_ORG_CREATION and not request.user.is_superuser:
messages.error(request, _("Only super users can create an organization."))
return redirect("user_dashboard")
user = get_session_user(request)
ctx = {'user': user}
if request.method == "POST":
form = OrgCreationForm(request.POST)
if form.is_valid():
org = form.save()
org.members.add(user)
perm = Permission.objects.get(organization=org)
perm.set_all(True)
messages.success(request, _("You successfully created organization '{}'".format(org.name)))
return redirect('user_dashboard')
else:
ctx.update({'form': form})
return render(request, "petition/org_create.html", ctx)
form = OrgCreationForm()
ctx.update({'form': form})
return render(request, "petition/org_create.html", ctx)
# GET /org/<slug:orgslugname>/<slug:petitionname>
# Show a petition
def slug_show_petition(request, orgslugname=None, username=None, petitionname=None):
try:
pytitionuser = get_session_user(request)
except:
pytitionuser = None
if orgslugname:
try:
org = Organization.objects.get(slugname=orgslugname)
slug = SlugModel.objects.get(slug=petitionname, petition__org=org)
except (Organization.DoesNotExist, SlugModel.DoesNotExist):
raise Http404(_("Sorry, we are not able to find this petition"))
petition = slug.petition
else:
try:
user = PytitionUser.objects.get(user__username=username)
slug = SlugModel.objects.get(slug=petitionname, petition__user=user)
except PytitionUser.DoesNotExist:
raise Http404(_("Sorry, we are not able to find this petition"))
except SlugModel.DoesNotExist:
raise Http404(_("Sorry, we are not able to find this petition"))
petition = slug.petition
sign_form = SignatureForm(petition=petition)
ctx = {"user": pytitionuser, "petition": petition, "form": sign_form,
'meta': petition_detail_meta(request, petition.id)}
# If we've just signed successfully the petition, do not show the sign form
hide_sign_form_if_user_just_signed(request, ctx)
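    # Minimal content negotiation: return the petition as JSON with permissive
    # CORS headers when the client asks for it, otherwise render the HTML page.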
if "application/json" in request.META.get('HTTP_ACCEPT', []):
response = JsonResponse(petition.to_json)
response["Access-Control-Allow-Origin"] = "*"
response["Access-Control-Allow-Methods"] = "GET, OPTIONS"
return response
else:
return render(request, "petition/petition_detail.html", ctx)
# /<int:petition_id>/add_new_slug
# Add a new slug for a petition
@login_required
def add_new_slug(request, petition_id):
pytitionuser = get_session_user(request)
try:
petition = petition_from_id(petition_id)
except:
messages.error(request, _("This petition does not exist (anymore?)."))
return redirect("user_dashboard")
if request.method == "POST":
slugtexts = request.POST.getlist('slugtext', '')
if slugtexts == '' or slugtexts == []:
messages.error(request, _("You entered an empty slug text"))
else:
if petition.is_allowed_to_edit(pytitionuser):
for slugtext in slugtexts:
try:
petition.add_slug(slugtext)
petition.save()
messages.success(request, _("Successful addition of the slug '{}'!".format(slugtext)))
except IntegrityError:
messages.error(request, _("The slug '{}' already exists!".format(slugtext)))
except ValidationError as v:
for message in v.messages:
messages.error(request, message)
else:
messages.error(request, _("You don't have the permission to modify petitions"))
return redirect(reverse("edit_petition", args=[petition_id]) + "#tab_social_network_form")
else:
return redirect("user_dashboard")
# /<int:petition_id>/del_slug
# Remove a slug from a petition
@login_required
def del_slug(request, petition_id):
pytitionuser = get_session_user(request)
try:
petition = petition_from_id(petition_id)
except:
messages.error(request, _("This petition does not exist (anymore?)."))
return redirect("user_dashboard")
if petition.is_allowed_to_edit(pytitionuser):
slug_id = request.GET.get('slugid', None)
if not slug_id:
return redirect(reverse("edit_petition", args=[petition_id]) + "#tab_social_network_form")
slug = SlugModel.objects.get(pk=slug_id)
petition.del_slug(slug)
petition.save()
messages.success(request, _("Successful deletion of a slug"))
else:
messages.error(request, _("You don't have the permission to modify petitions"))
if petition.owner_type == "org":
return redirect("org_dashboard", petition.owner.slugname)
else:
return redirect("user_dashboard")
return redirect(reverse("edit_petition", args=[petition_id]) + "#tab_social_network_form")
@login_required
def image_upload(request):
pytitionuser = get_session_user(request)
if request.method != "POST":
return HttpResponseForbidden()
file = request.FILES.get('file', '')
if file == '':
return HttpResponseForbidden()
storage = FileSystemStorage()
path = os.path.join(storage.location, pytitionuser.username, file.name)
name = storage._save(path, file)
newrelpath = os.path.relpath(name, storage.location)
return JsonResponse({'location': storage.base_url + newrelpath})
# /transfer_petition/<int:petition_id>
# Transfer a petition to another org or user
@login_required
def transfer_petition(request, petition_id):
pytitionuser = get_session_user(request)
try:
petition = petition_from_id(petition_id)
except:
messages.error(request, _("This petition does not exist."))
return redirect("user_dashboard")
ctx = {'user': pytitionuser,
'petition': petition}
if petition.owner_type == "org":
org = petition.owner
if not org.is_allowed_to(pytitionuser, "can_modify_permissions"):
messages.error(request, _("You don't have the permission to transfer a petition from Organization '{}'"
.format(petition.owner)))
return redirect("org_dashboard", petition.owner)
elif petition.owner_type == "user" and petition.user != pytitionuser:
messages.error(request, _("You don't have the permission to transfer this petition"))
return redirect("user_dashboard")
if petition.owner_type == "org":
ctx['base_template'] = 'petition/org_base.html'
ctx['org'] = petition.owner
else:
ctx['base_template'] = 'petition/user_base.html'
if request.method == "GET":
return render(request, "petition/transfer_petition.html", ctx)
if request.method == "POST":
owner_type = request.POST.get('new_owner_type', '')
new_owner_name = request.POST.get('new_owner_name', '')
org = None
user = None
notFound = False
if owner_type == '' or new_owner_name == '':
messages.error(request, _("Incorrect request, something went wrong."))
return redirect(request, "petition/transfer_petition.html", ctx)
if owner_type == "org":
try:
org = Organization.objects.get(slugname=new_owner_name)
except:
notFound = True
if owner_type == "user":
if settings.DISABLE_USER_PETITION:
messages.error(request, _("Users are not allowed to transfer petitions to organizations on this instance."))
return redirect("user_dashboard")
try:
user = PytitionUser.objects.get(user__username=new_owner_name)
except:
notFound = True
if notFound:
messages.error(request, _("You tried to transfer a petition to a non-exiting account"))
return redirect("transfer_petition", petition.id)
try:
petition.transfer_to(user, org)
petition.unpublish() # To prevent sending an unwanted published petition to an organization.
messages.success(request, _("Petition successfully transfered!"))
return redirect("user_dashboard")
except ValueError:
messages.error(request, _("Something went wrong while transferring this petition."))
return redirect("transfer_petition", petition.id)
@login_required
def search_users_and_orgs(request):
query = request.GET.get('q', '')
if query != "":
users = PytitionUser.objects.filter(Q(user__username__contains=query) | Q(user__first_name__icontains=query) |
Q(user__last_name__icontains=query)).all()
orgs = Organization.objects.filter(name__icontains=query).all()
else:
users = []
orgs = []
result = {
"orgs": [{'slugname': org.slugname,
'name': org.name} for org in orgs],
"users": [{'username': user.user.username,
'firstname': user.user.first_name,
'lastname': user.user.last_name} for user in users]
}
return JsonResponse(result)
class PytitionUserCreateView(CreateView):
def save_numbers(self, request):
request.session['random_a'] = self.a
request.session['random_b'] = self.b
request.session['answer'] = self.answer
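    # Simple arithmetic anti-bot check: two random numbers are drawn, their sum
    # is stored in the session and the registration form asks the visitor for it.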
def get_new_numbers(self):
r = random.Random(time())
self.a = r.choice(range(0, 100))
self.b = r.choice(range(0, 100))
self.answer = self.a + self.b
self.form_class.base_fields['answer'].label = _("How much is {a} + {b}?").format(a=self.a, b=self.b)
def __init__(self, *args, **kwargs):
super(PytitionUserCreateView, self).__init__(*args, **kwargs)
self.get_new_numbers()
def get_context_data(self, **kwargs):
self.save_numbers(self.request)
return super(PytitionUserCreateView, self).get_context_data(**kwargs)
def get_form_kwargs(self):
kwargs = super(PytitionUserCreateView, self).get_form_kwargs()
kwargs.update({
'request': self.request
})
return kwargs
def form_valid(self, form):
form.send_success_email()
return super().form_valid(form)
| 43.075525 | 157 | 0.651089 |
794481568f104cc993a427f27bcfc80e6aa2a559 | 3,546 | py | Python | lektor/pagination.py | khashashin/StaticWebSieteLesson | 5cc29b547b3db98dbcb4d0bd9a2aec0b7532774e | [
"BSD-3-Clause"
] | null | null | null | lektor/pagination.py | khashashin/StaticWebSieteLesson | 5cc29b547b3db98dbcb4d0bd9a2aec0b7532774e | [
"BSD-3-Clause"
] | null | null | null | lektor/pagination.py | khashashin/StaticWebSieteLesson | 5cc29b547b3db98dbcb4d0bd9a2aec0b7532774e | [
"BSD-3-Clause"
] | 1 | 2020-09-22T04:18:18.000Z | 2020-09-22T04:18:18.000Z | from math import ceil
from lektor._compat import range_type
class Pagination(object):
def __init__(self, record, pagination_config):
#: the pagination config
self.config = pagination_config
#: the current page's record
self.current = record
#: the current page number (1 indexed)
self.page = record.page_num
#: the number of items to be displayed on a page.
self.per_page = pagination_config.per_page
#: the total number of items matching the query
self.total = pagination_config.count_total_items(record)
@property
def items(self):
"""The children for this page."""
return self.config.slice_query_for_page(self.current, self.page)
@property
def pages(self):
"""The total number of pages"""
if self.per_page == 0:
pages = 0
else:
pages = int(ceil(self.total / float(self.per_page)))
return pages
@property
def prev_num(self):
"""Number of the previous page."""
if self.page > 1:
return self.page - 1
@property
def has_prev(self):
"""True if a previous page exists"""
return self.page > 1
@property
def prev(self):
if not self.has_prev:
return None
return self.config.get_record_for_page(self.current,
self.page - 1)
@property
def has_next(self):
"""True if a next page exists."""
return self.page < self.pages
@property
def next_num(self):
"""Number of the next page"""
if self.page < self.pages:
return self.page + 1
@property
def next(self):
if not self.has_next:
return None
return self.config.get_record_for_page(self.current,
self.page + 1)
def for_page(self, page):
"""Returns the pagination for a specific page."""
if 1 <= page <= self.pages:
return self.config.get_record_for_page(self.current, page)
def iter_pages(self, left_edge=2, left_current=2,
right_current=5, right_edge=2):
"""Iterates over the page numbers in the pagination. The four
parameters control the thresholds how many numbers should be produced
from the sides. Skipped page numbers are represented as `None`.
This is how you could render such a pagination in the templates:
.. sourcecode:: html+jinja
{% macro render_pagination(pagination, endpoint) %}
<div class=pagination>
{%- for page in pagination.iter_pages() %}
{% if page %}
{% if page != pagination.page %}
<a href="{{ url_for(endpoint, page=page) }}">{{ page }}</a>
{% else %}
<strong>{{ page }}</strong>
{% endif %}
{% else %}
<span class=ellipsis>...</span>
{% endif %}
{%- endfor %}
</div>
{% endmacro %}
"""
last = 0
for num in range_type(1, self.pages + 1):
if num <= left_edge or \
(num > self.page - left_current - 1 and
num < self.page + right_current) or \
num > self.pages - right_edge:
if last + 1 != num:
yield None
yield num
last = num
| 32.833333 | 79 | 0.532149 |
79448288ea4c621de509a0cce604a36b0c68804d | 6,111 | py | Python | docs/source/conf.py | dgc-network/dgc-sabre | f4a2530f50adfe75c32a318ff86042bc3d4ad044 | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | dgc-network/dgc-sabre | f4a2530f50adfe75c32a318ff86042bc3d4ad044 | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | dgc-network/dgc-sabre | f4a2530f50adfe75c32a318ff86042bc3d4ad044 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import shlex
# -- Project information -----------------------------------------------------
project = u'Smart'
copyright = u'2018, dgc.network'
author = u'dgc.network'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u'0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.imgmath',
'sphinxcontrib.httpdomain',
'sphinxcontrib.openapi',
]
# Autodoc settings
autodoc_member_order = 'bysource'
autoclass_content = 'both'
# Napoleon settings
napoleon_use_ivar = True
napoleon_include_special_with_doc = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sawtooth Smart'
copyright = u'2018, dgc.network'
author = u'dgc.network'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'latest'
# The full version, including alpha/beta/rc tags.
release = 'latest'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# A dictionary of values to pass into the template's context for all pages.
# Single values can also be put in this dictionary using the -A command-line
# option of sphinx-build.
html_context = {
'css_files': ['_static/theme_overrides.css']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
'index': 'indexcontent.html'
}
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Smartdoc'
PREAMBLE = ''
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': PREAMBLE,
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'smart.tex', u'Smart Documentation',
u'dgc.network', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'smart', u'Smart Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Smart', u'Smart Documentation',
author, 'Smart', 'One line description of project.',
'Miscellaneous'),
]
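# Usage note (editorial addition, not part of the upstream file): with this
# configuration in place the HTML docs are normally built with the standard
# Sphinx invocation, e.g.
#
#     sphinx-build -b html docs/source docs/build/html
#
# The source/build paths above are an assumption based on this file living in
# docs/source/; adjust them to the repository's actual layout.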
| 29.955882 | 79 | 0.675503 |
794483c2550759c61a5785fe64f231e3cc87f874 | 10,652 | py | Python | tests/parallel_tests/test_madweight.py | mfasDa/MadGraph5 | 9b90feda56d6209b81f74e61dc353a729886a4a5 | [
"NCSA"
] | null | null | null | tests/parallel_tests/test_madweight.py | mfasDa/MadGraph5 | 9b90feda56d6209b81f74e61dc353a729886a4a5 | [
"NCSA"
] | null | null | null | tests/parallel_tests/test_madweight.py | mfasDa/MadGraph5 | 9b90feda56d6209b81f74e61dc353a729886a4a5 | [
"NCSA"
] | 1 | 2021-07-06T08:19:27.000Z | 2021-07-06T08:19:27.000Z | ################################################################################
#
# Copyright (c) 2009 The MadGraph Development team and Contributors
#
# This file is a part of the MadGraph 5 project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph license which should accompany this
# distribution.
#
# For more information, please visit: http://madgraph.phys.ucl.ac.be
#
################################################################################
"""A set of objects to allow for easy comparisons of results from various ME
generators (e.g., MG v5 against v4, ...) and output nice reports in different
formats (txt, tex, ...).
"""
from __future__ import absolute_import
from __future__ import print_function
import datetime
import glob
import itertools
import logging
import os
import re
import shutil
import subprocess
import sys
import time
import unittest
from six.moves import map
pjoin = os.path.join
# Get the grand parent directory (mg5 root) of the module real path
# (tests/acceptance_tests) and add it to the current PYTHONPATH to allow
# for easy import of MG5 tools
_file_path = os.path.dirname(os.path.realpath(__file__))
import madgraph.iolibs.template_files as template_files
import madgraph.iolibs.save_load_object as save_load_object
import madgraph.interface.master_interface as cmd_interface
import madgraph.various.misc as misc
from madgraph import MadGraph5Error, MG5DIR
from . import me_comparator
class TestMadWeight(unittest.TestCase):
"""A couple of points in order to ensure the MW is working fine."""
def get_result(self, text):
solution = {}
for line in text.split('\n'):
line = line.strip().split('#')[0]
split = line.split()
if not len(split) in [4,5]:
continue
if len(split) ==4:
event_nb, card_nb, weight, error = list(map(float, split))
tf_set = 1.
else:
event_nb, card_nb, tf_set, weight, error = list(map(float, split))
solution[(event_nb,card_nb,tf_set)] = (weight,error)
return solution
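    # Editorial illustration (not part of the original test suite): get_result()
    # parses weights.out blocks of "event_nb card_id [tf_set] weight error"
    # lines ('#' comments ignored) into a {(event, card, tf_set): (weight, error)}
    # dict, as the small self-contained check below shows.
    def _example_weights_format(self):
        sample = ("# format: LHCO_event_number card_id value integration_error\n"
                  "24 1 7.0e-22 3.0e-24\n"
                  "28 2 5.0e-23 2.5e-25\n")
        parsed = self.get_result(sample)
        assert parsed == {(24.0, 1.0, 1.0): (7.0e-22, 3.0e-24),
                          (28.0, 2.0, 1.0): (5.0e-23, 2.5e-25)}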
def test_short_mw_tt_full_lept(self):
"""checking that the weight for p p > t t~ fulllept is working"""
try:
shutil.rmtree(pjoin(MG5DIR,'TEST_MW_TT_prod'))
except Exception as error:
pass
cmd = """set automatic_html_opening False --no-save
set cluster_temp_path /tmp --no-save
generate p p > t t~, (t > w+ b, w+ > e+ ve), (t~ > w- b~, w- > e- ve~)
output madweight TEST_MW_TT_prod_full
launch
change_tf dbl_gauss_pt_jet
./tests/input_files/mw_ttprod.lhco.gz
set nb_exp_events 2
set log_level debug
set nb_event_by_node 1
set mw_perm montecarlo F
set ebeam1 7000
set ebeam2 7000
set pdlabel cteq6l1
"""
open('/tmp/mg5_cmd','w').write(cmd)
if logging.getLogger('madgraph').level <= 20:
stdout=None
stderr=None
else:
devnull =open(os.devnull,'w')
stdout=devnull
stderr=devnull
start = time.time()
print('this mw test is expected to take 30s on two core. (MBP retina 2012) current time: %02dh%02d' % (time.localtime().tm_hour, time.localtime().tm_min))
subprocess.call([pjoin(MG5DIR,'bin','mg5'),
'/tmp/mg5_cmd'],
cwd=pjoin(MG5DIR),
stdout=stdout, stderr=stderr)
run_time = time.time() - start
print('tt~ full takes %smin %is' % (run_time//60, run_time % 60))
data = open(pjoin(MG5DIR, 'TEST_MW_TT_prod_full', 'Events', 'fermi', 'weights.out')).read()
solution = self.get_result(data)
expected = """# Weight (un-normalize) for each card/event
# format: LHCO_event_number card_id value integration_error
24 1 7.13353274182e-22 3.48595541497e-24
24 2 4.48106063562e-22 2.23501639194e-24
28 1 1.22526200347e-23 5.8955444892e-26
28 2 5.53271960779e-23 2.59251688524e-25
"""
expected = self.get_result(expected)
for key, (value,error) in expected.items():
assert key in solution
value2, error2 = solution[key]
self.assertTrue(abs(value-value2) < 5* abs(error+error2))
self.assertTrue(abs(value-value2)/abs(value+value2) < 2*abs(value/error))
self.assertTrue(abs(error2)/abs(value2) < 0.02)
#try:
# shutil.rmtree(pjoin(MG5DIR,'TEST_MW_TT_prod_full'))
#except Exception, error:
# pass
def test_short_mw_tt_semi(self):
"""checking that the weight for p p > t t~ semilept is working"""
try:
shutil.rmtree(pjoin(MG5DIR,'TEST_MW_TT_prod'))
except Exception as error:
pass
cmd = """set automatic_html_opening False --no-save
set cluster_temp_path /tmp --no-save
generate p p > t t~, (t > w+ b, w+ > l+ vl), (t~ > w- b~, w- > j j)
output madweight TEST_MW_TT_prod -f
launch
change_tf dbl_gauss_pt_jet
./tests/input_files/mw_ttprod.lhco.gz
set nb_exp_events 1
set log_level debug
set nb_event_by_node 1
set mw_run pretrained T
set mw_perm montecarlo T
set mw_run MW_int_points 1000
set mw_run MW_int_refine 8000
set mw_run use_sobol T
set mw_gen force_nwa 2
set ebeam1 7000
set ebeam2 7000
set pdlabel cteq6l1
"""
open('/tmp/mg5_cmd','w').write(cmd)
if logging.getLogger('madgraph').level <= 20:
stdout=None
stderr=None
else:
devnull =open(os.devnull,'w')
stdout=devnull
stderr=devnull
start = time.time()
print('this mw test is expected to take 2 min on two core. (MBP retina 2012) current time: %02dh%02d' % (time.localtime().tm_hour, time.localtime().tm_min))
subprocess.call([pjoin(MG5DIR,'bin','mg5'),
'/tmp/mg5_cmd'],
cwd=pjoin(MG5DIR),
stdout=stdout, stderr=stderr)
run_time = time.time() - start
print('tt~ semi takes %smin %is' % (run_time//60, run_time % 60))
data = open(pjoin(MG5DIR, 'TEST_MW_TT_prod', 'Events', 'fermi', 'weights.out')).read()
solution = self.get_result(data)
expected = """# Weight (un-normalize) for each card/event
# format: LHCO_event_number card_id value integration_error
2 1 1.06068538348e-23 6.39167252183e-26
2 2 5.59862383052e-24 3.76145999572e-26
9 1 6.92859060639e-25 6.04804891841e-27
9 2 6.79399430333e-25 7.38824630883e-27
"""
expected = self.get_result(expected)
for key, (value,error) in expected.items():
assert key in solution
value2, error2 = solution[key]
self.assertTrue(abs(value-value2) < 5* abs(error+error2))
self.assertTrue(abs(value-value2)/abs(value+value2) < 2*abs(value/error))
self.assertTrue(abs(error2)/abs(value2) < 0.02)
#try:
# shutil.rmtree(pjoin(MG5DIR,'TEST_MW_TT_prod'))
#except Exception, error:
# pass
def test_short_mw_wa_refine(self):
"""checking that the weight for p p > w a, w > l- is working"""
try:
shutil.rmtree(pjoin(MG5DIR,'TEST_MW_WA_prod'))
except Exception as error:
pass
cmd = """set automatic_html_opening False --no-save
set cluster_temp_path /tmp --no-save
generate p p > w- a , w- > e- ve~
output madweight TEST_MW_WA_prod -f
launch
change_tf dbl_gauss_pt_jet
./tests/input_files/mw_wa_prod.lhco
set nb_exp_events 1
set log_level debug
set nb_event_by_node 1
set mw_parameter 12 23
set mw_parameter 13 80 90
set mw_run MW_int_points 100
set mw_run MW_int_refine 100
set ebeam1 7000
set ebeam2 7000
set pdlabel cteq6l1
launch -i
refine 0.01
set mw_run MW_int_points 1000
set mw_run MW_int_refine 10000
"""
open('/tmp/mg5_cmd','w').write(cmd)
if logging.getLogger('madgraph').level <= 20:
stdout=None
stderr=None
else:
devnull =open(os.devnull,'w')
stdout=devnull
stderr=devnull
start = time.time()
print('this mw test is expected to take 15s on two core. (MBP retina 2012) current time: %02dh%02d' % (time.localtime().tm_hour, time.localtime().tm_min))
subprocess.call([pjoin(MG5DIR,'bin','mg5'),
'/tmp/mg5_cmd'],
cwd=pjoin(MG5DIR),
stdout=stdout, stderr=stderr)
run_time = time.time() - start
print('wa takes %smin %is' % (run_time//60, run_time % 60))
data = open(pjoin(MG5DIR, 'TEST_MW_WA_prod', 'Events', 'fermi', 'weights.out')).read()
solution = self.get_result(data)
expected = """# Weight (un-normalize) for each card/event
# format: LHCO_event_number card_id value integration_error
# Weight (un-normalize) for each card/event
# format: LHCO_event_number card_id value integration_error
0 1 2.68641824739e-14 1.75587340837e-17
0 2 1.10047493409e-13 4.9103491463e-16
"""
expected = self.get_result(expected)
for key, (value,error) in expected.items():
assert key in solution
value2, error2 = solution[key]
self.assertTrue(abs(value-value2) < 5* abs(error+error2))
self.assertTrue(abs(value-value2)/abs(value+value2) < 2*abs(value/error))
self.assertTrue(abs(error2)/abs(value2) < 0.02)
try:
shutil.rmtree(pjoin(MG5DIR,'TEST_MW_WA_prod'))
except Exception as error:
pass
| 38.594203 | 165 | 0.573038 |
794483e9219878561871eeb85efab9de2d8655bf | 4,701 | py | Python | userbot/plugins/_inlinebot.py | SHER321/Us3rBot | 252ba145dc04a04f27f866d1f7be5165dacc8e32 | [
"MIT"
] | null | null | null | userbot/plugins/_inlinebot.py | SHER321/Us3rBot | 252ba145dc04a04f27f866d1f7be5165dacc8e32 | [
"MIT"
] | null | null | null | userbot/plugins/_inlinebot.py | SHER321/Us3rBot | 252ba145dc04a04f27f866d1f7be5165dacc8e32 | [
"MIT"
] | 1 | 2022-03-30T22:01:02.000Z | 2022-03-30T22:01:02.000Z | from math import ceil
import asyncio
import json
import random
import re
from telethon import events, errors, custom
from userbot import CMD_LIST
import io
if Var.TG_BOT_USER_NAME_BF_HER is not None and tgbot is not None:
@tgbot.on(events.InlineQuery) # pylint:disable=E0602
async def inline_handler(event):
builder = event.builder
result = None
query = event.text
if event.query.user_id == bot.uid and query.startswith("Userbot"):
rev_text = query[::-1]
buttons = paginate_help(0, CMD_LIST, "helpme")
result = builder.article(
"© Userbot Help",
text="{}\nCurrently Loaded Plugins: {}".format(
query, len(CMD_LIST)),
buttons=buttons,
link_preview=False
)
await event.answer([result] if result else None)
@tgbot.on(events.callbackquery.CallbackQuery( # pylint:disable=E0602
data=re.compile(b"helpme_next\((.+?)\)")
))
async def on_plug_in_callback_query_handler(event):
if event.query.user_id == bot.uid: # pylint:disable=E0602
current_page_number = int(
event.data_match.group(1).decode("UTF-8"))
buttons = paginate_help(
current_page_number + 1, CMD_LIST, "helpme")
# https://t.me/TelethonChat/115200
await event.edit(buttons=buttons)
else:
reply_pop_up_alert = "Get your own userbot, don't use another's\n ib @Aid_3n for learning how to get a userbot!"
await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
@tgbot.on(events.callbackquery.CallbackQuery( # pylint:disable=E0602
data=re.compile(b"helpme_prev\((.+?)\)")
))
async def on_plug_in_callback_query_handler(event):
if event.query.user_id == bot.uid: # pylint:disable=E0602
current_page_number = int(
event.data_match.group(1).decode("UTF-8"))
buttons = paginate_help(
current_page_number - 1,
CMD_LIST, # pylint:disable=E0602
"helpme"
)
# https://t.me/TelethonChat/115200
await event.edit(buttons=buttons)
else:
reply_pop_up_alert = "Get your own userbot, don't use another's\n ib @Aid_3n for learning how to get a userbot!"
await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
@tgbot.on(events.callbackquery.CallbackQuery( # pylint:disable=E0602
data=re.compile(b"us_plugin_(.*)")
))
async def on_plug_in_callback_query_handler(event):
plugin_name = event.data_match.group(1).decode("UTF-8")
help_string = ""
try:
for i in CMD_LIST[plugin_name]:
help_string += i
help_string += "\n"
except:
pass
        if help_string == "":
reply_pop_up_alert = "{} is useless".format(plugin_name)
else:
reply_pop_up_alert = help_string
reply_pop_up_alert += "\n Use .unload {} to remove this plugin\n\
© Userbot".format(plugin_name)
try:
await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
except:
with io.BytesIO(str.encode(reply_pop_up_alert)) as out_file:
out_file.name = "{}.txt".format(plugin_name)
await event.client.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=plugin_name
)
def paginate_help(page_number, loaded_plugins, prefix):
number_of_rows = 10
number_of_cols = 2
helpable_plugins = []
for p in loaded_plugins:
if not p.startswith("_"):
helpable_plugins.append(p)
helpable_plugins = sorted(helpable_plugins)
modules = [custom.Button.inline(
"{} {}".format("ℹ️", x),
data="us_plugin_{}".format(x))
for x in helpable_plugins]
pairs = list(zip(modules[::number_of_cols], modules[1::number_of_cols]))
if len(modules) % number_of_cols == 1:
pairs.append((modules[-1],))
max_num_pages = ceil(len(pairs) / number_of_rows)
modulo_page = page_number % max_num_pages
if len(pairs) > number_of_rows:
pairs = pairs[modulo_page * number_of_rows:number_of_rows * (modulo_page + 1)] + \
[
(custom.Button.inline("Previous", data="{}_prev({})".format(prefix, modulo_page)),
custom.Button.inline("Next", data="{}_next({})".format(prefix, modulo_page)))
]
return pairs
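# Editorial example (hypothetical plugin names, not part of the original file):
# paginate_help() lays the loaded plugins out as a two-column inline keyboard,
# ten rows per page, e.g.
#
#     paginate_help(0, {"admin": [], "afk": [], "notes": []}, "helpme")
#     # -> [(Button('ℹ️ admin'), Button('ℹ️ afk')), (Button('ℹ️ notes'),)]
#
# with callback data b"us_plugin_admin" etc.; Previous/Next buttons are only
# appended once more than ten rows are needed, and plugin names starting with
# "_" are hidden from the help keyboard.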
| 40.525862 | 124 | 0.598171 |
794483f05366960a0699274a35e54ba27117c49d | 1,397 | py | Python | setup.py | ensoft/enbackup | 56d1ed48692269ecfa5799656f90f70663645e5f | [
"MIT"
] | null | null | null | setup.py | ensoft/enbackup | 56d1ed48692269ecfa5799656f90f70663645e5f | [
"MIT"
] | null | null | null | setup.py | ensoft/enbackup | 56d1ed48692269ecfa5799656f90f70663645e5f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# EnBackup setup file
#
from distutils.core import setup
setup(name='enbackup',
version="1.4.18",
description='EnBackup: backup tools based on rdiff-backup',
packages=['enbackup', 'enbackup.cmd'],
package_dir={'': 'src'},
scripts=['bin/enbackup',
'bin/enbackup-archive.py',
'bin/enbackup-archive.sh',
'bin/enbackup-rdiff-to-mirror.py',
'bin/enbackup-sata-poll.py',
'bin/enbackup-sata-insert.sh',
'bin/enbackup-sata-remove.sh',
'bin/backup_breakers.sh',
'bin/enbackup-suspend-usb-device.sh'],
data_files=[('/etc/enbackup.d',
['etc/enbackup-archive.rc',
'etc/enbackup-mirror.rc',
'etc/enbackup.rc']),
('/usr/share/doc/enbackup/examples/',
['etc/enbackup.rules',
'etc/enbackup-mysql-dump.cron',
'etc/enbackup-ldap-dump.cron',
'etc/enbackup-python-dump.cron',
'etc/enbackup.cron',
'etc/enbackup-installed-packages.cron',
'etc/enbackup_unaged.rc',
'etc/enbackup_aged.rc']),
('/usr/share/doc/enbackup/',
['doc/README'])
]
)
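# Usage note (editorial addition, not part of the original file): a distutils
# setup script of this form is typically exercised with the standard commands,
# e.g.
#
#     python setup.py sdist      # build a source distribution
#     python setup.py install    # install packages, scripts and data_files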
| 35.820513 | 65 | 0.486042 |
7944844931e0482a699d8ca6349a457538abaaa3 | 2,032 | py | Python | scripts/ssm_get_default_params.py | symroe/moto | 4e106995af6f2820273528fca8a4e9ee288690a5 | [
"Apache-2.0"
] | null | null | null | scripts/ssm_get_default_params.py | symroe/moto | 4e106995af6f2820273528fca8a4e9ee288690a5 | [
"Apache-2.0"
] | 1 | 2022-02-19T02:10:45.000Z | 2022-02-19T02:15:52.000Z | scripts/ssm_get_default_params.py | symroe/moto | 4e106995af6f2820273528fca8a4e9ee288690a5 | [
"Apache-2.0"
] | null | null | null | import boto3
import json
import os
import subprocess
import sure # noqa # pylint: disable=unused-import
import time
from moto.ssm.utils import convert_to_tree
def retrieve_by_path(client, path):
print(f"Retrieving all parameters from {path}. "
f"AWS has around 14000 parameters, and we can only retrieve 10 at the time, so this may take a while.\n\n")
x = client.get_parameters_by_path(Path=path, Recursive=True)
parameters = x["Parameters"]
next_token = x["NextToken"]
while next_token:
x = client.get_parameters_by_path(Path=path, Recursive=True, NextToken=next_token)
parameters.extend(x["Parameters"])
next_token = x.get("NextToken")
if len(parameters) % 100 == 0:
print(f"Retrieved {len(parameters)} from {path}...")
time.sleep(0.5)
return parameters
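# Editorial illustration (not part of the original script): retrieve_by_path()
# just follows SSM's NextToken pagination, so e.g.
#
#     client = boto3.client("ssm", region_name="us-west-1")
#     regions = retrieve_by_path(client, "/aws/service/global-infrastructure/regions")
#
# returns the flat list of every parameter dict under that path.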
def main():
"""
Retrieve global parameters from SSM
- Download from AWS
- Convert them to a more space-optimized data format
- Store this in the dedicated moto/ssm/resources-folder
Note:
There are around 20k parameters, and we can only retrieve 10 at a time.
So running this scripts takes a while.
"""
client = boto3.client('ssm', region_name="us-west-1")
default_param_paths = ["/aws/service/global-infrastructure/regions",
"/aws/service/global-infrastructure/services"]
for path in default_param_paths:
params = retrieve_by_path(client, path)
tree = convert_to_tree(params)
root_dir = (
subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
.decode()
.strip()
)
filename = "{}.json".format(path.split("/")[-1])
dest = os.path.join(root_dir, "moto/ssm/resources/{}".format(filename))
print("Writing data to {0}".format(dest))
with open(dest, "w") as open_file:
json.dump(tree, open_file, sort_keys=True, indent=2)
if __name__ == "__main__":
main()
| 32.774194 | 117 | 0.635827 |
79448468b24ccbb421050b5195e1e710cf3347e4 | 343 | py | Python | src/common/decorators/http.py | yrrodriguezb/djangp_apps | 7a0f85f65558e02d0707525b5d7f5bfa6caacb2d | [
"MIT"
] | null | null | null | src/common/decorators/http.py | yrrodriguezb/djangp_apps | 7a0f85f65558e02d0707525b5d7f5bfa6caacb2d | [
"MIT"
] | null | null | null | src/common/decorators/http.py | yrrodriguezb/djangp_apps | 7a0f85f65558e02d0707525b5d7f5bfa6caacb2d | [
"MIT"
] | null | null | null | from django.http import HttpResponseBadRequest
def ajax_required(func):
def wrap(request, *args, **kwargs):
if not request.is_ajax():
return HttpResponseBadRequest('Request ajax required')
return func(request, *args, **kwargs)
wrap.__doc__ = func.__doc__
wrap.__name__ = func.__name__
return wrap
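# Editorial usage sketch (hypothetical view, not part of the original module):
#
#     from django.http import JsonResponse
#
#     @ajax_required
#     def autocomplete(request):
#         return JsonResponse({"results": []})
#
# Non-AJAX requests to the wrapped view get the 400 "Request ajax required"
# response instead of reaching the view body.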
| 28.583333 | 66 | 0.685131 |
79448479d98fb3ef577de3f8384661a4091524e4 | 6,237 | py | Python | run_model.py | stephenwithav/DexiNed | 3bd8a79c6a6673d2c4e2d0f69b84b7796c924e3b | [
"MIT"
] | 1 | 2020-08-07T12:07:33.000Z | 2020-08-07T12:07:33.000Z | run_model.py | stephenwithav/DexiNed | 3bd8a79c6a6673d2c4e2d0f69b84b7796c924e3b | [
"MIT"
] | null | null | null | run_model.py | stephenwithav/DexiNed | 3bd8a79c6a6673d2c4e2d0f69b84b7796c924e3b | [
"MIT"
] | null | null | null | """ DexiNed main script
This code is based on DexiNed (Dense Extreme Inception Network for Edge Detection),
Please pay attention in the function config_model() to set any parameter before training or
testing the model.
"""
__author__ = "Xavier Soria Poma, CVC-UAB"
__email__ = "[email protected] / [email protected]"
__homepage__="www.cvc.uab.cat/people/xsoria"
__credits__=['DexiNed']
__copyright__ = "MIT License [see LICENSE for details]"#"Copyright 2019, CIMI"
import sys
import argparse
import tensorflow as tf
import utls.dataset_manager as dm
from train import m_trainer
from test import m_tester
def config_model():
parser = argparse.ArgumentParser(description='Basic details to run HED')
# dataset config
parser.add_argument('--train_dataset', default='BIPED', choices=['BIPED','BSDS'])
parser.add_argument('--test_dataset', default='CLASSIC', choices=['BIPED', 'BSDS','MULTICUE','NYUD','PASCAL','CID','DCD'])
parser.add_argument('--dataset_dir',default=None,type=str) # default:'/opt/dataset/'
parser.add_argument('--dataset_augmented', default=True,type=bool)
parser.add_argument('--train_list',default='train_rgb.lst', type=str) # BSDS train_pair.lst, SSMIHD train_rgb_pair.lst/train_rgbn_pair.lst
parser.add_argument('--test_list', default='test_pair.lst',type=str) # for NYUD&BSDS:test_pair.lst, ssmihd: msi_test.lst/vis_test.lst
parser.add_argument('--trained_model_dir', default='train_1',type=str) # 'trainV2_RN'
# SSMIHD_RGBN msi_valid_list.txt and msi_test_list.txt is for unified test
parser.add_argument('--use_nir', default=False, type=bool)
parser.add_argument('--use_dataset', default=False, type=bool) # test: dataset TRUE single image FALSE
# model config
parser.add_argument('--model_state', default='test', choices=['train','test','None']) # always in None
parser.add_argument('--model_name', default='DXN',choices=['DXN','XCP','None'])
parser.add_argument('--use_v1', default=False,type=bool)
parser.add_argument('--model_purpose', default='edges',choices=['edges','restoration','None'])
parser.add_argument('--batch_size_train',default=8,type=int)
parser.add_argument('--batch_size_val',default=8, type=int)
parser.add_argument('--batch_size_test',default=1,type=int)
parser.add_argument('--checkpoint_dir', default='checkpoints',type=str)
parser.add_argument('--logs_dir', default='logs',type=str)
parser.add_argument('--learning_rate',default=1e-4, type=float) # 1e-4=0.0001
parser.add_argument('--lr_scheduler',default=None,choices=[None,'asce','desc']) # check here
parser.add_argument('--learning_rate_decay', default=0.1,type=float)
parser.add_argument('--weight_decay', default=0.0002, type=float)
parser.add_argument('--model_weights_path', default='vgg16_.npy')
parser.add_argument('--train_split', default=0.9, type=float) # default 0.8
parser.add_argument('--max_iterations', default=150000, type=int) # 100000
parser.add_argument('--learning_decay_interval',default=25000, type=int) # 25000
parser.add_argument('--loss_weights', default=1.0, type=float)
parser.add_argument('--save_interval', default=20000, type=int) # 50000
parser.add_argument('--val_interval', default=30, type=int)
parser.add_argument('--use_subpixel', default=None, type=bool) # None=upsampling with transp conv
parser.add_argument('--deep_supervision', default=True, type= bool)
parser.add_argument('--target_regression',default=True, type=bool) # true
parser.add_argument('--mean_pixel_values', default=[103.939,116.779,123.68, 137.86], type=float)# [103.939,116.779,123.68]
# for Nir pixels mean [103.939,116.779,123.68, 137.86]
parser.add_argument('--channel_swap', default=[2,1,0], type=int)
parser.add_argument('--gpu-limit',default=1.0, type= float, )
parser.add_argument('--use_trained_model', default=True,type=bool) # for vvg16
parser.add_argument('--use_previous_trained', default=False, type=bool)
# image configuration
parser.add_argument('--image_width', default=960, type=int) # 480 NYUD=560 BIPED=1280 default 400 other 448
parser.add_argument('--image_height', default=560, type=int) # 480 for NYUD 425 BIPED=720 default 400
parser.add_argument('--n_channels', default=3, type=int) # last ssmihd_xcp trained in 512
# test config
parser.add_argument('--test_snapshot', default=149736, type=int) # BIPED: 149736 BSDS:101179
#DexiNedv1=149736,DexiNedv2=149999
parser.add_argument('--testing_threshold', default=0.0, type=float)
parser.add_argument('--base_dir_results',default=None,type=str) # default: '/opt/results/edges'
args = parser.parse_args()
return args
def get_session(gpu_fraction):
num_threads = False
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
if num_threads:
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
else:
return tf.Session(config=tf.ConfigProto())
def main(args):
if not args.dataset_augmented:
# Only for BIPED dataset
# dm.augment_data(args)
print("Please visit the webpage of BIPED in:")
print("https://xavysp.github.io/MBIPED/")
print("and run the code")
sys.exit()
if args.model_state =='train' or args.model_state=='test':
sess = get_session(args.gpu_limit)
# sess =tf.Session()
else:
print("The model state is None, so it will exit...")
sys.exit()
if args.model_state=='train':
trainer = m_trainer(args)
trainer.setup()
trainer.run(sess)
sess.close()
if args.model_state=='test':
if args.test_dataset=="BIPED":
if args.image_width >700:
pass
else:
print(' image size is not set in non augmented data')
sys.exit()
tester = m_tester(args)
tester.setup(sess)
tester.run(sess)
sess.close()
if args.model_state=="None":
print("Sorry the model state is {}".format(args.model_state))
sys.exit()
if __name__=='__main__':
args = config_model()
main(args=args)
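# Usage note (editorial addition, not part of the original script): typical
# invocations with the flags defined in config_model(), e.g.
#
#     python run_model.py --model_state test
#     python run_model.py --model_state train --train_dataset BIPED --batch_size_train 8
#
# (--test_dataset defaults to CLASSIC, i.e. single-image testing.)
# --dataset_dir and --base_dir_results default to None above and normally need
# to be pointed at the local dataset and results directories.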
| 47.25 | 142 | 0.700337 |
794485914143dac5eef9059c6bcb6f4c7f01e62e | 12,080 | py | Python | kettle/stream.py | qinqon/test-infra | d9fbec8e187788b04bf03d86939087cb11d801f8 | [
"Apache-2.0"
] | null | null | null | kettle/stream.py | qinqon/test-infra | d9fbec8e187788b04bf03d86939087cb11d801f8 | [
"Apache-2.0"
] | null | null | null | kettle/stream.py | qinqon/test-infra | d9fbec8e187788b04bf03d86939087cb11d801f8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Receive push events for new builds and upload rows to BigQuery."""
import argparse
import json
import os
import pprint
import socket
import sys
import traceback
import time
import multiprocessing.pool
import ruamel.yaml as yaml
try:
from google.api_core import exceptions as api_exceptions
from google.cloud import bigquery
from google.cloud import pubsub_v1
import google.cloud.exceptions
except ImportError:
print('WARNING: unable to load google cloud (test environment?)')
traceback.print_exc()
import model
import make_db
import make_json
MAX_ROW_UPLOAD = 10 # See https://github.com/googleapis/google-cloud-go/issues/2855
def should_exclude(object_id, bucket_id, buckets):
# Objects of form a/b/c/<jobname>/<hash>/<objectFile>'
if bucket_id not in buckets:
return False
return any(f'/{job}/' in object_id for job in buckets[bucket_id].get('exclude_jobs', []))
def process_changes(results, buckets):
"""Split GCS change events into trivial ack_ids and builds to further process."""
ack_ids = [] # pubsub rec_message ids to acknowledge
todo = [] # (id, job, build) of builds to grab
# process results, find finished builds to process
for rec_message in results:
eventType = rec_message.message.attributes['eventType']
object_id = rec_message.message.attributes['objectId']
bucket_id = rec_message.message.attributes['bucketId']
exclude = should_exclude(object_id, bucket_id, buckets)
if eventType != 'OBJECT_FINALIZE' or not object_id.endswith('/finished.json') or exclude:
ack_ids.append(rec_message.ack_id)
continue
job, build = object_id[:-len('/finished.json')].rsplit('/', 1)
job = 'gs://%s/%s' % (bucket_id, job)
todo.append((rec_message.ack_id, job, build))
return ack_ids, todo
def get_started_finished(gcs_client, db, todo):
"""Download started/finished.json from build dirs in todo."""
ack_ids = []
build_dirs = []
pool = multiprocessing.pool.ThreadPool(16)
try:
for ack_id, (build_dir, started, finished) in pool.imap_unordered(
lambda ack_id_job_build: (ack_id_job_build[0], gcs_client.get_started_finished(
ack_id_job_build[1], ack_id_job_build[2])),
todo):
if finished:
if not db.insert_build(build_dir, started, finished):
print('build dir already present in db: ', build_dir)
start = time.localtime(started.get('timestamp', 0) if started else 0)
print((build_dir, bool(started), bool(finished),
time.strftime('%F %T %Z', start),
finished and finished.get('result')))
build_dirs.append(build_dir)
ack_ids.append(ack_id)
else:
print('finished.json missing?', build_dir, started, finished)
finally:
pool.close()
db.commit()
return ack_ids, build_dirs
def retry(func, *args, **kwargs):
"""Run a function with arguments, retrying on server errors. """
# pylint: disable=no-member
for attempt in range(20):
try:
return func(*args, **kwargs)
except (socket.error, google.cloud.exceptions.ServerError):
# retry with exponential backoff
traceback.print_exc()
time.sleep(1.4 ** attempt)
except api_exceptions.BadRequest as err:
args_size = sys.getsizeof(args)
kwargs_str = ','.join('{}={}'.format(k, v) for k, v in kwargs.items())
print(f"Error running {func.__name__} \
([bytes in args]{args_size} with {kwargs_str}) : {err}")
return None # Skip
return func(*args, **kwargs) # one last attempt
def insert_data(bq_client, table, rows_iter):
"""Upload rows from rows_iter into bigquery table table.
rows_iter should return a series of (row_id, row dictionary) tuples.
The row dictionary must match the table's schema.
Args:
bq_client: Client connection to BigQuery
table: bigquery.Table object that points to a specific table
rows_iter: row_id, dict representing a make_json.Build
Returns the row_ids that were inserted.
"""
def divide_chunks(l, bin_size=MAX_ROW_UPLOAD):
# break up rows to not hit data limits
for i in range(0, len(l), bin_size):
yield l[i:i + bin_size]
emitted, rows = [], []
for row_id, build in rows_iter:
emitted.append(row_id)
rows.append(build)
if not rows: # nothing to do
return []
for chunk in divide_chunks(rows):
# Insert rows with row_ids into table, retrying as necessary.
errors = retry(bq_client.insert_rows, table, chunk, skip_invalid_rows=True)
if not errors:
print(f'Loaded {len(chunk)} builds into {table.full_table_id}')
else:
print(f'Errors on Chunk: {chunk}')
pprint.pprint(errors)
pprint.pprint(table.schema)
return emitted
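# Editorial note (not part of the original file): insert_data() streams rows in
# chunks of MAX_ROW_UPLOAD, so e.g. 25 (row_id, build) pairs become three
# bq_client.insert_rows() calls of 10, 10 and 5 rows; the returned list holds
# all 25 row_ids that were handed to BigQuery.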
def main(
db,
subscriber,
subscription_path,
bq_client,
tables,
buckets,
client_class=make_db.GCSClient,
stop=None,
):
# pylint: disable=too-many-locals
gcs_client = client_class('', {})
if stop is None:
stop = lambda: False
results = [0] * 1000 # don't sleep on first loop
while not stop():
print()
if len(results) < 10 and client_class is make_db.GCSClient:
time.sleep(5) # slow down!
print('====', time.strftime("%F %T %Z"), '=' * 40)
results = retry(subscriber.pull, subscription=subscription_path, max_messages=1000)
results = list(results.received_messages)
start = time.time()
while time.time() < start + 7:
results_more = list(subscriber.pull(
subscription=subscription_path,
max_messages=1000,
return_immediately=True).received_messages)
if not results_more:
break
results.extend(results_more)
print('PULLED', len(results))
ack_ids, todo = process_changes(results, buckets)
if ack_ids:
print('ACK irrelevant', len(ack_ids))
for n in range(0, len(ack_ids), 1000):
retry(
subscriber.acknowledge,
subscription=subscription_path,
ack_ids=ack_ids[n: n + 1000])
if todo:
print('EXTEND-ACK ', len(todo))
# give 3 minutes to grab build details
retry(
subscriber.modify_ack_deadline,
subscription=subscription_path,
ack_ids=[i for i, _j, _b in todo],
ack_deadline_seconds=60*3)
ack_ids, build_dirs = get_started_finished(gcs_client, db, todo)
# notify pubsub queue that we've handled the finished.json messages
if ack_ids:
print('ACK "finished.json"', len(ack_ids))
retry(subscriber.acknowledge, subscription=subscription_path, ack_ids=ack_ids)
# grab junit files for new builds
make_db.download_junit(db, 16, client_class)
# stream new rows to tables
if build_dirs and tables:
for table, incremental_table in tables.values():
builds = db.get_builds_from_paths(build_dirs, incremental_table)
emitted = insert_data(bq_client, table, make_json.make_rows(db, builds))
db.insert_emitted(emitted, incremental_table)
def load_sub(poll):
"""Return the PubSub subscription specified by the /-separated input.
Args:
poll: Follow GCS changes from project/topic/subscription
Ex: kubernetes-jenkins/gcs-changes/kettle
Return:
Subscribed client
"""
subscriber = pubsub_v1.SubscriberClient()
project_id, _, sub = poll.split('/')
subscription_path = f'projects/{project_id}/subscriptions/{sub}'
return subscriber, subscription_path
def load_schema(schemafield):
"""Construct the expected BigQuery schema from files on disk.
Only used for new tables."""
basedir = os.path.dirname(__file__)
with open(os.path.join(basedir, 'schema.json')) as json_file:
schema_json = json.load(json_file)
def make_field(spec):
spec['field_type'] = spec.pop('type')
if 'fields' in spec:
spec['fields'] = [make_field(f) for f in spec['fields']]
return schemafield(**spec)
return [make_field(s) for s in schema_json]
def load_tables(dataset, tablespecs):
"""Construct a dictionary of BigQuery tables given the input tablespec.
Args:
        dataset: "PROJECT:DATASET" string identifying the BigQuery dataset (e.g. k8s-gubernator:build)
tablespecs: list of strings of "NAME:DAYS", e.g. ["day:1"]
Returns:
client, {name: (bigquery.Table, incremental table name)}
"""
project, dataset_name = dataset.split(':')
bq_client = bigquery.Client(project)
tables = {}
for spec in tablespecs:
table_name, days = spec.split(':')
table_ref = f'{project}.{dataset_name}.{table_name}'
try:
table = bq_client.get_table(table_ref) # pylint: disable=no-member
except google.cloud.exceptions.NotFound:
table = bq_client.create_table(table_ref) # pylint: disable=no-member
table.schema = load_schema(bigquery.schema.SchemaField)
tables[table_name] = (table, make_json.get_table(float(days)))
return bq_client, tables
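# Editorial illustration (not part of the original file): each "NAME:DAYS"
# tablespec maps to a BigQuery table plus the matching incremental table, e.g.
#
#     bq_client, tables = load_tables('k8s-gubernator:build', ['day:1', 'week:7'])
#
# leaves tables == {'day': (<Table ...day>, <incremental>), 'week': (...)}, and
# missing tables are created with the schema from schema.json.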
class StopWhen:
"""A simple object that returns True once when the given hour begins."""
def __init__(self, target, clock=lambda: time.localtime().tm_hour):
self.clock = clock
self.last = self.clock()
self.target = target
def __call__(self):
if os.path.exists('stop'):
return True
now = self.clock()
last = self.last
self.last = now
return now != last and now == self.target
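# Editorial usage sketch (not part of the original file): StopWhen(6) keeps
# returning False until the local clock first rolls over into hour 6 (or a file
# named "stop" appears), which is how main() is told to shut down once a day:
#
#     stop = StopWhen(6)
#     while not stop():
#         ...  # keep pulling pubsub messages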
def _make_bucket_map(path):
bucket_map = yaml.safe_load(open(path))
bucket_to_attrs = dict()
for k, v in bucket_map.items():
bucket = k.rsplit('/')[2] # of form gs://<bucket>/...
bucket_to_attrs[bucket] = v
return bucket_to_attrs
def get_options(argv):
"""Process command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--poll',
required=True,
help='Follow GCS changes from project/topic/subscription',
)
parser.add_argument(
'--dataset',
help='BigQuery dataset (e.g. k8s-gubernator:build)'
)
parser.add_argument(
'--tables',
nargs='+',
default=[],
help='Upload rows to table:days [e.g. --tables day:1 week:7 all:0]',
)
parser.add_argument(
'--stop_at',
type=int,
help='Terminate when this hour (0-23) rolls around (in local time).'
)
parser.add_argument(
'--buckets',
type=str,
default='buckets.yaml',
help='Path to bucket configuration.'
)
return parser.parse_args(argv)
if __name__ == '__main__':
OPTIONS = get_options(sys.argv[1:])
main(model.Database(),
*load_sub(OPTIONS.poll),
*load_tables(OPTIONS.dataset, OPTIONS.tables),
_make_bucket_map(OPTIONS.buckets),
stop=StopWhen(OPTIONS.stop_at))
| 34.220963 | 97 | 0.629884 |
79448735a3813ebf4e6a793a4b9015ba8fb24018 | 4,698 | py | Python | cmocl.py | psekan/cmocl | 9d1ff3caa13a5079ba45e44daec0a2d2081057bd | [
"Apache-2.0"
] | 2 | 2019-07-18T08:48:40.000Z | 2020-07-26T20:39:34.000Z | cmocl.py | psekan/cmocl | 9d1ff3caa13a5079ba45e44daec0a2d2081057bd | [
"Apache-2.0"
] | null | null | null | cmocl.py | psekan/cmocl | 9d1ff3caa13a5079ba45e44daec0a2d2081057bd | [
"Apache-2.0"
] | null | null | null | import json
import logging
import requests
class CMoCLError(Exception):
"""An error occurs"""
pass
class CMoCL:
"""Class for communication with CMoCL API"""
PERIOD_DAY = "day"
PERIOD_WEEK = "week"
PERIOD_MONTH = "month"
PERIOD_OCCASIONAL = "occasional"
def __init__(self, url, api_key):
self.url = url
self.api_key = api_key
def entries(self, source, period, date_from, date_to) -> list:
response = requests.get(self.url+"/"+source+"/"+period+"/"+date_from+"/"+date_to)
if response.ok:
return response.json()
elif response.status_code == 404:
return []
elif response.status_code == 400:
try:
content = response.json()
message = content["message"]
logging.warning("CMoCL GET - "+message)
except ValueError:
pass
return []
else:
message = str(response.status_code)
try:
content = response.json()
message = content["message"]
except ValueError:
logging.error("CMoCL GET - An error occurs.")
raise CMoCLError("GET An error occurs: "+message)
def dates(self, source, period) -> list:
response = requests.get(self.url+"/"+source+"/"+period)
if response.ok:
return response.json()
elif response.status_code == 404:
return []
elif response.status_code == 400:
try:
content = response.json()
message = content["message"]
logging.warning("CMoCL GET - "+message)
except ValueError:
pass
return []
else:
message = str(response.status_code)
try:
content = response.json()
message = content["message"]
except ValueError:
logging.error("CMoCL GET - An error occurs.")
raise CMoCLError("GET An error occurs: "+message)
def exists(self, source, period, date) -> bool:
"""Check if a record already exists in CMoCL database
:param source:
:param period:
:param date:
:return: True if a record exists, False otherwise
"""
response = requests.get(self.url+"/"+source+"/"+period+"/"+date)
if response.ok:
return True
elif response.status_code == 404:
return False
elif response.status_code == 400:
try:
content = response.json()
message = content["message"]
logging.warning("CMoCL GET - "+message)
except ValueError:
pass
return False
else:
message = str(response.status_code)
try:
content = response.json()
message = content["message"]
except ValueError:
logging.error("CMoCL GET - An error occurs.")
raise CMoCLError("GET An error occurs: "+message)
def upload(self, source, period, date, file_path) -> bool:
"""Upload estimation to
:param source:
:param period:
:param date:
:param file_path:
:return:
"""
with open(file_path) as fp:
content = json.load(fp)
request = {
"source": source,
"period": period,
"date": date,
"estimation": content
}
response = requests.post(self.url+"/", json=request, headers={"Authorization": "Bearer " + self.api_key})
if response.ok:
return True
elif response.status_code == 400 or response.status_code == 403 or response.status_code == 409:
if response.status_code == 400:
head = "Incorrect format of estimation"
elif response.status_code == 403:
head = "Authorization error"
else:
head = "Already existed record"
try:
content = response.json()
message = content["message"]
logging.error("CMoCL POST - "+head+": " + message)
except ValueError:
logging.error("CMoCL POST - "+head+".")
return False
else:
message = str(response.status_code)
try:
content = response.json()
message = content["message"]
except ValueError:
logging.error("CMoCL POST - An error occurs.")
raise CMoCLError("POST An error occurs: "+message)
| 33.319149 | 113 | 0.52086 |
794487a36d67d7149e3deba16893716561440c8e | 192 | py | Python | djwebhooks/utils.py | joaoautodev/dj-webhooks | 1dcbfe6c90c5f56567d338a9edcd52926da47d27 | [
"BSD-3-Clause"
] | 52 | 2015-01-08T15:46:57.000Z | 2022-03-31T23:12:29.000Z | djwebhooks/utils.py | joaoautodev/dj-webhooks | 1dcbfe6c90c5f56567d338a9edcd52926da47d27 | [
"BSD-3-Clause"
] | 1 | 2016-05-28T13:01:21.000Z | 2016-05-28T19:13:39.000Z | djwebhooks/utils.py | joaoautodev/dj-webhooks | 1dcbfe6c90c5f56567d338a9edcd52926da47d27 | [
"BSD-3-Clause"
] | 22 | 2015-03-11T20:19:55.000Z | 2022-03-24T15:29:30.000Z | import sys
def always_string(value):
"""Regardless of the Python version, this always returns a string """
if sys.version > '3':
return value.decode('utf-8')
return value | 24 | 73 | 0.661458 |
794487c81af77df1d8c01ed955dfc16d3eaa9f18 | 497 | py | Python | apps/sushi/migrations/0018_sushifetchattempt_in_progress.py | techlib/celus | f32a7a22be5f4613dcac10b8e02c5c5a9bc297cb | [
"MIT"
] | 7 | 2020-02-20T13:24:40.000Z | 2022-01-28T19:36:04.000Z | apps/sushi/migrations/0018_sushifetchattempt_in_progress.py | techlib/czechelib-stats | ca132e326af0924740a525710474870b1fb5fd37 | [
"MIT"
] | 15 | 2020-04-28T13:09:02.000Z | 2021-11-03T15:21:24.000Z | apps/sushi/migrations/0018_sushifetchattempt_in_progress.py | techlib/czechelib-stats | ca132e326af0924740a525710474870b1fb5fd37 | [
"MIT"
] | 4 | 2020-02-20T13:48:30.000Z | 2021-03-19T00:33:34.000Z | # Generated by Django 2.2.5 on 2019-09-16 08:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sushi', '0017_sushifetchattempt_processing_info'),
]
operations = [
migrations.AddField(
model_name='sushifetchattempt',
name='in_progress',
field=models.BooleanField(
default=False, help_text='True if the data is still downloading'
),
),
]
| 23.666667 | 80 | 0.605634 |
794488de3dc7ad4d908ec34bd81586e1971fab6c | 6,759 | py | Python | test/test/test_topologies/test_simplemodels.py | brucespang/fnss | 8e1d95744347afa77383092e6f144980d84e222d | [
"BSD-2-Clause"
] | 114 | 2015-01-19T14:15:07.000Z | 2022-02-22T01:47:19.000Z | test/test/test_topologies/test_simplemodels.py | brucespang/fnss | 8e1d95744347afa77383092e6f144980d84e222d | [
"BSD-2-Clause"
] | 15 | 2016-02-11T09:09:02.000Z | 2021-04-05T12:57:09.000Z | test/test/test_topologies/test_simplemodels.py | brucespang/fnss | 8e1d95744347afa77383092e6f144980d84e222d | [
"BSD-2-Clause"
] | 36 | 2015-02-08T12:28:04.000Z | 2021-11-19T06:08:17.000Z | import unittest
import fnss
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_line_topology(self):
def test_line_connectivity(n):
G = fnss.line_topology(n)
self.assertEqual(n, G.number_of_nodes())
self.assertEqual(n - 1, G.number_of_edges())
for i in range(n):
if i <= n - 2: self.assertTrue(G.has_edge(i, i + 1))
if i >= 1: self.assertTrue(G.has_edge(i, i - 1))
self.assertRaises(ValueError, fnss.line_topology, 0)
self.assertRaises(ValueError, fnss.line_topology, -1)
test_line_connectivity(8)
test_line_connectivity(11)
def test_k_ary_tree_topology(self):
def test_K_ary_tree_connectivity(k, h):
expected_degree = {'root': k, 'intermediate': k + 1, 'leaf': 1}
G = fnss.k_ary_tree_topology(k, h)
self.assertEqual(sum(k ** d for d in range(h + 1)),
G.number_of_nodes())
self.assertEqual(sum(k ** d for d in range(1, h + 1)),
G.number_of_edges())
degree = G.degree()
for v in G.nodes():
v_type = G.node[v]['type']
v_depth = G.node[v]['depth']
self.assertEqual(expected_degree[v_type], degree[v])
neighbors = G.neighbors(v)
for u in neighbors:
u_depth = G.node[u]['depth']
if u < v:
self.assertEqual(u_depth, v_depth - 1)
elif u > v:
self.assertEqual(u_depth, v_depth + 1)
else: # u == v
self.fail("Node %s has a self-loop" % str(v))
self.assertRaises(ValueError, fnss.k_ary_tree_topology, 0, 3)
self.assertRaises(ValueError, fnss.k_ary_tree_topology, 3, 0)
self.assertRaises(ValueError, fnss.k_ary_tree_topology, -1, 3)
self.assertRaises(ValueError, fnss.k_ary_tree_topology, 3, -1)
test_K_ary_tree_connectivity(3, 5)
test_K_ary_tree_connectivity(5, 3)
test_K_ary_tree_connectivity(2, 1)
def test_ring_topology(self):
def test_ring_connectivity(n):
G = fnss.ring_topology(n)
self.assertEqual(n, G.number_of_nodes())
self.assertEqual(n, G.number_of_edges())
for i in range(n):
self.assertTrue(G.has_edge(i, (i + 1) % n))
self.assertTrue(G.has_edge(i, (i - 1) % n))
self.assertRaises(ValueError, fnss.ring_topology, 0)
self.assertRaises(ValueError, fnss.ring_topology, -1)
self.assertRaises(TypeError, fnss.ring_topology, 'String')
test_ring_connectivity(10)
test_ring_connectivity(21)
def test_star_topology(self):
def test_star_connectivity(n):
G = fnss.star_topology(n)
self.assertEqual(n + 1, G.number_of_nodes())
self.assertEqual(n, G.number_of_edges())
self.assertEqual('root', G.node[0]['type'])
for i in range(1, n + 1):
self.assertEqual('leaf', G.node[i]['type'])
self.assertTrue(G.has_edge(i, 0))
self.assertTrue(G.has_edge(0, i))
self.assertRaises(ValueError, fnss.star_topology, 0)
self.assertRaises(ValueError, fnss.star_topology, -1)
self.assertRaises(TypeError, fnss.star_topology, 'String')
test_star_connectivity(10)
test_star_connectivity(21)
def test_full_mesh_topology(self):
def test_full_mesh_connectivity(n):
G = fnss.full_mesh_topology(n)
self.assertEqual(n, G.number_of_nodes())
self.assertEqual((n * (n - 1)) // 2, G.number_of_edges())
for i in range(n):
for j in range(n):
if i != j:
self.assertTrue(G.has_edge(i, j))
self.assertRaises(ValueError, fnss.full_mesh_topology, 0)
self.assertRaises(ValueError, fnss.full_mesh_topology, -1)
self.assertRaises(TypeError, fnss.full_mesh_topology, 'String')
test_full_mesh_connectivity(10)
test_full_mesh_connectivity(21)
def test_dumbbell_topology(self):
def test_dumbbell_connectivity(m, n):
G = fnss.dumbbell_topology(m, n)
self.assertEqual(2 * m + n, G.number_of_nodes())
self.assertEqual(2 * m + n - 1, G.number_of_edges())
for i in range(m):
self.assertTrue(G.has_edge(i, m))
self.assertEqual('left_bell', G.node[i]['type'])
for i in range(m, m + n):
self.assertTrue(G.has_edge(i, i + 1))
self.assertEqual('core', G.node[i]['type'])
for i in range(m + n, 2 * m + n):
self.assertTrue(G.has_edge(m + n - 1, i))
self.assertEqual('right_bell', G.node[i]['type'])
self.assertRaises(ValueError, fnss.dumbbell_topology, 0, 0)
self.assertRaises(ValueError, fnss.dumbbell_topology, -1, 1)
self.assertRaises(ValueError, fnss.dumbbell_topology, 1, 3)
self.assertRaises(TypeError, fnss.dumbbell_topology, 'String', 4)
self.assertRaises(TypeError, fnss.dumbbell_topology, 4, 'String')
test_dumbbell_connectivity(15, 12)
test_dumbbell_connectivity(2, 1)
def test_chord_topology(self):
def test_chord_connectivity(m, r):
G = fnss.chord_topology(m, r)
n = 2 ** m
self.assertEqual(len(G), n)
if r <= 2:
for i in G.nodes():
self.assertEqual(len(G.adj[i]), m)
else:
for i in G.nodes():
for j in range(i + 1, i + r + 1):
self.assertTrue(G.has_edge(i, j % n))
test_chord_connectivity(2, 1)
test_chord_connectivity(3, 1)
test_chord_connectivity(4, 1)
test_chord_connectivity(5, 1)
test_chord_connectivity(5, 2)
test_chord_connectivity(5, 3)
test_chord_connectivity(3, 7)
self.assertRaises(ValueError, fnss.chord_topology, 0, 3)
self.assertRaises(ValueError, fnss.chord_topology, 1, 3)
self.assertRaises(ValueError, fnss.chord_topology, -1, 3)
self.assertRaises(ValueError, fnss.chord_topology, 5, -1)
self.assertRaises(ValueError, fnss.chord_topology, 5, 0)
self.assertRaises(ValueError, fnss.chord_topology, 3, 8)
self.assertRaises(TypeError, fnss.chord_topology, 5, None)
self.assertRaises(TypeError, fnss.chord_topology, None, 3)
self.assertRaises(TypeError, fnss.chord_topology, 5, "1")
| 43.88961 | 75 | 0.579672 |
7944890d147c11271d3229f97e9b675eeb91fd8c | 8,777 | py | Python | sdk2-src/src/azure-ml/azure/ml/_schema/_endpoint/online/online_endpoint_deployment.py | DamovisaOrg/azureml-v2-preview | 9c5ed1003c19f4d5b19dbea4a7c507e101149139 | [
"MIT"
] | 1 | 2021-09-27T07:54:40.000Z | 2021-09-27T07:54:40.000Z | sdk2-src/src/azure-ml/azure/ml/_schema/_endpoint/online/online_endpoint_deployment.py | DamovisaOrg/azureml-v2-preview | 9c5ed1003c19f4d5b19dbea4a7c507e101149139 | [
"MIT"
] | null | null | null | sdk2-src/src/azure-ml/azure/ml/_schema/_endpoint/online/online_endpoint_deployment.py | DamovisaOrg/azureml-v2-preview | 9c5ed1003c19f4d5b19dbea4a7c507e101149139 | [
"MIT"
] | null | null | null | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from typing import Any, Union, Optional, Dict
from marshmallow import fields, post_load
from azure.ml._schema.schema import PatchedNested
from .scale_settings_schema import ScaleSettingsSchema
from .request_settings_schema import InternalRequestSettings, RequestSettingsSchema
from .resource_requirements_schema import ResourceRequirementsSchema
from azure.ml._schema._endpoint.code_configuration_schema import InternalCodeConfiguration
from .liveness_probe import LivenessProbeSchema
from azure.ml._workspace_dependent_operations import WorkspaceScope
from azure.ml._utils._arm_id_utils import get_arm_id_with_version
from azure.ml._schema.environment import InternalEnvironment
from azure.ml._schema.model import InternalModel
from azure.ml._schema._endpoint.endpoint_deployment import EndpointDeploymentSchema, InternalEndpointDeployment
from azure.ml._restclient.machinelearningservices.models import (
CodeConfiguration,
DeploymentConfigurationBase,
OnlineDeploymentPropertiesTrackedResource,
OnlineDeploymentProperties,
AksDeploymentConfiguration,
ManagedDeploymentConfiguration,
ContainerResourceRequirementsAutoGenerated,
IdAssetReference,
LivenessProbeRequirements,
ScaleSettings,
)
from azure.ml.constants import BASE_PATH_CONTEXT_KEY, ComputeType
class InternalOnlineEndpointDeployment(InternalEndpointDeployment):
def __init__(
self,
base_path: Optional[str] = None,
id: str = None,
name: str = None,
type: str = None,
tags: Dict[str, Any] = None,
properties: Dict[str, Any] = None,
model: Union[str, InternalModel] = None,
code_configuration: InternalCodeConfiguration = None,
environment: Union[str, InternalEnvironment] = None,
sku: str = None,
app_insights_enabled: bool = False,
resource_requirements: ContainerResourceRequirementsAutoGenerated = None,
scale_settings: ScaleSettings = None,
request_settings: InternalRequestSettings = None,
liveness_probe: LivenessProbeRequirements = None,
provisioning_status: str = None,
):
super(InternalOnlineEndpointDeployment, self).__init__(
base_path=base_path,
id=id,
name=name,
type=type,
tags=tags,
properties=properties,
model=model,
code_configuration=code_configuration,
environment=environment,
)
self.sku = sku
self.app_insights_enabled = app_insights_enabled
self.resource_requirements = resource_requirements
self.scale_settings = scale_settings
self.request_settings = request_settings
self.liveness_probe = liveness_probe
self.provisioning_status = provisioning_status
def _to_rest_deployment_configuration(self, compute_type: str) -> DeploymentConfigurationBase:
concurrent_requests = (
self.request_settings.max_concurrent_requests_per_instance if self.request_settings else None
)
if compute_type == ComputeType.AKS:
return AksDeploymentConfiguration(
app_insights_enabled=self.app_insights_enabled,
max_queue_wait_ms=self.request_settings.max_queue_wait_ms if self.request_settings else None,
scoring_timeout_ms=self.request_settings.request_timeout_ms if self.request_settings else None,
max_concurrent_requests_per_instance=concurrent_requests,
liveness_probe_requirements=self.liveness_probe,
container_resource_requirements=self.resource_requirements,
)
elif compute_type == ComputeType.MANAGED:
return ManagedDeploymentConfiguration(
app_insights_enabled=self.app_insights_enabled,
max_queue_wait_ms=self.request_settings.max_queue_wait_ms if self.request_settings else None,
scoring_timeout_ms=self.request_settings.request_timeout_ms if self.request_settings else None,
max_concurrent_requests_per_instance=concurrent_requests,
liveness_probe_requirements=self.liveness_probe,
instance_type=self.sku,
)
def _to_rest_online_deployments(
self, compute_type: str, location: str, workspace_scope: WorkspaceScope
) -> OnlineDeploymentPropertiesTrackedResource:
command = None
if self.code_configuration.scoring_script:
command = self.code_configuration.scoring_script
if isinstance(self.code_configuration.code, str):
code = CodeConfiguration(code_artifact_id=self.code_configuration.code, command=command)
else:
code = CodeConfiguration(
code_artifact_id=get_arm_id_with_version(
workspace_scope, "codes", self.code_configuration.code.name, self.code_configuration.code.version
),
command=command,
)
if isinstance(self.model, str):
model = IdAssetReference(id=self.model)
else:
model = IdAssetReference(
id=get_arm_id_with_version(workspace_scope, "models", self.model.name, self.model.version)
)
if isinstance(self.environment, str):
environment = self.environment
else:
environment = get_arm_id_with_version(
workspace_scope, "environments", self.environment.name, self.environment.version
)
deployment_config = self._to_rest_deployment_configuration(compute_type=compute_type)
properties = OnlineDeploymentProperties(
code_configuration=code,
environment_id=environment,
model_reference=model,
deployment_configuration=deployment_config,
scale_settings=self.scale_settings,
properties=self.properties,
)
return OnlineDeploymentPropertiesTrackedResource(location=location, properties=properties, tags=self.tags)
def _from_rest_online_deployment(self, deployment: OnlineDeploymentPropertiesTrackedResource):
liveness_probe = None
request_settings = None
deployment_config = deployment.properties.deployment_configuration
if isinstance(deployment_config, AksDeploymentConfiguration):
liveness_probe = deployment.properties.deployment_configuration.liveness_probe_requirements
request_settings = InternalRequestSettings(
request_timeout_ms=deployment_config.scoring_timeout_ms,
max_concurrent_requests_per_instance=deployment_config.max_concurrent_requests_per_instance,
max_queue_wait_ms=deployment_config.max_queue_wait_ms,
)
self.id = deployment.id
self.name = deployment.name
self.type = deployment.type
self.tags = deployment.tags
self.properties = deployment.properties.properties
self.model = deployment.properties.model_reference.id
self.code_configuration = InternalCodeConfiguration(
code=deployment.properties.code_configuration.code_artifact_id,
scoring_script=deployment.properties.code_configuration.command[0],
)
self.environment = deployment.properties.environment_id
self.sku = None
self.app_insights_enabled = deployment.properties.deployment_configuration.app_insights_enabled
self.resource_requirements = deployment.properties.deployment_configuration.container_resource_requirements
self.scale_settings = deployment.properties.scale_settings
self.request_settings = request_settings
self.liveness_probe = liveness_probe
self.provisioning_status = deployment.properties.provisioning_state
return self
class OnlineEndpointDeploymentSchema(EndpointDeploymentSchema):
sku = fields.Str()
app_insights_enabled = fields.Bool()
resource_requirements = PatchedNested(ResourceRequirementsSchema, required=False)
scale_settings = PatchedNested(ScaleSettingsSchema)
request_settings = PatchedNested(RequestSettingsSchema)
liveness_probe = PatchedNested(LivenessProbeSchema)
provisioning_status = fields.Str(dump_only=True)
@post_load
def make(self, data: Any, **kwargs: Any) -> InternalOnlineEndpointDeployment:
return InternalOnlineEndpointDeployment(base_path=self.context[BASE_PATH_CONTEXT_KEY], **data)
class NamedOnlineEndpointDeploymentSchema(OnlineEndpointDeploymentSchema):
name = fields.Str(required=True)
| 48.225275 | 117 | 0.717215 |
794489356cbff867b103201aefd9d27b477b3575 | 852 | py | Python | theory/oop_1.py | librity/nc_pyjobs | f57c04f7f406e1385a81ae62c786ff88db262d8c | [
"MIT"
] | null | null | null | theory/oop_1.py | librity/nc_pyjobs | f57c04f7f406e1385a81ae62c786ff88db262d8c | [
"MIT"
] | null | null | null | theory/oop_1.py | librity/nc_pyjobs | f57c04f7f406e1385a81ae62c786ff88db262d8c | [
"MIT"
] | null | null | null | # python3 theory/oop_1.py
# from pprint import pprint
class Car():
brand = None
model = None
color = None
wheels = 4
doors = 4
windows = 4
seats = 4
def inspect(car):
print("== Car ==")
print("brand:", car.brand)
print("model:", car.model)
print("color:", car.color)
print("wheels:", car.wheels)
print("doors:", car.doors)
print("windows:", car.windows)
print("seats:", car.seats)
print("")
def start(self):
print("vroom vrooooom!")
print(dir(Car))
print()
porsche = Car()
porsche.color = "white"
porsche.brand = "Porsche"
porsche.model = "Cayenne"
porsche.inspect()
# pprint(porsche)
ferrari = Car()
ferrari.color = "red"
ferrari.doors = 2
ferrari.seats = 2
ferrari.windows = 2
ferrari.brand = "Ferrari"
ferrari.model = "Stradale"
ferrari.inspect()
# pprint(ferrari)
ferrari.start()
| 16.705882 | 34 | 0.638498 |
79448a037559335dadfe3a74c8b372244da4c8b5 | 59,764 | py | Python | rapidtide/dlfilter.py | bbfrederick/rapidtide | ddd1899a93fafd550feb134debdd028bbba8c853 | [
"Apache-2.0"
] | 44 | 2017-01-19T10:12:39.000Z | 2022-02-08T05:43:58.000Z | rapidtide/dlfilter.py | bbfrederick/delaytools | 190d79ae4c19317dfce38a528e43fd05459f29a5 | [
"Apache-2.0"
] | 70 | 2018-05-02T14:35:45.000Z | 2022-03-18T17:43:33.000Z | rapidtide/dlfilter.py | bbfrederick/delaytools | 190d79ae4c19317dfce38a528e43fd05459f29a5 | [
"Apache-2.0"
] | 12 | 2019-02-12T20:40:27.000Z | 2021-06-16T13:28:21.000Z | #!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2021 Blaise Frederick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 28 23:01:07 2018
@author: neuro
"""
import glob
import logging
import os
import sys
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pyfftw
import pyfftw.interfaces.scipy_fftpack as fftpack
from statsmodels.robust.scale import mad
pyfftw.interfaces.cache.enable()
import rapidtide.io as tide_io
LGR = logging.getLogger("GENERAL")
LGR.debug("setting backend to Agg")
mpl.use("Agg")
tfversion = -1
try:
import plaidml.keras
plaidml.keras.install_backend("plaidml")
tfversion = 0
LGR.debug("using plaidml keras")
from keras.callbacks import ModelCheckpoint, TerminateOnNaN
from keras.layers import (
LSTM,
Activation,
BatchNormalization,
Bidirectional,
Concatenate,
Conv1D,
Dense,
Dropout,
GlobalMaxPool1D,
Input,
MaxPooling1D,
TimeDistributed,
UpSampling1D,
)
from keras.models import Model, Sequential, load_model
from keras.optimizers import RMSprop
except ImportError:
tfversion = -1
LGR.warning("import plaidml.keras failed: falling back to standard tensorflow keras")
if tfversion == -1:
try:
import tensorflow.compat.v1 as tf
if tf.__version__[0] == "2":
tfversion = 2
elif tf.__version__[0] == "1":
tfversion = 1
else:
LGR.warning(f"could not interpret {tf.__version__[0]}")
LGR.debug(f"tensorflow version is {tfversion}")
except ImportError:
raise ImportError("no backend found - exiting")
if tfversion == 2:
LGR.debug("using tensorflow v2x")
tf.disable_v2_behavior()
from tensorflow.keras.callbacks import ModelCheckpoint, TerminateOnNaN
    # Conv1D, Concatenate, Input, Model, and model_from_json are also required
    # (by MultiscaleCNNDLFilter and by loadmodel(usehdf=False))
    from tensorflow.keras.layers import (
        LSTM,
        Activation,
        BatchNormalization,
        Bidirectional,
        Concatenate,
        Conv1D,
        Convolution1D,
        Dense,
        Dropout,
        GlobalMaxPool1D,
        Input,
        MaxPooling1D,
        TimeDistributed,
        UpSampling1D,
    )
    from tensorflow.keras.models import Model, Sequential, load_model, model_from_json
from tensorflow.keras.optimizers import RMSprop
LGR.debug(f"tensorflow version: >>>{tf.__version__}<<<")
elif tfversion == 1:
LGR.debug("using tensorflow v1x")
from keras.callbacks import ModelCheckpoint, TerminateOnNaN
from keras.layers import (
LSTM,
Activation,
BatchNormalization,
Bidirectional,
Concatenate,
Conv1D,
Dense,
Dropout,
GlobalMaxPool1D,
Input,
MaxPooling1D,
TimeDistributed,
UpSampling1D,
)
from keras.models import Model, Sequential, load_model, model_from_json
from keras.optimizers import RMSprop
LGR.debug(f"tensorflow version: >>>{tf.__version__}<<<")
elif tfversion == 0:
pass
else:
raise ImportError("could not find backend - exiting")
class DeepLearningFilter:
"""Base class for deep learning filter"""
thesuffix = "sliceres"
thedatadir = "/Users/frederic/Documents/MR_data/physioconn/timecourses"
inputfrag = "abc"
targetfrag = "xyz"
namesuffix = None
modelroot = "."
excludethresh = 4.0
modelname = None
intermediatemodelpath = None
usebadpts = False
activation = "tanh"
dofft = False
readlim = None
countlim = None
lossfilename = None
train_x = None
train_y = None
val_x = None
val_y = None
model = None
modelpath = None
inputsize = None
usehdf = True
infodict = {}
def __init__(
self,
window_size=128,
num_layers=5,
dropout_rate=0.3,
num_pretrain_epochs=0,
num_epochs=1,
activation="relu",
modelroot=".",
dofft=False,
excludethresh=4.0,
usebadpts=False,
thesuffix="25.0Hz",
modelpath=".",
usehdf=True,
thedatadir="/Users/frederic/Documents/MR_data/physioconn/timecourses",
inputfrag="abc",
targetfrag="xyz",
excludebysubject=True,
startskip=200,
endskip=200,
step=1,
namesuffix=None,
readlim=None,
readskip=None,
countlim=None,
**kwargs,
):
self.window_size = window_size
self.dropout_rate = dropout_rate
self.num_pretrain_epochs = num_pretrain_epochs
self.num_epochs = num_epochs
self.usebadpts = usebadpts
self.num_layers = num_layers
if self.usebadpts:
self.inputsize = 2
else:
self.inputsize = 1
self.activation = activation
self.modelroot = modelroot
self.usehdf = usehdf
self.dofft = dofft
self.thesuffix = thesuffix
self.thedatadir = thedatadir
self.modelpath = modelpath
LGR.info(f"modeldir from DeepLearningFilter: {self.modelpath}")
self.excludethresh = excludethresh
self.readlim = readlim
self.readskip = readskip
self.countlim = countlim
self.model = None
self.initialized = False
self.trained = False
self.usetensorboard = False
self.inputfrag = inputfrag
self.targetfrag = targetfrag
self.namesuffix = namesuffix
self.startskip = startskip
self.endskip = endskip
self.step = step
self.excludebysubject = excludebysubject
# populate infodict
self.infodict["window_size"] = self.window_size
self.infodict["usebadpts"] = self.usebadpts
self.infodict["dofft"] = self.dofft
self.infodict["excludethresh"] = self.excludethresh
self.infodict["num_pretrain_epochs"] = self.num_pretrain_epochs
self.infodict["num_epochs"] = self.num_epochs
self.infodict["modelname"] = self.modelname
self.infodict["dropout_rate"] = self.dropout_rate
self.infodict["startskip"] = self.startskip
self.infodict["endskip"] = self.endskip
self.infodict["step"] = self.step
self.infodict["train_arch"] = sys.platform
def loaddata(self):
if not self.initialized:
raise Exception("model must be initialized prior to loading data")
if self.dofft:
(
self.train_x,
self.train_y,
self.val_x,
self.val_y,
self.Ns,
self.tclen,
self.thebatchsize,
dummy,
dummy,
) = prep(
self.window_size,
thesuffix=self.thesuffix,
thedatadir=self.thedatadir,
inputfrag=self.inputfrag,
targetfrag=self.targetfrag,
startskip=self.startskip,
endskip=self.endskip,
step=self.step,
dofft=self.dofft,
usebadpts=self.usebadpts,
excludethresh=self.excludethresh,
excludebysubject=self.excludebysubject,
readlim=self.readlim,
readskip=self.readskip,
countlim=self.countlim,
)
else:
(
self.train_x,
self.train_y,
self.val_x,
self.val_y,
self.Ns,
self.tclen,
self.thebatchsize,
) = prep(
self.window_size,
thesuffix=self.thesuffix,
thedatadir=self.thedatadir,
inputfrag=self.inputfrag,
targetfrag=self.targetfrag,
startskip=self.startskip,
endskip=self.endskip,
step=self.step,
dofft=self.dofft,
usebadpts=self.usebadpts,
excludethresh=self.excludethresh,
excludebysubject=self.excludebysubject,
readlim=self.readlim,
readskip=self.readskip,
countlim=self.countlim,
)
def evaluate(self):
self.lossfilename = os.path.join(self.modelname, "loss.png")
LGR.info(f"lossfilename: {self.lossfilename}")
YPred = self.model.predict(self.val_x)
error = self.val_y - YPred
self.pred_error = np.mean(np.square(error))
error2 = self.val_x - self.val_y
self.raw_error = np.mean(np.square(error2))
LGR.info(f"Prediction Error: {self.pred_error}\tRaw Error: {self.raw_error}")
f = open(os.path.join(self.modelname, "loss.txt"), "w")
f.write(
self.modelname
+ ": Prediction Error: "
+ str(self.pred_error)
+ " Raw Error: "
+ str(self.raw_error)
+ "\n"
)
f.close()
self.loss = self.history.history["loss"]
self.val_loss = self.history.history["val_loss"]
epochs = range(len(self.loss))
self.updatemetadata()
plt.figure()
plt.plot(epochs, self.loss, "bo", label="Training loss")
plt.plot(epochs, self.val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.savefig(self.lossfilename)
plt.close()
return self.loss, self.val_loss, self.pred_error, self.raw_error
def initmetadata(self):
self.infodict = {}
self.infodict["window_size"] = self.window_size
self.infodict["usebadpts"] = self.usebadpts
self.infodict["dofft"] = self.dofft
self.infodict["excludethresh"] = self.excludethresh
self.infodict["num_epochs"] = self.num_epochs
self.infodict["num_layers"] = self.num_layers
self.infodict["dropout_rate"] = self.dropout_rate
self.infodict["train_arch"] = sys.platform
self.infodict["modelname"] = self.modelname
tide_io.writedicttojson(self.infodict, os.path.join(self.modelname, "model_meta.json"))
def updatemetadata(self):
self.infodict["loss"] = self.loss
self.infodict["val_loss"] = self.val_loss
self.infodict["raw_error"] = self.raw_error
self.infodict["prediction_error"] = self.pred_error
tide_io.writedicttojson(self.infodict, os.path.join(self.modelname, "model_meta.json"))
def savemodel(self, usehdf=True):
if usehdf:
# save the trained model as a single hdf file
self.model.save(os.path.join(self.modelname, "model.h5"))
else:
# save the model structure to JSON
model_json = self.model.to_json()
with open(os.path.join(self.modelname, "model.json"), "w") as json_file:
json_file.write(model_json)
# save the weights to hdf
self.model.save_weights(os.path.join(self.modelname, "model_weights.h5"))
def loadmodel(self, modelname, usehdf=True, verbose=False):
# read in the data
LGR.info(f"loading {modelname}")
if usehdf:
# load in the model with weights from hdf
self.model = load_model(os.path.join(self.modelpath, modelname, "model.h5"))
else:
with open(os.path.join(self.modelname, "model.json"), "r") as json_file:
loaded_model_json = json_file.read()
self.model = model_from_json(loaded_model_json)
self.model.load_weights(os.path.join(self.modelname, "model_weights.h5"))
if verbose:
self.model.summary()
# now load additional information
self.infodict = tide_io.readdictfromjson(
os.path.join(self.modelpath, modelname, "model_meta.json")
)
self.window_size = self.infodict["window_size"]
self.usebadpts = self.infodict["usebadpts"]
# model is ready to use
self.initialized = True
self.trained = True
def initialize(self):
self.getname()
self.makenet()
self.model.summary()
self.savemodel(usehdf=True)
self.savemodel(usehdf=False)
self.initmetadata()
self.initialized = True
self.trained = False
def train(self):
self.intermediatemodelpath = os.path.join(
self.modelname, "model_e{epoch:02d}_v{val_loss:.4f}.h5"
)
if self.usetensorboard:
tensorboard = TensorBoard(
log_dir=self.intermediatemodelpath + "logs/{}".format(time())
)
self.model.fit(self.train_x, self.train_y, verbose=1, callbacks=[tensorboard])
else:
if self.num_pretrain_epochs > 0:
LGR.info("pretraining model to reproduce input data")
self.history = self.model.fit(
self.train_y,
self.train_y,
batch_size=1024,
epochs=self.num_pretrain_epochs,
shuffle=True,
verbose=1,
callbacks=[
TerminateOnNaN(),
ModelCheckpoint(self.intermediatemodelpath),
],
validation_data=(self.val_y, self.val_y),
)
self.history = self.model.fit(
self.train_x,
self.train_y,
batch_size=1024,
epochs=self.num_epochs,
shuffle=True,
verbose=1,
callbacks=[
TerminateOnNaN(),
ModelCheckpoint(self.intermediatemodelpath),
],
validation_data=(self.val_x, self.val_y),
)
self.savemodel(usehdf=True)
self.savemodel(usehdf=False)
self.trained = True
def apply(self, inputdata, badpts=None):
initscale = mad(inputdata)
scaleddata = inputdata / initscale
predicteddata = scaleddata * 0.0
weightarray = scaleddata * 0.0
N_pts = len(scaleddata)
if self.usebadpts:
if badpts is None:
badpts = scaleddata * 0.0
X = np.zeros(((N_pts - self.window_size - 1), self.window_size, 2))
for i in range(X.shape[0]):
X[i, :, 0] = scaleddata[i : i + self.window_size]
X[i, :, 1] = badpts[i : i + self.window_size]
else:
X = np.zeros(((N_pts - self.window_size - 1), self.window_size, 1))
for i in range(X.shape[0]):
X[i, :, 0] = scaleddata[i : i + self.window_size]
Y = self.model.predict(X)
for i in range(X.shape[0]):
predicteddata[i : i + self.window_size] += Y[i, :, 0]
weightarray[:] = self.window_size
weightarray[0 : self.window_size] = np.linspace(
1.0, self.window_size, self.window_size, endpoint=False
)
weightarray[-(self.window_size + 1) : -1] = np.linspace(
self.window_size, 1.0, self.window_size, endpoint=False
)
return initscale * predicteddata / weightarray
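# Usage sketch (illustrative values only; the array and directory names are
# assumptions, not part of this module's API):
#
#     thefilter = CNNDLFilter(window_size=128, num_epochs=10, modelroot="./models")
#     thefilter.initialize()                   # build the network, write model_meta.json
#     thefilter.loaddata()                     # read matched input/target timecourses
#     thefilter.train()
#     cleaned = thefilter.apply(raw_waveform)  # raw_waveform: 1D numpy array
#
# apply() rescales the input by its MAD, slides a window_size-long window across
# it, averages the overlapping per-window predictions, and restores the scale.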
class MultiscaleCNNDLFilter(DeepLearningFilter):
# from keras.layers import Conv1D, Dense, Dropout, Input, Concatenate, GlobalMaxPooling1D
# from keras.models import Model
# this base model is one branch of the main model
# it takes a time series as an input, performs 1-D convolution, and returns it as an output ready for concatenation
def __init__(
self,
num_filters=10,
kernel_sizes=[4, 8, 12],
input_lens=[64, 128, 192],
input_width=1,
dilation_rate=1,
*args,
**kwargs,
):
self.num_filters = num_filters
self.kernel_sizes = kernel_sizes
self.input_lens = input_lens
self.input_width = input_width
self.dilation_rate = dilation_rate
self.infodict["nettype"] = "multscalecnn"
self.infodict["num_filters"] = self.num_filters
self.infodict["kernel_sizes"] = self.kernel_sizes
self.infodict["input_lens"] = self.input_lens
self.infodict["input_width"] = self.input_width
super(MultiscaleCNNDLFilter, self).__init__(*args, **kwargs)
def getname(self):
self.modelname = "_".join(
[
"model",
"multiscalecnn",
"w" + str(self.window_size),
"l" + str(self.num_layers),
"fn" + str(self.num_filters),
"fl" + str(self.kernel_size),
"e" + str(self.num_epochs),
"t" + str(self.excludethresh),
"s" + str(self.step),
"d" + str(self.dilation_rate),
self.activation,
]
)
if self.usebadpts:
self.modelname += "_usebadpts"
if self.excludebysubject:
self.modelname += "_excludebysubject"
if self.namesuffix is not None:
self.modelname += "_" + self.namesuffix
self.modelpath = os.path.join(self.modelroot, self.modelname)
try:
os.makedirs(self.modelpath)
except OSError:
pass
def makesubnet(self, inputlen, kernelsize):
# the input is a time series of length input_len and width input_width
input_seq = Input(shape=(inputlen, self.input_width))
# 1-D convolution and global max-pooling
convolved = Conv1D(self.num_filters, kernelsize, padding="same", activation="tanh")(
input_seq
)
processed = GlobalMaxPool1D()(convolved)
# dense layer with dropout regularization
compressed = Dense(50, activation="tanh")(processed)
compressed = Dropout(0.3)(compressed)
basemodel = Model(inputs=input_seq, outputs=compressed)
return basemodel
def makenet(self):
# the inputs to the branches are the original time series, and its down-sampled versions
        input_smallseq = Input(shape=(self.input_lens[0], self.input_width))
        input_medseq = Input(shape=(self.input_lens[1], self.input_width))
        input_origseq = Input(shape=(self.input_lens[2], self.input_width))
        # the more down-sampled the time series, the shorter the corresponding filter
        base_net_small = self.makesubnet(self.input_lens[0], self.kernel_sizes[0])
        base_net_med = self.makesubnet(self.input_lens[1], self.kernel_sizes[1])
        base_net_original = self.makesubnet(self.input_lens[2], self.kernel_sizes[2])
embedding_small = base_net_small(input_smallseq)
embedding_med = base_net_med(input_medseq)
embedding_original = base_net_original(input_origseq)
# concatenate all the outputs
merged = Concatenate()([embedding_small, embedding_med, embedding_original])
out = Dense(1, activation="sigmoid")(merged)
self.model = Model(inputs=[input_smallseq, input_medseq, input_origseq], outputs=out)
self.model.compile(optimizer=RMSprop(), loss="mse")
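# Feeding sketch (the preprocessing step is an assumption, it is not defined in
# this module): the same timecourse is presented to the three branches at the
# three lengths given by input_lens, and the multi-input model is called with a
# list of arrays in that order:
#
#     prediction = model.predict([x_small, x_med, x_orig])
#     # x_small: (n, input_lens[0], input_width), x_med: (n, input_lens[1], input_width), etc.
#
# Each branch convolves its input with the kernel size chosen for that scale
# before the three embeddings are concatenated and reduced to a single output.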
class CNNDLFilter(DeepLearningFilter):
def __init__(self, num_filters=10, kernel_size=5, dilation_rate=1, *args, **kwargs):
self.num_filters = num_filters
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.infodict["nettype"] = "cnn"
self.infodict["num_filters"] = self.num_filters
self.infodict["kernel_size"] = self.kernel_size
super(CNNDLFilter, self).__init__(*args, **kwargs)
def getname(self):
self.modelname = "_".join(
[
"model",
"cnn",
"w" + str(self.window_size),
"l" + str(self.num_layers),
"fn" + str(self.num_filters),
"fl" + str(self.kernel_size),
"e" + str(self.num_epochs),
"t" + str(self.excludethresh),
"s" + str(self.step),
"d" + str(self.dilation_rate),
self.activation,
]
)
if self.usebadpts:
self.modelname += "_usebadpts"
if self.excludebysubject:
self.modelname += "_excludebysubject"
if self.namesuffix is not None:
self.modelname += "_" + self.namesuffix
self.modelpath = os.path.join(self.modelroot, self.modelname)
try:
os.makedirs(self.modelpath)
except OSError:
pass
def makenet(self):
self.model = Sequential()
# make the input layer
self.model.add(
Convolution1D(
filters=self.num_filters,
kernel_size=self.kernel_size,
padding="same",
input_shape=(None, self.inputsize),
)
)
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
# make the intermediate layers
for layer in range(self.num_layers - 2):
self.model.add(
Convolution1D(
filters=self.num_filters,
kernel_size=self.kernel_size,
dilation_rate=self.dilation_rate,
padding="same",
)
)
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
# make the output layer
self.model.add(
Convolution1D(filters=self.inputsize, kernel_size=self.kernel_size, padding="same")
)
self.model.compile(optimizer=RMSprop(), loss="mse")
class DenseAutoencoderDLFilter(DeepLearningFilter):
def __init__(self, encoding_dim=10, *args, **kwargs):
self.encoding_dim = encoding_dim
self.infodict["nettype"] = "autoencoder"
self.infodict["encoding_dim"] = self.encoding_dim
super(DenseAutoencoderDLFilter, self).__init__(*args, **kwargs)
def getname(self):
self.modelname = "_".join(
[
"model",
"denseautoencoder",
"w" + str(self.window_size),
"en" + str(self.encoding_dim),
"e" + str(self.num_epochs),
"t" + str(self.excludethresh),
"s" + str(self.step),
self.activation,
]
)
if self.usebadpts:
self.modelname += "_usebadpts"
if self.excludebysubject:
self.modelname += "_excludebysubject"
if self.namesuffix is not None:
self.modelname += "_" + self.namesuffix
self.modelpath = os.path.join(self.modelroot, self.modelname)
try:
os.makedirs(self.modelpath)
except OSError:
pass
def makenet(self):
self.model = Sequential()
# make the input layer
sizefac = 2
for i in range(1, self.num_layers - 1):
sizefac = int(sizefac * 2)
LGR.info(f"input layer - sizefac: {sizefac}")
self.model.add(Dense(sizefac * self.encoding_dim, input_shape=(None, self.inputsize)))
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
# make the intermediate encoding layers
for i in range(1, self.num_layers - 1):
sizefac = int(sizefac // 2)
LGR.info(f"encoder layer {i + 1}, sizefac: {sizefac}")
self.model.add(Dense(sizefac * self.encoding_dim))
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
# make the encoding layer
sizefac = int(sizefac // 2)
LGR.info(f"encoding layer - sizefac: {sizefac}")
self.model.add(Dense(self.encoding_dim))
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
# make the intermediate decoding layers
for i in range(1, self.num_layers):
sizefac = int(sizefac * 2)
LGR.info(f"decoding layer {i}, sizefac: {sizefac}")
self.model.add(Dense(sizefac * self.encoding_dim))
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
# make the output layer
self.model.add(Dense(self.inputsize))
self.model.compile(optimizer=RMSprop(), loss="mse")
class ConvAutoencoderDLFilter(DeepLearningFilter):
def __init__(
self, encoding_dim=10, num_filters=5, kernel_size=5, dilation_rate=1, *args, **kwargs
):
self.encoding_dim = encoding_dim
self.num_filters = num_filters
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.infodict["num_filters"] = self.num_filters
self.infodict["kernel_size"] = self.kernel_size
self.infodict["nettype"] = "autoencoder"
self.infodict["encoding_dim"] = self.encoding_dim
super(ConvAutoencoderDLFilter, self).__init__(*args, **kwargs)
def getname(self):
self.modelname = "_".join(
[
"model",
"convautoencoder",
"w" + str(self.window_size),
"en" + str(self.encoding_dim),
"fn" + str(self.num_filters),
"fl" + str(self.kernel_size),
"e" + str(self.num_epochs),
"t" + str(self.excludethresh),
"s" + str(self.step),
self.activation,
]
)
if self.usebadpts:
self.modelname += "_usebadpts"
if self.excludebysubject:
self.modelname += "_excludebysubject"
if self.namesuffix is not None:
self.modelname += "_" + self.namesuffix
self.modelpath = os.path.join(self.modelroot, self.modelname)
try:
os.makedirs(self.modelpath)
except OSError:
pass
def makenet(self):
self.model = Sequential()
# make the input layer
self.model.add(
Convolution1D(
filters=self.num_filters,
kernel_size=self.kernel_size,
padding="same",
input_shape=(None, self.inputsize),
)
)
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
self.model.add(MaxPooling1D(2, padding="same"))
        layersize = self.window_size
nfilters = self.num_filters
num_encodinglayers = 3
num_decodinglayers = 3
layerprops = [(layersize, nfilters)]
# make the encoding layers
for i in range(num_encodinglayers):
layersize = int(layersize // 2)
nfilters *= 2
LGR.info(f"input layer size: {layersize}, nfilters: {nfilters}")
self.model.add(
Convolution1D(filters=nfilters, kernel_size=self.kernel_size, padding="same")
)
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
self.model.add(MaxPooling1D(2, padding="same"))
# make the decoding layers
for i in range(num_decodinglayers):
self.model.add(UpSampling1D(2))
layersize *= 2
nfilters = int(nfilters // 2)
LGR.info(f"input layer size: {layersize}")
self.model.add(
Convolution1D(
filters=self.num_filters,
kernel_size=self.kernel_size,
padding="same",
)
)
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
# make the intermediate encoding layers
for i in range(1, self.num_layers - 1):
LGR.info(f"input layer size: {layersize}")
self.model.add(
Convolution1D(
filters=self.num_filters,
kernel_size=self.kernel_size,
padding="same",
)
)
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
self.model.add(MaxPooling1D(2, padding="same"))
layersize = int(layersize // 2)
# make the encoding layer
LGR.info(f"input layer size: {layersize}")
self.model.add(
Convolution1D(filters=self.num_filters, kernel_size=self.kernel_size, padding="same")
)
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
# make the intermediate decoding layers
for i in range(1, self.num_layers):
self.model.add(UpSampling1D(2))
layersize = layersize * 2
LGR.info(f"input layer size: {layersize}")
self.model.add(
Convolution1D(
filters=self.num_filters,
kernel_size=self.kernel_size,
padding="same",
)
)
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
# make the output layer
LGR.info(f"input layer size: {layersize}")
self.model.add(
Convolution1D(filters=self.inputsize, kernel_size=self.kernel_size, padding="same")
)
self.model.compile(optimizer="adam", loss="mse")
"""
class SepCNNDLFilter(DeepLearningFilter):
def __init__(self, num_filters=10, kernel_size=5, dilation_rate=1, *args, **kwargs):
self.num_filters = num_filters
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.infodict['nettype'] = 'sepcnn'
self.infodict['num_filters'] = self.num_filters
self.infodict['kernel_size'] = self.kernel_size
super(sepcnn, self).__init__(*args, **kwargs)
def getname(self):
self.modelname = '_'.join(['model',
'sepcnn',
'w' + str(self.window_size),
'l' + str(self.num_layers),
'fn' + str(self.num_filters),
'fl' + str(self.kernel_size),
'e' + str(self.num_epochs),
't' + str(self.excludethresh),
's' + str(self.step),
'd' + str(self.dilation_rate),
self.activation])
if self.usebadpts:
self.modelname += '_usebadpts'
if self.excludebysubject:
self.modelname += '_excludebysubject'
if self.namesuffix is not None:
self.modelname += '_' + self.namesuffix
self.modelpath = os.path.join(self.modelroot, self.modelname)
try:
os.makedirs(self.modelpath)
except OSError:
pass
def makenet(self):
self.model = Sequential()
# make the input layer
self.model.add(SeparableConvolution1D(filters=self.num_filters, kernel_size=self.kernel_size, padding='same',
input_shape=(None, self.inputsize)))
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
# make the intermediate layers
for layer in range(self.num_layers - 2):
self.model.add(SeparableConvolution1D(filters=self.num_filters,
kernel_size=self.kernel_size,
dilation_rate=self.dilation_rate,
padding='same'))
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
# make the output layer
self.model.add(SeparableConvolution1D(filters=self.inputsize, kernel_size=self.kernel_size, padding='same'))
self.model.compile(optimizer=RMSprop(),
loss='mse')
"""
class LSTMDLFilter(DeepLearningFilter):
def __init__(self, num_units=16, *args, **kwargs):
self.num_units = num_units
self.infodict["nettype"] = "lstm"
self.infodict["num_units"] = self.num_units
super(LSTMDLFilter, self).__init__(*args, **kwargs)
def getname(self):
self.modelname = "_".join(
[
"model",
"lstm",
"w" + str(self.window_size),
"l" + str(self.num_layers),
"nu" + str(self.num_units),
"d" + str(self.dropout_rate),
"rd" + str(self.dropout_rate),
"e" + str(self.num_epochs),
"t" + str(self.excludethresh),
"s" + str(self.step),
]
)
if self.excludebysubject:
self.modelname += "_excludebysubject"
self.modelpath = os.path.join(self.modelroot, self.modelname)
try:
os.makedirs(self.modelpath)
except OSError:
pass
def makenet(self):
self.model = Sequential()
# each layer consists of an LSTM followed by a dense time distributed layer to get it back to the window size
for layer in range(self.num_layers):
self.model.add(
Bidirectional(
LSTM(
self.num_units,
dropout=self.dropout_rate,
recurrent_dropout=self.dropout_rate,
return_sequences=True,
),
input_shape=(self.window_size, 1),
)
)
self.model.add(TimeDistributed(Dense(1)))
self.model.compile(optimizer="adam", loss="mse")
class HybridDLFilter(DeepLearningFilter):
def __init__(self, invert=False, num_filters=10, kernel_size=5, num_units=16, *args, **kwargs):
self.invert = invert
self.num_filters = num_filters
self.kernel_size = kernel_size
self.num_units = num_units
self.infodict["nettype"] = "hybrid"
self.infodict["num_filters"] = self.num_filters
self.infodict["kernel_size"] = self.kernel_size
self.infodict["invert"] = self.invert
self.infodict["num_units"] = self.num_units
super(HybridDLFilter, self).__init__(*args, **kwargs)
def getname(self):
self.modelname = "_".join(
[
"model",
"hybrid",
"w" + str(self.window_size),
"l" + str(self.num_layers),
"fn" + str(self.num_filters),
"fl" + str(self.kernel_size),
"nu" + str(self.num_units),
"d" + str(self.dropout_rate),
"rd" + str(self.dropout_rate),
"e" + str(self.num_epochs),
"t" + str(self.excludethresh),
"s" + str(self.step),
self.activation,
]
)
if self.invert:
self.modelname += "_invert"
if self.excludebysubject:
self.modelname += "_excludebysubject"
self.modelpath = os.path.join(self.modelroot, self.modelname)
try:
os.makedirs(self.modelpath)
except OSError:
pass
def makenet(self):
self.model = Sequential()
if self.invert:
# make the input layer
self.model.add(
Convolution1D(
filters=self.num_filters,
kernel_size=self.kernel_size,
padding="same",
input_shape=(self.window_size, self.inputsize),
)
)
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
            # then make the intermediate CNN layers
for layer in range(self.num_layers - 2):
self.model.add(
Convolution1D(
filters=self.num_filters,
kernel_size=self.kernel_size,
padding="same",
)
)
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
# finish with an LSTM layer to find hidden states
self.model.add(
Bidirectional(
LSTM(
self.num_units,
dropout=self.dropout_rate,
recurrent_dropout=self.dropout_rate,
return_sequences=True,
),
input_shape=(self.window_size, 1),
)
)
self.model.add(TimeDistributed(Dense(1)))
else:
# start with an LSTM layer to find hidden states
self.model.add(
Bidirectional(
LSTM(
self.num_units,
dropout=self.dropout_rate,
recurrent_dropout=self.dropout_rate,
return_sequences=True,
),
input_shape=(self.window_size, 1),
)
)
self.model.add(TimeDistributed(Dense(1)))
self.model.add(Dropout(rate=self.dropout_rate))
            # then make the intermediate CNN layers
for layer in range(self.num_layers - 2):
self.model.add(
Convolution1D(
filters=self.num_filters,
kernel_size=self.kernel_size,
padding="same",
)
)
self.model.add(BatchNormalization())
self.model.add(Dropout(rate=self.dropout_rate))
self.model.add(Activation(self.activation))
# make the output layer
self.model.add(
Convolution1D(filters=self.inputsize, kernel_size=self.kernel_size, padding="same")
)
self.model.compile(optimizer=RMSprop(), loss="mse")
def filtscale(
data,
scalefac=1.0,
reverse=False,
hybrid=False,
lognormalize=True,
epsilon=1e-10,
numorders=6,
):
if not reverse:
specvals = fftpack.fft(data)
if lognormalize:
themag = np.log(np.absolute(specvals) + epsilon)
scalefac = np.max(themag)
themag = (themag - scalefac + numorders) / numorders
themag[np.where(themag < 0.0)] = 0.0
else:
scalefac = np.std(data)
themag = np.absolute(specvals) / scalefac
thephase = np.angle(specvals)
thephase = thephase / (2.0 * np.pi) - 0.5
if hybrid:
return np.stack((data, themag), axis=1), scalefac
else:
return np.stack((themag, thephase), axis=1), scalefac
else:
if hybrid:
return data[:, 0]
else:
thephase = (data[:, 1] + 0.5) * 2.0 * np.pi
if lognormalize:
themag = np.exp(data[:, 0] * numorders - numorders + scalefac)
else:
themag = data[:, 0] * scalefac
specvals = themag * np.exp(1.0j * thephase)
return fftpack.ifft(specvals).real
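# Round-trip sketch for filtscale (tc is assumed to be a 1D numpy array):
#
#     spec, fac = filtscale(tc)                         # spec: (len(tc), 2) magnitude/phase
#     recovered = filtscale(spec, scalefac=fac, reverse=True)
#
# With lognormalize=True the log magnitude is referenced to its own maximum and
# compressed into roughly [0, 1], which keeps spectral network inputs bounded;
# the reverse path undoes the magnitude and phase scaling before the inverse FFT.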
def tobadpts(name):
return name.replace(".txt", "_badpts.txt")
def targettoinput(name, targetfrag="xyz", inputfrag="abc"):
LGR.debug(f"replacing {targetfrag} with {inputfrag}")
return name.replace(targetfrag, inputfrag)
def getmatchedfiles(searchstring, usebadpts=False, targetfrag="xyz", inputfrag="abc"):
# list all of the target files
fromfile = sorted(glob.glob(searchstring))
LGR.debug(f"searchstring: {searchstring} -> {fromfile}")
# make sure all files exist
matchedfilelist = []
for targetname in fromfile:
if os.path.isfile(targettoinput(targetname, targetfrag=targetfrag, inputfrag=inputfrag)):
if usebadpts:
if os.path.isfile(
tobadpts(targetname.replace("alignedpleth", "pleth"))
) and os.path.isfile(
tobadpts(
targettoinput(
targetname,
targetfrag=targetfrag,
inputfrag=inputfrag,
)
)
):
matchedfilelist.append(targetname)
LGR.debug(matchedfilelist[-1])
else:
matchedfilelist.append(targetname)
LGR.debug(matchedfilelist[-1])
if usebadpts:
LGR.info(f"{len(matchedfilelist)} runs pass all 4 files present check")
else:
LGR.info(f"{len(matchedfilelist)} runs pass both files present check")
# find out how long the files are
tempy = np.loadtxt(matchedfilelist[0])
tempx = np.loadtxt(
targettoinput(matchedfilelist[0], targetfrag=targetfrag, inputfrag=inputfrag)
)
tclen = np.min([tempx.shape[0], tempy.shape[0]])
LGR.info(f"tclen set to {tclen}")
return matchedfilelist, tclen
def readindata(
matchedfilelist,
tclen,
targetfrag="xyz",
inputfrag="abc",
usebadpts=False,
startskip=0,
endskip=0,
readlim=None,
readskip=None,
):
LGR.info(
"readindata called with usebadpts, startskip, endskip, readlim, readskip, targetfrag, inputfrag = "
f"{usebadpts} {startskip} {endskip} {readlim} {readskip} {targetfrag} {inputfrag}"
)
# allocate target arrays
LGR.info("allocating arrays")
s = len(matchedfilelist[readskip:])
if readlim is not None:
if s > readlim:
LGR.info(f"trimming read list to {readlim} from {s}")
s = readlim
x1 = np.zeros((tclen, s))
y1 = np.zeros((tclen, s))
names = []
if usebadpts:
bad1 = np.zeros((tclen, s))
# now read the data in
count = 0
LGR.info("checking data")
nanfiles = []
shortfiles = []
strangemagfiles = []
for i in range(readskip, readskip + s):
nanfound = False
LGR.info(f"processing {matchedfilelist[i]}")
tempy = np.loadtxt(matchedfilelist[i])
tempx = np.loadtxt(
targettoinput(
matchedfilelist[i],
targetfrag=targetfrag,
inputfrag=inputfrag,
)
)
if np.any(np.isnan(tempy)):
LGR.info(f"NaN found in file {matchedfilelist[i]} - discarding")
nanfound = True
nanfiles.append(matchedfilelist[i])
if np.any(np.isnan(tempx)):
nan_fname = targettoinput(
matchedfilelist[i], targetfrag=targetfrag, inputfrag=inputfrag
)
LGR.info(f"NaN found in file {nan_fname} - discarding")
nanfound = True
nanfiles.append(nan_fname)
strangefound = False
if not (0.5 < np.std(tempx) < 20.0):
strange_fname = targettoinput(
matchedfilelist[i], targetfrag=targetfrag, inputfrag=inputfrag
)
LGR.info(f"file {strange_fname} has an extreme standard deviation - discarding")
strangefound = True
strangemagfiles.append(strange_fname)
if not (0.5 < np.std(tempy) < 20.0):
LGR.info(f"file {matchedfilelist[i]} has an extreme standard deviation - discarding")
strangefound = True
strangemagfiles.append(matchedfilelist[i])
shortfound = False
ntempx = tempx.shape[0]
ntempy = tempy.shape[0]
if ntempx < tclen:
short_fname = targettoinput(
matchedfilelist[i], targetfrag=targetfrag, inputfrag=inputfrag
)
LGR.info(f"file {short_fname} is short - discarding")
shortfound = True
shortfiles.append(short_fname)
if ntempy < tclen:
LGR.info(f"file {matchedfilelist[i]} is short - discarding")
shortfound = True
shortfiles.append(matchedfilelist[i])
if (
(ntempx >= tclen)
and (ntempy >= tclen)
and (not nanfound)
and (not shortfound)
and (not strangefound)
):
x1[:tclen, count] = tempx[:tclen]
y1[:tclen, count] = tempy[:tclen]
names.append(matchedfilelist[i])
if usebadpts:
tempbad1 = np.loadtxt(
tobadpts(matchedfilelist[i].replace("alignedpleth", "pleth"))
)
tempbad2 = np.loadtxt(
tobadpts(
targettoinput(
matchedfilelist[i],
targetfrag=targetfrag,
inputfrag=inputfrag,
)
)
)
bad1[:tclen, count] = 1.0 - (1.0 - tempbad1[:tclen]) * (1.0 - tempbad2[:tclen])
count += 1
LGR.info(f"{count} runs pass file length check")
if len(nanfiles) > 0:
LGR.info("files with NaNs:")
for thefile in nanfiles:
LGR.info(f"\t{thefile}")
if len(shortfiles) > 0:
LGR.info("short files:")
for thefile in shortfiles:
LGR.info(f"\t{thefile}")
if len(strangemagfiles) > 0:
LGR.info("files with extreme standard deviations:")
for thefile in strangemagfiles:
LGR.info(f"\t{thefile}")
if usebadpts:
return (
x1[startskip:-endskip, :count],
y1[startskip:-endskip, :count],
names[:count],
bad1[startskip:-endskip, :count],
)
else:
return (
x1[startskip:-endskip, :count],
y1[startskip:-endskip, :count],
names[:count],
)
def prep(
window_size,
step=1,
excludethresh=4.0,
usebadpts=False,
startskip=200,
endskip=200,
excludebysubject=True,
thesuffix="sliceres",
thedatadir="/data1/frederic/test/output",
inputfrag="abc",
targetfrag="xyz",
dofft=False,
readlim=None,
readskip=None,
countlim=None,
):
"""
prep - reads in training and validation data for 1D filter
Parameters
----------
    window_size : int
        number of timepoints in each training/validation window
    step : int
        stride, in points, between the starts of successive windows
    excludethresh : float
        windows (or subjects) whose normalized amplitude exceeds this are discarded
    excludebysubject : bool
        if True, discard whole subjects that exceed excludethresh; otherwise discard individual windows
    usebadpts : bool
        if True, read the matching *_badpts.txt files and add them as a second input channel
    startskip : int
        number of points to trim from the start of each timecourse
    endskip : int
        number of points to trim from the end of each timecourse
    thesuffix : str
        filename suffix used to locate the timecourse files
    thedatadir : str
        directory containing the timecourse text files
    inputfrag : str
        filename fragment identifying the input (source) timecourses
    targetfrag : str
        filename fragment identifying the target timecourses
    dofft : bool
        if True, return Fourier-domain (magnitude/phase) windows instead of time-domain windows
    readlim : int or None
        maximum number of files to read
    readskip : int or None
        number of files to skip at the start of the matched file list
    countlim : int or None
        maximum number of clean subjects to keep
Returns
-------
    train_x, train_y, val_x, val_y, N_subjs, tclen - startskip - endskip, batchsize
    (plus Xscale_fourier and Yscale_fourier when dofft is True)
"""
searchstring = os.path.join(thedatadir, "*_" + targetfrag + "_" + thesuffix + ".txt")
# find matched files
matchedfilelist, tclen = getmatchedfiles(
searchstring,
usebadpts=usebadpts,
targetfrag=targetfrag,
inputfrag=inputfrag,
)
# read in the data from the matched files
if usebadpts:
x, y, names, bad = readindata(
matchedfilelist,
tclen,
targetfrag=targetfrag,
inputfrag=inputfrag,
usebadpts=True,
startskip=startskip,
endskip=endskip,
readlim=readlim,
readskip=readskip,
)
else:
x, y, names = readindata(
matchedfilelist,
tclen,
targetfrag=targetfrag,
inputfrag=inputfrag,
startskip=startskip,
endskip=endskip,
readlim=readlim,
readskip=readskip,
)
LGR.info(f"xshape, yshape: {x.shape} {y.shape}")
# normalize input and output data
LGR.info("normalizing data")
LGR.info(f"count: {x.shape[1]}")
if LGR.getEffectiveLevel() <= logging.DEBUG:
# Only take these steps if the logger is set to DEBUG.
for thesubj in range(x.shape[1]):
LGR.debug(
f"prenorm sub {thesubj} min, max, mean, std, MAD x, y: "
f"{thesubj} "
f"{np.min(x[:, thesubj])} {np.max(x[:, thesubj])} {np.mean(x[:, thesubj])} "
f"{np.std(x[:, thesubj])} {mad(x[:, thesubj])} {np.min(y[:, thesubj])} "
f"{np.max(y[:, thesubj])} {np.mean(y[:, thesubj])} {np.std(x[:, thesubj])} "
f"{mad(y[:, thesubj])}"
)
y -= np.mean(y, axis=0)
themad = mad(y, axis=0)
for thesubj in range(themad.shape[0]):
if themad[thesubj] > 0.0:
y[:, thesubj] /= themad[thesubj]
x -= np.mean(x, axis=0)
themad = mad(x, axis=0)
for thesubj in range(themad.shape[0]):
if themad[thesubj] > 0.0:
x[:, thesubj] /= themad[thesubj]
if LGR.getEffectiveLevel() <= logging.DEBUG:
# Only take these steps if the logger is set to DEBUG.
for thesubj in range(x.shape[1]):
LGR.debug(
f"postnorm sub {thesubj} min, max, mean, std, MAD x, y: "
f"{thesubj} "
f"{np.min(x[:, thesubj])} {np.max(x[:, thesubj])} {np.mean(x[:, thesubj])} "
f"{np.std(x[:, thesubj])} {mad(x[:, thesubj])} {np.min(y[:, thesubj])} "
f"{np.max(y[:, thesubj])} {np.mean(y[:, thesubj])} {np.std(x[:, thesubj])} "
f"{mad(y[:, thesubj])}"
)
# now decide what to keep and what to exclude
thefabs = np.fabs(x)
if not excludebysubject:
N_pts = x.shape[0]
N_subjs = x.shape[1]
windowspersubject = np.int64((N_pts - window_size - 1) // step)
LGR.info(
f"{N_subjs} subjects with {N_pts} points will be evaluated with "
f"{windowspersubject} windows per subject with step {step}"
)
usewindow = np.zeros(N_subjs * windowspersubject, dtype=np.int64)
subjectstarts = np.zeros(N_subjs, dtype=np.int64)
# check each window
numgoodwindows = 0
LGR.info("checking windows")
subjectnames = []
for subj in range(N_subjs):
subjectstarts[subj] = numgoodwindows
subjectnames.append(names[subj])
LGR.info(f"{names[subj]} starts at {numgoodwindows}")
for windownumber in range(windowspersubject):
if (
np.max(
thefabs[
step * windownumber : (step * windownumber + window_size),
subj,
]
)
<= excludethresh
):
usewindow[subj * windowspersubject + windownumber] = 1
numgoodwindows += 1
LGR.info(
f"found {numgoodwindows} out of a potential {N_subjs * windowspersubject} "
f"({100.0 * numgoodwindows / (N_subjs * windowspersubject)}%)"
)
for subj in range(N_subjs):
LGR.info(f"{names[subj]} starts at {subjectstarts[subj]}")
LGR.info("copying data into windows")
Xb = np.zeros((numgoodwindows, window_size, 1))
Yb = np.zeros((numgoodwindows, window_size, 1))
if usebadpts:
Xb_withbad = np.zeros((numgoodwindows, window_size, 1))
LGR.info(f"dimensions of Xb: {Xb.shape}")
thiswindow = 0
for subj in range(N_subjs):
for windownumber in range(windowspersubject):
if usewindow[subj * windowspersubject + windownumber] == 1:
Xb[thiswindow, :, 0] = x[
step * windownumber : (step * windownumber + window_size), subj
]
Yb[thiswindow, :, 0] = y[
step * windownumber : (step * windownumber + window_size), subj
]
if usebadpts:
Xb_withbad[thiswindow, :, 0] = bad[
step * windownumber : (step * windownumber + window_size),
subj,
]
thiswindow += 1
else:
# now check for subjects that have regions that exceed the target
themax = np.max(thefabs, axis=0)
cleansubjs = np.where(themax < excludethresh)[0]
totalcount = x.shape[1] + 0
cleancount = len(cleansubjs)
if countlim is not None:
if cleancount > countlim:
LGR.info(f"reducing count to {countlim} from {cleancount}")
cleansubjs = cleansubjs[:countlim]
x = x[:, cleansubjs]
y = y[:, cleansubjs]
cleannames = []
for theindex in cleansubjs:
cleannames.append(names[theindex])
if usebadpts:
bad = bad[:, cleansubjs]
subjectnames = cleannames
LGR.info(f"after filtering, shape of x is {x.shape}")
N_pts = y.shape[0]
N_subjs = y.shape[1]
X = np.zeros((1, N_pts, N_subjs))
Y = np.zeros((1, N_pts, N_subjs))
if usebadpts:
BAD = np.zeros((1, N_pts, N_subjs))
X[0, :, :] = x
Y[0, :, :] = y
if usebadpts:
BAD[0, :, :] = bad
windowspersubject = int((N_pts - window_size - 1) // step)
LGR.info(
f"found {windowspersubject * cleancount} out of a potential "
f"{windowspersubject * totalcount} "
f"({100.0 * cleancount / totalcount}%)"
)
LGR.info(f"{windowspersubject} {cleancount} {totalcount}")
Xb = np.zeros((N_subjs * windowspersubject, window_size, 1))
LGR.info(f"dimensions of Xb: {Xb.shape}")
for j in range(N_subjs):
LGR.info(
f"sub {j} ({cleannames[j]}) min, max X, Y: "
f"{j} {np.min(X[0, :, j])} {np.max(X[0, :, j])} {np.min(Y[0, :, j])} "
f"{np.max(Y[0, :, j])}"
)
for i in range(windowspersubject):
Xb[j * windowspersubject + i, :, 0] = X[0, step * i : (step * i + window_size), j]
Yb = np.zeros((N_subjs * windowspersubject, window_size, 1))
LGR.info(f"dimensions of Yb: {Yb.shape}")
for j in range(N_subjs):
for i in range(windowspersubject):
Yb[j * windowspersubject + i, :, 0] = Y[0, step * i : (step * i + window_size), j]
if usebadpts:
Xb_withbad = np.zeros((N_subjs * windowspersubject, window_size, 2))
LGR.info(f"dimensions of Xb_withbad: {Xb_withbad.shape}")
for j in range(N_subjs):
LGR.info(f"packing data for subject {j}")
for i in range(windowspersubject):
Xb_withbad[j * windowspersubject + i, :, 0] = X[
0, step * i : (step * i + window_size), j
]
Xb_withbad[j * windowspersubject + i, :, 1] = BAD[
0, step * i : (step * i + window_size), j
]
Xb = Xb_withbad
        subjectstarts = np.arange(N_subjs) * windowspersubject  # first window index of each subject
for subj in range(N_subjs):
LGR.info(f"{names[subj]} starts at {subjectstarts[subj]}")
LGR.info(f"Xb.shape: {Xb.shape}")
LGR.info(f"Yb.shape: {Yb.shape}")
if dofft:
Xb_fourier = np.zeros((N_subjs * windowspersubject, window_size, 2))
LGR.info(f"dimensions of Xb_fourier: {Xb_fourier.shape}")
Xscale_fourier = np.zeros((N_subjs, windowspersubject))
LGR.info(f"dimensions of Xscale_fourier: {Xscale_fourier.shape}")
Yb_fourier = np.zeros((N_subjs * windowspersubject, window_size, 2))
LGR.info(f"dimensions of Yb_fourier: {Yb_fourier.shape}")
Yscale_fourier = np.zeros((N_subjs, windowspersubject))
LGR.info(f"dimensions of Yscale_fourier: {Yscale_fourier.shape}")
for j in range(N_subjs):
LGR.info(f"transforming subject {j}")
for i in range((N_pts - window_size - 1)):
(
Xb_fourier[j * windowspersubject + i, :, :],
Xscale_fourier[j, i],
) = filtscale(X[0, step * i : (step * i + window_size), j])
(
Yb_fourier[j * windowspersubject + i, :, :],
Yscale_fourier[j, i],
) = filtscale(Y[0, step * i : (step * i + window_size), j])
limit = np.int64(0.8 * Xb.shape[0])
LGR.info(f"limit: {limit} out of {len(subjectstarts)}")
# find nearest subject start
firstvalsubject = np.abs(subjectstarts - limit).argmin()
LGR.info(f"firstvalsubject: {firstvalsubject}")
perm_train = np.random.permutation(np.int64(np.arange(subjectstarts[firstvalsubject])))
perm_val = np.random.permutation(
np.int64(np.arange(subjectstarts[firstvalsubject], Xb.shape[0]))
)
LGR.info("training subjects:")
for i in range(0, firstvalsubject):
LGR.info(f"\t{i} {subjectnames[i]}")
LGR.info("validation subjects:")
for i in range(firstvalsubject, len(subjectstarts)):
LGR.info(f"\t{i} {subjectnames[i]}")
perm = range(Xb.shape[0])
batchsize = windowspersubject
if dofft:
train_x = Xb_fourier[perm[:limit], :, :]
train_y = Yb_fourier[perm[:limit], :, :]
val_x = Xb_fourier[perm[limit:], :, :]
val_y = Yb_fourier[perm[limit:], :, :]
LGR.info(f"train, val dims: {train_x.shape} {train_y.shape} {val_x.shape} {val_y.shape}")
return (
train_x,
train_y,
val_x,
val_y,
N_subjs,
tclen - startskip - endskip,
batchsize,
Xscale_fourier,
Yscale_fourier,
)
else:
train_x = Xb[perm_train, :, :]
train_y = Yb[perm_train, :, :]
val_x = Xb[perm_val, :, :]
val_y = Yb[perm_val, :, :]
LGR.info(f"train, val dims: {train_x.shape} {train_y.shape} {val_x.shape} {val_y.shape}")
return (
train_x,
train_y,
val_x,
val_y,
N_subjs,
tclen - startskip - endskip,
batchsize,
)
| 35.658711 | 119 | 0.554263 |
79448a3d5d9f296c6f4f6654ef45dc3b080ced3c | 861 | py | Python | quadratic.py | musicmonkey1223/pythonphilosophy | c859bd0a6c0380d6d7a28c4d3b87c47c2368277d | [
"Apache-2.0"
] | 1 | 2021-02-14T02:33:28.000Z | 2021-02-14T02:33:28.000Z | quadratic.py | musicmonkey1223/pythonphilosophy | c859bd0a6c0380d6d7a28c4d3b87c47c2368277d | [
"Apache-2.0"
] | null | null | null | quadratic.py | musicmonkey1223/pythonphilosophy | c859bd0a6c0380d6d7a28c4d3b87c47c2368277d | [
"Apache-2.0"
] | null | null | null | import cmath
def quadratic(a,b,c):
print("The quadratic equation is: ",a,"x^2 +",b,"x +",c) # printing the quadratic equation from a,b,c
    equation = cmath.sqrt(b*b - 4*a*c) # square root of the discriminant in the quadratic formula
    x1 = (-b + equation)/(2*a) # first value of x
    x2 = (-b - equation)/(2*a) # second value of x
print("The values of x are: ",x1,x2) # printing x values...for quadratic equation, x has 2 values
a = int(input("Enter a value for 'a': ")) #asking the inputs for a,b,c
b = int(input("Enter a value for 'b': "))
c = int(input("Enter a value for 'c': "))
quadratic(a,b,c) # function call | 61.5 | 124 | 0.45993 |
79448a4e335b956a3d68ac267f6c3eb702567d6c | 1,956 | py | Python | test/send_to_driver/test_listener.py | sapcc/f5-openstack-agent | 61e19c85dcca202db8c594aaa09dee1c1389fc79 | [
"Apache-2.0"
] | null | null | null | test/send_to_driver/test_listener.py | sapcc/f5-openstack-agent | 61e19c85dcca202db8c594aaa09dee1c1389fc79 | [
"Apache-2.0"
] | null | null | null | test/send_to_driver/test_listener.py | sapcc/f5-openstack-agent | 61e19c85dcca202db8c594aaa09dee1c1389fc79 | [
"Apache-2.0"
] | 2 | 2018-02-27T08:42:30.000Z | 2018-03-09T16:34:46.000Z | # -*- coding: utf-8 -*-
'''
test_requirements = {'devices': [VE],
'openstack_infra': []}
'''
# Copyright 2015-2106 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from f5_openstack_agent.lbaasv2.drivers.bigip.listener_service import \
ListenerServiceBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip.loadbalancer_service import \
LoadBalancerServiceBuilder
def test_create_listener(bigip):
bigips = [bigip]
lb_service = LoadBalancerServiceBuilder()
listener_builder = ListenerServiceBuilder()
service = json.load(open("../../service.json"))["service"]
try:
# create partition
lb_service.prep_service(service, bigips)
# create BIG-IP virtual servers
listeners = service["listeners"]
loadbalancer = service["loadbalancer"]
for listener in listeners:
# create a service object in form expected by builder
svc = {"loadbalancer": loadbalancer,
"listener": listener}
# create
listener_builder.create_listener(svc, bigips)
# validate
l = listener_builder.get_listener(svc, bigips[0])
assert l.name == listener["name"]
print "Created listener: " + l.name
# delete
listener_builder.delete_listener(svc, bigips)
finally:
lb_service.delete_partition(service, bigips)
| 31.548387 | 75 | 0.669734 |
79448afb23900353fcbf29a20ada2dfa9e0222a1 | 28,261 | py | Python | gluon/tests/test_is_url.py | pav0n/web2py_ohka | 2d8302e4d1bffc8c845f9e37638a86bb691a8107 | [
"BSD-3-Clause"
] | 2 | 2017-02-02T00:31:48.000Z | 2017-08-08T22:36:25.000Z | gluon/tests/test_is_url.py | sloe/sloe_web2py | a1524d4da46ff851429a1de2022d852f8f2c8e53 | [
"BSD-3-Clause"
] | null | null | null | gluon/tests/test_is_url.py | sloe/sloe_web2py | a1524d4da46ff851429a1de2022d852f8f2c8e53 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unit tests for IS_URL()
"""
import unittest
from fix_path import fix_sys_path
fix_sys_path(__file__)
from validators import IS_URL, IS_HTTP_URL, IS_GENERIC_URL
from validators import unicode_to_ascii_authority
class TestIsUrl(unittest.TestCase):
def testModeHttp(self):
# defaults to mode='http'
x = IS_URL()
self.assertEqual(x('http://google.ca'), ('http://google.ca',
None))
self.assertEqual(x('google.ca'), ('http://google.ca', None))
self.assertEqual(x('google.ca:80'), ('http://google.ca:80',
None))
self.assertEqual(x('unreal.blargg'), ('unreal.blargg',
'Enter a valid URL'))
self.assertEqual(x('google..ca'), ('google..ca', 'Enter a valid URL'))
self.assertEqual(
x('google.ca..'), ('google.ca..', 'Enter a valid URL'))
# explicit use of 'http' mode
x = IS_URL(mode='http')
self.assertEqual(x('http://google.ca'), ('http://google.ca',
None))
self.assertEqual(x('google.ca'), ('http://google.ca', None))
self.assertEqual(x('google.ca:80'), ('http://google.ca:80',
None))
self.assertEqual(x('unreal.blargg'), ('unreal.blargg',
'Enter a valid URL'))
# prepends 'https' instead of 'http'
x = IS_URL(mode='http', prepend_scheme='https')
self.assertEqual(x('http://google.ca'), ('http://google.ca',
None))
self.assertEqual(x('google.ca'), ('https://google.ca', None))
self.assertEqual(x('google.ca:80'), ('https://google.ca:80',
None))
self.assertEqual(x('unreal.blargg'), ('unreal.blargg',
'Enter a valid URL'))
# prepending disabled
x = IS_URL(prepend_scheme=None)
self.assertEqual(x('http://google.ca'), ('http://google.ca',
None))
self.assertEqual(x('google.ca'), ('google.ca', None))
self.assertEqual(x('google.ca:80'), ('google.ca:80', None))
self.assertEqual(x('unreal.blargg'), ('unreal.blargg',
'Enter a valid URL'))
# custom allowed_schemes
x = IS_URL(mode='http', allowed_schemes=[None, 'http'])
self.assertEqual(x('http://google.ca'), ('http://google.ca',
None))
self.assertEqual(x('https://google.ca'), ('https://google.ca',
'Enter a valid URL'))
self.assertEqual(x('google.ca'), ('http://google.ca', None))
self.assertEqual(x('google.ca:80'), ('http://google.ca:80',
None))
self.assertEqual(x('unreal.blargg'), ('unreal.blargg',
'Enter a valid URL'))
# custom allowed_schemes, excluding None
x = IS_URL(allowed_schemes=['http'])
self.assertEqual(x('http://google.ca'), ('http://google.ca',
None))
self.assertEqual(x('https://google.ca'), ('https://google.ca',
'Enter a valid URL'))
self.assertEqual(x('google.ca'), ('google.ca', 'Enter a valid URL'))
self.assertEqual(x('google.ca:80'), ('google.ca:80',
'Enter a valid URL'))
self.assertEqual(x('unreal.blargg'), ('unreal.blargg',
'Enter a valid URL'))
# custom allowed_schemes and prepend_scheme
x = IS_URL(allowed_schemes=[None, 'https'],
prepend_scheme='https')
self.assertEqual(x('http://google.ca'), ('http://google.ca',
'Enter a valid URL'))
self.assertEqual(x('https://google.ca'), ('https://google.ca',
None))
self.assertEqual(x('google.ca'), ('https://google.ca', None))
self.assertEqual(x('google.ca:80'), ('https://google.ca:80',
None))
self.assertEqual(x('unreal.blargg'), ('unreal.blargg',
'Enter a valid URL'))
# Now any URL requiring prepending will fail, but prepending is still
# enabled!
x = IS_URL(allowed_schemes=['http'])
self.assertEqual(x('google.ca'), ('google.ca', 'Enter a valid URL'))
def testModeGeneric(self):
# 'generic' mode
x = IS_URL(mode='generic')
self.assertEqual(x('http://google.ca'), ('http://google.ca', None))
self.assertEqual(x('google.ca'), ('google.ca', None))
self.assertEqual(x('google.ca:80'), ('http://google.ca:80', None))
self.assertEqual(x('blargg://unreal'), ('blargg://unreal',
'Enter a valid URL'))
# 'generic' mode with custom allowed_schemes that still includes
# 'http' (the default for prepend_scheme)
x = IS_URL(mode='generic', allowed_schemes=['http', 'blargg'])
self.assertEqual(x('http://google.ca'), ('http://google.ca',
None))
self.assertEqual(x('ftp://google.ca'), ('ftp://google.ca',
'Enter a valid URL'))
self.assertEqual(x('google.ca'), ('google.ca', 'Enter a valid URL'))
self.assertEqual(x('google.ca:80'), ('google.ca:80',
'Enter a valid URL'))
self.assertEqual(x('blargg://unreal'), ('blargg://unreal',
None))
        # 'generic' mode with overridden prepend_scheme
x = IS_URL(mode='generic', prepend_scheme='ftp')
self.assertEqual(x('http://google.ca'), ('http://google.ca',
None))
self.assertEqual(x('ftp://google.ca'), ('ftp://google.ca',
None))
self.assertEqual(x('google.ca'), ('google.ca', None))
self.assertEqual(x('google.ca:80'), ('ftp://google.ca:80',
None))
self.assertEqual(x('blargg://unreal'), ('blargg://unreal',
'Enter a valid URL'))
        # 'generic' mode with overridden allowed_schemes and prepend_scheme
x = IS_URL(mode='generic', allowed_schemes=[None, 'ftp', 'ftps'
], prepend_scheme='ftp')
self.assertEqual(x('http://google.ca'), ('http://google.ca',
'Enter a valid URL'))
self.assertEqual(x('google.ca'), ('google.ca', None))
self.assertEqual(x('ftp://google.ca'), ('ftp://google.ca',
None))
self.assertEqual(x('google.ca:80'), ('ftp://google.ca:80',
None))
self.assertEqual(x('blargg://unreal'), ('blargg://unreal',
'Enter a valid URL'))
# Now any URL requiring prepending will fail, but prepending is still
# enabled!
x = IS_URL(mode='generic', allowed_schemes=['http'])
self.assertEqual(x('google.ca'), ('google.ca', 'Enter a valid URL'))
def testExceptionalUse(self):
# mode must be in set ['http', 'generic']
try:
x = IS_URL(mode='ftp')
x('http://www.google.ca')
except Exception, e:
if str(e) != "invalid mode 'ftp' in IS_URL":
self.fail('Wrong exception: ' + str(e))
else:
self.fail("Accepted invalid mode: 'ftp'")
# allowed_schemes in 'http' mode must be in set [None, 'http', 'https']
try:
x = IS_URL(allowed_schemes=[None, 'ftp', 'ftps'],
prepend_scheme='ftp')
x('http://www.benn.ca') # we can only reasonably know about the
# error at calling time
except Exception, e:
if str(e)\
!= "allowed_scheme value 'ftp' is not in [None, 'http', 'https']":
self.fail('Wrong exception: ' + str(e))
else:
self.fail(
"Accepted invalid allowed_schemes: [None, 'ftp', 'ftps']")
# prepend_scheme's value must be in allowed_schemes (default for 'http'
# mode is [None, 'http', 'https'])
try:
x = IS_URL(prepend_scheme='ftp')
x('http://www.benn.ca') # we can only reasonably know about the
# error at calling time
except Exception, e:
if str(e)\
!= "prepend_scheme='ftp' is not in allowed_schemes=[None, 'http', 'https']":
self.fail('Wrong exception: ' + str(e))
else:
self.fail("Accepted invalid prepend_scheme: 'ftp'")
# custom allowed_schemes that excludes 'http', so prepend_scheme must be
# specified!
try:
x = IS_URL(allowed_schemes=[None, 'https'])
except Exception, e:
if str(e)\
!= "prepend_scheme='http' is not in allowed_schemes=[None, 'https']":
self.fail('Wrong exception: ' + str(e))
else:
self.fail("Accepted invalid prepend_scheme: 'http'")
# prepend_scheme must be in allowed_schemes
try:
x = IS_URL(allowed_schemes=[None, 'http'],
prepend_scheme='https')
except Exception, e:
if str(e)\
!= "prepend_scheme='https' is not in allowed_schemes=[None, 'http']":
self.fail('Wrong exception: ' + str(e))
else:
self.fail("Accepted invalid prepend_scheme: 'https'")
# prepend_scheme's value (default is 'http') must be in allowed_schemes
try:
x = IS_URL(mode='generic', allowed_schemes=[None, 'ftp',
'ftps'])
except Exception, e:
if str(e)\
!= "prepend_scheme='http' is not in allowed_schemes=[None, 'ftp', 'ftps']":
self.fail('Wrong exception: ' + str(e))
else:
self.fail("Accepted invalid prepend_scheme: 'http'")
# prepend_scheme's value must be in allowed_schemes, which by default
# is all schemes that really exist
try:
x = IS_URL(mode='generic', prepend_scheme='blargg')
x('http://www.google.ca')
# we can only reasonably know about the error at calling time
except Exception, e:
if not str(e).startswith(
"prepend_scheme='blargg' is not in allowed_schemes="):
self.fail('Wrong exception: ' + str(e))
else:
self.fail("Accepted invalid prepend_scheme: 'blargg'")
# prepend_scheme's value must be in allowed_schemes
try:
x = IS_URL(mode='generic', allowed_schemes=[None, 'http'],
prepend_scheme='blargg')
except Exception, e:
if str(e)\
!= "prepend_scheme='blargg' is not in allowed_schemes=[None, 'http']":
self.fail('Wrong exception: ' + str(e))
else:
self.fail("Accepted invalid prepend_scheme: 'blargg'")
        # Not including None in the allowed_schemes essentially disables
# prepending, so even though
# prepend_scheme has the invalid value 'http', we don't care!
x = IS_URL(allowed_schemes=['https'], prepend_scheme='https')
self.assertEqual(x('google.ca'), ('google.ca', 'Enter a valid URL'))
        # Not including None in the allowed_schemes essentially disables prepending, so even though
# prepend_scheme has the invalid value 'http', we don't care!
x = IS_URL(mode='generic', allowed_schemes=['https'],
prepend_scheme='https')
self.assertEqual(x('google.ca'), ('google.ca', 'Enter a valid URL'))
# ##############################################################################
class TestIsGenericUrl(unittest.TestCase):
x = IS_GENERIC_URL()
def testInvalidUrls(self):
urlsToCheckA = []
for i in range(0, 32) + [127]:
# Control characters are disallowed in any part of a URL
urlsToCheckA.append('http://www.benn' + chr(i) + '.ca')
urlsToCheckB = [
None,
'',
'http://www.no spaces allowed.com',
'http://www.benn.ca/no spaces allowed/',
'http://www.benn.ca/angle_<bracket/',
'http://www.benn.ca/angle_>bracket/',
'http://www.benn.ca/invalid%character',
'http://www.benn.ca/illegal%%20use',
'http://www.benn.ca/illegaluse%',
'http://www.benn.ca/illegaluse%0',
'http://www.benn.ca/illegaluse%x',
'http://www.benn.ca/ill%egaluse%x',
'http://www.benn.ca/double"quote/',
'http://www.curly{brace.com',
'http://www.benn.ca/curly}brace/',
'http://www.benn.ca/or|symbol/',
'http://www.benn.ca/back\slash',
'http://www.benn.ca/the^carat',
'http://left[bracket.me',
'http://www.benn.ca/right]bracket',
'http://www.benn.ca/angle`quote',
'-ttp://www.benn.ca',
'+ttp://www.benn.ca',
'.ttp://www.benn.ca',
'9ttp://www.benn.ca',
'ht;tp://www.benn.ca',
'ht@tp://www.benn.ca',
'ht&tp://www.benn.ca',
'ht=tp://www.benn.ca',
'ht$tp://www.benn.ca',
'ht,tp://www.benn.ca',
'ht:tp://www.benn.ca',
'htp://invalid_scheme.com',
]
failures = []
for url in urlsToCheckA + urlsToCheckB:
if self.x(url)[1] is None:
failures.append('Incorrectly accepted: ' + str(url))
if len(failures) > 0:
self.fail(failures)
def testValidUrls(self):
urlsToCheck = [
'ftp://ftp.is.co.za/rfc/rfc1808.txt',
'gopher://spinaltap.micro.umn.edu/00/Weather/California/Los%20Angeles',
'http://www.math.uio.no/faq/compression-faq/part1.html',
'mailto:[email protected]',
'news:comp.infosystems.www.servers.unix',
'telnet://melvyl.ucop.edu/',
'hTTp://www.benn.ca',
'%66%74%70://ftp.is.co.za/rfc/rfc1808.txt',
'%46%74%70://ftp.is.co.za/rfc/rfc1808.txt',
'/faq/compression-faq/part1.html',
'google.com',
'www.google.com:8080',
'128.127.123.250:8080',
'blargg:ping',
'http://www.benn.ca',
'http://benn.ca',
'http://amazon.com/books/',
'https://amazon.com/movies',
'rtsp://idontknowthisprotocol',
'HTTP://allcaps.com',
'http://localhost',
'http://localhost#fragment',
'http://localhost/hello',
'http://localhost/hello?query=True',
'http://localhost/hello/',
'http://localhost:8080',
'http://localhost:8080/',
'http://localhost:8080/hello',
'http://localhost:8080/hello/',
'file:///C:/Documents%20and%20Settings/Jonathan/Desktop/view.py',
]
failures = []
for url in urlsToCheck:
if self.x(url)[1] is not None:
failures.append('Incorrectly rejected: ' + str(url))
if len(failures) > 0:
self.fail(failures)
def testPrepending(self):
# Does not prepend scheme for abbreviated domains
self.assertEqual(self.x('google.ca'), ('google.ca', None))
# Does not prepend scheme for abbreviated domains
self.assertEqual(self.x('google.ca:8080'), ('google.ca:8080', None))
# Does not prepend when scheme already exists
self.assertEqual(self.x('https://google.ca'),
('https://google.ca', None))
# Does not prepend if None type is not specified in allowed_scheme,
# because a scheme is required
y = IS_GENERIC_URL(allowed_schemes=['http', 'blargg'],
prepend_scheme='http')
self.assertEqual(y('google.ca'), ('google.ca', 'Enter a valid URL'))
# ##############################################################################
class TestIsHttpUrl(unittest.TestCase):
x = IS_HTTP_URL()
def testInvalidUrls(self):
urlsToCheck = [
None,
'',
'http://invalid' + chr(2) + '.com',
'htp://invalid_scheme.com',
'blargg://invalid_scheme.com',
'http://-123.com',
'http://abcd-.ca',
'http://-abc123-.me',
'http://www.dom&ain.com/',
'http://www.dom=ain.com/',
'http://www.benn.ca&',
'http://%62%65%6E%6E%2E%63%61/path',
'http://.domain.com',
'http://.domain.com./path',
'http://domain..com',
'http://domain...at..com',
'http://domain.com..',
'http://domain.com../path',
'http://domain.3m',
'http://domain.-3m',
'http://domain.3m-',
'http://domain.-3m-',
'http://domain.co&m',
'http://domain.m3456',
'http://domain.m-3/path#fragment',
'http://domain.m---k/path?query=value',
'http://23.32..',
'http://23..32.56.0',
'http://38997.222.999',
'http://23.32.56.99.',
'http://.23.32.56.99',
'http://.23.32.56.99.',
'http://w127.123.0.256:8080',
'http://23.32.56.99:abcd',
'http://23.32.56.99:23cd',
'http://google.com:cd22',
'http://23.32:1300.56.99',
'http://www.yahoo:1600.com',
'path/segment/without/starting/slash',
'http://www.math.uio.no;param=3',
'://ABC.com:/%7esmith/home.html',
]
failures = []
for url in urlsToCheck:
if self.x(url)[1] is None:
failures.append('Incorrectly accepted: ' + str(url))
if len(failures) > 0:
self.fail(failures)
def testValidUrls(self):
urlsToCheck = [
'http://abc.com:80/~smith/home.html',
'http://ABC.com/%7Esmith/home.html',
'http://ABC.com:/%7esmith/home.html',
'http://www.math.uio.no/faq/compression-faq/part1.html',
'//google.ca/faq/compression-faq/part1.html',
'//google.ca/faq;param=3',
'//google.ca/faq/index.html?query=5',
'//google.ca/faq/index.html;param=value?query=5',
'/faq/compression-faq/part1.html',
'/faq;param=3',
'/faq/index.html?query=5',
'/faq/index.html;param=value?query=5',
'google.com',
'benn.ca/init/default',
'benn.ca/init;param=value/default?query=value',
'http://host-name---with-dashes.me',
'http://www.host-name---with-dashes.me',
'http://a.com',
'http://a.3.com',
'http://a.bl-ck.com',
'http://bl-e.b.com',
'http://host123with456numbers.ca',
'http://1234567890.com.',
'http://1234567890.com./path',
'http://google.com./path',
'http://domain.xn--0zwm56d',
'http://127.123.0.256',
'http://127.123.0.256/document/drawer',
'127.123.0.256/document/',
'156.212.123.100',
'http://www.google.com:180200',
'http://www.google.com:8080/path',
'http://www.google.com:8080',
'//www.google.com:8080',
'www.google.com:8080',
'http://127.123.0.256:8080/path',
'//127.123.0.256:8080',
'127.123.0.256:8080',
'http://example.me??query=value?',
'http://a.com',
'http://3.com',
'http://www.benn.ca',
'http://benn.ca',
'http://amazon.com/books/',
'https://amazon.com/movies',
'hTTp://allcaps.com',
'http://localhost',
'HTTPS://localhost.',
'http://localhost#fragment',
'http://localhost/hello;param=value',
'http://localhost/hello;param=value/hi;param2=value2;param3=value3',
'http://localhost/hello?query=True',
'http://www.benn.ca/hello;param=value/hi;param2=value2;param3=value3/index.html?query=3',
'http://localhost/hello/?query=1500&five=6',
'http://localhost:8080',
'http://localhost:8080/',
'http://localhost:8080/hello',
'http://localhost:8080/hello%20world/',
'http://www.a.3.be-nn.5.ca',
'http://www.amazon.COM',
]
failures = []
for url in urlsToCheck:
if self.x(url)[1] is not None:
failures.append('Incorrectly rejected: ' + str(url))
if len(failures) > 0:
self.fail(failures)
def testPrepending(self):
# prepends scheme for abbreviated domains
self.assertEqual(self.x('google.ca'), ('http://google.ca', None))
# prepends scheme for abbreviated domains
self.assertEqual(self.x('google.ca:8080'),
('http://google.ca:8080', None))
# does not prepend when scheme already exists
self.assertEqual(self.x('https://google.ca'),
('https://google.ca', None))
y = IS_HTTP_URL(
prepend_scheme='https', allowed_schemes=[None, 'https'])
self.assertEqual(y('google.ca'), (
'https://google.ca', None)) # prepends https if asked
z = IS_HTTP_URL(prepend_scheme=None)
self.assertEqual(z('google.ca:8080'), ('google.ca:8080',
None)) # prepending disabled
try:
IS_HTTP_URL(prepend_scheme='mailto')
except Exception, e:
if str(e)\
!= "prepend_scheme='mailto' is not in allowed_schemes=[None, 'http', 'https']":
self.fail('Wrong exception: ' + str(e))
else:
self.fail("Got invalid prepend_scheme: 'mailto'")
# Does not prepend if None type is not specified in allowed_scheme, because a scheme is required
a = IS_HTTP_URL(allowed_schemes=['http'])
self.assertEqual(a('google.ca'), ('google.ca', 'Enter a valid URL'))
self.assertEqual(a('google.ca:80'), ('google.ca:80',
'Enter a valid URL'))
class TestUnicode(unittest.TestCase):
x = IS_URL()
y = IS_URL(allowed_schemes=['https'], prepend_scheme='https')
#excludes the option for abbreviated URLs with no scheme
z = IS_URL(prepend_scheme=None)
# disables prepending the scheme in the return value
def testUnicodeToAsciiUrl(self):
self.assertEquals(unicode_to_ascii_authority(u'www.Alliancefran\xe7aise.nu'), 'www.xn--alliancefranaise-npb.nu')
self.assertEquals(
unicode_to_ascii_authority(u'www.benn.ca'), 'www.benn.ca')
self.assertRaises(UnicodeError, unicode_to_ascii_authority,
u'\u4e2d' * 1000) # label is too long
def testValidUrls(self):
self.assertEquals(self.x(u'www.Alliancefrancaise.nu'), (
'http://www.Alliancefrancaise.nu', None))
self.assertEquals(self.x(u'www.Alliancefran\xe7aise.nu'), (
'http://www.xn--alliancefranaise-npb.nu', None))
self.assertEquals(self.x(u'www.Alliancefran\xe7aise.nu:8080'), (
'http://www.xn--alliancefranaise-npb.nu:8080', None))
self.assertEquals(self.x(u'http://www.Alliancefran\xe7aise.nu'),
('http://www.xn--alliancefranaise-npb.nu', None))
self.assertEquals(self.x(u'http://www.Alliancefran\xe7aise.nu/parnaise/blue'), ('http://www.xn--alliancefranaise-npb.nu/parnaise/blue', None))
self.assertEquals(self.x(u'http://www.Alliancefran\xe7aise.nu/parnaise/blue#fragment'), ('http://www.xn--alliancefranaise-npb.nu/parnaise/blue#fragment', None))
self.assertEquals(self.x(u'http://www.Alliancefran\xe7aise.nu/parnaise/blue?query=value#fragment'), ('http://www.xn--alliancefranaise-npb.nu/parnaise/blue?query=value#fragment', None))
self.assertEquals(self.x(u'http://www.Alliancefran\xe7aise.nu:8080/parnaise/blue?query=value#fragment'), ('http://www.xn--alliancefranaise-npb.nu:8080/parnaise/blue?query=value#fragment', None))
self.assertEquals(self.x(u'www.Alliancefran\xe7aise.nu/parnaise/blue?query=value#fragment'), ('http://www.xn--alliancefranaise-npb.nu/parnaise/blue?query=value#fragment', None))
self.assertEquals(self.x(
u'http://\u4e2d\u4fd4.com'), ('http://xn--fiq13b.com', None))
self.assertEquals(self.x(u'http://\u4e2d\u4fd4.com/\u4e86'),
('http://xn--fiq13b.com/%4e%86', None))
self.assertEquals(self.x(u'http://\u4e2d\u4fd4.com/\u4e86?query=\u4e86'), ('http://xn--fiq13b.com/%4e%86?query=%4e%86', None))
self.assertEquals(self.x(u'http://\u4e2d\u4fd4.com/\u4e86?query=\u4e86#fragment'), ('http://xn--fiq13b.com/%4e%86?query=%4e%86#fragment', None))
self.assertEquals(self.x(u'http://\u4e2d\u4fd4.com?query=\u4e86#fragment'), ('http://xn--fiq13b.com?query=%4e%86#fragment', None))
self.assertEquals(
self.x(u'http://B\xfccher.ch'), ('http://xn--bcher-kva.ch', None))
self.assertEquals(self.x(u'http://\xe4\xf6\xfc\xdf.com'), (
'http://xn--ss-uia6e4a.com', None))
self.assertEquals(self.x(
u'http://visegr\xe1d.com'), ('http://xn--visegrd-mwa.com', None))
self.assertEquals(self.x(u'http://h\xe1zipatika.com'), (
'http://xn--hzipatika-01a.com', None))
self.assertEquals(self.x(u'http://www.\xe7ukurova.com'), (
'http://www.xn--ukurova-txa.com', None))
self.assertEquals(self.x(u'http://nixier\xf6hre.nixieclock-tube.com'), ('http://xn--nixierhre-57a.nixieclock-tube.com', None))
self.assertEquals(self.x(u'google.ca.'), ('http://google.ca.', None))
self.assertEquals(
self.y(u'https://google.ca'), ('https://google.ca', None))
self.assertEquals(self.y(
u'https://\u4e2d\u4fd4.com'), ('https://xn--fiq13b.com', None))
self.assertEquals(self.z(u'google.ca'), ('google.ca', None))
def testInvalidUrls(self):
self.assertEquals(
self.x(u'://ABC.com'), (u'://ABC.com', 'Enter a valid URL'))
self.assertEquals(self.x(u'http://\u4e2d\u4fd4.dne'), (
u'http://\u4e2d\u4fd4.dne', 'Enter a valid URL'))
self.assertEquals(self.x(u'https://google.dne'), (
u'https://google.dne', 'Enter a valid URL'))
self.assertEquals(self.x(u'https://google..ca'), (
u'https://google..ca', 'Enter a valid URL'))
self.assertEquals(
self.x(u'google..ca'), (u'google..ca', 'Enter a valid URL'))
self.assertEquals(self.x(u'http://' + u'\u4e2d' * 1000 + u'.com'), (
u'http://' + u'\u4e2d' * 1000 + u'.com', 'Enter a valid URL'))
self.assertEquals(self.x(u'http://google.com#fragment_\u4e86'), (
u'http://google.com#fragment_\u4e86', 'Enter a valid URL'))
self.assertEquals(self.x(u'http\u4e86://google.com'), (
u'http\u4e86://google.com', 'Enter a valid URL'))
self.assertEquals(self.x(u'http\u4e86://google.com#fragment_\u4e86'), (
u'http\u4e86://google.com#fragment_\u4e86', 'Enter a valid URL'))
self.assertEquals(self.y(u'http://\u4e2d\u4fd4.com/\u4e86'), (
u'http://\u4e2d\u4fd4.com/\u4e86', 'Enter a valid URL'))
#self.assertEquals(self.y(u'google.ca'), (u'google.ca', 'Enter a valid URL'))
self.assertEquals(self.z(u'invalid.domain..com'), (
u'invalid.domain..com', 'Enter a valid URL'))
self.assertEquals(self.z(u'invalid.\u4e2d\u4fd4.blargg'), (
u'invalid.\u4e2d\u4fd4.blargg', 'Enter a valid URL'))
# ##############################################################################
if __name__ == '__main__':
unittest.main()
| 41.930267 | 202 | 0.531474 |
79448ba4edecb211ee8a70053446074ece407749 | 2,302 | py | Python | src/atcoder/arc002/c/sol_0.py | kagemeka/competitive-programming | c70fe481bcd518f507b885fc9234691d8ce63171 | [
"MIT"
] | 1 | 2021-07-11T03:20:10.000Z | 2021-07-11T03:20:10.000Z | src/atcoder/arc002/c/sol_0.py | kagemeka/competitive-programming | c70fe481bcd518f507b885fc9234691d8ce63171 | [
"MIT"
] | 39 | 2021-07-10T05:21:09.000Z | 2021-12-15T06:10:12.000Z | src/atcoder/arc002/c/sol_0.py | kagemeka/competitive-programming | c70fe481bcd518f507b885fc9234691d8ce63171 | [
"MIT"
] | null | null | null | import typing
class ReadStdin:
def __call__(
self,
) -> bytes:
return next(self.__chunks)
def __init__(
self,
) -> typing.NoReturn:
import sys
self.__buf = (
sys.stdin.buffer
)
self.__chunks = (
self.__read_chunks()
)
def int(
self,
) -> int:
return int(self())
def __read_chunks(
self,
) -> typing.Iterator[bytes]:
while 1:
l = self.__buf.readline()
for chunk in l.split():
yield chunk
def str(
self,
) -> str:
b = self()
return b.decode()
from abc import (
ABC,
abstractmethod,
)
import typing
class Solver(
ABC,
):
def __call__(
self,
) -> typing.NoReturn:
self._prepare()
self._solve()
def __init__(
self,
) -> typing.NoReturn:
...
@abstractmethod
def _prepare(
self,
) -> typing.NoReturn:
...
@abstractmethod
def _solve(
self,
) -> typing.NoReturn:
...
import typing
import itertools
class Problem(
Solver,
):
def __init__(
self,
) -> typing.NoReturn:
self.__read = ReadStdin()
self.__pool = 'ABXY'
def _prepare(
self,
) -> typing.NoReturn:
read = self.__read
self.__n = read.int()
self.__s = read.str()
def _solve(
self,
) -> typing.NoReturn:
self.__make_combs()
cnt = 1 << 10
for comb in self.__combs:
self.__comb = set(comb)
c = self.__calc()
cnt = min(cnt, c)
print(cnt)
def __calc(
self,
) -> int:
n = self.__n
s = self.__s + '$'
comb = self.__comb
inf = 1 << 10
dp = [
[inf] * 2
for _ in range(n + 1)
]
dp[0][0] = 0
for i in range(n):
dp[i + 1][0] = min(
dp[i][0] + 1,
dp[i][1],
)
w = s[i:i + 2]
if not w in comb:
continue
dp[i + 1][1] = (
dp[i][0] + 1
)
return dp[-1][0]
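    # Rough idea behind __calc (informal sketch): dp[i][0] holds the fewest
    # button presses needed to type s[:i]; when the two-character window
    # s[i:i + 2] matches one of the two registered macros in self.__comb, it
    # can be typed with a single press, carried forward through dp[i + 1][1].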
def __make_combs(
self,
) -> typing.NoReturn:
p = itertools.product(
self.__pool,
repeat=2,
)
p = map(
lambda x: ''.join(x),
p,
)
c = itertools.combinations(
p,
2,
)
self.__combs = c
def main():
p = Problem()
t = 1
# t = ReadStdin().int()
for _ in range(t): p()
if __name__ == '__main__':
main() | 13.229885 | 31 | 0.500434 |
79448c30a3e0d52c5327c130d0386d20d02e7c16 | 1,679 | py | Python | tests/tools/test_translate.py | stranac/voice-skill-sdk | 8bfbbedf36ed4e4b2ff865deffe4dee804d57031 | [
"MIT"
] | 18 | 2020-11-25T12:58:36.000Z | 2022-01-06T21:13:52.000Z | tests/tools/test_translate.py | stranac/voice-skill-sdk | 8bfbbedf36ed4e4b2ff865deffe4dee804d57031 | [
"MIT"
] | 28 | 2020-11-27T08:45:57.000Z | 2022-03-31T09:01:48.000Z | tests/tools/test_translate.py | stranac/voice-skill-sdk | 8bfbbedf36ed4e4b2ff865deffe4dee804d57031 | [
"MIT"
] | 15 | 2020-11-30T08:19:44.000Z | 2022-03-10T13:07:05.000Z | #
# voice-skill-sdk
#
# (C) 2021, Deutsche Telekom AG
#
# This file is distributed under the terms of the MIT license.
# For details see the file LICENSE in the top directory.
#
#
import pathlib
import tempfile
from unittest.mock import patch
import respx
from httpx import Response
from skill_sdk.tools.translate import download_translations
LOCALE_INFO = {
"supportedLanguages": [{"code": "de"}, {"code": "en"}, {"code": "whateva"}]
}
TRANSLATIONS = [
{
"locale": "de",
"scope": "unnamed-skill",
"sentences": ["SCHLÜSSEL1", "SCHLÜSSEL2"],
"tenant": "GLOBAL",
"tag": "KEY",
},
{
"locale": "en",
"scope": "unnamed-skill",
"sentences": ["KEY1", "KEY2"],
"tenant": "GLOBAL",
"tag": "KEY",
},
]
@respx.mock
def test_download_translations():
respx.get("http://service-text-service:1555/v1/text/info/locale").mock(
return_value=Response(200, json=LOCALE_INFO)
)
respx.get("http://service-text-service:1555/v1/text/scope/unnamed-skill").mock(
return_value=Response(200, json=TRANSLATIONS)
)
with tempfile.TemporaryDirectory() as tmp:
with patch("skill_sdk.i18n.LOCALE_DIR", tmp):
download_translations(
"http://service-text-service:1555/v1/text", "unnamed-skill"
)
assert (
pathlib.Path(tmp) / "en.yaml"
).read_text() == "KEY:\n- KEY1\n- KEY2\n"
assert (
pathlib.Path(tmp) / "de.yaml"
).read_text() == "KEY:\n- SCHLÜSSEL1\n- SCHLÜSSEL2\n"
assert (pathlib.Path(tmp) / "whateva.yaml").exists() is False
| 26.650794 | 83 | 0.586659 |
79448d53eb6aeb08e3dc9998b6efe5b2fda40d38 | 5,350 | py | Python | FileDumpParser.py | B-Rad80/code-components | f0aa4479b2329570ad56af31903467e0e166952f | [
"BSD-3-Clause"
] | null | null | null | FileDumpParser.py | B-Rad80/code-components | f0aa4479b2329570ad56af31903467e0e166952f | [
"BSD-3-Clause"
] | 8 | 2020-02-11T23:32:59.000Z | 2022-03-11T23:36:51.000Z | FileDumpParser.py | B-Rad80/code-components | f0aa4479b2329570ad56af31903467e0e166952f | [
"BSD-3-Clause"
] | null | null | null | # FileDumpParser
import csv
import os
import zipfile
import sys
import io
import glob
import pyap
import docx
import codecs
class FileDumpParser:
    def __init__(self, d=False):
        # `d` toggles debug output in the parsing helpers below.
        self.og = os.getcwd()
        self.debug = d
def unzip(self, file):
if(self.debug):
cwd = os.path.join(self.og, "ZipFIles")
print(cwd)
if(os.path.isdir(file.name[:-4])):
print("directory already exists")
return "fuck"
cwd = self.og
os.chdir(cwd) # change directory from working dir to dir with files
ret = ''
if(zipfile.is_zipfile(file)):
print("is zip")
with zipfile.ZipFile(file, "r") as zip_ref:
ret = zip_ref.extractall(cwd)
zip_ref.close() # close file
else:
print(file, "is not an existing Zipfile!")
ret = file.name[:-4]
return ret
def parseCSV(self, file):
# file = "CVSFiles/"+
if(self.debug):
cwd = os.path.join(self.og, "mysite/CSVFiles")
print(cwd)
# os.chdir(cwd)
addy_List = []
noAddyList = []
decoded_file = file.read().decode('utf-16')
io_string = io.StringIO(decoded_file)
line_count = 0
for row in csv.reader(io_string, delimiter=',', quotechar='|'):
if(row == []):
print(addy_List)
print("\n\nNO ADDR LIST \n\n\n\n")
print(noAddyList)
ret = [addy_List, noAddyList]
return ret
print(line_count)
if line_count == 0:
                print(f'Column names are {", ".join(row)}')
elif row[3] != "Rejected":
if(row[4] != ""):
tl = [row[0], row[4]]
addy_List.append(tl)
else:
tl = [row[0]]
noAddyList.append(tl)
line_count += 1
print(addy_List)
print("\n\nNO ADDR LIST \n\n\n\n")
print(noAddyList)
ret = [addy_List, noAddyList]
return ret
def Address_Search(self, test_address): # NOT in use
addresses = pyap.parse(test_address, country='US')
for address in addresses:
# shows found address
print(address)
# shows address parts
print(address.as_dict())
def Docx_to_Text(self, filename): # not in use
noaddylist = []
addylist = []
if(self.debug):
#cwd = os.path.join(self.og, "CSVFiles")
cwd = self.og
print(cwd)
print(os.path.isfile(filename))
doc = docx.Document(filename)
fullText = []
store =""
for para in doc.paragraphs:
tmp = para.text
print(str(para.text), "this is teh paragraph")
store = store + "\n" + tmp
addresses = []
addresses = pyap.parse(store, country='US')
# print(addresses)
addy = []
for address in addresses:
addy.append(str(address))
if(addy == []):
print(addy[0], "no addres!")
tlist = [filename]
noaddylist.append(file)
else:
print(addy[0], "found address with name", filename)
tlist = [filename, addy[0]]
addylist.append(tlist)
ret = [addylist, noaddylist]
print(ret, "= ret")
return ret
def read_through_folder(self, filename):
ret = []
if(self.debug):
cwd = os.path.join(self.og, "CSVFiles")
print(filename, " is path?: ",os.path.isdir(filename))
cwd = os.path.join(self.og, filename)
os.chdir(cwd)
print(cwd)
for file in glob.glob('*.txt'):
ret.append(file)
return ret
def Text_to_String(self, filename):
ret = []
noaddylist = []
addylist = []
if(self.debug):
cwd = os.path.join(self.og, "CSVFiles")
print(os.path.isdir(filename))
print(filename)
cwd = os.path.join(self.og, filename)
os.chdir(cwd)
print(cwd)
for file in glob.glob('*.txt'):
temp = open(file, 'r').read().strip()
addresses = []
addresses = pyap.parse(temp, country='US')
# print(addresses)
addy = []
for address in addresses:
addy.append(str(address))
if(addy == []):
print(addy[0], "no addres!")
tlist = [file]
noaddylist.append(file)
else:
print(addy[0], "found address with name", file)
tlist = [file, addy[0]]
addylist.append(tlist)
os.remove(file)
ret = [addylist, noaddylist]
print(ret, "= ret")
cwd = os.chdir("../")
print(cwd)
os.rmdir(filename)
return ret
# TEST Cases
# heheheheh "bigdumper" .... much funny
bigdumper = FileDumpParser(True)
print("Testing...\n")
print(bigdumper.Docx_to_Text("test1.docx"))
# print(bigdumper.unzip("Test1.zip"))
#bigdumper.Address_Search(bigdumper.Text_to_String("test1.txt"))
| 27.720207 | 76 | 0.504486 |
79448d571d2ff58ca90620043ea066847c719dd1 | 4,697 | py | Python | changes.py | hugovk/smtpapi-python | 21239b1c652b149c21bbd05c7fae0ef343ba2095 | [
"MIT"
] | 30 | 2015-01-13T17:13:55.000Z | 2022-03-11T23:22:17.000Z | changes.py | hugovk/smtpapi-python | 21239b1c652b149c21bbd05c7fae0ef343ba2095 | [
"MIT"
] | 97 | 2015-01-23T16:17:50.000Z | 2021-03-11T03:14:24.000Z | changes.py | hugovk/smtpapi-python | 21239b1c652b149c21bbd05c7fae0ef343ba2095 | [
"MIT"
] | 65 | 2015-07-14T16:19:52.000Z | 2022-02-03T17:09:14.000Z | #!/usr/bin/python
"""
Small python script that, when run, will update the CHANGELOG with information
about all merged pull requests since the previous release.
This script must be run after tagging the latest version
It checks the log of commits since the previous tag and parses it
"""
import re
import subprocess
import sys
from datetime import datetime
# Regex patterns
RELEASE_MD_PATTERN = re.compile(r'## \[(\d+\.\d+\.\d+)\]')
MERGED_PR_PATTERN = re.compile(
r'([0-9a-f]{7}) Merge pull request #(\d+) from (.+)/.+'
)
TAG_PATTERN = re.compile(
r'refs/tags/v(\d+\.\d+\.\d+) (\w{3} \w{3} \d{1,2} \d{2}:\d{2}:\d{2} \d{4})'
)
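# Illustrative samples (made-up commit hash, PR number and tag) of the lines the
# two patterns above are written to match:
#   'abc1234 Merge pull request #123 from octocat/fix-typo'
#       -> groups: ('abc1234', '123', 'octocat')
#   'refs/tags/v3.1.0 Mon Jan 6 12:00:00 2020'
#       -> groups: ('3.1.0', 'Mon Jan 6 12:00:00 2020')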
# PR Type terms
FIX_TERMS = ['fix', 'change', 'update']
# Helper functions
def generate_pr_link(pr_num):
"""
Returns a markdown link to a PR in this repo given its number
"""
return (
'[PR #{0}](https://github.com/sendgrid/smtpapi-python/pulls/{0})'
).format(pr_num)
def generate_user_link(user):
"""
Returns a markdown link to a user
"""
return '[@{0}](https://github.com/{0})'.format(user)
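# For example (illustrative values only):
#   generate_pr_link(42)          -> '[PR #42](https://github.com/sendgrid/smtpapi-python/pulls/42)'
#   generate_user_link('octocat') -> '[@octocat](https://github.com/octocat)'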
# Get latest tag
command = ['git', 'tag', '--format=%(refname) %(creatordate)']
res = subprocess.run(command, capture_output=True, text=True)
if res.returncode != 0:
print('Error occurred when running git tag command:', str(res.stderr))
sys.exit(1)
# Get the last line and get the tag number
latest_release_match = TAG_PATTERN.match(
list(filter(None, res.stdout.split('\n')))[-1],
)
latest_release = latest_release_match[1]
latest_release_date = datetime.strptime(
latest_release_match[2], '%a %b %d %H:%M:%S %Y',
)
print('Generating CHANGELOG for', latest_release)
# Read in the CHANGELOG file first
with open('CHANGELOG.md') as f:
# Read the text in as a list of lines
old_text = f.readlines()
# Get the latest release (top of the CHANGELOG)
for line in old_text:
match = RELEASE_MD_PATTERN.match(line)
if match:
prev_release = match[1]
break
if latest_release == prev_release:
print(
'The latest git tag matches the last release in the CHANGELOG. '
'Please tag the repository before running this script.'
)
sys.exit(1)
# Use git log to list all commits between that tag and HEAD
command = 'git log --oneline v{}..@'.format(prev_release).split(' ')
res = subprocess.run(command, capture_output=True, text=True)
if res.returncode != 0:
print('Error occurred when running git log command:', str(res.stderr))
sys.exit(1)
# Parse the output from the above command to find all commits for merged PRs
merge_commits = []
for line in res.stdout.split('\n'):
match = MERGED_PR_PATTERN.match(line)
if match:
merge_commits.append(match)
# Determine the type of PR from the commit message
added, fixes = [], []
for commit in merge_commits:
# Get the hash of the commit and get the message of it
commit_sha = commit[1]
command = 'git show {} --format=format:%B'.format(commit_sha).split(' ')
res = subprocess.run(command, capture_output=True, text=True)
out = res.stdout.lower()
is_added = True
# When storing we need the PR title, number and user
data = {
# 3rd line of the commit message is the PR title
'title': out.split('\n')[2],
'number': commit[2],
'user': commit[3],
}
for term in FIX_TERMS:
if term in out:
fixes.append(data)
is_added = False
break
if is_added:
added.append(data)
# Now we need to write out the CHANGELOG again
with open('CHANGELOG.md', 'w') as f:
# Write out the header lines first
for i in range(0, 3):
f.write(old_text[i])
# Create and write out the new version information
latest_release_date_string = latest_release_date.strftime('%Y-%m-%d')
f.write('## [{}] - {} ##\n'.format(
latest_release,
latest_release_date_string,
))
# Add the stuff that was added
f.write('### Added\n')
for commit in added:
f.write('- {}: {}{} (via {})\n'.format(
generate_pr_link(commit['number']),
commit['title'],
'.' if commit['title'][-1] != '.' else '',
generate_user_link(commit['user'])
))
f.write('\n')
# Add the fixes
f.write('### Fixes\n')
for commit in fixes:
f.write('- {}: {}{} (via {})\n'.format(
generate_pr_link(commit['number']),
commit['title'],
'.' if commit['title'][-1] != '.' else '',
generate_user_link(commit['user'])
))
f.write('\n')
# Add the old stuff
for i in range(3, len(old_text)):
f.write(old_text[i])
| 30.5 | 79 | 0.622099 |
79448d71b6e9c87943ab8a65b3075d42d6ff57de | 593 | py | Python | src/ana3/__init__.py | balazsfazekas/Assignment-3-ANA | d23f968d9a98405f2cfa850895b7235155d98937 | [
"MIT"
] | null | null | null | src/ana3/__init__.py | balazsfazekas/Assignment-3-ANA | d23f968d9a98405f2cfa850895b7235155d98937 | [
"MIT"
] | 3 | 2021-11-11T07:32:01.000Z | 2021-11-23T15:42:26.000Z | src/ana3/__init__.py | balazsfazekas/Assignment-3-ANA | d23f968d9a98405f2cfa850895b7235155d98937 | [
"MIT"
] | null | null | null | import sys
if sys.version_info[:2] >= (3, 8):
# TODO: Import directly (no need for conditional) when `python_requires = >= 3.8`
from importlib.metadata import PackageNotFoundError, version # pragma: no cover
else:
from importlib_metadata import PackageNotFoundError, version # pragma: no cover
try:
# Change here if project is renamed and does not equal the package name
dist_name = __name__
__version__ = version(dist_name)
except PackageNotFoundError: # pragma: no cover
__version__ = "unknown"
finally:
del version, PackageNotFoundError
| 34.882353 | 86 | 0.716695 |
7944917e83c4a68ab37cf8d609354f2a737f3318 | 477 | py | Python | todos/migrations/0004_alter_todo_author.py | bemot/react_todo_mac | 64bd6de5da675cb5227e20f7ba871a26fb2e6510 | [
"MIT"
] | null | null | null | todos/migrations/0004_alter_todo_author.py | bemot/react_todo_mac | 64bd6de5da675cb5227e20f7ba871a26fb2e6510 | [
"MIT"
] | null | null | null | todos/migrations/0004_alter_todo_author.py | bemot/react_todo_mac | 64bd6de5da675cb5227e20f7ba871a26fb2e6510 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-08-31 11:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('todos', '0003_todo_author'),
]
operations = [
migrations.AlterField(
model_name='todo',
name='author',
field=models.ForeignKey(default='unknown', on_delete=django.db.models.deletion.CASCADE, to='todos.author'),
),
]
| 23.85 | 119 | 0.633124 |
7944919fd3dc83bc982abfa77e1dd65fcb598a45 | 3,676 | py | Python | nbaspa/model/tasks/metrics.py | ak-gupta/nbaspa | db961717bb23854e0373b7732638021a18d909f5 | [
"MIT"
] | 1 | 2021-02-21T00:44:06.000Z | 2021-02-21T00:44:06.000Z | nbaspa/model/tasks/metrics.py | ak-gupta/nbaspa | db961717bb23854e0373b7732638021a18d909f5 | [
"MIT"
] | 22 | 2021-02-21T16:41:39.000Z | 2021-11-27T16:12:33.000Z | nbaspa/model/tasks/metrics.py | ak-gupta/nbaspa | db961717bb23854e0373b7732638021a18d909f5 | [
"MIT"
] | null | null | null | """Define some metrics for evaluating the model."""
from typing import Callable, List, Optional, Union
import numpy as np
import pandas as pd
from prefect import Task
from sklearn.metrics import roc_auc_score
from .meta import META
class AUROC(Task):
"""Calculate the AUROC score."""
def run(self, data: pd.DataFrame, mode: Optional[str] = "survival") -> float: # type: ignore
"""Calculate the AUROC score.
Parameters
----------
data : pd.DataFrame
The output of ``WinProbability.run()``.
mode : str, optional (default "survival")
The mode, either ``survival`` or ``benchmark``
Returns
-------
float
The AUROC score from ``scikit-learn``.
"""
output = roc_auc_score(y_true=data[META["event"]], y_score=data[META[mode]])
self.logger.info(f"Model has a AUROC value of {np.round(output, 3)}")
return output
class AUROCLift(Task):
"""Calculate the lift in AUROC between two sequences."""
def run( # type: ignore
self,
benchmark: Union[List[float], np.ndarray],
test: Union[List[float], np.ndarray],
) -> np.ndarray:
"""Calculate the lift in AUROC between two sequences.
For our purposes, we will be calculating the AUROC across the entire game. This
task will help produce a series comparing the survival model to the benchmark NBA
win probability model.
Parameters
----------
benchmark : np.ndarray
The benchmark series.
test : np.ndarray
The test series.
Returns
-------
np.ndarray
The output lift series.
"""
if isinstance(test, list):
test = np.array(test)
self.logger.info(
f"Test model has average AUROC of {np.round(np.average(test), 3)}"
)
if isinstance(benchmark, list):
benchmark = np.array(benchmark)
self.logger.info(
f"Benchmark model has average AUROC of {np.round(np.average(benchmark), 3)}"
)
return (test / benchmark) - 1
class MeanAUROCLift(Task):
"""Calculate the weighted average AUROC lift over gametime."""
def run( # type: ignore
self,
lift: np.ndarray,
timestep: List[int],
weight_func: Optional[Callable] = None,
) -> float:
"""Calculate the weighted average AUROC lift over gametime.
Parameters
----------
lift : np.ndarray
An array of the AUROC lift from ``AUROCLift.run()`` at each time step.
timestep : list
The list of time periods for each AUROC calculation. Used to calculate
weighting.
weight_func : Callable, optional (default None)
The function to apply to the ``timestep`` list before multiplying by
the lift value.
Returns
-------
float
The weighted average AUROC lift.
Examples
--------
>>> auroc = np.array([0.5, 0.6, 0.7])
>>> times = [10, 20, 30]
>>> MeanAUROCLift().run(auroc, times, np.log1p)
0.61167242753803508
If you don't provide a weight function,
>>> MeanAUROCLift().run(auroc, times)
0.59999999999999998
"""
if weight_func is not None:
weights = weight_func(timestep)
else:
weights = None
result = np.average(lift, weights=weights)
self.logger.info(
f"Found a weighted average AUROC lift of {np.round(result * 100, 3)}%"
)
return result
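# Minimal usage sketch (illustrative numbers only, not part of the pipeline):
# the tasks can be exercised outside a Prefect flow by calling ``run()`` directly.
if __name__ == "__main__":
    _lift = AUROCLift().run(
        benchmark=np.array([0.60, 0.62]), test=np.array([0.66, 0.68])
    )
    print(MeanAUROCLift().run(_lift, timestep=[720, 1440], weight_func=np.log1p))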
| 28.944882 | 97 | 0.571545 |
7944922918bae832081fb3179c692f0ff01a89d2 | 2,614 | py | Python | configs/configs_mnist_auto/mnist_auto_dfc_ssa.py | meulemansalex/deep_feedback_control | 0a592c595334ce81d0c753f65f1cde7c02c8222b | [
"Apache-2.0"
] | 4 | 2021-06-15T06:28:29.000Z | 2021-11-27T17:59:05.000Z | configs/configs_mnist_auto/mnist_auto_dfc_ssa.py | meulemansalex/deep_feedback_control | 0a592c595334ce81d0c753f65f1cde7c02c8222b | [
"Apache-2.0"
] | null | null | null | configs/configs_mnist_auto/mnist_auto_dfc_ssa.py | meulemansalex/deep_feedback_control | 0a592c595334ce81d0c753f65f1cde7c02c8222b | [
"Apache-2.0"
] | null | null | null | config = {
'lr': 0.0001867134052079835,
'alpha_di': 0.036883035536535616,
'k_p_fb': 0.006322358048692642,
'alpha_fb': 0.7332531549909285,
'feedback_wd': 0.1,
'time_constant_ratio_fb': 0.006487346213972566,
'dt_di_fb': 0.0032644966090448454,
'apical_time_constant_fb': 0.2725371146036905,
'proactive_controller': True,
'lr_fb_init': 0.00016397270803247601,
'sigma': 0.0018360352597175634,
'tmax_di_fb': 60.0,
'lr_fb': 1.2260433974696165e-05,
'extra_fb_epochs': 2.0,
'target_stepsize': 0.02027700334158571,
'epsilon': 2.7788915666203028e-08,
'epsilon_fb': 5.849519739358543e-07,
'dataset': 'mnist_autoencoder',
'num_train': 1000,
'num_test': 1000,
'num_val': 1000,
'no_preprocessing_mnist': False,
'no_val_set': False,
'epochs': 25,
'batch_size': 128,
'optimizer': 'Adam',
'optimizer_fb': 'Adam',
'momentum': 0.0,
'forward_wd': 0,
'train_parallel': False,
'normalize_lr': True,
'epochs_fb': 15,
'freeze_forward_weights': False,
'freeze_fb_weights': False,
'freeze_fb_weights_output': True,
'shallow_training': False,
'extra_fb_minibatches': 0,
'only_train_first_layer': False,
'train_only_feedback_parameters': False,
'clip_grad_norm': 1.0,
'grad_deltav_cont': False,
'beta1': 0.99,
'beta2': 0.99,
'beta1_fb': 0.99,
'beta2_fb': 0.99,
'num_hidden': 3,
'size_hidden': [256, 32, 256],
'size_input': 784,
'size_output': 784,
'hidden_activation': ['tanh', 'linear', 'tanh'],
'output_activation': 'linear',
'no_bias': False,
'network_type': 'DFC',
'initialization': 'xavier_normal',
'fb_activation': 'tanh',
'no_cuda': False,
'random_seed': 42,
'cuda_deterministic': False,
'hpsearch': False,
'multiple_hpsearch': False,
'single_precision': False,
'evaluate': False,
'out_dir': 'logs/runs/mnist_auot_lin_ndi',
'save_logs': False,
'save_BP_angle': False,
'save_GN_angle': False,
'save_GNT_angle': False,
'save_condition_gn': False,
'save_df': False,
'gn_damping': 0.0,
'log_interval': 30,
'gn_damping_hpsearch': False,
'save_nullspace_norm_ratio': False,
'save_fb_statistics_init': False,
'compute_gn_condition_init': False,
'ndi': True,
'dt_di': 0.001,
'tmax_di': 300,
'epsilon_di': 0.5,
'reset_K': False,
'initialization_K': 'xavier_normal',
'noise_K': 0.0,
'compare_with_ndi': False,
'learning_rule': 'nonlinear_difference',
'use_initial_activations': False,
'c_homeostatic': -1,
'k_p': 0.0,
'inst_system_dynamics': False,
'noisy_dynamics': False,
'fb_learning_rule': 'normal_controller',
'inst_transmission': True,
'inst_transmission_fb': True,
'time_constant_ratio': 0.005,
'apical_time_constant': -1,
'use_homeostatic_wd_fb': False,
'efficient_controller': True,
'simulate_layerwise': True,
'sigma_output_fb': 0.0,
}
| 25.881188 | 48 | 0.743305 |
7944923ff200da0c0f765f222bcdae2abf9df6bb | 1,214 | py | Python | rapid7_insightvm/komand_rapid7_insightvm/actions/download_report/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | rapid7_insightvm/komand_rapid7_insightvm/actions/download_report/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | rapid7_insightvm/komand_rapid7_insightvm/actions/download_report/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
ID = "id"
INSTANCE = "instance"
class Output:
REPORT = "report"
class DownloadReportInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"id": {
"type": "integer",
"title": "Report ID",
"description": "Identifier of the report to download e.g. 265",
"order": 1
},
"instance": {
"type": "string",
"title": "Instance",
"description": "The identifier of the report instance, 'latest' or ID e.g. 100",
"order": 2
}
},
"required": [
"instance",
"id"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class DownloadReportOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"report": {
"type": "string",
"title": "Report",
"displayType": "bytes",
"description": "Base64 encoded report",
"format": "bytes",
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 18.676923 | 86 | 0.5486 |
794493cd7e018d18326fccf054c516997bb69ed8 | 2,908 | py | Python | pypureclient/flashblade/FB_2_3/models/group_quota_patch.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flashblade/FB_2_3/models/group_quota_patch.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flashblade/FB_2_3/models/group_quota_patch.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.3, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_3 import models
class GroupQuotaPatch(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
required_args = {
}
def __init__(
self,
):
"""
Keyword args:
"""
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `GroupQuotaPatch`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GroupQuotaPatch, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GroupQuotaPatch):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.433962 | 116 | 0.541609 |
794495fe14116d489887f5b7c960d85f850b65c7 | 411 | py | Python | Challenge005.py | lilimonroy/TheBasics001-011Challenges | 9c45dbd6bb0ab922e81409b5bf289fb908d51bb3 | [
"MIT"
] | null | null | null | Challenge005.py | lilimonroy/TheBasics001-011Challenges | 9c45dbd6bb0ab922e81409b5bf289fb908d51bb3 | [
"MIT"
] | null | null | null | Challenge005.py | lilimonroy/TheBasics001-011Challenges | 9c45dbd6bb0ab922e81409b5bf289fb908d51bb3 | [
"MIT"
] | null | null | null | #----------* CHALLENGE 5 *----------
#Ask the user to enter three numbers. Add together the first two numbers and then multiply this total by the third.
# Display the answer as The answer is [answer].
num1 = int(input("Enter the first number: "))
num2 = int(input("Enter the second number: "))
num3 = int(input("Enter the third number: "))
total = (num1 + num2)*num3
print("The answer is: ",total) | 37.363636 | 117 | 0.654501 |
794497064ec6cb826dee5d4a196a5ee9e9fe8cdc | 6,224 | py | Python | dateparser/date_parser.py | ASOdesk/dateparser | d8050511772c30199d14cd8506d46f9c587c61a8 | [
"BSD-3-Clause"
] | null | null | null | dateparser/date_parser.py | ASOdesk/dateparser | d8050511772c30199d14cd8506d46f9c587c61a8 | [
"BSD-3-Clause"
] | null | null | null | dateparser/date_parser.py | ASOdesk/dateparser | d8050511772c30199d14cd8506d46f9c587c61a8 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals
import calendar
import regex as re
import sys
from datetime import datetime
from collections import OrderedDict
import six
from dateutil import parser
from dateutil.relativedelta import relativedelta
from .timezone_parser import pop_tz_offset_from_string
from .utils import strip_braces, apply_timezone
from .conf import apply_settings
binary_type = bytes if sys.version_info[0] == 3 else str
class new_parser(parser.parser):
"""
Implements an alternate parse method which supports preference to dates in future and past.
For more see issue #36
"""
def parse(self, timestr, default=None, ignoretz=False, settings=None, **kwargs):
# timestr needs to be a buffer as required by _parse
if isinstance(timestr, binary_type):
timestr = timestr.decode()
# Parse timestr
# handle dateutil>=2.5 tuple result first
try:
res, _ = self._parse(timestr, **kwargs)
except TypeError:
res = self._parse(timestr, **kwargs)
if res is None:
raise ValueError("unknown string format")
# Fill in missing date
new_date = self._populate(res, default, settings=settings)
# Clean hour and minutes, etc in case not defined
for e in ['hour', 'minute', 'second', 'microsecond']:
if not getattr(res, e):
new_date = new_date.replace(**{e: 0})
return new_date, self.get_period(res)
@staticmethod
def get_period(res):
periods = OrderedDict([
('day', ['day', 'weekday', 'hour', 'minute', 'second', 'microsecond']),
('month', ['month']),
('year', ['year']),
])
for period, markers in six.iteritems(periods):
for marker in markers:
if getattr(res, marker) is not None:
return period
@classmethod
def _populate(cls, res, default, settings=None):
new_date = default
# Populate all fields
repl = {}
for field in ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond']:
value = getattr(res, field)
if value is not None:
repl[field] = value
# Fix day and populate date with result
repl_copy = repl.copy()
repl_copy['day'] = cls.get_valid_day(repl, new_date, settings=settings)
new_date = new_date.replace(**repl_copy)
# Fix weekday
if res.weekday is not None and not res.day:
new_date = new_date + relativedelta(weekday=res.weekday)
if new_date > datetime.utcnow():
new_date -= relativedelta(days=7)
# Correct date and return
return cls._correct(new_date, [key + 's' for key in repl.keys()], default, settings=settings)
@staticmethod
@apply_settings
def get_valid_day(res, new_date, settings=None):
_, tail = calendar.monthrange(res.get('year', new_date.year),
res.get('month', new_date.month))
if 'day' in res:
if res['day'] > tail:
raise ValueError('Day not in range for month',)
else:
return res['day']
options = {
'first': 1,
'last': tail,
'current': new_date.day if new_date.day <= tail else tail
}
return options[settings.PREFER_DAY_OF_MONTH]
@classmethod
def _correct(cls, date, given_fields, default, settings=None):
if settings.PREFER_DATES_FROM == 'current_period':
return date
for field in ['microseconds', 'seconds', 'minutes', 'hours', 'days',
'weeks', 'months', 'years']:
# Can't override a given field
if field in given_fields:
continue
# Try if applying the delta for this field corrects the problem
delta = relativedelta(**{field: 1})
# Run through corrections
corrected_date = cls._correct_for_future(date, delta, default, settings)
corrected_date = cls._correct_for_past(corrected_date, delta, default, settings)
# check if changed
if corrected_date != date:
date = corrected_date
break
return date
@staticmethod
def _correct_for_future(date, delta, default, settings=None):
if settings.PREFER_DATES_FROM != 'future':
return date
if date < default < date + delta:
date += delta
return date
@staticmethod
def _correct_for_past(date, delta, default, settings=None):
if settings.PREFER_DATES_FROM != 'past':
return date
if date > default > date - delta:
date -= delta
return date
def dateutil_parse(date_string, settings=None, **kwargs):
"""Wrapper function around dateutil.parser.parse
"""
today = datetime.utcnow()
kwargs.update(default=today)
    date_string = re.sub(r'\b(year|month|week|day)\b', '', date_string, flags=re.I)
# XXX: this is needed because of a bug in dateutil.parser
# that raises TypeError for an invalid string
# https://bugs.launchpad.net/dateutil/+bug/1042851
try:
return new_parser().parse(date_string, settings=settings, **kwargs)
except TypeError as e:
raise ValueError(e, "Invalid date: %s" % date_string)
class DateParser(object):
@apply_settings
def parse(self, date_string, settings=None):
date_string = six.text_type(date_string)
if not date_string.strip():
raise ValueError("Empty string")
date_string = strip_braces(date_string)
date_string, tz = pop_tz_offset_from_string(date_string)
date_obj, period = dateutil_parse(date_string, settings=settings)
if tz is not None:
date_obj = tz.localize(date_obj)
if settings.TIMEZONE:
date_obj = apply_timezone(date_obj, settings.TIMEZONE)
if not settings.RETURN_AS_TIMEZONE_AWARE:
date_obj = date_obj.replace(tzinfo=None)
return date_obj, period
date_parser = DateParser()
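# Minimal usage sketch (not part of the library; assumes the default settings
# object injected by @apply_settings from dateparser.conf):
if __name__ == '__main__':
    _date, _period = date_parser.parse('March 5, 2015 10:00 PM')
    print(_date, _period) # e.g. 2015-03-05 22:00:00 day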
| 31.276382 | 101 | 0.609576 |
794497c73fafca7aec82c66ce88e3aa0c1a23e11 | 3,079 | py | Python | nibe_modbus_mqtt/mqtt.py | vinklat/nibe-modbus-mqtt | 36cc25961de39c2880df9ee422390d85d881b92f | [
"MIT"
] | null | null | null | nibe_modbus_mqtt/mqtt.py | vinklat/nibe-modbus-mqtt | 36cc25961de39c2880df9ee422390d85d881b92f | [
"MIT"
] | null | null | null | nibe_modbus_mqtt/mqtt.py | vinklat/nibe-modbus-mqtt | 36cc25961de39c2880df9ee422390d85d881b92f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Connect to a MQTT broker
'''
import logging
import time
from socket import gaierror
import paho.mqtt.client as mqtt
from .version import app_instance
# create logger
logger = logging.getLogger(__name__)
class MqttException(Exception):
def __init__(self, message):
Exception.__init__(self, f'MQTT: {message}')
class Mqqt():
def __init__(self, topic_publish: str, topic_subscribe: str) -> None:
self.client = mqtt.Client(app_instance)
self.client.connected_flag = False
self.client.on_connect = self.on_connect
self.client.on_disconnect = self.on_disconnect
self.client.on_publish = self.on_publish
self.client.on_message = self.on_message
self.topic_publish = topic_publish
self.topic_subscribe = topic_subscribe
def connect(self, host: str, port: int, keepalive=30):
try:
self.client.connect(host, port, keepalive)
except (ConnectionRefusedError, OSError, gaierror) as exc:
raise MqttException(exc) from exc
def is_connected(self):
return self.client.connected_flag
def on_connect(self, _, userdata, flags, ret):
'''
fired upon a successful connection
ret values:
0: Connection successful
1: Connection refused - incorrect protocol version
2: Connection refused - invalid client identifier
3: Connection refused - server unavailable
4: Connection refused - bad username or password
5: Connection refused - not authorised
6-255: Currently unused.
'''
if ret == 0:
self.client.connected_flag = True
logger.info("MQTT connected OK")
logger.debug("userdata=%s, flags=%s, ret=%s", userdata, flags, ret)
else:
logger.error("MQTT connect ERROR: ret=%s", ret)
def on_disconnect(self, _, userdata, ret):
'''fired upon a disconnection'''
self.client.connected_flag = False
logger.error("MQTT disconnect")
logger.debug("userdata=%s, ret=%s", userdata, ret)
def on_publish(self, client, userdata, mid):
'''fired upon a message published'''
del client # Ignored parameter
logger.debug("MQTT published: userdata=%s, mid=%s", userdata, mid)
def on_message(self, client, userdata, msg):
'''receive message from MQTT'''
del client # Ignored parameter
logger.info("MQTT receive: %s %s", msg.topic, msg.payload)
logger.debug("userdata=%s", userdata)
def loop(self):
self.client.loop_start()
while True:
nattempts = 0
while not self.is_connected():
if nattempts > 0:
logger.error("MQTT connect wait... (attempt=%s)", nattempts)
time.sleep(10)
nattempts += 1
time.sleep(1)
def publish(self, payload):
logging.info("MQTT publish: %s %s", self.topic_publish, payload)
self.client.publish(self.topic_publish, payload)
| 32.072917 | 80 | 0.625853 |
7944981477e6b39555201a056b49f109aa5eaa89 | 3,619 | py | Python | scripts/run_presubmit_checks.py | yash10019coder/oppia | 8c349c61ac723a2fd507046b20957934cba70e3a | [
"Apache-2.0"
] | 5,422 | 2015-08-14T01:56:44.000Z | 2022-03-31T23:31:56.000Z | scripts/run_presubmit_checks.py | yash10019coder/oppia | 8c349c61ac723a2fd507046b20957934cba70e3a | [
"Apache-2.0"
] | 14,178 | 2015-08-14T05:21:45.000Z | 2022-03-31T23:54:10.000Z | scripts/run_presubmit_checks.py | yash10019coder/oppia | 8c349c61ac723a2fd507046b20957934cba70e3a | [
"Apache-2.0"
] | 3,574 | 2015-08-14T04:20:06.000Z | 2022-03-29T01:52:37.000Z | # Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS-IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script runs the following tests in all cases.
- Javascript and Python Linting
- Backend Python tests
Only when frontend files are changed will it run Frontend Karma unit tests.
"""
from __future__ import annotations
import argparse
import subprocess
from . import common
from . import run_backend_tests
from . import run_frontend_tests
from .linters import pre_commit_linter
_PARSER = argparse.ArgumentParser(
description="""
Run this script from the oppia root folder prior to opening a PR:
python -m scripts.run_presubmit_checks
Set the origin branch to compare against by adding
--branch=your_branch or -b=your_branch
By default, if the current branch tip exists on remote origin,
the current branch is compared against its tip on GitHub.
Otherwise it's compared against 'develop'.
This script runs the following tests in all cases.
- Javascript and Python Linting
- Backend Python tests
Only when frontend files are changed will it run Frontend Karma unit tests.
If any of these tests result in errors, this script will terminate.
Note: The test scripts are arranged in increasing order of time taken. This
enables a broken build to be detected as quickly as possible.
""")
_PARSER.add_argument(
'--branch', '-b',
help='optional; if specified, the origin branch to compare against.')
def main(args=None):
"""Run the presubmit checks."""
parsed_args = _PARSER.parse_args(args=args)
# Run Javascript and Python linters.
print('Linting files since the last commit')
pre_commit_linter.main(args=[])
print('Linting passed.')
print('')
current_branch = subprocess.check_output([
'git', 'rev-parse', '--abbrev-ref', 'HEAD'])
# If the current branch exists on remote origin, matched_branch_num=1
# else matched_branch_num=0.
matched_branch_num = subprocess.check_output([
'git', 'ls-remote', '--heads', 'origin', current_branch, '|', 'wc',
'-l'])
# Set the origin branch to develop if it's not specified.
if parsed_args.branch:
branch = parsed_args.branch
elif matched_branch_num == '1':
branch = 'origin/%s' % current_branch
else:
branch = 'develop'
print('Comparing the current branch with %s' % branch)
all_changed_files = subprocess.check_output([
'git', 'diff', '--cached', '--name-only', '--diff-filter=ACM', branch])
if common.FRONTEND_DIR in all_changed_files:
# Run frontend unit tests.
print('Running frontend unit tests')
run_frontend_tests.main(args=['--run_minified_tests'])
print('Frontend tests passed.')
else:
# If files in common.FRONTEND_DIR were not changed, skip the tests.
common.print_each_string_after_two_new_lines([
'No frontend files were changed.',
'Skipped frontend tests'])
# Run backend tests.
print('Running backend tests')
run_backend_tests.main(args=[])
print('Backend tests passed.')
if __name__ == '__main__':
main()
| 34.141509 | 79 | 0.713733 |
79449844a230624a118be7b6a6dfa5451a758466 | 20,337 | py | Python | pettingzoo/butterfly/pistonball/pistonball.py | jjshoots/PettingZoo | 398209c22bb43dd165932310c91e41c38ddcf49c | [
"Apache-2.0"
] | null | null | null | pettingzoo/butterfly/pistonball/pistonball.py | jjshoots/PettingZoo | 398209c22bb43dd165932310c91e41c38ddcf49c | [
"Apache-2.0"
] | null | null | null | pettingzoo/butterfly/pistonball/pistonball.py | jjshoots/PettingZoo | 398209c22bb43dd165932310c91e41c38ddcf49c | [
"Apache-2.0"
] | null | null | null | import math
import os
import gym
import numpy as np
import pygame
import pymunk
import pymunk.pygame_util
from gym.utils import EzPickle, seeding
from pettingzoo import AECEnv
from pettingzoo.utils import agent_selector, wrappers
from pettingzoo.utils.conversions import parallel_wrapper_fn
from .manual_policy import ManualPolicy
_image_library = {}
FPS = 20
def get_image(path):
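    # Load a sprite that lives next to this module and blit it onto an
    # SRCALPHA surface so per-pixel transparency is preserved.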
from os import path as os_path
cwd = os_path.dirname(__file__)
image = pygame.image.load(cwd + "/" + path)
sfc = pygame.Surface(image.get_size(), flags=pygame.SRCALPHA)
sfc.blit(image, (0, 0))
return sfc
def env(**kwargs):
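    # Standard PettingZoo wrapper stack: clip continuous actions (or assert
    # discrete ones) and enforce the reset/step call order.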
env = raw_env(**kwargs)
if env.continuous:
env = wrappers.ClipOutOfBoundsWrapper(env)
else:
env = wrappers.AssertOutOfBoundsWrapper(env)
env = wrappers.OrderEnforcingWrapper(env)
return env
parallel_env = parallel_wrapper_fn(env)
class raw_env(AECEnv, EzPickle):
metadata = {
"render_modes": ["human", "rgb_array"],
"name": "pistonball_v6",
"is_parallelizable": True,
"render_fps": FPS,
"has_manual_policy": True,
}
def __init__(
self,
n_pistons=20,
time_penalty=-0.1,
continuous=True,
random_drop=True,
random_rotate=True,
ball_mass=0.75,
ball_friction=0.3,
ball_elasticity=1.5,
max_cycles=125,
):
EzPickle.__init__(
self,
n_pistons,
time_penalty,
continuous,
random_drop,
random_rotate,
ball_mass,
ball_friction,
ball_elasticity,
max_cycles,
)
self.dt = 1.0 / FPS
self.n_pistons = n_pistons
self.piston_head_height = 11
self.piston_width = 40
self.piston_height = 40
self.piston_body_height = 23
self.piston_radius = 5
self.wall_width = 40
self.ball_radius = 40
self.screen_width = (2 * self.wall_width) + (self.piston_width * self.n_pistons)
self.screen_height = 560
y_high = self.screen_height - self.wall_width - self.piston_body_height
y_low = self.wall_width
obs_height = y_high - y_low
assert (
self.piston_width == self.wall_width
), "Wall width and piston width must be equal for observation calculation"
assert self.n_pistons > 1, "n_pistons must be greater than 1"
self.agents = ["piston_" + str(r) for r in range(self.n_pistons)]
self.possible_agents = self.agents[:]
self.agent_name_mapping = dict(zip(self.agents, list(range(self.n_pistons))))
self._agent_selector = agent_selector(self.agents)
self.observation_spaces = dict(
zip(
self.agents,
[
gym.spaces.Box(
low=0,
high=255,
shape=(obs_height, self.piston_width * 3, 3),
dtype=np.uint8,
)
]
* self.n_pistons,
)
)
self.continuous = continuous
if self.continuous:
self.action_spaces = dict(
zip(
self.agents,
[gym.spaces.Box(low=-1, high=1, shape=(1,))] * self.n_pistons,
)
)
else:
self.action_spaces = dict(
zip(self.agents, [gym.spaces.Discrete(3)] * self.n_pistons)
)
self.state_space = gym.spaces.Box(
low=0,
high=255,
shape=(self.screen_height, self.screen_width, 3),
dtype=np.uint8,
)
pygame.init()
pymunk.pygame_util.positive_y_is_up = False
self.renderOn = False
self.screen = pygame.Surface((self.screen_width, self.screen_height))
self.max_cycles = max_cycles
self.piston_sprite = get_image("piston.png")
self.piston_body_sprite = get_image("piston_body.png")
self.background = get_image("background.png")
self.random_drop = random_drop
self.random_rotate = random_rotate
self.pistonList = []
self.pistonRewards = [] # Keeps track of individual rewards
self.recentFrameLimit = (
20 # Defines what "recent" means in terms of number of frames.
)
self.recentPistons = set() # Set of pistons that have touched the ball recently
self.time_penalty = time_penalty
# TODO: this was a bad idea and the logic this uses should be removed at some point
self.local_ratio = 0
self.ball_mass = ball_mass
self.ball_friction = ball_friction
self.ball_elasticity = ball_elasticity
self.done = False
self.pixels_per_position = 4
self.n_piston_positions = 16
self.screen.fill((0, 0, 0))
self.draw_background()
# self.screen.blit(self.background, (0, 0))
self.render_rect = pygame.Rect(
self.wall_width, # Left
self.wall_width, # Top
self.screen_width - (2 * self.wall_width), # Width
self.screen_height
- (2 * self.wall_width)
- self.piston_body_height, # Height
)
# Blit background image if ball goes out of bounds. Ball radius is 40
self.valid_ball_position_rect = pygame.Rect(
self.render_rect.left + self.ball_radius, # Left
self.render_rect.top + self.ball_radius, # Top
self.render_rect.width - (2 * self.ball_radius), # Width
self.render_rect.height - (2 * self.ball_radius), # Height
)
self.frames = 0
self.has_reset = False
self.closed = False
self.seed()
def observation_space(self, agent):
return self.observation_spaces[agent]
def action_space(self, agent):
return self.action_spaces[agent]
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
def observe(self, agent):
observation = pygame.surfarray.pixels3d(self.screen)
i = self.agent_name_mapping[agent]
# Set x bounds to include 40px left and 40px right of piston
x_high = self.wall_width + self.piston_width * (i + 2)
x_low = self.wall_width + self.piston_width * (i - 1)
y_high = self.screen_height - self.wall_width - self.piston_body_height
y_low = self.wall_width
cropped = np.array(observation[x_low:x_high, y_low:y_high, :])
observation = np.rot90(cropped, k=3)
observation = np.fliplr(observation)
return observation
def state(self):
"""
Returns an observation of the global environment
"""
state = pygame.surfarray.pixels3d(self.screen).copy()
state = np.rot90(state, k=3)
state = np.fliplr(state)
return state
def enable_render(self):
self.screen = pygame.display.set_mode((self.screen_width, self.screen_height))
self.renderOn = True
# self.screen.blit(self.background, (0, 0))
self.draw_background()
self.draw()
def close(self):
if not self.closed:
self.closed = True
if self.renderOn:
self.screen = pygame.Surface((self.screen_width, self.screen_height))
self.renderOn = False
pygame.event.pump()
pygame.display.quit()
def add_walls(self):
top_left = (self.wall_width, self.wall_width)
top_right = (self.screen_width - self.wall_width, self.wall_width)
bot_left = (self.wall_width, self.screen_height - self.wall_width)
bot_right = (
self.screen_width - self.wall_width,
self.screen_height - self.wall_width,
)
walls = [
pymunk.Segment(self.space.static_body, top_left, top_right, 1), # Top wall
pymunk.Segment(self.space.static_body, top_left, bot_left, 1), # Left wall
pymunk.Segment(
self.space.static_body, bot_left, bot_right, 1
), # Bottom wall
pymunk.Segment(self.space.static_body, top_right, bot_right, 1), # Right
]
for wall in walls:
wall.friction = 0.64
self.space.add(wall)
def add_ball(self, x, y, b_mass, b_friction, b_elasticity):
mass = b_mass
radius = 40
inertia = pymunk.moment_for_circle(mass, 0, radius, (0, 0))
body = pymunk.Body(mass, inertia)
body.position = x, y
# radians per second
if self.random_rotate:
body.angular_velocity = self.np_random.uniform(-6 * math.pi, 6 * math.pi)
shape = pymunk.Circle(body, radius, (0, 0))
shape.friction = b_friction
shape.elasticity = b_elasticity
self.space.add(body, shape)
return body
def add_piston(self, space, x, y):
piston = pymunk.Body(body_type=pymunk.Body.KINEMATIC)
piston.position = x, y
segment = pymunk.Segment(
piston,
(0, 0),
(self.piston_width - (2 * self.piston_radius), 0),
self.piston_radius,
)
segment.friction = 0.64
segment.color = pygame.color.THECOLORS["blue"]
space.add(piston, segment)
return piston
def move_piston(self, piston, v):
def cap(y):
maximum_piston_y = (
self.screen_height
- self.wall_width
- (self.piston_height - self.piston_head_height)
)
if y > maximum_piston_y:
y = maximum_piston_y
elif y < maximum_piston_y - (
self.n_piston_positions * self.pixels_per_position
):
y = maximum_piston_y - (
self.n_piston_positions * self.pixels_per_position
)
return y
piston.position = (
piston.position[0],
cap(piston.position[1] - v * self.pixels_per_position),
)
def reset(self, seed=None, options=None):
if seed is not None:
self.seed(seed)
self.space = pymunk.Space(threaded=False)
self.add_walls()
# self.space.threads = 2
self.space.gravity = (0.0, 750.0)
self.space.collision_bias = 0.0001
self.space.iterations = 10 # 10 is default in PyMunk
self.pistonList = []
maximum_piston_y = (
self.screen_height
- self.wall_width
- (self.piston_height - self.piston_head_height)
)
for i in range(self.n_pistons):
# Multiply by 0.5 to use only the lower half of possible positions
possible_y_displacements = np.arange(
0,
0.5 * self.pixels_per_position * self.n_piston_positions,
self.pixels_per_position,
)
piston = self.add_piston(
self.space,
self.wall_width
+ self.piston_radius
+ self.piston_width * i, # x position
maximum_piston_y
# y position
- self.np_random.choice(possible_y_displacements),
)
            piston.velocity = (0, 0)  # kinematic pistons start at rest
self.pistonList.append(piston)
self.horizontal_offset = 0
self.vertical_offset = 0
horizontal_offset_range = 30
vertical_offset_range = 15
if self.random_drop:
self.vertical_offset = self.np_random.randint(
-vertical_offset_range, vertical_offset_range + 1
)
self.horizontal_offset = self.np_random.randint(
-horizontal_offset_range, horizontal_offset_range + 1
)
ball_x = (
self.screen_width
- self.wall_width
- self.ball_radius
- horizontal_offset_range
+ self.horizontal_offset
)
ball_y = (
self.screen_height
- self.wall_width
- self.piston_body_height
- self.ball_radius
- (0.5 * self.pixels_per_position * self.n_piston_positions)
- vertical_offset_range
+ self.vertical_offset
)
# Ensure ball starts somewhere right of the left wall
ball_x = max(ball_x, self.wall_width + self.ball_radius + 1)
self.ball = self.add_ball(
ball_x, ball_y, self.ball_mass, self.ball_friction, self.ball_elasticity
)
self.ball.angle = 0
self.ball.velocity = (0, 0)
if self.random_rotate:
self.ball.angular_velocity = self.np_random.uniform(
-6 * math.pi, 6 * math.pi
)
self.lastX = int(self.ball.position[0] - self.ball_radius)
self.distance = self.lastX - self.wall_width
self.draw_background()
self.draw()
self.agents = self.possible_agents[:]
self._agent_selector.reinit(self.agents)
self.agent_selection = self._agent_selector.next()
self.has_reset = True
self.done = False
self.rewards = dict(zip(self.agents, [0 for _ in self.agents]))
self._cumulative_rewards = dict(zip(self.agents, [0 for _ in self.agents]))
self.dones = dict(zip(self.agents, [False for _ in self.agents]))
self.infos = dict(zip(self.agents, [{} for _ in self.agents]))
self.frames = 0
def draw_background(self):
outer_walls = pygame.Rect(
0, # Left
0, # Top
self.screen_width, # Width
self.screen_height, # Height
)
outer_wall_color = (58, 64, 65)
pygame.draw.rect(self.screen, outer_wall_color, outer_walls)
inner_walls = pygame.Rect(
self.wall_width / 2, # Left
self.wall_width / 2, # Top
self.screen_width - self.wall_width, # Width
self.screen_height - self.wall_width, # Height
)
inner_wall_color = (68, 76, 77)
pygame.draw.rect(self.screen, inner_wall_color, inner_walls)
self.draw_pistons()
def draw_pistons(self):
piston_color = (65, 159, 221)
x_pos = self.wall_width
for piston in self.pistonList:
self.screen.blit(
self.piston_body_sprite,
(x_pos, self.screen_height - self.wall_width - self.piston_body_height),
)
# Height is the size of the blue part of the piston. 6 is the piston base height (the gray part at the bottom)
height = (
self.screen_height
- self.wall_width
- self.piston_body_height
- (piston.position[1] + self.piston_radius)
+ (self.piston_body_height - 6)
)
body_rect = pygame.Rect(
piston.position[0]
+ self.piston_radius
+ 1, # +1 to match up to piston graphics
piston.position[1] + self.piston_radius + 1,
18,
height,
)
pygame.draw.rect(self.screen, piston_color, body_rect)
x_pos += self.piston_width
def draw(self):
# redraw the background image if ball goes outside valid position
if not self.valid_ball_position_rect.collidepoint(self.ball.position):
# self.screen.blit(self.background, (0, 0))
self.draw_background()
ball_x = int(self.ball.position[0])
ball_y = int(self.ball.position[1])
color = (255, 255, 255)
pygame.draw.rect(self.screen, color, self.render_rect)
color = (65, 159, 221)
pygame.draw.circle(self.screen, color, (ball_x, ball_y), self.ball_radius)
line_end_x = ball_x + (self.ball_radius - 1) * np.cos(self.ball.angle)
line_end_y = ball_y + (self.ball_radius - 1) * np.sin(self.ball.angle)
color = (58, 64, 65)
pygame.draw.line(
self.screen, color, (ball_x, ball_y), (line_end_x, line_end_y), 3
) # 39 because it kept sticking over by 1 at 40
for piston in self.pistonList:
self.screen.blit(
self.piston_sprite,
(
piston.position[0] - self.piston_radius,
piston.position[1] - self.piston_radius,
),
)
self.draw_pistons()
def get_nearby_pistons(self):
# first piston = leftmost
nearby_pistons = []
ball_pos = int(self.ball.position[0] - self.ball_radius)
closest = abs(self.pistonList[0].position.x - ball_pos)
closest_piston_index = 0
for i in range(self.n_pistons):
next_distance = abs(self.pistonList[i].position.x - ball_pos)
if next_distance < closest:
closest = next_distance
closest_piston_index = i
if closest_piston_index > 0:
nearby_pistons.append(closest_piston_index - 1)
nearby_pistons.append(closest_piston_index)
if closest_piston_index < self.n_pistons - 1:
nearby_pistons.append(closest_piston_index + 1)
return nearby_pistons
def get_local_reward(self, prev_position, curr_position):
local_reward = 0.5 * (prev_position - curr_position)
return local_reward
def render(self, mode="human"):
if mode == "human" and not self.renderOn:
# sets self.renderOn to true and initializes display
self.enable_render()
self.draw_background()
self.draw()
observation = np.array(pygame.surfarray.pixels3d(self.screen))
if mode == "human":
pygame.display.flip()
return (
np.transpose(observation, axes=(1, 0, 2)) if mode == "rgb_array" else None
)
def step(self, action):
if self.dones[self.agent_selection]:
return self._was_done_step(action)
action = np.asarray(action)
agent = self.agent_selection
if self.continuous:
self.move_piston(self.pistonList[self.agent_name_mapping[agent]], action)
else:
self.move_piston(
self.pistonList[self.agent_name_mapping[agent]], action - 1
)
self.space.step(self.dt)
if self._agent_selector.is_last():
ball_min_x = int(self.ball.position[0] - self.ball_radius)
ball_next_x = (
self.ball.position[0]
- self.ball_radius
+ self.ball.velocity[0] * self.dt
)
if ball_next_x <= self.wall_width + 1:
self.done = True
# ensures that the ball can't pass through the wall
ball_min_x = max(self.wall_width, ball_min_x)
self.draw()
local_reward = self.get_local_reward(self.lastX, ball_min_x)
# Opposite order due to moving right to left
global_reward = (100 / self.distance) * (self.lastX - ball_min_x)
if not self.done:
global_reward += self.time_penalty
total_reward = [
global_reward * (1 - self.local_ratio)
] * self.n_pistons # start with global reward
local_pistons_to_reward = self.get_nearby_pistons()
for index in local_pistons_to_reward:
total_reward[index] += local_reward * self.local_ratio
self.rewards = dict(zip(self.agents, total_reward))
self.lastX = ball_min_x
self.frames += 1
else:
self._clear_rewards()
if self.frames >= self.max_cycles:
self.done = True
# Clear the list of recent pistons for the next reward cycle
if self.frames % self.recentFrameLimit == 0:
self.recentPistons = set()
if self._agent_selector.is_last():
self.dones = dict(zip(self.agents, [self.done for _ in self.agents]))
self.agent_selection = self._agent_selector.next()
self._cumulative_rewards[agent] = 0
self._accumulate_rewards()
# Game art created by J K Terry
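# Usage sketch (not part of the original file; assumes this version's AEC API,
# where env.last() returns observation, reward, done, info):
#
#   pistonball = env(n_pistons=20, continuous=True)
#   pistonball.reset()
#   for agent in pistonball.agent_iter():
#       obs, reward, done, info = pistonball.last()
#       action = None if done else pistonball.action_space(agent).sample()
#       pistonball.step(action)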
| 34.883362 | 122 | 0.576978 |
794498e3fcf4c60cc90d013ce5f97fc1e4cc7ce4 | 764 | py | Python | tests/test_s_scheduler.py | zoulida/sdufeQuant | dc3715a62f620c0a437daacfe9a113d5a6ecb62d | ["Apache-2.0"] | 1 | 2019-04-22T14:29:24.000Z | 2019-04-22T14:29:24.000Z | tests/test_s_scheduler.py | zoulida/sdufealpha | 754e7df303ff94251846863d5cd326b9d121a810 | ["Apache-2.0"] | null | null | null | tests/test_s_scheduler.py | zoulida/sdufealpha | 754e7df303ff94251846863d5cd326b9d121a810 | ["Apache-2.0"] | 1 | 2019-04-28T01:24:16.000Z | 2019-04-28T01:24:16.000Z |
from rqalpha66666.api import *
def init(context):
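    # Register the weekly rebalance: run on the first trading day of each week,
    # at market open (RQAlpha-style scheduler API).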
scheduler.run_weekly(rebalance, 1, time_rule=market_open(0, 0))
def rebalance(context, bar_dict):
stock = "000001.XSHE"
if context.portfolio.positions[stock].quantity == 0:
order_target_percent(stock, 1)
else:
order_target_percent(stock, 0)
__config__ = {
"base": {
"start_date": "2008-07-01",
"end_date": "2017-01-01",
"frequency": "1d",
"matching_type": "current_bar",
"benchmark": "000001.XSHE",
"accounts": {
"stock": 100000
}
},
"extra": {
"log_level": "error",
},
"mod": {
"sys_progress": {
"enabled": True,
"show": True,
},
},
}
| 20.648649 | 67 | 0.527487 |
794499d35184125364638bc09385256ab6b16be6 | 1,484 | py | Python | tests/test_visitors/test_ast/test_compares/test_in_with_single_item_container.py | n1kolasM/wemake-python-styleguide | f39e87897de89bea1c49d410beb5b1cbaf930807 | ["MIT"] | 1 | 2020-02-08T12:04:39.000Z | 2020-02-08T12:04:39.000Z | tests/test_visitors/test_ast/test_compares/test_in_with_single_item_container.py | n1kolasM/wemake-python-styleguide | f39e87897de89bea1c49d410beb5b1cbaf930807 | ["MIT"] | 15 | 2020-02-22T11:09:46.000Z | 2020-02-27T16:36:54.000Z | tests/test_visitors/test_ast/test_compares/test_in_with_single_item_container.py | n1kolasM/wemake-python-styleguide | f39e87897de89bea1c49d410beb5b1cbaf930807 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
from wemake_python_styleguide.violations.refactoring import (
InCompareWithSingleItemContainerViolation,
WrongInCompareTypeViolation,
)
from wemake_python_styleguide.visitors.ast.compares import (
InCompareSanityVisitor,
)
@pytest.mark.parametrize('code', [
'if a in {1}: ...',
'if a in {1: "a"}: ...',
'if a in [1]: ... ',
'if a in (1,): ... ',
'if a in "a": ... ',
])
def test_single_item_container(
assert_errors,
parse_ast_tree,
code,
default_options,
in_not_in,
):
"""Compares forbid ``in`` with single item containers."""
tree = parse_ast_tree(code)
visitor = InCompareSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(
visitor,
[InCompareWithSingleItemContainerViolation],
ignored_types=(WrongInCompareTypeViolation,),
)
@pytest.mark.parametrize('code', [
'if a in {1, 2}: ...',
'if a in {1: "a", 2: "b"}: ...',
'if a in [1, 2]: ... ',
'if a in (1, 2): ... ',
'if a in "ab": ... ',
])
def test_multi_item_container(
assert_errors,
parse_ast_tree,
code,
default_options,
in_not_in,
):
"""Compares allow ``in`` with multi items containers."""
tree = parse_ast_tree(code)
visitor = InCompareSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(
visitor,
[],
ignored_types=(WrongInCompareTypeViolation,),
)
| 22.484848 | 64 | 0.615903 |
794499db71ae179642d93455cfd9fbc52881201f | 3,210 | py | Python | facebook_scraper/utils.py | davidchoo12/facebook-scraper | f0300c8c0b4b22a807946faae98593c081e6fc04 | ["MIT"] | 2 | 2021-11-12T23:00:21.000Z | 2021-11-12T23:00:41.000Z | facebook_scraper/utils.py | davidchoo12/facebook-scraper | f0300c8c0b4b22a807946faae98593c081e6fc04 | ["MIT"] | null | null | null | facebook_scraper/utils.py | davidchoo12/facebook-scraper | f0300c8c0b4b22a807946faae98593c081e6fc04 | ["MIT"] | null | null | null |
import codecs
import re
from datetime import datetime
from typing import Optional
from urllib.parse import parse_qsl, unquote, urlencode, urljoin, urlparse, urlunparse
import dateparser
import lxml.html
from bs4 import BeautifulSoup
from requests_html import DEFAULT_URL, Element, PyQuery
def find_and_search(node, selector, pattern, cast=str):
container = node.find(selector, first=True)
match = container and pattern.search(container.html)
return match and cast(match.groups()[0])
def parse_int(value: str) -> int:
return int(''.join(filter(lambda c: c.isdigit(), value)))
def decode_css_url(url: str) -> str:
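    # CSS background-image URLs come escaped like "\3a "; rewrite those escapes
    # as "\x3a" and decode, yielding a URL that can be requested directly.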
url = re.sub(r'\\(..) ', r'\\x\g<1>', url)
url, _ = codecs.unicode_escape_decode(url)
return url
def filter_query_params(url, whitelist=None, blacklist=None) -> str:
def is_valid_param(param):
if whitelist is not None:
return param in whitelist
if blacklist is not None:
return param not in blacklist
return True # Do nothing
parsed_url = urlparse(url)
query_params = parse_qsl(parsed_url.query)
query_string = urlencode([(k, v) for k, v in query_params if is_valid_param(k)])
return urlunparse(parsed_url._replace(query=query_string))
def make_html_element(html: str, url=DEFAULT_URL) -> Element:
pq_element = PyQuery(html)[0] # PyQuery is a list, so we take the first element
return Element(element=pq_element, url=url)
month = (
r"Jan(?:uary)?|"
r"Feb(?:ruary)?|"
r"Mar(?:ch)?|"
r"Apr(?:il)?|"
r"May|"
r"Jun(?:e)?|"
r"Jul(?:y)?|"
r"Aug(?:ust)?|"
r"Sep(?:tember)?|"
r"Oct(?:ober)?|"
r"Nov(?:ember)?|"
r"Dec(?:ember)?"
)
day_of_month = r"\d{1,2}"
specific_date_md = f"(?:{month}) {day_of_month}" + r"(?:,? \d{4})?"
specific_date_dm = f"{day_of_month} (?:{month})" + r"(?:,? \d{4})?"
date = f"{specific_date_md}|{specific_date_dm}|Today|Yesterday"
hour = r"\d{1,2}"
minute = r"\d{2}"
period = r"AM|PM|"
exact_time = f"(?:{date}) at {hour}:{minute} ?(?:{period})"
relative_time_hours = r"\b\d{1,2} ?h(?:rs?)?"
relative_time_mins = r"\b\d{1,2} ?mins?"
relative_time = f"{relative_time_hours}|{relative_time_mins}"
datetime_regex = re.compile(fr"({exact_time}|{relative_time})", re.IGNORECASE)
def parse_datetime(text: str, search=True) -> Optional[datetime]:
"""Looks for a string that looks like a date and parses it into a datetime object.
Uses a regex to look for the date in the string.
Uses dateparser to parse the date (not thread safe).
Args:
text: The text where the date should be.
search: If false, skip the regex search and try to parse the complete string.
Returns:
The datetime object, or None if it couldn't find a date.
"""
if search:
time_match = datetime_regex.search(text)
if time_match:
text = time_match.group(0)
else:
return None
return dateparser.parse(text)
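# Usage sketch (illustrative inputs; relative strings such as "2 hrs" resolve
# against the current clock, so exact results depend on when this runs):
#   parse_datetime("Yesterday at 10:30 AM")  # datetime for yesterday, 10:30
#   parse_datetime("no date in this text")   # None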
def html_element_to_string(element: Element, pretty=False) -> str:
html = lxml.html.tostring(element.element, encoding='unicode')
if pretty:
html = BeautifulSoup(html, features='html.parser').prettify()
return html
| 29.722222 | 86 | 0.657321 |
794499fa5c6e93cf26f162e85d96ea2a5a2031d9 | 3,741 | py | Python | python/oneflow/test/modules/test_randn.py | grybd/oneflow | 82237ad096a10527591660c09b61444c42917e69 | ["Apache-2.0"] | 3,285 | 2020-07-31T05:51:22.000Z | 2022-03-31T15:20:16.000Z | python/oneflow/test/modules/test_randn.py | grybd/oneflow | 82237ad096a10527591660c09b61444c42917e69 | ["Apache-2.0"] | 2,417 | 2020-07-31T06:28:58.000Z | 2022-03-31T23:04:14.000Z | python/oneflow/test/modules/test_randn.py | grybd/oneflow | 82237ad096a10527591660c09b61444c42917e69 | ["Apache-2.0"] | 520 | 2020-07-31T05:52:42.000Z | 2022-03-29T02:38:11.000Z |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
import oneflow.unittest
from test_util import GenArgList
from oneflow.test_utils.automated_test_util import *
def _test_randn(test_case, device, shape):
y1 = flow.randn(*shape, device=flow.device(device))
y2 = flow.randn(*shape, device=flow.device(device))
test_case.assertTrue(not np.allclose(y1.numpy(), y2.numpy(), atol=1e-4, rtol=1e-4))
test_case.assertTrue(shape == y1.shape)
def _test_0d_rand(test_case, device, shape):
y1 = flow.randn(*shape, device=flow.device(device))
y2 = flow.randn(*shape, device=flow.device(device))
test_case.assertTrue(
np.allclose(y1.numpy(), y2.numpy(), atol=1e-4, rtol=1e-4)
) # 0d is [] and []
test_case.assertTrue(shape == y1.shape)
def _test_different_dtype(test_case, device, shape):
y1 = flow.randn(*shape, dtype=flow.float32, device=flow.device(device))
y2 = flow.randn(*shape, dtype=flow.float64, device=flow.device(device))
test_case.assertTrue(not np.allclose(y1.numpy(), y2.numpy(), atol=1e-4, rtol=1e-4))
test_case.assertTrue(shape == y1.shape)
with test_case.assertRaises(
oneflow._oneflow_internal.exception.UnimplementedException
):
flow.randn(*shape, dtype=flow.int32, device=flow.device(device))
def _test_backward(test_case, device, shape):
x = flow.randn(*shape, device=flow.device(device), requires_grad=True)
y = x.sum()
y.backward()
test_case.assertTrue(
np.allclose(np.ones(shape), x.grad.numpy(), atol=1e-4, rtol=1e-4)
)
def _test_with_generator(test_case, device, shape):
gen = flow.Generator()
gen.manual_seed(0)
y1 = flow.randn(
*shape, dtype=flow.float32, device=flow.device(device), generator=gen
)
gen.manual_seed(0)
y2 = flow.randn(
*shape, dtype=flow.float32, device=flow.device(device), generator=gen
)
test_case.assertTrue(np.allclose(y1.numpy(), y2.numpy(), atol=1e-4, rtol=1e-4))
@flow.unittest.skip_unless_1n1d()
class TestRandnModule(flow.unittest.TestCase):
def test_consistent_naive(test_case):
placement = flow.placement("cpu", {0: [0]})
sbp = (flow.sbp.broadcast,)
x = flow.randn(16, 16, placement=placement, sbp=sbp)
test_case.assertEqual(x.sbp, sbp)
test_case.assertEqual(x.placement, placement)
def test_randn(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
_test_randn,
_test_different_dtype,
_test_backward,
_test_with_generator,
]
arg_dict["device"] = ["cpu", "cuda"]
arg_dict["shape"] = [(2, 3), (2, 3, 4), (2, 3, 4, 5)]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
def test_0d_randn(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [_test_0d_rand]
arg_dict["device"] = ["cpu", "cuda"]
arg_dict["shape"] = [(2, 0, 4), (2, 0, 2)]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
if __name__ == "__main__":
unittest.main()
| 32.815789 | 87 | 0.671746 |
794499fe75ba7d2b6a62ed6cc53c6e84cc3f3a60 | 650 | py | Python | Important/ImplementReverseStack.py | adityaarakeri/Interview-solved | e924011d101621c7121f4f86d82bee089f4c1e25 | ["MIT"] | 46 | 2019-10-14T01:21:35.000Z | 2022-01-08T23:55:15.000Z | Important/ImplementReverseStack.py | Siddhant-K-code/Interview-solved | e924011d101621c7121f4f86d82bee089f4c1e25 | ["MIT"] | 53 | 2019-10-03T17:16:43.000Z | 2020-12-08T12:48:19.000Z | Important/ImplementReverseStack.py | Siddhant-K-code/Interview-solved | e924011d101621c7121f4f86d82bee089f4c1e25 | ["MIT"] | 96 | 2019-10-03T18:12:10.000Z | 2021-03-14T19:41:06.000Z |
# Implement a Reverse Stack in python
class ReverseStack():
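    # A stack variant that keeps the newest element at index 0 of the backing
    # list, so push, peek and pop all operate on the front instead of the back.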
def __init__(self):
self.items = list()
def push(self, *args):
for val in args:
self.items.insert(0, val)
def peek(self):
return self.items[0]
def pop(self):
return self.items.pop(0)
def isEmpty(self):
if len(self.items) == 0:
return True
else:
return False
def size(self):
return len(self.items)
s = ReverseStack()
s.push(1)
s.push(2)
s.push(3)
s.push(4, 5)
s.push('test')
print(s.items)
print(s.isEmpty())
print(s.peek())
print(s.size())
s.pop()
print(s.items)
| 15.47619 | 37 | 0.563077 |
79449a3080bd8f2415e7f7d91f1b33a9e5d37403 | 3,701 | py | Python | shell/command.py | wenbobuaa/pykit | 43e38fe40297a1e7a9329bcf3db3554c7ca48ead | ["MIT"] | null | null | null | shell/command.py | wenbobuaa/pykit | 43e38fe40297a1e7a9329bcf3db3554c7ca48ead | ["MIT"] | null | null | null | shell/command.py | wenbobuaa/pykit | 43e38fe40297a1e7a9329bcf3db3554c7ca48ead | ["MIT"] | null | null | null |
#!/usr/bin/env python2
# coding: utf-8
import argparse
import copy
import sys
import logging
import os
from pykit import dictutil
logger = logging.getLogger(__name__)
def command(**kwargs):
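    # Walk sys.argv through the nested command dict until an executable node is
    # found, bind its arguments (via argparse when '__add_help__' metadata is
    # present), call it, and exit 0 on success or non-zero on failure.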
root, parser = add_command_help(kwargs)
inputs = sys.argv[1:]
try:
cmds = []
while len(inputs) > 0 and root.has_key(inputs[0]):
k = inputs.pop(0)
cmds.append(k)
node = root[k]
if is_node_executable(node):
call_able, args = parse_executable_node(parser, cmds, node, inputs)
try:
logger.debug("command: " + repr(cmds) + ' args: ' + repr(args) + ' cwd: ' + repr(os.getcwd()))
rc = call_able(*args)
sys.exit(0
if rc is True or rc is 0 or rc is None
else 1)
except Exception as e:
logger.exception(repr(e))
sys.stderr.write(repr(e))
sys.exit(1)
else:
root = node
if need_to_show_help(parser):
if len(cmds) > 0:
argv = [' '.join(cmds)] + inputs
else:
argv = inputs
parser.parse_args(argv)
else:
sys.stderr.write('No such command: ' + ' '.join(sys.argv[1:]))
sys.exit(2)
except Exception as e:
logger.exception(repr(e))
sys.stderr.write(repr(e))
sys.exit(1)
def add_command_help(commands):
new_cmds = copy.deepcopy(commands)
help_msgs = new_cmds.get('__add_help__')
desc = new_cmds.get('__description__')
for k in ('__add_help__', '__description__'):
if new_cmds.has_key(k):
del new_cmds[k]
if help_msgs is None:
return new_cmds, None
parser = argparse.ArgumentParser(description=desc, epilog='\n')
subparsers = parser.add_subparsers(help=' command(s) to select ...')
for cmds, execute_able in dictutil.depth_iter(new_cmds):
help = help_msgs.get(tuple(cmds), '')
cmd = ' '.join(cmds)
cmd_parser = subparsers.add_parser(cmd, help=help)
if need_param_help(execute_able):
call_able = execute_able[0]
param_msgs = execute_able[1:]
params = add_param_help(cmd_parser, param_msgs)
# delete help message
dictutil.make_setter(cmds)(new_cmds, (call_able, params))
return new_cmds, parser
def add_param_help(parser, param_msgs):
params = []
for param, msg in param_msgs:
parser.add_argument(param, **msg)
params.append(param)
return params
def parse_executable_node(parser, cmds, execute_able, args):
if not need_to_show_help(parser):
# no __add_help__ but has paramter help message
if args_need_to_parse(execute_able):
return execute_able[0], args
return execute_able, args
args_parsed = parser.parse_args([' '.join(cmds)] + args)
# to dict
args_parsed = vars(args_parsed)
if not args_need_to_parse(execute_able):
return execute_able, args
call_able, params = execute_able
args = [args_parsed.get(x) for x in params]
return call_able, args
def is_node_executable(node):
if isinstance(node, (list, tuple)) and len(node) > 0:
return callable(node[0])
return callable(node)
def need_to_show_help(parser):
return parser is not None
def args_need_to_parse(execute_able):
return isinstance(execute_able, tuple)
def need_param_help(execute_able):
return isinstance(execute_able, (list, tuple)) and len(execute_able) > 1
| 23.13125 | 114 | 0.590921 |
79449a75f1e341c54210d6b171159396f5be56f1 | 537 | py | Python | dedomeno/houses/migrations/0046_auto_20161226_1416.py | ginopalazzo/dedomeno | e43df365849102016c8819b2082d2cde9109360f | ["MIT"] | 38 | 2018-03-19T12:52:17.000Z | 2022-02-17T14:45:57.000Z | dedomeno/houses/migrations/0046_auto_20161226_1416.py | ginopalazzo/dedomeno | e43df365849102016c8819b2082d2cde9109360f | ["MIT"] | 7 | 2020-02-11T23:01:40.000Z | 2020-08-06T13:30:58.000Z | dedomeno/houses/migrations/0046_auto_20161226_1416.py | ginopalazzo/dedomeno | e43df365849102016c8819b2082d2cde9109360f | ["MIT"] | 12 | 2019-02-23T22:10:34.000Z | 2022-03-24T12:01:38.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-26 13:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('houses', '0045_auto_20161226_1411'),
]
operations = [
migrations.AlterField(
model_name='house',
name='agency',
field=models.ManyToManyField(blank=True, help_text='If blank there is not an agency involved', null=True, to='houses.Agency'),
),
]
| 25.571429 | 138 | 0.638734 |
79449a9b4a0ba771f424f795bc74cdd22a32a8ef | 14,729 | py | Python | vyper/compile_lll.py | mpcnat/vyper | 731263c9bea826a167639989350688833f68c182 | ["MIT"] | null | null | null | vyper/compile_lll.py | mpcnat/vyper | 731263c9bea826a167639989350688833f68c182 | ["MIT"] | null | null | null | vyper/compile_lll.py | mpcnat/vyper | 731263c9bea826a167639989350688833f68c182 | ["MIT"] | null | null | null |
from vyper.parser.parser import LLLnode
from .opcodes import opcodes
from vyper.utils import MemoryPositions
def num_to_bytearray(x):
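    # Big-endian byte decomposition of a non-negative integer, used as the
    # immediate operand of a PUSHn instruction.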
o = []
while x > 0:
o.insert(0, x % 256)
x //= 256
return o
PUSH_OFFSET = 0x5f
DUP_OFFSET = 0x7f
SWAP_OFFSET = 0x8f
next_symbol = [0]
def mksymbol():
next_symbol[0] += 1
return '_sym_' + str(next_symbol[0])
def is_symbol(i):
return isinstance(i, str) and i[:5] == '_sym_'
def get_revert(mem_start=None, mem_len=None):
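    # Emit the tail of an assertion: JUMPI past the revert when the value on
    # top of the stack is truthy, otherwise REVERT (optionally returning the
    # reason already stored at mem_start..mem_start+mem_len).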
o = []
end_symbol = mksymbol()
o.extend([end_symbol, 'JUMPI'])
if (mem_start, mem_len) == (None, None):
o.extend(['PUSH1', 0, 'DUP1', 'REVERT'])
else:
o.extend([mem_len, mem_start, 'REVERT'])
o.extend([end_symbol, 'JUMPDEST'])
return o
# Compiles LLL to assembly
def compile_to_assembly(code, withargs=None, break_dest=None, height=0):
if withargs is None:
withargs = {}
# Opcodes
if isinstance(code.value, str) and code.value.upper() in opcodes:
o = []
for i, c in enumerate(code.args[::-1]):
o.extend(compile_to_assembly(c, withargs, break_dest, height + i))
o.append(code.value.upper())
return o
# Numbers
elif isinstance(code.value, int):
if code.value <= -2**255:
raise Exception("Value too low: %d" % code.value)
elif code.value >= 2**256:
raise Exception("Value too high: %d" % code.value)
bytez = num_to_bytearray(code.value % 2**256) or [0]
return ['PUSH' + str(len(bytez))] + bytez
# Variables connected to with statements
elif isinstance(code.value, str) and code.value in withargs:
if height - withargs[code.value] > 16:
raise Exception("With statement too deep")
return ['DUP' + str(height - withargs[code.value])]
# Setting variables connected to with statements
elif code.value == "set":
if len(code.args) != 2 or code.args[0].value not in withargs:
raise Exception("Set expects two arguments, the first being a stack variable")
if height - withargs[code.args[0].value] > 16:
raise Exception("With statement too deep")
return compile_to_assembly(code.args[1], withargs, break_dest, height) + \
['SWAP' + str(height - withargs[code.args[0].value]), 'POP']
# Pass statements
elif code.value == 'pass':
return []
# Code length
elif code.value == '~codelen':
return ['_sym_codeend']
# Calldataload equivalent for code
elif code.value == 'codeload':
return compile_to_assembly(LLLnode.from_list(['seq', ['codecopy', MemoryPositions.FREE_VAR_SPACE, code.args[0], 32], ['mload', MemoryPositions.FREE_VAR_SPACE]]),
withargs, break_dest, height)
# If statements (2 arguments, ie. if x: y)
elif code.value == 'if' and len(code.args) == 2:
o = []
o.extend(compile_to_assembly(code.args[0], withargs, break_dest, height))
end_symbol = mksymbol()
o.extend(['ISZERO', end_symbol, 'JUMPI'])
o.extend(compile_to_assembly(code.args[1], withargs, break_dest, height))
o.extend([end_symbol, 'JUMPDEST'])
return o
# If statements (3 arguments, ie. if x: y, else: z)
elif code.value == 'if' and len(code.args) == 3:
o = []
o.extend(compile_to_assembly(code.args[0], withargs, break_dest, height))
mid_symbol = mksymbol()
end_symbol = mksymbol()
o.extend(['ISZERO', mid_symbol, 'JUMPI'])
o.extend(compile_to_assembly(code.args[1], withargs, break_dest, height))
o.extend([end_symbol, 'JUMP', mid_symbol, 'JUMPDEST'])
o.extend(compile_to_assembly(code.args[2], withargs, break_dest, height))
o.extend([end_symbol, 'JUMPDEST'])
return o
# Repeat statements (compiled from for loops)
# Repeat(memloc, start, rounds, body)
elif code.value == 'repeat':
o = []
loops = num_to_bytearray(code.args[2].value)
start, continue_dest, end = mksymbol(), mksymbol(), mksymbol()
o.extend(compile_to_assembly(code.args[0], withargs, break_dest, height))
o.extend(compile_to_assembly(code.args[1], withargs, break_dest, height + 1))
o.extend(['PUSH' + str(len(loops))] + loops)
# stack: memloc, startvalue, rounds
o.extend(['DUP2', 'DUP4', 'MSTORE', 'ADD', start, 'JUMPDEST'])
# stack: memloc, exit_index
o.extend(compile_to_assembly(code.args[3], withargs, (end, continue_dest, height + 2), height + 2))
# stack: memloc, exit_index
o.extend([continue_dest, 'JUMPDEST', 'DUP2', 'MLOAD', 'PUSH1', 1, 'ADD', 'DUP1', 'DUP4', 'MSTORE'])
# stack: len(loops), index memory address, new index
o.extend(['DUP2', 'EQ', 'ISZERO', start, 'JUMPI', end, 'JUMPDEST', 'POP', 'POP'])
return o
# Continue to the next iteration of the for loop
elif code.value == 'continue':
if not break_dest:
raise Exception("Invalid break")
dest, continue_dest, break_height = break_dest
return [continue_dest, 'JUMP']
# Break from inside a for loop
elif code.value == 'break':
if not break_dest:
raise Exception("Invalid break")
dest, continue_dest, break_height = break_dest
return ['POP'] * (height - break_height) + [dest, 'JUMP']
# With statements
elif code.value == 'with':
o = []
o.extend(compile_to_assembly(code.args[1], withargs, break_dest, height))
old = withargs.get(code.args[0].value, None)
withargs[code.args[0].value] = height
o.extend(compile_to_assembly(code.args[2], withargs, break_dest, height + 1))
if code.args[2].valency:
o.extend(['SWAP1', 'POP'])
else:
o.extend(['POP'])
if old is not None:
withargs[code.args[0].value] = old
else:
del withargs[code.args[0].value]
return o
# LLL statement (used to contain code inside code)
elif code.value == 'lll':
o = []
begincode = mksymbol()
endcode = mksymbol()
o.extend([endcode, 'JUMP', begincode, 'BLANK'])
o.append(compile_to_assembly(code.args[0], {}, None, 0)) # Append is intentional
o.extend([endcode, 'JUMPDEST', begincode, endcode, 'SUB', begincode])
o.extend(compile_to_assembly(code.args[1], withargs, break_dest, height))
o.extend(['CODECOPY', begincode, endcode, 'SUB'])
return o
# Seq (used to piece together multiple statements)
elif code.value == 'seq':
o = []
for arg in code.args:
o.extend(compile_to_assembly(arg, withargs, break_dest, height))
if arg.valency == 1 and arg != code.args[-1]:
o.append('POP')
return o
# Assert (if false, exit)
elif code.value == 'assert':
o = compile_to_assembly(code.args[0], withargs, break_dest, height)
o.extend(get_revert())
return o
elif code.value == 'assert_reason':
o = compile_to_assembly(code.args[0], withargs, break_dest, height)
mem_start = compile_to_assembly(code.args[1], withargs, break_dest, height)
mem_len = compile_to_assembly(code.args[2], withargs, break_dest, height)
o.extend(get_revert(mem_start, mem_len))
return o
# Unsigned/signed clamp, check less-than
elif code.value in ('uclamplt', 'uclample', 'clamplt', 'clample', 'uclampgt', 'uclampge', 'clampgt', 'clampge'):
if isinstance(code.args[0].value, int) and isinstance(code.args[1].value, int):
# Checks for clamp errors at compile time as opposed to run time
if code.value in ('uclamplt', 'clamplt') and 0 <= code.args[0].value < code.args[1].value or \
code.value in ('uclample', 'clample') and 0 <= code.args[0].value <= code.args[1].value or \
code.value in ('uclampgt', 'clampgt') and 0 <= code.args[0].value > code.args[1].value or \
code.value in ('uclampge', 'clampge') and 0 <= code.args[0].value >= code.args[1].value:
return compile_to_assembly(code.args[0], withargs, break_dest, height)
else:
raise Exception("Invalid %r with values %r and %r" % (code.value, code.args[0], code.args[1]))
o = compile_to_assembly(code.args[0], withargs, break_dest, height)
o.extend(compile_to_assembly(code.args[1], withargs, break_dest, height + 1))
o.extend(['DUP2'])
# Stack: num num bound
if code.value == 'uclamplt':
o.extend(['LT'])
elif code.value == "clamplt":
o.extend(['SLT'])
elif code.value == "uclample":
o.extend(['GT', 'ISZERO'])
elif code.value == "clample":
o.extend(['SGT', 'ISZERO'])
elif code.value == 'uclampgt':
o.extend(['GT'])
elif code.value == "clampgt":
o.extend(['SGT'])
elif code.value == "uclampge":
o.extend(['LT', 'ISZERO'])
elif code.value == "clampge":
o.extend(['SLT', 'ISZERO'])
o.extend(get_revert())
return o
# Signed clamp, check against upper and lower bounds
elif code.value in ('clamp', 'uclamp'):
comp1 = 'SGT' if code.value == 'clamp' else 'GT'
comp2 = 'SLT' if code.value == 'clamp' else 'LT'
o = compile_to_assembly(code.args[0], withargs, break_dest, height)
o.extend(compile_to_assembly(code.args[1], withargs, break_dest, height + 1))
o.extend(['DUP1'])
o.extend(compile_to_assembly(code.args[2], withargs, break_dest, height + 3))
o.extend(['SWAP1', comp1, 'ISZERO'])
o.extend(get_revert())
o.extend(['DUP1', 'SWAP2', 'SWAP1', comp2, 'ISZERO'])
o.extend(get_revert())
return o
# Checks that a value is nonzero
elif code.value == 'clamp_nonzero':
o = compile_to_assembly(code.args[0], withargs, break_dest, height)
o.extend(['DUP1'])
o.extend(get_revert())
return o
# SHA3 a single value
elif code.value == 'sha3_32':
o = compile_to_assembly(code.args[0], withargs, break_dest, height)
o.extend([
'PUSH1', MemoryPositions.FREE_VAR_SPACE,
'MSTORE',
'PUSH1', 32,
'PUSH1', MemoryPositions.FREE_VAR_SPACE,
'SHA3'
])
return o
# SHA3 a 64 byte value
elif code.value == 'sha3_64':
o = compile_to_assembly(code.args[0], withargs, break_dest, height)
o.extend(compile_to_assembly(code.args[1], withargs, break_dest, height))
o.extend([
'PUSH1', MemoryPositions.FREE_VAR_SPACE2,
'MSTORE',
'PUSH1', MemoryPositions.FREE_VAR_SPACE,
'MSTORE',
'PUSH1', 64,
'PUSH1', MemoryPositions.FREE_VAR_SPACE,
'SHA3'
])
return o
# <= operator
elif code.value == 'le':
return compile_to_assembly(LLLnode.from_list(['iszero', ['gt', code.args[0], code.args[1]]]), withargs, break_dest, height)
# >= operator
elif code.value == 'ge':
return compile_to_assembly(LLLnode.from_list(['iszero', ['lt', code.args[0], code.args[1]]]), withargs, break_dest, height)
# <= operator
elif code.value == 'sle':
return compile_to_assembly(LLLnode.from_list(['iszero', ['sgt', code.args[0], code.args[1]]]), withargs, break_dest, height)
# >= operator
elif code.value == 'sge':
return compile_to_assembly(LLLnode.from_list(['iszero', ['slt', code.args[0], code.args[1]]]), withargs, break_dest, height)
# != operator
elif code.value == 'ne':
return compile_to_assembly(LLLnode.from_list(['iszero', ['eq', code.args[0], code.args[1]]]), withargs, break_dest, height)
# e.g. 95 -> 96, 96 -> 96, 97 -> 128
elif code.value == "ceil32":
return compile_to_assembly(LLLnode.from_list(['with', '_val', code.args[0],
['sub', ['add', '_val', 31],
['mod', ['sub', '_val', 1], 32]]]), withargs, break_dest, height)
# # jump to a symbol
elif code.value == 'goto':
return [
'_sym_' + str(code.args[0]),
'JUMP'
]
elif isinstance(code.value, str) and code.value.startswith('_sym_'):
return code.value
# set a symbol as a location.
elif code.value == 'label':
return [
'_sym_' + str(code.args[0]),
'JUMPDEST'
]
# inject debug opcode.
elif code.value == 'debugger':
return ['PUSH1', code.pos[0], 'DEBUG']
else:
raise Exception("Weird code element: " + repr(code))
# Assembles assembly into EVM
def assembly_to_evm(assembly):
posmap = {}
sub_assemblies = []
codes = []
pos = 0
for i, item in enumerate(assembly):
if is_symbol(item):
if assembly[i + 1] == 'JUMPDEST' or assembly[i + 1] == 'BLANK':
posmap[item] = pos # Don't increment position as the symbol itself doesn't go into code
else:
pos += 3 # PUSH2 highbits lowbits
elif item == 'BLANK':
pos += 0
elif isinstance(item, list):
c = assembly_to_evm(item)
sub_assemblies.append(item)
codes.append(c)
pos += len(c)
else:
pos += 1
posmap['_sym_codeend'] = pos
o = b''
for i, item in enumerate(assembly):
if is_symbol(item):
if assembly[i + 1] != 'JUMPDEST' and assembly[i + 1] != 'BLANK':
o += bytes([PUSH_OFFSET + 2, posmap[item] // 256, posmap[item] % 256])
elif isinstance(item, int):
o += bytes([item])
elif isinstance(item, str) and item.upper() in opcodes:
o += bytes([opcodes[item.upper()][0]])
elif item[:4] == 'PUSH':
o += bytes([PUSH_OFFSET + int(item[4:])])
elif item[:3] == 'DUP':
o += bytes([DUP_OFFSET + int(item[3:])])
elif item[:4] == 'SWAP':
o += bytes([SWAP_OFFSET + int(item[4:])])
elif item == 'BLANK':
pass
elif isinstance(item, list):
for i in range(len(sub_assemblies)):
if sub_assemblies[i] == item:
o += codes[i]
break
else:
# Should never reach because, assembly is create in compile_to_assembly.
raise Exception("Weird symbol in assembly: " + str(item)) # pragma: no cover
assert len(o) == pos
return o
| 42.082857 | 169 | 0.578179 |
79449aadfa4f3a71ce048634bcb5dbdc0a0cc222 | 3,340 | py | Python | location_finder/location_finder/settings.py | abhisinha4395/propamc_assignment | 0b30a45ad06ac87b5e5c6f411581d76a1f745b66 | ["MIT"] | null | null | null | location_finder/location_finder/settings.py | abhisinha4395/propamc_assignment | 0b30a45ad06ac87b5e5c6f411581d76a1f745b66 | ["MIT"] | null | null | null | location_finder/location_finder/settings.py | abhisinha4395/propamc_assignment | 0b30a45ad06ac87b5e5c6f411581d76a1f745b66 | ["MIT"] | null | null | null |
"""
Django settings for location_finder project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's2b(a0eb&w=cxphvme-yh*yzglvc!kwil015f38oppwk9v73j6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pinpoint',
'bootstrap4'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'location_finder.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'location_finder.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| 25.891473 | 91 | 0.700599 |
79449b415d4fd3d38e99bd915d84cc9156fc8122 | 14,230 | py | Python | train_ssd.py | shpach/ssd_keras | 08aca69e8cc1b1917aaec78d4c34a5cde22f404a | ["Apache-2.0"] | 1 | 2018-11-11T05:52:29.000Z | 2018-11-11T05:52:29.000Z | train_ssd.py | shpach/ssd_keras | 08aca69e8cc1b1917aaec78d4c34a5cde22f404a | ["Apache-2.0"] | null | null | null | train_ssd.py | shpach/ssd_keras | 08aca69e8cc1b1917aaec78d4c34a5cde22f404a | ["Apache-2.0"] | null | null | null |
from tensorflow.python.lib.io import file_io
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger
from keras import backend as K
import tensorflow as tf
from keras.models import load_model
from keras.utils import plot_model
from math import ceil
import numpy as np
#from matplotlib import pyplot as plt
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__)))
os.environ["TF_CPP_MIN_LOG_LEVEL"]="3"
from models.keras_ssd300 import ssd_300
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from keras_layers.keras_layer_L2Normalization import L2Normalization
from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder
from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast
from data_generator.object_detection_2d_data_generator import DataGenerator
from data_generator.object_detection_2d_geometric_ops import Resize
from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels
from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation
from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms
from tensorflow.python.lib.io import file_io
import argparse
from tensorflow.python.client import device_lib
print("CHECK GPU USAGE!")
print(device_lib.list_local_devices())
K.tensorflow_backend._get_available_gpus()
img_height = 300 # Height of the model input images
img_width = 300 # Width of the model input images
img_channels = 3 # Number of color channels of the model input images
mean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.
swap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.
n_classes = 20 # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO
scales_pascal = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets
scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets
scales = scales_pascal
aspect_ratios = [[1.0, 2.0, 0.5, 3.0, 1.0/3.0, 4.0, 0.25],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0, 4.0, 0.25],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0]] # The anchor box aspect ratios used in the original SSD300; the order matters
two_boxes_for_ar1 = True
steps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer.
offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.
clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries
variances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation
normalize_coords = True
def main(job_dir, **args):
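    # End-to-end SSD300 training run: copy the Pascal VOC data down from GCS,
    # build and compile the model, parse the datasets, set up augmentation and
    # the anchor-box label encoder, train, and upload the saved weights back to
    # the GCS bucket.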
##Setting up the path for saving logs
logs_dir = job_dir + 'logs/'
data_dir = "gs://deeplearningteam11/data"
print("Current Directory: " + os.path.dirname(__file__))
print("Lets copy the data to: " + os.path.dirname(__file__))
os.system("gsutil -m cp -r " + data_dir + " " + os.path.dirname(__file__) + " > /dev/null 2>&1 " )
#exit(0)
with tf.device('/device:GPU:0'):
# 1: Build the Keras model.
K.clear_session() # Clear previous models from memory.
model = ssd_300(image_size=(img_height, img_width, img_channels),
n_classes=n_classes,
mode='training',
l2_regularization=0.0005,
scales=scales,
aspect_ratios_per_layer=aspect_ratios,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
normalize_coords=normalize_coords,
subtract_mean=mean_color,
swap_channels=swap_channels)
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
model.summary()
# 1: Instantiate two `DataGenerator` objects: One for training, one for validation.
train_dataset = DataGenerator(load_images_into_memory=True, hdf5_dataset_path=None)
val_dataset = DataGenerator(load_images_into_memory=True, hdf5_dataset_path=None)
# 2: Parse the image and label lists for the training and validation datasets. This can take a while.
# VOC 2007
# The directories that contain the images.
VOC_2007_train_images_dir = 'data/data/VOC2007/train/JPEGImages/'
VOC_2007_test_images_dir = 'data/data/VOC2007/test/JPEGImages/'
VOC_2007_train_anns_dir = 'data/data/VOC2007/train/Annotations/'
VOC_2007_test_anns_dir = 'data/data/VOC2007/test/Annotations/'
# The paths to the image sets.
VOC_2007_trainval_image_set_dir = 'data/data/VOC2007/train/ImageSets/Main/'
VOC_2007_test_image_set_dir = 'data/data/VOC2007/test/ImageSets/Main/'
VOC_2007_train_images_dir = os.path.dirname(__file__) + "/" + VOC_2007_train_images_dir
VOC_2007_test_images_dir = os.path.dirname(__file__) + "/" + VOC_2007_test_images_dir
VOC_2007_train_anns_dir = os.path.dirname(__file__) + "/" + VOC_2007_train_anns_dir
VOC_2007_test_anns_dir = os.path.dirname(__file__) + "/" + VOC_2007_test_anns_dir
VOC_2007_trainval_image_set_dir = os.path.dirname(__file__) + "/" + VOC_2007_trainval_image_set_dir
VOC_2007_test_image_set_dir = os.path.dirname(__file__) + "/" + VOC_2007_test_image_set_dir
VOC_2007_trainval_image_set_filename = VOC_2007_trainval_image_set_dir + '/trainval.txt'
VOC_2007_test_image_set_filename = VOC_2007_test_image_set_dir + '/test.txt'
# The XML parser needs to now what object class names to look for and in which order to map them to integers.
classes = ['background',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor']
print("Parsing Training Data ...")
train_dataset.parse_xml(images_dirs=[VOC_2007_train_images_dir],
image_set_filenames=[VOC_2007_trainval_image_set_filename],
annotations_dirs=[VOC_2007_train_anns_dir],
classes=classes,
include_classes='all',
exclude_truncated=False,
exclude_difficult=False,
ret=False,
verbose=False)
print("Done")
print("================================================================")
print("Parsing Test Data ...")
val_dataset.parse_xml(images_dirs=[VOC_2007_test_images_dir],
image_set_filenames=[VOC_2007_test_image_set_filename],
annotations_dirs=[VOC_2007_test_anns_dir],
classes=classes,
include_classes='all',
exclude_truncated=False,
exclude_difficult=True,
ret=False,
verbose=False)
print("Done")
print("================================================================")
# 3: Set the batch size.
batch_size = 32 # Change the batch size if you like, or if you run into GPU memory issues.
# 4: Set the image transformations for pre-processing and data augmentation options.
# For the training generator:
ssd_data_augmentation = SSDDataAugmentation(img_height=img_height,
img_width=img_width,
background=mean_color)
# For the validation generator:
convert_to_3_channels = ConvertTo3Channels()
resize = Resize(height=img_height, width=img_width)
# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.
# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.
predictor_sizes = [model.get_layer('conv4_4_norm_mbox_conf').output_shape[1:3],
model.get_layer('fc7_mbox_conf').output_shape[1:3],
model.get_layer('conv8_2_mbox_conf').output_shape[1:3],
model.get_layer('conv9_2_mbox_conf').output_shape[1:3],
model.get_layer('conv10_2_mbox_conf').output_shape[1:3],
model.get_layer('conv11_2_mbox_conf').output_shape[1:3]]
ssd_input_encoder = SSDInputEncoder(img_height=img_height,
img_width=img_width,
n_classes=n_classes,
predictor_sizes=predictor_sizes,
scales=scales,
aspect_ratios_per_layer=aspect_ratios,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
matching_type='multi',
pos_iou_threshold=0.5,
neg_iou_limit=0.5,
normalize_coords=normalize_coords)
# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.
train_generator = train_dataset.generate(batch_size=batch_size,
shuffle=True,
transformations=[ssd_data_augmentation],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
val_generator = val_dataset.generate(batch_size=batch_size,
shuffle=False,
transformations=[convert_to_3_channels,
resize],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
# Get the number of samples in the training and validations datasets.
train_dataset_size = train_dataset.get_dataset_size()
val_dataset_size = val_dataset.get_dataset_size()
print("Number of images in the training dataset:\t{:>6}".format(train_dataset_size))
print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size))
# Define a learning rate schedule.
def lr_schedule(epoch):
if epoch < 80:
return 0.001
elif epoch < 100:
return 0.0001
else:
return 0.00001
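# The schedule holds the learning rate at 0.001 for the first 80 epochs, then drops it tenfold at epoch 80 and again at epoch 100.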
learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,
verbose=1)
terminate_on_nan = TerminateOnNaN()
callbacks = [learning_rate_scheduler,
terminate_on_nan]
# If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly.
initial_epoch = 0
final_epoch = 120
steps_per_epoch = 500
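# With batch_size = 32 and 500 steps per epoch, each epoch draws 32 * 500 = 16,000 (augmented) training samples.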
history = model.fit_generator(generator=train_generator,
steps_per_epoch=steps_per_epoch,
epochs=final_epoch,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=ceil(val_dataset_size/batch_size),
initial_epoch=initial_epoch)
model_name = "vgg19BNReLUmodel.h5"
model.save(model_name)
with file_io.FileIO(model_name, mode='rb') as input_f:
with file_io.FileIO("gs://deeplearningteam11/" + model_name, mode='w+') as output_f:
output_f.write(input_f.read())
## Running the app
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Input Arguments
parser.add_argument(
'--job-dir',
help='GCS location to write checkpoints and export models',
required=True
)
args = parser.parse_args()
arguments = args.__dict__
main(**arguments)
| 51.003584 | 192 | 0.595643 |
79449b8597ab9e3ad9b68cdc2f3e1361a27d828f | 6,564 | py | Python | webcrawler/crawlers.py | mrafayaleem/simple-crawler | 673dc41915bb70b22b1a50d5256c45efd361c2d5 | [
"MIT"
] | 27 | 2016-03-08T11:43:24.000Z | 2021-03-10T11:51:58.000Z | webcrawler/crawlers.py | mrafayaleem/simple-crawler | 673dc41915bb70b22b1a50d5256c45efd361c2d5 | [
"MIT"
] | null | null | null | webcrawler/crawlers.py | mrafayaleem/simple-crawler | 673dc41915bb70b22b1a50d5256c45efd361c2d5 | [
"MIT"
] | 1 | 2022-02-11T05:55:13.000Z | 2022-02-11T05:55:13.000Z | import workerpool
import logging
import threading
from urllib2 import urlopen
from urlparse import urljoin
from Queue import Queue
from lxml import etree
from utils.xpath import build_abs_url_xpath
from utils.xpath import build_relative_url_xpath
from utils.url import is_url_in_domain
from utils.url import is_absolute
from utils.hash import url_hash
from utils.stdout import setup_stdout
setup_stdout()
logger = logging.getLogger('webcrawler')
# A list of extensions that we would ignore when encountered in href links
IGNORED_HREF_EXTENSIONS = [
# frontend
'.css', 'js',
# images
'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif',
'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg',
]
class SimpleCrawler(object):
"""A recursive exhaustive crawler implementation.
"""
# Build absolute url xpath to extract absolute urls from doc
abs_url_xpath = build_abs_url_xpath(
IGNORED_HREF_EXTENSIONS
)
# Build relative url xpath to extract relative urls from doc
relative_url_xpath = build_relative_url_xpath(
IGNORED_HREF_EXTENSIONS
)
_lock = threading.Lock()
def __init__(
self, spider, collectors=None,
post_processors=None, concurrent_requests=1):
self.pool = workerpool.WorkerPool(size=concurrent_requests)
self.spider = spider
self.collectors = collectors if collectors else []
self.post_processors = post_processors if post_processors else []
self.result = Queue()
self.url_hashes = set()
def start(self):
"""The entry point for the crawler. It starts from the start urls
and crawls exhaustively on them. Implemented using the workerpool
pattern.
"""
more_urls = []
for url in self.spider.start_urls:
_hash = url_hash(url)
if is_absolute(url) and is_url_in_domain(
url, self.spider.domains) and (_hash not in self.url_hashes):
# Collect urls from start_urls
more_urls = more_urls + self._crawl(
url=url,
spider=self.spider,
url_hashes=self.url_hashes,
collectors=self.collectors,
post_processors=self.post_processors
)
# Start asynchronous crawls and keep on crawling until all urls are
# exhasuted
while more_urls:
# While we have more urls, send these urls to workers to process
# and collect new urls discovered within the html and repeat.
# This is the actual implementation for exhaustive crawling.
_more_urls = self.pool.map(
lambda x: self._crawl(
x, self.spider, self.url_hashes, self.collectors,
self.post_processors
), more_urls
)
# For more discovered urls, reduce all the results to a single
# big list.
more_urls = reduce(lambda x, y: x + y, _more_urls)
# Wait for all the workers to finish the job and shutdown gracefully.
self.pool.shutdown()
self.pool.join()
@classmethod
def _crawl(cls, url, spider, url_hashes, collectors, post_processors):
_blank = []
# Add hashes using a lock to avoid race condition between worker
# threads.
with cls._lock:
if url_hash(url) in url_hashes:
return _blank
else:
url_hashes.add(url_hash(url))
# If a request fails, log and return
try:
logger.info('Crawling: %s', url)
response = urlopen(url)
except Exception as e:
logger.error('Request failed for url %s Exception: %s', url, e)
return _blank
# If parsing fails, log and return
try:
htmlparser = etree.HTMLParser()
tree = etree.parse(response, htmlparser)
except Exception as e:
logger.error(
'Failed parsing response for url %s Exception: %s', url, e)
return _blank
try:
abs_urls = tree.xpath(
cls.abs_url_xpath,
namespaces={"re": "http://exslt.org/regular-expressions"}
)
except Exception as e:
logger.error('Absolute url extraction failed for %s', url)
abs_urls = []
try:
relative_urls = tree.xpath(
cls.relative_url_xpath,
namespaces={"re": "http://exslt.org/regular-expressions"}
)
except Exception as e:
logger.error('Relative url extraction failed for %s', url)
relative_urls = []
# Filter out all absolute urls that are outside of domain. This is
# only valid for absolute urls as relative urls are always within
# the domain.
abs_urls = filter(
lambda x: is_url_in_domain(x, spider.domains), abs_urls
)
# Build absolute urls from relative urls and merge in abs_urls.
abs_urls = abs_urls + [urljoin(url, r_url) for r_url in relative_urls]
logger.info('%s more urls discovered on %s', len(abs_urls), url)
urls_to_crawl = []
# At this point, we are sure that every url in abs_urls is absolute
# and lies within the domain. Next, we filter which url to actually
# crawl.
for abs_url in abs_urls:
_hash = url_hash(abs_url)
if _hash not in url_hashes:
urls_to_crawl.append(abs_url)
# Here we call the spider parse method and pass the result to the
# collectors.
try:
parsed = spider.parse(response, tree)
except Exception as e:
logger.error('Error parsing HTML for %s: Exception %s', url, e)
else:
logger.info('Parsed HTML for %s', url)
try:
parsed = reduce(
lambda x, y: y.clean(x),
collectors, parsed
)
except Exception as e:
logger.error('Error cleaning %s: Exception %s', url, e)
else:
try:
for post_procesor in post_processors:
post_procesor.process(parsed)
except Exception as e:
logger.error(
'Error post processing %s: Exception %s', url, e)
return urls_to_crawl
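# Illustrative usage (MySpider is a hypothetical spider class exposing `start_urls`, `domains` and `parse(response, tree)`):
#
#   crawler = SimpleCrawler(spider=MySpider(), concurrent_requests=4)
#   crawler.start()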
| 33.835052 | 81 | 0.584247 |
79449bb6d4b783cd527d7f6b0f7c4d33938b970e | 21,034 | py | Python | onmt/inference/fast_translator.py | jniehues-kit/NMTGMinor | 7631ce9c4f19fc7d0ebf475860fa60e681847969 | [
"MIT"
] | 1 | 2019-09-11T10:09:37.000Z | 2019-09-11T10:09:37.000Z | onmt/inference/fast_translator.py | jniehues-kit/NMTGMinor | 7631ce9c4f19fc7d0ebf475860fa60e681847969 | [
"MIT"
] | null | null | null | onmt/inference/fast_translator.py | jniehues-kit/NMTGMinor | 7631ce9c4f19fc7d0ebf475860fa60e681847969 | [
"MIT"
] | 1 | 2019-09-15T17:22:58.000Z | 2019-09-15T17:22:58.000Z | import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from torch.autograd import Variable
from onmt.model_factory import build_model
import torch.nn.functional as F
from onmt.inference.search import BeamSearch, DiverseBeamSearch
from onmt.inference.translator import Translator
model_list = ['transformer', 'stochastic_transformer']
class FastTranslator(Translator):
"""
A fast implementation of the Beam Search based translator
Based on Fairseq implementation
"""
def __init__(self, opt):
super().__init__(opt)
self.search = BeamSearch(self.tgt_dict)
self.eos = onmt.constants.EOS
self.pad = onmt.constants.PAD
self.bos = self.bos_id
self.vocab_size = self.tgt_dict.size()
self.min_len = 1
self.normalize_scores = opt.normalize
self.len_penalty = opt.alpha
self.buffering = not opt.no_buffering
if hasattr(opt, 'no_repeat_ngram_size'):
self.no_repeat_ngram_size = opt.no_repeat_ngram_size
else:
self.no_repeat_ngram_size = 0
if hasattr(opt, 'dynamic_max_len'):
self.dynamic_max_len = opt.dynamic_max_len
else:
self.dynamic_max_len = False
if hasattr(opt, 'dynamic_max_len_scale'):
self.dynamic_max_len_scale = opt.dynamic_max_len_scale
else:
self.dynamic_max_len_scale = 1.2
if opt.verbose:
print('* Current bos id: %d' % self.bos_id, onmt.constants.BOS)
print('* Using fast beam search implementation')
def translateBatch(self, batch):
with torch.no_grad():
return self._translateBatch(batch)
def _translateBatch(self, batch):
# Batch size is in different location depending on data.
beam_size = self.opt.beam_size
bsz = batch_size = batch.size
max_len = self.opt.max_sent_length
gold_scores = batch.get('source').data.new(batch_size).float().zero_()
gold_words = 0
allgold_scores = []
if batch.has_target:
# Use the first model to decode
model_ = self.models[0]
gold_words, gold_scores, allgold_scores = model_.decode(batch)
# (3) Start decoding
# initialize buffers
src = batch.get('source')
scores = src.new(bsz * beam_size, max_len + 1).float().fill_(0)
scores_buf = scores.clone()
tokens = src.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
tokens_buf = tokens.clone()
tokens[:, 0].fill_(self.bos) # first token is bos
attn, attn_buf = None, None
nonpad_idxs = None
src_tokens = src.transpose(0, 1) # batch x time
src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask
prefix_tokens = None
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
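        # e.g. with bsz = 2 and beam_size = 5, bbsz_offsets is [[0], [5]], so hypothesis j of sentence i
        # lives at flattened row i * beam_size + j of `tokens` and `scores`.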
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfinalized_scores=None):
"""
            Check whether we've finished generation for a given sentence:
            generation is finished once `beam_size` hypotheses have been
            finalized for that sentence.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
scores for each hypothesis
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
            tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is BOS
assert not tokens_clone.eq(self.eos).any()
tokens_clone[:, step] = self.eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step + 2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
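                # with len_penalty (alpha) = 1.0 this reduces to averaging the cumulative log-prob over the hypothesis length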
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
sents_seen.add((sent, unfin_idx))
# if self.match_source_len and step > src_lengths[unfin_idx]:
# score = -math.inf
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = None
return {
'tokens': tokens_clone[i],
'score': score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': None,
'positional_scores': pos_scores[i],
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfin_idx):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
reorder_state = None
batch_idxs = None
# initialize the decoder state, including:
# - expanding the context over the batch dimension len_src x (B*beam) x H
# - expanding the mask over the batch dimension (B*beam) x len_src
decoder_states = dict()
for i in range(self.n_models):
decoder_states[i] = self.models[i].create_decoder_state(batch, beam_size, type=2, buffering=self.buffering)
if self.dynamic_max_len:
src_len = src.size(0)
max_len = math.ceil(int(src_len) * self.dynamic_max_len_scale)
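            # e.g. with the default scale of 1.2, a 50-token source caps decoding at ceil(50 * 1.2) = 60 steps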
# Start decoding
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
for i, model in enumerate(self.models):
decoder_states[i]._reorder_incremental_state(reorder_state)
decode_input = tokens[:, :step + 1]
lprobs, avg_attn_scores = self._decode(decode_input, decoder_states)
avg_attn_scores = None
lprobs[:, self.pad] = -math.inf # never select pad
# handle min and max length constraints
if step >= max_len:
lprobs[:, :self.eos] = -math.inf
lprobs[:, self.eos + 1:] = -math.inf
elif step < self.min_len:
lprobs[:, self.eos] = -math.inf
# handle prefix tokens (possibly with different lengths)
# if prefix_tokens is not None and step < prefix_tokens.size(1):
# prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
# prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
# prefix_mask = prefix_toks.ne(self.pad)
# lprobs[prefix_mask] = -math.inf
# lprobs[prefix_mask] = lprobs[prefix_mask].scatter_(
# -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs
# )
# # if prefix includes eos, then we should make sure tokens and
# # scores are the same across all beams
# eos_mask = prefix_toks.eq(self.eos)
# if eos_mask.any():
# # validate that the first beam matches the prefix
# first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
# eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
# target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
# assert (first_beam == target_prefix).all()
#
# def replicate_first_beam(tensor, mask):
# tensor = tensor.view(-1, beam_size, tensor.size(-1))
# tensor[mask] = tensor[mask][:, :1, :]
# return tensor.view(-1, tensor.size(-1))
#
# # copy tokens, scores and lprobs from the first beam to all beams
# tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
# scores = replicate_first_beam(scores, eos_mask_batch_dim)
# lprobs = replicate_first_beam(lprobs, eos_mask_batch_dim)
if self.no_repeat_ngram_size > 0:
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
gen_tokens = tokens[bbsz_idx].tolist()
for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
attn_buf = attn.clone()
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
scores_buf = scores_buf.type_as(lprobs)
eos_bbsz_idx = buffer('eos_bbsz_idx')
eos_scores = buffer('eos_scores', type_of=scores)
if self.no_repeat_ngram_size > 0:
def calculate_banned_tokens(bbsz_idx):
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
return gen_ngrams[bbsz_idx].get(ngram_index, [])
if step + 2 - self.no_repeat_ngram_size >= 0:
# no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
else:
banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos (except for blacklisted ones)
eos_mask = cand_indices.eq(self.eos)
eos_mask[:, :beam_size][blacklist] = 0
# only consider eos when it's among the top beam_size indices
torch.masked_select(
cand_bbsz_idx[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_bbsz_idx,
)
finalized_sents = set()
if eos_bbsz_idx.numel() > 0:
torch.masked_select(
cand_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_scores,
)
finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
assert step < max_len
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = batch_mask.nonzero().squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
# if prefix_tokens is not None:
# prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
blacklist = blacklist[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos or
# blacklisted hypos and values < cand_size indicate candidate
# active hypos. After this, the min values per row are the top
# candidate active hypos.
active_mask = buffer('active_mask')
eos_mask[:, :beam_size] |= blacklist
torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[:eos_mask.size(1)],
out=active_mask,
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(new_blacklist, active_hypos)
)
# update blacklist to ignore any finalized hypos
blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
assert (~blacklist).any(dim=1).all()
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx,
)
active_scores = torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
return finalized, gold_scores, gold_words, allgold_scores
def _decode(self, tokens, decoder_states):
# require batch first for everything
outs = dict()
attns = dict()
for i in range(self.n_models):
decoder_output = self.models[i].step(tokens, decoder_states[i])
# take the last decoder state
# decoder_hidden = decoder_hidden.squeeze(1)
# attns[i] = coverage[:, -1, :].squeeze(1) # batch * beam x src_len
# batch * beam x vocab_size
# outs[i] = self.models[i].generator(decoder_hidden)
outs[i] = decoder_output['log_prob']
attns[i] = decoder_output['coverage']
out = self._combine_outputs(outs)
attn = self._combine_attention(attns)
        # attn = attn[:, -1, :]
        attn = None  # the combined attention is currently unused by callers, so it is not returned
return out, attn
def translate(self, src_data, tgt_data, type='mt'):
# (1) convert words to indexes
dataset = self.build_data(src_data, tgt_data, type=type)
batch = dataset.get_batch(0)
if self.cuda:
batch.cuda(fp16=self.fp16)
batch_size = batch.size
# (2) translate
finalized, gold_score, gold_words, allgold_words = self.translateBatch(batch)
pred_length = []
# (3) convert indexes to words
pred_batch = []
for b in range(batch_size):
pred_batch.append(
[self.build_target_tokens(finalized[b][n]['tokens'], src_data[b], None)
for n in range(self.opt.n_best)]
)
pred_score = []
for b in range(batch_size):
pred_score.append(
[torch.FloatTensor([finalized[b][n]['score']])
for n in range(self.opt.n_best)]
)
return pred_batch, pred_score, pred_length, gold_score, gold_words, allgold_words
| 41.817097 | 119 | 0.563374 |
79449d482bb83376f5de1a58785817a127cf428a | 1,799 | py | Python | knowledge_distillation/text_utils.py | pongnguy/knowledge_distillation | 8378714e07af5a12eb8bf98d60035e0b88b126f0 | [
"MIT"
] | 37 | 2019-12-05T15:24:09.000Z | 2022-01-02T07:42:40.000Z | knowledge_distillation/text_utils.py | pongnguy/knowledge_distillation | 8378714e07af5a12eb8bf98d60035e0b88b126f0 | [
"MIT"
] | 1 | 2020-05-18T09:39:16.000Z | 2020-12-06T02:52:00.000Z | knowledge_distillation/text_utils.py | pongnguy/knowledge_distillation | 8378714e07af5a12eb8bf98d60035e0b88b126f0 | [
"MIT"
] | 11 | 2020-01-20T09:06:22.000Z | 2022-03-27T20:06:05.000Z | # coding: utf-8
from __future__ import unicode_literals, print_function
import re
import inflect
from nltk.corpus import stopwords
from nltk.stem import LancasterStemmer, WordNetLemmatizer, SnowballStemmer
def remove_punctuation(words):
"""Remove punctuation from list of tokenized words"""
new_words = []
for word in words:
new_word = re.sub(r'[^\w\s]', '', word)
if new_word != '':
new_words.append(new_word)
return new_words
def replace_numbers(words):
"""Replace all interger occurrences in list of tokenized words with textual representation"""
p = inflect.engine()
new_words = []
for word in words:
if word.isdigit():
new_word = p.number_to_words(word)
new_words.append(new_word)
else:
new_words.append(word)
return new_words
def remove_stopwords(words):
"""Remove stop words from list of tokenized words"""
new_words = []
for word in words:
if word not in stopwords.words('english'):
new_words.append(word)
return new_words
def stem_words(words):
"""Stem words in list of tokenized words"""
stemmer = SnowballStemmer('english')
stems = []
for word in words:
stem = stemmer.stem(word)
stems.append(stem)
return stems
def lemmatize_verbs(words):
"""Lemmatize verbs in list of tokenized words"""
lemmatizer = WordNetLemmatizer()
lemmas = []
for word in words:
lemma = lemmatizer.lemmatize(word, pos='v')
lemmas.append(lemma)
return lemmas
def normalize(words):
words = remove_punctuation(words)
words = replace_numbers(words)
# words = remove_stopwords(words)
words = stem_words(words)
# words = lemmatize_verbs(words)
return words
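# Illustrative example (output is approximate and depends on the stemmer):
#   normalize(['The', '3', 'dogs', 'running']) -> roughly ['the', 'three', 'dog', 'run']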
| 25.338028 | 97 | 0.658699 |
79449e7c21add45309112648bfb8de3f2b32d5f6 | 1,077 | py | Python | test/ml/classification/test_classification.py | xenron/coco | e318d534127b769612716c05d40e3d5b090eb5a3 | [
"MIT"
] | null | null | null | test/ml/classification/test_classification.py | xenron/coco | e318d534127b769612716c05d40e3d5b090eb5a3 | [
"MIT"
] | null | null | null | test/ml/classification/test_classification.py | xenron/coco | e318d534127b769612716c05d40e3d5b090eb5a3 | [
"MIT"
] | null | null | null |
import unittest as ut
import sys
sys.path.append("../../../package")
import ml.classification
class TestClassification(ut.TestCase):
def setUp(self):
super(TestClassification, self).setUp()
def test_knn(self):
model = ml.classification.getClassifierByName("KNN")
data = dict()
data["features"] = [[0], [1], [2], [3]]
data["label"] = [0, 0, 1, 1]
model.train(data)
self.assertEquals(model.predict([[1.1]]), [0])
def test_svm(self):
model = ml.classification.getClassifierByName("SVM")
data = dict()
data["features"] = [[0], [1], [2], [3]]
data["label"] = [0, 0, 1, 1]
model.train(data)
self.assertEquals(model.predict([[1.1]]), [0])
def test_bayes(self):
model = ml.classification.getClassifierByName("Bayes")
data = dict()
data["features"] = [[0], [1], [2], [3]]
data["label"] = [0, 0, 1, 1]
model.train(data)
self.assertEquals(model.predict([[1.1]]), [0])
if __name__ == "__main__":
ut.main()
| 25.642857 | 62 | 0.551532 |
79449fcd47263c36628330d0a0f9d159cc4c8651 | 3,148 | py | Python | src/modules/tic_tac_toe/actions.py | moraisaugusto/tic-tac-toe-challenge | ffac7572cf6e39bdcae0b6c9f9a57d33698d46b7 | [
"BSD-3-Clause"
] | null | null | null | src/modules/tic_tac_toe/actions.py | moraisaugusto/tic-tac-toe-challenge | ffac7572cf6e39bdcae0b6c9f9a57d33698d46b7 | [
"BSD-3-Clause"
] | null | null | null | src/modules/tic_tac_toe/actions.py | moraisaugusto/tic-tac-toe-challenge | ffac7572cf6e39bdcae0b6c9f9a57d33698d46b7 | [
"BSD-3-Clause"
] | null | null | null | import pickle
from werkzeug.exceptions import NotFound, BadRequest
from db_models import Game
from src.lib.db_helper import DatabaseBase
from src.modules.tic_tac_toe.helper import Helper
from src.modules.tic_tac_toe.db_helper import Database
from src.modules.tic_tac_toe import tic_tac_toe
def update_game(user_request: dict, game_id: int):
"""update a game
Args:
user_request (dict): basic data to update game
game_id (int): Id of the game
    Returns:
        tuple of (board, winner) after the move has been applied
"""
if not game_id:
raise NotFound("Game ID not defined")
game_id = int(game_id)
player_name = user_request["player_name"]
position = user_request["position"]
current_game = retrieve_games(game_id)[0]
board = current_game["board"]
player = Helper.get_user_mark(current_game, user_request)
if player_name == current_game["last_player"]:
raise BadRequest(f"{player_name}, It's not your turn.")
if current_game["done"]:
raise BadRequest("Game already finished. Create a new one")
# check if players exist in the game
if not Helper.players_exit(user_request, current_game):
raise BadRequest("Player not found in the current game.")
has_machine = Helper.is_playing_alone(current_game)
# update board game
board = tic_tac_toe.update_game(board, position, player, has_machine)
# check winner
winner = Helper.get_winner(board, player_name)
if winner or winner == "Draw game!":
Database.set_game_over(game_id, player_name)
if has_machine:
machine = 0 if int(player) else 1
player_name = "MACHINE"
machine_position = tic_tac_toe.machine_move(board)
tic_tac_toe.update_game(board, machine_position, machine)
# check winner
winner = Helper.get_winner(board, player_name)
if winner or winner == "Draw game!":
Database.set_game_over(game_id, player_name)
# update board game in db
Database.update_board(board, game_id, player_name)
return board, winner
def new_game(user_request: dict):
"""create new game
Args:
user_request (dict): basic data to create a new game
Returns:
id of the new game
"""
if user_request["player_one_name"] == user_request["player_two_name"]:
raise ValueError("You can not use equal names.")
game_id = Database.create_game(user_request)
return game_id
def retrieve_games(game_id: int = None):
"""retrieve one or all games from the DB
Args:
        game_id (None | int): if None, returns all games; otherwise only the game with this id
Returns:
return a list of games in DB
"""
session = DatabaseBase.db_session()
if game_id:
rows = [session.query(Game).filter_by(id=game_id).one_or_none()]
else:
rows = session.query(Game).all()
if not rows[0]:
raise NotFound("Id not found")
# TODO: please improve me
entries = []
for row in rows:
entry = row.__dict__
entry["board"] = pickle.loads(entry["board"])
del entry["_sa_instance_state"]
entries.append(entry)
return entries
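# Illustrative usage (request payloads are hypothetical and may need more keys):
#   game_id = new_game({'player_one_name': 'alice', 'player_two_name': 'bob'})
#   board, winner = update_game({'player_name': 'alice', 'position': 4}, game_id)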
| 28.107143 | 77 | 0.674396 |
7944a0b247702254da4e3d2f2b219e6fdf9d75c6 | 6,790 | py | Python | src/graph_transpiler/webdnn/optimizer/sub_rules/elementwise_kernel_fusion.py | gunpowder78/webdnn | c659ea49007f91d178ce422a1eebe289516a71ee | [
"MIT"
] | 1 | 2018-07-26T13:52:21.000Z | 2018-07-26T13:52:21.000Z | src/graph_transpiler/webdnn/optimizer/sub_rules/elementwise_kernel_fusion.py | gunpowder78/webdnn | c659ea49007f91d178ce422a1eebe289516a71ee | [
"MIT"
] | null | null | null | src/graph_transpiler/webdnn/optimizer/sub_rules/elementwise_kernel_fusion.py | gunpowder78/webdnn | c659ea49007f91d178ce422a1eebe289516a71ee | [
"MIT"
] | null | null | null | from typing import Tuple, List
from webdnn.graph import traverse
from webdnn.graph.graph import Graph
from webdnn.graph.operators.elementwise import Elementwise
from webdnn.graph.operators.fused_elementwise import FusedElementwise
from webdnn.graph.optimize_rule import OptimizeRule
from webdnn.util import flags
def _find_elementwise_sub_graph(graph: Graph) -> List[Graph]:
"""
    Find all sub graphs which consist of only elementwise operators.
    For each sub graph, the following conditions are checked for every input variable :code:`x`:
    - :code:`x.output_from` is an elementwise operator.
    - All operators in :code:`x.input_to` are included in the sub graph.
    If both conditions are satisfied, :code:`x.output_from` is merged into the sub graph. If :code:`x.output_from` is
    already merged into another sub graph, the two sub graphs are merged into a single sub graph.
    In the following examples, let all operators be elementwise.
ex.1)
    .. code-block:: text
sub_graph sub_graph
+-------+ +-------------------+
+-{op1}-> v1-|-{op3}-|-> v3 +-|-{op1}-> v1 -{op3}-|-> v3
| +-------+ | +-------------------+
-{op0}-> v0 -+ => -{op0}-> v0 -+
| |
+-{op2}-> v2 +-{op2}-> v2
Considering :code:`v1`,
- :code:`v1.output_from = op1` is elementwise operator.
- :code:`v1.input_to` contains only :code:`op3`, which is included in sub graph.
Therefore :code:`op1` is merged into sub graph, and :code:`v0` is registered as input variable.
Considering :code:`v0`,
- :code:`v0.output_from = op0` is elementwise operator.
- :code:`v0.input_to` is :code:`op1` and :code:`op2`, and op2 is not included in sub graph
Therefore :code:`op0` cannot be merged into sub graph.
ex.2)
    .. code-block:: text
+---------------------+
-{op0}-> v0 -{op1}-> v1 -+ -{op0}-> v0 --|-{op1}-> v1 -+ |
| +-------+ | | |
+-|-{op3}-|-v3 => +-|-------------+-{op3}-|-> v3
| +-------+ | +---------------------+
| |
-{op2}-> v2 -+ -{op2}-> v2-+
Considering :code:`v1`,
- :code:`v1.output_from = op1` is elementwise operator.
- :code:`v1.input_to` is only :code:`op3`, which is included in sub graph.
Therefore :code:`op1` is merged into sub graph, and :code:`v0` is registered as input variable.
ex.3)
    .. code-block:: text
+-----------------------------------+
+-{op1}-> v1 -+ | +-{op1}-> v1 -+ |
| | +-------+ | | | |
-{op0}-> v0 -+ +-|-{op3}-|-v3 => -|-{op0}-> v0 -+ +-{op3}-|-> v3
| | +-------+ | | | |
+-{op2}-> v2 -+ | +-{op2}-> v2 -+ |
+-----------------------------------+
Considering :code:`v1`,
- :code:`v1.output_from = op1` is elementwise operator.
- :code:`v1.input_to` contains only :code:`op3`, which is included in sub graph.
Therefore :code:`op1` is merged into sub graph, and :code:`v0` is registered as input variable.
Considering :code:`v2`,
- :code:`v2.output_from = op2` is elementwise operator.
- :code:`v2.input_to` contains only :code:`op3`, which is included in sub graph.
Therefore :code:`op2` is also merged into sub graph.
Considering :code:`v0`,
- :code:`v0.output_from = op0` is elementwise operator.
    - :code:`v0.input_to` is :code:`op1` and :code:`op2`, both are included in sub graph.
Therefore :code:`op0` is also merged into sub graph.
Returns:
(list of :class:`~webdnn.graph.graph.Graph`): list of sub graphs
"""
queue = traverse.filter_nodes(traverse.listup_operators(graph), Elementwise) # type: List[Elementwise]
sub_graphs = {op: Graph(list(op.inputs.values()), list(op.outputs.values())) for op in queue}
result = []
while len(queue) > 0:
out_node = queue.pop()
sub_graph = sub_graphs[out_node]
flag_changed = False
new_inputs = []
for x in sub_graph.inputs:
# Condition 1: x.output_from is elementwise operator
if not isinstance(x.output_from, Elementwise):
new_inputs.append(x)
continue
# Condition 2: All operators in x.input_to are included in sub graph
if not _check_condition2(x, sub_graph):
new_inputs.append(x)
continue
# Sub graph can be merged with x.output_from
if x.output_from in queue:
new_inputs.extend(sub_graphs[x.output_from].inputs)
queue.remove(x.output_from)
flag_changed = True
elif x.output_from in result:
result.remove(x.output_from)
new_inputs.extend(sub_graphs[x.output_from].inputs)
flag_changed = True
else:
new_inputs.extend(sub_graphs[x.output_from].inputs)
flag_changed = True
sub_graph.inputs = list(set(new_inputs))
if flag_changed:
queue.append(out_node)
else:
result.append(out_node)
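    # Only sub graphs containing at least two operators are returned; fusing a single operator would gain nothing.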
return list(filter(lambda g: len(traverse.listup_operators(g)) >= 2, [sub_graphs[op] for op in result]))
def _check_condition2(v, sub_graph):
ops = traverse.listup_operators(sub_graph)
for op in v.input_to:
if op not in ops:
return False
return True
class ElementwiseKernelFusion(OptimizeRule):
def flags(self):
return [
flags.optimize.OPTIMIZE,
flags.optimize.ELEMENTWISE_KERNEL_FUSION
]
def optimize(self, graph: Graph) -> Tuple[Graph, bool]:
sub_graphs = _find_elementwise_sub_graph(graph)
if len(sub_graphs) == 0:
return graph, False
for sub_graph in sub_graphs:
FusedElementwise(None, sub_graph)
return graph, True
| 39.022989 | 135 | 0.493225 |
7944a0cc13d1d5e54414d1d673b19e9be8af7ab2 | 179,184 | py | Python | seahub/api2/views.py | saukrIppl/newsea | 0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603 | [
"Apache-2.0"
] | 2 | 2017-06-21T09:46:55.000Z | 2018-05-30T10:07:32.000Z | seahub/api2/views.py | saukrIppl/newsea | 0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603 | [
"Apache-2.0"
] | null | null | null | seahub/api2/views.py | saukrIppl/newsea | 0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603 | [
"Apache-2.0"
] | 1 | 2020-10-01T04:11:41.000Z | 2020-10-01T04:11:41.000Z | # encoding: utf-8
import logging
import os
import stat
import json
import datetime
import posixpath
import re
from dateutil.relativedelta import relativedelta
from urllib2 import unquote, quote
from rest_framework import parsers
from rest_framework import status
from rest_framework import renderers
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.reverse import reverse
from rest_framework.response import Response
from rest_framework.views import APIView
from django.contrib.auth.hashers import check_password
from django.contrib.sites.models import RequestSite
from django.db import IntegrityError
from django.db.models import F, Q
from django.http import HttpResponse
from django.template import RequestContext
from django.template.loader import render_to_string
from django.template.defaultfilters import filesizeformat
from django.shortcuts import render_to_response
from django.utils import timezone
from django.utils.translation import ugettext as _
from .throttling import ScopedRateThrottle, AnonRateThrottle, UserRateThrottle
from .authentication import TokenAuthentication
from .serializers import AuthTokenSerializer
from .utils import get_diff_details, \
api_error, get_file_size, prepare_starred_files, \
get_groups, get_group_and_contacts, prepare_events, \
api_group_check, get_timestamp, json_response, is_seafile_pro, \
api_repo_user_folder_perm_check, api_repo_setting_permission_check, \
api_repo_group_folder_perm_check
from seahub.avatar.templatetags.avatar_tags import api_avatar_url, avatar
from seahub.avatar.templatetags.group_avatar_tags import api_grp_avatar_url, \
grp_avatar
from seahub.base.accounts import User
from seahub.base.models import UserStarredFiles, DeviceToken
from seahub.base.templatetags.seahub_tags import email2nickname, \
translate_seahub_time, translate_commit_desc_escape
from seahub.group.views import remove_group_common, \
rename_group_with_new_name, is_group_staff
from seahub.group.utils import BadGroupNameError, ConflictGroupNameError, \
validate_group_name
from seahub.thumbnail.utils import generate_thumbnail
from seahub.notifications.models import UserNotification
from seahub.options.models import UserOptions
from seahub.contacts.models import Contact
from seahub.profile.models import Profile, DetailedProfile
from seahub.signals import (repo_created, repo_deleted)
from seahub.share.models import FileShare, OrgFileShare, UploadLinkShare
from seahub.utils import gen_file_get_url, gen_token, gen_file_upload_url, \
check_filename_with_rename, is_valid_username, EVENTS_ENABLED, \
get_user_events, EMPTY_SHA1, get_ccnet_server_addr_port, is_pro_version, \
gen_block_get_url, get_file_type_and_ext, HAS_FILE_SEARCH, \
gen_file_share_link, gen_dir_share_link, is_org_context, gen_shared_link, \
get_org_user_events, calculate_repos_last_modify, send_perm_audit_msg, \
gen_shared_upload_link, convert_cmmt_desc_link, is_org_repo_creation_allowed
from seahub.utils.devices import get_user_devices, do_unlink_device
from seahub.utils.repo import get_sub_repo_abbrev_origin_path
from seahub.utils.star import star_file, unstar_file
from seahub.utils.file_types import DOCUMENT
from seahub.utils.file_size import get_file_size_unit
from seahub.utils.timeutils import utc_to_local, datetime_to_isoformat_timestr
from seahub.views import is_registered_user, check_file_lock, \
group_events_data, get_diff, create_default_library, \
list_inner_pub_repos, get_virtual_repos_by_owner, \
check_folder_permission
from seahub.views.ajax import get_share_in_repo_list, get_groups_by_user, \
get_group_repos
from seahub.views.file import get_file_view_path_and_perm, send_file_access_msg
if HAS_FILE_SEARCH:
from seahub_extra.search.views import search_keyword
from seahub.utils import HAS_OFFICE_CONVERTER
if HAS_OFFICE_CONVERTER:
from seahub.utils import query_office_convert_status, prepare_converted_html
import seahub.settings as settings
from seahub.settings import THUMBNAIL_EXTENSION, THUMBNAIL_ROOT, \
ENABLE_GLOBAL_ADDRESSBOOK, FILE_LOCK_EXPIRATION_DAYS, \
ENABLE_THUMBNAIL, ENABLE_FOLDER_PERM
try:
from seahub.settings import CLOUD_MODE
except ImportError:
CLOUD_MODE = False
try:
from seahub.settings import MULTI_TENANCY
except ImportError:
MULTI_TENANCY = False
try:
from seahub.settings import ORG_MEMBER_QUOTA_DEFAULT
except ImportError:
ORG_MEMBER_QUOTA_DEFAULT = None
try:
from seahub.settings import ENABLE_OFFICE_WEB_APP
except ImportError:
ENABLE_OFFICE_WEB_APP = False
try:
from seahub.settings import OFFICE_WEB_APP_FILE_EXTENSION
except ImportError:
OFFICE_WEB_APP_FILE_EXTENSION = ()
from pysearpc import SearpcError, SearpcObjEncoder
import seaserv
from seaserv import seafserv_threaded_rpc, \
get_personal_groups_by_user, get_session_info, is_personal_repo, \
get_repo, check_permission, get_commits, is_passwd_set,\
check_quota, list_share_repos, get_group_repos_by_owner, get_group_repoids, \
list_inner_pub_repos_by_owner, is_group_user, \
remove_share, unset_inner_pub_repo, get_group, \
get_commit, get_file_id_by_path, MAX_DOWNLOAD_DIR_SIZE, edit_repo, \
ccnet_threaded_rpc, get_personal_groups, seafile_api, \
create_org, ccnet_api
from constance import config
logger = logging.getLogger(__name__)
json_content_type = 'application/json; charset=utf-8'
# Define custom HTTP status code. 4xx starts from 440, 5xx starts from 520.
HTTP_440_REPO_PASSWD_REQUIRED = 440
HTTP_441_REPO_PASSWD_MAGIC_REQUIRED = 441
HTTP_520_OPERATION_FAILED = 520
def UTF8Encode(s):
if isinstance(s, unicode):
return s.encode('utf-8')
else:
return s
def check_filename_with_rename_utf8(repo_id, parent_dir, filename):
newname = check_filename_with_rename(repo_id, parent_dir, filename)
return UTF8Encode(newname)
########## Test
class Ping(APIView):
"""
Returns a simple `pong` message when client calls `api2/ping/`.
For example:
curl http://127.0.0.1:8000/api2/ping/
"""
throttle_classes = (ScopedRateThrottle, )
throttle_scope = 'ping'
def get(self, request, format=None):
return Response('pong')
def head(self, request, format=None):
return Response(headers={'foo': 'bar',})
class AuthPing(APIView):
"""
Returns a simple `pong` message when client provided an auth token.
For example:
curl -H "Authorization: Token 9944b09199c62bcf9418ad846dd0e4bbdfc6ee4b" http://127.0.0.1:8000/api2/auth/ping/
"""
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
return Response('pong')
########## Token
class ObtainAuthToken(APIView):
"""
Returns auth token if username and password are valid.
For example:
curl -d "[email protected]&password=123456" http://127.0.0.1:8000/api2/auth-token/
"""
throttle_classes = (AnonRateThrottle, )
permission_classes = ()
parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
renderer_classes = (renderers.JSONRenderer,)
def post(self, request):
context = { 'request': request }
serializer = AuthTokenSerializer(data=request.data, context=context)
if serializer.is_valid():
key = serializer.validated_data
return Response({'token': key})
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
########## Accounts
class Accounts(APIView):
"""List all accounts.
Administrator permission is required.
"""
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAdminUser, )
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
# list accounts
start = int(request.GET.get('start', '0'))
limit = int(request.GET.get('limit', '100'))
# reading scope user list
scope = request.GET.get('scope', None)
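        # 'scope' may be 'LDAP' or 'DB' (case-insensitive); when omitted, LDAP is queried first and the DB only if LDAP returns no users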
accounts_ldap = []
accounts_db = []
if scope:
scope = scope.upper()
if scope == 'LDAP':
accounts_ldap = seaserv.get_emailusers('LDAP', start, limit)
elif scope == 'DB':
accounts_db = seaserv.get_emailusers('DB', start, limit)
else:
return api_error(status.HTTP_400_BAD_REQUEST, "%s is not a valid scope value" % scope)
else:
            # legacy behaviour: search LDAP first if available, then fall back to DB when no users are found
accounts_ldap = seaserv.get_emailusers('LDAP', start, limit)
if len(accounts_ldap) == 0:
accounts_db = seaserv.get_emailusers('DB', start, limit)
accounts_json = []
for account in accounts_ldap:
accounts_json.append({'email': account.email, 'source' : 'LDAP'})
for account in accounts_db:
accounts_json.append({'email': account.email, 'source' : 'DB'})
return Response(accounts_json)
class AccountInfo(APIView):
""" Show account info.
"""
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
info = {}
email = request.user.username
p = Profile.objects.get_profile_by_user(email)
d_p = DetailedProfile.objects.get_detailed_profile_by_user(email)
info['email'] = email
info['name'] = email2nickname(email)
info['total'] = seafile_api.get_user_quota(email)
info['usage'] = seafile_api.get_user_self_usage(email)
info['login_id'] = p.login_id if p else ""
info['department'] = d_p.department if d_p else ""
info['contact_email'] = p.contact_email if p else ""
info['institution'] = p.institution if p else ""
return Response(info)
class RegDevice(APIView):
"""Reg device for iOS push notification.
"""
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def post(self, request, format=None):
version = request.POST.get('version')
platform = request.POST.get('platform')
pversion = request.POST.get('pversion')
devicetoken = request.POST.get('deviceToken')
if not devicetoken or not version or not platform or not pversion:
return api_error(status.HTTP_400_BAD_REQUEST, "Missing argument")
token, modified = DeviceToken.objects.get_or_create(
token=devicetoken, user=request.user.username)
if token.version != version:
token.version = version
modified = True
if token.pversion != pversion:
token.pversion = pversion
modified = True
if token.platform != platform:
token.platform = platform
modified = True
if modified:
token.save()
return Response("success")
class Search(APIView):
""" Search all the repos
"""
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
if not HAS_FILE_SEARCH:
return api_error(status.HTTP_404_NOT_FOUND, "Search not supported")
keyword = request.GET.get('q', None)
if not keyword:
return api_error(status.HTTP_400_BAD_REQUEST, "Missing argument")
results, total, has_more = search_keyword(request, keyword)
for e in results:
e.pop('repo', None)
e.pop('content_highlight', None)
e.pop('exists', None)
e.pop('last_modified_by', None)
e.pop('name_highlight', None)
e.pop('score', None)
try:
path = e['fullpath'].encode('utf-8')
file_id = seafile_api.get_file_id_by_path(e['repo_id'], path)
e['oid'] = file_id
repo = get_repo(e['repo_id'])
e['size'] = get_file_size(repo.store_id, repo.version, file_id)
except SearpcError, err:
pass
res = { "total":total, "results":results, "has_more":has_more }
return Response(res)
########## Repo related
def repo_download_info(request, repo_id, gen_sync_token=True):
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
# generate download url for client
relay_id = get_session_info().id
addr, port = get_ccnet_server_addr_port()
email = request.user.username
if gen_sync_token:
token = seafile_api.generate_repo_token(repo_id, email)
else:
token = ''
repo_name = repo.name
repo_desc = repo.desc
repo_size = repo.size
repo_size_formatted = filesizeformat(repo.size)
enc = 1 if repo.encrypted else ''
magic = repo.magic if repo.encrypted else ''
random_key = repo.random_key if repo.random_key else ''
enc_version = repo.enc_version
repo_version = repo.version
calculate_repos_last_modify([repo])
info_json = {
'relay_id': relay_id,
'relay_addr': addr,
'relay_port': port,
'email': email,
'token': token,
'repo_id': repo_id,
'repo_name': repo_name,
'repo_desc': repo_desc,
'repo_size': repo_size,
'repo_size_formatted': repo_size_formatted,
'mtime': repo.latest_modify,
'mtime_relative': translate_seahub_time(repo.latest_modify),
'encrypted': enc,
'enc_version': enc_version,
'magic': magic,
'random_key': random_key,
'repo_version': repo_version,
}
return Response(info_json)
class Repos(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
# parse request params
filter_by = {
'mine': False,
'sub': False,
'shared': False,
'group': False,
'org': False,
}
rtype = request.GET.get('type', "")
if not rtype:
# set all to True, no filter applied
filter_by = filter_by.fromkeys(filter_by.iterkeys(), True)
for f in rtype.split(','):
f = f.strip()
filter_by[f] = True
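        # e.g. ?type=mine,shared restricts the listing to owned libraries plus libraries shared to the user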
email = request.user.username
if not UserOptions.objects.is_sub_lib_enabled(email):
filter_by['sub'] = False
repos_json = []
if filter_by['mine']:
if is_org_context(request):
org_id = request.user.org.org_id
owned_repos = seafile_api.get_org_owned_repo_list(org_id,
email, ret_corrupted=True)
else:
owned_repos = seafile_api.get_owned_repo_list(email,
ret_corrupted=True)
owned_repos.sort(lambda x, y: cmp(y.last_modify, x.last_modify))
for r in owned_repos:
# do not return virtual repos
if r.is_virtual:
continue
repo = {
"type": "repo",
"id": r.id,
"owner": email,
"name": r.name,
"desc": r.desc,
"mtime": r.last_modify,
"mtime_relative": translate_seahub_time(r.last_modify),
"size": r.size,
"size_formatted": filesizeformat(r.size),
"encrypted": r.encrypted,
"permission": 'rw', # Always have read-write permission to owned repo
"virtual": r.is_virtual,
"root": r.root,
"head_commit_id": r.head_cmmt_id,
}
if r.encrypted:
repo["enc_version"] = r.enc_version
repo["magic"] = r.magic
repo["random_key"] = r.random_key
repos_json.append(repo)
if filter_by['sub']:
# compose abbrev origin path for display
sub_repos = []
sub_repos = get_virtual_repos_by_owner(request)
for repo in sub_repos:
repo.abbrev_origin_path = get_sub_repo_abbrev_origin_path(
repo.origin_repo_name, repo.origin_path)
sub_repos.sort(lambda x, y: cmp(y.last_modify, x.last_modify))
for r in sub_repos:
# print r._dict
repo = {
"type": "repo",
"id": r.id,
"name": r.name,
"origin_repo_id": r.origin_repo_id,
"origin_path": r.origin_path,
"abbrev_origin_path": r.abbrev_origin_path,
"mtime": r.last_modify,
"mtime_relative": translate_seahub_time(r.last_modify),
"owner": email,
"desc": r.desc,
"size": r.size,
"encrypted": r.encrypted,
"permission": 'rw',
"virtual": r.is_virtual,
"root": r.root,
"head_commit_id": r.head_cmmt_id,
}
if r.encrypted:
repo["enc_version"] = r.enc_version
repo["magic"] = r.magic
repo["random_key"] = r.random_key
repos_json.append(repo)
if filter_by['shared']:
shared_repos = get_share_in_repo_list(request, -1, -1)
shared_repos.sort(lambda x, y: cmp(y.last_modify, x.last_modify))
for r in shared_repos:
r.password_need = is_passwd_set(r.repo_id, email)
repo = {
"type": "srepo",
"id": r.repo_id,
"owner": r.user,
"name": r.repo_name,
"owner_nickname": email2nickname(r.user),
"desc": r.repo_desc,
"mtime": r.last_modify,
"mtime_relative": translate_seahub_time(r.last_modify),
"size": r.size,
"size_formatted": filesizeformat(r.size),
"encrypted": r.encrypted,
"permission": r.user_perm,
"share_type": r.share_type,
"root": r.root,
"head_commit_id": r.head_cmmt_id,
}
if r.encrypted:
repo["enc_version"] = r.enc_version
repo["magic"] = r.magic
repo["random_key"] = r.random_key
repos_json.append(repo)
if filter_by['group']:
groups = get_groups_by_user(request)
group_repos = get_group_repos(request, groups)
group_repos.sort(lambda x, y: cmp(y.last_modify, x.last_modify))
for r in group_repos:
repo = {
"type": "grepo",
"id": r.id,
"owner": r.group.group_name,
"groupid": r.group.id,
"name": r.name,
"desc": r.desc,
"mtime": r.last_modify,
"size": r.size,
"encrypted": r.encrypted,
"permission": check_permission(r.id, email),
"root": r.root,
"head_commit_id": r.head_cmmt_id,
}
if r.encrypted:
repo["enc_version"] = r.enc_version
repo["magic"] = r.magic
repo["random_key"] = r.random_key
repos_json.append(repo)
if filter_by['org'] and request.user.permissions.can_view_org():
public_repos = list_inner_pub_repos(request)
for r in public_repos:
repo = {
"type": "grepo",
"id": r.repo_id,
"name": r.repo_name,
"desc": r.repo_desc,
"owner": "Organization",
"mtime": r.last_modified,
"mtime_relative": translate_seahub_time(r.last_modified),
"size": r.size,
"size_formatted": filesizeformat(r.size),
"encrypted": r.encrypted,
"permission": r.permission,
"share_from": r.user,
"share_type": r.share_type,
"root": r.root,
"head_commit_id": r.head_cmmt_id,
}
if r.encrypted:
repo["enc_version"] = r.enc_version
repo["magic"] = r.magic
repo["random_key"] = r.random_key
repos_json.append(repo)
response = HttpResponse(json.dumps(repos_json), status=200,
content_type=json_content_type)
response["enable_encrypted_library"] = config.ENABLE_ENCRYPTED_LIBRARY
return response
def post(self, request, format=None):
if not request.user.permissions.can_add_repo():
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to create library.')
req_from = request.GET.get('from', "")
if req_from == 'web':
gen_sync_token = False # Do not generate repo sync token
else:
gen_sync_token = True
username = request.user.username
repo_name = request.data.get("name", None)
if not repo_name:
return api_error(status.HTTP_400_BAD_REQUEST,
'Library name is required.')
repo_desc = request.data.get("desc", '')
org_id = -1
if is_org_context(request):
org_id = request.user.org.org_id
repo_id = request.data.get('repo_id', '')
try:
if repo_id:
# client generates magic and random key
repo_id, error = self._create_enc_repo(request, repo_id, repo_name, repo_desc, username, org_id)
else:
repo_id, error = self._create_repo(request, repo_name, repo_desc, username, org_id)
except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
'Failed to create library.')
if error is not None:
return error
if not repo_id:
return api_error(HTTP_520_OPERATION_FAILED,
'Failed to create library.')
else:
repo_created.send(sender=None,
org_id=org_id,
creator=username,
repo_id=repo_id,
repo_name=repo_name)
resp = repo_download_info(request, repo_id,
gen_sync_token=gen_sync_token)
# FIXME: according to the HTTP spec, need to return 201 code and
# with a corresponding location header
# resp['Location'] = reverse('api2-repo', args=[repo_id])
return resp
def _create_repo(self, request, repo_name, repo_desc, username, org_id):
passwd = request.data.get("passwd", None)
# to avoid 'Bad magic' error when create repo, passwd should be 'None'
# not an empty string when create unencrypted repo
if not passwd:
passwd = None
if (passwd is not None) and (not config.ENABLE_ENCRYPTED_LIBRARY):
return api_error(status.HTTP_403_FORBIDDEN,
'NOT allow to create encrypted library.')
if org_id > 0:
repo_id = seafile_api.create_org_repo(repo_name, repo_desc,
username, passwd, org_id)
else:
repo_id = seafile_api.create_repo(repo_name, repo_desc,
username, passwd)
return repo_id, None
def _create_enc_repo(self, request, repo_id, repo_name, repo_desc, username, org_id):
if not _REPO_ID_PATTERN.match(repo_id):
            return None, api_error(status.HTTP_400_BAD_REQUEST, 'Repo id must be a valid uuid')
magic = request.data.get('magic', '')
random_key = request.data.get('random_key', '')
try:
enc_version = int(request.data.get('enc_version', 0))
except ValueError:
return None, api_error(status.HTTP_400_BAD_REQUEST,
'Invalid enc_version param.')
if len(magic) != 64 or len(random_key) != 96 or enc_version < 0:
return None, api_error(status.HTTP_400_BAD_REQUEST,
'You must provide magic, random_key and enc_version.')
if org_id > 0:
repo_id = seafile_api.create_org_enc_repo(repo_id, repo_name, repo_desc,
username, magic, random_key, enc_version, org_id)
else:
repo_id = seafile_api.create_enc_repo(
repo_id, repo_name, repo_desc, username,
magic, random_key, enc_version)
return repo_id, None
class PubRepos(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
# List public repos
if not request.user.permissions.can_view_org():
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to view public libraries.')
repos_json = []
public_repos = list_inner_pub_repos(request)
for r in public_repos:
repo = {
"id": r.repo_id,
"name": r.repo_name,
"desc": r.repo_desc,
"owner": r.user,
"owner_nickname": email2nickname(r.user),
"mtime": r.last_modified,
"mtime_relative": translate_seahub_time(r.last_modified),
"size": r.size,
"size_formatted": filesizeformat(r.size),
"encrypted": r.encrypted,
"permission": r.permission,
"root": r.root,
}
if r.encrypted:
repo["enc_version"] = r.enc_version
repo["magic"] = r.magic
repo["random_key"] = r.random_key
repos_json.append(repo)
return Response(repos_json)
def post(self, request, format=None):
# Create public repo
if not request.user.permissions.can_add_repo():
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to create library.')
username = request.user.username
repo_name = request.data.get("name", None)
if not repo_name:
return api_error(status.HTTP_400_BAD_REQUEST,
'Library name is required.')
repo_desc = request.data.get("desc", '')
passwd = request.data.get("passwd", None)
        # To avoid a 'Bad magic' error when creating a repo, passwd must be None
        # (not an empty string) when creating an unencrypted repo.
        if not passwd:
            passwd = None
        if (passwd is not None) and (not config.ENABLE_ENCRYPTED_LIBRARY):
            return api_error(status.HTTP_403_FORBIDDEN,
                             'NOT allowed to create encrypted library.')
permission = request.data.get("permission", 'r')
        if permission not in ('r', 'rw'):
            return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid permission')
if is_org_context(request):
org_id = request.user.org.org_id
repo_id = seafile_api.create_org_repo(repo_name, repo_desc,
username, passwd, org_id)
repo = seafile_api.get_repo(repo_id)
seaserv.seafserv_threaded_rpc.set_org_inner_pub_repo(
org_id, repo.id, permission)
else:
repo_id = seafile_api.create_repo(repo_name, repo_desc,
username, passwd)
repo = seafile_api.get_repo(repo_id)
seafile_api.add_inner_pub_repo(repo.id, permission)
pub_repo = {
"id": repo.id,
"name": repo.name,
"desc": repo.desc,
"size": repo.size,
"size_formatted": filesizeformat(repo.size),
"mtime": repo.last_modify,
"mtime_relative": translate_seahub_time(repo.last_modify),
"encrypted": repo.encrypted,
"permission": 'rw', # Always have read-write permission to owned repo
"owner": username,
"owner_nickname": email2nickname(username),
}
return Response(pub_repo, status=201)
def set_repo_password(request, repo, password):
assert password, 'password must not be none'
try:
seafile_api.set_passwd(repo.id, request.user.username, password)
    except SearpcError as e:
if e.msg == 'Bad arguments':
return api_error(status.HTTP_400_BAD_REQUEST, e.msg)
elif e.msg == 'Repo is not encrypted':
return api_error(status.HTTP_409_CONFLICT, e.msg)
elif e.msg == 'Incorrect password':
return api_error(status.HTTP_400_BAD_REQUEST, e.msg)
elif e.msg == 'Internal server error':
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, e.msg)
else:
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, e.msg)
def check_set_repo_password(request, repo):
if not check_permission(repo.id, request.user.username):
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to access this library.')
if repo.encrypted:
password = request.REQUEST.get('password', default=None)
if not password:
return api_error(HTTP_440_REPO_PASSWD_REQUIRED,
'Library password is needed.')
return set_repo_password(request, repo, password)
class Repo(APIView):
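    """Get info of a single library, set/check its password, rename it, or delete it."""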
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
username = request.user.username
if not check_folder_permission(request, repo_id, '/'):
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to access this library.')
# check whether user is repo owner
if is_org_context(request):
repo_owner = seafile_api.get_org_repo_owner(repo.id)
else:
repo_owner = seafile_api.get_repo_owner(repo.id)
owner = "self" if username == repo_owner else "share"
last_commit = get_commits(repo.id, 0, 1)[0]
repo.latest_modify = last_commit.ctime if last_commit else None
        # query repo information
repo.size = seafile_api.get_repo_size(repo_id)
current_commit = get_commits(repo_id, 0, 1)[0]
root_id = current_commit.root_id if current_commit else None
repo_json = {
"type":"repo",
"id":repo.id,
"owner":owner,
"name":repo.name,
"desc":repo.desc,
"mtime":repo.latest_modify,
"size":repo.size,
"encrypted":repo.encrypted,
"root":root_id,
"permission": check_permission(repo.id, username),
}
if repo.encrypted:
repo_json["enc_version"] = repo.enc_version
repo_json["magic"] = repo.magic
repo_json["random_key"] = repo.random_key
return Response(repo_json)
def post(self, request, repo_id, format=None):
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
op = request.GET.get('op', 'setpassword')
if op == 'checkpassword':
magic = request.REQUEST.get('magic', default=None)
if not magic:
return api_error(HTTP_441_REPO_PASSWD_MAGIC_REQUIRED,
'Library password magic is needed.')
if not check_folder_permission(request, repo_id, '/'):
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
try:
seafile_api.check_passwd(repo.id, magic)
except SearpcError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
"SearpcError:" + e.msg)
return Response("success")
elif op == 'setpassword':
resp = check_set_repo_password(request, repo)
if resp:
return resp
return Response("success")
elif op == 'rename':
username = request.user.username
repo_name = request.POST.get('repo_name')
repo_desc = request.POST.get('repo_desc')
# check permission
if is_org_context(request):
repo_owner = seafile_api.get_org_repo_owner(repo.id)
else:
repo_owner = seafile_api.get_repo_owner(repo.id)
is_owner = True if username == repo_owner else False
if not is_owner:
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to rename this library.')
if edit_repo(repo_id, repo_name, repo_desc, username):
return Response("success")
else:
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
"Unable to rename library")
return Response("unsupported operation")
def delete(self, request, repo_id, format=None):
username = request.user.username
repo = seafile_api.get_repo(repo_id)
if not repo:
return api_error(status.HTTP_400_BAD_REQUEST,
'Library does not exist.')
# check permission
if is_org_context(request):
repo_owner = seafile_api.get_org_repo_owner(repo.id)
else:
repo_owner = seafile_api.get_repo_owner(repo.id)
is_owner = True if username == repo_owner else False
if not is_owner:
return api_error(
status.HTTP_403_FORBIDDEN,
'You do not have permission to delete this library.'
)
usernames = seaserv.get_related_users_by_repo(repo_id)
seafile_api.remove_repo(repo_id)
repo_deleted.send(sender=None,
org_id=-1,
usernames=usernames,
repo_owner=repo_owner,
repo_id=repo_id,
repo_name=repo.name)
return Response('success', status=status.HTTP_200_OK)
class RepoHistory(APIView):
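    """List the commit history of a library, paginated by 'page' and 'per_page'."""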
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '25'))
except ValueError:
current_page = 1
per_page = 25
commits_all = get_commits(repo_id, per_page * (current_page - 1),
per_page + 1)
commits = commits_all[:per_page]
if len(commits_all) == per_page + 1:
page_next = True
else:
page_next = False
return HttpResponse(json.dumps({"commits": commits,
"page_next": page_next},
cls=SearpcObjEncoder),
status=200, content_type=json_content_type)
class RepoHistoryLimit(APIView):
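    """Get or set how many days of history a library keeps (owner only)."""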
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# check permission
if is_org_context(request):
repo_owner = seafile_api.get_org_repo_owner(repo_id)
else:
repo_owner = seafile_api.get_repo_owner(repo_id)
username = request.user.username
# no settings for virtual repo
if repo.is_virtual or username != repo_owner:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
keep_days = seafile_api.get_repo_history_limit(repo_id)
return Response({'keep_days': keep_days})
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
def put(self, request, repo_id, format=None):
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# check permission
if is_org_context(request):
repo_owner = seafile_api.get_org_repo_owner(repo_id)
else:
repo_owner = seafile_api.get_repo_owner(repo_id)
username = request.user.username
# no settings for virtual repo
if repo.is_virtual or \
not config.ENABLE_REPO_HISTORY_SETTING or \
username != repo_owner:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# check arg validation
keep_days = request.data.get('keep_days', None)
if not keep_days:
error_msg = 'keep_days invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
keep_days = int(keep_days)
except ValueError:
error_msg = 'keep_days invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
# days <= -1, keep full history
# days = 0, not keep history
# days > 0, keep a period of days
res = seafile_api.set_repo_history_limit(repo_id, keep_days)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if res == 0:
new_limit = seafile_api.get_repo_history_limit(repo_id)
return Response({'keep_days': new_limit})
else:
error_msg = 'Failed to set library history limit.'
return api_error(status.HTTP_520_OPERATION_FAILED, error_msg)
class DownloadRepo(APIView):
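    """Return download/sync info for a library the user can access."""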
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
if not check_folder_permission(request, repo_id, '/'):
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to access this library.')
return repo_download_info(request, repo_id)
class RepoPublic(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def post(self, request, repo_id, format=None):
"""Set organization library.
"""
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library %s not found.' % repo_id)
if not is_org_repo_creation_allowed(request):
return api_error(status.HTTP_403_FORBIDDEN,
'Permission denied.')
if check_permission(repo_id, request.user.username) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to access this library.')
try:
seafile_api.add_inner_pub_repo(repo_id, "r")
except:
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
'Unable to make library public')
return HttpResponse(json.dumps({'success': True}), status=200,
content_type=json_content_type)
def delete(self, request, repo_id, format=None):
"""Unset organization library.
"""
username = request.user.username
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
if not request.user.is_staff and \
not seafile_api.is_repo_owner(username, repo_id):
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to unshare library.')
try:
seafile_api.remove_inner_pub_repo(repo_id)
except:
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
'Unable to make library private')
return HttpResponse(json.dumps({'success': True}), status=200,
content_type=json_content_type)
class RepoOwner(APIView):
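    """Get the owner of a library, or transfer it to another user."""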
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
repo = get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
org_id = None
if is_org_context(request):
org_id = request.user.org.org_id
# check permission
if org_id:
repo_owner = seafile_api.get_org_repo_owner(repo_id)
else:
repo_owner = seafile_api.get_repo_owner(repo_id)
if request.user.username != repo_owner:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
return HttpResponse(json.dumps({"owner": repo_owner}), status=200,
content_type=json_content_type)
def put(self, request, repo_id, format=None):
repo = get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
org_id = None
if is_org_context(request):
org_id = request.user.org.org_id
# check permission
if org_id:
repo_owner = seafile_api.get_org_repo_owner(repo.id)
else:
repo_owner = seafile_api.get_repo_owner(repo.id)
if request.user.username != repo_owner:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# check new owner validation
new_owner = request.data.get('owner', '').lower()
try:
User.objects.get(email=new_owner)
except User.DoesNotExist:
error_msg = 'User %s not found.' % new_owner
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
if org_id:
if not ccnet_api.org_user_exists(org_id, new_owner):
error_msg = _(u'User %s not found in organization.') % new_owner
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# transfer repo
if org_id:
seafile_api.set_org_repo_owner(org_id, repo_id, new_owner)
else:
if ccnet_api.get_orgs_by_user(new_owner):
# can not transfer library to organization user %s.
error_msg = 'Email %s invalid.' % new_owner
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
else:
seafile_api.set_repo_owner(repo_id, new_owner)
return HttpResponse(json.dumps({'success': True}),
content_type=json_content_type)
########## File related
class FileBlockDownloadLinkView(APIView):
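    """Return a fileserver URL for downloading a single block of a file."""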
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, file_id, block_id, format=None):
parent_dir = request.GET.get('p', '/')
if check_folder_permission(request, repo_id, parent_dir) is None:
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to access this repo.')
if check_quota(repo_id) < 0:
return api_error(HTTP_520_OPERATION_FAILED, 'Above quota')
token = seafile_api.get_fileserver_access_token(
repo_id, file_id, 'downloadblks', request.user.username)
url = gen_block_get_url(token, block_id)
return Response(url)
class UploadLinkView(APIView):
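    """Return a fileserver URL for uploading files into a folder with write permission."""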
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
parent_dir = request.GET.get('p', '/')
if check_folder_permission(request, repo_id, parent_dir) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to access this folder.')
if check_quota(repo_id) < 0:
return api_error(HTTP_520_OPERATION_FAILED, 'Above quota')
token = seafile_api.get_fileserver_access_token(
repo_id, 'dummy', 'upload', request.user.username, use_onetime = False)
url = gen_file_upload_url(token, 'upload-api')
return Response(url)
class UpdateLinkView(APIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
parent_dir = request.GET.get('p', '/')
if check_folder_permission(request, repo_id, parent_dir) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to access this folder.')
if check_quota(repo_id) < 0:
return api_error(HTTP_520_OPERATION_FAILED, 'Above quota')
token = seafile_api.get_fileserver_access_token(
repo_id, 'dummy', 'update', request.user.username)
url = gen_file_upload_url(token, 'update-api')
return Response(url)
class UploadBlksLinkView(APIView):
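    """Return fileserver URLs for block-based uploads; POST also reports
    which blocks are still missing on the server.
    """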
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
parent_dir = request.GET.get('p', '/')
if check_folder_permission(request, repo_id, parent_dir) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to access this folder.')
if check_quota(repo_id) < 0:
return api_error(HTTP_520_OPERATION_FAILED, 'Above quota')
token = seafile_api.get_fileserver_access_token(
repo_id, 'dummy', 'upload-blks-api', request.user.username,
use_onetime = False)
url = gen_file_upload_url(token, 'upload-blks-api')
return Response(url)
def get_blklist_missing(self, repo_id, blks):
if not blks:
return []
blklist = blks.split(',')
try:
return json.loads(seafile_api.check_repo_blocks_missing(
repo_id, json.dumps(blklist)))
except Exception as e:
            logger.error(e)
return blklist
def post(self, request, repo_id, format=None):
parent_dir = request.GET.get('p', '/')
if check_folder_permission(request, repo_id, parent_dir) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to access this folder.')
if check_quota(repo_id) < 0:
return api_error(HTTP_520_OPERATION_FAILED, 'Above quota')
token = seafile_api.get_fileserver_access_token(
repo_id, 'dummy', 'upload', request.user.username,
use_onetime = False)
blksurl = gen_file_upload_url(token, 'upload-raw-blks-api')
commiturl = '%s?commitonly=true&ret-json=true' % gen_file_upload_url(
token, 'upload-blks-api')
blks = request.POST.get('blklist', None)
blklist = self.get_blklist_missing(repo_id, blks)
res = {
'rawblksurl': blksurl,
'commiturl': commiturl,
'blklist': blklist
}
return HttpResponse(json.dumps(res), status=200,
content_type=json_content_type)
class UpdateBlksLinkView(APIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
parent_dir = request.GET.get('p', '/')
if check_folder_permission(request, repo_id, parent_dir) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to access this folder.')
if check_quota(repo_id) < 0:
return api_error(HTTP_520_OPERATION_FAILED, 'Above quota')
token = seafile_api.get_fileserver_access_token(
repo_id, 'dummy', 'update-blks-api', request.user.username,
use_onetime = False)
url = gen_file_upload_url(token, 'update-blks-api')
return Response(url)
def get_dir_recursively(username, repo_id, path, all_dirs):
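    """Walk the tree under 'path' and append an entry for every sub-directory to 'all_dirs'."""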
path_id = seafile_api.get_dir_id_by_path(repo_id, path)
dirs = seafserv_threaded_rpc.list_dir_with_perm(repo_id, path,
path_id, username, -1, -1)
for dirent in dirs:
if stat.S_ISDIR(dirent.mode):
entry = {}
entry["type"] = 'dir'
entry["parent_dir"] = path
entry["id"] = dirent.obj_id
entry["name"] = dirent.obj_name
entry["mtime"] = dirent.mtime
entry["permission"] = dirent.permission
all_dirs.append(entry)
sub_path = posixpath.join(path, dirent.obj_name)
get_dir_recursively(username, repo_id, sub_path, all_dirs)
return all_dirs
def get_dir_entrys_by_id(request, repo, path, dir_id, request_type=None):
""" Get dirents in a dir
if request_type is 'f', only return file list,
if request_type is 'd', only return dir list,
else, return both.
"""
username = request.user.username
try:
dirs = seafserv_threaded_rpc.list_dir_with_perm(repo.id, path, dir_id,
username, -1, -1)
dirs = dirs if dirs else []
    except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
"Failed to list dir.")
dir_list, file_list = [], []
for dirent in dirs:
dtype = "file"
entry = {}
if stat.S_ISDIR(dirent.mode):
dtype = "dir"
else:
if repo.version == 0:
entry["size"] = get_file_size(repo.store_id, repo.version,
dirent.obj_id)
else:
entry["size"] = dirent.size
if is_pro_version():
entry["is_locked"] = dirent.is_locked
entry["lock_owner"] = dirent.lock_owner
entry["lock_time"] = dirent.lock_time
if username == dirent.lock_owner:
entry["locked_by_me"] = True
else:
entry["locked_by_me"] = False
entry["type"] = dtype
entry["name"] = dirent.obj_name
entry["id"] = dirent.obj_id
entry["mtime"] = dirent.mtime
entry["permission"] = dirent.permission
if dtype == 'dir':
dir_list.append(entry)
else:
file_list.append(entry)
    dir_list.sort(key=lambda x: x['name'].lower())
    file_list.sort(key=lambda x: x['name'].lower())
if request_type == 'f':
dentrys = file_list
elif request_type == 'd':
dentrys = dir_list
else:
dentrys = dir_list + file_list
response = HttpResponse(json.dumps(dentrys), status=200,
content_type=json_content_type)
response["oid"] = dir_id
response["dir_perm"] = seafile_api.check_permission_by_path(repo.id, path, username)
return response
def get_shared_link(request, repo_id, path):
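    """Return a download share link for the given path, reusing an existing
    FileShare or creating a new one.
    """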
l = FileShare.objects.filter(repo_id=repo_id).filter(
username=request.user.username).filter(path=path)
token = None
if len(l) > 0:
fileshare = l[0]
token = fileshare.token
else:
token = gen_token(max_length=10)
fs = FileShare()
fs.username = request.user.username
fs.repo_id = repo_id
fs.path = path
fs.token = token
try:
fs.save()
        except IntegrityError as e:
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, str(e))
    http_or_https = 'https' if request.is_secure() else 'http'
domain = RequestSite(request).domain
file_shared_link = '%s://%s%sf/%s/' % (http_or_https, domain,
settings.SITE_ROOT, token)
return file_shared_link
def get_repo_file(request, repo_id, file_id, file_name, op, use_onetime=True):
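    """Handle a file-level op: 'download' returns a fileserver URL,
    'downloadblks' returns the file's block list, 'sharelink' returns a share link.
    """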
if op == 'download':
token = seafile_api.get_fileserver_access_token(repo_id, file_id, op,
request.user.username,
use_onetime)
redirect_url = gen_file_get_url(token, file_name)
response = HttpResponse(json.dumps(redirect_url), status=200,
content_type=json_content_type)
response["oid"] = file_id
return response
if op == 'downloadblks':
blklist = []
encrypted = False
enc_version = 0
if file_id != EMPTY_SHA1:
try:
blks = seafile_api.list_blocks_by_file_id(repo_id, file_id)
blklist = blks.split('\n')
except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
'Failed to get file block list')
blklist = [i for i in blklist if len(i) == 40]
if len(blklist) > 0:
repo = get_repo(repo_id)
encrypted = repo.encrypted
enc_version = repo.enc_version
res = {
'file_id': file_id,
'blklist': blklist,
'encrypted': encrypted,
'enc_version': enc_version,
}
response = HttpResponse(json.dumps(res), status=200,
content_type=json_content_type)
response["oid"] = file_id
return response
if op == 'sharelink':
path = request.GET.get('p', None)
if path is None:
return api_error(status.HTTP_400_BAD_REQUEST, 'Path is missing.')
file_shared_link = get_shared_link(request, repo_id, path)
return Response(file_shared_link)
def reloaddir(request, repo, parent_dir):
try:
dir_id = seafile_api.get_dir_id_by_path(repo.id, parent_dir)
    except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
"Failed to get dir id by path")
if not dir_id:
return api_error(status.HTTP_404_NOT_FOUND, "Path does not exist")
return get_dir_entrys_by_id(request, repo, parent_dir, dir_id)
def reloaddir_if_necessary(request, repo, parent_dir):
reload_dir = False
s = request.GET.get('reloaddir', None)
if s and s.lower() == 'true':
reload_dir = True
if not reload_dir:
return Response('success')
return reloaddir(request, repo, parent_dir)
# deprecated
class OpDeleteView(APIView):
"""
Delete files.
"""
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
def post(self, request, repo_id, format=None):
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
username = request.user.username
        if check_folder_permission(request, repo_id, '/') != 'rw':
            return api_error(status.HTTP_403_FORBIDDEN,
                             'You do not have permission to delete this file.')
parent_dir = request.GET.get('p')
file_names = request.POST.get("file_names")
if not parent_dir or not file_names:
return api_error(status.HTTP_404_NOT_FOUND,
'File or directory not found.')
parent_dir_utf8 = parent_dir.encode('utf-8')
for file_name in file_names.split(':'):
file_name = unquote(file_name.encode('utf-8'))
try:
seafile_api.del_file(repo_id, parent_dir_utf8,
file_name, username)
            except SearpcError as e:
                logger.error(e)
                return api_error(HTTP_520_OPERATION_FAILED,
                                 "Failed to delete file.")
        return reloaddir_if_necessary(request, repo, parent_dir_utf8)
class OpMoveView(APIView):
"""
Move files.
"""
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
def post(self, request, repo_id, format=None):
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
username = request.user.username
parent_dir = request.GET.get('p', '/')
dst_repo = request.POST.get('dst_repo', None)
dst_dir = request.POST.get('dst_dir', None)
file_names = request.POST.get("file_names", None)
if not parent_dir or not file_names or not dst_repo or not dst_dir:
return api_error(status.HTTP_400_BAD_REQUEST,
'Missing argument.')
if check_folder_permission(request, repo_id, parent_dir) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to move file in this folder.')
if check_folder_permission(request, dst_repo, dst_dir) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to move file to destination folder.')
if repo_id == dst_repo and parent_dir == dst_dir:
return api_error(status.HTTP_400_BAD_REQUEST,
'The destination directory is the same as the source.')
parent_dir_utf8 = parent_dir.encode('utf-8')
for file_name in file_names.split(':'):
file_name = unquote(file_name.encode('utf-8'))
new_filename = check_filename_with_rename_utf8(dst_repo, dst_dir,
file_name)
try:
seafile_api.move_file(repo_id, parent_dir_utf8, file_name,
dst_repo, dst_dir, new_filename,
username, 0, synchronous=1)
except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
"Failed to move file.")
        return reloaddir_if_necessary(request, repo, parent_dir_utf8)
class OpCopyView(APIView):
"""
Copy files.
"""
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
def post(self, request, repo_id, format=None):
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
username = request.user.username
parent_dir = request.GET.get('p', '/')
dst_repo = request.POST.get('dst_repo', None)
dst_dir = request.POST.get('dst_dir', None)
file_names = request.POST.get("file_names", None)
if not parent_dir or not file_names or not dst_repo or not dst_dir:
return api_error(status.HTTP_400_BAD_REQUEST,
'Missing argument.')
if check_folder_permission(request, repo_id, parent_dir) is None:
return api_error(status.HTTP_403_FORBIDDEN,
                             'You do not have permission to copy files from this folder.')
if check_folder_permission(request, dst_repo, dst_dir) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to copy file to destination folder.')
if not get_repo(dst_repo):
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
if seafile_api.get_dir_id_by_path(repo_id, parent_dir) is None or \
seafile_api.get_dir_id_by_path(dst_repo, dst_dir) is None:
return api_error(status.HTTP_400_BAD_REQUEST, 'Path does not exist.')
parent_dir_utf8 = parent_dir.encode('utf-8')
for file_name in file_names.split(':'):
file_name = unquote(file_name.encode('utf-8'))
new_filename = check_filename_with_rename_utf8(dst_repo, dst_dir,
file_name)
try:
seafile_api.copy_file(repo_id, parent_dir_utf8, file_name,
dst_repo, dst_dir, new_filename,
username, 0, synchronous=1)
except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
"Failed to copy file.")
return reloaddir_if_necessary(request, repo, parent_dir_utf8)
class StarredFileView(APIView):
"""
Support uniform interface for starred file operation,
including add/delete/list starred files.
"""
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
# list starred files
personal_files = UserStarredFiles.objects.get_starred_files_by_username(
request.user.username)
starred_files = prepare_starred_files(personal_files)
return Response(starred_files)
def post(self, request, format=None):
# add starred file
repo_id = request.POST.get('repo_id', '')
path = request.POST.get('p', '')
if not (repo_id and path):
return api_error(status.HTTP_400_BAD_REQUEST,
'Library ID or path is missing.')
if check_folder_permission(request, repo_id, path) is None:
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied')
try:
file_id = seafile_api.get_file_id_by_path(repo_id, path)
except SearpcError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal error')
if not file_id:
return api_error(status.HTTP_404_NOT_FOUND, "File not found")
if path[-1] == '/': # Should not contain '/' at the end of path.
return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid file path.')
star_file(request.user.username, repo_id, path, is_dir=False,
org_id=-1)
resp = Response('success', status=status.HTTP_201_CREATED)
resp['Location'] = reverse('starredfiles')
return resp
def delete(self, request, format=None):
# remove starred file
repo_id = request.GET.get('repo_id', '')
path = request.GET.get('p', '')
if not (repo_id and path):
return api_error(status.HTTP_400_BAD_REQUEST,
'Library ID or path is missing.')
if check_folder_permission(request, repo_id, path) is None:
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied')
try:
file_id = seafile_api.get_file_id_by_path(repo_id, path)
except SearpcError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal error')
if not file_id:
return api_error(status.HTTP_404_NOT_FOUND, "File not found")
if path[-1] == '/': # Should not contain '/' at the end of path.
return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid file path.')
unstar_file(request.user.username, repo_id, path)
return Response('success', status=status.HTTP_200_OK)
class OwaFileView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
""" Get action url and access token when view file through Office Web App
"""
# check args
repo = get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
path = request.GET.get('path', None)
if not path:
error_msg = 'path invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
file_id = seafile_api.get_file_id_by_path(repo_id, path)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if not file_id:
error_msg = 'File %s not found.' % path
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# check permission
if not is_pro_version():
error_msg = 'Office Web App feature only supported in professional edition.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
if check_folder_permission(request, repo_id, path) is None:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
if repo.encrypted:
error_msg = 'Library encrypted.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
if not ENABLE_OFFICE_WEB_APP:
error_msg = 'Office Web App feature not enabled.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
filename = os.path.basename(path)
filetype, fileext = get_file_type_and_ext(filename)
if fileext not in OFFICE_WEB_APP_FILE_EXTENSION:
error_msg = 'path invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
# get wopi dict
from seahub_extra.wopi.utils import get_wopi_dict
username = request.user.username
wopi_dict = get_wopi_dict(username, repo_id, path)
# send stats message
send_file_access_msg(request, repo, path, 'api')
return Response(wopi_dict)
class DevicesView(APIView):
"""List user devices"""
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
username = request.user.username
user_devices = get_user_devices(username)
return Response(user_devices)
def delete(self, request, format=None):
platform = request.data.get('platform', '')
device_id = request.data.get('device_id', '')
if not platform:
error_msg = 'platform invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if not device_id:
error_msg = 'device_id invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
do_unlink_device(request.user.username, platform, device_id)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({'success': True})
class FileView(APIView):
"""
Support uniform interface for file related operations,
including create/delete/rename/view, etc.
"""
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
# view file
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
path = request.GET.get('p', None)
if not path:
return api_error(status.HTTP_400_BAD_REQUEST, 'Path is missing.')
if check_folder_permission(request, repo_id, path) is None:
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to access this file.')
file_id = None
try:
file_id = seafile_api.get_file_id_by_path(repo_id,
path.encode('utf-8'))
except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
"Failed to get file id by path.")
if not file_id:
return api_error(status.HTTP_404_NOT_FOUND, "File not found")
# send stats message
send_file_access_msg(request, repo, path, 'api')
file_name = os.path.basename(path)
op = request.GET.get('op', 'download')
reuse = request.GET.get('reuse', '0')
if reuse not in ('1', '0'):
return api_error(status.HTTP_400_BAD_REQUEST,
"If you want to reuse file server access token for download file, you should set 'reuse' argument as '1'.")
use_onetime = False if reuse == '1' else True
return get_repo_file(request, repo_id, file_id,
file_name, op, use_onetime)
def post(self, request, repo_id, format=None):
# rename, move, copy or create file
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
path = request.GET.get('p', '')
if not path or path[0] != '/':
return api_error(status.HTTP_400_BAD_REQUEST,
'Path is missing or invalid.')
username = request.user.username
parent_dir = os.path.dirname(path)
operation = request.POST.get('operation', '')
if operation.lower() == 'rename':
if check_folder_permission(request, repo_id, path) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to rename file.')
newname = request.POST.get('newname', '')
if not newname:
return api_error(status.HTTP_400_BAD_REQUEST,
'New name is missing')
newname = newname.encode('utf-8')
if len(newname) > settings.MAX_UPLOAD_FILE_NAME_LEN:
return api_error(status.HTTP_400_BAD_REQUEST, 'New name is too long')
parent_dir_utf8 = parent_dir.encode('utf-8')
oldname = os.path.basename(path)
if oldname == newname:
return api_error(status.HTTP_409_CONFLICT,
'The new name is the same to the old')
newname = check_filename_with_rename_utf8(repo_id, parent_dir,
newname)
try:
seafile_api.rename_file(repo_id, parent_dir, oldname, newname,
username)
            except SearpcError as e:
return api_error(HTTP_520_OPERATION_FAILED,
"Failed to rename file: %s" % e)
if request.GET.get('reloaddir', '').lower() == 'true':
return reloaddir(request, repo, parent_dir_utf8)
else:
resp = Response('success', status=status.HTTP_301_MOVED_PERMANENTLY)
uri = reverse('FileView', args=[repo_id], request=request)
resp['Location'] = uri + '?p=' + quote(parent_dir_utf8) + quote(newname)
return resp
elif operation.lower() == 'move':
if check_folder_permission(request, repo_id, path) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to move file.')
src_dir = os.path.dirname(path)
src_dir_utf8 = src_dir.encode('utf-8')
src_repo_id = repo_id
            dst_repo_id = request.POST.get('dst_repo', '')
            dst_dir = request.POST.get('dst_dir', '')
            # obj_names = request.POST.get('obj_names', '')
            if not (dst_repo_id and dst_dir):
                return api_error(status.HTTP_400_BAD_REQUEST, 'Missing arguments.')
            dst_dir_utf8 = dst_dir.encode('utf-8')
            if dst_dir[-1] != '/': # Append '/' to the end of directory if necessary
                dst_dir += '/'
if src_repo_id == dst_repo_id and src_dir == dst_dir:
return Response('success', status=status.HTTP_200_OK)
if check_folder_permission(request, dst_repo_id, dst_dir) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to move file.')
filename = os.path.basename(path)
filename_utf8 = filename.encode('utf-8')
new_filename_utf8 = check_filename_with_rename_utf8(dst_repo_id,
dst_dir,
filename)
try:
seafile_api.move_file(src_repo_id, src_dir_utf8,
filename_utf8, dst_repo_id,
dst_dir_utf8, new_filename_utf8,
username, 0, synchronous=1)
            except SearpcError as e:
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
"SearpcError:" + e.msg)
dst_repo = get_repo(dst_repo_id)
if not dst_repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
if request.GET.get('reloaddir', '').lower() == 'true':
return reloaddir(request, dst_repo, dst_dir)
else:
resp = Response('success', status=status.HTTP_301_MOVED_PERMANENTLY)
uri = reverse('FileView', args=[dst_repo_id], request=request)
resp['Location'] = uri + '?p=' + quote(dst_dir_utf8) + quote(new_filename_utf8)
return resp
elif operation.lower() == 'copy':
src_repo_id = repo_id
src_dir = os.path.dirname(path)
src_dir_utf8 = src_dir.encode('utf-8')
            dst_repo_id = request.POST.get('dst_repo', '')
            dst_dir = request.POST.get('dst_dir', '')
            if not (dst_repo_id and dst_dir):
                return api_error(status.HTTP_400_BAD_REQUEST, 'Missing arguments.')
            dst_dir_utf8 = dst_dir.encode('utf-8')
            if dst_dir[-1] != '/': # Append '/' to the end of directory if necessary
                dst_dir += '/'
if src_repo_id == dst_repo_id and src_dir == dst_dir:
return Response('success', status=status.HTTP_200_OK)
# check src folder permission
if check_folder_permission(request, repo_id, path) is None:
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to copy file.')
# check dst folder permission
if check_folder_permission(request, dst_repo_id, dst_dir) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to copy file.')
filename = os.path.basename(path)
filename_utf8 = filename.encode('utf-8')
new_filename_utf8 = check_filename_with_rename_utf8(dst_repo_id,
dst_dir,
filename)
try:
seafile_api.copy_file(src_repo_id, src_dir_utf8,
filename_utf8, dst_repo_id,
dst_dir_utf8, new_filename_utf8,
username, 0, synchronous=1)
except SearpcError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
"SearpcError:" + e.msg)
            dst_repo = get_repo(dst_repo_id)
            if not dst_repo:
                return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
            if request.GET.get('reloaddir', '').lower() == 'true':
                return reloaddir(request, dst_repo, dst_dir)
else:
resp = Response('success', status=status.HTTP_200_OK)
uri = reverse('FileView', args=[dst_repo_id], request=request)
resp['Location'] = uri + '?p=' + quote(dst_dir_utf8) + quote(new_filename_utf8)
return resp
elif operation.lower() == 'create':
if check_folder_permission(request, repo_id, parent_dir) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to create file.')
parent_dir_utf8 = parent_dir.encode('utf-8')
new_file_name = os.path.basename(path)
new_file_name_utf8 = check_filename_with_rename_utf8(repo_id,
parent_dir,
new_file_name)
try:
seafile_api.post_empty_file(repo_id, parent_dir,
new_file_name_utf8, username)
            except SearpcError as e:
return api_error(HTTP_520_OPERATION_FAILED,
'Failed to create file.')
if request.GET.get('reloaddir', '').lower() == 'true':
return reloaddir(request, repo, parent_dir)
else:
resp = Response('success', status=status.HTTP_201_CREATED)
uri = reverse('FileView', args=[repo_id], request=request)
resp['Location'] = uri + '?p=' + quote(parent_dir_utf8) + \
quote(new_file_name_utf8)
return resp
else:
return api_error(status.HTTP_400_BAD_REQUEST,
"Operation can only be rename, create or move.")
def put(self, request, repo_id, format=None):
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
path = request.data.get('p', '')
file_id = seafile_api.get_file_id_by_path(repo_id, path)
if not path or not file_id:
return api_error(status.HTTP_400_BAD_REQUEST,
'Path is missing or invalid.')
username = request.user.username
# check file access permission
if check_folder_permission(request, repo_id, path) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
operation = request.data.get('operation', '')
if operation.lower() == 'lock':
is_locked, locked_by_me = check_file_lock(repo_id, path, username)
if is_locked:
return api_error(status.HTTP_403_FORBIDDEN, 'File is already locked')
# lock file
expire = request.data.get('expire', FILE_LOCK_EXPIRATION_DAYS)
try:
seafile_api.lock_file(repo_id, path.lstrip('/'), username, expire)
return Response('success', status=status.HTTP_200_OK)
            except SearpcError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal error')
if operation.lower() == 'unlock':
is_locked, locked_by_me = check_file_lock(repo_id, path, username)
if not is_locked:
return api_error(status.HTTP_403_FORBIDDEN, 'File is not locked')
if not locked_by_me:
return api_error(status.HTTP_403_FORBIDDEN, 'You can not unlock this file')
# unlock file
try:
seafile_api.unlock_file(repo_id, path.lstrip('/'))
return Response('success', status=status.HTTP_200_OK)
            except SearpcError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal error')
else:
return api_error(status.HTTP_400_BAD_REQUEST,
"Operation can only be lock or unlock")
def delete(self, request, repo_id, format=None):
# delete file
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
path = request.GET.get('p', None)
if not path:
return api_error(status.HTTP_400_BAD_REQUEST, 'Path is missing.')
parent_dir = os.path.dirname(path)
if check_folder_permission(request, repo_id, parent_dir) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
parent_dir_utf8 = os.path.dirname(path).encode('utf-8')
file_name_utf8 = os.path.basename(path).encode('utf-8')
try:
seafile_api.del_file(repo_id, parent_dir_utf8,
file_name_utf8,
request.user.username)
except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
"Failed to delete file.")
return reloaddir_if_necessary(request, repo, parent_dir_utf8)
class FileDetailView(APIView):
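    """Return metadata (size, id, mtime) of a file, optionally at a given commit."""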
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
repo = seafile_api.get_repo(repo_id)
if repo is None:
return api_error(status.HTTP_400_BAD_REQUEST, 'Library not found.')
path = request.GET.get('p', None)
if path is None:
return api_error(status.HTTP_400_BAD_REQUEST, 'Path is missing.')
commit_id = request.GET.get('commit_id', None)
if commit_id:
try:
obj_id = seafserv_threaded_rpc.get_file_id_by_commit_and_path(
repo.id, commit_id, path)
except SearpcError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
'Failed to get file id.')
else:
try:
obj_id = seafile_api.get_file_id_by_path(repo_id,
path.encode('utf-8'))
except SearpcError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
'Failed to get file id.')
if not obj_id:
return api_error(status.HTTP_404_NOT_FOUND, 'File not found.')
# fetch file contributors and latest contributor
try:
# get real path for sub repo
real_path = repo.origin_path + path if repo.origin_path else path
dirent = seafile_api.get_dirent_by_path(repo.store_id, real_path)
if dirent:
latest_contributor, last_modified = dirent.modifier, dirent.mtime
else:
latest_contributor, last_modified = None, 0
except SearpcError as e:
logger.error(e)
latest_contributor, last_modified = None, 0
entry = {}
try:
entry["size"] = get_file_size(repo.store_id, repo.version, obj_id)
        except Exception as e:
entry["size"] = 0
entry["type"] = "file"
entry["name"] = os.path.basename(path)
entry["id"] = obj_id
entry["mtime"] = last_modified
return HttpResponse(json.dumps(entry), status=200,
content_type=json_content_type)
class FileRevert(APIView):
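    """Revert a file to its state in a given commit."""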
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def put(self, request, repo_id, format=None):
path = request.data.get('p', None)
commit_id = request.data.get('commit_id', None)
if not path:
error_msg = 'path invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if not commit_id:
error_msg = 'commit_id invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if not seafile_api.get_repo(repo_id):
error_msg = 'library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
if not seafile_api.get_file_id_by_commit_and_path(repo_id, commit_id, path):
error_msg = 'file %s not found.' % path
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
if check_folder_permission(request, repo_id, '/') != 'rw':
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
username = request.user.username
try:
seafile_api.revert_file(repo_id, commit_id, path, username)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({'success': True})
class FileRevision(APIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
path = request.GET.get('p', None)
if path is None:
return api_error(status.HTTP_400_BAD_REQUEST, 'Path is missing.')
file_name = os.path.basename(path)
commit_id = request.GET.get('commit_id', None)
try:
obj_id = seafserv_threaded_rpc.get_file_id_by_commit_and_path(
repo_id, commit_id, path)
except:
return api_error(status.HTTP_404_NOT_FOUND, 'Revision not found.')
return get_repo_file(request, repo_id, obj_id, file_name, 'download')
class FileHistory(APIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
path = request.GET.get('p', None)
if path is None:
return api_error(status.HTTP_400_BAD_REQUEST, 'Path is missing.')
try:
commits = seafserv_threaded_rpc.list_file_revisions(repo_id, path,
-1, -1)
except SearpcError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, "Internal error")
if not commits:
return api_error(status.HTTP_404_NOT_FOUND, 'File not found.')
return HttpResponse(json.dumps({"commits": commits}, cls=SearpcObjEncoder), status=200, content_type=json_content_type)
class FileSharedLinkView(APIView):
"""
Support uniform interface for file shared link.
"""
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def put(self, request, repo_id, format=None):
repo = seaserv.get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, "Library does not exist")
path = request.data.get('p', None)
if not path:
return api_error(status.HTTP_400_BAD_REQUEST, 'Path is missing')
username = request.user.username
password = request.data.get('password', None)
share_type = request.data.get('share_type', 'download')
if password and len(password) < config.SHARE_LINK_PASSWORD_MIN_LENGTH:
return api_error(status.HTTP_400_BAD_REQUEST, 'Password is too short')
if share_type.lower() == 'download':
if check_folder_permission(request, repo_id, path) is None:
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied')
expire = request.data.get('expire', None)
if expire:
try:
expire_days = int(expire)
except ValueError:
return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid expiration days')
else:
expire_date = timezone.now() + relativedelta(days=expire_days)
else:
expire_date = None
is_dir = False
if path == '/':
is_dir = True
else:
try:
real_path = repo.origin_path + path if repo.origin_path else path
dirent = seafile_api.get_dirent_by_path(repo.store_id, real_path)
except SearpcError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, "Internal error")
if not dirent:
return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid path')
if stat.S_ISDIR(dirent.mode):
is_dir = True
if is_dir:
# generate dir download link
fs = FileShare.objects.get_dir_link_by_path(username, repo_id, path)
if fs is None:
fs = FileShare.objects.create_dir_link(username, repo_id, path,
password, expire_date)
if is_org_context(request):
org_id = request.user.org.org_id
OrgFileShare.objects.set_org_file_share(org_id, fs)
else:
# generate file download link
fs = FileShare.objects.get_file_link_by_path(username, repo_id, path)
if fs is None:
fs = FileShare.objects.create_file_link(username, repo_id, path,
password, expire_date)
if is_org_context(request):
org_id = request.user.org.org_id
OrgFileShare.objects.set_org_file_share(org_id, fs)
token = fs.token
shared_link = gen_shared_link(token, fs.s_type)
elif share_type.lower() == 'upload':
if not seafile_api.get_dir_id_by_path(repo_id, path):
return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid path')
if check_folder_permission(request, repo_id, path) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied')
# generate upload link
uls = UploadLinkShare.objects.get_upload_link_by_path(username, repo_id, path)
if uls is None:
uls = UploadLinkShare.objects.create_upload_link_share(
username, repo_id, path, password)
token = uls.token
shared_link = gen_shared_upload_link(token)
else:
return api_error(status.HTTP_400_BAD_REQUEST,
"Operation can only be download or upload.")
resp = Response(status=status.HTTP_201_CREATED)
resp['Location'] = shared_link
return resp
########## Directory related
class DirView(APIView):
"""
Support uniform interface for directory operations, including
create/delete/rename/list, etc.
"""
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
# list dir
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
path = request.GET.get('p', '/')
if path[-1] != '/':
path = path + '/'
if check_folder_permission(request, repo_id, path) is None:
            return api_error(status.HTTP_403_FORBIDDEN, 'You do not have permission to access this folder.')
try:
dir_id = seafile_api.get_dir_id_by_path(repo_id,
path.encode('utf-8'))
except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
"Failed to get dir id by path.")
if not dir_id:
return api_error(status.HTTP_404_NOT_FOUND, "Path does not exist")
old_oid = request.GET.get('oid', None)
if old_oid and old_oid == dir_id:
response = HttpResponse(json.dumps("uptodate"), status=200,
content_type=json_content_type)
response["oid"] = dir_id
return response
else:
request_type = request.GET.get('t', None)
if request_type and request_type not in ('f', 'd'):
return api_error(status.HTTP_400_BAD_REQUEST,
"'t'(type) should be 'f' or 'd'.")
if request_type == 'd':
recursive = request.GET.get('recursive', '0')
if recursive not in ('1', '0'):
return api_error(status.HTTP_400_BAD_REQUEST,
"If you want to get recursive dir entries, you should set 'recursive' argument as '1'.")
if recursive == '1':
username = request.user.username
dir_list = get_dir_recursively(username, repo_id, path, [])
                    dir_list.sort(key=lambda x: x['name'].lower())
response = HttpResponse(json.dumps(dir_list), status=200,
content_type=json_content_type)
response["oid"] = dir_id
response["dir_perm"] = seafile_api.check_permission_by_path(repo_id, path, username)
return response
return get_dir_entrys_by_id(request, repo, path, dir_id, request_type)
def post(self, request, repo_id, format=None):
# new dir
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
path = request.GET.get('p', '')
if not path or path[0] != '/':
return api_error(status.HTTP_400_BAD_REQUEST, "Path is missing.")
if path == '/': # Can not make or rename root dir.
return api_error(status.HTTP_400_BAD_REQUEST, "Path is invalid.")
if path[-1] == '/': # Cut out last '/' if possible.
path = path[:-1]
username = request.user.username
operation = request.POST.get('operation', '')
parent_dir = os.path.dirname(path)
parent_dir_utf8 = parent_dir.encode('utf-8')
if operation.lower() == 'mkdir':
parent_dir = os.path.dirname(path)
if check_folder_permission(request, repo_id, parent_dir) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN, 'You do not have permission to access this folder.')
create_parents = request.POST.get('create_parents', '').lower() in ('true', '1')
if not create_parents:
# check whether parent dir exists
if not seafile_api.get_dir_id_by_path(repo_id, parent_dir):
return api_error(status.HTTP_400_BAD_REQUEST,
'Parent dir does not exist')
new_dir_name = os.path.basename(path)
new_dir_name_utf8 = check_filename_with_rename_utf8(repo_id,
parent_dir,
new_dir_name)
try:
seafile_api.post_dir(repo_id, parent_dir,
new_dir_name_utf8, username)
except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
'Failed to make directory.')
else:
if not is_seafile_pro():
return api_error(status.HTTP_400_BAD_REQUEST,
'Feature not supported.')
try:
seafile_api.mkdir_with_parents(repo_id, '/',
path[1:], username)
except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
'Failed to make directory.')
new_dir_name_utf8 = os.path.basename(path).encode('utf-8')
if request.GET.get('reloaddir', '').lower() == 'true':
resp = reloaddir(request, repo, parent_dir)
else:
resp = Response('success', status=status.HTTP_201_CREATED)
uri = reverse('DirView', args=[repo_id], request=request)
resp['Location'] = uri + '?p=' + quote(parent_dir_utf8) + \
quote(new_dir_name_utf8)
return resp
elif operation.lower() == 'rename':
if check_folder_permission(request, repo.id, path) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN, 'You do not have permission to access this folder.')
parent_dir = os.path.dirname(path)
old_dir_name = os.path.basename(path)
newname = request.POST.get('newname', '')
if not newname:
return api_error(status.HTTP_400_BAD_REQUEST, "New name is mandatory.")
if newname == old_dir_name:
return Response('success', status=status.HTTP_200_OK)
try:
# rename duplicate name
checked_newname = check_filename_with_rename(
repo_id, parent_dir, newname)
# rename dir
seafile_api.rename_file(repo_id, parent_dir, old_dir_name,
checked_newname, username)
return Response('success', status=status.HTTP_200_OK)
            except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
'Failed to rename folder.')
# elif operation.lower() == 'move':
# pass
else:
return api_error(status.HTTP_400_BAD_REQUEST,
"Operation not supported.")
def delete(self, request, repo_id, format=None):
# delete dir or file
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
path = request.GET.get('p', None)
if not path:
return api_error(status.HTTP_400_BAD_REQUEST, 'Path is missing.')
if check_folder_permission(request, repo_id, path) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN, 'You do not have permission to access this folder.')
if path == '/': # Can not delete root path.
return api_error(status.HTTP_400_BAD_REQUEST, 'Path is invalid.')
if path[-1] == '/': # Cut out last '/' if possible.
path = path[:-1]
parent_dir = os.path.dirname(path)
parent_dir_utf8 = os.path.dirname(path).encode('utf-8')
file_name_utf8 = os.path.basename(path).encode('utf-8')
username = request.user.username
try:
seafile_api.del_file(repo_id, parent_dir_utf8,
file_name_utf8, username)
except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
"Failed to delete file.")
return reloaddir_if_necessary(request, repo, parent_dir_utf8)
class DirDownloadView(APIView):
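    """
    Generate a fileserver download link for a whole directory.
    """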
authentication_classes = (TokenAuthentication, SessionAuthentication )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
path = request.GET.get('p', None)
if path is None:
return api_error(status.HTTP_400_BAD_REQUEST, 'Path is missing.')
if path[-1] != '/': # Normalize dir path
path += '/'
if len(path) > 1:
dirname = os.path.basename(path.rstrip('/'))
else:
dirname = repo.name
current_commit = get_commits(repo_id, 0, 1)[0]
if not current_commit:
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
'Failed to get current commit of repo %s.' % repo_id)
try:
dir_id = seafile_api.get_dir_id_by_commit_and_path(current_commit.repo_id,
current_commit.id, path)
        except SearpcError as e:
return api_error(HTTP_520_OPERATION_FAILED,
"Failed to get dir id by path")
if not dir_id:
return api_error(status.HTTP_404_NOT_FOUND, "Path does not exist")
try:
total_size = seafserv_threaded_rpc.get_dir_size(repo.store_id, repo.version,
dir_id)
        except Exception as e:
logger.error(str(e))
return api_error(HTTP_520_OPERATION_FAILED, "Internal error")
if total_size > MAX_DOWNLOAD_DIR_SIZE:
return api_error(status.HTTP_400_BAD_REQUEST,
'Unable to download directory "%s": size is too large.' % dirname)
token = seafile_api.get_fileserver_access_token(repo_id,
dir_id,
'download-dir',
request.user.username)
redirect_url = gen_file_get_url(token, dirname)
return HttpResponse(json.dumps(redirect_url), status=200,
content_type=json_content_type)
class DirRevert(APIView):
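    """
    Revert a folder to its state in a given commit.
    """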
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def put(self, request, repo_id):
path = request.data.get('p', None)
commit_id = request.data.get('commit_id', None)
if not path:
error_msg = 'path invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if not commit_id:
error_msg = 'commit_id invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if not seafile_api.get_repo(repo_id):
error_msg = 'library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
if not seafile_api.get_dir_id_by_commit_and_path(repo_id, commit_id, path):
error_msg = 'folder %s not found.' % path
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
if check_folder_permission(request, repo_id, '/') != 'rw':
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
username = request.user.username
try:
seafile_api.revert_dir(repo_id, commit_id, path, username)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({'success': True})
class DirSubRepoView(APIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
# from seahub.views.ajax.py::sub_repo
def get(self, request, repo_id, format=None):
'''
check if a dir has a corresponding sub_repo
if it does not have, create one
'''
result = {}
path = request.GET.get('p')
name = request.GET.get('name')
password = request.GET.get('password', None)
repo = get_repo(repo_id)
if not repo:
result['error'] = 'Library not found.'
return HttpResponse(json.dumps(result), status=404, content_type=json_content_type)
if not (path and name):
result['error'] = 'Argument missing'
return HttpResponse(json.dumps(result), status=400, content_type=json_content_type)
username = request.user.username
# check if the sub-lib exist
try:
sub_repo = seafile_api.get_virtual_repo(repo_id, path, username)
        except SearpcError as e:
result['error'] = e.msg
return HttpResponse(json.dumps(result), status=500, content_type=json_content_type)
if sub_repo:
result['sub_repo_id'] = sub_repo.id
else:
if not request.user.permissions.can_add_repo():
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to create library.')
# create a sub-lib
try:
# use name as 'repo_name' & 'repo_desc' for sub_repo
if repo.encrypted:
if password:
sub_repo_id = seafile_api.create_virtual_repo(repo_id,
path, name, name, username, password)
else:
result['error'] = 'Password Required.'
return HttpResponse(json.dumps(result), status=403, content_type=json_content_type)
else:
sub_repo_id = seafile_api.create_virtual_repo(repo_id, path, name, name, username)
result['sub_repo_id'] = sub_repo_id
            except SearpcError as e:
result['error'] = e.msg
return HttpResponse(json.dumps(result), status=500, content_type=json_content_type)
return HttpResponse(json.dumps(result), content_type=json_content_type)
########## Sharing
class SharedRepos(APIView):
"""
List repos that a user share to others/groups/public.
"""
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
username = request.user.username
shared_repos = []
shared_repos += list_share_repos(username, 'from_email', -1, -1)
shared_repos += get_group_repos_by_owner(username)
if not CLOUD_MODE:
shared_repos += list_inner_pub_repos_by_owner(username)
return HttpResponse(json.dumps(shared_repos, cls=SearpcObjEncoder),
status=200, content_type=json_content_type)
class BeShared(APIView):
"""
List repos that others/groups share to user.
"""
authentication_classes = (TokenAuthentication, SessionAuthentication )
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
username = request.user.username
shared_repos = []
shared_repos += seafile_api.get_share_in_repo_list(username, -1, -1)
joined_groups = get_personal_groups_by_user(username)
for grp in joined_groups:
            # Get group repos, and for each group repo...
for r_id in get_group_repoids(grp.id):
# No need to list my own repo
if seafile_api.is_repo_owner(username, r_id):
continue
                # Convert repo properties due to the different columns in Repo
# and SharedRepo
r = get_repo(r_id)
if not r:
continue
r.repo_id = r.id
r.repo_name = r.name
r.repo_desc = r.desc
cmmts = get_commits(r_id, 0, 1)
last_commit = cmmts[0] if cmmts else None
r.last_modified = last_commit.ctime if last_commit else 0
r._dict['share_type'] = 'group'
r.user = seafile_api.get_repo_owner(r_id)
r.user_perm = check_permission(r_id, username)
shared_repos.append(r)
if not CLOUD_MODE:
shared_repos += seaserv.list_inner_pub_repos(username)
return HttpResponse(json.dumps(shared_repos, cls=SearpcObjEncoder),
status=200, content_type=json_content_type)
class VirtualRepos(APIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
result = {}
try:
virtual_repos = get_virtual_repos_by_owner(request)
except SearpcError, e:
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
"error:" + e.msg)
result['virtual-repos'] = virtual_repos
return HttpResponse(json.dumps(result, cls=SearpcObjEncoder),
content_type=json_content_type)
class SharedFileView(APIView):
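    """
    View or download a file through its share link token.
    """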
# Anyone should be able to access a Shared File assuming they have the token
throttle_classes = (UserRateThrottle, )
def get(self, request, token, format=None):
assert token is not None # Checked by URLconf
try:
fileshare = FileShare.objects.get(token=token)
except FileShare.DoesNotExist:
return api_error(status.HTTP_404_NOT_FOUND, "Token not found")
repo_id = fileshare.repo_id
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, "Library not found")
path = fileshare.path.rstrip('/') # Normalize file path
file_name = os.path.basename(path)
file_id = None
try:
file_id = seafserv_threaded_rpc.get_file_id_by_path(repo_id,
path.encode('utf-8'))
        except SearpcError as e:
return api_error(HTTP_520_OPERATION_FAILED,
"Failed to get file id by path.")
if not file_id:
return api_error(status.HTTP_404_NOT_FOUND, "File not found")
# Increase file shared link view_cnt, this operation should be atomic
fileshare.view_cnt = F('view_cnt') + 1
fileshare.save()
op = request.GET.get('op', 'download')
return get_repo_file(request, repo_id, file_id, file_name, op)
class SharedFileDetailView(APIView):
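    """
    Return metadata (size, type, mtime, path) of the file behind a share link.
    """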
throttle_classes = (UserRateThrottle, )
def get(self, request, token, format=None):
assert token is not None # Checked by URLconf
try:
fileshare = FileShare.objects.get(token=token)
except FileShare.DoesNotExist:
return api_error(status.HTTP_404_NOT_FOUND, "Token not found")
if fileshare.is_encrypted():
password = request.GET.get('password', '')
if not password:
return api_error(status.HTTP_403_FORBIDDEN, "Password is required")
if not check_password(password, fileshare.password):
return api_error(status.HTTP_403_FORBIDDEN, "Invalid Password")
repo_id = fileshare.repo_id
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, "Library not found")
path = fileshare.path.rstrip('/') # Normalize file path
file_name = os.path.basename(path)
file_id = None
try:
file_id = seafserv_threaded_rpc.get_file_id_by_path(repo_id,
path.encode('utf-8'))
commits = seafserv_threaded_rpc.list_file_revisions(repo_id, path,
-1, -1)
c = commits[0]
        except SearpcError as e:
return api_error(HTTP_520_OPERATION_FAILED,
"Failed to get file id by path.")
if not file_id:
return api_error(status.HTTP_404_NOT_FOUND, "File not found")
entry = {}
try:
entry["size"] = get_file_size(repo.store_id, repo.version, file_id)
except Exception as e:
logger.error(e)
entry["size"] = 0
entry["type"] = "file"
entry["name"] = file_name
entry["id"] = file_id
entry["mtime"] = c.ctime
entry["repo_id"] = repo_id
entry["path"] = path
return HttpResponse(json.dumps(entry), status=200,
content_type=json_content_type)
class FileShareEncoder(json.JSONEncoder):
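    """JSON encoder that serializes FileShare objects for the share link APIs."""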
def default(self, obj):
if not isinstance(obj, FileShare):
return None
return {'username':obj.username, 'repo_id':obj.repo_id,
'path':obj.path, 'token':obj.token,
'ctime':obj.ctime, 'view_cnt':obj.view_cnt,
's_type':obj.s_type}
class SharedLinksView(APIView):
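    """
    List the current user's share links, or delete one by its token.
    """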
authentication_classes = (TokenAuthentication, SessionAuthentication )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
# from seahub.share.view::list_shared_links
def get(self, request, format=None):
username = request.user.username
fileshares = FileShare.objects.filter(username=username)
p_fileshares = [] # personal file share
for fs in fileshares:
if is_personal_repo(fs.repo_id): # only list files in personal repos
r = seafile_api.get_repo(fs.repo_id)
if not r:
fs.delete()
continue
if fs.s_type == 'f':
if seafile_api.get_file_id_by_path(r.id, fs.path) is None:
fs.delete()
continue
fs.filename = os.path.basename(fs.path)
fs.shared_link = gen_file_share_link(fs.token)
else:
if seafile_api.get_dir_id_by_path(r.id, fs.path) is None:
fs.delete()
continue
fs.filename = os.path.basename(fs.path.rstrip('/'))
fs.shared_link = gen_dir_share_link(fs.token)
fs.repo = r
p_fileshares.append(fs)
return HttpResponse(json.dumps({"fileshares": p_fileshares}, cls=FileShareEncoder), status=200, content_type=json_content_type)
def delete(self, request, format=None):
token = request.GET.get('t', None)
if not token:
return api_error(status.HTTP_400_BAD_REQUEST, 'Token is missing')
username = request.user.username
share = FileShare.objects.filter(token=token).filter(username=username) or \
UploadLinkShare.objects.filter(token=token).filter(username=username)
if not share:
return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid token')
share.delete()
return HttpResponse(json.dumps({}), status=200, content_type=json_content_type)
class SharedDirView(APIView):
throttle_classes = (UserRateThrottle, )
def get(self, request, token, format=None):
"""List dirents in dir download shared link
"""
fileshare = FileShare.objects.get_valid_dir_link_by_token(token)
if not fileshare:
return api_error(status.HTTP_400_BAD_REQUEST, "Invalid token")
repo_id = fileshare.repo_id
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_400_BAD_REQUEST, "Invalid token")
if fileshare.is_encrypted():
password = request.GET.get('password', '')
if not password:
return api_error(status.HTTP_403_FORBIDDEN, "Password is required")
if not check_password(password, fileshare.password):
return api_error(status.HTTP_403_FORBIDDEN, "Invalid Password")
req_path = request.GET.get('p', '/')
if req_path[-1] != '/':
req_path += '/'
if req_path == '/':
real_path = fileshare.path
else:
real_path = posixpath.join(fileshare.path, req_path.lstrip('/'))
if real_path[-1] != '/': # Normalize dir path
real_path += '/'
dir_id = seafile_api.get_dir_id_by_path(repo_id, real_path)
if not dir_id:
return api_error(status.HTTP_400_BAD_REQUEST, "Invalid path")
username = fileshare.username
try:
dirs = seafserv_threaded_rpc.list_dir_with_perm(repo_id, real_path, dir_id,
username, -1, -1)
dirs = dirs if dirs else []
        except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED, "Failed to list dir.")
dir_list, file_list = [], []
for dirent in dirs:
dtype = "file"
entry = {}
if stat.S_ISDIR(dirent.mode):
dtype = "dir"
else:
if repo.version == 0:
entry["size"] = get_file_size(repo.store_id, repo.version,
dirent.obj_id)
else:
entry["size"] = dirent.size
entry["type"] = dtype
entry["name"] = dirent.obj_name
entry["id"] = dirent.obj_id
entry["mtime"] = dirent.mtime
if dtype == 'dir':
dir_list.append(entry)
else:
file_list.append(entry)
        dir_list.sort(key=lambda x: x['name'].lower())
        file_list.sort(key=lambda x: x['name'].lower())
dentrys = dir_list + file_list
content_type = 'application/json; charset=utf-8'
return HttpResponse(json.dumps(dentrys), status=200, content_type=content_type)
class DefaultRepoView(APIView):
"""
Get user's default library.
"""
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
username = request.user.username
repo_id = UserOptions.objects.get_default_repo(username)
if repo_id is None or (get_repo(repo_id) is None):
            resp_json = {
                'exists': False,
            }
            return Response(resp_json)
else:
return self.default_repo_info(repo_id)
def default_repo_info(self, repo_id):
repo_json = {
'exists': False,
}
if repo_id is not None:
repo_json['exists'] = True
repo_json['repo_id'] = repo_id
return Response(repo_json)
def post(self, request):
if not request.user.permissions.can_add_repo():
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to create library.')
username = request.user.username
repo_id = UserOptions.objects.get_default_repo(username)
if repo_id and (get_repo(repo_id) is not None):
return self.default_repo_info(repo_id)
repo_id = create_default_library(request)
return self.default_repo_info(repo_id)
class SharedRepo(APIView):
"""
Support uniform interface for shared libraries.
"""
authentication_classes = (TokenAuthentication, SessionAuthentication )
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def delete(self, request, repo_id, format=None):
"""
Unshare a library.
Repo owner and system admin can perform this operation.
"""
username = request.user.username
if not request.user.is_staff and not seafile_api.is_repo_owner(
username, repo_id):
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to unshare library.')
share_type = request.GET.get('share_type', '')
if not share_type:
return api_error(status.HTTP_400_BAD_REQUEST,
'Share type is required.')
if share_type == 'personal':
user = request.GET.get('user', '')
if not user:
return api_error(status.HTTP_400_BAD_REQUEST,
'User is required.')
if not is_valid_username(user):
return api_error(status.HTTP_400_BAD_REQUEST,
'User is not valid')
remove_share(repo_id, username, user)
elif share_type == 'group':
group_id = request.GET.get('group_id', '')
if not group_id:
return api_error(status.HTTP_400_BAD_REQUEST,
'Group ID is required.')
try:
group_id = int(group_id)
except ValueError:
return api_error(status.HTTP_400_BAD_REQUEST,
'Group ID is not valid.')
seafile_api.unset_group_repo(repo_id, int(group_id), username)
elif share_type == 'public':
unset_inner_pub_repo(repo_id)
else:
return api_error(status.HTTP_400_BAD_REQUEST,
'Share type can only be personal or group or public.')
return Response('success', status=status.HTTP_200_OK)
def put(self, request, repo_id, format=None):
"""
Share a repo to users/groups/public.
"""
username = request.user.username
if is_org_context(request):
repo_owner = seafile_api.get_org_repo_owner(repo_id)
else:
repo_owner = seafile_api.get_repo_owner(repo_id)
if username != repo_owner:
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to share library.')
share_type = request.GET.get('share_type')
user = request.GET.get('user')
users = request.GET.get('users')
group_id = request.GET.get('group_id')
permission = request.GET.get('permission')
if permission != 'rw' and permission != "r":
return api_error(status.HTTP_400_BAD_REQUEST,
'Permission need to be rw or r.')
if share_type == 'personal':
from_email = seafile_api.get_repo_owner(repo_id)
shared_users = []
invalid_users = []
notexistent_users = []
notsharable_errors = []
usernames = []
if user:
usernames += user.split(",")
if users:
usernames += users.split(",")
if not user and not users:
return api_error(status.HTTP_400_BAD_REQUEST,
                                 'User or users parameter (comma separated) is required.')
for u in usernames:
if not u:
continue
if not is_valid_username(u):
invalid_users.append(u)
continue
if not is_registered_user(u):
notexistent_users.append(u)
continue
try:
seafile_api.share_repo(repo_id, from_email, u, permission)
shared_users.append(u)
                except SearpcError as e:
logger.error(e)
notsharable_errors.append(e)
if invalid_users or notexistent_users or notsharable_errors:
# removing already created share
for s_user in shared_users:
try:
remove_share(repo_id, from_email, s_user)
                    except SearpcError as e:
# ignoring this error, go to next unsharing
continue
if invalid_users:
return api_error(status.HTTP_400_BAD_REQUEST,
'Some users are not valid, sharing rolled back')
if notexistent_users:
return api_error(status.HTTP_400_BAD_REQUEST,
'Some users are not existent, sharing rolled back')
if notsharable_errors:
# show the first sharing error
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
'Internal error occurs, sharing rolled back')
elif share_type == 'group':
try:
group_id = int(group_id)
except ValueError:
return api_error(status.HTTP_400_BAD_REQUEST,
'Group ID must be integer.')
from_email = seafile_api.get_repo_owner(repo_id)
group = get_group(group_id)
if not group:
return api_error(status.HTTP_400_BAD_REQUEST,
                                 'Group does not exist.')
try:
seafile_api.set_group_repo(repo_id, int(group_id),
from_email, permission)
            except SearpcError as e:
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
"Searpc Error: " + e.msg)
elif share_type == 'public':
if not CLOUD_MODE:
if not is_org_repo_creation_allowed(request):
return api_error(status.HTTP_403_FORBIDDEN,
'Failed to share library to public: permission denied.')
try:
seafile_api.add_inner_pub_repo(repo_id, permission)
                except SearpcError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
'Failed to share library to public.')
else:
if is_org_context(request):
org_id = request.user.org.org_id
try:
seaserv.seafserv_threaded_rpc.set_org_inner_pub_repo(org_id, repo_id, permission)
send_perm_audit_msg('add-repo-perm', username, 'all', repo_id, '/', permission)
                    except SearpcError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
'Failed to share library to public.')
else:
return api_error(status.HTTP_403_FORBIDDEN,
'Failed to share library to public.')
else:
return api_error(status.HTTP_400_BAD_REQUEST,
'Share type can only be personal or group or public.')
return Response('success', status=status.HTTP_200_OK)
class GroupAndContacts(APIView):
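    """
    Return the user's groups, contacts and unread message counters.
    """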
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
contacts, umsgnum, group_json, gmsgnum, replies, replynum = get_group_and_contacts(request.user.username)
res = {
"groups": group_json,
"contacts": contacts,
"newreplies":replies,
"replynum": replynum,
"umsgnum" : umsgnum,
"gmsgnum" : gmsgnum,
}
return Response(res)
class EventsView(APIView):
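    """
    List the user's activity events with paging information.
    """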
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
if not EVENTS_ENABLED:
return api_error(status.HTTP_404_NOT_FOUND, 'Events not enabled.')
start = request.GET.get('start', '')
if not start:
start = 0
else:
try:
start = int(start)
except ValueError:
return api_error(status.HTTP_400_BAD_REQUEST, 'Start id must be integer')
email = request.user.username
events_count = 15
if is_org_context(request):
org_id = request.user.org.org_id
events, events_more_offset = get_org_user_events(org_id, email,
start,
events_count)
else:
events, events_more_offset = get_user_events(email, start,
events_count)
events_more = True if len(events) == events_count else False
l = []
for e in events:
d = dict(etype=e.etype)
l.append(d)
if e.etype == 'repo-update':
d['author'] = e.commit.creator_name
d['time'] = e.commit.ctime
d['desc'] = e.commit.desc
d['repo_id'] = e.repo.id
d['repo_name'] = e.repo.name
d['commit_id'] = e.commit.id
d['converted_cmmt_desc'] = translate_commit_desc_escape(convert_cmmt_desc_link(e.commit))
d['more_files'] = e.commit.more_files
d['repo_encrypted'] = e.repo.encrypted
else:
d['repo_id'] = e.repo_id
d['repo_name'] = e.repo_name
if e.etype == 'repo-create':
d['author'] = e.creator
else:
d['author'] = e.repo_owner
epoch = datetime.datetime(1970, 1, 1)
local = utc_to_local(e.timestamp)
time_diff = local - epoch
d['time'] = time_diff.seconds + (time_diff.days * 24 * 3600)
size = request.GET.get('size', 36)
url, is_default, date_uploaded = api_avatar_url(d['author'], size)
d['nick'] = email2nickname(d['author'])
d['name'] = email2nickname(d['author'])
d['avatar'] = avatar(d['author'], size)
d['avatar_url'] = request.build_absolute_uri(url)
d['time_relative'] = translate_seahub_time(utc_to_local(e.timestamp))
d['date'] = utc_to_local(e.timestamp).strftime("%Y-%m-%d")
ret = {
'events': l,
'more': events_more,
'more_offset': events_more_offset,
}
return Response(ret)
class UnseenMessagesCountView(APIView):
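    """
    Return the number of unseen notifications for the current user.
    """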
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
username = request.user.username
        ret = {
            'count': UserNotification.objects.count_unseen_user_notifications(username)
        }
return Response(ret)
########## Groups related
class Groups(APIView):
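    """
    List the user's groups, and create, rename or delete a group.
    """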
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
size = request.GET.get('size', 36)
limit = int(request.GET.get('limit', 8))
with_msg = request.GET.get('with_msg', 'true')
        # To avoid breaking the old API, with_msg defaults to 'true'.
if with_msg == 'true':
group_json, replynum = get_groups(request.user.username)
res = {"groups": group_json, "replynum": replynum}
return Response(res)
else:
groups_json = []
joined_groups = get_personal_groups_by_user(request.user.username)
for g in joined_groups:
if limit <= 0:
                    break
group = {
"id": g.id,
"name": g.group_name,
"creator": g.creator_name,
"ctime": g.timestamp,
"avatar": grp_avatar(g.id, int(size)),
}
groups_json.append(group)
limit = limit - 1
return Response(groups_json)
def put(self, request, format=None):
# modified slightly from groups/views.py::group_list
"""
Add a new group.
"""
result = {}
content_type = 'application/json; charset=utf-8'
username = request.user.username
if not request.user.permissions.can_add_group():
return api_error(status.HTTP_403_FORBIDDEN,
'You do not have permission to create group.')
# check plan
num_of_groups = getattr(request.user, 'num_of_groups', -1)
if num_of_groups > 0:
current_groups = len(get_personal_groups_by_user(username))
if current_groups > num_of_groups:
result['error'] = 'You can only create %d groups.' % num_of_groups
return HttpResponse(json.dumps(result), status=500,
content_type=content_type)
        group_name = request.data.get('group_name', '')
group_name = group_name.strip()
if not validate_group_name(group_name):
result['error'] = 'Failed to rename group, group name can only contain letters, numbers, blank, hyphen or underscore.'
return HttpResponse(json.dumps(result), status=403,
content_type=content_type)
# Check whether group name is duplicated.
if request.cloud_mode:
checked_groups = get_personal_groups_by_user(username)
else:
checked_groups = get_personal_groups(-1, -1)
for g in checked_groups:
if g.group_name == group_name:
result['error'] = 'There is already a group with that name.'
return HttpResponse(json.dumps(result), status=400,
content_type=content_type)
# Group name is valid, create that group.
try:
group_id = ccnet_threaded_rpc.create_group(group_name.encode('utf-8'),
username)
return HttpResponse(json.dumps({'success': True, 'group_id': group_id}),
content_type=content_type)
        except SearpcError as e:
result['error'] = e.msg
return HttpResponse(json.dumps(result), status=500,
content_type=content_type)
def delete(self, request, group_id, format=None):
try:
group_id = int(group_id)
except ValueError:
return api_error(status.HTTP_400_BAD_REQUEST, 'Bad group id format')
group = seaserv.get_group(group_id)
if not group:
return api_error(status.HTTP_404_NOT_FOUND, 'Group not found')
# permission check
username = request.user.username
if not seaserv.check_group_staff(group_id, username):
return api_error(status.HTTP_403_FORBIDDEN, 'You do not have permission to delete group')
# delete group
if is_org_context(request):
org_id = request.user.org.org_id
else:
org_id = None
try:
remove_group_common(group.id, username, org_id=org_id)
except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
'Failed to remove group.')
return Response('success', status=status.HTTP_200_OK)
def post(self, request, group_id, format=None):
group = seaserv.get_group(group_id)
if not group:
return api_error(status.HTTP_404_NOT_FOUND, 'Group not found')
# permission check
username = request.user.username
if not seaserv.check_group_staff(group.id, username):
return api_error(status.HTTP_403_FORBIDDEN, 'You do not have permission to rename group')
operation = request.POST.get('operation', '')
if operation.lower() == 'rename':
newname = request.POST.get('newname', '')
if not newname:
return api_error(status.HTTP_400_BAD_REQUEST,
'New name is missing')
try:
rename_group_with_new_name(request, group.id, newname)
except BadGroupNameError:
return api_error(status.HTTP_400_BAD_REQUEST,
'Group name is not valid.')
except ConflictGroupNameError:
return api_error(status.HTTP_400_BAD_REQUEST,
'There is already a group with that name.')
return Response('success', status=status.HTTP_200_OK)
else:
return api_error(status.HTTP_400_BAD_REQUEST,
"Operation can only be rename.")
class GroupMembers(APIView):
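    """
    Add or remove group members (group staff only).
    """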
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def put(self, request, group_id, format=None):
"""
Add group members.
"""
try:
group_id_int = int(group_id)
except ValueError:
return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid group ID')
group = get_group(group_id_int)
if not group:
return api_error(status.HTTP_404_NOT_FOUND, 'Group not found')
if not is_group_staff(group, request.user):
return api_error(status.HTTP_403_FORBIDDEN, 'Only administrators can add group members')
user_name = request.data.get('user_name', None)
if not is_registered_user(user_name):
return api_error(status.HTTP_400_BAD_REQUEST, 'Not a valid user')
try:
ccnet_threaded_rpc.group_add_member(group.id, request.user.username, user_name)
        except SearpcError as e:
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Unable to add user to group')
return HttpResponse(json.dumps({'success': True}), status=200, content_type=json_content_type)
def delete(self, request, group_id, format=None):
"""
Delete group members.
"""
try:
group_id_int = int(group_id)
except ValueError:
return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid group ID')
group = get_group(group_id_int)
if not group:
return api_error(status.HTTP_404_NOT_FOUND, 'Group not found')
if not is_group_staff(group, request.user):
return api_error(status.HTTP_403_FORBIDDEN, 'Only administrators can remove group members')
user_name = request.data.get('user_name', None)
try:
ccnet_threaded_rpc.group_remove_member(group.id, request.user.username, user_name)
        except SearpcError as e:
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Unable to remove user from group')
return HttpResponse(json.dumps({'success': True}), status=200, content_type=json_content_type)
class GroupRepos(APIView):
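    """
    List a group's libraries, or create a new library shared to the group.
    """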
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
@api_group_check
def post(self, request, group, format=None):
# add group repo
username = request.user.username
repo_name = request.data.get("name", None)
repo_desc = request.data.get("desc", '')
passwd = request.data.get("passwd", None)
        # to avoid a 'Bad magic' error when creating a repo, passwd should be None,
        # not an empty string, when creating an unencrypted repo
if not passwd:
passwd = None
if (passwd is not None) and (not config.ENABLE_ENCRYPTED_LIBRARY):
return api_error(status.HTTP_403_FORBIDDEN,
                             'NOT allowed to create encrypted library.')
permission = request.data.get("permission", 'r')
if permission != 'r' and permission != 'rw':
return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid permission')
if is_org_context(request):
org_id = request.user.org.org_id
repo_id = seafile_api.create_org_repo(repo_name, repo_desc,
username, passwd, org_id)
repo = seafile_api.get_repo(repo_id)
seafile_api.add_org_group_repo(repo_id, org_id, group.id,
username, permission)
else:
repo_id = seafile_api.create_repo(repo_name, repo_desc,
username, passwd)
repo = seafile_api.get_repo(repo_id)
seafile_api.set_group_repo(repo.id, group.id, username, permission)
group_repo = {
"id": repo.id,
"name": repo.name,
"desc": repo.desc,
"size": repo.size,
"size_formatted": filesizeformat(repo.size),
"mtime": repo.last_modified,
"mtime_relative": translate_seahub_time(repo.last_modified),
"encrypted": repo.encrypted,
"permission": permission,
"owner": username,
"owner_nickname": email2nickname(username),
"share_from_me": True,
}
return Response(group_repo, status=200)
@api_group_check
def get(self, request, group, format=None):
username = request.user.username
if group.is_pub:
if not request.user.is_staff and not is_group_user(group.id, username):
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
if is_org_context(request):
org_id = request.user.org.org_id
repos = seafile_api.get_org_group_repos(org_id, group.id)
else:
repos = seafile_api.get_repos_by_group(group.id)
        repos.sort(key=lambda r: r.last_modified, reverse=True)
group.is_staff = is_group_staff(group, request.user)
repos_json = []
for r in repos:
repo = {
"id": r.id,
"name": r.name,
"desc": r.desc,
"size": r.size,
"size_formatted": filesizeformat(r.size),
"mtime": r.last_modified,
"mtime_relative": translate_seahub_time(r.last_modified),
"encrypted": r.encrypted,
"permission": r.permission,
"owner": r.user,
"owner_nickname": email2nickname(r.user),
"share_from_me": True if username == r.user else False,
}
repos_json.append(repo)
req_from = request.GET.get('from', "")
if req_from == 'web':
return Response({"is_staff": group.is_staff, "repos": repos_json})
else:
return Response(repos_json)
class GroupRepo(APIView):
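    """
    Unshare a single library from a group.
    """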
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
@api_group_check
def delete(self, request, group, repo_id, format=None):
username = request.user.username
group_id = group.id
if not group.is_staff and not seafile_api.is_repo_owner(username, repo_id):
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
if seaserv.is_org_group(group_id):
org_id = seaserv.get_org_id_by_group(group_id)
seaserv.del_org_group_repo(repo_id, org_id, group_id)
else:
seafile_api.unset_group_repo(repo_id, group_id, username)
return HttpResponse(json.dumps({'success': True}), status=200,
content_type=json_content_type)
class UserAvatarView(APIView):
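    """
    Return avatar URL information for a user.
    """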
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, user, size, format=None):
url, is_default, date_uploaded = api_avatar_url(user, int(size))
ret = {
"url": request.build_absolute_uri(url),
"is_default": is_default,
"mtime": get_timestamp(date_uploaded) }
return Response(ret)
class GroupAvatarView(APIView):
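    """
    Return avatar URL information for a group.
    """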
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, group_id, size, format=None):
url, is_default, date_uploaded = api_grp_avatar_url(group_id, int(size))
ret = {
"url": request.build_absolute_uri(url),
"is_default": is_default,
"mtime": get_timestamp(date_uploaded)}
return Response(ret)
# Html related code
def html_events(request):
if not EVENTS_ENABLED:
events = None
return render_to_response('api2/events.html', {
"events":events,
}, context_instance=RequestContext(request))
email = request.user.username
start = 0
events_count = 15
if is_org_context(request):
org_id = request.user.org.org_id
events, events_more_offset = get_org_user_events(org_id, email,
start, events_count)
else:
events, events_more_offset = get_user_events(email, start,
events_count)
events_more = True if len(events) == events_count else False
event_groups = group_events_data(events)
prepare_events(event_groups)
return render_to_response('api2/events.html', {
"events": events,
"events_more_offset": events_more_offset,
"events_more": events_more,
"event_groups": event_groups,
"events_count": events_count,
}, context_instance=RequestContext(request))
def ajax_events(request):
events_count = 15
username = request.user.username
start = int(request.GET.get('start', 0))
events, start = get_user_events(username, start, events_count)
events_more = True if len(events) == events_count else False
event_groups = group_events_data(events)
prepare_events(event_groups)
ctx = {'event_groups': event_groups}
html = render_to_string("api2/events_body.html", ctx)
return HttpResponse(json.dumps({'html':html, 'events_more':events_more,
'new_start': start}),
content_type=json_content_type)
def html_repo_history_changes(request, repo_id):
changes = {}
repo = get_repo(repo_id)
if not repo:
return HttpResponse(json.dumps({"err": 'Library does not exist'}), status=400, content_type=json_content_type)
if not check_folder_permission(request, repo_id, '/'):
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
if repo.encrypted and not is_passwd_set(repo_id, request.user.username):
return HttpResponse(json.dumps({"err": 'Library is encrypted'}), status=400, content_type=json_content_type)
commit_id = request.GET.get('commit_id', '')
if not commit_id:
return HttpResponse(json.dumps({"err": 'Invalid argument'}), status=400, content_type=json_content_type)
changes = get_diff(repo_id, '', commit_id)
c = get_commit(repo_id, repo.version, commit_id)
if c.parent_id is None:
# A commit is a first commit only if its parent id is None.
changes['cmt_desc'] = repo.desc
elif c.second_parent_id is None:
# Normal commit only has one parent.
if c.desc.startswith('Changed library'):
changes['cmt_desc'] = 'Changed library name or description'
else:
# A commit is a merge only if it has two parents.
changes['cmt_desc'] = 'No conflict in the merge.'
for k in changes:
changes[k] = [f.replace ('a href="/', 'a class="normal" href="api://') for f in changes[k] ]
html = render_to_string('api2/event_details.html', {'changes': changes})
return HttpResponse(json.dumps({"html": html}), content_type=json_content_type)
class AjaxEvents(APIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
return ajax_events(request)
class EventsHtml(APIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
return html_events(request)
class RepoHistoryChange(APIView):
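    """
    Return the detailed file/dir changes introduced by a commit.
    """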
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
repo = get_repo(repo_id)
if not repo:
return HttpResponse(json.dumps({"err": 'Library does not exist'}),
status=400,
content_type=json_content_type)
if not check_folder_permission(request, repo_id, '/'):
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
commit_id = request.GET.get('commit_id', '')
if not commit_id:
return HttpResponse(json.dumps({"err": 'Invalid argument'}),
status=400,
content_type=json_content_type)
details = get_diff_details(repo_id, '', commit_id)
return HttpResponse(json.dumps(details),
content_type=json_content_type)
class RepoHistoryChangeHtml(APIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
return html_repo_history_changes(request, repo_id)
# based on views/file.py::office_convert_query_status
class OfficeConvertQueryStatus(APIView):
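    """
    Query the office converter for the conversion status of a file.
    """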
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
if not HAS_OFFICE_CONVERTER:
return api_error(status.HTTP_404_NOT_FOUND, 'Office converter not enabled.')
content_type = 'application/json; charset=utf-8'
ret = {'success': False}
file_id = request.GET.get('file_id', '')
if len(file_id) != 40:
ret['error'] = 'invalid param'
else:
try:
d = query_office_convert_status(file_id)
if d.error:
ret['error'] = d.error
else:
ret['success'] = True
ret['status'] = d.status
            except Exception as e:
logging.exception('failed to call query_office_convert_status')
ret['error'] = str(e)
return HttpResponse(json.dumps(ret), content_type=content_type)
# based on views/file.py::view_file and views/file.py::handle_document
class OfficeGenerateView(APIView):
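    """
    Trigger HTML conversion of an office document and return conversion info.
    """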
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
username = request.user.username
# check arguments
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
path = request.GET.get('p', '/').rstrip('/')
commit_id = request.GET.get('commit_id', None)
if commit_id:
try:
obj_id = seafserv_threaded_rpc.get_file_id_by_commit_and_path(
repo.id, commit_id, path)
except:
return api_error(status.HTTP_404_NOT_FOUND, 'Revision not found.')
else:
try:
obj_id = seafile_api.get_file_id_by_path(repo_id,
path.encode('utf-8'))
except:
return api_error(status.HTTP_404_NOT_FOUND, 'File not found.')
if not obj_id:
return api_error(status.HTTP_404_NOT_FOUND, 'File not found.')
# Check whether user has permission to view file and get file raw path,
        # return an error if permission is denied.
raw_path, inner_path, user_perm = get_file_view_path_and_perm(request,
repo_id,
obj_id, path)
if not user_perm:
return api_error(status.HTTP_403_FORBIDDEN, 'You do not have permission to view this file.')
u_filename = os.path.basename(path)
filetype, fileext = get_file_type_and_ext(u_filename)
if filetype != DOCUMENT:
            return api_error(status.HTTP_400_BAD_REQUEST, 'File is not a convertible document')
ret_dict = {}
if HAS_OFFICE_CONVERTER:
err = prepare_converted_html(inner_path, obj_id, fileext, ret_dict)
# populate return value dict
ret_dict['err'] = err
ret_dict['obj_id'] = obj_id
else:
ret_dict['filetype'] = 'Unknown'
return HttpResponse(json.dumps(ret_dict), status=200, content_type=json_content_type)
class ThumbnailView(APIView):
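    """
    Generate and return a thumbnail image for a file.
    """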
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id):
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
size = request.GET.get('size', None)
if size is None:
return api_error(status.HTTP_400_BAD_REQUEST, 'Size is missing.')
try:
size = int(size)
except ValueError as e:
logger.error(e)
return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid size.')
path = request.GET.get('p', None)
obj_id = get_file_id_by_path(repo_id, path)
if path is None or obj_id is None:
return api_error(status.HTTP_400_BAD_REQUEST, 'Wrong path.')
if repo.encrypted or not ENABLE_THUMBNAIL or \
check_folder_permission(request, repo_id, path) is None:
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
success, status_code = generate_thumbnail(request, repo_id, size, path)
if success:
thumbnail_dir = os.path.join(THUMBNAIL_ROOT, str(size))
thumbnail_file = os.path.join(thumbnail_dir, obj_id)
try:
with open(thumbnail_file, 'rb') as f:
thumbnail = f.read()
return HttpResponse(thumbnail, 'image/' + THUMBNAIL_EXTENSION)
except IOError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Failed to get thumbnail.')
else:
if status_code == 400:
return api_error(status.HTTP_400_BAD_REQUEST, "Invalid argument")
if status_code == 403:
return api_error(status.HTTP_403_FORBIDDEN, 'Forbidden')
if status_code == 500:
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Failed to generate thumbnail.')
_REPO_ID_PATTERN = re.compile(r'[-0-9a-f]{36}')
class RepoTokensView(APIView):
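    """
    Batch-generate sync tokens for a list of libraries the user can access.
    """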
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
@json_response
def get(self, request, format=None):
repos_id_str = request.GET.get('repos', None)
if not repos_id_str:
            return api_error(status.HTTP_400_BAD_REQUEST, "You must specify library ids")
repos_id = [repo_id for repo_id in repos_id_str.split(',') if repo_id]
if any([not _REPO_ID_PATTERN.match(repo_id) for repo_id in repos_id]):
return api_error(status.HTTP_400_BAD_REQUEST, "Libraries ids are invalid")
tokens = {}
for repo_id in repos_id:
repo = seafile_api.get_repo(repo_id)
if not repo:
continue
if not check_folder_permission(request, repo.id, '/'):
continue
tokens[repo_id] = seafile_api.generate_repo_token(repo_id, request.user.username)
return tokens
class OrganizationView(APIView):
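    """
    Admin-only endpoint for creating an organization (multi-tenancy setups).
    """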
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAdminUser, )
throttle_classes = (UserRateThrottle, )
def post(self, request, format=None):
if not CLOUD_MODE or not MULTI_TENANCY:
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied')
username = request.POST.get('username', None)
password = request.POST.get('password', None)
org_name = request.POST.get('org_name', None)
prefix = request.POST.get('prefix', None)
quota = request.POST.get('quota', None)
member_limit = request.POST.get('member_limit', ORG_MEMBER_QUOTA_DEFAULT)
if not org_name or not username or not password or \
not prefix or not quota or not member_limit:
return api_error(status.HTTP_400_BAD_REQUEST, "Missing argument")
if not is_valid_username(username):
return api_error(status.HTTP_400_BAD_REQUEST, "Email is not valid")
try:
quota_mb = int(quota)
except ValueError as e:
logger.error(e)
return api_error(status.HTTP_400_BAD_REQUEST, "Quota is not valid")
try:
User.objects.get(email = username)
user_exist = True
except User.DoesNotExist:
user_exist = False
if user_exist:
return api_error(status.HTTP_400_BAD_REQUEST, "A user with this email already exists")
slug_re = re.compile(r'^[-a-zA-Z0-9_]+$')
if not slug_re.match(prefix):
            return api_error(status.HTTP_400_BAD_REQUEST, "URL prefix can only contain letters, numbers, hyphens and underscores")
if ccnet_threaded_rpc.get_org_by_url_prefix(prefix):
return api_error(status.HTTP_400_BAD_REQUEST, "An organization with this prefix already exists")
try:
User.objects.create_user(username, password, is_staff=False, is_active=True)
create_org(org_name, prefix, username)
new_org = ccnet_threaded_rpc.get_org_by_url_prefix(prefix)
# set member limit
from seahub_extra.organizations.models import OrgMemberQuota
OrgMemberQuota.objects.set_quota(new_org.org_id, member_limit)
# set quota
quota = quota_mb * get_file_size_unit('MB')
seafserv_threaded_rpc.set_org_quota(new_org.org_id, quota)
return Response('success', status=status.HTTP_201_CREATED)
except Exception as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, "Internal error")
class RepoDownloadSharedLinks(APIView):
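    """
    List the download share links of a library (repo owner only).
    """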
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
repo = get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
org_id = None
if is_org_context(request):
org_id = request.user.org.org_id
# check permission
if org_id:
repo_owner = seafile_api.get_org_repo_owner(repo_id)
else:
repo_owner = seafile_api.get_repo_owner(repo_id)
if request.user.username != repo_owner or repo.is_virtual:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
shared_links = []
fileshares = FileShare.objects.filter(repo_id=repo_id)
for fs in fileshares:
size = None
shared_link = {}
if fs.is_file_share_link():
path = fs.path.rstrip('/') # Normalize file path
if seafile_api.get_file_id_by_path(repo.id, fs.path) is None:
continue
obj_id = seafile_api.get_file_id_by_path(repo_id, path)
size = seafile_api.get_file_size(repo.store_id, repo.version, obj_id)
else:
path = fs.path
if path[-1] != '/': # Normalize dir path
path += '/'
if seafile_api.get_dir_id_by_path(repo.id, fs.path) is None:
continue
shared_link['create_by'] = fs.username
shared_link['creator_name'] = email2nickname(fs.username)
shared_link['create_time'] = datetime_to_isoformat_timestr(fs.ctime)
shared_link['token'] = fs.token
shared_link['path'] = path
shared_link['name'] = os.path.basename(path.rstrip('/')) if path != '/' else '/'
shared_link['view_count'] = fs.view_cnt
shared_link['share_type'] = fs.s_type
shared_link['size'] = size if size else ''
shared_links.append(shared_link)
return Response(shared_links)
class RepoDownloadSharedLink(APIView):
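    """
    Delete a download share link of a library (repo owner only).
    """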
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def delete(self, request, repo_id, token, format=None):
repo = get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
org_id = None
if is_org_context(request):
org_id = request.user.org.org_id
# check permission
if org_id:
repo_owner = seafile_api.get_org_repo_owner(repo_id)
else:
repo_owner = seafile_api.get_repo_owner(repo_id)
if request.user.username != repo_owner or repo.is_virtual:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
link = FileShare.objects.get(token=token)
except FileShare.DoesNotExist:
error_msg = 'Link %s not found.' % token
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
link.delete()
result = {'success': True}
return Response(result)
class RepoUploadSharedLinks(APIView):
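    """
    List the upload share links of a library (repo owner only).
    """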
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
repo = get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
org_id = None
if is_org_context(request):
org_id = request.user.org.org_id
# check permission
if org_id:
repo_owner = seafile_api.get_org_repo_owner(repo_id)
else:
repo_owner = seafile_api.get_repo_owner(repo_id)
if request.user.username != repo_owner or repo.is_virtual:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
shared_links = []
fileshares = UploadLinkShare.objects.filter(repo_id=repo_id)
for fs in fileshares:
shared_link = {}
path = fs.path
if path[-1] != '/': # Normalize dir path
path += '/'
if seafile_api.get_dir_id_by_path(repo.id, fs.path) is None:
continue
shared_link['create_by'] = fs.username
shared_link['creator_name'] = email2nickname(fs.username)
shared_link['create_time'] = datetime_to_isoformat_timestr(fs.ctime)
shared_link['token'] = fs.token
shared_link['path'] = path
shared_link['name'] = os.path.basename(path.rstrip('/')) if path != '/' else '/'
shared_link['view_count'] = fs.view_cnt
shared_links.append(shared_link)
return Response(shared_links)
class RepoUploadSharedLink(APIView):
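    """
    Delete an upload share link of a library (repo owner only).
    """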
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def delete(self, request, repo_id, token, format=None):
repo = get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
org_id = None
if is_org_context(request):
org_id = request.user.org.org_id
# check permission
if org_id:
repo_owner = seafile_api.get_org_repo_owner(repo_id)
else:
repo_owner = seafile_api.get_repo_owner(repo_id)
if request.user.username != repo_owner or repo.is_virtual:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
link = UploadLinkShare.objects.get(token=token)
        except UploadLinkShare.DoesNotExist:
error_msg = 'Link %s not found.' % token
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
link.delete()
result = {'success': True}
return Response(result)
def get_repo_user_folder_perm_result(repo_id, path, user):
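    """Build the JSON payload describing one per-user folder permission entry."""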
result = {}
permission = seafile_api.get_folder_user_perm(repo_id, path, user)
if permission:
result['repo_id'] = repo_id
result['user_email'] = user
result['user_name'] = email2nickname(user)
result['folder_path'] = path
result['folder_name'] = path if path == '/' else os.path.basename(path.rstrip('/'))
result['permission'] = permission
return result
class RepoUserFolderPerm(APIView):
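    """
    Manage per-user folder permissions of a library (Pro edition only).
    """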
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
@api_repo_setting_permission_check
def get(self, request, repo_id, format=None):
if not is_pro_version():
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
results = []
folder_perms = seafile_api.list_folder_user_perm_by_repo(repo_id)
for perm in folder_perms:
result = {}
result['repo_id'] = perm.repo_id
result['user_email'] = perm.user
result['user_name'] = email2nickname(perm.user)
result['folder_path'] = perm.path
result['folder_name'] = perm.path if perm.path == '/' else os.path.basename(perm.path.rstrip('/'))
result['permission'] = perm.permission
results.append(result)
return Response(results)
@api_repo_user_folder_perm_check
def post(self, request, repo_id, format=None):
if not (is_pro_version() and ENABLE_FOLDER_PERM):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
user = request.data.get('user_email')
path = request.data.get('folder_path')
perm = request.data.get('permission')
path = path.rstrip('/') if path != '/' else path
permission = seafile_api.get_folder_user_perm(repo_id, path, user)
if permission:
error_msg = 'Permission already exists.'
return api_error(status.HTTP_409_CONFLICT, error_msg)
username = request.user.username
try:
seafile_api.add_folder_user_perm(repo_id, path, perm, user)
send_perm_audit_msg('add-repo-perm', username, user, repo_id, path, perm)
result = get_repo_user_folder_perm_result(repo_id, path, user)
return Response(result)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
@api_repo_user_folder_perm_check
def put(self, request, repo_id, format=None):
if not (is_pro_version() and ENABLE_FOLDER_PERM):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
user = request.data.get('user_email')
path = request.data.get('folder_path')
perm = request.data.get('permission')
path = path.rstrip('/') if path != '/' else path
permission = seafile_api.get_folder_user_perm(repo_id, path, user)
if not permission:
error_msg = 'Folder permission not found.'
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
username = request.user.username
try:
seafile_api.set_folder_user_perm(repo_id, path, perm, user)
send_perm_audit_msg('modify-repo-perm', username, user, repo_id, path, perm)
result = get_repo_user_folder_perm_result(repo_id, path, user)
return Response(result)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
@api_repo_user_folder_perm_check
def delete(self, request, repo_id, format=None):
if not (is_pro_version() and ENABLE_FOLDER_PERM):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
user = request.data.get('user_email')
path = request.data.get('folder_path')
path = path.rstrip('/') if path != '/' else path
permission = seafile_api.get_folder_user_perm(repo_id, path, user)
if not permission:
return Response({'success': True})
username = request.user.username
try:
seafile_api.rm_folder_user_perm(repo_id, path, user)
send_perm_audit_msg('delete-repo-perm', username,
user, repo_id, path, permission)
return Response({'success': True})
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
def get_repo_group_folder_perm_result(repo_id, path, group_id):
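    """Build the JSON payload describing one per-group folder permission entry."""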
result = {}
group = seaserv.get_group(group_id)
permission = seafile_api.get_folder_group_perm(repo_id, path, group_id)
if permission:
result['repo_id'] = repo_id
result['group_id'] = group_id
result['group_name'] = group.group_name
result['folder_path'] = path
result['folder_name'] = path if path == '/' else os.path.basename(path.rstrip('/'))
result['permission'] = permission
return result
class RepoGroupFolderPerm(APIView):
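    """
    Manage per-group folder permissions of a library (Pro edition only).
    """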
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
@api_repo_setting_permission_check
def get(self, request, repo_id, format=None):
if not is_pro_version():
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
results = []
group_folder_perms = seafile_api.list_folder_group_perm_by_repo(repo_id)
for perm in group_folder_perms:
result = {}
group = seaserv.get_group(perm.group_id)
result['repo_id'] = perm.repo_id
result['group_id'] = perm.group_id
result['group_name'] = group.group_name
result['folder_path'] = perm.path
result['folder_name'] = perm.path if perm.path == '/' else os.path.basename(perm.path.rstrip('/'))
result['permission'] = perm.permission
results.append(result)
return Response(results)
@api_repo_group_folder_perm_check
def post(self, request, repo_id, format=None):
if not (is_pro_version() and ENABLE_FOLDER_PERM):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
group_id = request.data.get('group_id')
path = request.data.get('folder_path')
perm = request.data.get('permission')
group_id = int(group_id)
path = path.rstrip('/') if path != '/' else path
permission = seafile_api.get_folder_group_perm(repo_id, path, group_id)
if permission:
error_msg = 'Permission already exists.'
return api_error(status.HTTP_409_CONFLICT, error_msg)
username = request.user.username
try:
seafile_api.add_folder_group_perm(repo_id, path, perm, group_id)
send_perm_audit_msg('add-repo-perm', username, group_id, repo_id, path, perm)
result = get_repo_group_folder_perm_result(repo_id, path, group_id)
return Response(result)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
@api_repo_group_folder_perm_check
def put(self, request, repo_id, format=None):
if not (is_pro_version() and ENABLE_FOLDER_PERM):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
group_id = request.data.get('group_id')
path = request.data.get('folder_path')
perm = request.data.get('permission')
group_id = int(group_id)
path = path.rstrip('/') if path != '/' else path
permission = seafile_api.get_folder_group_perm(repo_id, path, group_id)
if not permission:
error_msg = 'Folder permission not found.'
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
username = request.user.username
try:
seafile_api.set_folder_group_perm(repo_id, path, perm, group_id)
send_perm_audit_msg('modify-repo-perm', username, group_id, repo_id, path, perm)
result = get_repo_group_folder_perm_result(repo_id, path, group_id)
return Response(result)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
@api_repo_group_folder_perm_check
def delete(self, request, repo_id, format=None):
if not (is_pro_version() and ENABLE_FOLDER_PERM):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
group_id = request.data.get('group_id')
path = request.data.get('folder_path')
group_id = int(group_id)
path = path.rstrip('/') if path != '/' else path
permission = seafile_api.get_folder_group_perm(repo_id, path, group_id)
if not permission:
return Response({'success': True})
username = request.user.username
try:
seafile_api.rm_folder_group_perm(repo_id, path, group_id)
send_perm_audit_msg('delete-repo-perm', username, group_id,
repo_id, path, permission)
return Response({'success': True})
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
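# Summary of the handlers above (illustrative notes derived from the code; the
# URL routing and the permission-check decorators live elsewhere in seahub):
#   GET    -> list every per-group folder permission of the repo (pro edition)
#   POST   -> body: group_id, folder_path, permission; 409 if it already exists
#   PUT    -> same body; 404 if the permission has not been set yet
#   DELETE -> body: group_id, folder_path; returns {'success': True} once the
#             permission is gone (even if it never existed)
# Every successful write also emits an add/modify/delete-repo-perm audit message.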
| 39.660027 | 135 | 0.597648 |
7944a0ea67a5ff3e960696326d454aea24ac2ced | 6,257 | py | Python | pandas/tests/io/formats/test_to_string.py | botplex/pandas | be569627346bce10ffec205ffff26c5628a5bb9b | [
"BSD-3-Clause"
] | 1 | 2020-10-29T17:32:26.000Z | 2020-10-29T17:32:26.000Z | pandas/tests/io/formats/test_to_string.py | botplex/pandas | be569627346bce10ffec205ffff26c5628a5bb9b | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/io/formats/test_to_string.py | botplex/pandas | be569627346bce10ffec205ffff26c5628a5bb9b | [
"BSD-3-Clause"
] | null | null | null | from datetime import datetime
from io import StringIO
import numpy as np
import pytest
from pandas import DataFrame, Series, option_context, to_datetime
def test_repr_embedded_ndarray():
arr = np.empty(10, dtype=[("err", object)])
for i in range(len(arr)):
arr["err"][i] = np.random.randn(i)
df = DataFrame(arr)
repr(df["err"])
repr(df)
df.to_string()
def test_repr_tuples():
buf = StringIO()
df = DataFrame({"tups": list(zip(range(10), range(10)))})
repr(df)
df.to_string(col_space=10, buf=buf)
def test_to_string_truncate():
# GH 9784 - dont truncate when calling DataFrame.to_string
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "let's make this a very VERY long line that is longer "
"than the default 50 character limit",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert df.to_string() == (
" a b "
" c d\n"
"0 foo bar let's make this a very VERY long line t"
"hat is longer than the default 50 character limit 1\n"
"1 foo bar "
" stuff 1"
)
with option_context("max_colwidth", 20):
# the display option has no effect on the to_string method
assert df.to_string() == (
" a b "
" c d\n"
"0 foo bar let's make this a very VERY long line t"
"hat is longer than the default 50 character limit 1\n"
"1 foo bar "
" stuff 1"
)
assert df.to_string(max_colwidth=20) == (
" a b c d\n"
"0 foo bar let's make this ... 1\n"
"1 foo bar stuff 1"
)
@pytest.mark.parametrize(
"input_array, expected",
[
("a", "a"),
(["a", "b"], "a\nb"),
([1, "a"], "1\na"),
(1, "1"),
([0, -1], " 0\n-1"),
(1.0, "1.0"),
([" a", " b"], " a\n b"),
([".1", "1"], ".1\n 1"),
(["10", "-10"], " 10\n-10"),
],
)
def test_format_remove_leading_space_series(input_array, expected):
# GH: 24980
s = Series(input_array).to_string(index=False)
assert s == expected
@pytest.mark.parametrize(
"input_array, expected",
[
({"A": ["a"]}, "A\na"),
({"A": ["a", "b"], "B": ["c", "dd"]}, "A B\na c\nb dd"),
({"A": ["a", 1], "B": ["aa", 1]}, "A B\na aa\n1 1"),
],
)
def test_format_remove_leading_space_dataframe(input_array, expected):
# GH: 24980
df = DataFrame(input_array).to_string(index=False)
assert df == expected
def test_to_string_unicode_columns(float_frame):
df = DataFrame({"\u03c3": np.arange(10.0)})
buf = StringIO()
df.to_string(buf=buf)
buf.getvalue()
buf = StringIO()
df.info(buf=buf)
buf.getvalue()
result = float_frame.to_string()
assert isinstance(result, str)
def test_to_string_utf8_columns():
n = "\u05d0".encode()
with option_context("display.max_rows", 1):
df = DataFrame([1, 2], columns=[n])
repr(df)
def test_to_string_unicode_two():
dm = DataFrame({"c/\u03c3": []})
buf = StringIO()
dm.to_string(buf)
def test_to_string_unicode_three():
dm = DataFrame(["\xc2"])
buf = StringIO()
dm.to_string(buf)
def test_to_string_with_formatters():
df = DataFrame(
{
"int": [1, 2, 3],
"float": [1.0, 2.0, 3.0],
"object": [(1, 2), True, False],
},
columns=["int", "float", "object"],
)
formatters = [
("int", lambda x: f"0x{x:x}"),
("float", lambda x: f"[{x: 4.1f}]"),
("object", lambda x: f"-{x!s}-"),
]
result = df.to_string(formatters=dict(formatters))
result2 = df.to_string(formatters=list(zip(*formatters))[1])
assert result == (
" int float object\n"
"0 0x1 [ 1.0] -(1, 2)-\n"
"1 0x2 [ 2.0] -True-\n"
"2 0x3 [ 3.0] -False-"
)
assert result == result2
def test_to_string_with_datetime64_monthformatter():
months = [datetime(2016, 1, 1), datetime(2016, 2, 2)]
x = DataFrame({"months": months})
def format_func(x):
return x.strftime("%Y-%m")
result = x.to_string(formatters={"months": format_func})
expected = "months\n0 2016-01\n1 2016-02"
assert result.strip() == expected
def test_to_string_with_datetime64_hourformatter():
x = DataFrame(
{"hod": to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f")}
)
def format_func(x):
return x.strftime("%H:%M")
result = x.to_string(formatters={"hod": format_func})
expected = "hod\n0 10:10\n1 12:12"
assert result.strip() == expected
def test_to_string_with_formatters_unicode():
df = DataFrame({"c/\u03c3": [1, 2, 3]})
result = df.to_string(formatters={"c/\u03c3": str})
assert result == " c/\u03c3\n" + "0 1\n1 2\n2 3"
def test_to_string_complex_number_trims_zeros():
s = Series([1.000000 + 1.000000j, 1.0 + 1.0j, 1.05 + 1.0j])
result = s.to_string()
expected = "0 1.00+1.00j\n1 1.00+1.00j\n2 1.05+1.00j"
assert result == expected
def test_nullable_float_to_string(float_ea_dtype):
# https://github.com/pandas-dev/pandas/issues/36775
dtype = float_ea_dtype
s = Series([0.0, 1.0, None], dtype=dtype)
result = s.to_string()
expected = """0 0.0
1 1.0
2 <NA>"""
assert result == expected
def test_nullable_int_to_string(any_nullable_int_dtype):
# https://github.com/pandas-dev/pandas/issues/36775
dtype = any_nullable_int_dtype
s = Series([0, 1, None], dtype=dtype)
result = s.to_string()
expected = """0 0
1 1
2 <NA>"""
assert result == expected
| 28.058296 | 84 | 0.512066 |
7944a2545997dcaac50bb61b9d61fc7cedf44022 | 129 | py | Python | slack_entities/exceptions/exceptions.py | chimplie/slack-entities | 9d0b57550a91de920ff254a108497db5e4209295 | [
"MIT"
] | 3 | 2018-08-18T06:10:36.000Z | 2018-10-29T14:40:08.000Z | slack_entities/exceptions/exceptions.py | chimplie/slack-entities | 9d0b57550a91de920ff254a108497db5e4209295 | [
"MIT"
] | 2 | 2018-08-29T09:48:56.000Z | 2020-05-25T13:12:02.000Z | slack_entities/exceptions/exceptions.py | chimplie/slack-entities | 9d0b57550a91de920ff254a108497db5e4209295 | [
"MIT"
] | null | null | null | class PluralMethodError(Exception):
"""
    Raised when there is no plural form for a particular Slack entity
"""
pass
| 21.5 | 67 | 0.682171 |
7944a33e0a5273bc6be8bd0ab73306831ca33734 | 987 | py | Python | home/migrations/0008_auto_20170130_1106.py | cristovao-alves/Wagtail-Multilingual | 14033e373533108bac77142e7b8dc809ad83097d | [
"MIT"
] | 7 | 2017-01-31T13:07:49.000Z | 2020-10-06T02:06:22.000Z | home/migrations/0008_auto_20170130_1106.py | cristovao-alves/Wagtail-Multilingual | 14033e373533108bac77142e7b8dc809ad83097d | [
"MIT"
] | 1 | 2018-07-05T11:40:40.000Z | 2018-07-05T11:40:40.000Z | home/migrations/0008_auto_20170130_1106.py | cristovao-alves/Wagtail-Multilingual | 14033e373533108bac77142e7b8dc809ad83097d | [
"MIT"
] | 5 | 2018-01-29T13:21:23.000Z | 2019-07-01T17:05:11.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-30 11:06
from __future__ import unicode_literals
from django.db import migrations
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('home', '0007_auto_20170130_1100'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='body_en',
field=wagtail.wagtailcore.fields.StreamField([('body', wagtail.wagtailcore.blocks.StructBlock([('body', wagtail.wagtailcore.blocks.CharBlock(required=True))], icon='pick'))], blank=True, null=True),
),
migrations.AlterField(
model_name='homepage',
name='body_fr',
field=wagtail.wagtailcore.fields.StreamField([('body', wagtail.wagtailcore.blocks.StructBlock([('body', wagtail.wagtailcore.blocks.CharBlock(required=True))], icon='pick'))], blank=True, null=True),
),
]
| 35.25 | 210 | 0.663627 |
7944a348ad9d2c450839ca111577697ef7e70bcc | 3,907 | py | Python | pytorch_toolkit/face_recognition/model/backbones/se_resnext.py | AnastasiaaSenina/openvino_training_extensions | 267425d64372dff5b9083dc0ca6abfc305a71449 | [
"Apache-2.0"
] | 1 | 2020-02-09T15:50:49.000Z | 2020-02-09T15:50:49.000Z | pytorch_toolkit/face_recognition/model/backbones/se_resnext.py | akshayjaryal603/openvino_training_extensions | 7d606a22143db0af97087709d63a2ec2aa02036c | [
"Apache-2.0"
] | 28 | 2020-09-25T22:40:36.000Z | 2022-03-12T00:37:36.000Z | pytorch_toolkit/face_recognition/model/backbones/se_resnext.py | akshayjaryal603/openvino_training_extensions | 7d606a22143db0af97087709d63a2ec2aa02036c | [
"Apache-2.0"
] | 1 | 2021-04-02T07:51:01.000Z | 2021-04-02T07:51:01.000Z | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import torch.nn as nn
from model.blocks.se_resnext_blocks import SEBottleneckX
class SEResNeXt(nn.Module):
def __init__(self, block, layers, cardinality=32, num_classes=1000, activation=nn.ReLU, head=False):
super(SEResNeXt, self).__init__()
self.cardinality = cardinality
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], activation=activation)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, activation=activation)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, activation=activation)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, activation=activation)
self.avgpool = nn.Conv2d(512 * block.expansion, 512 * block.expansion, 7,
groups=512 * block.expansion, bias=False)
self.head = head
if not self.head:
self.output_channels = 512 * block.expansion
else:
self.fc = nn.Conv2d(512 * block.expansion, num_classes, 1, stride=1, padding=0, bias=False)
self.output_channels = num_classes
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, activation=nn.ReLU):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, self.cardinality, stride, downsample, activation=activation))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, self.cardinality, activation=activation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
if self.head:
x = self.fc(x)
return x
def get_output_channels(self):
return self.output_channels
def se_resnext50(**kwargs):
model = SEResNeXt(SEBottleneckX, [3, 4, 6, 3], **kwargs)
return model
def se_resnext101(**kwargs):
model = SEResNeXt(SEBottleneckX, [3, 4, 23, 3], **kwargs)
return model
def se_resnext152(**kwargs):
model = SEResNeXt(SEBottleneckX, [3, 8, 36, 3], **kwargs)
return model
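# Illustrative forward-pass sketch (not part of the original module). It assumes
# SEBottleneckX.expansion == 4 and a 112x112 input, for which the stride-1 stem
# and the four stages leave a 7x7 feature map for the depthwise "avgpool" conv:
#   import torch
#   net = se_resnext50(num_classes=512, head=True)
#   out = net(torch.randn(1, 3, 112, 112))   # expected shape: (1, 512, 1, 1)
#   channels = net.get_output_channels()     # 512 when head=True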
| 36.858491 | 112 | 0.622472 |
7944a68d469d87956e00502dff9e4940e47289c9 | 2,131 | py | Python | pyNastran/op2/op2_interface/function_codes.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | [
"BSD-3-Clause"
] | 293 | 2015-03-22T20:22:01.000Z | 2022-03-14T20:28:24.000Z | pyNastran/op2/op2_interface/function_codes.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | [
"BSD-3-Clause"
] | 512 | 2015-03-14T18:39:27.000Z | 2022-03-31T16:15:43.000Z | pyNastran/op2/op2_interface/function_codes.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | [
"BSD-3-Clause"
] | 136 | 2015-03-19T03:26:06.000Z | 2022-03-25T22:14:54.000Z | """
Note: In some OFP table descriptions (OEE, OEF, OES for example), you
will see formats such as ACODE,4=05, or TCODE,1=02
(versus ACODE=05 or TCODE=02). The integer values 4 and 1 in these
examples are function codes. Function codes specify operations to
perform on the value in the data block. The operation result will
then be used to determine the data format.
The following lists the available function codes and their operation:
Function Codes Operation
============== =========
1 if (item_name/1000 = 2,3,6) then return 2, else return 1
2 mod(item_name,100)
3 mod(item_name,1000)
4 item_name/10
5 mod(item_name,10)
    6              if iand(item_name,8) != 0 then set to 0, else set to 1
7 if item_name/1000
= 0 or 2, then set to 0
= 1 or 3, then set to 1
> 3, then set to 2.
>65535 iand(item_name,iand(func_code,65535))
For example, if a value of 100 is found in an ACODE,4 field, the
function_code of 4 results in the operation:
item_name/10 = 100/10 = 10.
Thus the data format under the ACODE,4=10 row would be used.
"""
import warnings
def func1(item_code):
if item_code // 1000 in [2, 3, 6]:
return 2
return 1
def func2(item_code):
return item_code % 100
def func3(item_code):
return item_code % 1000
def func4(item_code):
return item_code // 10
def func5(item_code):
return item_code % 10
def func6(item_code):
warnings.warn('Function code 6 method not verified',
RuntimeWarning)
if item_code & 8:
return 0
return 1
def func7(item_code):
v = item_code // 1000
if v in [0, 2]:
return 0
if v in [1, 3]:
return 1
return 2
def funcbig(func_code, item_code):
return item_code & (func_code & 65535)
#self._code_funcs = {
#1: func1, 2: func2, 3: func3, 4: func4,
#5: func5, 6: func6, 7: func7,
#'big': funcbig,
#}
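# Worked examples of the function codes described in the docstring above
# (illustrative, directly checkable against the helpers defined here):
#   func4(100) == 10    # ACODE,4=10 row is used when ACODE is 100
#   func1(2002) == 2    # 2002 // 1000 == 2, which is in (2, 3, 6)
#   func2(1234) == 34   # mod(item_name, 100)
#   func3(1234) == 234  # mod(item_name, 1000)
#   func7(3002) == 1    # 3002 // 1000 == 3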
| 26.308642 | 71 | 0.613796 |
7944a77149a9bae0439d9302aa90a5a14ddaf457 | 2,359 | py | Python | skywalking/client/grpc.py | championquizzer/skywalking-python | a9cf38a5a867a47e7f1ba3025846fb81b9e2dbf7 | [
"Apache-2.0"
] | null | null | null | skywalking/client/grpc.py | championquizzer/skywalking-python | a9cf38a5a867a47e7f1ba3025846fb81b9e2dbf7 | [
"Apache-2.0"
] | null | null | null | skywalking/client/grpc.py | championquizzer/skywalking-python | a9cf38a5a867a47e7f1ba3025846fb81b9e2dbf7 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import grpc
from skywalking import config
from skywalking.client import ServiceManagementClient, TraceSegmentReportService
from skywalking.protocol.common.Common_pb2 import KeyStringValuePair
from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub
from skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties
from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub
logger = logging.getLogger(__name__)
class GrpcServiceManagementClient(ServiceManagementClient):
def __init__(self, channel: grpc.Channel):
self.service_stub = ManagementServiceStub(channel)
def send_instance_props(self):
self.service_stub.reportInstanceProperties(InstanceProperties(
service=config.service_name,
serviceInstance=config.service_instance,
properties=[KeyStringValuePair(key='language', value='Python')],
))
def send_heart_beat(self):
logger.debug(
'service heart beats, [%s], [%s]',
config.service_name,
config.service_instance,
)
self.service_stub.keepAlive(InstancePingPkg(
service=config.service_name,
serviceInstance=config.service_instance,
))
class GrpcTraceSegmentReportService(TraceSegmentReportService):
def __init__(self, channel: grpc.Channel):
self.report_stub = TraceSegmentReportServiceStub(channel)
def report(self, generator):
self.report_stub.collect(generator)
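# Illustrative wiring of the two clients above (not part of the original module;
# the collector address is a placeholder assumption):
#   channel = grpc.insecure_channel('127.0.0.1:11800')
#   management = GrpcServiceManagementClient(channel)
#   management.send_instance_props()
#   management.send_heart_beat()
#   reporter = GrpcTraceSegmentReportService(channel)
#   reporter.report(segment_generator)  # a generator yielding segment protobufs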
| 38.672131 | 93 | 0.758796 |
7944a7c366bacaaae705d6d65ea721ec1f3cf666 | 314 | py | Python | code/others/test2.py | marusqq/Heari | 994798c69932a5c6ed7bd6898dbda30dd170451b | [
"MIT"
] | 1 | 2020-10-06T11:12:09.000Z | 2020-10-06T11:12:09.000Z | code/others/test2.py | marusqq/Heari | 994798c69932a5c6ed7bd6898dbda30dd170451b | [
"MIT"
] | null | null | null | code/others/test2.py | marusqq/Heari | 994798c69932a5c6ed7bd6898dbda30dd170451b | [
"MIT"
] | 1 | 2020-10-05T18:05:47.000Z | 2020-10-05T18:05:47.000Z | def split_time(time_with_date, newspaper):
    if newspaper == 'delfi':
        s_time = time_with_date.split('T')
        date = s_time[0]
        s_time = s_time[1].split('+')
        time = s_time[0]
        return date, time
    # Only the 'delfi' timestamp format is handled; fail loudly instead of
    # implicitly returning None, which would break the unpacking below.
    raise ValueError('unsupported newspaper: {}'.format(newspaper))
date,time = split_time("2020-09-23T18:59:24+0300", 'delfi')
print(date)
print(time) | 26.166667 | 59 | 0.611465 |
7944a847ddb8dc593688fb6a7d11a8cf90c4fc64 | 2,823 | py | Python | chaingreen/consensus/pot_iterations.py | WaitWha/chaingreen-blockchain | 959443f03420b80f66028c2183525712aa933465 | [
"Apache-2.0"
] | 103 | 2021-05-30T02:09:28.000Z | 2022-03-17T20:45:49.000Z | chaingreen/consensus/pot_iterations.py | WaitWha/chaingreen-blockchain | 959443f03420b80f66028c2183525712aa933465 | [
"Apache-2.0"
] | 107 | 2021-05-23T02:20:26.000Z | 2022-03-29T17:07:43.000Z | chaingreen/consensus/pot_iterations.py | WaitWha/chaingreen-blockchain | 959443f03420b80f66028c2183525712aa933465 | [
"Apache-2.0"
] | 50 | 2021-05-23T02:19:06.000Z | 2022-01-24T07:32:50.000Z | from chaingreen.consensus.constants import ConsensusConstants
from chaingreen.consensus.pos_quality import _expected_plot_size
from chaingreen.types.blockchain_format.sized_bytes import bytes32
from chaingreen.util.hash import std_hash
from chaingreen.util.ints import uint8, uint64, uint128
def is_overflow_block(constants: ConsensusConstants, signage_point_index: uint8) -> bool:
if signage_point_index >= constants.NUM_SPS_SUB_SLOT:
raise ValueError("SP index too high")
return signage_point_index >= constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA
def calculate_sp_interval_iters(constants: ConsensusConstants, sub_slot_iters: uint64) -> uint64:
assert sub_slot_iters % constants.NUM_SPS_SUB_SLOT == 0
return uint64(sub_slot_iters // constants.NUM_SPS_SUB_SLOT)
def calculate_sp_iters(constants: ConsensusConstants, sub_slot_iters: uint64, signage_point_index: uint8) -> uint64:
if signage_point_index >= constants.NUM_SPS_SUB_SLOT:
raise ValueError("SP index too high")
return uint64(calculate_sp_interval_iters(constants, sub_slot_iters) * signage_point_index)
def calculate_ip_iters(
constants: ConsensusConstants,
sub_slot_iters: uint64,
signage_point_index: uint8,
required_iters: uint64,
) -> uint64:
# Note that the SSI is for the block passed in, which might be in the previous epoch
sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
sp_interval_iters: uint64 = calculate_sp_interval_iters(constants, sub_slot_iters)
if sp_iters % sp_interval_iters != 0 or sp_iters >= sub_slot_iters:
raise ValueError(f"Invalid sp iters {sp_iters} for this ssi {sub_slot_iters}")
if required_iters >= sp_interval_iters or required_iters == 0:
raise ValueError(
f"Required iters {required_iters} is not below the sp interval iters {sp_interval_iters} "
f"{sub_slot_iters} or not >0."
)
return uint64((sp_iters + constants.NUM_SP_INTERVALS_EXTRA * sp_interval_iters + required_iters) % sub_slot_iters)
def calculate_iterations_quality(
difficulty_constant_factor: uint128,
quality_string: bytes32,
size: int,
difficulty: uint64,
cc_sp_output_hash: bytes32,
) -> uint64:
"""
    Calculates the number of iterations from the quality. This is derived as the difficulty times the constant factor
times a random number between 0 and 1 (based on quality string), divided by plot size.
"""
sp_quality_string: bytes32 = std_hash(quality_string + cc_sp_output_hash)
iters = uint64(
int(difficulty)
* int(difficulty_constant_factor)
* int.from_bytes(sp_quality_string, "big", signed=False)
// (int(pow(2, 256)) * int(_expected_plot_size(size)))
)
return max(iters, uint64(1))
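# Equivalent closed form of the computation above (illustrative):
#   iters = difficulty * difficulty_constant_factor
#           * (int(std_hash(quality_string + cc_sp_output_hash)) / 2**256)
#           / _expected_plot_size(size)
# i.e. the hash term behaves like a uniform random factor in [0, 1), and the
# result is clamped to a minimum of one iteration.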
| 42.772727 | 118 | 0.755225 |
7944a8a91c26cdf4febb99117a4703be096ebced | 2,774 | py | Python | libs/labelDialog.py | khanh-moriaty/labelImg | 941d3c8486f947d26e8257fae2bf841b656ce69d | [
"MIT"
] | null | null | null | libs/labelDialog.py | khanh-moriaty/labelImg | 941d3c8486f947d26e8257fae2bf841b656ce69d | [
"MIT"
] | null | null | null | libs/labelDialog.py | khanh-moriaty/labelImg | 941d3c8486f947d26e8257fae2bf841b656ce69d | [
"MIT"
] | null | null | null | try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from libs.utils import newIcon, labelValidator
BB = QDialogButtonBox
class LabelDialog(QDialog):
def __init__(self, text="Enter object label", parent=None, listItem=None):
super(LabelDialog, self).__init__(parent)
self.edit = QLineEdit()
self.edit.setText(text)
self.edit.setValidator(labelValidator())
self.edit.editingFinished.connect(self.postProcess)
self.edit.textEdited.connect(self.complete)
model = QStringListModel()
model.setStringList(listItem)
completer = QCompleter()
completer.setModel(model)
self.edit.setCompleter(completer)
layout = QVBoxLayout()
layout.addWidget(self.edit)
self.buttonBox = bb = BB(BB.Ok | BB.Cancel, Qt.Horizontal, self)
bb.button(BB.Ok).setIcon(newIcon('done'))
bb.button(BB.Cancel).setIcon(newIcon('undo'))
bb.accepted.connect(self.validate)
bb.rejected.connect(self.reject)
layout.addWidget(bb)
if listItem is not None and len(listItem) > 0:
self.listWidget = QListWidget(self)
for item in listItem:
self.listWidget.addItem(item)
self.listWidget.itemClicked.connect(self.listItemClick)
self.listWidget.itemDoubleClicked.connect(self.listItemDoubleClick)
layout.addWidget(self.listWidget)
self.setLayout(layout)
def complete(self, s):
if s.endswith(' '):
self.postProcess()
self.accept()
def validate(self):
if self.edit.text().strip():
self.accept()
def postProcess(self):
try:
self.edit.setText(self.edit.text().strip())
print('pp')
except AttributeError:
# PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
self.edit.setText(self.edit.text())
def popUp(self, text='', move=True):
self.edit.setText(text)
self.edit.setSelection(0, len(text))
self.edit.setFocus(Qt.PopupFocusReason)
if move:
self.move(QCursor.pos())
return self.edit.text() if self.exec_() else None
def listItemClick(self, tQListWidgetItem):
try:
text = tQListWidgetItem.text().trimmed()
except AttributeError:
# PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
text = tQListWidgetItem.text().strip()
self.edit.setText(text)
def listItemDoubleClick(self, tQListWidgetItem):
self.listItemClick(tQListWidgetItem)
self.validate()
| 32.255814 | 79 | 0.628695 |
7944a8b0aca81e67692a1b8a5221c6037408265e | 12,762 | py | Python | anfis/anfis.py | darkrider85/anfis | 77c0f206bb0d2e55990e006e98886cb91cda55fe | [
"MIT"
] | null | null | null | anfis/anfis.py | darkrider85/anfis | 77c0f206bb0d2e55990e006e98886cb91cda55fe | [
"MIT"
] | null | null | null | anfis/anfis.py | darkrider85/anfis | 77c0f206bb0d2e55990e006e98886cb91cda55fe | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 03 07:30:34 2014
@author: tim.meggs
"""
import itertools
import numpy as np
from membership import mfDerivs
import copy
class ANFIS:
"""Class to implement an Adaptive Network Fuzzy Inference System: ANFIS"
Attributes:
X
Y
XLen
memClass
memFuncs
memFuncsByVariable
rules
consequents
errors
memFuncsHomo
trainingType
"""
def __init__(self, X, Y, memFunction):
self.X = np.array(copy.copy(X))
self.Y = np.array(copy.copy(Y))
self.XLen = len(self.X)
self.memClass = copy.deepcopy(memFunction)
self.memFuncs = self.memClass.MFList
# building a index list of the member functions (given them ids)
self.memFuncsByVariable = [[x for x in range(len(self.memFuncs[z]))] for z in range(len(self.memFuncs))]
# rules is a cartesian product of the ids of the member functions
self.rules = np.array(list(itertools.product(*self.memFuncsByVariable)))
self.consequents = np.empty(self.Y.ndim * len(self.rules) * (self.X.shape[1] + 1))
self.consequents.fill(0)
self.errors = np.empty(0)
self.memFuncsHomo = all(len(i)==len(self.memFuncsByVariable[0]) for i in self.memFuncsByVariable)
self.trainingType = 'Not trained yet'
def LSE(self, A, B, initialGamma = 1000.):
# coeffMat is the result of layer 4 (in the example a 121x48 matrix)
coeffMat = A
# rhsMat is the label matrix holding the actual results of the training set (121x1)
rhsMat = B
        # create an identity matrix sized by the second dimension of the result matrix (48 in the example) and set the values on the diagonal to initialGamma
S = np.eye(coeffMat.shape[1])*initialGamma
x = np.zeros((coeffMat.shape[1],1)) # need to correct for multi-dim B
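        # The loop below is a sequential (recursive) least-squares estimate of x
        # in A x = B: with a_i the i-th row of coeffMat and b_i its target,
        #   S <- S - (S a_i^T a_i S) / (1 + a_i S a_i^T)
        #   x <- x + S a_i^T (b_i - a_i x)
        # starting from S = initialGamma * I, so (A^T A)^-1 is never formed.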
for i in range(len(coeffMat[:,0])):
# get the i-th row of the result matrix
a = coeffMat[i,:]
# get the actual result label i
b = np.array(rhsMat[i])
            # dot product: scalar product of row r of the first matrix and column c of the second matrix; the value is placed at position (r, c) of the result matrix
S = S - (np.array(
np.dot(
np.dot(
np.dot(S,
np.matrix(a).transpose()),
np.matrix(a)),
S))) / (1 + (np.dot(np.dot(S, a), a)))
x = x + (np.dot(S, np.dot(np.matrix(a).transpose(), (np.matrix(b) - np.dot(np.matrix(a), x)))))
return x
def trainHybridJangOffLine(self, epochs=5, tolerance=1e-5, initialGamma=1000, k=0.01):
"""
:param epochs: number of training epochs
:param tolerance: convergence error to stop training
:param initialGamma:
:param k: numeric with the initial step size for learning rule
:return:
"""
self.trainingType = 'trainHybridJangOffLine'
convergence = False
epoch = 1
while (epoch < epochs) and (convergence is not True):
# layer four: forward pass
[layerFour, wSum, w] = forwardHalfPass(self, self.X)
# layer five: least squares estimate
layerFive = np.array(self.LSE(layerFour,self.Y,initialGamma))
self.consequents = layerFive
layerFive = np.dot(layerFour, layerFive)
# error
error = np.sum((self.Y - layerFive.T)**2)
print 'current error: ', error
average_error = np.average(np.absolute(self.Y-layerFive.T))
self.errors = np.append(self.errors,error)
if len(self.errors) != 0:
if self.errors[len(self.errors)-1] < tolerance:
convergence = True
# back propagation
if convergence is not True:
cols = range(len(self.X[0,:]))
dE_dAlpha = list(backprop(self, colX, cols, wSum, w, layerFive) for colX in range(self.X.shape[1]))
if len(self.errors) >= 4:
if (self.errors[-4] > self.errors[-3] > self.errors[-2] > self.errors[-1]):
k = k * 1.1
if len(self.errors) >= 5:
if (self.errors[-1] < self.errors[-2]) and (self.errors[-3] < self.errors[-2]) and (self.errors[-3] < self.errors[-4]) and (self.errors[-5] > self.errors[-4]):
k = k * 0.9
## handling of variables with a different number of MFs
t = []
for x in range(len(dE_dAlpha)):
for y in range(len(dE_dAlpha[x])):
for z in range(len(dE_dAlpha[x][y])):
t.append(dE_dAlpha[x][y][z])
eta = k / np.abs(np.sum(t))
if(np.isinf(eta)):
eta = k
## handling of variables with a different number of MFs
dAlpha = copy.deepcopy(dE_dAlpha)
if not(self.memFuncsHomo):
for x in range(len(dE_dAlpha)):
for y in range(len(dE_dAlpha[x])):
for z in range(len(dE_dAlpha[x][y])):
dAlpha[x][y][z] = -eta * dE_dAlpha[x][y][z]
else:
dAlpha = -eta * np.array(dE_dAlpha)
for varsWithMemFuncs in range(len(self.memFuncs)):
for MFs in range(len(self.memFuncsByVariable[varsWithMemFuncs])):
paramList = sorted(self.memFuncs[varsWithMemFuncs][MFs][1])
for param in range(len(paramList)):
self.memFuncs[varsWithMemFuncs][MFs][1][paramList[param]] = self.memFuncs[varsWithMemFuncs][MFs][1][paramList[param]] + dAlpha[varsWithMemFuncs][MFs][param]
epoch = epoch + 1
self.fittedValues = predict(self,self.X)
self.residuals = self.Y - self.fittedValues[:,0]
return self.fittedValues
def plotErrors(self):
if self.trainingType == 'Not trained yet':
print self.trainingType
else:
import matplotlib.pyplot as plt
plt.plot(range(len(self.errors)),self.errors,'ro', label='errors')
plt.ylabel('error')
plt.xlabel('epoch')
plt.show()
def plotMF(self, x, inputVar):
import matplotlib.pyplot as plt
from skfuzzy import gaussmf, gbellmf, sigmf
for mf in range(len(self.memFuncs[inputVar])):
if self.memFuncs[inputVar][mf][0] == 'gaussmf':
y = gaussmf(x,**self.memClass.MFList[inputVar][mf][1])
elif self.memFuncs[inputVar][mf][0] == 'gbellmf':
y = gbellmf(x,**self.memClass.MFList[inputVar][mf][1])
elif self.memFuncs[inputVar][mf][0] == 'sigmf':
y = sigmf(x,**self.memClass.MFList[inputVar][mf][1])
plt.plot(x,y,'r')
plt.show()
def plotResults(self):
if self.trainingType == 'Not trained yet':
print self.trainingType
else:
import matplotlib.pyplot as plt
plt.plot(range(len(self.fittedValues)),self.fittedValues,'r', label='trained')
plt.plot(range(len(self.Y)),self.Y,'b', label='original')
plt.legend(loc='upper left')
plt.show()
def forwardHalfPass(ANFISObj, Xs):
layerFour = np.empty(0,)
wSum = []
# Xs[:, 0] -> get the entire first column of the input matrix
# ref: https://stackoverflow.com/questions/35205173/numpy-array-slicing-using-commas?rq=1
for pattern in range(len(Xs[:,0])):
print("next pattern: {0}".format(pattern))
# layer one: membership grade
layerOne = ANFISObj.memClass.evaluateMF(Xs[pattern,:])
print("layer 1: shape: {0}; vals: {1}".format((len(layerOne), len(layerOne[0])), layerOne))
# layer two: rule firing strength (condition strength)
print("rules: {0}".format(ANFISObj.rules))
miAlloc = [[layerOne[x][ANFISObj.rules[row][x]] for x in range(len(ANFISObj.rules[0]))] for row in range(len(ANFISObj.rules))]
print("miAlloc: {0}".format(miAlloc))
layerTwo = np.array([np.product(x) for x in miAlloc]).T
if pattern == 0:
w = layerTwo
else:
w = np.vstack((w,layerTwo))
print("Layer 2: shape: {0}; vals: {1}".format(layerTwo.shape, layerTwo))
# layer three: normalization of firing strength
wSum.append(np.sum(layerTwo))
if pattern == 0:
wNormalized = layerTwo/wSum[pattern]
else:
wNormalized = np.vstack((wNormalized,layerTwo/wSum[pattern]))
# prep for layer four (bit of a hack)
layerThree = layerTwo/wSum[pattern]
print("Layer 3: shape: {0}; vals: {1}".format(layerThree.shape, layerThree))
# multiply the inputs with the normalized fire strengths and put the results in one vector consisting of (in_1, in_2, ..., 1) * weights_vector
rowHolder = np.concatenate([x*np.append(Xs[pattern,:],1) for x in layerThree])
print("rowHolder: shape: {0}; vals: {1}".format(len(rowHolder), rowHolder))
# collecting all results in layerFour first
layerFour = np.append(layerFour,rowHolder)
print("Layer 4 (before): shape: {0}; vals: {1}".format(layerFour.shape, layerFour))
w = w.T
wNormalized = wNormalized.T
print("pattern: ", pattern)
# split the entire vector in sub arrays so that we gain a matrix where each row is the result of one input row (one row is the output for one input vrow of the input matrix)
layerFour = np.array(np.array_split(layerFour,pattern + 1))
print("Layer 4 (after): shape: {0}; vals: {1}".format(layerFour.shape, layerFour))
with open("layer4.txt", 'a') as f:
np.set_printoptions(threshold=np.inf)
            print >> f, layerFour
return layerFour, wSum, w
def backprop(ANFISObj, columnX, columns, theWSum, theW, theLayerFive):
paramGrp = [0]* len(ANFISObj.memFuncs[columnX])
for MF in range(len(ANFISObj.memFuncs[columnX])):
parameters = np.empty(len(ANFISObj.memFuncs[columnX][MF][1]))
timesThru = 0
for alpha in sorted(ANFISObj.memFuncs[columnX][MF][1].keys()):
bucket3 = np.empty(len(ANFISObj.X))
for rowX in range(len(ANFISObj.X)):
varToTest = ANFISObj.X[rowX,columnX]
tmpRow = np.empty(len(ANFISObj.memFuncs))
tmpRow.fill(varToTest)
bucket2 = np.empty(ANFISObj.Y.ndim)
for colY in range(ANFISObj.Y.ndim):
rulesWithAlpha = np.array(np.where(ANFISObj.rules[:,columnX]==MF))[0]
adjCols = np.delete(columns,columnX)
senSit = mfDerivs.partial_dMF(ANFISObj.X[rowX,columnX],ANFISObj.memFuncs[columnX][MF],alpha)
# produces d_ruleOutput/d_parameterWithinMF
dW_dAplha = senSit * np.array([np.prod([ANFISObj.memClass.evaluateMF(tmpRow)[c][ANFISObj.rules[r][c]] for c in adjCols]) for r in rulesWithAlpha])
bucket1 = np.empty(len(ANFISObj.rules[:,0]))
for consequent in range(len(ANFISObj.rules[:,0])):
fConsequent = np.dot(np.append(ANFISObj.X[rowX,:],1.),ANFISObj.consequents[((ANFISObj.X.shape[1] + 1) * consequent):(((ANFISObj.X.shape[1] + 1) * consequent) + (ANFISObj.X.shape[1] + 1)),colY])
acum = 0
if consequent in rulesWithAlpha:
acum = dW_dAplha[np.where(rulesWithAlpha==consequent)] * theWSum[rowX]
acum = acum - theW[consequent,rowX] * np.sum(dW_dAplha)
acum = acum / theWSum[rowX]**2
bucket1[consequent] = fConsequent * acum
sum1 = np.sum(bucket1)
if ANFISObj.Y.ndim == 1:
bucket2[colY] = sum1 * (ANFISObj.Y[rowX]-theLayerFive[rowX,colY])*(-2)
else:
bucket2[colY] = sum1 * (ANFISObj.Y[rowX,colY]-theLayerFive[rowX,colY])*(-2)
sum2 = np.sum(bucket2)
bucket3[rowX] = sum2
sum3 = np.sum(bucket3)
parameters[timesThru] = sum3
timesThru = timesThru + 1
paramGrp[MF] = parameters
return paramGrp
def predict(ANFISObj, varsToTest):
[layerFour, wSum, w] = forwardHalfPass(ANFISObj, varsToTest)
# layer five
layerFive = np.dot(layerFour,ANFISObj.consequents)
return layerFive
if __name__ == "__main__":
print "I am main!" | 40.514286 | 217 | 0.570835 |
7944a9f4c980596575a38a3f3964ceaba1266ab7 | 12,154 | py | Python | hard-gists/2032428/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/2032428/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/2032428/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | import sublime, sublime_plugin
import re
py_compl = [ # standard Python functions
("abs()\tabs fn", "abs(${1:number})$0"),
("all()\tall fn", "all(${1:iterable})$0"),
("any()\tany fn", "any(${1:iterable})$0"),
("bin()\tbin fn", "bin(${1:integer})$0"),
("bool()\tbool fn", "bool(${1:[value]})$0"),
("bytearray()\tbytearray fn",
"bytearray(${1:${2:source}${3:[, encoding]}${4:[, errors]}})$0"),
("callable()\tcallable fn", "callable(${1:object})$0"),
("chr()\tchr fn", "chr(${1:integer})$0"),
("classmethod()\tclassmethod fn", "classmethod(${1:function})$0"),
("cmp()\tcmp fn", "cmp(${1:x}, ${2:y})$0"),
("compile()\tcompile fn",
"compile(${1:source}, ${2:filename}, ${3:mode}${4:[, flags]}${5:[, dont_inherit]})$0"),
("complex()\tcomplex fn", "complex(${1:real}${2:[, imag]})$0"),
("delattr()\tdelattr fn", "delattr(${1:object}, ${2:name})$0"),
("dict()\tdict fn", "dict(${1:arg})$0"),
("dir()\tdir fn", "dir(${1:[object]})$0"),
("divmod()\tdivmod fn", "divmod(${1:a}, ${2:b})$0"),
("enumerate()\tenumerate fn", "enumerate(${1:sequence}${2:[, start]})$0"),
("eval()\teval fn", "eval(${1:expression}${2:[, globals]}${3:[, locals]})$0"),
("execfile()\texecfile fn", "execfile(${1:filename}${2:[, globals]}${3:[, locals]})$0"),
("file()\tfile fn", "file(${1:filename}${2:[, mode]}${3:[, bufsize]})$0"),
("filter()\tfilter fn", "filter(${1:function}, ${2:iterable})$0"),
("float()\tfloat fn", "float(${1:[x]})$0"),
("format()\tformat fn", "format(${1:value}${2:[, format_spec]})$0"),
("frozenset()\tfrozenset fn", "frozenset(${1:[iterable]})$0"),
("getattr()\tgetattr fn", "getattr(${1:object}, ${2:name}${3:[, default]})$0"),
("globals()\tglobals fn", "globals()$0"),
("hasattr()\thasattr fn", "hasattr(${1:object}, ${2:name})$0"),
("hash()\thash fn", "hash(${1:object})$0"),
("help()\thelp fn", "help(${1:[object]})$0"),
("hex()\thex fn", "hex(${1:x})$0"),
("id()\tid fn", "id(${1:object})$0"),
("input()\tinput fn", "input(${1:[prompt]})$0"),
("int()\tint fn", "int(${1:x}${2:[, base]})$0"),
("isinstance()\tisinstance fn", "isinstance(${1:object}, ${2:classinfo})$0"),
("issubclass()\tissubclass fn", "issubclass(${1:class}, ${2:classinfo})$0"),
("iter()\titer fn", "iter(${1:o}${2:[, sentinel]})$0"),
("len()\tlen fn", "len(${1:object})$0"),
("list()\tlist fn", "list(${1:[iterable]})$0"),
("locals()\tlocals fn", "locals()$0"),
("long()\tlong fn", "long(${1:x}${2:[, base]})$0"),
("map()\tmap fn", "map(${1:function}${2:[, iterables]})$0"),
("max()\tmax fn", "max(${1:iterable}${2:[, args]}${3:[, key]})$0"),
("memoryview()\tmemoryview fn", "memoryview(${1:object})$0"),
("min()\tmin fn", "min(${1:iterable}${2:[, args]}${3:[, key]})$0"),
("next()\tnext fn", "next(${1:iterator}${2:[, default]})$0"),
("object()\tobject fn", "object()$0"),
("oct()\toct fn", "oct(${1:integer})$0"),
("open()\topen fn", "open(${1:filename}${2:[, mode]}${3:[, bufsize]})$0"),
("ord()\tord fn", "ord(${1:char})$0"),
("pow()\tpow fn", "pow(${1:x}, ${2:y}${3:[, modulo]})$0"),
("print()\tprint fn",
"print(${1:[object, ...][, sep=' '][, end='\\n'][, file=sys.stdout]})$0"),
("property()\tproperty fn", "property(${1:[fget[, fset[, fdel[, doc]]]]})$0"),
("range()\trange fn", "range(${1:[start, ]}${2:stop}${3:[, step]})$0"),
("raw_input()\traw_input fn", "raw_input(${1:[prompt]})$0"),
("reduce()\treduce fn", "reduce(${1:function}, ${2:iterable}${3:[, initializer]})$0"),
("reload()\treload fn", "reload(${1:module})$0"),
("repr()\trepr fn", "repr(${1:object})$0"),
("reversed()\treversed fn", "reversed(${1:seq})$0"),
("round()\tround fn", "round(${1:float}${2:[, digits]})$0"),
("set()\tset fn", "set(${1:[iterable]})$0"),
("setattr()\tsetattr fn", "setattr(${1:object}, ${2:name}, ${3:value})$0"),
("slice()\tslice fn", "slice(${1:[start, ]}${2:stop}${3:[, step]})$0"),
("sorted()\tsorted fn",
"sorted(${1:iterable}${2:${3:[, cmp]}${4:[, key]}${5:[, reverse]}})$0"),
("staticmethod()\tstaticmethod fn", "staticmethod(${1:function})$0"),
("str()\tString fn", "str(${1:object})$0"),
("sum()\tsum fn", "sum(${1:iterable}${2:[, start]})$0"),
("super()\tsuper fn", "super(${1:type}${2:[, object/type]})$0"),
("tuple()\ttuple fn", "tuple(${1:[iterable]})$0"),
("type()\ttype fn", "type(${1:object})$0"),
("type()\ttype ctor", "type(${1:name}, ${2:bases}, ${3:dict})$0"),
("__import__()\t__import__ fn",
"__import__(${1:name}${2:[, globals, locals, fromlist, level]})$0"),
("unichr()\tunichr fn", "unichr(${1:[integer]})$0"),
("unicode()\tunicode fn", "unicode(${1:[object, ]}${2:encoding}${3:[, errors]})$0"),
("vars()\tvars fn", "vars(${1:[object]})$0"),
("xrange()\txrange fn", "xrange(${1:[start, ]}${2:stop}${3:[, step]})$0"),
("zip()\tzip fn", "zip(${1:iterable})$0")
]
subl_compl = [ # Sublime .methods
("active_group()\tST Window", "active_group()$0"),
("active_view()\tST Window", "active_view()$0"),
("active_view_in_group()\tST Window", "active_view_in_group(${1:group})$0"),
("active_window()\tsublime", "active_window()$0"),
("add()\tST RegionSet", "add(${1:region})$0"),
("add_all()\tST RegionSet", "add_all(${1:region_set})$0"),
("add_on_change()\tST Settings", "add_on_change(${1:key}, ${2:on_change})$0"),
("add_regions()\tST View",
"add_regions(${1:key}${2:[, regions]}, ${3:scope}${4:[, icon]}${5:[, flags]})$0"),
("arch()\tsublime", "arch()$0"),
("begin()\tST Region", "begin()$0"),
("begin_edit()\tST View", "begin_edit(${1:[command]}${2:[, args]})$0"),
("buffer_id()\tST View", "buffer_id()$0"),
("clear_on_change()\tST Settings", "clear_on_change(${1:key})$0"),
("command_history()\tST View", "command_history(${1:[index]}${2:[, modifying_only]})$0"),
("contains()\tST Region/Set", "contains(${1:region/pt})$0"),
("cover()\tST Region", "cover(${1:region})$0"),
("em_width()\tST View", "em_width()$0"),
("empty()\tST Region", "empty()$0"),
("encoding()\tST View", "encoding()$0"),
("end()\tST Region", "end()$0"),
("end_edit()\tST View", "end_edit(${1:edit})$0"),
("erase()\tST Settings", "erase(${1:name})$0"),
("erase()\tST View", "erase(${1:edit}, ${2:region})$0"),
("erase_regions()\tST View", "erase_regions(${1:key})$0"),
("erase_status()\tST View", "erase_status(${1:key})$0"),
("error_message()\tsublime", "error_message(${1:string})$0"),
("extract_scope()\tST View", "extract_scope(${1:point})$0"),
("file_name()\tST View", "file_name()$0"),
("find()\tST View", "find(${1:pattern}, ${2:fromPosition}${3:[, flags]})$0"),
("find_all()\tST View",
"find_all(${1:pattern}${2:[, flags]}${3:[, format]}${4:[, extractions]})$0"),
("focus_group()\tST Window", "focus_group(${1:group})$0"),
("focus_view()\tST Window", "focus_view(${1:view})$0"),
("fold()\tST View", "fold(${1:region(s)})$0"),
("folders()\tST Window", "folders()$0"),
("full_line()\tST View", "full_line(${1:region/pt})$0"),
("find_by_selector()\tST View", "find_by_selector(${1:selector})$0"),
("get()\tST Settings", "get(${1:name}${2:[, default]})$0"),
("get_clipboard()\tsublime", "get_clipboard()$0"),
("get_output_panel()\tST Window", "get_output_panel(${1:name})$0"),
("get_regions()\tST View", "get_regions(${1:key})$0"),
("get_status()\tST View", "get_status(${1:key})$0"),
("get_view_index()\tST Window", "get_view_index(${1:view})$0"),
("has()\tST Settings", "has(${1:name})$0"),
("id()\tST View/Window", "id()$0"),
("insert()\tST View", "insert(${1:edit}, ${2:point}, ${3:string})$0"),
("installed_packages_path()\tsublime", "installed_packages_path()$0"),
("intersection()\tST Region", "intersection(${1:region})$0"),
("intersects()\tST Region", "intersects(${1:region})$0"),
("is_dirty()\tST View", "is_dirty()$0"),
("is_loading()\tST View", "is_loading()$0"),
("is_read_only()\tST View", "is_read_only()$0"),
("is_scratch()\tST View", "is_scratch()$0"),
("layout_extent()\tST View", "layout_extent()$0"),
("layout_to_text()\tST View", "layout_to_text(${1:vector})$0"),
("line()\tST View", "line(${1:region/pt})$0"),
("line_endings()\tST View", "line_endings()$0"),
("line_height()\tST View", "line_height()$0"),
("lines()\tST View", "lines(${1:region})$0"),
("load_settings()\tsublime", "load_settings(${1:base_name})$0"),
("log_commands()\tsublime", "log_commands(${1:flag})$0"),
("log_input()\tsublime", "log_input(${1:flag})$0"),
("message_dialog()\tsublime", "message_dialog(${1:string})$0"),
("name()\tST View", "name()$0"),
("new_file()\tST Window", "new_file()$0"),
("num_groups()\tST Window", "num_groups()$0"),
("ok_cancel_dialog()\tsublime", "ok_cancel_dialog(${1:string}${2:[, ok_button]})$0"),
("open_file()\tST Window", "open_file(${1:file_name}${2:[, flags]})$0"),
("packages_path()\tsublime", "packages_path()$0"),
("platform()\tsublime", "platform()$0"),
("Region()\tsublime", "Region(${1:a}, ${2:b})$0"),
("replace()\tST View", "replace(${1:edit}, ${2:region}, ${3:string})$0"),
("rowcol()\tST View", "rowcol(${1:point})$0"),
("run_command()\tsublime/View/Window", "run_command(${1:string}${2:[, args]})$0"),
("save_settings()\tsublime", "save_settings(${1:base_name})$0"),
("scope_name()\tST View", "scope_name(${1:point})$0"),
("score_selector()\tST View/Window", "score_selector(${1:scope/pt}, ${2:selector})$0"),
("sel()\tST View", "sel()$0"),
("set()\tST Settings", "set(${1:name}, ${2:value})$0"),
("set_clipboard()\tsublime", "set_clipboard(${1:string})$0"),
("set_encoding()\tST View", "set_encoding(${1:encoding})$0"),
("set_line_endings()\tST View", "set_line_endings(${1:line_endings})$0"),
("set_name()\tST View", "set_name(${1:name})$0"),
("set_read_only()\tST View", "set_read_only(${1:value})$0"),
("set_scratch()\tST View", "set_scratch(${1:value})$0"),
("set_status()\tST View", "set_status(${1:key}, ${2:value})$0"),
("set_syntax_file()\tST View", "set_syntax_file(${1:syntax_file})$0"),
("set_timeout()\tsublime", "set_timeout(${1:callback}, ${2:delay})$0"),
("set_view_index()\tST Window", "set_view_index(${1:view}, ${2:group}, ${3:index})$0"),
("settings()\tST View", "settings()$0"),
("set_viewport_position()\tST View", "set_viewport_position(${1:vector}${2:[, animate]})$0"),
("show()\tST View", "show(${1:region/pt}${2:[, show_surrounds]})$0"),
("show_at_center()\tST View", "show_at_center(${1:region/pt})$0"),
("show_input_panel()\tST Window",
"show_input_panel(${1:caption}, ${2:initial_text}, ${3:on_done}, ${4:on_change}, ${5:on_cancel})$0"),
("show_quick_panel()\tST Window",
"show_quick_panel(${1:items}, ${2:on_done}${3:[, flags]})$0"),
("size()\tST Region/View", "size()$0"),
("split_by_newlines()\tST View", "split_by_newlines(${1:region})$0"),
("status_message()\tsublime", "status_message(${1:string})$0"),
("substr()\tST View", "substr(${1:region/pt})$0"),
("subtract()\tST RegionSet", "subtract(${1:region})$0"),
("text_point()\tST View", "text_point(${1:row}, ${2:col})$0"),
("text_to_layout()\tST View", "text_to_layout(${1:point})$0"),
("unfold()\tST View", "unfold(${1:region(s)})$0"),
("version()\tsublime", "version()$0"),
("viewport_extent()\tST View", "viewport_extent()$0"),
("viewport_position()\tST View", "viewport_position()$0"),
("views()\tST Window", "views()$0"),
("views_in_group()\tST Window", "views_in_group(${1:group})$0"),
("visible_region()\tST View", "visible_region()$0"),
("window()\tST View", "window()$0"),
("windows()\tsublime", "windows()$0"),
("word()\tST View", "word(${1:region/pt})$0")
]
def match1(rex, str):
m = rex.match(str)
if m:
return m.group(0)
else:
return None
class PythonCompletions(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
global py_compl, subl_compl
if not view.match_selector(locations[0], 'source.python -string -comment -constant'):
return []
pt = locations[0] - len(prefix) - 1
ch = view.substr(sublime.Region(pt, pt + 1))
is_dot = (ch == '.')
if not is_dot:
completions = py_compl # a reference to py_compl (functions)
elif view.find("(?:from|import)\s+sublime", 0) is not None:
completions = subl_compl # Sublime (.methods)
else:
return []
return (completions, sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS)
# return completions | 53.074236 | 102 | 0.587625 |
7944ac200c7af2c0e6d0996314a97619379a5a15 | 15,143 | py | Python | ryu/services/protocols/bgp/net_ctrl.py | jasuade/ryu | 6bc47b7a19dc64c32fda29bae259807365c0478c | [
"Apache-2.0"
] | 9 | 2018-04-11T12:53:08.000Z | 2021-12-14T01:41:22.000Z | ryu/services/protocols/bgp/net_ctrl.py | jasuade/ryu | 6bc47b7a19dc64c32fda29bae259807365c0478c | [
"Apache-2.0"
] | 1 | 2019-05-20T13:23:28.000Z | 2020-12-20T09:06:52.000Z | ryu/services/protocols/bgp/net_ctrl.py | jasuade/ryu | 6bc47b7a19dc64c32fda29bae259807365c0478c | [
"Apache-2.0"
] | 2 | 2020-10-20T13:52:45.000Z | 2021-06-26T02:21:58.000Z | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Network Controller interface to BGP.
Network controller w.r.t BGPS for APGW Automation project is named as APGW
Agent and Route Server.
"""
import logging
import socket
import traceback
import msgpack
from ryu.lib.packet import safi as subaddr_family
from ryu.services.protocols.bgp import api
from ryu.services.protocols.bgp.api.base import ApiException
from ryu.services.protocols.bgp.api.base import NEXT_HOP
from ryu.services.protocols.bgp.api.base import ORIGIN_RD
from ryu.services.protocols.bgp.api.base import PREFIX
from ryu.services.protocols.bgp.api.base import ROUTE_DISTINGUISHER
from ryu.services.protocols.bgp.api.base import VPN_LABEL
from ryu.services.protocols.bgp.base import Activity
from ryu.services.protocols.bgp.base import add_bgp_error_metadata
from ryu.services.protocols.bgp.base import BGPSException
from ryu.services.protocols.bgp.base import FlexinetPeer
from ryu.services.protocols.bgp.base import NET_CTRL_ERROR_CODE
from ryu.services.protocols.bgp.constants import VRF_TABLE
from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF
from ryu.services.protocols.bgp.rtconf.vrfs import VrfConf
from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4
# Logger instance for this module.
LOG = logging.getLogger('bgpspeaker.net_ctrl')
# Network controller service socket constants.
NC_RPC_BIND_IP = 'apgw_rpc_bind_ip'
NC_RPC_BIND_PORT = 'apgw_rpc_bind_port'
# Notification symbols
NOTIFICATION_ADD_REMOTE_PREFIX = 'prefix.add_remote'
NOTIFICATION_DELETE_REMOTE_PREFIX = 'prefix.delete_remote'
NOTIFICATION_ADD_LOCAL_PREFIX = 'prefix.add_local'
NOTIFICATION_DELETE_LOCAL_PREFIX = 'prefix.delete_local'
NOTIFICATION_LOG = 'logging'
# MessagePackRPC message type constants
RPC_MSG_REQUEST = 0
RPC_MSG_RESPONSE = 1
RPC_MSG_NOTIFY = 2
#
# Indexes for various RPC message types.
#
RPC_IDX_MSG_TYP = 0
RPC_IDX_MSG_ID = 1
RPC_IDX_REQ_SYM = 2
RPC_IDX_REQ_PARAM = 3
RPC_IDX_RES_ERR = 2
RPC_IDX_RES_RST = 3
RPC_IDX_NTF_SYM = 1
RPC_IDX_NTF_PARAM = 2
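# Illustrative messages implied by the constants above (placeholder values):
#   request:      [RPC_MSG_REQUEST, msgid, 'method_name', [params]]
#   response:     [RPC_MSG_RESPONSE, msgid, error_or_None, result_or_None]
#   notification: [RPC_MSG_NOTIFY, 'notification_name', [params]]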
# RPC socket receive buffer size in bytes.
RPC_SOCK_BUFF_SIZE = 4096
@add_bgp_error_metadata(code=NET_CTRL_ERROR_CODE,
sub_code=1,
def_desc='Unknown Network controller exception')
class NetworkControllerError(BGPSException):
"""Common base class for exceptions related to RPC calls.
"""
pass
class RpcSession(Activity):
"""Provides message-pack RPC abstraction for one session.
It contains message-pack packer, un-packer, message ID sequence
and utilities that use these. It also cares about socket communication w/
RPC peer.
"""
NAME_FMT = 'RpcSession%s'
def __init__(self, sock, outgoing_msg_sink_iter):
self.peer_name = str(sock.getpeername())
super(RpcSession, self).__init__(self.NAME_FMT % self.peer_name)
self._packer = msgpack.Packer(encoding='utf-8')
self._unpacker = msgpack.Unpacker(encoding='utf-8')
self._next_msgid = 0
self._socket = sock
self._outgoing_msg_sink_iter = outgoing_msg_sink_iter
self.is_connected = True
def stop(self):
super(RpcSession, self).stop()
self.is_connected = False
LOG.info('RPC Session to %s stopped', self.peer_name)
def _run(self):
# Process outgoing messages in new thread.
green_out = self._spawn('net_ctrl._process_outgoing',
self._process_outgoing_msg,
self._outgoing_msg_sink_iter)
# Process incoming messages in new thread.
green_in = self._spawn('net_ctrl._process_incoming',
self._process_incoming_msgs)
LOG.info('RPC Session to %s started', self.peer_name)
green_in.wait()
green_out.wait()
def _next_msg_id(self):
this_id = self._next_msgid
self._next_msgid += 1
return this_id
def create_request(self, method, params):
msgid = self._next_msg_id()
return self._packer.pack([RPC_MSG_REQUEST, msgid, method, params])
def create_error_response(self, msgid, error):
if error is None:
raise NetworkControllerError(desc='Creating error without body!')
return self._packer.pack([RPC_MSG_RESPONSE, msgid, error, None])
def create_success_response(self, msgid, result):
if result is None:
raise NetworkControllerError(desc='Creating response without '
'body!')
return self._packer.pack([RPC_MSG_RESPONSE, msgid, None, result])
def create_notification(self, method, params):
return self._packer.pack([RPC_MSG_NOTIFY, method, params])
def feed_and_get_messages(self, data):
self._unpacker.feed(data)
messages = []
for msg in self._unpacker:
messages.append(msg)
return messages
def feed_and_get_first_message(self, data):
self._unpacker.feed(data)
for msg in self._unpacker:
return msg
def _send_error_response(self, request, err_msg):
rpc_msg = self.create_error_response(request[RPC_IDX_MSG_ID],
str(err_msg))
return self._sendall(rpc_msg)
def _send_success_response(self, request, result):
rpc_msg = self.create_success_response(request[RPC_IDX_MSG_ID],
result)
return self._sendall(rpc_msg)
def send_notification(self, method, params):
rpc_msg = self.create_notification(method, params)
return self._sendall(rpc_msg)
def _process_incoming_msgs(self):
LOG.debug('NetworkController started processing incoming messages')
assert self._socket
while self.is_connected:
# Wait for request/response/notification from peer.
msg_buff = self._recv()
if len(msg_buff) == 0:
LOG.info('Peer %s disconnected.', self.peer_name)
self.is_connected = False
self._socket.close()
break
messages = self.feed_and_get_messages(msg_buff)
for msg in messages:
if msg[0] == RPC_MSG_REQUEST:
try:
result = _handle_request(msg)
self._send_success_response(msg, result)
except BGPSException as e:
self._send_error_response(msg, e.message)
elif msg[0] == RPC_MSG_RESPONSE:
_handle_response(msg)
elif msg[0] == RPC_MSG_NOTIFY:
_handle_notification(msg)
else:
LOG.error('Invalid message type: %r', msg)
self.pause(0)
def _process_outgoing_msg(self, sink_iter):
"""For every message we construct a corresponding RPC message to be
sent over the given socket inside given RPC session.
This function should be launched in a new green thread as
it loops forever.
"""
LOG.debug('NetworkController processing outgoing request list.')
# TODO(PH): We should try not to sent routes from bgp peer that is not
# in established state.
from ryu.services.protocols.bgp.model import (
FlexinetOutgoingRoute)
while self.is_connected:
# sink iter is Sink instance and next is blocking so this isn't
# active wait.
for outgoing_msg in sink_iter:
if not self.is_connected:
self._socket.close()
return
if isinstance(outgoing_msg, FlexinetOutgoingRoute):
rpc_msg = _create_prefix_notification(outgoing_msg, self)
else:
raise NotImplementedError(
'Do not handle out going message of type %s' %
outgoing_msg.__class__)
if rpc_msg:
self._sendall(rpc_msg)
self.pause(0)
def _recv(self):
return self._sock_wrap(self._socket.recv)(RPC_SOCK_BUFF_SIZE)
def _sendall(self, msg):
return self._sock_wrap(self._socket.sendall)(msg)
def _sock_wrap(self, func):
def wrapper(*args, **kwargs):
try:
ret = func(*args, **kwargs)
except socket.error:
LOG.error(traceback.format_exc())
self._socket_error()
return
return ret
return wrapper
def _socket_error(self):
if self.started:
self.stop()
def _create_prefix_notification(outgoing_msg, rpc_session):
"""Constructs prefix notification with data from given outgoing message.
Given RPC session is used to create RPC notification message.
"""
assert outgoing_msg
path = outgoing_msg.path
assert path
vpn_nlri = path.nlri
assert path.source is not None
if path.source != VRF_TABLE:
# Extract relevant info for update-add/update-delete.
params = [{ROUTE_DISTINGUISHER: outgoing_msg.route_dist,
PREFIX: vpn_nlri.prefix,
NEXT_HOP: path.nexthop,
VRF_RF: VrfConf.rf_2_vrf_rf(path.route_family)}]
if path.nlri.ROUTE_FAMILY.safi not in (subaddr_family.IP_FLOWSPEC,
subaddr_family.VPN_FLOWSPEC):
            params[0][VPN_LABEL] = path.label_list[0]
if not path.is_withdraw:
# Create notification to NetworkController.
rpc_msg = rpc_session.create_notification(
NOTIFICATION_ADD_REMOTE_PREFIX, params)
else:
# Create update-delete request to NetworkController.
rpc_msg = rpc_session.create_notification(
NOTIFICATION_DELETE_REMOTE_PREFIX, params)
else:
# Extract relevant info for update-add/update-delete.
params = [{ROUTE_DISTINGUISHER: outgoing_msg.route_dist,
PREFIX: vpn_nlri.prefix,
NEXT_HOP: path.nexthop,
VRF_RF: VrfConf.rf_2_vrf_rf(path.route_family),
ORIGIN_RD: path.origin_rd}]
if not path.is_withdraw:
# Create notification to NetworkController.
rpc_msg = rpc_session.create_notification(
NOTIFICATION_ADD_LOCAL_PREFIX, params)
else:
# Create update-delete request to NetworkController.
rpc_msg = rpc_session.create_notification(
NOTIFICATION_DELETE_LOCAL_PREFIX, params)
return rpc_msg
def _validate_rpc_ip(rpc_server_ip):
"""Validates given ip for use as rpc host bind address.
"""
if not is_valid_ipv4(rpc_server_ip):
raise NetworkControllerError(desc='Invalid rpc ip address.')
return rpc_server_ip
def _validate_rpc_port(port):
"""Validates give port for use as rpc server port.
"""
if not port:
raise NetworkControllerError(desc='Invalid rpc port number.')
if isinstance(port, str):
port = int(port)
if port <= 0:
raise NetworkControllerError(desc='Invalid rpc port number %s' % port)
return port
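# Quick illustration of the coercion above (the port value is illustrative):
#
#   _validate_rpc_port('50002')  # -> 50002; falsy or non-positive values raise
#                                #    NetworkControllerError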
class _NetworkController(FlexinetPeer, Activity):
"""Network controller peer.
    Provides a MessagePack-RPC interface for flexinet peers, such as a network
    controller, to establish an RPC session with the BGPS process. This RPC
    interface provides access to the BGPS API.
"""
def __init__(self):
FlexinetPeer.__init__(self)
Activity.__init__(self, name='NETWORK_CONTROLLER')
# Outstanding requests, i.e. requests for which we are yet to receive
# response from peer. We currently do not have any requests going out.
self._outstanding_reqs = {}
# Dictionary for Peer name to RPC session.
self._rpc_sessions = {}
def _run(self, *args, **kwargs):
"""Runs RPC server.
Wait for peer to connect and start rpc session with it.
        For every connection we start a new rpc session.
"""
apgw_rpc_bind_ip = _validate_rpc_ip(kwargs.pop(NC_RPC_BIND_IP))
apgw_rpc_bind_port = _validate_rpc_port(kwargs.pop(NC_RPC_BIND_PORT))
sock_addr = (apgw_rpc_bind_ip, apgw_rpc_bind_port)
LOG.debug('NetworkController started listening for connections...')
server_thread, _ = self._listen_tcp(sock_addr,
self._start_rpc_session)
self.pause(0)
server_thread.wait()
def _start_rpc_session(self, sock):
"""Starts a new RPC session with given connection.
"""
session_name = RpcSession.NAME_FMT % str(sock.getpeername())
self._stop_child_activities(session_name)
rpc_session = RpcSession(sock, self)
self._spawn_activity(rpc_session)
def _send_rpc_notification_to_session(self, session, method, params):
if not session.is_connected:
# Stops disconnected RPC session.
self._stop_child_activities(session.name)
return
return session.send_notification(method, params)
def send_rpc_notification(self, method, params):
if not self.started:
return
for session in list(self._child_activity_map.values()):
if not isinstance(session, RpcSession):
continue
self._send_rpc_notification_to_session(session, method, params)
def _handle_response(response):
raise NotImplementedError('BGPS is not making any request hence should not'
' get any response. Response: %s' % response)
def _handle_notification(notification):
LOG.debug('Notification from NetworkController<<: %s %s',
notification[RPC_IDX_NTF_SYM], notification[RPC_IDX_NTF_PARAM])
operation, params = notification[1], notification[2]
return api.base.call(operation, **params[0])
def _handle_request(request):
LOG.debug('Request from NetworkController<<: %s %s',
request[RPC_IDX_REQ_SYM], request[RPC_IDX_REQ_PARAM])
operation, params = request[2], request[3]
kwargs = {}
if len(params) > 0:
kwargs = params[0]
try:
return api.base.call(operation, **kwargs)
except TypeError:
LOG.error(traceback.format_exc())
raise ApiException(desc='Invalid type for RPC parameter.')
# Network controller singleton
NET_CONTROLLER = _NetworkController()
| 36.489157 | 79 | 0.656343 |
7944ace462645f1c48599d79cbcaf116e40b402d | 7,886 | py | Python | Exponential_LSTM/lstm_smooth_2hyp.py | PranavEranki/Time-Series-Experiments | 3c90443fb3070ebe9c2b20c34d5c9a5de52ef374 | [
"MIT"
] | null | null | null | Exponential_LSTM/lstm_smooth_2hyp.py | PranavEranki/Time-Series-Experiments | 3c90443fb3070ebe9c2b20c34d5c9a5de52ef374 | [
"MIT"
] | null | null | null | Exponential_LSTM/lstm_smooth_2hyp.py | PranavEranki/Time-Series-Experiments | 3c90443fb3070ebe9c2b20c34d5c9a5de52ef374 | [
"MIT"
] | null | null | null | import random
import numpy as np
import math
import numpy.polynomial.polynomial as poly
import matplotlib
import matplotlib.pyplot as plt
import operator
import matplotlib.backends.backend_pdf
from scipy.stats import *
import csv
############# Generating Utilities according to Boulware #########
def boulwareUtilities (rv,Deadline):
ut = []
beta = 5.2
beta = float(1)/beta
for i in range(1,Deadline+1):
minm = min(i,Deadline)
time = float(minm)/Deadline
curr_ut = rv + (1-rv)*(math.pow(time,beta))
# print "================"
# print minm
# print time
# print beta
# print "================"
ut.append(float("{0:.4f}".format(curr_ut)))
return ut
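# Worked example for the Boulware curve above (illustrative values only:
# reservation value 0.3 and a 10-round deadline; beta is fixed at 5.2, so the
# exponent is 1/5.2):
#
#   boulwareUtilities(0.3, 10)[0]   # -> 0.3 + 0.7 * (1.0/10) ** (1/5.2) ~= 0.75
#   boulwareUtilities(0.3, 10)[-1]  # -> 0.3 + 0.7 * 1.0 = 1.0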
#############################################################
########## Generating Utilities according to Tim Barslaag #########
def GenerateTimUtility( rv,rounds):
l=[]
l.append(rv);
for i in range(1,rounds):
l.append(float((l[i-1]+1)*(l[i-1]+1))/4)
return l
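# Worked example of the recurrence above (illustrative reservation value 0.5):
# l[0] = 0.5, l[1] = (1.5 ** 2) / 4 = 0.5625, l[2] = (1.5625 ** 2) / 4 ~= 0.6104,
# i.e. the generated utilities creep towards 1 as the round number grows.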
###################################################################
def getflag(direction,Gridcoords,GridSize):
flag=0
if(direction==1):
if(Gridcoords[0]!=0):
Gridcoords[0]-=1 ### Moving North
if(Gridcoords[1]==GridSize-1):
flag=3
elif(Gridcoords[1]==0):
flag=2
elif(Gridcoords[0]+1==GridSize-1):
flag=4
elif(direction==2):
if(Gridcoords[1]!=0):
Gridcoords[1]-=1 ### Moving West
if(Gridcoords[0]==0):
flag=1
elif(Gridcoords[1]+1==GridSize-1):
     flag=3
elif(Gridcoords[0]==GridSize-1):
flag=4
elif(direction==3):
if(Gridcoords[1]!=GridSize-1):
Gridcoords[1]+=1 ### Moving East
if(Gridcoords[0]==GridSize-1):
flag=4
elif(Gridcoords[0]==0):
flag=1
elif(Gridcoords[1]-1==0):
flag=2
else:
if(Gridcoords[0]!=GridSize-1):
Gridcoords[0]+=1 ### Moving South
if(Gridcoords[0]-1==0):
flag=1
elif(Gridcoords[1]==GridSize-1):
flag=3
elif(Gridcoords[1]==0):
flag=2
return flag
#############################################################
def Firerv(RV,roundnum,Deadline,UpdateRate,GridSize,Gridcoords):
ManPower=[12,10,7,4]
Utilities=[0.75,0.57,0.321,0.12]
if(roundnum==0):
# print "---round 1: =="
direction=random.randint(1,4)
# direction=random.choice([1,4])
### Gridcoords Updation
flag=getflag(direction,Gridcoords,GridSize)
# print "------"
# print direction
# print Gridcoords
# print "------"
if(flag==0 ):
return Utilities[direction-1]
#return getReservationUtility(ManPower[direction-1])
else:
### commented by Kritika
#print "This case: "+str(flag) + " "+ str(ManPower[direction-1] )
###
# return getReservationUtility( max (ManPower[flag-1], ManPower[direction-1] ))
return max (Utilities[flag-1], Utilities[direction-1] )
elif(roundnum%UpdateRate==0):
# print "---update == " + str(roundnum)
direction=random.randint(1,4)
# direction=random.choice([1,4])
flag=getflag(direction,Gridcoords,GridSize)
# print "------"
# print direction
# print Gridcoords
# print "------"
if(flag==0 ):
# return getReservationUtility(ManPower[direction-1])
return Utilities[direction-1]
else:
# print "This case: "+str(flag) + " "+ str(ManPower[direction-1] )
# return getReservationUtility( max (ManPower[flag-1], ManPower[direction-1] ) )
return Utilities[direction-1]
else:
return RV[len(RV)-1]
#############################################################
def getprobability( rows ):
#probabilities=[[0.25,0.25,0.25,0.25]]
probabilities=[[0.5,0.5]]
cnt =0
l=[]
for i in rows:
l.append(float(i))
cnt=cnt+1
if(cnt%2==0):
probabilities.append(l)
l=[]
return probabilities
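# Example of the pairing above (the row values are illustrative): a flat CSV row
# of strings is grouped into per-round two-element probability vectors, with a
# uniform [0.5, 0.5] prior prepended:
#
#   getprobability(['0.9', '0.1', '0.8', '0.2'])
#   # -> [[0.5, 0.5], [0.9, 0.1], [0.8, 0.2]]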
if __name__ == '__main__':
Average_rv=[]
AverageUtilities_Tims=[]
AverageUtilities_lstm=[]
####----- CSV parsing ---########3
rows=[]
fields=[]
dir_list = [ './Data_2hyp_0.1/', './Data_2hyp_0.2/' ]
class_pred_probs_list = ['pred_fire2.csv', 'pred_fire5.csv', 'pred_fire10.csv', 'pred_fire20.csv', 'pred_fire50.csv', ]
# update rate assumed to be same
# grid size assumed to be same
for dir_name in dir_list:
for file_name in class_pred_probs_list:
print (str(dir_name + "Class_Pred_Probs/" + file_name))
with open(str(dir_name + "Class_Pred_Probs/" + file_name), 'r') as csvfile:
csvreader = csv.reader(csvfile)
fields = csvreader.next()
for row in csvreader:
rows.append(row)
for iterations in xrange(1,100):
probabilities=getprobability(rows[iterations-1])
# probabilities=getprobability(rows[22])
# print probabilities[1]
RV=[0]
Deadline = 100
intervals=2
UpdateRate=2 ##### keep updating the updaterate according to csv file parsed
random_rv=[0.12,0.75]
# iterations=1
Utilities=[]
actual_utility=[]
for rv in random_rv:
# Utilities.append(GenerateTimUtility(rv,Deadline))
Utilities.append(boulwareUtilities(rv,Deadline))
new_probability = probabilities
lstmUtilities=[]
x=[]
for i in xrange(1,Deadline+1):
x.append(i)
x_belief=[]
for i in xrange(0,Deadline+1):
x_belief.append(i)
GridSize=20
Gridcoords=[GridSize/2 ,GridSize/2]
####------ Negotiation starts ------######
for roundnum in xrange(1,Deadline+1):
new_CombinedUtility=0
for i in xrange(0,len(new_probability[0])):
new_CombinedUtility+=new_probability[roundnum-1][i]*Utilities[i][len(Utilities[i])-roundnum]
lstmUtilities.append(float("{0:.4f}".format(new_CombinedUtility)))
# actual_utility.append(float("{0:.4f}".format(utility_RV[len(utility_RV)-roundnum])))
# print lstmUtilities
   ##### --------- Averages over iterations ---#####
if(iterations==1):
Average_rv=RV
# AverageUtilities_Tims=actual_utility
AverageUtilities_lstm=lstmUtilities
else:
Average_rv=np.array(Average_rv,dtype=float)*(iterations-1)
# AverageUtilities_Tims=np.array(AverageUtilities_Tims,dtype=float)*(iterations-1)
AverageUtilities_lstm=np.array(AverageUtilities_lstm,dtype=float)*(iterations-1)
# print Average_rv
Average_rv=map(operator.add,Average_rv,RV)
# AverageUtilities_Tims=map(operator.add,AverageUtilities_Tims,actual_utility)
AverageUtilities_lstm=map(operator.add,AverageUtilities_lstm,lstmUtilities)
Average_rv=np.array(Average_rv)/iterations
# AverageUtilities_Tims=np.array(AverageUtilities_Tims)/iterations
AverageUtilities_lstm=np.array(AverageUtilities_lstm)/iterations
### Commented by Kritika
# print "---- " + str(iterations) + " -----"
###
lstmError=0
for i in xrange(2,6):
lstm_fit=np.polyfit(x,AverageUtilities_lstm,i,full=True)
if(i==2):
lstmError=lstm_fit[1]
lstm_index=i
else:
if(lstm_fit[1]<lstmError):
lstmError=lstm_fit[1]
lstm_index=i
legend_properties = {'weight':'bold', 'size':20}
plt.figure('AverageUtilities lstm')
plt.title('LSTM',fontsize=20, fontweight='bold')
coefs=poly.polyfit(x,AverageUtilities_lstm,lstm_index)
ffit=poly.polyval(x,coefs)
Bay,=plt.plot(x,AverageUtilities_lstm, linestyle='-', color='k', linewidth=1.5)
Bayfit,=plt.plot(x,ffit, linestyle='--', color='g', linewidth=3.5)
plt.legend([Bay,Bayfit],["LSTM Utilities","Fitted Utilities"],loc=6,ncol=1, handlelength=4,prop=legend_properties)
plt.yticks(fontsize=20,fontweight='bold')
plt.xticks(fontsize=20,fontweight='bold')
# plt.plot(Average_rv,'ro')
# plt.plot(AverageUtilities_Normalised,'r--',ffit,'g--')
plt.xlabel('Rounds',fontsize=20, fontweight='bold')
plt.ylabel('Utilities',fontsize=20, fontweight='bold')
plt.savefig('lstm.pdf',format='pdf', dpi=1000)
### Commented by Kritika
# print '######################'
# print "Smoothness"
###
print lstmError
### Commented by Kritika
# print '######################'
### | 26.823129 | 120 | 0.631752 |
7944adb1943ab4c9a794191f507252c2625a24e9 | 950 | py | Python | ex6/code/create_data.py | MockyJoke/numbers | 53b0a8e5c5f0edbf01b86fe5968ef02d8d938438 | [
"MIT"
] | 1 | 2018-07-06T18:41:28.000Z | 2018-07-06T18:41:28.000Z | ex6/code/create_data.py | MockyJoke/numbers | 53b0a8e5c5f0edbf01b86fe5968ef02d8d938438 | [
"MIT"
] | null | null | null | ex6/code/create_data.py | MockyJoke/numbers | 53b0a8e5c5f0edbf01b86fe5968ef02d8d938438 | [
"MIT"
] | 1 | 2018-07-11T01:30:54.000Z | 2018-07-11T01:30:54.000Z |
# coding: utf-8
# In[1]:
import sys
import pandas as pd
import numpy as np
import difflib
import gzip
from scipy import stats
import time
from implementations import all_implementations
from random import randint
# In[ ]:
def main():
ARR_SIZE = 18000
SORT_TRIALS = 50
random_arrays = []
for i in range(SORT_TRIALS):
random_arrays.append(np.random.randint(0, ARR_SIZE, ARR_SIZE))
df_result = pd.DataFrame(np.nan, index=np.array(range(SORT_TRIALS)),columns = [fn.__name__ for fn in all_implementations])
# start = time.time()
for sort in all_implementations:
for i in range(SORT_TRIALS):
st = time.time()
res = sort(random_arrays[i])
en = time.time()
df_result.iloc[i][sort.__name__]=en-st
# print("Sorted all data: in "+str(time.time()-start)+" sec(s).")
df_result.to_csv('data.csv', index=False)
if __name__ == '__main__':
main()
| 24.358974 | 126 | 0.650526 |
7944ae17a8554b3c3774fa71268779cad66ba66c | 4,871 | py | Python | src/bindings/python/src/openvino/runtime/utils/input_validation.py | pfinashx/openvino | 1d417e888b508415510fb0a92e4a9264cf8bdef7 | [
"Apache-2.0"
] | 1 | 2022-02-26T17:33:44.000Z | 2022-02-26T17:33:44.000Z | src/bindings/python/src/openvino/runtime/utils/input_validation.py | pfinashx/openvino | 1d417e888b508415510fb0a92e4a9264cf8bdef7 | [
"Apache-2.0"
] | 18 | 2022-01-21T08:42:58.000Z | 2022-03-28T13:21:31.000Z | src/bindings/python/src/openvino/runtime/utils/input_validation.py | AlexRogalskiy/openvino | ac2e639ff8f9a607c3c682a4c4e165c238eb817f | [
"Apache-2.0"
] | 1 | 2020-12-13T22:16:54.000Z | 2020-12-13T22:16:54.000Z | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Helper functions for validating user input."""
import logging
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type
import numpy as np
from openvino.runtime.exceptions import UserInputError
log = logging.getLogger(__name__)
def assert_list_of_ints(value_list: Iterable[int], message: str) -> None:
"""Verify that the provided value is an iterable of integers."""
try:
for value in value_list:
if not isinstance(value, int):
raise TypeError
except TypeError:
log.warning(message)
raise UserInputError(message, value_list)
def _check_value(op_name, attr_key, value, val_type, cond=None):
# type: (str, str, Any, Type, Optional[Callable[[Any], bool]]) -> bool
"""Check whether provided value satisfies specified criteria.
@param op_name: The operator name which attributes are checked.
@param attr_key: The attribute name.
@param value: The value to check.
@param val_type: Required value type.
@param cond: The optional function running additional checks.
:raises UserInputError:
    @return True if the attribute satisfies all criteria, False otherwise.
"""
if not np.issubdtype(type(value), val_type):
raise UserInputError(
'{} operator attribute "{}" value must by of type {}.'.format(
op_name, attr_key, val_type
)
)
if cond is not None and not cond(value):
raise UserInputError(
'{} operator attribute "{}" value does not satisfy provided condition.'.format(
op_name, attr_key
)
)
return True
def check_valid_attribute(op_name, attr_dict, attr_key, val_type, cond=None, required=False):
# type: (str, dict, str, Type, Optional[Callable[[Any], bool]], Optional[bool]) -> bool
"""Check whether specified attribute satisfies given criteria.
@param op_name: The operator name which attributes are checked.
@param attr_dict: Dictionary containing key-value attributes to check.
@param attr_key: Key value for validated attribute.
@param val_type: Value type for validated attribute.
    @param cond: Any callable which accepts the attribute value and returns True or False.
    @param required: Whether the attribute is required. If True, it must be present in
                     the provided dictionary; if False, it may be missing.
:raises UserInputError:
@return True if attribute satisfies all criterias. Otherwise False.
"""
result = True
if required and attr_key not in attr_dict:
raise UserInputError(
'Provided dictionary is missing {} operator required attribute "{}"'.format(
op_name, attr_key
)
)
if attr_key not in attr_dict:
return result
attr_value = attr_dict[attr_key]
if np.isscalar(attr_value):
result = result and _check_value(op_name, attr_key, attr_value, val_type, cond)
else:
for v in attr_value:
result = result and _check_value(op_name, attr_key, v, val_type, cond)
return result
def check_valid_attributes(
op_name, # type: str
attributes, # type: Dict[str, Any]
requirements, # type: List[Tuple[str, bool, Type, Optional[Callable]]]
):
# type: (...) -> bool
"""Perform attributes validation according to specified type, value criteria.
@param op_name: The operator name which attributes are checked.
@param attributes: The dictionary with user provided attributes to check.
@param requirements: The list of tuples describing attributes' requirements. The tuple should
contain following values:
(attr_name: str,
is_required: bool,
value_type: Type,
value_condition: Callable)
:raises UserInputError:
@return True if all attributes satisfies criterias. Otherwise False.
"""
for attr, required, val_type, cond in requirements:
check_valid_attribute(op_name, attributes, attr, val_type, cond, required)
return True
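# Minimal usage sketch (the operator name and attributes below are purely
# illustrative, not taken from any real node):
#
#   check_valid_attributes(
#       "SomeOp",
#       {"axis": 1, "k": 5},
#       [("axis", True, np.integer, None),
#        ("k", True, np.integer, is_positive_value)],
#   )  # -> True, or raises UserInputError if a check fails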
def is_positive_value(x): # type: (Any) -> bool
"""Determine whether the specified x is positive value.
@param x: The value to check.
@return True if the specified x is positive value, False otherwise.
"""
return x > 0
def is_non_negative_value(x): # type: (Any) -> bool
"""Determine whether the specified x is non-negative value.
@param x: The value to check.
@return True if the specified x is non-negative value, False otherwise.
"""
return x >= 0
| 35.554745 | 100 | 0.642579 |
7944aed8277163000fc8cd2b1268bf854def5780 | 43,216 | py | Python | zerver/tests/test_home.py | isakhagg/zulip | d1732fb9da4f9e4aa49bc74bc1ca1ef112f064cc | [
"Apache-2.0"
] | null | null | null | zerver/tests/test_home.py | isakhagg/zulip | d1732fb9da4f9e4aa49bc74bc1ca1ef112f064cc | [
"Apache-2.0"
] | null | null | null | zerver/tests/test_home.py | isakhagg/zulip | d1732fb9da4f9e4aa49bc74bc1ca1ef112f064cc | [
"Apache-2.0"
] | 1 | 2022-01-07T14:15:00.000Z | 2022-01-07T14:15:00.000Z | import calendar
import datetime
import urllib
from datetime import timedelta
from typing import Any
from unittest.mock import patch
import orjson
import pytz
from django.conf import settings
from django.http import HttpResponse
from django.test import override_settings
from django.utils.timezone import now as timezone_now
from corporate.models import Customer, CustomerPlan
from zerver.lib.actions import change_user_is_active, do_change_plan_type, do_create_user
from zerver.lib.compatibility import LAST_SERVER_UPGRADE_TIME, is_outdated_server
from zerver.lib.home import (
get_billing_info,
get_furthest_read_time,
promote_sponsoring_zulip_in_realm,
)
from zerver.lib.soft_deactivation import do_soft_deactivate_users
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import get_user_messages, queries_captured
from zerver.models import (
DefaultStream,
Draft,
Realm,
UserActivity,
UserProfile,
flush_per_request_caches,
get_realm,
get_stream,
get_system_bot,
get_user,
)
from zerver.worker.queue_processors import UserActivityWorker
logger_string = "zulip.soft_deactivation"
class HomeTest(ZulipTestCase):
# Keep this list sorted!!!
expected_page_params_keys = [
"alert_words",
"apps_page_url",
"avatar_source",
"avatar_url",
"avatar_url_medium",
"bot_types",
"can_create_streams",
"can_invite_others_to_realm",
"can_subscribe_other_users",
"corporate_enabled",
"cross_realm_bots",
"custom_profile_field_types",
"custom_profile_fields",
"delivery_email",
"development_environment",
"drafts",
"email",
"event_queue_longpoll_timeout_seconds",
"first_in_realm",
"full_name",
"furthest_read_time",
"giphy_api_key",
"giphy_rating_options",
"has_zoom_token",
"hotspots",
"insecure_desktop_app",
"is_admin",
"is_billing_admin",
"is_guest",
"is_moderator",
"is_owner",
"is_spectator",
"jitsi_server_url",
"language_list",
"last_event_id",
"login_page",
"max_avatar_file_size_mib",
"max_file_upload_size_mib",
"max_icon_file_size_mib",
"max_logo_file_size_mib",
"max_message_id",
"max_message_length",
"max_stream_description_length",
"max_stream_name_length",
"max_topic_length",
"muted_topics",
"muted_users",
"narrow",
"narrow_stream",
"needs_tutorial",
"never_subscribed",
"no_event_queue",
"password_min_guesses",
"password_min_length",
"presences",
"promote_sponsoring_zulip",
"prompt_for_invites",
"queue_id",
"realm_add_custom_emoji_policy",
"realm_allow_edit_history",
"realm_allow_message_deleting",
"realm_allow_message_editing",
"realm_authentication_methods",
"realm_available_video_chat_providers",
"realm_avatar_changes_disabled",
"realm_bot_creation_policy",
"realm_bot_domain",
"realm_bots",
"realm_community_topic_editing_limit_seconds",
"realm_create_stream_policy",
"realm_default_code_block_language",
"realm_default_external_accounts",
"realm_default_language",
"realm_default_stream_groups",
"realm_default_streams",
"realm_description",
"realm_digest_emails_enabled",
"realm_digest_weekday",
"realm_disallow_disposable_email_addresses",
"realm_domains",
"realm_edit_topic_policy",
"realm_email_address_visibility",
"realm_email_auth_enabled",
"realm_email_changes_disabled",
"realm_emails_restricted_to_domains",
"realm_embedded_bots",
"realm_emoji",
"realm_filters",
"realm_giphy_rating",
"realm_icon_source",
"realm_icon_url",
"realm_incoming_webhook_bots",
"realm_inline_image_preview",
"realm_inline_url_embed_preview",
"realm_invite_required",
"realm_invite_to_realm_policy",
"realm_invite_to_stream_policy",
"realm_is_zephyr_mirror_realm",
"realm_linkifiers",
"realm_logo_source",
"realm_logo_url",
"realm_mandatory_topics",
"realm_message_content_allowed_in_email_notifications",
"realm_message_content_delete_limit_seconds",
"realm_message_content_edit_limit_seconds",
"realm_message_retention_days",
"realm_move_messages_between_streams_policy",
"realm_name",
"realm_name_changes_disabled",
"realm_night_logo_source",
"realm_night_logo_url",
"realm_non_active_users",
"realm_notifications_stream_id",
"realm_password_auth_enabled",
"realm_plan_type",
"realm_playgrounds",
"realm_presence_disabled",
"realm_private_message_policy",
"realm_push_notifications_enabled",
"realm_send_welcome_emails",
"realm_signup_notifications_stream_id",
"realm_upload_quota_mib",
"realm_uri",
"realm_user_group_edit_policy",
"realm_user_groups",
"realm_user_settings_defaults",
"realm_users",
"realm_video_chat_provider",
"realm_waiting_period_threshold",
"realm_wildcard_mention_policy",
"recent_private_conversations",
"request_language",
"search_pills_enabled",
"server_avatar_changes_disabled",
"server_generation",
"server_inline_image_preview",
"server_inline_url_embed_preview",
"server_name_changes_disabled",
"server_needs_upgrade",
"server_timestamp",
"settings_send_digest_emails",
"show_billing",
"show_plans",
"show_webathena",
"starred_messages",
"stop_words",
"subscriptions",
"test_suite",
"translation_data",
"two_fa_enabled",
"two_fa_enabled_user",
"unread_msgs",
"unsubscribed",
"upgrade_text_for_wide_organization_logo",
"user_id",
"user_settings",
"user_status",
"warn_no_email",
"webpack_public_path",
"zulip_feature_level",
"zulip_merge_base",
"zulip_plan_is_not_limited",
"zulip_version",
]
def test_home(self) -> None:
# Keep this list sorted!!!
html_bits = [
"start the conversation",
"Loading...",
# Verify that the app styles get included
"app-stubentry.js",
"data-params",
]
self.login("hamlet")
# Create bot for realm_bots testing. Must be done before fetching home_page.
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
self.client_post("/json/bots", bot_info)
# Verify succeeds once logged-in
flush_per_request_caches()
with queries_captured() as queries:
with patch("zerver.lib.cache.cache_set") as cache_mock:
result = self._get_home_page(stream="Denmark")
self.check_rendered_logged_in_app(result)
self.assertEqual(
set(result["Cache-Control"].split(", ")), {"must-revalidate", "no-store", "no-cache"}
)
self.assert_length(queries, 44)
self.assert_length(cache_mock.call_args_list, 5)
html = result.content.decode()
for html_bit in html_bits:
if html_bit not in html:
raise AssertionError(f"{html_bit} not in result")
page_params = self._get_page_params(result)
actual_keys = sorted(str(k) for k in page_params.keys())
self.assertEqual(actual_keys, self.expected_page_params_keys)
# TODO: Inspect the page_params data further.
# print(orjson.dumps(page_params, option=orjson.OPT_INDENT_2).decode())
realm_bots_expected_keys = [
"api_key",
"avatar_url",
"bot_type",
"default_all_public_streams",
"default_events_register_stream",
"default_sending_stream",
"email",
"full_name",
"is_active",
"owner_id",
"services",
"user_id",
]
realm_bots_actual_keys = sorted(str(key) for key in page_params["realm_bots"][0].keys())
self.assertEqual(realm_bots_actual_keys, realm_bots_expected_keys)
def test_home_demo_organization(self) -> None:
realm = get_realm("zulip")
# We construct a scheduled deletion date that's definitely in
# the future, regardless of how long ago the Zulip realm was
# created.
realm.demo_organization_scheduled_deletion_date = timezone_now() + datetime.timedelta(
days=1
)
realm.save()
self.login("hamlet")
# Verify succeeds once logged-in
flush_per_request_caches()
with queries_captured():
with patch("zerver.lib.cache.cache_set"):
result = self._get_home_page(stream="Denmark")
self.check_rendered_logged_in_app(result)
page_params = self._get_page_params(result)
actual_keys = sorted(str(k) for k in page_params.keys())
expected_keys = self.expected_page_params_keys + [
"demo_organization_scheduled_deletion_date"
]
self.assertEqual(set(actual_keys), set(expected_keys))
def test_logged_out_home(self) -> None:
result = self.client_get("/")
self.assertEqual(result.status_code, 200)
page_params = self._get_page_params(result)
actual_keys = sorted(str(k) for k in page_params.keys())
removed_keys = [
"last_event_id",
"narrow",
"narrow_stream",
]
expected_keys = [i for i in self.expected_page_params_keys if i not in removed_keys]
self.assertEqual(actual_keys, expected_keys)
def test_home_under_2fa_without_otp_device(self) -> None:
with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
self.login("iago")
result = self._get_home_page()
# Should be successful because otp device is not configured.
self.check_rendered_logged_in_app(result)
def test_home_under_2fa_with_otp_device(self) -> None:
with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
user_profile = self.example_user("iago")
self.create_default_device(user_profile)
self.login_user(user_profile)
result = self._get_home_page()
# User should not log in because otp device is configured but
# 2fa login function was not called.
self.assertEqual(result.status_code, 302)
self.login_2fa(user_profile)
result = self._get_home_page()
# Should be successful after calling 2fa login function.
self.check_rendered_logged_in_app(result)
def test_num_queries_for_realm_admin(self) -> None:
# Verify number of queries for Realm admin isn't much higher than for normal users.
self.login("iago")
flush_per_request_caches()
with queries_captured() as queries:
with patch("zerver.lib.cache.cache_set") as cache_mock:
result = self._get_home_page()
self.check_rendered_logged_in_app(result)
self.assert_length(cache_mock.call_args_list, 6)
self.assert_length(queries, 41)
def test_num_queries_with_streams(self) -> None:
main_user = self.example_user("hamlet")
other_user = self.example_user("cordelia")
realm_id = main_user.realm_id
self.login_user(main_user)
# Try to make page-load do extra work for various subscribed
# streams.
for i in range(10):
stream_name = "test_stream_" + str(i)
stream = self.make_stream(stream_name)
DefaultStream.objects.create(
realm_id=realm_id,
stream_id=stream.id,
)
for user in [main_user, other_user]:
self.subscribe(user, stream_name)
# Simulate hitting the page the first time to avoid some noise
# related to initial logins.
self._get_home_page()
# Then for the second page load, measure the number of queries.
flush_per_request_caches()
with queries_captured() as queries2:
result = self._get_home_page()
self.assert_length(queries2, 39)
# Do a sanity check that our new streams were in the payload.
html = result.content.decode()
self.assertIn("test_stream_7", html)
def _get_home_page(self, **kwargs: Any) -> HttpResponse:
with patch("zerver.lib.events.request_event_queue", return_value=42), patch(
"zerver.lib.events.get_user_events", return_value=[]
):
result = self.client_get("/", dict(**kwargs))
return result
def _sanity_check(self, result: HttpResponse) -> None:
"""
Use this for tests that are geared toward specific edge cases, but
which still want the home page to load properly.
"""
html = result.content.decode()
if "start a conversation" not in html:
raise AssertionError("Home page probably did not load.")
def test_terms_of_service(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
for user_tos_version in [None, "1.1", "2.0.3.4"]:
user.tos_version = user_tos_version
user.save()
with self.settings(TERMS_OF_SERVICE="whatever"), self.settings(TOS_VERSION="99.99"):
result = self.client_get("/", dict(stream="Denmark"))
html = result.content.decode()
self.assertIn("Accept the new Terms of Service", html)
def test_banned_desktop_app_versions(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
result = self.client_get("/", HTTP_USER_AGENT="ZulipElectron/2.3.82")
html = result.content.decode()
self.assertIn("You are using old version of the Zulip desktop", html)
def test_unsupported_browser(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
# currently we don't support IE, so some of IE's user agents are added.
unsupported_user_agents = [
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2)",
"Mozilla/5.0 (Windows NT 10.0; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)",
]
for user_agent in unsupported_user_agents:
result = self.client_get("/", HTTP_USER_AGENT=user_agent)
html = result.content.decode()
self.assertIn("Internet Explorer is not supported by Zulip.", html)
def test_terms_of_service_first_time_template(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
user.tos_version = None
user.save()
with self.settings(FIRST_TIME_TOS_TEMPLATE="hello.html"), self.settings(
TOS_VERSION="99.99"
):
result = self.client_post("/accounts/accept_terms/")
self.assertEqual(result.status_code, 200)
self.assert_in_response("I agree to the", result)
self.assert_in_response("Chat for distributed teams", result)
def test_accept_terms_of_service(self) -> None:
self.login("hamlet")
result = self.client_post("/accounts/accept_terms/")
self.assertEqual(result.status_code, 200)
self.assert_in_response("I agree to the", result)
result = self.client_post("/accounts/accept_terms/", {"terms": True})
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "/")
def test_bad_narrow(self) -> None:
self.login("hamlet")
with self.assertLogs(level="WARNING") as m:
result = self._get_home_page(stream="Invalid Stream")
self.assertEqual(m.output, ["WARNING:root:Invalid narrow requested, ignoring"])
self._sanity_check(result)
def test_topic_narrow(self) -> None:
self.login("hamlet")
result = self._get_home_page(stream="Denmark", topic="lunch")
self._sanity_check(result)
html = result.content.decode()
self.assertIn("lunch", html)
self.assertEqual(
set(result["Cache-Control"].split(", ")), {"must-revalidate", "no-store", "no-cache"}
)
def test_notifications_stream(self) -> None:
realm = get_realm("zulip")
realm.notifications_stream_id = get_stream("Denmark", realm).id
realm.save()
self.login("hamlet")
result = self._get_home_page()
page_params = self._get_page_params(result)
self.assertEqual(
page_params["realm_notifications_stream_id"], get_stream("Denmark", realm).id
)
def create_bot(self, owner: UserProfile, bot_email: str, bot_name: str) -> UserProfile:
user = do_create_user(
email=bot_email,
password="123",
realm=owner.realm,
full_name=bot_name,
bot_type=UserProfile.DEFAULT_BOT,
bot_owner=owner,
acting_user=None,
)
return user
def create_non_active_user(self, realm: Realm, email: str, name: str) -> UserProfile:
user = do_create_user(
email=email, password="123", realm=realm, full_name=name, acting_user=None
)
# Doing a full-stack deactivation would be expensive here,
# and we really only need to flip the flag to get a valid
# test.
change_user_is_active(user, False)
return user
def test_signup_notifications_stream(self) -> None:
realm = get_realm("zulip")
realm.signup_notifications_stream = get_stream("Denmark", realm)
realm.save()
self.login("hamlet")
result = self._get_home_page()
page_params = self._get_page_params(result)
self.assertEqual(
page_params["realm_signup_notifications_stream_id"], get_stream("Denmark", realm).id
)
def test_people(self) -> None:
hamlet = self.example_user("hamlet")
realm = get_realm("zulip")
self.login_user(hamlet)
bots = {}
for i in range(3):
bots[i] = self.create_bot(
owner=hamlet,
bot_email=f"bot-{i}@zulip.com",
bot_name=f"Bot {i}",
)
for i in range(3):
defunct_user = self.create_non_active_user(
realm=realm,
email=f"defunct-{i}@zulip.com",
name=f"Defunct User {i}",
)
result = self._get_home_page()
page_params = self._get_page_params(result)
"""
We send three lists of users. The first two below are disjoint
lists of users, and the records we send for them have identical
structure.
The realm_bots bucket is somewhat redundant, since all bots will
be in one of the first two buckets. They do include fields, however,
that normal users don't care about, such as default_sending_stream.
"""
buckets = [
"realm_users",
"realm_non_active_users",
"realm_bots",
]
for field in buckets:
users = page_params[field]
self.assertGreaterEqual(len(users), 3, field)
for rec in users:
self.assertEqual(rec["user_id"], get_user(rec["email"], realm).id)
if field == "realm_bots":
self.assertNotIn("is_bot", rec)
self.assertIn("is_active", rec)
self.assertIn("owner_id", rec)
else:
self.assertIn("is_bot", rec)
self.assertNotIn("is_active", rec)
active_ids = {p["user_id"] for p in page_params["realm_users"]}
non_active_ids = {p["user_id"] for p in page_params["realm_non_active_users"]}
bot_ids = {p["user_id"] for p in page_params["realm_bots"]}
self.assertIn(hamlet.id, active_ids)
self.assertIn(defunct_user.id, non_active_ids)
# Bots can show up in multiple buckets.
self.assertIn(bots[2].id, bot_ids)
self.assertIn(bots[2].id, active_ids)
# Make sure nobody got mis-bucketed.
self.assertNotIn(hamlet.id, non_active_ids)
self.assertNotIn(defunct_user.id, active_ids)
cross_bots = page_params["cross_realm_bots"]
self.assert_length(cross_bots, 3)
cross_bots.sort(key=lambda d: d["email"])
for cross_bot in cross_bots:
# These are either nondeterministic or boring
del cross_bot["timezone"]
del cross_bot["avatar_url"]
del cross_bot["date_joined"]
admin_realm = get_realm(settings.SYSTEM_BOT_REALM)
cross_realm_notification_bot = self.notification_bot(admin_realm)
cross_realm_email_gateway_bot = get_system_bot(settings.EMAIL_GATEWAY_BOT, admin_realm.id)
cross_realm_welcome_bot = get_system_bot(settings.WELCOME_BOT, admin_realm.id)
by_email = lambda d: d["email"]
self.assertEqual(
sorted(cross_bots, key=by_email),
sorted(
[
dict(
avatar_version=cross_realm_email_gateway_bot.avatar_version,
bot_owner_id=None,
bot_type=1,
email=cross_realm_email_gateway_bot.email,
user_id=cross_realm_email_gateway_bot.id,
full_name=cross_realm_email_gateway_bot.full_name,
is_active=True,
is_bot=True,
is_admin=False,
is_owner=False,
is_billing_admin=False,
role=cross_realm_email_gateway_bot.role,
is_system_bot=True,
is_guest=False,
),
dict(
avatar_version=cross_realm_notification_bot.avatar_version,
bot_owner_id=None,
bot_type=1,
email=cross_realm_notification_bot.email,
user_id=cross_realm_notification_bot.id,
full_name=cross_realm_notification_bot.full_name,
is_active=True,
is_bot=True,
is_admin=False,
is_owner=False,
is_billing_admin=False,
role=cross_realm_notification_bot.role,
is_system_bot=True,
is_guest=False,
),
dict(
avatar_version=cross_realm_welcome_bot.avatar_version,
bot_owner_id=None,
bot_type=1,
email=cross_realm_welcome_bot.email,
user_id=cross_realm_welcome_bot.id,
full_name=cross_realm_welcome_bot.full_name,
is_active=True,
is_bot=True,
is_admin=False,
is_owner=False,
is_billing_admin=False,
role=cross_realm_welcome_bot.role,
is_system_bot=True,
is_guest=False,
),
],
key=by_email,
),
)
def test_new_stream(self) -> None:
user_profile = self.example_user("hamlet")
stream_name = "New stream"
self.subscribe(user_profile, stream_name)
self.login_user(user_profile)
result = self._get_home_page(stream=stream_name)
page_params = self._get_page_params(result)
self.assertEqual(page_params["narrow_stream"], stream_name)
self.assertEqual(page_params["narrow"], [dict(operator="stream", operand=stream_name)])
self.assertEqual(page_params["max_message_id"], -1)
def test_get_billing_info(self) -> None:
user = self.example_user("desdemona")
user.role = UserProfile.ROLE_REALM_OWNER
user.save(update_fields=["role"])
# realm owner, but no CustomerPlan and realm plan_type SELF_HOSTED -> neither billing link or plans
with self.settings(CORPORATE_ENABLED=True):
billing_info = get_billing_info(user)
self.assertFalse(billing_info.show_billing)
self.assertFalse(billing_info.show_plans)
# realm owner, with inactive CustomerPlan and realm plan_type SELF_HOSTED -> show only billing link
customer = Customer.objects.create(realm=get_realm("zulip"), stripe_customer_id="cus_id")
CustomerPlan.objects.create(
customer=customer,
billing_cycle_anchor=timezone_now(),
billing_schedule=CustomerPlan.ANNUAL,
next_invoice_date=timezone_now(),
tier=CustomerPlan.STANDARD,
status=CustomerPlan.ENDED,
)
with self.settings(CORPORATE_ENABLED=True):
billing_info = get_billing_info(user)
self.assertTrue(billing_info.show_billing)
self.assertFalse(billing_info.show_plans)
# realm owner, with inactive CustomerPlan and realm plan_type LIMITED -> show billing link and plans
do_change_plan_type(user.realm, Realm.LIMITED, acting_user=None)
with self.settings(CORPORATE_ENABLED=True):
billing_info = get_billing_info(user)
self.assertTrue(billing_info.show_billing)
self.assertTrue(billing_info.show_plans)
# Always false without CORPORATE_ENABLED
with self.settings(CORPORATE_ENABLED=False):
billing_info = get_billing_info(user)
self.assertFalse(billing_info.show_billing)
self.assertFalse(billing_info.show_plans)
# Always false without a UserProfile
with self.settings(CORPORATE_ENABLED=True):
billing_info = get_billing_info(None)
self.assertFalse(billing_info.show_billing)
self.assertFalse(billing_info.show_plans)
# realm admin, with CustomerPlan and realm plan_type LIMITED -> show only billing plans
user.role = UserProfile.ROLE_REALM_ADMINISTRATOR
user.save(update_fields=["role"])
with self.settings(CORPORATE_ENABLED=True):
billing_info = get_billing_info(user)
self.assertFalse(billing_info.show_billing)
self.assertTrue(billing_info.show_plans)
# billing admin, with CustomerPlan and realm plan_type STANDARD -> show only billing link
user.role = UserProfile.ROLE_MEMBER
user.is_billing_admin = True
do_change_plan_type(user.realm, Realm.STANDARD, acting_user=None)
user.save(update_fields=["role", "is_billing_admin"])
with self.settings(CORPORATE_ENABLED=True):
billing_info = get_billing_info(user)
self.assertTrue(billing_info.show_billing)
self.assertFalse(billing_info.show_plans)
# member, with CustomerPlan and realm plan_type STANDARD -> neither billing link or plans
user.is_billing_admin = False
user.save(update_fields=["is_billing_admin"])
with self.settings(CORPORATE_ENABLED=True):
billing_info = get_billing_info(user)
self.assertFalse(billing_info.show_billing)
self.assertFalse(billing_info.show_plans)
# guest, with CustomerPlan and realm plan_type SELF_HOSTED -> neither billing link or plans
user.role = UserProfile.ROLE_GUEST
user.save(update_fields=["role"])
do_change_plan_type(user.realm, Realm.SELF_HOSTED, acting_user=None)
with self.settings(CORPORATE_ENABLED=True):
billing_info = get_billing_info(user)
self.assertFalse(billing_info.show_billing)
self.assertFalse(billing_info.show_plans)
# billing admin, but no CustomerPlan and realm plan_type SELF_HOSTED -> neither billing link or plans
user.role = UserProfile.ROLE_MEMBER
user.is_billing_admin = True
user.save(update_fields=["role", "is_billing_admin"])
CustomerPlan.objects.all().delete()
with self.settings(CORPORATE_ENABLED=True):
billing_info = get_billing_info(user)
self.assertFalse(billing_info.show_billing)
self.assertFalse(billing_info.show_plans)
        # billing admin, with sponsorship pending and realm plan_type SELF_HOSTED -> show only billing link
customer.sponsorship_pending = True
customer.save(update_fields=["sponsorship_pending"])
with self.settings(CORPORATE_ENABLED=True):
billing_info = get_billing_info(user)
self.assertTrue(billing_info.show_billing)
self.assertFalse(billing_info.show_plans)
        # billing admin, no customer object and realm plan_type SELF_HOSTED -> neither billing link nor plans
customer.delete()
with self.settings(CORPORATE_ENABLED=True):
billing_info = get_billing_info(user)
self.assertFalse(billing_info.show_billing)
self.assertFalse(billing_info.show_plans)
def test_promote_sponsoring_zulip_in_realm(self) -> None:
realm = get_realm("zulip")
do_change_plan_type(realm, Realm.STANDARD_FREE, acting_user=None)
promote_zulip = promote_sponsoring_zulip_in_realm(realm)
self.assertTrue(promote_zulip)
with self.settings(PROMOTE_SPONSORING_ZULIP=False):
promote_zulip = promote_sponsoring_zulip_in_realm(realm)
self.assertFalse(promote_zulip)
do_change_plan_type(realm, Realm.STANDARD_FREE, acting_user=None)
promote_zulip = promote_sponsoring_zulip_in_realm(realm)
self.assertTrue(promote_zulip)
do_change_plan_type(realm, Realm.LIMITED, acting_user=None)
promote_zulip = promote_sponsoring_zulip_in_realm(realm)
self.assertFalse(promote_zulip)
do_change_plan_type(realm, Realm.STANDARD, acting_user=None)
promote_zulip = promote_sponsoring_zulip_in_realm(realm)
self.assertFalse(promote_zulip)
def test_desktop_home(self) -> None:
self.login("hamlet")
result = self.client_get("/desktop_home")
self.assertEqual(result.status_code, 301)
self.assertTrue(result["Location"].endswith("/desktop_home/"))
result = self.client_get("/desktop_home/")
self.assertEqual(result.status_code, 302)
path = urllib.parse.urlparse(result["Location"]).path
self.assertEqual(path, "/")
@override_settings(SERVER_UPGRADE_NAG_DEADLINE_DAYS=365)
def test_is_outdated_server(self) -> None:
# Check when server_upgrade_nag_deadline > last_server_upgrade_time
hamlet = self.example_user("hamlet")
iago = self.example_user("iago")
now = LAST_SERVER_UPGRADE_TIME.replace(tzinfo=pytz.utc)
with patch("zerver.lib.compatibility.timezone_now", return_value=now + timedelta(days=10)):
self.assertEqual(is_outdated_server(iago), False)
self.assertEqual(is_outdated_server(hamlet), False)
self.assertEqual(is_outdated_server(None), False)
with patch("zerver.lib.compatibility.timezone_now", return_value=now + timedelta(days=397)):
self.assertEqual(is_outdated_server(iago), True)
self.assertEqual(is_outdated_server(hamlet), True)
self.assertEqual(is_outdated_server(None), True)
with patch("zerver.lib.compatibility.timezone_now", return_value=now + timedelta(days=380)):
self.assertEqual(is_outdated_server(iago), True)
self.assertEqual(is_outdated_server(hamlet), False)
self.assertEqual(is_outdated_server(None), False)
def test_furthest_read_time(self) -> None:
msg_id = self.send_test_message("hello!", sender_name="iago")
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
self.client_post(
"/json/messages/flags",
{"messages": orjson.dumps([msg_id]).decode(), "op": "add", "flag": "read"},
)
# Manually process the UserActivity
now = timezone_now()
activity_time = calendar.timegm(now.timetuple())
user_activity_event = {
"user_profile_id": hamlet.id,
"client_id": 1,
"query": "update_message_flags",
"time": activity_time,
}
yesterday = now - timedelta(days=1)
activity_time_2 = calendar.timegm(yesterday.timetuple())
user_activity_event_2 = {
"user_profile_id": hamlet.id,
"client_id": 2,
"query": "update_message_flags",
"time": activity_time_2,
}
UserActivityWorker().consume_batch([user_activity_event, user_activity_event_2])
# verify furthest_read_time is last activity time, irrespective of client
furthest_read_time = get_furthest_read_time(hamlet)
self.assertGreaterEqual(furthest_read_time, activity_time)
# Check when user has no activity
UserActivity.objects.filter(user_profile=hamlet).delete()
furthest_read_time = get_furthest_read_time(hamlet)
self.assertIsNone(furthest_read_time)
# Check no user profile handling
furthest_read_time = get_furthest_read_time(None)
self.assertIsNotNone(furthest_read_time)
def test_subdomain_homepage(self) -> None:
self.login("hamlet")
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
with patch("zerver.views.home.get_subdomain", return_value=""):
result = self._get_home_page()
self.assertEqual(result.status_code, 200)
self.assert_in_response("Chat for distributed teams", result)
with patch("zerver.views.home.get_subdomain", return_value="subdomain"):
result = self._get_home_page()
self._sanity_check(result)
def send_test_message(
self,
content: str,
sender_name: str = "iago",
stream_name: str = "Denmark",
topic_name: str = "foo",
) -> int:
sender = self.example_user(sender_name)
return self.send_stream_message(sender, stream_name, content=content, topic_name=topic_name)
def soft_activate_and_get_unread_count(
self, stream: str = "Denmark", topic: str = "foo"
) -> int:
stream_narrow = self._get_home_page(stream=stream, topic=topic)
page_params = self._get_page_params(stream_narrow)
return page_params["unread_msgs"]["count"]
def test_unread_count_user_soft_deactivation(self) -> None:
# In this test we make sure if a soft deactivated user had unread
# messages before deactivation they remain same way after activation.
long_term_idle_user = self.example_user("hamlet")
self.login_user(long_term_idle_user)
message = "Test message 1"
self.send_test_message(message)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 1)
query_count = len(queries)
user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(user_msg_list[-1].content, message)
self.logout()
with self.assertLogs(logger_string, level="INFO") as info_log:
do_soft_deactivate_users([long_term_idle_user])
self.assertEqual(
info_log.output,
[
f"INFO:{logger_string}:Soft deactivated user {long_term_idle_user.id}",
f"INFO:{logger_string}:Soft-deactivated batch of 1 users; 0 remain to process",
],
)
self.login_user(long_term_idle_user)
message = "Test message 2"
self.send_test_message(message)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertNotEqual(idle_user_msg_list[-1].content, message)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 2)
# Test here for query count to be at least 5 greater than previous count
# This will assure indirectly that add_missing_messages() was called.
self.assertGreaterEqual(len(queries) - query_count, 5)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(idle_user_msg_list[-1].content, message)
def test_multiple_user_soft_deactivations(self) -> None:
long_term_idle_user = self.example_user("hamlet")
# We are sending this message to ensure that long_term_idle_user has
# at least one UserMessage row.
self.send_test_message("Testing", sender_name="hamlet")
with self.assertLogs(logger_string, level="INFO") as info_log:
do_soft_deactivate_users([long_term_idle_user])
self.assertEqual(
info_log.output,
[
f"INFO:{logger_string}:Soft deactivated user {long_term_idle_user.id}",
f"INFO:{logger_string}:Soft-deactivated batch of 1 users; 0 remain to process",
],
)
message = "Test message 1"
self.send_test_message(message)
self.login_user(long_term_idle_user)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 2)
query_count = len(queries)
long_term_idle_user.refresh_from_db()
self.assertFalse(long_term_idle_user.long_term_idle)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(idle_user_msg_list[-1].content, message)
message = "Test message 2"
self.send_test_message(message)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 3)
# Test here for query count to be at least 5 less than previous count.
# This will assure add_missing_messages() isn't repeatedly called.
self.assertGreaterEqual(query_count - len(queries), 5)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(idle_user_msg_list[-1].content, message)
self.logout()
with self.assertLogs(logger_string, level="INFO") as info_log:
do_soft_deactivate_users([long_term_idle_user])
self.assertEqual(
info_log.output,
[
f"INFO:{logger_string}:Soft deactivated user {long_term_idle_user.id}",
f"INFO:{logger_string}:Soft-deactivated batch of 1 users; 0 remain to process",
],
)
message = "Test message 3"
self.send_test_message(message)
self.login_user(long_term_idle_user)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 4)
query_count = len(queries)
long_term_idle_user.refresh_from_db()
self.assertFalse(long_term_idle_user.long_term_idle)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(idle_user_msg_list[-1].content, message)
message = "Test message 4"
self.send_test_message(message)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 5)
self.assertGreaterEqual(query_count - len(queries), 5)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(idle_user_msg_list[-1].content, message)
self.logout()
def test_url_language(self) -> None:
user = self.example_user("hamlet")
user.default_language = "es"
user.save()
self.login_user(user)
result = self._get_home_page()
self.check_rendered_logged_in_app(result)
with patch("zerver.lib.events.request_event_queue", return_value=42), patch(
"zerver.lib.events.get_user_events", return_value=[]
):
result = self.client_get("/de/")
page_params = self._get_page_params(result)
self.assertEqual(page_params["user_settings"]["default_language"], "es")
# TODO: Verify that the actual language we're using in the
# translation data is German.
def test_translation_data(self) -> None:
user = self.example_user("hamlet")
user.default_language = "es"
user.save()
self.login_user(user)
result = self._get_home_page()
self.check_rendered_logged_in_app(result)
page_params = self._get_page_params(result)
self.assertEqual(page_params["user_settings"]["default_language"], "es")
# TODO: This test would likely be better written as a /register
# API test with just the drafts event type, to avoid the
# performance cost of fetching /.
@override_settings(MAX_DRAFTS_IN_REGISTER_RESPONSE=5)
def test_limit_drafts(self) -> None:
draft_objects = []
hamlet = self.example_user("hamlet")
base_time = timezone_now()
initial_count = Draft.objects.count()
step_value = timedelta(seconds=1)
# Create 11 drafts.
# TODO: This would be better done as an API request.
for i in range(0, settings.MAX_DRAFTS_IN_REGISTER_RESPONSE + 1):
draft_objects.append(
Draft(
user_profile=hamlet,
recipient=None,
topic="",
content="sample draft",
last_edit_time=base_time + i * step_value,
)
)
Draft.objects.bulk_create(draft_objects)
# Now fetch the drafts part of the initial state and make sure
# that we only got back settings.MAX_DRAFTS_IN_REGISTER_RESPONSE.
# No more. Also make sure that the drafts returned are the most
# recently edited ones.
self.login("hamlet")
page_params = self._get_page_params(self._get_home_page())
self.assertEqual(page_params["user_settings"]["enable_drafts_synchronization"], True)
self.assert_length(page_params["drafts"], settings.MAX_DRAFTS_IN_REGISTER_RESPONSE)
self.assertEqual(
Draft.objects.count(), settings.MAX_DRAFTS_IN_REGISTER_RESPONSE + 1 + initial_count
)
# +2 for what's already in the test DB.
for draft in page_params["drafts"]:
self.assertNotEqual(draft["timestamp"], base_time)
| 40.089054 | 109 | 0.638837 |
7944af59630238971bbe0542e6869389bb488c26 | 245 | py | Python | python-Learning/JCP001.py | JamesKing9/CS-tips | 2ec34d43cf6808cf1ab647eefb67a55dcf0086a4 | [
"MIT"
] | null | null | null | python-Learning/JCP001.py | JamesKing9/CS-tips | 2ec34d43cf6808cf1ab647eefb67a55dcf0086a4 | [
"MIT"
] | null | null | null | python-Learning/JCP001.py | JamesKing9/CS-tips | 2ec34d43cf6808cf1ab647eefb67a55dcf0086a4 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*-coding: UTF-8 -*-
'''
需求:有数字 1、2、3、4,能组成多少个互不相同且无重复数字的三位数?
'''
for i in range(1, 5):
for j in range(1, 5):
for k in range(1, 5):
if(i != k) and (i != j) and (j != k):
print i, j, k
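# The loops above print all 4 * 3 * 2 = 24 valid triples. An equivalent count
# using the standard library (sketch only; itertools is assumed available):
#
#   from itertools import permutations
#   print len(list(permutations([1, 2, 3, 4], 3)))   # -> 24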
| 20.416667 | 49 | 0.457143 |
7944af5f268ce96101bb356f5ac11ad5a23684d8 | 17,846 | py | Python | angr/procedures/definitions/__init__.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | [
"BSD-2-Clause"
] | 1 | 2021-07-07T11:18:34.000Z | 2021-07-07T11:18:34.000Z | angr/procedures/definitions/__init__.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | [
"BSD-2-Clause"
] | null | null | null | angr/procedures/definitions/__init__.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | [
"BSD-2-Clause"
] | 1 | 2022-02-10T02:29:38.000Z | 2022-02-10T02:29:38.000Z | import copy
import os
import archinfo
from collections import defaultdict
import logging
from ...calling_conventions import DEFAULT_CC
from ...misc import autoimport
from ...sim_type import parse_file
from ..stubs.ReturnUnconstrained import ReturnUnconstrained
from ..stubs.syscall_stub import syscall as stub_syscall
l = logging.getLogger("angr.procedures.definitions")
SIM_LIBRARIES = {}
class SimLibrary(object):
"""
A SimLibrary is the mechanism for describing a dynamic library's API, its functions and metadata.
Any instance of this class (or its subclasses) found in the ``angr.procedures.definitions`` package will be
automatically picked up and added to ``angr.SIM_LIBRARIES`` via all its names.
:ivar fallback_cc: A mapping from architecture to the default calling convention that should be used if no
other information is present. Contains some sane defaults for linux.
:ivar fallback_proc: A SimProcedure class that should be used to provide stub procedures. By default,
``ReturnUnconstrained``.
"""
def __init__(self):
self.procedures = {}
self.non_returning = set()
self.prototypes = {}
self.default_ccs = {}
self.names = []
self.fallback_cc = dict(DEFAULT_CC)
self.fallback_proc = ReturnUnconstrained
def copy(self):
"""
Make a copy of this SimLibrary, allowing it to be mutated without affecting the global version.
:return: A new SimLibrary object with the same library references but different dict/list references
"""
o = SimLibrary()
o.procedures = dict(self.procedures)
o.non_returning = set(self.non_returning)
o.prototypes = dict(self.prototypes)
o.default_ccs = dict(self.default_ccs)
o.names = list(self.names)
return o
def update(self, other):
"""
Augment this SimLibrary with the information from another SimLibrary
:param other: The other SimLibrary
"""
self.procedures.update(other.procedures)
self.non_returning.update(other.non_returning)
self.prototypes.update(other.prototypes)
self.default_ccs.update(other.default_ccs)
@property
def name(self):
"""
The first common name of this library, e.g. libc.so.6, or '??????' if none are known.
"""
return self.names[0] if self.names else '??????'
def set_library_names(self, *names):
"""
Set some common names of this library by which it may be referred during linking
:param names: Any number of string library names may be passed as varargs.
"""
for name in names:
self.names.append(name)
SIM_LIBRARIES[name] = self
def set_default_cc(self, arch_name, cc_cls):
"""
Set the default calling convention used for this library under a given architecture
:param arch_name: The string name of the architecture, i.e. the ``.name`` field from archinfo.
        :param cc_cls: The SimCC class (not an instance!) to use
"""
arch_name = archinfo.arch_from_id(arch_name).name
self.default_ccs[arch_name] = cc_cls
def set_non_returning(self, *names):
"""
Mark some functions in this class as never returning, i.e. loops forever or terminates execution
:param names: Any number of string function names may be passed as varargs
"""
for name in names:
self.non_returning.add(name)
def set_prototype(self, name, proto):
"""
Set the prototype of a function in the form of a SimTypeFunction containing argument and return types
:param name: The name of the function as a string
:param proto: The prototype of the function as a SimTypeFunction
"""
self.prototypes[name] = proto
def set_prototypes(self, protos):
"""
Set the prototypes of many functions
:param protos: Dictionary mapping function names to SimTypeFunction objects
"""
self.prototypes.update(protos)
def set_c_prototype(self, c_decl):
"""
Set the prototype of a function in the form of a C-style function declaration.
:param str c_decl: The C-style declaration of the function.
:return: A tuple of (function name, function prototype)
:rtype: tuple
"""
parsed = parse_file(c_decl)
parsed_decl = parsed[0]
if not parsed_decl:
raise ValueError('Cannot parse the function prototype.')
func_name, func_proto = parsed_decl.items()[0]
self.set_prototype(func_name, func_proto)
return func_name, func_proto
def add(self, name, proc_cls, **kwargs):
"""
        Add a function implementation to the library.
:param name: The name of the function as a string
:param proc_cls: The implementation of the function as a SimProcedure _class_, not instance
:param kwargs: Any additional parameters to the procedure class constructor may be passed as kwargs
"""
self.procedures[name] = proc_cls(display_name=name, **kwargs)
def add_all_from_dict(self, dictionary, **kwargs):
"""
Batch-add function implementations to the library.
:param dictionary: A mapping from name to procedure class, i.e. the first two arguments to add()
:param kwargs: Any additional kwargs will be passed to the constructors of _each_ procedure class
"""
for name, procedure in dictionary.iteritems():
self.add(name, procedure, **kwargs)
def add_alias(self, name, *alt_names):
"""
Add some duplicate names for a given function. The original function's implementation must already be
registered.
:param name: The name of the function for which an implementation is already present
:param alt_names: Any number of alternate names may be passed as varargs
"""
old_procedure = self.procedures[name]
for alt in alt_names:
new_procedure = copy.deepcopy(old_procedure)
new_procedure.display_name = alt
self.procedures[alt] = new_procedure
def _apply_metadata(self, proc, arch):
if proc.cc is None and arch.name in self.default_ccs:
proc.cc = self.default_ccs[arch.name](arch)
if proc.display_name in self.prototypes:
if proc.cc is None:
proc.cc = self.fallback_cc[arch.name](arch)
proc.cc.func_ty = self.prototypes[proc.display_name]
if not proc.ARGS_MISMATCH:
proc.cc.num_args = len(proc.cc.func_ty.args)
proc.num_args = len(proc.cc.func_ty.args)
if proc.display_name in self.non_returning:
proc.returns = False
proc.library_name = self.name
def get(self, name, arch):
"""
Get an implementation of the given function specialized for the given arch, or a stub procedure if none exists.
:param name: The name of the function as a string
:param arch: The architecure to use, as either a string or an archinfo.Arch instance
:return: A SimProcedure instance representing the function as found in the library
"""
if type(arch) is str:
arch = archinfo.arch_from_id(arch)
if name in self.procedures:
proc = copy.deepcopy(self.procedures[name])
self._apply_metadata(proc, arch)
return proc
else:
return self.get_stub(name, arch)
def get_stub(self, name, arch):
"""
Get a stub procedure for the given function, regardless of if a real implementation is available. This will
apply any metadata, such as a default calling convention or a function prototype.
By stub, we pretty much always mean a ``ReturnUnconstrained`` SimProcedure with the appropriate display name
and metadata set. This will appear in ``state.history.descriptions`` as ``<SimProcedure display_name (stub)>``
:param name: The name of the function as a string
:param arch: The architecture to use, as either a string or an archinfo.Arch instance
        :return:        A SimProcedure instance representing a plausible stub as could be found in the library.
"""
proc = self.fallback_proc(display_name=name, is_stub=True)
self._apply_metadata(proc, arch)
return proc
def has_metadata(self, name):
"""
Check if a function has either an implementation or any metadata associated with it
:param name: The name of the function as a string
:return: A bool indicating if anything is known about the function
"""
return self.has_implementation(name) or \
name in self.non_returning or \
name in self.prototypes
def has_implementation(self, name):
"""
Check if a function has an implementation associated with it
:param name: The name of the function as a string
:return: A bool indicating if an implementation of the function is available
"""
return name in self.procedures
def has_prototype(self, func_name):
"""
Check if a function has a prototype associated with it.
:param str func_name: The name of the function.
:return: A bool indicating if a prototype of the function is available.
:rtype: bool
"""
return func_name in self.prototypes
class SimSyscallLibrary(SimLibrary):
"""
SimSyscallLibrary is a specialized version of SimLibrary for dealing not with a dynamic library's API but rather
an operating system's syscall API. Because this interface is inherantly lower-level than a dynamic library, many
parts of this class has been changed to store data based on an "ABI name" (ABI = application binary interface,
like an API but for when there's no programming language) instead of an architecture. An ABI name is just an
arbitrary string with which a calling convention and a syscall numbering is associated.
All the SimLibrary methods for adding functions still work, but now there's an additional layer on top that
associates them with numbers.
"""
def __init__(self):
super(SimSyscallLibrary, self).__init__()
self.syscall_number_mapping = defaultdict(dict)
self.syscall_name_mapping = defaultdict(dict)
self.default_cc_mapping = {}
self.fallback_proc = stub_syscall
def copy(self):
o = SimSyscallLibrary()
o.procedures = dict(self.procedures)
o.non_returning = set(self.non_returning)
o.prototypes = dict(self.prototypes)
o.default_ccs = dict(self.default_ccs)
o.names = list(self.names)
o.syscall_number_mapping = defaultdict(dict, self.syscall_number_mapping) # {abi: {number: name}}
o.syscall_name_mapping = defaultdict(dict, self.syscall_name_mapping) # {abi: {name: number}}
o.default_cc_mapping = dict(self.default_cc_mapping) # {abi: cc}
return o
def update(self, other):
super(SimSyscallLibrary, self).update(other)
self.syscall_number_mapping.update(other.syscall_number_mapping)
self.syscall_name_mapping.update(other.syscall_name_mapping)
self.default_cc_mapping.update(other.default_cc_mapping)
def minimum_syscall_number(self, abi):
"""
:param abi: The abi to evaluate
:return: The smallest syscall number known for the given abi
"""
if abi not in self.syscall_number_mapping or \
not self.syscall_number_mapping[abi]:
return 0
return min(self.syscall_number_mapping[abi])
def maximum_syscall_number(self, abi):
"""
:param abi: The abi to evaluate
:return: The largest syscall number known for the given abi
"""
if abi not in self.syscall_number_mapping or \
not self.syscall_number_mapping[abi]:
return 0
return max(self.syscall_number_mapping[abi])
def add_number_mapping(self, abi, number, name):
"""
Associate a syscall number with the name of a function present in the underlying SimLibrary
:param abi: The abi for which this mapping applies
:param number: The syscall number
:param name: The name of the function
"""
self.syscall_number_mapping[abi][number] = name
self.syscall_name_mapping[abi][name] = number
def add_number_mapping_from_dict(self, abi, mapping):
"""
Batch-associate syscall numbers with names of functions present in the underlying SimLibrary
:param abi: The abi for which this mapping applies
:param mapping: A dict mapping syscall numbers to function names
"""
self.syscall_number_mapping[abi].update(mapping)
self.syscall_name_mapping[abi].update(dict(reversed(i) for i in mapping.items()))
def set_abi_cc(self, abi, cc_cls):
"""
Set the default calling convention for an abi
:param abi: The name of the abi
:param cc_cls: A SimCC _class_, not an instance, that should be used for syscalls using the abi
"""
self.default_cc_mapping[abi] = cc_cls
def _canonicalize(self, number, arch, abi_list):
if type(arch) is str:
arch = archinfo.arch_from_id(arch)
if type(number) is str:
return number, arch, None
for abi in abi_list:
mapping = self.syscall_number_mapping[abi]
if number in mapping:
return mapping[number], arch, abi
return 'sys_%d' % number, arch, None
def _apply_numerical_metadata(self, proc, number, arch, abi):
proc.syscall_number = number
proc.abi = abi
if abi in self.default_cc_mapping:
cc = self.default_cc_mapping[abi](arch)
if proc.cc is not None:
cc.func_ty = proc.cc.func_ty
proc.cc = cc
# pylint: disable=arguments-differ
def get(self, number, arch, abi_list=()):
"""
The get() function for SimSyscallLibrary looks a little different from its original version.
Instead of providing a name, you provide a number, and you additionally provide a list of abi names that are
applicable. The first abi for which the number is present in the mapping will be chosen. This allows for the
easy abstractions of architectures like ARM or MIPS linux for which there are many ABIs that can be used at any
time by using syscall numbers from various ranges. If no abi knows about the number, the stub procedure with
the name "sys_%d" will be used.
:param number: The syscall number
:param arch: The architecture being worked with, as either a string name or an archinfo.Arch
:param abi_list: A list of ABI names that could be used
:return: A SimProcedure representing the implementation of the given syscall, or a stub if no
implementation is available
"""
name, arch, abi = self._canonicalize(number, arch, abi_list)
proc = super(SimSyscallLibrary, self).get(name, arch)
self._apply_numerical_metadata(proc, number, arch, abi)
return proc
def get_stub(self, number, arch, abi_list=()):
"""
Pretty much the intersection of SimLibrary.get_stub() and SimSyscallLibrary.get().
:param number: The syscall number
:param arch: The architecture being worked with, as either a string name or an archinfo.Arch
:param abi_list: A list of ABI names that could be used
        :return:            A SimProcedure representing a plausible stub that could model the syscall
"""
name, arch, abi = self._canonicalize(number, arch, abi_list)
proc = super(SimSyscallLibrary, self).get_stub(name, arch)
self._apply_numerical_metadata(proc, number, arch, abi)
l.debug("unsupported syscall: %s", number)
return proc
def has_metadata(self, number, arch, abi_list=()):
"""
Pretty much the intersection of SimLibrary.has_metadata() and SimSyscallLibrary.get().
:param number: The syscall number
:param arch: The architecture being worked with, as either a string name or an archinfo.Arch
:param abi_list: A list of ABI names that could be used
:return: A bool of whether or not any implementation or metadata is known about the given syscall
"""
name, _, _ = self._canonicalize(number, arch, abi_list)
return super(SimSyscallLibrary, self).has_metadata(name)
def has_implementation(self, number, arch, abi_list=()):
"""
Pretty much the intersection of SimLibrary.has_implementation() and SimSyscallLibrary.get().
:param number: The syscall number
:param arch: The architecture being worked with, as either a string name or an archinfo.Arch
:param abi_list: A list of ABI names that could be used
:return: A bool of whether or not an implementation of the syscall is available
"""
name, _, _ = self._canonicalize(number, arch, abi_list)
return super(SimSyscallLibrary, self).has_implementation(name)
for _ in autoimport.auto_import_modules('angr.procedures.definitions', os.path.dirname(os.path.realpath(__file__))):
pass
| 42.899038 | 119 | 0.652639 |
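A minimal usage sketch for the SimLibrary API defined above; the library name "libfoo.so", the function names and the "AMD64" arch string are illustrative placeholders, and only methods defined in the module are called.

# Hedged sketch, not taken from the angr source tree.
from angr.procedures.definitions import SimLibrary
from angr.procedures.stubs.ReturnUnconstrained import ReturnUnconstrained

lib = SimLibrary()
lib.set_library_names("libfoo.so")            # names used during linking (placeholder)
lib.add("atoi", ReturnUnconstrained)          # register a stub implementation class
lib.add_alias("atoi", "__atoi_alias")         # duplicate name for the same procedure
lib.set_non_returning("exit")                 # mark a function as never returning

proc = lib.get("atoi", "AMD64")               # implementation specialized for the arch
stub = lib.get_stub("missing_fn", "AMD64")    # ReturnUnconstrained stub with metadata applied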
7944af7a04558435d25f966ae9fef8534737ef47 | 2,530 | py | Python | _07_WEB_BROWSER/main.py | khanhtranngoccva/100ProjectsOfCode | ca06ce324c35d150b48a7d8fe5aaba8c06264065 | [
"MIT"
] | 1 | 2021-12-25T13:10:58.000Z | 2021-12-25T13:10:58.000Z | _07_WEB_BROWSER/main.py | khanhtranngoccva/100ProjectsOfCode | ca06ce324c35d150b48a7d8fe5aaba8c06264065 | [
"MIT"
] | null | null | null | _07_WEB_BROWSER/main.py | khanhtranngoccva/100ProjectsOfCode | ca06ce324c35d150b48a7d8fe5aaba8c06264065 | [
"MIT"
] | null | null | null | from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtWebEngineWidgets import *
from PyQt5.QtPrintSupport import *
import os
import sys
homepage = QUrl("https://google.com")
class BrowserWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super().__init__()
# set geometry and initial title
self.setWindowTitle("Oof Browser")
self.browser = QWebEngineView()
self.setCentralWidget(self.browser)
self.browser.setUrl(homepage)
self.browser.urlChanged.connect(self.update_url_bar)
self.browser.loadFinished.connect(self.update_title_bar)
self.status_bar = QStatusBar()
self.setStatusBar(self.status_bar)
self.navbar = QToolBar("Navigation")
self.addToolBar(self.navbar)
self.back = QAction("Back")
self.back.triggered.connect(self.browser.back)
self.back.setStatusTip("Go back to previous page")
self.forward = QAction("Forward")
self.forward.triggered.connect(self.browser.forward)
self.forward.setStatusTip("Go forward to next page")
self.refresh = QAction("Refresh")
self.refresh.triggered.connect(self.browser.reload)
self.home = QAction("Home")
self.home.triggered.connect(self.navigate_home)
self.home.setStatusTip("Go to homepage")
self.url_bar = QLineEdit(self)
self.url_bar.returnPressed.connect(self.navigate_to_url)
self.stop = QAction("Stop")
self.stop.triggered.connect(self.browser.stop)
self.stop.setStatusTip("Stop loading")
for _ in [self.back, self.forward, self.refresh, self.home]:
self.navbar.addAction(_)
self.navbar.addWidget(self.url_bar)
self.navbar.addAction(self.stop)
def navigate_home(self):
self.browser.setUrl(homepage)
def navigate_to_url(self):
target_url = QUrl(self.url_bar.text())
if target_url.scheme() == "":
target_url.setScheme("https")
self.browser.setUrl(target_url)
# happens if the page redirects, and url changes without input
def update_url_bar(self, q):
self.url_bar.setText(q.toString())
self.url_bar.setCursorPosition(0)
def update_title_bar(self):
self.setWindowTitle(f'{self.browser.page().title()} - Oof Browser')
if __name__ == '__main__':
root_app = QApplication(sys.argv)
browser_window = BrowserWindow()
browser_window.show()
root_app.exec_()
| 31.625 | 75 | 0.670751 |
7944b0b73266b3ed34ea4152422a8ae1cc8e2c41 | 5,789 | py | Python | asapy/out_builder/html_builder.py | mlindauer/asapy | ec1f202b5f612a366f1e4aee7badc55798bdc67d | [
"MIT"
] | 5 | 2017-05-17T15:51:29.000Z | 2021-03-26T18:17:00.000Z | asapy/out_builder/html_builder.py | mlindauer/asapy | ec1f202b5f612a366f1e4aee7badc55798bdc67d | [
"MIT"
] | 5 | 2016-10-11T09:40:32.000Z | 2018-04-16T13:55:43.000Z | asapy/out_builder/html_builder.py | mlindauer/asapy | ec1f202b5f612a366f1e4aee7badc55798bdc67d | [
"MIT"
] | 2 | 2019-11-24T02:41:55.000Z | 2020-12-18T06:02:26.000Z | import argparse
import logging
import sys
import os
import shutil
import inspect
from traceback import print_exc
from collections import namedtuple
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
__author__ = "Marius Lindauer"
__copyright__ = "Copyright 2016, ML4AAD"
__license__ = "MIT"
__email__ = "[email protected]"
class HTMLBuilder(object):
def __init__(self,
output_dn:str,
scenario_name:str):
'''
Constructor
Arguments
---------
output_dn:str
output directory name
scenario_name:str
name of scenario
'''
self.logger = logging.getLogger("HTMLBuilder")
self.own_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
self.output_dn = output_dn
self.header='''
<!DOCTYPE html>
<html>
<head>
<title>ASAPy for {0}</title>
<link href="css/accordion.css" rel="stylesheet" />
<link href="css/table.css" rel="stylesheet" />
<link href="css/lightbox.min.css" rel="stylesheet" />
<link href="css/help-tip.css" rel="stylesheet" />
<link href="css/global.css" rel="stylesheet" />
<link href="css/back-to-top.css" rel="stylesheet" />
</head>
<body>
<script src="http://www.w3schools.com/lib/w3data.js"></script>
<script src="js/lightbox-plus-jquery.min.js"></script>
<header>
<div class='l-wrapper'>
<img class='logo logo--coseal' src="images/COSEAL_small.png" />
<img class='logo logo--ml' src="images/ml4aad.png" />
</div>
</header>
<div class='l-wrapper'>
<h1>{0}</h1>
'''.format(scenario_name)
self.footer = '''
</div>
<footer>
<div class='l-wrapper'>
Powered by <a href="http://www.coseal.net">COSEAL</a> and <a href="http://www.ml4aad.org">ML4AAD</a> -- <a href="https://github.com/mlindauer/asapy">Open Source Code</a>
</div>
</footer>
</body>
<script>
var acc = document.getElementsByClassName("accordion");
var i;
for (i = 0; i < acc.length; i++) {
acc[i].onclick = function(){
this.classList.toggle("active");
this.nextElementSibling.classList.toggle("show");
}
}
</script>
<script src="js/back-to-top.js"></script>
</html>
'''
def generate_html(self, data_dict:dict):
'''
Arguments
---------
        data_dict : OrderedDict
            {"top1" : {
                  "tooltip": str|None,
                  "subtop1": {  # a nested dict generates a further accordion button
                       "tooltip": str|None,
                       ...
                       },
                  "table": str|None (html table),
                  "figure" : str|None (file name)
               },
             "top2" : { ... }}
'''
html = ""
html += self.header
for k,v in data_dict.items():
html = self.add_layer(html_str=html, layer_name=k, data_dict=v)
html += self.footer
with open(os.path.join(self.output_dn, "report.html"), "w") as fp:
fp.write(html)
try:
if not os.path.isdir(os.path.join(self.output_dn,"css")):
shutil.copytree(os.path.join(self.own_folder, "web_files", "css"), os.path.join(self.output_dn,"css"))
except OSError:
print_exc()
try:
if not os.path.isdir(os.path.join(self.output_dn,"images")):
shutil.copytree(os.path.join(self.own_folder, "web_files", "images"), os.path.join(self.output_dn,"images"))
except OSError:
print_exc()
try:
if not os.path.isdir(os.path.join(self.output_dn,"js")):
shutil.copytree(os.path.join(self.own_folder, "web_files", "js"), os.path.join(self.output_dn,"js"))
except OSError:
print_exc()
try:
if not os.path.isdir(os.path.join(self.output_dn,"font")):
shutil.copytree(os.path.join(self.own_folder, "web_files", "font"), os.path.join(self.output_dn,"font"))
except OSError:
print_exc()
def add_layer(self, html_str:str, layer_name, data_dict:dict):
'''
add a further layer of top data_dict keys
'''
tooltip = ""
if data_dict.get("tooltip"):
tooltip = "<div class=\"help-tip\"><p>{}</p></div>".format(data_dict.get("tooltip"))
html_str += "<button class=\"accordion\">{0} {1}</button>\n".format(layer_name,tooltip)
html_str += "<div class=\"panel\">\n"
for k, v in data_dict.items():
if isinstance(v, dict):
html_str = self.add_layer(html_str, k, v)
elif k == "figure":
html_str +="<div align=\"center\">\n"
html_str +="<a href=\"{0}\" data-lightbox=\"{0}\" data-title=\"{0}\"><img src=\"{0}\" alt=\"Plot\" width=\"600px\"></a>\n".format(v[len(self.output_dn):].lstrip("/"))
html_str +="</div>\n"
elif k == "table":
html_str += "<div align=\"center\">\n{}\n</div>\n".format(v)
elif k == "html":
html_str += "<div align=\"center\">\n<a href='{}'>Interactive Plot</a>\n</div>\n".format(v[len(self.output_dn):].lstrip("/"))
#html_str += "<div align=\"center\"><iframe src='{}' frameborder='0' scrolling='no' width='700px' height='500px'></iframe></div>\n".format(v[len(self.output_dn):].lstrip("/"))
html_str += "</div>"
return html_str | 34.254438 | 191 | 0.536708 |
7944b2bff17a0e37670baa6328342abd0e8cc23f | 3,231 | py | Python | src/scs_core/data/lin_regress.py | south-coast-science/scs_core | 81ad4010abb37ca935f3a31ac805639ef53b1bcf | [
"MIT"
] | 3 | 2019-03-12T01:59:58.000Z | 2020-09-12T07:27:42.000Z | src/scs_core/data/lin_regress.py | south-coast-science/scs_core | 81ad4010abb37ca935f3a31ac805639ef53b1bcf | [
"MIT"
] | 1 | 2018-04-20T07:58:38.000Z | 2021-03-27T08:52:45.000Z | src/scs_core/data/lin_regress.py | south-coast-science/scs_core | 81ad4010abb37ca935f3a31ac805639ef53b1bcf | [
"MIT"
] | 4 | 2017-09-29T13:08:43.000Z | 2019-10-09T09:13:58.000Z | """
Created on 18 Sep 2021
@author: Bruno Beloff ([email protected])
A high-performance linear regression utility requiring scipy
https://www.w3schools.com/python/python_ml_linear_regression.asp
"""
from collections import OrderedDict
from scipy import stats
from scs_core.data.json import JSONable
# --------------------------------------------------------------------------------------------------------------------
class LinRegress(JSONable):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct(cls, x, y, prec=6):
if len(x) != len(y):
raise ValueError("len x (%d) is not the same as len y (%d)" % (len(x), len(y)))
slope, intercept, r, p, std_err = stats.linregress(x, y)
return cls(len(x), round(slope, prec), round(intercept, prec), round(r**2, prec), round(p, prec),
round(std_err, prec))
@classmethod
def construct_from_jdict(cls, jdict):
if not jdict:
return None
count = jdict.get('count')
slope = jdict.get('slope')
intercept = jdict.get('intercept')
r2 = jdict.get('r2')
p = jdict.get('p')
std_err = jdict.get('std-err')
return cls(count, slope, intercept, r2, p, std_err)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, count, slope, intercept, r2, p, std_err):
"""
Constructor
"""
self.__count = count # int
self.__slope = slope # float
self.__intercept = intercept # float
self.__r2 = r2 # float
self.__p = p # float
self.__std_err = std_err # float
def __len__(self):
return self.__count
# ----------------------------------------------------------------------------------------------------------------
@property
def slope(self):
return self.__slope
@property
def intercept(self):
return self.__intercept
@property
def r2(self):
return self.__r2
@property
def p(self):
return self.__p
@property
def std_err(self):
return self.__std_err
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['count'] = len(self)
jdict['slope'] = self.slope
jdict['intercept'] = self.intercept
jdict['r2'] = self.r2
jdict['p'] = self.p
jdict['std-err'] = self.std_err
return jdict
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "LinRegress:{slope:%s, intercept:%s, r2:%s, p:%s, std_err:%s}" % \
(self.slope, self.intercept, self.r2, self.p, self.std_err)
| 27.151261 | 118 | 0.42216 |
7944b303dce10ba162b7f0db76d88511c9a45cec | 7,833 | py | Python | xclim/sdba/detrending.py | ClimateImpactLab/xclim | b4877050223c58afc1089961a2a516bda26d3917 | [
"Apache-2.0"
] | null | null | null | xclim/sdba/detrending.py | ClimateImpactLab/xclim | b4877050223c58afc1089961a2a516bda26d3917 | [
"Apache-2.0"
] | 1 | 2021-03-29T18:39:50.000Z | 2021-04-05T19:16:05.000Z | xclim/sdba/detrending.py | ClimateImpactLab/xclim | b4877050223c58afc1089961a2a516bda26d3917 | [
"Apache-2.0"
] | 1 | 2021-03-02T20:12:28.000Z | 2021-03-02T20:12:28.000Z | """Detrending objects."""
from typing import Union
import xarray as xr
from .base import Grouper, ParametrizableWithDataset, parse_group
from .loess import loess_smoothing
from .utils import ADDITIVE, apply_correction, invert
class BaseDetrend(ParametrizableWithDataset):
"""Base class for detrending objects.
    Defines four methods:
fit(da) : Compute trend from da and return a new _fitted_ Detrend object.
get_trend(da): Return the fitted trend along da's coordinate.
detrend(da) : Return detrended array.
retrend(da) : Puts trend back on da.
* Subclasses should implement _fit() and _get_trend(). Both will be called in a `group.apply()`.
`_fit()` is called with the dataarray and str `dim` that indicates the fitting dimension,
it should return a dataset that will be set as `.fitds`.
`_get_trend()` is called with .fitds broadcasted on the main dim of the input DataArray.
"""
@parse_group
def __init__(
self, *, group: Union[Grouper, str] = "time", kind: str = "+", **kwargs
):
"""Initialize Detrending object.
Parameters
----------
group : Union[str, Grouper]
The grouping information. See :py:class:`xclim.sdba.base.Grouper` for details.
The fit is performed along the group's main dim.
kind : {'*', '+'}
The way the trend is removed or added, either additive or multiplicative.
"""
super().__init__(group=group, kind=kind, **kwargs)
@property
def __fitted(self):
return hasattr(self, "ds")
def fit(self, da: xr.DataArray):
"""Extract the trend of a DataArray along a specific dimension.
Returns a new object storing the fit data that can be used for detrending and retrending.
"""
new = self.__class__(**self.parameters)
new.set_dataset(new.group.apply(new._fit, da, main_only=True))
return new
def get_trend(self, da: xr.DataArray):
"""Get the trend computed from the fit, along the self.group.dim as found on da.
        If da is a DataArray (and has a "dtype" attribute), the trend is cast to have the same dtype.
"""
out = self.group.apply(
self._get_trend,
{self.group.dim: da[self.group.dim], **self.ds.data_vars},
main_only=True,
)
if hasattr(da, "dtype"):
out = out.astype(da.dtype)
return out
def detrend(self, da: xr.DataArray):
"""Remove the previously fitted trend from a DataArray."""
if not self.__fitted:
raise ValueError("You must call fit() before detrending.")
trend = self.get_trend(da)
return self._detrend(da, trend)
def retrend(self, da: xr.DataArray):
"""Replace the previously fitted trend on a DataArray."""
if not self.__fitted:
raise ValueError("You must call fit() before retrending")
trend = self.get_trend(da)
return self._retrend(da, trend)
def _detrend(self, da, trend):
# Remove trend from series
return apply_correction(da, invert(trend, self.kind), self.kind)
def _retrend(self, da, trend):
# Add trend to series
return apply_correction(da, trend, self.kind)
def _get_trend(self, grpd, dim="time"):
raise NotImplementedError
def _fit(self, da):
raise NotImplementedError
class NoDetrend(BaseDetrend):
"""Convenience class for polymorphism. Does nothing."""
def _fit(self, da, dim=None):
return da.isel({dim: 0})
def _detrend(self, da, trend):
return da
def _retrend(self, da, trend):
return da
class MeanDetrend(BaseDetrend):
"""Simple detrending removing only the mean from the data, quite similar to normalizing."""
def _fit(self, da, dim="time"):
mean = da.mean(dim=dim)
mean.name = "mean"
return mean
def _get_trend(self, grpd, dim="time"):
return grpd.mean
class PolyDetrend(BaseDetrend):
"""
Detrend time series using a polynomial regression.
Parameters
----------
group : Union[str, Grouper]
The grouping information. See :py:class:`xclim.sdba.base.Grouper` for details.
The fit is performed along the group's main dim.
kind : {'*', '+'}
The way the trend is removed or added, either additive or multiplicative.
degree : int
The order of the polynomial to fit.
preserve_mean : bool
Whether to preserve the mean when de/re-trending. If True, the trend has its mean
removed before it is used.
"""
def __init__(self, group="time", kind=ADDITIVE, degree=4, preserve_mean=False):
super().__init__(
group=group, kind=kind, degree=degree, preserve_mean=preserve_mean
)
def _fit(self, da, dim="time"):
return da.polyfit(dim=dim, deg=self.degree)
def _get_trend(self, grpd, dim="time"):
# Estimate trend over da
trend = xr.polyval(coord=grpd[dim], coeffs=grpd.polyfit_coefficients)
if self.preserve_mean:
trend = apply_correction(
trend, invert(trend.mean(dim=dim), self.kind), self.kind
)
return trend
class LoessDetrend(BaseDetrend):
"""
Detrend time series using a LOESS regression.
The fit is a piecewise linear regression. For each point, the contribution of all
neighbors is weighted by a bell-shaped curve (gaussian) with parameters sigma (std).
The x-coordinate of the dataarray is scaled to [0,1] before the regression is computed.
Parameters
----------
group : Union[str, Grouper]
The grouping information. See :py:class:`xclim.sdba.base.Grouper` for details.
The fit is performed along the group's main dim.
kind : {'*', '+'}
The way the trend is removed or added, either additive or multiplicative.
d: [0, 1]
Order of the local regression. Only 0 and 1 currently implemented.
f : float
        Parameter controlling the span of the weights, between 0 and 1.
niter : int
Number of robustness iterations to execute.
weights : ["tricube", "gaussian"]
Shape of the weighting function:
"tricube" : a smooth top-hat like curve, f gives the span of non-zero values.
"gaussian" : a gaussian curve, f gives the span for 95% of the values.
Notes
-----
LOESS smoothing is computationally expensive. As it relies on a loop on gridpoints, it
can be useful to use smaller than usual chunks.
Moreover, it suffers from heavy boundary effects. As a rule of thumb, the outermost N * f/2 points
should be considered dubious. (N is the number of points along each group)
"""
def __init__(
self, group="time", kind=ADDITIVE, f=0.2, niter=1, d=0, weights="tricube"
):
        super().__init__(group=group, kind=kind, f=f, niter=niter, d=d, weights=weights)
def _fit(self, da, dim="time"):
trend = loess_smoothing(
da,
dim=self.group.dim,
f=self.f,
niter=self.niter,
d=self.d,
weights=self.weights,
)
trend.name = "trend"
return trend.to_dataset()
def get_trend(self, da: xr.DataArray):
"""Get the trend computed from the fit, along the self.group.dim as found on da.
        If da is a DataArray (and has a "dtype" attribute), the trend is cast to have the same dtype.
"""
# Check if we need to interpolate
if da[self.group.dim].equals(self.ds[self.group.dim]):
out = self.ds.trend
else:
out = self.ds.trend.interp(coords={self.group.dim: da[self.group.dim]})
if hasattr(da, "dtype"):
out = out.astype(da.dtype)
return out
| 34.506608 | 103 | 0.632069 |
7944b30e6aa8ed9876f67b5e8a4694b7c0c82c29 | 1,132 | py | Python | ssh-for-lab/labssh/myssh.py | LogicJake/tools | 9630dcdbeec92ccc1548bb527fc5268039bb79b8 | [
"MIT"
] | null | null | null | ssh-for-lab/labssh/myssh.py | LogicJake/tools | 9630dcdbeec92ccc1548bb527fc5268039bb79b8 | [
"MIT"
] | null | null | null | ssh-for-lab/labssh/myssh.py | LogicJake/tools | 9630dcdbeec92ccc1548bb527fc5268039bb79b8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: LogicJake
# @Date: 2018-12-30 14:18:43
# @Last Modified time: 2018-12-30 18:42:59
from __future__ import print_function
import os
import requests
import re
class NoIPFoundException(Exception):
def __init__(self, err='fail to find IP from the website'):
Exception.__init__(self, err)
def get_ip():
ip_address = 'http://59.110.167.236/ip.html'
response = requests.get(ip_address).text
ip = re.findall(r'\d+\.\d+\.\d+\.\d+', response)
if len(ip) != 1:
raise NoIPFoundException()
return ip[0]
def ssh(ssh_command):
ip = get_ip()
if len(ssh_command) == 0:
print(ip)
exit(1)
index = None
for i, part in enumerate(ssh_command):
if '@' in part:
index = i
if index is None:
exit(1)
user_host = ssh_command[index]
user, hostname = user_host.split('@')
if hostname == 'lab':
user_host = "{}@{}".format(user, ip)
ssh_command[index] = user_host
new_command = " ".join(ssh_command)
convert_ssh_command = "ssh " + new_command
os.system(convert_ssh_command)
| 22.196078 | 63 | 0.613958 |
7944b3e0d7cd2285d0d18145e9b9b39d4f66b0a3 | 1,895 | py | Python | data.py | ConorFoy/neural-processes | b40219ab26c209f5675d77549e372aa6ac0a6bb4 | [
"Apache-2.0"
] | null | null | null | data.py | ConorFoy/neural-processes | b40219ab26c209f5675d77549e372aa6ac0a6bb4 | [
"Apache-2.0"
] | null | null | null | data.py | ConorFoy/neural-processes | b40219ab26c209f5675d77549e372aa6ac0a6bb4 | [
"Apache-2.0"
] | 2 | 2020-03-06T10:28:39.000Z | 2020-04-05T12:06:59.000Z | import itertools
from midi_to_statematrix import upperBound, lowerBound
def startSentinel():
def noteSentinel(note):
position = note
part_position = [position]
pitchclass = (note + lowerBound) % 12
part_pitchclass = [int(i == pitchclass) for i in range(12)]
return part_position + part_pitchclass + [0]*66 + [1]
return [noteSentinel(note) for note in range(upperBound-lowerBound)]
def getOrDefault(l, i, d):
try:
return l[i]
except IndexError:
return d
def buildContext(state):
context = [0]*12
for note, notestate in enumerate(state):
if notestate[0] == 1:
pitchclass = (note + lowerBound) % 12
context[pitchclass] += 1
return context
def buildBeat(time):
return [2*x-1 for x in [time%2, (time//2)%2, (time//4)%2, (time//8)%2]]
def noteInputForm(note, state, context, beat):
position = note
part_position = [position]
pitchclass = (note + lowerBound) % 12
part_pitchclass = [int(i == pitchclass) for i in range(12)]
# Concatenate the note states for the previous vicinity
part_prev_vicinity = list(itertools.chain.from_iterable((getOrDefault(state, note+i, [0,0]) for i in range(-12, 13))))
part_context = context[pitchclass:] + context[:pitchclass]
return part_position + part_pitchclass + part_prev_vicinity + part_context + beat + [0]
def noteStateSingleToInputForm(state,time):
beat = buildBeat(time)
context = buildContext(state)
return [noteInputForm(note, state, context, beat) for note in range(len(state))]
def noteStateMatrixToInputForm(statematrix):
# NOTE: May have to transpose this or transform it in some way to make Theano like it
#[startSentinel()] +
inputform = [ noteStateSingleToInputForm(state,time) for time,state in enumerate(statematrix) ]
return inputform | 35.092593 | 122 | 0.670185 |
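A short walkthrough of the per-note input vector assembled by noteInputForm above: 1 (position) + 12 (pitch class) + 50 (vicinity, 25 notes x 2 states) + 12 (context) + 4 (beat) + 1 (trailing zero) gives 80 features per note. The dummy state matrix below is illustrative and assumes the module above is importable as "data".

from midi_to_statematrix import upperBound, lowerBound
from data import noteStateMatrixToInputForm      # module shown above; import path assumed

n_notes = upperBound - lowerBound
# statematrix[time][note] = [played, articulated]
statematrix = [[[0, 0] for _ in range(n_notes)] for _ in range(16)]
statematrix[0][5] = [1, 1]                       # a single articulated note at t = 0

inputform = noteStateMatrixToInputForm(statematrix)
print(len(inputform), len(inputform[0]), len(inputform[0][0]))   # 16, n_notes, 80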
7944b42873efc1a15be3dec22cff14e0b6e4e09b | 18,360 | py | Python | src/talon/post/create_abundance_file_from_database.py | vinay-swamy/TALON | ce6f403035a9697334518c39bfe56a4550884699 | [
"MIT"
] | 47 | 2020-03-31T19:56:11.000Z | 2022-03-31T18:00:21.000Z | src/talon/post/create_abundance_file_from_database.py | vinay-swamy/TALON | ce6f403035a9697334518c39bfe56a4550884699 | [
"MIT"
] | 44 | 2020-03-23T02:15:08.000Z | 2022-03-30T17:27:26.000Z | src/talon/post/create_abundance_file_from_database.py | vinay-swamy/TALON | ce6f403035a9697334518c39bfe56a4550884699 | [
"MIT"
] | 11 | 2020-05-13T18:41:23.000Z | 2021-12-28T07:48:58.000Z | # TALON: Techonology-Agnostic Long Read Analysis Pipeline
# Author: Dana Wyman
# -----------------------------------------------------------------------------
# create_abundance_file_from_database.py is a utility that outputs the abundance
# for each transcript in the TALON database across datasets. The output can be
# restricted by the filtering (whitelist) option.
import sqlite3
import itertools
import operator
from optparse import OptionParser
from pathlib import Path
from . import filter_talon_transcripts as filt
from .. import dstruct as dstruct
from .. import length_utils as lu
from . import post_utils as putils
from .. import query_utils as qutils
from .. import talon as talon
def getOptions():
parser = OptionParser()
parser.add_option("--db", dest = "database",
help = "TALON database", metavar = "FILE", type = "string")
parser.add_option("--annot", "-a", dest = "annot",
help = """Which annotation version to use. Will determine which
annotation transcripts are considered known or novel
relative to. Note: must be in the TALON database.""",
type = "string")
parser.add_option("--whitelist", dest = "whitelist",
help = "Whitelist file of transcripts to include in the \
output. First column should be TALON gene ID, \
second column should be TALON transcript ID",
metavar = "FILE", type = "string", default = None)
parser.add_option("--build", "-b", dest = "build",
help = "Genome build to use. Note: must be in the TALON database.",
type = "string")
parser.add_option("--datasets", "-d", dest = "datasets_file",
help = """Optional: A file indicating which datasets should be
included (one dataset name per line). Default is to include
all datasets.""",
metavar = "FILE", type = "string", default = None)
parser.add_option("--o", dest = "outprefix", help = "Prefix for output file",
metavar = "FILE", type = "string")
(options, args) = parser.parse_args()
return options
def create_outname(options):
""" Creates filename for the output GTF that reflects the input options that
were used. """
outname = options.outprefix + "_talon_abundance"
if options.whitelist != None:
outname = "_".join([ outname, "filtered" ])
outname += ".tsv"
return outname
def fetch_dataset_list(dataset_file, database):
""" Gets a list of all datasets in the database """
conn = sqlite3.connect(database)
cursor = conn.cursor()
all_db_datasets = qutils.fetch_all_datasets(cursor)
conn.close()
if dataset_file == None:
return all_db_datasets
else:
datasets = []
with open(dataset_file, 'r') as f:
for line in f:
dataset = line.strip()
if dataset not in all_db_datasets:
raise ValueError("Dataset name '%s' not found in database" \
% (dataset))
datasets.append(dataset)
return datasets
def create_abundance_dict(database, datasets):
"""Process the abundance table by dataset in order to create a dictionary
data structure organized like this:
transcript_ID -> dataset -> abundance in that dataset
"""
abundance = {}
conn = sqlite3.connect(database)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
for dataset in datasets:
query = """ SELECT transcript_ID, count FROM abundance
WHERE dataset = '%s' """ % dataset
cursor.execute(query)
for transcript in cursor.fetchall():
transcript_ID = transcript["transcript_ID"]
count = transcript["count"]
if transcript_ID in abundance:
abundance[transcript_ID][dataset] = count
else:
abundance[transcript_ID] = {}
abundance[transcript_ID][dataset] = count
conn.close()
return abundance
def fetch_abundances(database, datasets, annot, whitelist):
"""Constructs a query to get the following information for every
whitelisted transcript:
1) TALON gene ID
2) TALON transcript ID
3) Gene ID (from annotation specified in 'annot', None otherwise)
4) Transcript ID (from annotation specified in 'annot', None otherwise)
5) Gene name (from annotation specified in 'annot', None otherwise)
6) Transcript name (from annotation specified in 'annot', None otherwise)
7) number of exons in transcript
Returns a list of tuples (one tuple per transcript)
"""
# datasets = fetch_dataset_list(database)
abundance = create_abundance_dict(database, datasets)
col_query = """SELECT
t.gene_ID,
t.transcript_ID,
ga_id.value AS annot_gene_id,
ta_id.value AS annot_transcript_id,
ga_name.value AS annot_gene_name,
ta_name.value AS annot_transcript_name,
t.n_exons"""
conn = sqlite3.connect(database)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
whitelist_string = "WHERE t.transcript_ID IN (" + ','.join(whitelist) + ");"
name_status_query = """
FROM transcripts t
LEFT JOIN gene_annotations ga_id ON t.gene_ID = ga_id.ID
AND ga_id.annot_name = '%s'
AND ga_id.attribute = 'gene_id'
LEFT JOIN transcript_annotations ta_id ON t.transcript_ID = ta_id.ID
AND ta_id.annot_name = '%s'
AND ta_id.attribute = 'transcript_id'
LEFT JOIN gene_annotations ga_name ON t.gene_ID = ga_name.ID
AND ga_name.annot_name = '%s'
AND ga_name.attribute = 'gene_name'
LEFT JOIN transcript_annotations ta_name ON t.transcript_ID = ta_name.ID
AND ta_name.annot_name = '%s'
AND ta_name.attribute = 'transcript_name'
""" % (annot, annot, annot, annot)
full_query = "\n".join([col_query, name_status_query, whitelist_string])
try:
abundance_tuples = (cursor.execute(full_query)).fetchall()
colnames = list(abundance_tuples[0].keys()) + list(datasets)
except Exception as e:
print(e)
raise RuntimeError("Something went wrong with the database query")
conn.close()
# Now iterate over the query results to incorporate the abundance information
final_abundance = []
for entry in abundance_tuples:
transcript_ID = entry["transcript_ID"]
if transcript_ID not in abundance:
continue
# Get abundance of this transcript in each dataset
dataset_counts = []
for dataset in datasets:
if dataset in abundance[transcript_ID]:
dataset_counts.append(str(abundance[transcript_ID][dataset]))
else:
dataset_counts.append("0")
# Combine abundance info with rest of transcript information
combined_entry = list(entry) + dataset_counts
final_abundance.append(combined_entry)
return final_abundance, colnames
def write_abundance_file(abundances, col_names, prefix, n_places, datasets,
novelty_types, transcript_lengths, outfile):
""" Writes abundances and metadata to an output file """
o = open(outfile, 'w')
novelty_type_cols = ["gene_novelty", "transcript_novelty", "ISM_subtype"]
first_dataset_index = len(col_names) - len(datasets)
first_colnames = col_names[0:first_dataset_index]
dataset_colnames = col_names[first_dataset_index:]
all_colnames = first_colnames + ["length"] + novelty_type_cols + dataset_colnames
o.write("\t".join(all_colnames) + "\n")
abundance_list = [list(elem) for elem in abundances]
# Find indices of columns that may need 'None' replaced
gene_ID_index = all_colnames.index("gene_ID")
transcript_ID_index = all_colnames.index("transcript_ID")
annot_gene_ID_index = all_colnames.index("annot_gene_id")
annot_transcript_ID_index = all_colnames.index("annot_transcript_id")
gene_name_index = all_colnames.index("annot_gene_name")
transcript_name_index = all_colnames.index("annot_transcript_name")
dataset_indices = [i for i,s in enumerate(all_colnames) if s in set(datasets)]
# Iterate over abundances, fixing Nones, and write to file
for transcript in abundances:
curr_novelty = get_gene_and_transcript_novelty_types(transcript[gene_ID_index],
transcript[transcript_ID_index],
novelty_types)
transcript = list(transcript)
transcript = transcript[0:first_dataset_index] + \
[transcript_lengths[transcript[transcript_ID_index]]] + \
[ curr_novelty[x] for x in novelty_type_cols] + \
transcript[first_dataset_index:]
alt_gene_name, alt_transcript_name = talon.construct_names(transcript[gene_ID_index], \
transcript[transcript_ID_index], \
prefix, n_places)
if transcript[annot_gene_ID_index] == None:
transcript[annot_gene_ID_index] = alt_gene_name
if transcript[gene_name_index] == None:
transcript[gene_name_index] = alt_gene_name
if transcript[annot_transcript_ID_index] == None:
transcript[annot_transcript_ID_index] = alt_transcript_name
if transcript[transcript_name_index] == None:
transcript[transcript_name_index] = alt_transcript_name
for index in dataset_indices:
if transcript[index] == None:
transcript[index] = 0
o.write("\t".join([str(x) for x in transcript]) + "\n")
o.close()
return
def get_gene_and_transcript_novelty_types(gene_ID, transcript_ID, novelty_type):
""" Look up gene and transcript IDs in data structure to determine which types
of novelty are present """
curr_novel = {}
# Look for gene type
if gene_ID in novelty_type.antisense_genes:
curr_novel["gene_novelty"] = "Antisense"
elif gene_ID in novelty_type.intergenic_genes:
curr_novel["gene_novelty"] = "Intergenic"
elif gene_ID in novelty_type.known_genes:
curr_novel["gene_novelty"] = "Known"
else:
print("Warning: Could not locate novelty type for gene %s" % gene_ID)
# Look for transcript type
if transcript_ID in novelty_type.ISM_transcripts:
curr_novel["transcript_novelty"] = "ISM"
elif transcript_ID in novelty_type.NIC_transcripts:
curr_novel["transcript_novelty"] = "NIC"
elif transcript_ID in novelty_type.NNC_transcripts:
curr_novel["transcript_novelty"] = "NNC"
elif transcript_ID in novelty_type.antisense_transcripts:
curr_novel["transcript_novelty"] = "Antisense"
elif transcript_ID in novelty_type.intergenic_transcripts:
curr_novel["transcript_novelty"] = "Intergenic"
elif transcript_ID in novelty_type.genomic_transcripts:
curr_novel["transcript_novelty"] = "Genomic"
elif transcript_ID in novelty_type.known_transcripts:
curr_novel["transcript_novelty"] = "Known"
else:
print("Warning: Could not locate novelty type for transcript %s" % transcript_ID)
# Look for ISM subtype
if transcript_ID in novelty_type.ISM_prefix and \
transcript_ID in novelty_type.ISM_suffix:
curr_novel["ISM_subtype"] = "Both"
elif transcript_ID in novelty_type.ISM_prefix:
curr_novel["ISM_subtype"] = "Prefix"
elif transcript_ID in novelty_type.ISM_suffix:
curr_novel["ISM_subtype"] = "Suffix"
else:
curr_novel["ISM_subtype"] = "None"
return curr_novel
def check_annot_validity(annot, database):
""" Make sure that the user has entered a correct annotation name """
conn = sqlite3.connect(database)
cursor = conn.cursor()
cursor.execute("SELECT DISTINCT annot_name FROM gene_annotations")
annotations = [str(x[0]) for x in cursor.fetchall()]
conn.close()
if "TALON" in annotations:
annotations.remove("TALON")
if annot == None:
message = "Please provide a valid annotation name. " + \
"In this database, your options are: " + \
", ".join(annotations)
raise ValueError(message)
if annot not in annotations:
message = "Annotation name '" + annot + \
"' not found in this database. Try one of the following: " + \
", ".join(annotations)
raise ValueError(message)
return
def check_build_validity(build, database):
""" Make sure that the user has entered a correct build name """
conn = sqlite3.connect(database)
cursor = conn.cursor()
cursor.execute("SELECT name FROM genome_build")
builds = [str(x[0]) for x in cursor.fetchall()]
conn.close()
if build == None:
message = "Please provide a valid genome build name. " + \
"In this database, your options are: " + \
", ".join(builds)
raise ValueError(message)
if build not in builds:
message = "Build name '" + build + \
"' not found in this database. Try one of the following: " + \
", ".join(builds)
raise ValueError(message)
return
def make_novelty_type_struct(database, datasets):
""" Create a data structure where it is possible to look up whether a gene
or transcript belongs to a particular category of novelty"""
conn = sqlite3.connect(database)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
novelty_type = dstruct.Struct()
novelty_type.known_genes = set(qutils.fetch_all_known_genes_detected(cursor, datasets))
novelty_type.antisense_genes = set(qutils.fetch_antisense_genes(cursor, datasets))
novelty_type.intergenic_genes = set(qutils.fetch_intergenic_novel_genes(cursor, datasets))
novelty_type.known_transcripts = set(qutils.fetch_all_known_transcripts_detected(cursor, datasets))
novelty_type.ISM_transcripts = set(qutils.fetch_all_ISM_transcripts(cursor, datasets))
novelty_type.ISM_prefix = set(qutils.fetch_prefix_ISM_transcripts(cursor, datasets))
novelty_type.ISM_suffix = set(qutils.fetch_suffix_ISM_transcripts(cursor, datasets))
novelty_type.NIC_transcripts = set(qutils.fetch_NIC_transcripts(cursor, datasets))
novelty_type.NNC_transcripts = set(qutils.fetch_NNC_transcripts(cursor, datasets))
novelty_type.antisense_transcripts = set(qutils.fetch_antisense_transcripts(cursor, datasets))
novelty_type.intergenic_transcripts = set(qutils.fetch_intergenic_transcripts(cursor, datasets))
novelty_type.genomic_transcripts = set(qutils.fetch_genomic_transcripts(cursor, datasets))
conn.close()
return novelty_type
def fetch_naming_prefix(database):
""" Get naming prefix from the database run_info table """
conn = sqlite3.connect(database)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute("SELECT value FROM run_info WHERE item = 'idprefix'")
prefix = cursor.fetchone()[0]
conn.close()
return prefix
def fetch_n_places(database):
""" Get length of name field from the database run_info table """
conn = sqlite3.connect(database)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute("SELECT value FROM run_info WHERE item = 'n_places'")
n_places = cursor.fetchone()[0]
conn.close()
return int(n_places)
def get_transcript_lengths(database, build):
""" Read the transcripts from the database. Then compute the lengths.
Store in a dictionary """
transcript_lengths = {}
conn = sqlite3.connect(database)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
# Get the exon lengths
exon_lens = lu.get_all_exon_lengths(cursor, build)
cursor.execute("SELECT * FROM transcripts")
for transcript_row in cursor.fetchall():
transcript_ID = transcript_row['transcript_ID']
length = lu.get_transcript_length(transcript_row, exon_lens)
transcript_lengths[transcript_ID] = length
conn.close()
return transcript_lengths
def main():
options = getOptions()
database = options.database
annot = options.annot
build = options.build
whitelist_file = options.whitelist
dataset_file = options.datasets_file
outfile = create_outname(options)
# Make sure that the input database exists!
if not Path(database).exists():
raise ValueError("Database file '%s' does not exist!" % database)
check_annot_validity(annot, database)
check_build_validity(build, database)
# Determine which transcripts to include
whitelist = putils.handle_filtering(database,
annot,
False,
whitelist_file,
dataset_file)
# create transcript whitelist
transcript_whitelist = []
for key,group in itertools.groupby(whitelist,operator.itemgetter(0)):
for id_tuple in list(group):
transcript_whitelist.append(str(id_tuple[1]))
# Get transcript length dict
transcript_lengths = get_transcript_lengths(database, build)
# Create the abundance file
datasets = fetch_dataset_list(dataset_file, database)
novelty_type = make_novelty_type_struct(database, datasets)
abundances, colnames = fetch_abundances(database, datasets, annot, transcript_whitelist)
prefix = fetch_naming_prefix(database)
n_places = fetch_n_places(database)
write_abundance_file(abundances, colnames, prefix, n_places, datasets, novelty_type, transcript_lengths, outfile)
if __name__ == '__main__':
main()
| 38.816068 | 117 | 0.646351 |
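An illustrative sketch of the command-line options and output naming defined above; option values and file names are placeholders, and the entry-point name is not taken from the source.

# getOptions() declares --db, --annot/-a, --whitelist, --build/-b, --datasets/-d and --o,
# e.g. (script/entry-point name is a placeholder):
#   <talon_abundance_script> --db talon.db --annot gencode --build hg38 \
#       --whitelist whitelist.tsv --o my_experiment
from types import SimpleNamespace

# create_outname as defined in the module above
opts = SimpleNamespace(outprefix="my_experiment", whitelist="whitelist.tsv")
print(create_outname(opts))   # -> my_experiment_talon_abundance_filtered.tsv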
7944b47613209347459c86431a4df3fd9e69a972 | 8,024 | py | Python | src/run.py | hex-plex/GNN-MARL | ebe964a4eb749fd8d2780af18aead85e342d2988 | [
"Apache-2.0"
] | 1 | 2022-03-22T14:59:05.000Z | 2022-03-22T14:59:05.000Z | src/run.py | hex-plex/GNN-MARL | ebe964a4eb749fd8d2780af18aead85e342d2988 | [
"Apache-2.0"
] | null | null | null | src/run.py | hex-plex/GNN-MARL | ebe964a4eb749fd8d2780af18aead85e342d2988 | [
"Apache-2.0"
] | null | null | null | import datetime
import os
import pprint
import time
import threading
import torch as th
from types import SimpleNamespace as SN
from utils.logging import Logger
from utils.timehelper import time_left, time_str
from os.path import dirname, abspath
from learners import REGISTRY as le_REGISTRY
from runners import REGISTRY as r_REGISTRY
from controllers import REGISTRY as mac_REGISTRY
from components.episode_buffer import ReplayBuffer
from components.transforms import OneHot
def run(_run, _config, _log):
# check args sanity
_config = args_sanity_check(_config, _log)
args = SN(**_config)
args.device = "cuda" if args.use_cuda else "cpu"
# setup loggers
logger = Logger(_log)
_log.info("Experiment Parameters:")
experiment_params = pprint.pformat(_config,
indent=4,
width=1)
_log.info("\n\n" + experiment_params + "\n")
# configure tensorboard logger
unique_token = "{}__{}".format(args.name, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
args.unique_token = unique_token
if args.use_tensorboard:
tb_logs_direc = os.path.join(dirname(dirname(abspath(__file__))), "results", "tb_logs")
tb_exp_direc = os.path.join(tb_logs_direc, "{}").format(unique_token)
logger.setup_tb(tb_exp_direc)
# sacred is on by default
logger.setup_sacred(_run)
# Run and train
run_sequential(args=args, logger=logger)
# Clean up after finishing
print("Exiting Main")
print("Stopping all threads")
for t in threading.enumerate():
if t.name != "MainThread":
print("Thread {} is alive! Is daemon: {}".format(t.name, t.daemon))
t.join(timeout=1)
print("Thread joined")
print("Exiting script")
# Making sure framework really exits
os._exit(os.EX_OK)
def evaluate_sequential(args, runner):
for _ in range(args.test_nepisode):
runner.run(test_mode=True)
if args.save_replay:
runner.save_replay()
runner.close_env()
def run_sequential(args, logger):
# Init runner so we can get env info
runner = r_REGISTRY[args.runner](args=args, logger=logger)
# Set up schemes and groups here
env_info = runner.get_env_info()
args.n_agents = env_info["n_agents"]
args.n_actions = env_info["n_actions"]
args.state_shape = env_info["state_shape"]
# Default/Base scheme
scheme = {
"state": {"vshape": env_info["state_shape"]},
"obs": {"vshape": env_info["obs_shape"], "group": "agents"},
"actions": {"vshape": (1,), "group": "agents", "dtype": th.long},
"adj_matrix": {"vshape":(args.n_agents,), "group": "agents", "dtype":th.int},
"avail_actions": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.int},
"reward": {"vshape": (1,)},
"terminated": {"vshape": (1,), "dtype": th.uint8},
}
groups = {
"agents": args.n_agents
}
preprocess = {
"actions": ("actions_onehot", [OneHot(out_dim=args.n_actions)])
}
buffer = ReplayBuffer(scheme, groups, args.buffer_size, env_info["episode_limit"] + 1,
preprocess=preprocess,
device="cpu" if args.buffer_cpu_only else args.device)
# Setup multiagent controller here
mac = mac_REGISTRY[args.mac](buffer.scheme, groups, args)
# Give runner the scheme
runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)
# Learner
learner = le_REGISTRY[args.learner](mac, buffer.scheme, logger, args)
if args.use_cuda:
learner.cuda()
if args.checkpoint_path != "":
timesteps = []
timestep_to_load = 0
if not os.path.isdir(args.checkpoint_path):
            logger.console_logger.info("Checkpoint directory {} doesn't exist".format(args.checkpoint_path))
return
# Go through all files in args.checkpoint_path
for name in os.listdir(args.checkpoint_path):
full_name = os.path.join(args.checkpoint_path, name)
# Check if they are dirs the names of which are numbers
if os.path.isdir(full_name) and name.isdigit():
timesteps.append(int(name))
if args.load_step == 0:
# choose the max timestep
timestep_to_load = max(timesteps)
else:
# choose the timestep closest to load_step
timestep_to_load = min(timesteps, key=lambda x: abs(x - args.load_step))
model_path = os.path.join(args.checkpoint_path, str(timestep_to_load))
logger.console_logger.info("Loading model from {}".format(model_path))
learner.load_models(model_path)
runner.t_env = timestep_to_load
if args.evaluate or args.save_replay:
evaluate_sequential(args, runner)
return
# start training
episode = 0
last_test_T = -args.test_interval - 1
last_log_T = 0
model_save_time = 0
start_time = time.time()
last_time = start_time
logger.console_logger.info("Beginning training for {} timesteps".format(args.t_max))
while runner.t_env <= args.t_max:
# Run for a whole episode at a time
episode_batch = runner.run(test_mode=False)
buffer.insert_episode_batch(episode_batch)
if buffer.can_sample(args.batch_size):
episode_sample = buffer.sample(args.batch_size)
# Truncate batch to only filled timesteps
max_ep_t = episode_sample.max_t_filled()
episode_sample = episode_sample[:, :max_ep_t]
if episode_sample.device != args.device:
episode_sample.to(args.device)
learner.train(episode_sample, runner.t_env, episode)
# Execute test runs once in a while
n_test_runs = max(1, args.test_nepisode // runner.batch_size)
if (runner.t_env - last_test_T) / args.test_interval >= 1.0:
logger.console_logger.info("t_env: {} / {}".format(runner.t_env, args.t_max))
logger.console_logger.info("Estimated time left: {}. Time passed: {}".format(
time_left(last_time, last_test_T, runner.t_env, args.t_max), time_str(time.time() - start_time)))
last_time = time.time()
last_test_T = runner.t_env
for _ in range(n_test_runs):
runner.run(test_mode=True)
if args.save_model and (runner.t_env - model_save_time >= args.save_model_interval or model_save_time == 0):
model_save_time = runner.t_env
save_path = os.path.join(args.local_results_path, "models", args.unique_token, str(runner.t_env))
#"results/models/{}".format(unique_token)
os.makedirs(save_path, exist_ok=True)
logger.console_logger.info("Saving models to {}".format(save_path))
# learner should handle saving/loading -- delegate actor save/load to mac,
# use appropriate filenames to do critics, optimizer states
learner.save_models(save_path)
episode += args.batch_size_run
if (runner.t_env - last_log_T) >= args.log_interval:
logger.log_stat("episode", episode, runner.t_env)
logger.print_recent_stats()
last_log_T = runner.t_env
runner.close_env()
logger.console_logger.info("Finished Training")
def args_sanity_check(config, _log):
# set CUDA flags
# config["use_cuda"] = True # Use cuda whenever possible!
if config["use_cuda"] and not th.cuda.is_available():
config["use_cuda"] = False
_log.warning("CUDA flag use_cuda was switched OFF automatically because no CUDA devices are available!")
if config["test_nepisode"] < config["batch_size_run"]:
config["test_nepisode"] = config["batch_size_run"]
else:
config["test_nepisode"] = (config["test_nepisode"]//config["batch_size_run"]) * config["batch_size_run"]
return config
| 34.735931 | 116 | 0.64332 |
7944b557bbf1ee69d4b4e2050cce201e85ec49c3 | 3,014 | py | Python | brca_exchange_cooccurrence_analysis/extract_roh_region_distributions.plot.py | glennhickey/CharlieSandbox | 2949f8357433a6219abf192f899ab50e2c8edaba | [
"MIT"
] | null | null | null | brca_exchange_cooccurrence_analysis/extract_roh_region_distributions.plot.py | glennhickey/CharlieSandbox | 2949f8357433a6219abf192f899ab50e2c8edaba | [
"MIT"
] | null | null | null | brca_exchange_cooccurrence_analysis/extract_roh_region_distributions.plot.py | glennhickey/CharlieSandbox | 2949f8357433a6219abf192f899ab50e2c8edaba | [
"MIT"
] | 1 | 2021-08-03T17:23:47.000Z | 2021-08-03T17:23:47.000Z | import matplotlib
matplotlib.use('Agg')
import vcf, argparse, sys
import numpy as np
import pandas as pd
import math
from scipy.stats import chisquare
from collections import defaultdict
import seaborn as sns
import matplotlib.pyplot as plt
def parse_args():
"""
Description:
        function 'parse_args' parses command-line arguments and returns an argparse
        Namespace object containing the arguments and their values. Options that are
        not supplied on the command line fall back to their argparse defaults.
"""
parser = argparse.ArgumentParser('Input bcftools roh tab-delimited file and output roh report and histogram.')
parser.add_argument('-i', '--inROHdistA', type=str,
help='Input 1st roh distribution filepath.')
parser.add_argument('-j', '--inROHdistB', type=str,
help='Input 2nd roh distribution filepath.')
parser.add_argument('-l', '--regionLength', type=int,
help='Input length of region used in calculating SROH.')
parser.add_argument('-o', '--outReport', type=str,
help='Output plot filename.')
options = parser.parse_args()
return options
def main(args):
options = parse_args()
roh_distribution_dict = defaultdict(list)
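    # Build a long-format table for seaborn: one row per (sample, ROH length
    # threshold), with columns SROH (value), SROH_length (all/100kb/1mb/1500kb/5mb)
    # and group ("No" = first input file, "Yes" = second input file).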
with open(options.inROHdistA, 'r') as roh_file_a, open(options.inROHdistB, 'r') as roh_file_b:
for line in roh_file_a:
if 'sample_id' in line: continue
parsed_line = line.strip().split('\t')
roh_distribution_dict['SROH'].extend([float(parsed_line[2]),float(parsed_line[3]),float(parsed_line[4]),float(parsed_line[5]),float(parsed_line[6])])
roh_distribution_dict['SROH_length'].extend(['all','100kb','1mb', '1500kb', '5mb'])
roh_distribution_dict['group'].extend(['No']*5)
for line in roh_file_b:
if 'sample_id' in line: continue
parsed_line = line.strip().split('\t')
roh_distribution_dict['SROH'].extend([float(parsed_line[2]),float(parsed_line[3]),float(parsed_line[4]),float(parsed_line[5]),float(parsed_line[6])])
roh_distribution_dict['SROH_length'].extend(['all','100kb','1mb', '1500kb', '5mb'])
roh_distribution_dict['group'].extend(['Yes']*5)
violin_df = pd.DataFrame(data=roh_distribution_dict)
sns.set(style="whitegrid", font_scale=1.5)
fig, axes = plt.subplots(figsize=(10, 10))
order=["all", "100kb", "1mb", "1500kb", "5mb"]
sns.boxplot(
x="SROH_length", y="SROH", hue="group", data=violin_df,
order=order,
ax=axes
)
axes.set_xticklabels(["All", "100 (kb)", "1 (mb)", "1.5 (mb)", "5 (mb)"])
axes.set_xlabel("Minimum ROH Length")
axes.legend("")
fig.savefig("roh_distribution_violin.{}.png".format(options.outReport))
matplotlib.pyplot.close(fig)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 39.657895 | 161 | 0.663238 |
7944b578ca9977be238c38b2e4d747986313a187 | 2,753 | py | Python | calm/dsl/cli/click_options.py | tuxtof/calm-dsl | 5af67435d8304b97e170a690068f2d5975e9bfe6 | [
"Apache-2.0"
] | 37 | 2019-12-23T15:23:20.000Z | 2022-03-15T11:12:11.000Z | calm/dsl/cli/click_options.py | gabybeitler/calm-dsl | bac453413cfcf800eef95d89d5a7323c83654a93 | [
"Apache-2.0"
] | 144 | 2020-03-09T11:22:09.000Z | 2022-03-28T21:34:09.000Z | calm/dsl/cli/click_options.py | gabybeitler/calm-dsl | bac453413cfcf800eef95d89d5a7323c83654a93 | [
"Apache-2.0"
] | 46 | 2020-01-23T14:28:04.000Z | 2022-03-09T04:17:10.000Z | import click
from calm.dsl.config import get_context
from calm.dsl.log import CustomLogging
def simple_verbosity_option(logging_mod=None, *names, **kwargs):
"""A decorator that adds a `--verbose, -v` option to the decorated
command.
Name can be configured through ``*names``. Keyword arguments are passed to
the underlying ``click.option`` decorator.
"""
if not names:
names = ["--verbose", "-v"]
if not isinstance(logging_mod, CustomLogging):
raise TypeError("Logging object should be instance of CustomLogging.")
log_level = "INFO"
try:
ContextObj = get_context()
log_config = ContextObj.get_log_config()
if "level" in log_config:
log_level = log_config.get("level") or log_level
except (FileNotFoundError, ValueError):
        # At the time of initializing dsl, the config file may be missing or invalid
pass
logging_levels = logging_mod.get_logging_levels()
if log_level not in logging_levels:
raise ValueError(
"Invalid log level in config. Select from {}".format(logging_levels)
)
log_level = logging_levels.index(log_level) + 1
kwargs.setdefault("default", log_level)
kwargs.setdefault("expose_value", False)
kwargs.setdefault("help", "Verboses the output")
kwargs.setdefault("is_eager", True)
kwargs.setdefault("count", True)
def decorator(f):
def _set_level(ctx, param, value):
logging_levels = logging_mod.get_logging_levels()
if value < 1 or value > len(logging_levels):
raise click.BadParameter(
"Should be atleast 1 and atmost {}".format(len(logging_levels))
)
log_level = logging_levels[value - 1]
x = getattr(logging_mod, log_level, None)
CustomLogging.set_verbose_level(x)
return click.option(*names, callback=_set_level, **kwargs)(f)
return decorator
def show_trace_option(logging_mod=None, **kwargs):
"""A decorator that add --show_trace/-st option to decorated command"""
if not isinstance(logging_mod, CustomLogging):
raise TypeError("Logging object should be instance of CustomLogging.")
names = ["--show_trace", "-st"]
kwargs.setdefault("is_flag", True)
kwargs.setdefault("default", False)
kwargs.setdefault("expose_value", False)
kwargs.setdefault("help", "Show the traceback for the exceptions")
kwargs.setdefault("is_eager", True)
def decorator(f):
def _set_show_trace(ctx, param, value):
if value:
CustomLogging.enable_show_trace()
return click.option(*names, callback=_set_show_trace, **kwargs)(f)
return decorator
| 33.168675 | 86 | 0.660371 |
7944b63be30eba9086c9010aa5c653c16c12e8a4 | 5,563 | py | Python | ekgen/mpasocn.py | grnydawn/ekgen | 9a199104b27cef7fb7a647957167df3ca8dfa7c1 | [
"MIT"
] | null | null | null | ekgen/mpasocn.py | grnydawn/ekgen | 9a199104b27cef7fb7a647957167df3ca8dfa7c1 | [
"MIT"
] | null | null | null | ekgen/mpasocn.py | grnydawn/ekgen | 9a199104b27cef7fb7a647957167df3ca8dfa7c1 | [
"MIT"
] | null | null | null | import os, subprocess, json, shutil
from microapp import App, appdict
from ekgen.utils import xmlquery
here = os.path.dirname(os.path.abspath(__file__))
class MPASOcnKernel(App):
_name_ = "mpasocn"
_version_ = "0.1.0"
def __init__(self, mgr):
self.add_argument("casedir", metavar="casedir", help="E3SM case directory")
self.add_argument("callsitefile", metavar="callsitefile", help="KGen callsite Fortran source file")
self.add_argument("-o", "--outdir", type=str, help="output directory")
self.register_forward("data", help="json object")
def perform(self, args):
casedir = os.path.abspath(os.path.realpath(args.casedir["_"]))
callsitefile = os.path.abspath(os.path.realpath(args.callsitefile["_"]))
csdir, csfile = os.path.split(callsitefile)
csname, csext = os.path.splitext(csfile)
outdir = os.path.abspath(os.path.realpath(args.outdir["_"])) if args.outdir else os.getcwd()
cleancmd = "cd %s; ./case.build --clean-all" % casedir
buildcmd = "cd %s; ./case.build" % casedir
runcmd = "cd %s; ./case.submit" % casedir
# TODO: move this batch support to common area
batch = xmlquery(casedir, "BATCH_SYSTEM", "--value")
if batch == "lsf":
runcmd += " --batch-args='-K'"
elif "slurm" in batch:
runcmd += " --batch-args='-W'"
elif batch == "pbs": # SGE PBS
runcmd += " --batch-args='-sync yes'"
#runcmd += " --batch-args='-Wblock=true'" # PBS
elif batch == "moab":
runcmd += " --batch-args='-K'"
else:
raise Exception("Unknown batch system: %s" % batch)
compjson = os.path.join(outdir, "compile.json")
analysisjson = os.path.join(outdir, "analysis.json")
outfile = os.path.join(outdir, "model.json")
srcbackup = os.path.join(outdir, "backup", "src")
        # get mpi and git info here (branch, commit, ...)
srcroot = os.path.abspath(os.path.realpath(xmlquery(casedir, "SRCROOT", "--value")))
reldir = os.path.relpath(csdir, start=os.path.join(srcroot, "components", "mpas-source", "src"))
callsitefile2 = os.path.join(casedir, "bld", "cmake-bld", reldir, "%s.f90" % csname)
# get mpi: mpilib from xmlread , env ldlibrary path with the mpilib
mpidir = os.environ["MPI_ROOT"]
excludefile = os.path.join(here, "exclude_e3sm_mpas.ini")
blddir = xmlquery(casedir, "OBJROOT", "--value")
if not os.path.isfile(compjson) and os.path.isdir(blddir):
shutil.rmtree(blddir)
cmd = " -- buildscan '%s' --savejson '%s' --reuse '%s' --backupdir '%s'" % (
buildcmd, compjson, compjson, srcbackup)
ret, fwds = self.manager.run_command(cmd)
# save compjson with case directory map
# handle mpas converted file for callsitefile2
# TODO: replace kgen contaminated file with original files
# TODO: recover removed e3sm converted files in cmake-bld, ... folders
with open(compjson) as f:
jcomp = json.load(f)
for srcpath, compdata in jcomp.items():
srcbackup = compdata["srcbackup"]
if not srcbackup:
continue
if not os.path.isfile(srcpath) and srcbackup[0] and os.path.isfile(srcbackup[0]):
orgdir = os.path.dirname(srcpath)
if not os.path.isdir(orgdir):
os.makedirs(orgdir)
shutil.copy(srcbackup[0], srcpath)
for incsrc, incbackup in srcbackup[1:]:
if not os.path.isfile(incsrc) and incbackup and os.path.isfile(incbackup):
orgdir = os.path.dirname(incsrc)
if not os.path.isdir(orgdir):
os.makedirs(orgdir)
shutil.copy(incbackup, incsrc)
# TODO: actually scan source files if they should be recovered
statedir = os.path.join(outdir, "state")
etimedir = os.path.join(outdir, "etime")
if os.path.isdir(statedir) and os.path.isfile(os.path.join(statedir, "Makefile")):
stdout = subprocess.check_output("make recover", cwd=statedir, shell=True)
elif os.path.isdir(etimedir) and os.path.isfile(os.path.join(etimedir, "Makefile")):
stdout = subprocess.check_output("make recover", cwd=etimedir, shell=True)
#cmd = " -- resolve --compile-info '@data' '%s'" % callsitefile
rescmd = (" -- resolve --mpi header='%s/include/mpif.h' --openmp enable"
" --compile-info '%s' --keep '%s' --exclude-ini '%s' '%s'" % (
mpidir, compjson, analysisjson, excludefile, callsitefile2))
#ret, fwds = prj.run_command(cmd)
#assert ret == 0
# TODO wait??
#cmd = rescmd + " -- runscan '@analysis' -s 'timing' --outdir '%s' --cleancmd '%s' --buildcmd '%s' --runcmd '%s' --output '%s'" % (
#outdir, cleancmd, buildcmd, runcmd, outfile)
cmd = rescmd + " -- runscan '@analysis' -s 'timing' --outdir '%s' --buildcmd '%s' --runcmd '%s' --output '%s'" % (
outdir, buildcmd, runcmd, outfile)
#ret, fwds = prj.run_command(cmd)
# add model config to analysis
cmd = cmd + " -- kernelgen '@analysis' --model '@model' --repr-etime 'ndata=40,nbins=10' --outdir '%s'" % outdir
ret, fwds = self.manager.run_command(cmd)
| 42.143939 | 139 | 0.577746 |
7944b65e8c798034580c0ec3ca7bc8a93c4c0e16 | 854 | py | Python | userAuth/signUp/admin.py | amandeep4272/online-classroom | afedb3ccfea2cbb53b97c9f507a4fb2caf28c081 | [
"MIT"
] | null | null | null | userAuth/signUp/admin.py | amandeep4272/online-classroom | afedb3ccfea2cbb53b97c9f507a4fb2caf28c081 | [
"MIT"
] | null | null | null | userAuth/signUp/admin.py | amandeep4272/online-classroom | afedb3ccfea2cbb53b97c9f507a4fb2caf28c081 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import User,Subject,Question,Quiz,Answer,Student,TakenQuiz,StudentAnswer
from .models import Room,Message
class RoomAdmin(admin.ModelAdmin):
list_display=['id','name']
class MessageAdmin(admin.ModelAdmin):
list_display=['id','value','date','user','room']
admin.site.register(Room,RoomAdmin)
admin.site.register(Message,MessageAdmin)
class SubjectAdmin(admin.ModelAdmin):
list_display=['name','color']
class AnswerInline(admin.TabularInline):
model=Answer
class QuestionAdmin(admin.ModelAdmin):
inlines=[AnswerInline]
admin.site.register(Subject,SubjectAdmin)
admin.site.register(User)
admin.site.register(Question,QuestionAdmin)
admin.site.register(Answer)
admin.site.register(Quiz)
admin.site.register(Student)
admin.site.register(TakenQuiz)
admin.site.register(StudentAnswer)
| 25.117647 | 85 | 0.790398 |
7944b70cf7040569285f0553f08512b1df00c2df | 6,095 | py | Python | tensorflow_probability/python/distributions/gamma_gamma_test.py | souravsingh/probability | 0519b63094fdaa4e326357a0cdff056d5ef76cd8 | [
"Apache-2.0"
] | 1 | 2019-10-13T19:52:59.000Z | 2019-10-13T19:52:59.000Z | tensorflow_probability/python/distributions/gamma_gamma_test.py | souravsingh/probability | 0519b63094fdaa4e326357a0cdff056d5ef76cd8 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/gamma_gamma_test.py | souravsingh/probability | 0519b63094fdaa4e326357a0cdff056d5ef76cd8 | [
"Apache-2.0"
] | 1 | 2019-10-13T19:52:57.000Z | 2019-10-13T19:52:57.000Z | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import test_util
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class GammaGammaTest(tf.test.TestCase):
def testGammaGammaShape(self):
gg = tfd.GammaGamma(
concentration=[[2.], [4.]],
mixing_concentration=[1., 2., 3.],
mixing_rate=0.5)
self.assertAllEqual(self.evaluate(gg.batch_shape_tensor()), [2, 3])
self.assertEqual(gg.batch_shape, tf.TensorShape([2, 3]))
self.assertAllEqual(self.evaluate(gg.event_shape_tensor()), [])
self.assertEqual(gg.event_shape, tf.TensorShape([]))
def testGammaGammaLogPDF(self):
batch_size = 5
alpha = tf.constant([2.] * batch_size)
alpha0 = tf.constant([3.] * batch_size)
beta0 = tf.constant([4.] * batch_size)
x = np.array([6.] * batch_size, dtype=np.float32)
# Let
# alpha = concentration = 2.
# alpha0 = mixing_concentration = 3.,
# beta0 = mixing_rate = 4.
#
# See the PDF derivation in formula (1) of
# http://www.brucehardie.com/notes/025/gamma_gamma.pdf.
#
# x**(alpha - 1) * beta0**alpha0
# prob(x=6) = ------------------------------------------------
# B(alpha, alpha0) * (x + beta0)**(alpha + alpha0)
#
# 6 * 4**3
# = --------------- = 0.04608
# B(2, 3) * 10**5
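    #              (where B(2, 3) = Gamma(2) * Gamma(3) / Gamma(5) = 2 / 24 = 1 / 12,
    #               so prob = 6 * 64 * 12 / 10**5 = 0.04608)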
#
# log_prob(x=6) = -3.077376
expected_log_pdf = [-3.077376] * batch_size
gg = tfd.GammaGamma(
concentration=alpha, mixing_concentration=alpha0, mixing_rate=beta0)
log_pdf = gg.log_prob(x)
self.assertEqual(log_pdf.get_shape(), (5,))
self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)
def testGammaGammaLogPDFMultidimensional(self):
batch_size = 6
alpha = tf.constant([[2., 4.]] * batch_size)
alpha0 = tf.constant([[3., 6.]] * batch_size)
beta0 = tf.constant([[4., 8.]] * batch_size)
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
gg = tfd.GammaGamma(
concentration=alpha, mixing_concentration=alpha0, mixing_rate=beta0)
log_pdf = gg.log_prob(x)
self.assertEqual(log_pdf.get_shape(), (6, 2))
def testGammaGammaLogPDFMultidimensionalBroadcasting(self):
batch_size = 6
alpha = tf.constant([[2., 4.]] * batch_size)
alpha0 = tf.constant(3.0)
beta0 = tf.constant([4., 8.])
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
gg = tfd.GammaGamma(
concentration=alpha, mixing_concentration=alpha0, mixing_rate=beta0)
log_pdf = gg.log_prob(x)
self.assertEqual(log_pdf.get_shape(), (6, 2))
def testGammaGammaMeanAllDefined(self):
alpha_v = np.array([2., 4.])
alpha0_v = np.array([3., 6.])
beta0_v = np.array([4., 8.])
expected_mean = alpha_v * beta0_v / (alpha0_v - 1.)
gg = tfd.GammaGamma(
concentration=alpha_v,
mixing_concentration=alpha0_v,
mixing_rate=beta0_v)
self.assertEqual(gg.mean().get_shape(), (2,))
self.assertAllClose(self.evaluate(gg.mean()), expected_mean)
def testGammaGammaMeanAllowNanStats(self):
# Mean will not be defined for the first entry.
alpha_v = np.array([2., 4.])
alpha0_v = np.array([1., 6.])
beta0_v = np.array([4., 8.])
gg = tfd.GammaGamma(
concentration=alpha_v,
mixing_concentration=alpha0_v,
mixing_rate=beta0_v,
allow_nan_stats=False)
with self.assertRaisesOpError('x < y'):
self.evaluate(gg.mean())
def testGammaGammaMeanNanStats(self):
# Mean will not be defined for the first entry.
alpha_v = np.array([2., 4.])
alpha0_v = np.array([1., 6.])
beta0_v = np.array([4., 8.])
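    # mean = concentration * mixing_rate / (mixing_concentration - 1), so the
    # second entry is 4. * 8. / (6. - 1.) = 6.4; the first entry has
    # mixing_concentration = 1., which leaves the mean undefined (NaN).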
expected_mean = np.array([np.nan, 6.4])
gg = tfd.GammaGamma(
concentration=alpha_v,
mixing_concentration=alpha0_v,
mixing_rate=beta0_v)
self.assertEqual(gg.mean().get_shape(), (2,))
self.assertAllClose(self.evaluate(gg.mean()), expected_mean)
def testGammaGammaSample(self):
with tf.Session():
alpha_v = 2.0
alpha0_v = 3.0
beta0_v = 5.0
n = 100000
gg = tfd.GammaGamma(
concentration=alpha_v,
mixing_concentration=alpha0_v,
mixing_rate=beta0_v)
samples = gg.sample(n, seed=123456)
sample_values = self.evaluate(samples)
self.assertEqual(samples.get_shape(), (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.mean(), self.evaluate(gg.mean()), rtol=.01)
def testGammaGammaSampleMultidimensionalMean(self):
alpha_v = np.array([np.arange(3, 103, dtype=np.float32)]) # 1 x 100
alpha0_v = 2.
beta0_v = np.array([np.arange(1, 11, dtype=np.float32)]).T # 10 x 1
n = 10000
gg = tfd.GammaGamma(
concentration=alpha_v,
mixing_concentration=alpha0_v,
mixing_rate=beta0_v)
samples = gg.sample(n, seed=123456)
sample_values = self.evaluate(samples)
self.assertEqual(samples.get_shape(), (n, 10, 100))
self.assertEqual(sample_values.shape, (n, 10, 100))
self.assertAllClose(
sample_values.mean(axis=0), self.evaluate(gg.mean()), rtol=.08)
if __name__ == '__main__':
tf.test.main()
| 34.241573 | 78 | 0.637244 |
7944b858fab0f247ad90ebea472cc36f96ba5322 | 2,433 | py | Python | algolib/disjoint_set/disjoint_set.py | niemmi/algolib | 81a013af5ae1ca1e8cf8d3f2e2f1b4a9bce6ead8 | [
"BSD-3-Clause"
] | null | null | null | algolib/disjoint_set/disjoint_set.py | niemmi/algolib | 81a013af5ae1ca1e8cf8d3f2e2f1b4a9bce6ead8 | [
"BSD-3-Clause"
] | null | null | null | algolib/disjoint_set/disjoint_set.py | niemmi/algolib | 81a013af5ae1ca1e8cf8d3f2e2f1b4a9bce6ead8 | [
"BSD-3-Clause"
] | null | null | null | """Disjoint-set data structure that allows efficient way of finding which set
item belongs to and merging two different sets.
Time complexity of the operations:
- Finding which set an item belongs to: O(log n)
- Merging two sets: O(log n)
- Checking if two items belong to the same set: O(log n)
For more information see Wikipedia:
https://en.wikipedia.org/wiki/Disjoint-set_data_structure
"""
class DisjointSet(object):
"""Disjoint-set data structure that allows user to check which set item
belongs to and merging two different sets.
Attributes:
_items: Dictionary of items belonging to set where keys are items
and values are pairs [parent, number of items in set]
"""
def __init__(self, it):
"""Initializer, initializes Disjoint-set with items from given iterable.
Args:
it: Iterable of items to add to the object.
"""
self._items = {item: [item, 1] for item in it}
def __len__(self):
return len(self._items)
def find(self, item):
"""Returns the set where this item belongs to. If items x & y belong
to the same set then find(x) == find(y).
Args:
item: Item whose set to search.
Returns:
Set identifier which is one of the items in the object.
"""
parent = self._items[item][0]
if item != parent:
parent = self.find(parent)
# Compress path
self._items[item][0] = parent
return parent
def union(self, x, y):
"""Merges sets containing two different items together. If items already
belong to same set does nothing.
Args:
x: First item.
y: Second item.
"""
parent_x = self.find(x)
parent_y = self.find(y)
if parent_x != parent_y:
merge_from, merge_to = sorted([parent_x, parent_y],
key=lambda i: self._items[i][1])
self._items[merge_from][0] = merge_to
self._items[merge_to][1] += self._items[merge_from][1]
def same_component(self, x, y):
"""Returns boolean value telling if two different items belong to
same set.
Args:
x: First item.
y: Second item.
Returns:
True if items belong to same set, False if not.
"""
return self.find(x) == self.find(y)
| 30.037037 | 80 | 0.592684 |
7944b86daa75967f5fe50b9d565e099a8c60f0ae | 682 | py | Python | stream_alert/rule_processor/__init__.py | tuapuikia/streamalert | b1f733259aa051f8d533e7881018280fe77d7bda | [
"Apache-2.0"
] | 1 | 2018-11-18T12:13:44.000Z | 2018-11-18T12:13:44.000Z | stream_alert/rule_processor/__init__.py | tuapuikia/streamalert | b1f733259aa051f8d533e7881018280fe77d7bda | [
"Apache-2.0"
] | 110 | 2019-02-13T05:32:07.000Z | 2021-07-29T05:42:01.000Z | stream_alert/rule_processor/__init__.py | tuapuikia/streamalert | b1f733259aa051f8d533e7881018280fe77d7bda | [
"Apache-2.0"
] | 1 | 2019-11-01T01:03:47.000Z | 2019-11-01T01:03:47.000Z | """Initialize logging for the rule processor."""
import logging
import os
from stream_alert.shared import RULE_PROCESSOR_NAME as FUNCTION_NAME
# Create a package level logger to import
LEVEL = os.environ.get('LOGGER_LEVEL', 'INFO').upper()
# Cast integer levels to avoid a ValueError
if LEVEL.isdigit():
LEVEL = int(LEVEL)
logging.basicConfig(format='%(name)s [%(levelname)s]: [%(module)s.%(funcName)s] %(message)s')
LOGGER = logging.getLogger('StreamAlert')
try:
LOGGER.setLevel(LEVEL)
except (TypeError, ValueError) as err:
LOGGER.setLevel('INFO')
LOGGER.error('Defaulting to INFO logging: %s', err)
LOGGER_DEBUG_ENABLED = LOGGER.isEnabledFor(logging.DEBUG)
| 28.416667 | 93 | 0.741935 |
7944b8eb91b723752af2a0c88d1fe94b9bb1231e | 14,830 | py | Python | middleware/job_information_manager.py | alan-turing-institute/gateway-middleware-old | f09f6099f33f66ec95c4b24c0ae41eebfb68875c | [
"MIT"
] | 3 | 2017-08-03T07:40:08.000Z | 2019-07-29T11:39:52.000Z | middleware/job_information_manager.py | alan-turing-institute/gateway-middleware-old | f09f6099f33f66ec95c4b24c0ae41eebfb68875c | [
"MIT"
] | 58 | 2017-06-22T15:02:53.000Z | 2018-01-08T16:06:01.000Z | middleware/job_information_manager.py | alan-turing-institute/gateway-middleware-old | f09f6099f33f66ec95c4b24c0ae41eebfb68875c | [
"MIT"
] | null | null | null | import os
import posixpath
from mako.template import Template as MakoTemplate
from middleware.job.schema import Template
from middleware.ssh import ssh
import re
import json
from instance.config import *
from werkzeug.exceptions import ServiceUnavailable
# precedence for secrets variables is:
# 1. Via environment variables
# 2. Via instance/config.py
# 3. Via defaults listed below
# defaults
if 'SSH_USR' not in locals():
SSH_USR = 'test_user'
if 'SSH_HOSTNAME' not in locals():
SSH_HOSTNAME = 'test_host'
if 'SSH_PORT' not in locals():
SSH_PORT = 22
if 'SSH_PRIVATE_KEY_PATH' not in locals():
SSH_PRIVATE_KEY_PATH = None
if 'SSH_PRIVATE_KEY_STRING' not in locals():
SSH_PRIVATE_KEY_STRING = None
if 'SIM_ROOT' not in locals():
SIM_ROOT = '/home/test_user'
# Note, os.environ.get() falls back to second argument (instead of None)
SSH_USR = os.environ.get('SSH_USR', SSH_USR)
SSH_HOSTNAME = os.environ.get('SSH_HOSTNAME', SSH_HOSTNAME)
SSH_PORT = os.environ.get('SSH_PORT', SSH_PORT)
SSH_PRIVATE_KEY_PATH = os.environ.get(
'SSH_PRIVATE_KEY_PATH', SSH_PRIVATE_KEY_PATH)
# an SSH_PRIVATE_KEY_STRING environment variable
# is a multi-line string
# here, we replace the raw r'\n' placeholders
# with line breaks "\n"
SSH_PRIVATE_KEY_STRING = os.environ.get(
'SSH_PRIVATE_KEY_STRING', SSH_PRIVATE_KEY_STRING)
if isinstance(SSH_PRIVATE_KEY_STRING, str):
SSH_PRIVATE_KEY_STRING = SSH_PRIVATE_KEY_STRING.replace(r'\n', "\n")
SIM_ROOT = os.environ.get(
'SIM_ROOT', SIM_ROOT)
SSH_PORT = int(SSH_PORT)
debug_variables = False
if debug_variables:
print('SSH_USR', SSH_USR)
print('SSH_HOSTNAME', SSH_HOSTNAME)
print('SSH_PORT', SSH_PORT)
print('SSH_PRIVATE_KEY_PATH', SSH_PRIVATE_KEY_PATH)
print('SSH_PRIVATE_KEY_STRING', SSH_PRIVATE_KEY_STRING)
class job_information_manager():
"""
Class to handle patching parameter files, and the transfer of these files
to the cluster alongside the transfer and execution of scripts to the
cluster.
Needs a better, descriptive name.
"""
def __init__(self, job, job_repository=None):
"""
Create a manager object, which is populated with ssh information from
instance/config.py and job information passed via http post in the api.
"""
self.username = SSH_USR
self.hostname = SSH_HOSTNAME
self.port = SSH_PORT
self.simulation_root = SIM_ROOT
self.private_key_path = SSH_PRIVATE_KEY_PATH
self.private_key_string = SSH_PRIVATE_KEY_STRING
self.job = job
self.jobs = job_repository
self.job_id = job.id
self.template_list = job.templates
self.patched_templates = []
self.families = job.families
self.script_list = job.scripts
self.inputs_list = job.inputs
self.user = job.user
self.extracted_parameters = \
self._extract_parameters(self.families)
# TODO case_label cannot contain spaces
# (test for this in CaseSchema.make_case())
self.case_dir_label = self.job.case.label.replace(" ", "_")
self.job_working_directory_name = "{}-{}".format(self.case_dir_label,
self.job_id)
self.job_working_directory_path = posixpath.join(
self.simulation_root,
self.job_working_directory_name)
def _extract_parameters(self, families):
parameters = []
for family in families:
parameters.extend(family.parameters)
return parameters
def _parameters_to_mako_dict(self, parameters):
mako_dict = {}
if parameters:
for p in parameters:
mako_dict[p.name] = p.value
return mako_dict
def _apply_patch(self, template_path, parameters, destination_path):
"""
Method to apply a patch based on a supplied template file.
Access via the patch_all_templates method.
"""
template = MakoTemplate(filename=template_path, input_encoding='utf-8')
mako_dict = self._parameters_to_mako_dict(parameters)
with open(destination_path, "w") as f:
f.write(template.render(parameters=mako_dict))
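    # Illustrative note (not part of the original code): with a Mako template
    # line such as "velocity = ${parameters['velocity']}" and a parameter named
    # "velocity" with value 42, _apply_patch renders "velocity = 42" into
    # destination_path; the parameter name and value here are hypothetical.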
def patch_all_templates(self):
"""
Wrapper around the _apply_patch method which patches all files in
self.template_list
"""
for template in self.template_list:
template_file = template.source_uri
template_filename = os.path.basename(template_file)
# make a dedicated directory for patching
# TODO make temporary directory for each new job-id
if template.destination_path:
tmp_path = os.path.join('tmp', template.destination_path)
else:
tmp_path = template.destination_path
tmp_file = os.path.join(tmp_path, template_filename)
os.makedirs(tmp_path, exist_ok=True)
self._apply_patch(template_file,
self.extracted_parameters,
tmp_file)
            patched_template = Template(
                source_uri=tmp_file,
                destination_path=template.destination_path)
            self.patched_templates.append(patched_template)
def _ssh_connection(self):
try:
connection = ssh(
self.hostname, self.username, self.port,
private_key_path=self.private_key_path,
private_key_string=self.private_key_string,
debug=True)
return connection
except Exception:
# If connection cannot be made, raise a ServiceUnavailble
# exception that will be passed to API client as a HTTP error
raise(ServiceUnavailable(
description="Unable to connect to backend compute resource"))
def create_job_directory(self, debug=False):
"""
Create a job directory (All inputs, scripts, templates are transferred
relative to this location). The job directory is named using the
following path structure:
SIM_ROOT/<case.label>-<job.id>
"""
connection = self._ssh_connection()
command = "mkdir -p {}".format(self.job_working_directory_path)
out, err, exit_code = connection.pass_command(command)
if debug:
print(out)
return out, err, exit_code
def transfer_all_files(self, file_system='unix'):
"""
Method to copy all needed files to the cluster using a single
ssh connection.
"""
connection = self._ssh_connection()
all_files = []
all_files.extend(self.script_list)
all_files.extend(self.inputs_list)
all_files.extend(self.patched_templates)
# these are Script and Input model objects
for file_object in all_files:
file_full_path = file_object.source_uri
file_name = os.path.basename(file_full_path)
if file_object.destination_path:
dest_path = posixpath.join(
self.job_working_directory_path,
file_object.destination_path)
else: # support {"destination_path": null} in job json
dest_path = self.job_working_directory_path
connection.secure_copy(file_full_path, dest_path)
# convert line endings
if file_system == 'unix':
destination_full_path = posixpath.join(dest_path, file_name)
dos2unix = "dos2unix {}".format(destination_full_path)
out, err, exit_code = connection.pass_command(dos2unix)
connection.close_connection()
def _run_remote_script(self, script_name, remote_path, debug=False):
"""
Method to run a given script, located in a remote location.
Set the debug flag to print stdout to the terminal, and to enable
logging in ./logs/ssh.log
Shouldnt be called directly.
"""
connection = self._ssh_connection()
command = "cd {}; bash {}".format(remote_path, script_name)
out, err, exit_code = connection.pass_command(command)
if debug:
print(out)
return out, err, exit_code
def _run_remote_command(self, command, debug=False):
"""
Method to run a given command remotely via SSH
        Shouldn't be called directly.
"""
connection = self._ssh_connection()
out, err, exit_code = connection.pass_command(command)
if debug:
print(out)
return out, err, exit_code
def _check_for_backend_identifier(self, string):
"""
Check for PBS backend_identifier:
Valid examples:
"5305301.cx1b\n"
Invalid examples:
"d305e01.cx1b\n"
"5305301.cx2b\n"
"""
stripped_string = string.strip("\n")
# Imperial PBS Job IDs
if re.match(r"\d+\.cx1b", stripped_string):
return stripped_string
# Azure Torque Job IDs
if re.match(r"\d+\.science-gateway-cluster", stripped_string):
return stripped_string
else:
return None
def trigger_action_script(self, action):
"""
        Pass in the job and the required action (e.g. 'RUN' or 'CANCEL')
and this method will run the remote script which
corresponds to that action
"""
to_trigger = None
# Cycle through the list of scripts to to get the action script
for i, s in enumerate(self.script_list):
if s.action == action:
to_trigger = self.script_list[i]
break
# If the script isn't found, return a 400 error
if to_trigger:
script_name = os.path.basename(to_trigger.source_uri)
if to_trigger.destination_path:
script_path = posixpath.join(
self.job_working_directory_path,
to_trigger.destination_path)
else: # support {"destination_path": null} in job json
script_path = self.job_working_directory_path
out, err, exit = self._run_remote_script(script_name, script_path)
# for "RUN" actions, we need to persist the backend identifier
# and submission status to the database
if to_trigger.action == "RUN":
backend_identifier = self._check_for_backend_identifier(out)
if backend_identifier:
self.job.backend_identifier = backend_identifier
self.job.status = "Queued"
self.jobs.update(self.job)
if to_trigger.action in ["DATA", "PROGRESS"]:
# convert stdout json string to json
# guard against empty string (for queued jobs)
if out:
out = json.loads(out)
result = {"stdout": out, "stderr": err, "exit_code": exit}
return result, 200
else:
result = {'message': '{} script not found'.format(action)}
return result, 400
def _qstat_status_to_job_status(self, qstat_status):
if(qstat_status == 'Q' or qstat_status == 'W'):
            # Q: Job is queued, eligible to run or routed.
# W: Job is waiting for its execution time (-a option) to
# be reached.
return "Queued"
if(qstat_status == 'R'):
# R: Job is running
return "Running"
if(qstat_status == 'C'):
# C: Job is completed
return "Complete"
else:
return None
def _qstat_status(self):
status_cmd = 'qstat {} -x | grep -P -o "<job_state>\K."'.format(
self.job.backend_identifier)
out, err, exit = self._run_remote_command(status_cmd)
# Strip whitespace as we may get a carriage return in the output
qstat_status = out.strip()
return qstat_status
def update_job_status(self):
# No need to make remote call to qstat if Job is not yet submitted or
# has already completed
if(self.job.status not in ["Queued", "Running"]):
# Leave job status unchanged
return self.job.status
# Check current qstat status for job
qstat_status = self._qstat_status()
if(qstat_status is not None):
# If we get a qstat status, try and convert it to a job status
new_job_status = self._qstat_status_to_job_status(qstat_status)
else:
if(self.job.status in ["Submitted", "Queued", "Running"]):
# If we have a previous backend status confirming the job was
# on the queue, an empty qstat status means the Job has
# completed and been removed from the queue.
                # Note: Jobs only stay on the queue for about 5 mins after they
# complete
new_job_status = "Complete"
if(new_job_status is None):
# Leave job status unchanged
new_job_status = self.job.status
return new_job_status
def run(self):
"""
This is the RUN behaviour for this job manager. This method ignores
any data passed as part of the request.
"""
# Call setup to ensure that the latest params and files are loaded
self.setup()
# Now execute the run script
return self.trigger_action_script('RUN')
def setup(self):
"""
This is the SETUP behaviour for this job manager. This method ignores
any data passed as part of the request.
"""
# PATCH EVERYTHING
self.patch_all_templates()
# CREATE REQUIRED REMOTE DIRECTORIES
self.create_job_directory()
# COPY EVERYTHING
self.transfer_all_files()
# EXECUTE SETUP SCRIPT
return self.trigger_action_script('SETUP')
def progress(self):
"""
This is the PROGRESS behaviour for this job manager. Method ignores
any data passed as part of the request.
"""
# Execute the progress script
return self.trigger_action_script('PROGRESS')
def data(self):
"""
This is the DATA behaviour for this job manager. Method ignores
any data passed as part of the request.
"""
# Execute the progress script
return self.trigger_action_script('DATA')
def cancel(self):
"""
This is the CANCEL behaviour for this job manager. Method ignores
any data passed as part of the request.
"""
# Execute the cancel script
return self.trigger_action_script('CANCEL')
| 36.170732 | 79 | 0.621173 |
7944ba2d1962e57676bc5867d046434adc4000ad | 470 | py | Python | pip_services_logging/logic/__init__.py | pip-services-infrastructure/pip-services-logging-python | 5b1f6eb4e0204004fb7b4affa527d9d3325bb3c7 | [
"MIT"
] | null | null | null | pip_services_logging/logic/__init__.py | pip-services-infrastructure/pip-services-logging-python | 5b1f6eb4e0204004fb7b4affa527d9d3325bb3c7 | [
"MIT"
] | null | null | null | pip_services_logging/logic/__init__.py | pip-services-infrastructure/pip-services-logging-python | 5b1f6eb4e0204004fb7b4affa527d9d3325bb3c7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
pip_services_logging.logic.__init__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Logic module initialization
:copyright: Conceptual Vision Consulting LLC 2015-2016, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
__all__ = [
'ILoggingBusinessLogic', 'LoggingController'
]
from .ILoggingBusinessLogic import ILoggingBusinessLogic
from .LoggingController import LoggingController | 27.647059 | 89 | 0.665957 |
7944bc0c1e652fd5af2155d7ecb44ee0e5d2ff6b | 88 | py | Python | plugins/splunk/komand_splunk/actions/modify_saved_search_properties/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/splunk/komand_splunk/actions/modify_saved_search_properties/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/splunk/komand_splunk/actions/modify_saved_search_properties/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import ModifySavedSearchProperties
| 29.333333 | 47 | 0.818182 |
7944be2fdbfb92c177326b7bacb81ab6a621e09d | 2,962 | py | Python | test/python/transpiler/test_remove_reset_in_zero_state.py | dmquinones/qiskit-terra | f8fdfc514b051b4a37f7ac738b9716aecba8fc37 | [
"Apache-2.0"
] | null | null | null | test/python/transpiler/test_remove_reset_in_zero_state.py | dmquinones/qiskit-terra | f8fdfc514b051b4a37f7ac738b9716aecba8fc37 | [
"Apache-2.0"
] | null | null | null | test/python/transpiler/test_remove_reset_in_zero_state.py | dmquinones/qiskit-terra | f8fdfc514b051b4a37f7ac738b9716aecba8fc37 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Test RemoveResetInZeroState pass"""
import unittest
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.transpiler import PassManager, transpile
from qiskit.transpiler.passes import RemoveResetInZeroState, DAGFixedPoint
from qiskit.converters import circuit_to_dag
from qiskit.test import QiskitTestCase
class TestRemoveResetInZeroState(QiskitTestCase):
""" Test swap-followed-by-measure optimizations. """
def test_optimize_single_reset(self):
""" Remove a single reset
qr0:--|0>-- ==> qr0:----
"""
qr = QuantumRegister(1, 'qr')
circuit = QuantumCircuit(qr)
circuit.reset(qr)
dag = circuit_to_dag(circuit)
expected = QuantumCircuit(qr)
pass_ = RemoveResetInZeroState()
after = pass_.run(dag)
self.assertEqual(circuit_to_dag(expected), after)
def test_dont_optimize_non_zero_state(self):
""" Do not remove reset if not in a zero state
qr0:--[H]--|0>-- ==> qr0:--[H]--|0>--
"""
qr = QuantumRegister(1, 'qr')
circuit = QuantumCircuit(qr)
circuit.h(qr)
circuit.reset(qr)
dag = circuit_to_dag(circuit)
expected = QuantumCircuit(qr)
expected.h(qr)
expected.reset(qr)
pass_ = RemoveResetInZeroState()
after = pass_.run(dag)
self.assertEqual(circuit_to_dag(expected), after)
def test_optimize_single_reset_in_diff_qubits(self):
""" Remove a single reset in different qubits
qr0:--|0>-- qr0:----
==>
qr1:--|0>-- qr1:----
"""
qr = QuantumRegister(2, 'qr')
circuit = QuantumCircuit(qr)
circuit.reset(qr)
dag = circuit_to_dag(circuit)
expected = QuantumCircuit(qr)
pass_ = RemoveResetInZeroState()
after = pass_.run(dag)
self.assertEqual(circuit_to_dag(expected), after)
class TestRemoveResetInZeroStateFixedPoint(QiskitTestCase):
""" Test RemoveResetInZeroState in a transpiler, using fixed point. """
def test_two_resets(self):
""" Remove two initial resets
qr0:--|0>-|0>-- ==> qr0:----
"""
qr = QuantumRegister(1, 'qr')
circuit = QuantumCircuit(qr)
circuit.reset(qr[0])
circuit.reset(qr[0])
expected = QuantumCircuit(qr)
pass_manager = PassManager()
pass_manager.append(
[RemoveResetInZeroState(), DAGFixedPoint()],
do_while=lambda property_set: not property_set['dag_fixed_point'])
after = transpile(circuit, pass_manager=pass_manager)
self.assertEqual(expected, after)
if __name__ == '__main__':
unittest.main()
| 29.326733 | 78 | 0.619176 |
7944be85b9cf10e60d2283f0bf5f17c1fcce88ba | 54,358 | py | Python | datumaro/components/operations.py | shivam124081/datumaro | 3aa8842a3649ec8e05c0bfe042794823375b812b | [
"MIT"
] | null | null | null | datumaro/components/operations.py | shivam124081/datumaro | 3aa8842a3649ec8e05c0bfe042794823375b812b | [
"MIT"
] | null | null | null | datumaro/components/operations.py | shivam124081/datumaro | 3aa8842a3649ec8e05c0bfe042794823375b812b | [
"MIT"
] | null | null | null | # Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
from collections import OrderedDict
from copy import deepcopy
import hashlib
import logging as log
import attr
import cv2
import numpy as np
from attr import attrib, attrs
from unittest import TestCase
from datumaro.components.cli_plugin import CliPlugin
from datumaro.components.extractor import (
AnnotationType,
Bbox,
Label,
LabelCategories,
PointsCategories,
MaskCategories,
)
from datumaro.components.project import Dataset
from datumaro.util import find, filter_dict
from datumaro.util.attrs_util import ensure_cls, default_if_none
from datumaro.util.annotation_util import (
segment_iou,
bbox_iou,
mean_bbox,
OKS,
find_instances,
max_bbox,
smooth_line,
)
def get_ann_type(anns, t):
return [a for a in anns if a.type == t]
def match_annotations_equal(a, b):
matches = []
a_unmatched = a[:]
b_unmatched = b[:]
for a_ann in a:
for b_ann in b_unmatched:
if a_ann != b_ann:
continue
matches.append((a_ann, b_ann))
a_unmatched.remove(a_ann)
b_unmatched.remove(b_ann)
break
return matches, a_unmatched, b_unmatched
def merge_annotations_equal(a, b):
matches, a_unmatched, b_unmatched = match_annotations_equal(a, b)
return [ann_a for (ann_a, _) in matches] + a_unmatched + b_unmatched
def merge_categories(sources):
categories = {}
for source in sources:
for cat_type, source_cat in source.items():
existing_cat = categories.setdefault(cat_type, source_cat)
if existing_cat != source_cat:
raise NotImplementedError(
"Merging of datasets with different categories is "
"only allowed in 'merge' command."
)
return categories
class MergingStrategy(CliPlugin):
@classmethod
def merge(cls, sources, **options):
instance = cls(**options)
return instance(sources)
def __init__(self, **options):
super().__init__(**options)
self.__dict__["_sources"] = None
def __call__(self, sources):
raise NotImplementedError()
@attrs
class DatasetError:
item_id = attrib()
@attrs
class QualityError(DatasetError):
pass
@attrs
class TooCloseError(QualityError):
a = attrib()
b = attrib()
distance = attrib()
def __str__(self):
return "Item %s: annotations are too close: %s, %s, distance = %s" % (
self.item_id,
self.a,
self.b,
self.distance,
)
@attrs
class WrongGroupError(QualityError):
found = attrib(converter=set)
expected = attrib(converter=set)
group = attrib(converter=list)
def __str__(self):
return (
"Item %s: annotation group has wrong labels: "
"found %s, expected %s, group %s"
% (self.item_id, self.found, self.expected, self.group)
)
@attrs
class MergeError(DatasetError):
sources = attrib(converter=set)
@attrs
class NoMatchingAnnError(MergeError):
ann = attrib()
def __str__(self):
return (
"Item %s: can't find matching annotation "
"in sources %s, annotation is %s" % (self.item_id, self.sources, self.ann)
)
@attrs
class NoMatchingItemError(MergeError):
def __str__(self):
return "Item %s: can't find matching item in sources %s" % (
self.item_id,
self.sources,
)
@attrs
class FailedLabelVotingError(MergeError):
votes = attrib()
ann = attrib(default=None)
def __str__(self):
return "Item %s: label voting failed%s, votes %s, sources %s" % (
self.item_id,
"for ann %s" % self.ann if self.ann else "",
self.votes,
self.sources,
)
@attrs
class FailedAttrVotingError(MergeError):
attr = attrib()
votes = attrib()
ann = attrib()
def __str__(self):
return (
"Item %s: attribute voting failed "
"for ann %s, votes %s, sources %s"
% (self.item_id, self.ann, self.votes, self.sources)
)
@attrs
class IntersectMerge(MergingStrategy):
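    """Merging strategy that intersects several datasets (a summary inferred from
    the code below): items are matched by (id, subset), their annotations are
    clustered per type (IoU-style matching for spatial annotations), and each
    cluster is merged with label/attribute voting; mismatches and failed votes
    are recorded as errors.
    """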
@attrs(repr_ns="IntersectMerge", kw_only=True)
class Conf:
pairwise_dist = attrib(converter=float, default=0.5)
sigma = attrib(converter=list, factory=list)
output_conf_thresh = attrib(converter=float, default=0)
quorum = attrib(converter=int, default=0)
ignored_attributes = attrib(converter=set, factory=set)
def _groups_conveter(value):
result = []
for group in value:
rg = set()
for label in group:
optional = label.endswith("?")
name = label if not optional else label[:-1]
rg.add((name, optional))
result.append(rg)
return result
groups = attrib(converter=_groups_conveter, factory=list)
close_distance = attrib(converter=float, default=0.75)
conf = attrib(converter=ensure_cls(Conf), factory=Conf)
# Error trackers:
errors = attrib(factory=list, init=False)
def add_item_error(self, error, *args, **kwargs):
self.errors.append(error(self._item_id, *args, **kwargs))
# Indexes:
_dataset_map = attrib(init=False) # id(dataset) -> (dataset, index)
_item_map = attrib(init=False) # id(item) -> (item, id(dataset))
_ann_map = attrib(init=False) # id(ann) -> (ann, id(item))
_item_id = attrib(init=False)
_item = attrib(init=False)
# Misc.
_categories = attrib(init=False) # merged categories
def __call__(self, datasets):
self._categories = self._merge_categories([d.categories() for d in datasets])
merged = Dataset(categories=self._categories)
self._check_groups_definition()
item_matches, item_map = self.match_items(datasets)
self._item_map = item_map
self._dataset_map = {id(d): (d, i) for i, d in enumerate(datasets)}
for item_id, items in item_matches.items():
self._item_id = item_id
if len(items) < len(datasets):
missing_sources = set(id(s) for s in datasets) - set(items)
missing_sources = [self._dataset_map[s][1] for s in missing_sources]
self.add_item_error(NoMatchingItemError, missing_sources)
merged.put(self.merge_items(items))
return merged
def get_ann_source(self, ann_id):
return self._item_map[self._ann_map[ann_id][1]][1]
def merge_items(self, items):
self._item = next(iter(items.values()))
self._ann_map = {}
sources = []
for item in items.values():
self._ann_map.update({id(a): (a, id(item)) for a in item.annotations})
sources.append(item.annotations)
log.debug(
"Merging item %s: source annotations %s"
% (self._item_id, list(map(len, sources)))
)
annotations = self.merge_annotations(sources)
annotations = [
a
for a in annotations
if self.conf.output_conf_thresh <= a.attributes.get("score", 1)
]
return self._item.wrap(annotations=annotations)
def merge_annotations(self, sources):
self._make_mergers(sources)
clusters = self._match_annotations(sources)
joined_clusters = sum(clusters.values(), [])
group_map = self._find_cluster_groups(joined_clusters)
annotations = []
for t, clusters in clusters.items():
for cluster in clusters:
self._check_cluster_sources(cluster)
merged_clusters = self._merge_clusters(t, clusters)
for merged_ann, cluster in zip(merged_clusters, clusters):
attributes = self._find_cluster_attrs(cluster, merged_ann)
attributes = {
k: v
for k, v in attributes.items()
if k not in self.conf.ignored_attributes
}
attributes.update(merged_ann.attributes)
merged_ann.attributes = attributes
new_group_id = find(
enumerate(group_map), lambda e: id(cluster) in e[1][0]
)
if new_group_id is None:
new_group_id = 0
else:
new_group_id = new_group_id[0] + 1
merged_ann.group = new_group_id
if self.conf.close_distance:
self._check_annotation_distance(t, merged_clusters)
annotations += merged_clusters
if self.conf.groups:
self._check_groups(annotations)
return annotations
@staticmethod
def match_items(datasets):
item_ids = set((item.id, item.subset) for d in datasets for item in d)
item_map = {} # id(item) -> (item, id(dataset))
matches = OrderedDict()
for (item_id, item_subset) in sorted(item_ids, key=lambda e: e[0]):
items = {}
for d in datasets:
try:
item = d.get(item_id, subset=item_subset)
items[id(d)] = item
item_map[id(item)] = (item, id(d))
except KeyError:
pass
matches[(item_id, item_subset)] = items
return matches, item_map
def _merge_label_categories(self, sources):
same = True
common = None
for src_categories in sources:
src_cat = src_categories.get(AnnotationType.label)
if common is None:
common = src_cat
elif common != src_cat:
same = False
break
if same:
return common
dst_cat = LabelCategories()
for src_id, src_categories in enumerate(sources):
src_cat = src_categories.get(AnnotationType.label)
if src_cat is None:
continue
for src_label in src_cat.items:
dst_label = dst_cat.find(src_label.name)[1]
if dst_label is not None:
if dst_label != src_label:
if (
src_label.parent
and dst_label.parent
and src_label.parent != dst_label.parent
):
raise ValueError(
"Can't merge label category "
"%s (from #%s): "
"parent label conflict: %s vs. %s"
% (
src_label.name,
src_id,
src_label.parent,
dst_label.parent,
)
)
dst_label.parent = dst_label.parent or src_label.parent
dst_label.attributes |= src_label.attributes
else:
pass
else:
dst_cat.add(src_label.name, src_label.parent, src_label.attributes)
return dst_cat
def _merge_point_categories(self, sources, label_cat):
dst_point_cat = PointsCategories()
for src_id, src_categories in enumerate(sources):
src_label_cat = src_categories.get(AnnotationType.label)
src_point_cat = src_categories.get(AnnotationType.points)
if src_label_cat is None or src_point_cat is None:
continue
for src_label_id, src_cat in src_point_cat.items.items():
src_label = src_label_cat.items[src_label_id].name
dst_label_id = label_cat.find(src_label)[0]
dst_cat = dst_point_cat.items.get(dst_label_id)
if dst_cat is not None:
if dst_cat != src_cat:
raise ValueError(
"Can't merge point category for label "
"%s (from #%s): %s vs. %s"
% (src_label, src_id, src_cat, dst_cat)
)
else:
pass
else:
dst_point_cat.add(dst_label_id, src_cat.labels, src_cat.joints)
if len(dst_point_cat.items) == 0:
return None
return dst_point_cat
def _merge_mask_categories(self, sources, label_cat):
dst_mask_cat = MaskCategories()
for src_id, src_categories in enumerate(sources):
src_label_cat = src_categories.get(AnnotationType.label)
src_mask_cat = src_categories.get(AnnotationType.mask)
if src_label_cat is None or src_mask_cat is None:
continue
for src_label_id, src_cat in src_mask_cat.colormap.items():
src_label = src_label_cat.items[src_label_id].name
dst_label_id = label_cat.find(src_label)[0]
dst_cat = dst_mask_cat.colormap.get(dst_label_id)
if dst_cat is not None:
if dst_cat != src_cat:
raise ValueError(
"Can't merge mask category for label "
"%s (from #%s): %s vs. %s"
% (src_label, src_id, src_cat, dst_cat)
)
else:
pass
else:
dst_mask_cat.colormap[dst_label_id] = src_cat
if len(dst_mask_cat.colormap) == 0:
return None
return dst_mask_cat
def _merge_categories(self, sources):
dst_categories = {}
label_cat = self._merge_label_categories(sources)
if label_cat is None:
return dst_categories
dst_categories[AnnotationType.label] = label_cat
points_cat = self._merge_point_categories(sources, label_cat)
if points_cat is not None:
dst_categories[AnnotationType.points] = points_cat
mask_cat = self._merge_mask_categories(sources, label_cat)
if mask_cat is not None:
dst_categories[AnnotationType.mask] = mask_cat
return dst_categories
def _match_annotations(self, sources):
all_by_type = {}
for s in sources:
src_by_type = {}
for a in s:
src_by_type.setdefault(a.type, []).append(a)
for k, v in src_by_type.items():
all_by_type.setdefault(k, []).append(v)
clusters = {}
for k, v in all_by_type.items():
clusters.setdefault(k, []).extend(self._match_ann_type(k, v))
return clusters
def _make_mergers(self, sources):
def _make(c, **kwargs):
kwargs.update(attr.asdict(self.conf))
fields = attr.fields_dict(c)
return c(**{k: v for k, v in kwargs.items() if k in fields}, context=self)
def _for_type(t, **kwargs):
if t is AnnotationType.label:
return _make(LabelMerger, **kwargs)
elif t is AnnotationType.bbox:
return _make(BboxMerger, **kwargs)
elif t is AnnotationType.mask:
return _make(MaskMerger, **kwargs)
elif t is AnnotationType.polygon:
return _make(PolygonMerger, **kwargs)
elif t is AnnotationType.polyline:
return _make(LineMerger, **kwargs)
elif t is AnnotationType.points:
return _make(PointsMerger, **kwargs)
elif t is AnnotationType.caption:
return _make(CaptionsMerger, **kwargs)
else:
raise NotImplementedError("Type %s is not supported" % t)
instance_map = {}
for s in sources:
s_instances = find_instances(s)
for inst in s_instances:
inst_bbox = max_bbox(
[
a
for a in inst
if a.type
in {
AnnotationType.polygon,
AnnotationType.mask,
AnnotationType.bbox,
}
]
)
for ann in inst:
instance_map[id(ann)] = [inst, inst_bbox]
self._mergers = {
t: _for_type(t, instance_map=instance_map) for t in AnnotationType
}
def _match_ann_type(self, t, sources):
return self._mergers[t].match_annotations(sources)
def _merge_clusters(self, t, clusters):
return self._mergers[t].merge_clusters(clusters)
@staticmethod
def _find_cluster_groups(clusters):
cluster_groups = []
visited = set()
for a_idx, cluster_a in enumerate(clusters):
if a_idx in visited:
continue
visited.add(a_idx)
cluster_group = {id(cluster_a)}
# find segment groups in the cluster group
a_groups = set(ann.group for ann in cluster_a)
for cluster_b in clusters[a_idx + 1 :]:
b_groups = set(ann.group for ann in cluster_b)
if a_groups & b_groups:
a_groups |= b_groups
# now we know all the segment groups in this cluster group
# so we can find adjacent clusters
for b_idx, cluster_b in enumerate(clusters[a_idx + 1 :]):
b_idx = a_idx + 1 + b_idx
b_groups = set(ann.group for ann in cluster_b)
if a_groups & b_groups:
cluster_group.add(id(cluster_b))
visited.add(b_idx)
if a_groups == {0}:
continue # skip annotations without a group
cluster_groups.append((cluster_group, a_groups))
return cluster_groups
def _find_cluster_attrs(self, cluster, ann):
quorum = self.conf.quorum or 0
# TODO: when attribute types are implemented, add linear
# interpolation for contiguous values
attr_votes = {} # name -> { value: score , ... }
for s in cluster:
for name, value in s.attributes.items():
votes = attr_votes.get(name, {})
votes[value] = 1 + votes.get(value, 0)
attr_votes[name] = votes
attributes = {}
for name, votes in attr_votes.items():
winner, count = max(votes.items(), key=lambda e: e[1])
if count < quorum:
if sum(votes.values()) < quorum:
# blame provokers
missing_sources = set(
self.get_ann_source(id(a))
for a in cluster
if s.attributes.get(name) == winner
)
else:
# blame outliers
missing_sources = set(
self.get_ann_source(id(a))
for a in cluster
if s.attributes.get(name) != winner
)
missing_sources = [self._dataset_map[s][1] for s in missing_sources]
self.add_item_error(
FailedAttrVotingError, missing_sources, name, votes, ann
)
continue
attributes[name] = winner
return attributes
def _check_cluster_sources(self, cluster):
if len(cluster) == len(self._dataset_map):
return
def _has_item(s):
try:
item = self._dataset_map[s][0].get(*self._item_id)
if len(item.annotations) == 0:
return False
return True
except KeyError:
return False
missing_sources = set(self._dataset_map) - set(
self.get_ann_source(id(a)) for a in cluster
)
missing_sources = [
self._dataset_map[s][1] for s in missing_sources if _has_item(s)
]
if missing_sources:
self.add_item_error(NoMatchingAnnError, missing_sources, cluster[0])
def _check_annotation_distance(self, t, annotations):
for a_idx, a_ann in enumerate(annotations):
for b_ann in annotations[a_idx + 1 :]:
d = self._mergers[t].distance(a_ann, b_ann)
if self.conf.close_distance < d:
self.add_item_error(TooCloseError, a_ann, b_ann, d)
def _check_groups(self, annotations):
check_groups = []
for check_group_raw in self.conf.groups:
check_group = set(l[0] for l in check_group_raw)
optional = set(l[0] for l in check_group_raw if l[1])
check_groups.append((check_group, optional))
def _check_group(group_labels, group):
for check_group, optional in check_groups:
common = check_group & group_labels
real_miss = check_group - common - optional
extra = group_labels - check_group
if common and (extra or real_miss):
self.add_item_error(
WrongGroupError, group_labels, check_group, group
)
break
groups = find_instances(annotations)
for group in groups:
group_labels = set()
for ann in group:
if not hasattr(ann, "label"):
continue
label = self._get_label_name(ann.label)
if ann.group:
group_labels.add(label)
else:
_check_group({label}, [ann])
if not group_labels:
continue
_check_group(group_labels, group)
def _get_label_name(self, label_id):
if label_id is None:
return None
return self._categories[AnnotationType.label].items[label_id].name
def _get_label_id(self, label):
return self._categories[AnnotationType.label].find(label)[0]
def _get_src_label_name(self, ann, label_id):
if label_id is None:
return None
item_id = self._ann_map[id(ann)][1]
dataset_id = self._item_map[item_id][1]
return (
self._dataset_map[dataset_id][0]
.categories()[AnnotationType.label]
.items[label_id]
.name
)
def _get_any_label_name(self, ann, label_id):
if label_id is None:
return None
try:
return self._get_src_label_name(ann, label_id)
except KeyError:
return self._get_label_name(label_id)
def _check_groups_definition(self):
for group in self.conf.groups:
for label, _ in group:
_, entry = self._categories[AnnotationType.label].find(label)
if entry is None:
raise ValueError(
"Datasets do not contain "
"label '%s', available labels %s"
% (
label,
[
i.name
for i in self._categories[AnnotationType.label].items
],
)
)
@attrs(kw_only=True)
class AnnotationMatcher:
_context = attrib(type=IntersectMerge, default=None)
def match_annotations(self, sources):
raise NotImplementedError()
@attrs
class LabelMatcher(AnnotationMatcher):
def distance(self, a, b):
a_label = self._context._get_any_label_name(a, a.label)
b_label = self._context._get_any_label_name(b, b.label)
return a_label == b_label
def match_annotations(self, sources):
return [sum(sources, [])]
@attrs(kw_only=True)
class _ShapeMatcher(AnnotationMatcher):
pairwise_dist = attrib(converter=float, default=0.9)
cluster_dist = attrib(converter=float, default=-1.0)
def match_annotations(self, sources):
distance = self.distance
label_matcher = self.label_matcher
pairwise_dist = self.pairwise_dist
cluster_dist = self.cluster_dist
if cluster_dist < 0:
cluster_dist = pairwise_dist
id_segm = {id(a): (a, id(s)) for s in sources for a in s}
def _is_close_enough(cluster, extra_id):
            # check that adding this segment will not break
            # the IoU of the whole cluster
b = id_segm[extra_id][0]
for a_id in cluster:
a = id_segm[a_id][0]
if distance(a, b) < cluster_dist:
return False
return True
def _has_same_source(cluster, extra_id):
b = id_segm[extra_id][1]
for a_id in cluster:
a = id_segm[a_id][1]
if a == b:
return True
return False
# match segments in sources, pairwise
adjacent = {i: [] for i in id_segm} # id(sgm) -> [id(adj_sgm1), ...]
for a_idx, src_a in enumerate(sources):
for src_b in sources[a_idx + 1 :]:
matches, _, _, _ = match_segments(
src_a,
src_b,
dist_thresh=pairwise_dist,
distance=distance,
label_matcher=label_matcher,
)
for a, b in matches:
adjacent[id(a)].append(id(b))
# join all segments into matching clusters
clusters = []
visited = set()
for cluster_idx in adjacent:
if cluster_idx in visited:
continue
cluster = set()
to_visit = {cluster_idx}
while to_visit:
c = to_visit.pop()
cluster.add(c)
visited.add(c)
for i in adjacent[c]:
if i in visited:
continue
if 0 < cluster_dist and not _is_close_enough(cluster, i):
continue
if _has_same_source(cluster, i):
continue
to_visit.add(i)
clusters.append([id_segm[i][0] for i in cluster])
return clusters
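    # Rough walk-through (illustrative): if three sources each contribute one box around
    # the same object, the pairwise matching links them in `adjacent`, and the visit loop
    # above merges those links into a single cluster. `_has_same_source` keeps two boxes
    # from the same source out of one cluster, while `_is_close_enough` can reject a
    # candidate whose overlap with the cluster falls below `cluster_dist`.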
@staticmethod
def distance(a, b):
return segment_iou(a, b)
def label_matcher(self, a, b):
a_label = self._context._get_any_label_name(a, a.label)
b_label = self._context._get_any_label_name(b, b.label)
return a_label == b_label
@attrs
class BboxMatcher(_ShapeMatcher):
pass
@attrs
class PolygonMatcher(_ShapeMatcher):
pass
@attrs
class MaskMatcher(_ShapeMatcher):
pass
@attrs(kw_only=True)
class PointsMatcher(_ShapeMatcher):
sigma = attrib(type=list, default=None)
instance_map = attrib(converter=dict)
def distance(self, a, b):
a_bbox = self.instance_map[id(a)][1]
b_bbox = self.instance_map[id(b)][1]
if bbox_iou(a_bbox, b_bbox) <= 0:
return 0
bbox = mean_bbox([a_bbox, b_bbox])
return OKS(a, b, sigma=self.sigma, bbox=bbox)
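    # Note: the distance is gated by the instances' bounding-box IoU and only then falls
    # back to OKS (object keypoint similarity), so two skeletons are compared point-wise
    # only when their enclosing instances overlap at all; sigma=None presumably lets OKS
    # fall back to its own per-point defaults.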
@attrs
class LineMatcher(_ShapeMatcher):
@staticmethod
def distance(a, b):
a_bbox = a.get_bbox()
b_bbox = b.get_bbox()
bbox = max_bbox([a_bbox, b_bbox])
area = bbox[2] * bbox[3]
if not area:
return 1
# compute inter-line area, normalize by common bbox
point_count = max(max(len(a.points) // 2, len(b.points) // 2), 5)
a, sa = smooth_line(a.points, point_count)
b, sb = smooth_line(b.points, point_count)
dists = np.linalg.norm(a - b, axis=1)
dists = (dists[:-1] + dists[1:]) * 0.5
s = np.sum(dists) * 0.5 * (sa + sb) / area
return abs(1 - s)
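    # In effect this is a similarity score: identical polylines give 1, and the value
    # shrinks as the approximate area between the two resampled lines grows relative to
    # their common bounding box; resampling both lines to the same point count is what
    # makes the point-wise norm comparison meaningful.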
@attrs
class CaptionsMatcher(AnnotationMatcher):
def match_annotations(self, sources):
raise NotImplementedError()
@attrs(kw_only=True)
class AnnotationMerger:
def merge_clusters(self, clusters):
raise NotImplementedError()
@attrs(kw_only=True)
class LabelMerger(AnnotationMerger, LabelMatcher):
quorum = attrib(converter=int, default=0)
def merge_clusters(self, clusters):
assert len(clusters) <= 1
if len(clusters) == 0:
return []
votes = {} # label -> score
for ann in clusters[0]:
label = self._context._get_src_label_name(ann, ann.label)
votes[label] = 1 + votes.get(label, 0)
merged = []
for label, count in votes.items():
if count < self.quorum:
sources = set(
self.get_ann_source(id(a))
for a in clusters[0]
if label
not in [self._context._get_src_label_name(l, l.label) for l in a]
)
sources = [self._context._dataset_map[s][1] for s in sources]
self._context.add_item_error(FailedLabelVotingError, sources, votes)
continue
merged.append(
Label(
self._context._get_label_id(label),
attributes={"score": count / len(self._context._dataset_map)},
)
)
return merged
@attrs(kw_only=True)
class _ShapeMerger(AnnotationMerger, _ShapeMatcher):
quorum = attrib(converter=int, default=0)
def merge_clusters(self, clusters):
merged = []
for cluster in clusters:
label, label_score = self.find_cluster_label(cluster)
shape, shape_score = self.merge_cluster_shape(cluster)
shape.z_order = max(cluster, key=lambda a: a.z_order).z_order
shape.label = label
shape.attributes["score"] = (
label_score * shape_score if label is not None else shape_score
)
merged.append(shape)
return merged
def find_cluster_label(self, cluster):
votes = {}
for s in cluster:
label = self._context._get_src_label_name(s, s.label)
state = votes.setdefault(label, [0, 0])
state[0] += s.attributes.get("score", 1.0)
state[1] += 1
label, (score, count) = max(votes.items(), key=lambda e: e[1][0])
if count < self.quorum:
self._context.add_item_error(FailedLabelVotingError, votes)
label = None
score = score / len(self._context._dataset_map)
label = self._context._get_label_id(label)
return label, score
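    # Sketch of the weighting (illustrative): every annotation votes for its label with
    # its "score" attribute (1.0 when absent); the winner must be backed by at least
    # `quorum` annotations, and the returned score is the summed vote divided by the
    # number of merged datasets, i.e. a rough cross-dataset agreement measure.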
@staticmethod
def _merge_cluster_shape_mean_box_nearest(cluster):
mbbox = Bbox(*mean_bbox(cluster))
dist = (segment_iou(mbbox, s) for s in cluster)
nearest_pos, _ = max(enumerate(dist), key=lambda e: e[1])
return cluster[nearest_pos]
def merge_cluster_shape(self, cluster):
shape = self._merge_cluster_shape_mean_box_nearest(cluster)
shape_score = sum(max(0, self.distance(shape, s)) for s in cluster) / len(
cluster
)
return shape, shape_score
@attrs
class BboxMerger(_ShapeMerger, BboxMatcher):
pass
@attrs
class PolygonMerger(_ShapeMerger, PolygonMatcher):
pass
@attrs
class MaskMerger(_ShapeMerger, MaskMatcher):
pass
@attrs
class PointsMerger(_ShapeMerger, PointsMatcher):
pass
@attrs
class LineMerger(_ShapeMerger, LineMatcher):
pass
@attrs
class CaptionsMerger(AnnotationMerger, CaptionsMatcher):
pass
def match_segments(
a_segms,
b_segms,
distance=segment_iou,
dist_thresh=1.0,
label_matcher=lambda a, b: a.label == b.label,
):
assert callable(distance), distance
assert callable(label_matcher), label_matcher
a_segms.sort(key=lambda ann: 1 - ann.attributes.get("score", 1))
b_segms.sort(key=lambda ann: 1 - ann.attributes.get("score", 1))
# a_matches: indices of b_segms matched to a bboxes
# b_matches: indices of a_segms matched to b bboxes
a_matches = -np.ones(len(a_segms), dtype=int)
b_matches = -np.ones(len(b_segms), dtype=int)
distances = np.array([[distance(a, b) for b in b_segms] for a in a_segms])
    # matches: boxes we managed to match completely
    # mispred: boxes we matched but whose labels disagree
matches = []
mispred = []
for a_idx, a_segm in enumerate(a_segms):
if len(b_segms) == 0:
break
matched_b = -1
max_dist = -1
b_indices = np.argsort(
[not label_matcher(a_segm, b_segm) for b_segm in b_segms], kind="stable"
) # prioritize those with same label, keep score order
for b_idx in b_indices:
if 0 <= b_matches[b_idx]: # assign a_segm with max conf
continue
d = distances[a_idx, b_idx]
if d < dist_thresh or d <= max_dist:
continue
max_dist = d
matched_b = b_idx
if matched_b < 0:
continue
a_matches[a_idx] = matched_b
b_matches[matched_b] = a_idx
b_segm = b_segms[matched_b]
if label_matcher(a_segm, b_segm):
matches.append((a_segm, b_segm))
else:
mispred.append((a_segm, b_segm))
    # *_unmatched: boxes of (*) we failed to match
a_unmatched = [a_segms[i] for i, m in enumerate(a_matches) if m < 0]
b_unmatched = [b_segms[i] for i, m in enumerate(b_matches) if m < 0]
return matches, mispred, a_unmatched, b_unmatched
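# Hedged usage sketch (the variable names below are illustrative, not from this file):
#
#   matches, mispred, a_unmatched, b_unmatched = match_segments(
#       gt_boxes, pred_boxes, dist_thresh=0.5)
#
# `matches` holds pairs overlapping above the threshold with agreeing labels, `mispred`
# holds overlapping pairs whose labels differ, and the *_unmatched lists hold leftovers.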
def mean_std(dataset):
"""
Computes unbiased mean and std. dev. for dataset images, channel-wise.
"""
# Use an online algorithm to:
# - handle different image sizes
# - avoid cancellation problem
if len(dataset) == 0:
return [0, 0, 0], [0, 0, 0]
stats = np.empty((len(dataset), 2, 3), dtype=np.double)
counts = np.empty(len(dataset), dtype=np.uint32)
mean = lambda i, s: s[i][0]
var = lambda i, s: s[i][1]
for i, item in enumerate(dataset):
counts[i] = np.prod(item.image.size)
image = item.image.data
if len(image.shape) == 2:
image = image[:, :, np.newaxis]
else:
image = image[:, :, :3]
# opencv is much faster than numpy here
cv2.meanStdDev(
image.astype(np.double) / 255, mean=mean(i, stats), stddev=var(i, stats)
)
# make variance unbiased
np.multiply(
np.square(stats[:, 1]), (counts / (counts - 1))[:, np.newaxis], out=stats[:, 1]
)
_, mean, var = StatsCounter().compute_stats(stats, counts, mean, var)
return mean * 255, np.sqrt(var) * 255
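# Hedged usage sketch (variable names are illustrative):
#
#   mean, std = mean_std(dataset)  # each a 3-element per-channel value scaled to 0..255
#
# The channel order follows the stored image arrays (typically BGR when images come from
# OpenCV), which is why compute_image_statistics() below reverses the values it reports.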
class StatsCounter:
# Implements online parallel computation of sample variance
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
    # Needed to avoid catastrophic cancellation in floating point computations
@staticmethod
def pairwise_stats(count_a, mean_a, var_a, count_b, mean_b, var_b):
delta = mean_b - mean_a
m_a = var_a * (count_a - 1)
m_b = var_b * (count_b - 1)
M2 = m_a + m_b + delta ** 2 * count_a * count_b / (count_a + count_b)
return (
count_a + count_b,
mean_a * 0.5 + mean_b * 0.5,
M2 / (count_a + count_b - 1),
)
# stats = float array of shape N, 2 * d, d = dimensions of values
# count = integer array of shape N
# mean_accessor = function(idx, stats) to retrieve element mean
# variance_accessor = function(idx, stats) to retrieve element variance
# Recursively computes total count, mean and variance, does O(log(N)) calls
@staticmethod
def compute_stats(stats, counts, mean_accessor, variance_accessor):
m = mean_accessor
v = variance_accessor
n = len(stats)
if n == 1:
return counts[0], m(0, stats), v(0, stats)
if n == 2:
return __class__.pairwise_stats(
counts[0], m(0, stats), v(0, stats), counts[1], m(1, stats), v(1, stats)
)
h = n // 2
return __class__.pairwise_stats(
*__class__.compute_stats(stats[:h], counts[:h], m, v),
*__class__.compute_stats(stats[h:], counts[h:], m, v)
)
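# Worked example (illustrative, equal-sized chunks): for A = [1, 3] (count 2, mean 2,
# sample variance 2) and B = [5, 7] (count 2, mean 6, sample variance 2),
# pairwise_stats(2, 2, 2, 2, 6, 2) yields count 4, mean 4 and variance 20/3, matching
# the sample variance of [1, 3, 5, 7]. The simple 0.5/0.5 mean averaging above is exact
# only when the two chunks hold the same number of values.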
def compute_image_statistics(dataset):
stats = {"dataset": {}, "subsets": {}}
def _extractor_stats(extractor):
available = True
for item in extractor:
if not (item.has_image and item.image.has_data):
available = False
log.warn("Item %s has no image. Image stats won't be computed", item.id)
break
stats = {
"images count": len(extractor),
}
if available:
mean, std = mean_std(extractor)
stats.update(
{
"image mean": [float(n) for n in mean[::-1]],
"image std": [float(n) for n in std[::-1]],
}
)
else:
stats.update(
{
"image mean": "n/a",
"image std": "n/a",
}
)
return stats
stats["dataset"].update(_extractor_stats(dataset))
subsets = dataset.subsets() or [None]
if subsets and 0 < len([s for s in subsets if s]):
for subset_name in subsets:
stats["subsets"][subset_name] = _extractor_stats(
dataset.get_subset(subset_name)
)
return stats
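# The returned structure is roughly (assumed shape, for illustration):
#   {"dataset": {"images count": N, "image mean": [r, g, b], "image std": [r, g, b]},
#    "subsets": {"train": {...}, ...}}
# with the mean/std entries replaced by "n/a" whenever an item lacks image data.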
def compute_ann_statistics(dataset):
labels = dataset.categories().get(AnnotationType.label)
def get_label(ann):
return labels.items[ann.label].name if ann.label is not None else None
stats = {
"images count": len(dataset),
"annotations count": 0,
"unannotated images count": 0,
"unannotated images": [],
"annotations by type": {
t.name: {
"count": 0,
}
for t in AnnotationType
},
"annotations": {},
}
by_type = stats["annotations by type"]
attr_template = {
"count": 0,
"values count": 0,
"values present": set(),
"distribution": {}, # value -> (count, total%)
}
label_stat = {
"count": 0,
"distribution": {
l.name: [0, 0] for l in labels.items
}, # label -> (count, total%)
"attributes": {},
}
stats["annotations"]["labels"] = label_stat
segm_stat = {
"avg. area": 0,
"area distribution": [], # a histogram with 10 bins
# (min, min+10%), ..., (min+90%, max) -> (count, total%)
"pixel distribution": {
l.name: [0, 0] for l in labels.items
}, # label -> (count, total%)
}
stats["annotations"]["segments"] = segm_stat
segm_areas = []
pixel_dist = segm_stat["pixel distribution"]
total_pixels = 0
for item in dataset:
if len(item.annotations) == 0:
stats["unannotated images"].append(item.id)
continue
for ann in item.annotations:
by_type[ann.type.name]["count"] += 1
if not hasattr(ann, "label") or ann.label is None:
continue
if ann.type in {
AnnotationType.mask,
AnnotationType.polygon,
AnnotationType.bbox,
}:
area = ann.get_area()
segm_areas.append(area)
pixel_dist[get_label(ann)][0] += int(area)
label_stat["count"] += 1
label_stat["distribution"][get_label(ann)][0] += 1
for name, value in ann.attributes.items():
if name.lower() in {
"occluded",
"visibility",
"score",
"id",
"track_id",
}:
continue
attrs_stat = label_stat["attributes"].setdefault(
name, deepcopy(attr_template)
)
attrs_stat["count"] += 1
attrs_stat["values present"].add(str(value))
attrs_stat["distribution"].setdefault(str(value), [0, 0])[0] += 1
stats["annotations count"] = sum(
t["count"] for t in stats["annotations by type"].values()
)
stats["unannotated images count"] = len(stats["unannotated images"])
for label_info in label_stat["distribution"].values():
label_info[1] = label_info[0] / (label_stat["count"] or 1)
for label_attr in label_stat["attributes"].values():
label_attr["values count"] = len(label_attr["values present"])
label_attr["values present"] = sorted(label_attr["values present"])
for attr_info in label_attr["distribution"].values():
attr_info[1] = attr_info[0] / (label_attr["count"] or 1)
# numpy.sum might be faster, but could overflow with large datasets.
# Python's int can transparently mutate to be of indefinite precision (long)
total_pixels = sum(int(a) for a in segm_areas)
segm_stat["avg. area"] = total_pixels / (len(segm_areas) or 1.0)
for label_info in segm_stat["pixel distribution"].values():
label_info[1] = label_info[0] / (total_pixels or 1)
if len(segm_areas) != 0:
hist, bins = np.histogram(segm_areas)
segm_stat["area distribution"] = [
{
"min": float(bin_min),
"max": float(bin_max),
"count": int(c),
"percent": int(c) / len(segm_areas),
}
for c, (bin_min, bin_max) in zip(hist, zip(bins[:-1], bins[1:]))
]
return stats
@attrs
class DistanceComparator:
iou_threshold = attrib(converter=float, default=0.5)
@staticmethod
def match_datasets(a, b):
a_items = set((item.id, item.subset) for item in a)
b_items = set((item.id, item.subset) for item in b)
matches = a_items & b_items
a_unmatched = a_items - b_items
b_unmatched = b_items - a_items
return matches, a_unmatched, b_unmatched
@staticmethod
def match_classes(a, b):
a_label_cat = a.categories().get(AnnotationType.label, LabelCategories())
b_label_cat = b.categories().get(AnnotationType.label, LabelCategories())
a_labels = set(c.name for c in a_label_cat)
b_labels = set(c.name for c in b_label_cat)
matches = a_labels & b_labels
a_unmatched = a_labels - b_labels
b_unmatched = b_labels - a_labels
return matches, a_unmatched, b_unmatched
def match_annotations(self, item_a, item_b):
        return {
            t: self._match_ann_type(t, item_a, item_b)
            for t in (AnnotationType.label, AnnotationType.bbox, AnnotationType.polygon,
                      AnnotationType.mask, AnnotationType.points, AnnotationType.polyline)
        }
def _match_ann_type(self, t, *args):
# pylint: disable=no-value-for-parameter
if t == AnnotationType.label:
return self.match_labels(*args)
elif t == AnnotationType.bbox:
return self.match_boxes(*args)
elif t == AnnotationType.polygon:
return self.match_polygons(*args)
elif t == AnnotationType.mask:
return self.match_masks(*args)
elif t == AnnotationType.points:
return self.match_points(*args)
elif t == AnnotationType.polyline:
return self.match_lines(*args)
# pylint: enable=no-value-for-parameter
else:
raise NotImplementedError("Unexpected annotation type %s" % t)
@staticmethod
def _get_ann_type(t, item):
return get_ann_type(item.annotations, t)
def match_labels(self, item_a, item_b):
a_labels = set(
a.label for a in self._get_ann_type(AnnotationType.label, item_a)
)
b_labels = set(
a.label for a in self._get_ann_type(AnnotationType.label, item_b)
)
matches = a_labels & b_labels
a_unmatched = a_labels - b_labels
b_unmatched = b_labels - a_labels
return matches, a_unmatched, b_unmatched
def _match_segments(self, t, item_a, item_b):
a_boxes = self._get_ann_type(t, item_a)
b_boxes = self._get_ann_type(t, item_b)
return match_segments(a_boxes, b_boxes, dist_thresh=self.iou_threshold)
def match_polygons(self, item_a, item_b):
return self._match_segments(AnnotationType.polygon, item_a, item_b)
def match_masks(self, item_a, item_b):
return self._match_segments(AnnotationType.mask, item_a, item_b)
def match_boxes(self, item_a, item_b):
return self._match_segments(AnnotationType.bbox, item_a, item_b)
def match_points(self, item_a, item_b):
a_points = self._get_ann_type(AnnotationType.points, item_a)
b_points = self._get_ann_type(AnnotationType.points, item_b)
instance_map = {}
for s in [item_a.annotations, item_b.annotations]:
s_instances = find_instances(s)
for inst in s_instances:
inst_bbox = max_bbox(inst)
for ann in inst:
instance_map[id(ann)] = [inst, inst_bbox]
matcher = PointsMatcher(instance_map=instance_map)
return match_segments(
a_points,
b_points,
dist_thresh=self.iou_threshold,
distance=matcher.distance,
)
def match_lines(self, item_a, item_b):
a_lines = self._get_ann_type(AnnotationType.polyline, item_a)
b_lines = self._get_ann_type(AnnotationType.polyline, item_b)
matcher = LineMatcher()
return match_segments(
a_lines, b_lines, dist_thresh=self.iou_threshold, distance=matcher.distance
)
def match_items_by_id(a, b):
a_items = set((item.id, item.subset) for item in a)
b_items = set((item.id, item.subset) for item in b)
matches = a_items & b_items
matches = [([m], [m]) for m in matches]
a_unmatched = a_items - b_items
b_unmatched = b_items - a_items
return matches, a_unmatched, b_unmatched
def match_items_by_image_hash(a, b):
def _hash(item):
if not item.image.has_data:
log.warning(
"Image (%s, %s) has no image " "data, counted as unmatched",
item.id,
item.subset,
)
return None
return hashlib.md5(item.image.data.tobytes()).hexdigest()
def _build_hashmap(source):
d = {}
for item in source:
h = _hash(item)
if h is None:
h = str(id(item)) # anything unique
d.setdefault(h, []).append((item.id, item.subset))
return d
a_hash = _build_hashmap(a)
b_hash = _build_hashmap(b)
a_items = set(a_hash)
b_items = set(b_hash)
matches = a_items & b_items
a_unmatched = a_items - b_items
b_unmatched = b_items - a_items
matches = [(a_hash[h], b_hash[h]) for h in matches]
a_unmatched = set(i for h in a_unmatched for i in a_hash[h])
b_unmatched = set(i for h in b_unmatched for i in b_hash[h])
return matches, a_unmatched, b_unmatched
@attrs
class ExactComparator:
match_images = attrib(kw_only=True, type=bool, default=False)
ignored_fields = attrib(kw_only=True, factory=set, validator=default_if_none(set))
ignored_attrs = attrib(kw_only=True, factory=set, validator=default_if_none(set))
ignored_item_attrs = attrib(
kw_only=True, factory=set, validator=default_if_none(set)
)
_test = attrib(init=False, type=TestCase)
errors = attrib(init=False, type=list)
def __attrs_post_init__(self):
self._test = TestCase()
self._test.maxDiff = None
def _match_items(self, a, b):
if self.match_images:
return match_items_by_image_hash(a, b)
else:
return match_items_by_id(a, b)
def _compare_categories(self, a, b):
test = self._test
errors = self.errors
try:
test.assertEqual(
sorted(a, key=lambda t: t.value), sorted(b, key=lambda t: t.value)
)
except AssertionError as e:
errors.append({"type": "categories", "message": str(e)})
if AnnotationType.label in a:
try:
test.assertEqual(
a[AnnotationType.label].items,
b[AnnotationType.label].items,
)
except AssertionError as e:
errors.append({"type": "labels", "message": str(e)})
if AnnotationType.mask in a:
try:
test.assertEqual(
a[AnnotationType.mask].colormap,
b[AnnotationType.mask].colormap,
)
except AssertionError as e:
errors.append({"type": "colormap", "message": str(e)})
if AnnotationType.points in a:
try:
test.assertEqual(
a[AnnotationType.points].items,
b[AnnotationType.points].items,
)
except AssertionError as e:
errors.append({"type": "points", "message": str(e)})
def _compare_annotations(self, a, b):
ignored_fields = self.ignored_fields
ignored_attrs = self.ignored_attrs
a_fields = {k: None for k in vars(a) if k in ignored_fields}
b_fields = {k: None for k in vars(b) if k in ignored_fields}
if "attributes" not in ignored_fields:
a_fields["attributes"] = filter_dict(a.attributes, ignored_attrs)
b_fields["attributes"] = filter_dict(b.attributes, ignored_attrs)
result = a.wrap(**a_fields) == b.wrap(**b_fields)
return result
def _compare_items(self, item_a, item_b):
test = self._test
a_id = (item_a.id, item_a.subset)
b_id = (item_b.id, item_b.subset)
matched = []
unmatched = []
errors = []
try:
test.assertEqual(
filter_dict(item_a.attributes, self.ignored_item_attrs),
filter_dict(item_b.attributes, self.ignored_item_attrs),
)
except AssertionError as e:
errors.append(
{"type": "item_attr", "a_item": a_id, "b_item": b_id, "message": str(e)}
)
b_annotations = item_b.annotations[:]
for ann_a in item_a.annotations:
ann_b_candidates = [x for x in item_b.annotations if x.type == ann_a.type]
ann_b = find(
enumerate(
self._compare_annotations(ann_a, x) for x in ann_b_candidates
),
lambda x: x[1],
)
if ann_b is None:
unmatched.append(
{
"item": a_id,
"source": "a",
"ann": str(ann_a),
}
)
continue
else:
ann_b = ann_b_candidates[ann_b[0]]
b_annotations.remove(ann_b) # avoid repeats
matched.append(
{"a_item": a_id, "b_item": b_id, "a": str(ann_a), "b": str(ann_b)}
)
for ann_b in b_annotations:
unmatched.append({"item": b_id, "source": "b", "ann": str(ann_b)})
return matched, unmatched, errors
def compare_datasets(self, a, b):
self.errors = []
errors = self.errors
self._compare_categories(a.categories(), b.categories())
matched = []
unmatched = []
matches, a_unmatched, b_unmatched = self._match_items(a, b)
if a.categories().get(AnnotationType.label) != b.categories().get(
AnnotationType.label
):
return matched, unmatched, a_unmatched, b_unmatched, errors
_dist = lambda s: len(s[1]) + len(s[2])
for a_ids, b_ids in matches:
# build distance matrix
match_status = {} # (a_id, b_id): [matched, unmatched, errors]
a_matches = {a_id: None for a_id in a_ids}
b_matches = {b_id: None for b_id in b_ids}
for a_id in a_ids:
item_a = a.get(*a_id)
candidates = {}
for b_id in b_ids:
item_b = b.get(*b_id)
i_m, i_um, i_err = self._compare_items(item_a, item_b)
candidates[b_id] = [i_m, i_um, i_err]
if len(i_um) == 0:
a_matches[a_id] = b_id
b_matches[b_id] = a_id
matched.extend(i_m)
errors.extend(i_err)
break
match_status[a_id] = candidates
# assign
for a_id in a_ids:
if len(b_ids) == 0:
break
# find the closest, ignore already assigned
matched_b = a_matches[a_id]
if matched_b is not None:
continue
min_dist = -1
for b_id in b_ids:
if b_matches[b_id] is not None:
continue
d = _dist(match_status[a_id][b_id])
if d < min_dist and 0 <= min_dist:
continue
min_dist = d
matched_b = b_id
if matched_b is None:
continue
a_matches[a_id] = matched_b
b_matches[matched_b] = a_id
m = match_status[a_id][matched_b]
matched.extend(m[0])
unmatched.extend(m[1])
errors.extend(m[2])
a_unmatched |= set(a_id for a_id, m in a_matches.items() if not m)
b_unmatched |= set(b_id for b_id, m in b_matches.items() if not m)
return matched, unmatched, a_unmatched, b_unmatched, errors
| 32.627851 | 90 | 0.556735 |
7944bed12193530c79179bad0d0c8a42c5887c38 | 5,882 | py | Python | samuilivanov23-chitanka/import_into_database.py | samuilivanov23/training-projects | 9791f359da9589524231c9c9d4d97f2bfbde71f3 | [
"Apache-2.0"
] | null | null | null | samuilivanov23-chitanka/import_into_database.py | samuilivanov23/training-projects | 9791f359da9589524231c9c9d4d97f2bfbde71f3 | [
"Apache-2.0"
] | null | null | null | samuilivanov23-chitanka/import_into_database.py | samuilivanov23/training-projects | 9791f359da9589524231c9c9d4d97f2bfbde71f3 | [
"Apache-2.0"
] | null | null | null | import psycopg2, multiprocessing
from dbconfig import chitanka_dbname, chitanka_dbuser, chitanka_dbpassword
import os
import re, nltk
from pathlib import Path
import requests, os,re
import glob
def importData(start, end):
my_dirs = glob.glob("../books/*")
my_dirs.sort()
try:
connection = psycopg2.connect("dbname='" + chitanka_dbname +
"' user='" + chitanka_dbuser +
"' password='" + chitanka_dbpassword + "'")
connection.autocommit = True
cur = connection.cursor()
except Exception as e:
print(e)
for my_dir in my_dirs[start:end]:
        author_name = re.sub("\-", " ", my_dir[9:])  # skip the leading "../books/" (9 chars) so only the author name remains
try:
sql = 'insert into authors (name) values(%s) RETURNING id'
cur.execute(sql, (author_name, ))
author_id = cur.fetchone()[0]
connection.commit()
except Exception as e:
print(e)
folder_location = "../books/" + my_dir[9:]
my_files = os.listdir("../books/" + my_dir[9:])
author_words_count = dict()
for my_file in my_files:
if my_file.endswith(".txt"):
names = my_file.split("-")
book_name = ""
for name in names[1:]:
book_name += name + " "
book_name.strip()
book_name = book_name[:len(book_name) - 5] #to skip .txt
file_location = os.path.join(folder_location, my_file)
f = open(file_location, encoding='utf-8', mode='r')
file_content = f.read()
f.close()
current_file_words = list(set(re.findall("[а-яА-Я]{3,}", file_content)))
sentences = nltk.tokenize.sent_tokenize(file_content)
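                # Note: the regex keeps only Cyrillic words of three or more letters,
                # and sent_tokenize assumes NLTK's 'punkt' model is already available
                # (e.g. downloaded beforehand with nltk.download('punkt')).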
try:
words_in_book = len(current_file_words)
sql = 'insert into books (name, words_count, author_id) values(%s, %s, %s) RETURNING id'
cur.execute(sql, (book_name, words_in_book, author_id))
book_id = cur.fetchone()[0]
connection.commit()
except Exception as e:
print(e)
try:
for sentence in sentences[3:len(sentences) - 4]:
if not sentence == "":
words_in_sentence = len(re.findall("[а-яА-Я]{3,}", sentence))
sql = 'insert into sentences (sentence, words_count, book_id) values(%s, %s, %s)'
cur.execute(sql, (sentence, words_in_sentence, book_id))
except Exception as e:
print(e)
for word in current_file_words:
                    # populate the author_words_count dictionary for this author
word = word.lower()
if word in author_words_count:
author_words_count[word] += 1
else:
author_words_count[word] = 1
try:
sql = 'insert into words (word) values(%s) RETURNING id'
cur.execute(sql, (word.lower(),))
word_id = cur.fetchone()[0]
sql2 = 'insert into books_words (book_id, word_id) values(%s, %s)'
cur.execute(sql2, (book_id, word_id))
print("Word added")
connection.commit()
except Exception as e:
print("Word already exists")
try:
sql2 = 'select id from words where word=%s'
cur.execute(sql2, (word.lower(),))
duplicate_word_id = cur.fetchone()[0]
sql2 = 'insert into books_words (book_id, word_id) values(%s, %s)'
cur.execute(sql2, (book_id, duplicate_word_id))
                        except Exception as link_error:
                            print("This word has already been linked with this author")
                            print(link_error)
connection.commit()
# insert unique words the author has used
author_unique_words = sum(value == 1 for value in author_words_count.values())
sql = "update authors set words_count=(%s) where name=%s"
cur.execute(sql, (author_unique_words, author_name))
cur.close()
connection.close()
if __name__ == '__main__':
start = 21
end = 22
for i in range(1):
p1 = multiprocessing.Process(target=importData, args=(start, end))
p2 = multiprocessing.Process(target=importData, args=(start + 1, end+1))
p3 = multiprocessing.Process(target=importData, args=(start + 2, end+2))
p4 = multiprocessing.Process(target=importData, args=(start + 3, end+3))
p5 = multiprocessing.Process(target=importData, args=(start + 4, end+4))
p6 = multiprocessing.Process(target=importData, args=(start + 5, end + 5))
p7 = multiprocessing.Process(target=importData, args=(start + 6, end+6))
p8 = multiprocessing.Process(target=importData, args=(start + 7, end+7))
p9 = multiprocessing.Process(target=importData, args=(start + 8, end + 8))
p10 = multiprocessing.Process(target=importData, args=(start + 9, end + 9))
p1.start()
p2.start()
p3.start()
p4.start()
p5.start()
p6.start()
p7.start()
p8.start()
p9.start()
p10.start()
start += 10
end += 10 | 41.422535 | 109 | 0.4983 |
7944bf6749ac082f1129a802be88b51c42e638f3 | 601 | py | Python | demo/estimate_poses.py | ngerstle/soccerontable | 25426ff0f8fe0ce008b99c5c0fdbb35091d8d92c | [
"BSD-2-Clause"
] | null | null | null | demo/estimate_poses.py | ngerstle/soccerontable | 25426ff0f8fe0ce008b99c5c0fdbb35091d8d92c | [
"BSD-2-Clause"
] | null | null | null | demo/estimate_poses.py | ngerstle/soccerontable | 25426ff0f8fe0ce008b99c5c0fdbb35091d8d92c | [
"BSD-2-Clause"
] | null | null | null | import os, sys
import argparse
import soccer3d
parser = argparse.ArgumentParser(description='Calibrate a soccer video')
parser.add_argument('--path_to_data', default='/opt/datadir', help='path')
parser.add_argument('--openpose_dir', default='/opt/openpose', help='path')
opt, _ = parser.parse_known_args()
db = soccer3d.YoutubeVideo(opt.path_to_data)
db.gather_detectron()
db.digest_metadata()
db.get_boxes_from_detectron()
db.dump_video('detections')
db.estimate_poses(openpose_dir=opt.openpose_dir)
db.refine_poses(keypoint_thresh=7, score_thresh=0.4, neck_thresh=0.4)
db.dump_video('poses')
| 26.130435 | 75 | 0.787022 |
7944bf6816e6492445a83e6b108066643571cee6 | 588 | py | Python | ddtrace/contrib/mako/__init__.py | mbmblbelt/dd-trace-py | 906fb7fa91d0ed59d263df74e14aacc8b2d70251 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | ddtrace/contrib/mako/__init__.py | mbmblbelt/dd-trace-py | 906fb7fa91d0ed59d263df74e14aacc8b2d70251 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | ddtrace/contrib/mako/__init__.py | mbmblbelt/dd-trace-py | 906fb7fa91d0ed59d263df74e14aacc8b2d70251 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | """
The ``mako`` integration traces templates rendering.
Auto instrumentation is available using the ``patch``. The following is an example::
from ddtrace import patch
from mako.template import Template
patch(mako=True)
t = Template(filename="index.html")
"""
from ...utils.importlib import require_modules
required_modules = ['mako']
with require_modules(required_modules) as missing_modules:
if not missing_modules:
from .patch import patch
from .patch import unpatch
__all__ = [
'patch',
'unpatch',
]
| 21.777778 | 84 | 0.664966 |
7944bf955a923a8082b2634833dd588c9b8a8073 | 864 | py | Python | mnist/arg_parser.py | mokpro/tensorflow_examples | c1d600a92a1ede3a187b7839f0842ff188373fdf | [
"MIT"
] | 10 | 2017-04-20T02:53:47.000Z | 2018-04-19T03:52:49.000Z | mnist/arg_parser.py | mokpro/tensorflow_examples | c1d600a92a1ede3a187b7839f0842ff188373fdf | [
"MIT"
] | null | null | null | mnist/arg_parser.py | mokpro/tensorflow_examples | c1d600a92a1ede3a187b7839f0842ff188373fdf | [
"MIT"
] | 13 | 2016-12-23T03:21:02.000Z | 2020-04-11T19:59:37.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import random
ALL_OPTIMIZERS = [
'GradientDescent',
'Adagrad',
'Adadelta',
'ProximalAdagrad',
'ProximalGradientDescent',
'Adam'
]
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'optimizers',
nargs='*',
choices=ALL_OPTIMIZERS,
default=random.choice(ALL_OPTIMIZERS))
parser.add_argument(
'--all',
action='store_true',
help='Runs all optimizers')
args = parser.parse_args()
if args.all:
final_optimizers = ALL_OPTIMIZERS
elif isinstance(args.optimizers, list):
final_optimizers = args.optimizers
else:
final_optimizers = [args.optimizers]
return final_optimizers
| 22.153846 | 46 | 0.672454 |
7944bfaddedc54f861b5239d9632859126c2e27b | 6,791 | py | Python | msgraph/cli/command_modules/identitysignins/azext_identitysignins/vendored_sdks/identitysignins/aio/operations/_informationprotectioninformationprotection_operations.py | microsoftgraph/msgraph-cli-archived | 489f70bf4ede1ce67b84bfb31e66da3e4db76062 | [
"MIT"
] | null | null | null | msgraph/cli/command_modules/identitysignins/azext_identitysignins/vendored_sdks/identitysignins/aio/operations/_informationprotectioninformationprotection_operations.py | microsoftgraph/msgraph-cli-archived | 489f70bf4ede1ce67b84bfb31e66da3e4db76062 | [
"MIT"
] | 22 | 2022-03-29T22:54:37.000Z | 2022-03-29T22:55:27.000Z | msgraph/cli/command_modules/identitysignins/azext_identitysignins/vendored_sdks/identitysignins/aio/operations/_informationprotectioninformationprotection_operations.py | microsoftgraph/msgraph-cli-archived | 489f70bf4ede1ce67b84bfb31e66da3e4db76062 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class informationprotectioninformationprotectionOperations:
"""informationprotectioninformationprotectionOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~identity_sign_ins.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get_information_protection(
self,
select: Optional[List[Union[str, "models.Enum23"]]] = None,
expand: Optional[List[Union[str, "models.Enum24"]]] = None,
**kwargs
) -> "models.microsoftgraphinformationprotection":
"""Get informationProtection.
Get informationProtection.
:param select: Select properties to be returned.
:type select: list[str or ~identity_sign_ins.models.Enum23]
:param expand: Expand related entities.
:type expand: list[str or ~identity_sign_ins.models.Enum24]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: microsoftgraphinformationprotection, or the result of cls(response)
:rtype: ~identity_sign_ins.models.microsoftgraphinformationprotection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.microsoftgraphinformationprotection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_information_protection.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.odataerror, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('microsoftgraphinformationprotection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_information_protection.metadata = {'url': '/informationProtection'} # type: ignore
async def update_information_protection(
self,
body: "models.microsoftgraphinformationprotection",
**kwargs
) -> None:
"""Update informationProtection.
Update informationProtection.
:param body: New property values.
:type body: ~identity_sign_ins.models.microsoftgraphinformationprotection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_information_protection.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'microsoftgraphinformationprotection')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.odataerror, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
update_information_protection.metadata = {'url': '/informationProtection'} # type: ignore
| 45.273333 | 133 | 0.679281 |