code (string, 22 to 1.05M chars) | apis (list, 1 to 3.31k items) | extract_api (string, 75 to 3.25M chars)
---|---|---|
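Each row below pairs a source snippet (code) with the list of fully qualified APIs it calls (apis) and a string describing every detected call site (extract_api). The sketch below shows one way such a cell could be decoded; the per-field layout is inferred from the records in this table, not taken from a published schema.

import ast

def parse_extract_api(cell: str):
    """Decode one extract_api cell into simple dicts (field layout inferred, not official)."""
    calls = []
    for entry in ast.literal_eval(cell):
        # Inferred layout per entry: (call span, qualified API name, name as written,
        # (positional args, keyword args), argument text, argument span,
        # a boolean flag of unclear meaning, and the originating import statement).
        span, api, _written, _args, _arg_text, _arg_span, _flag, import_stmt = entry
        calls.append({"span": span, "api": api, "import": import_stmt.strip()})
    return calls

# Example, using the first record in this table:
row = r"[((49, 65), 'remarshal.main', 'remarshal.main', ([], {}), '()\n', (63, 65), False, 'import remarshal\n')]"
print(parse_extract_api(row))
# -> [{'span': (49, 65), 'api': 'remarshal.main', 'import': 'import remarshal'}]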
import remarshal
if __name__ == '__main__':
remarshal.main()
|
[
"remarshal.main"
] |
[((49, 65), 'remarshal.main', 'remarshal.main', ([], {}), '()\n', (63, 65), False, 'import remarshal\n')]
|
""" Lower level layer for slicer.
Mom's spaghetti.
"""
# TODO: Consider boolean array indexing.
from typing import Any, AnyStr, Union, List, Tuple
from abc import abstractmethod
import numbers
class AtomicSlicer:
""" Wrapping object that will unify slicing across data structures.
What we support:
Basic indexing (return references):
- (start:stop:step) slicing
- support ellipses
Advanced indexing (return references):
- integer array indexing
Numpy Reference:
Basic indexing (return views):
- (start:stop:step) slicing
- support ellipses and newaxis (alias for None)
Advanced indexing (return copy):
- integer array indexing, i.e. X[[1,2], [3,4]]
- boolean array indexing
- mixed array indexing (has integer array, ellipses, newaxis in same slice)
"""
def __init__(self, o: Any, max_dim: Union[None, int, AnyStr] = "auto"):
""" Provides a consistent slicing API to the object provided.
Args:
o: Object to enable consistent slicing.
Currently supports numpy dense arrays and recursively nested lists whose innermost containers are lists or numpy arrays.
max_dim: Max number of dimensions the wrapped object has.
If set to "auto", max dimensions will be inferred. This comes at compute cost.
"""
self.o = o
self.max_dim = max_dim
if self.max_dim == "auto":
self.max_dim = UnifiedDataHandler.max_dim(o)
def __repr__(self) -> AnyStr:
""" Override default repr for human readability.
Returns:
String to display.
"""
return f"{self.__class__.__name__}({self.o.__repr__()})"
def __getitem__(self, item: Any) -> Any:
""" Consistent slicing into wrapped object.
Args:
item: Slicing key of type integer or slice.
Returns:
Sliced object.
Raises:
ValueError: If slicing is not compatible with wrapped object.
"""
# Turn item into tuple if not already.
index_tup = unify_slice(item, self.max_dim)
# Slice according to object type.
return UnifiedDataHandler.slice(self.o, index_tup, self.max_dim)
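# Illustrative usage (not part of the original module): AtomicSlicer gives nested
# lists, numpy arrays, pandas objects, etc. a single tuple-style __getitem__.
# For a plain nested list, for example,
#     AtomicSlicer([[1, 2, 3], [4, 5, 6]])[:, 1]
# expands the missing dimension and returns [2, 5].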
def unify_slice(item: Any, max_dim: int, alias_lookup=None) -> Tuple:
""" Resolves aliases and ellipses in a slice item.
Args:
item: Slicing key that is passed to __getitem__.
max_dim: Max dimension of object to be sliced.
alias_lookup: AliasLookup structure.
Returns:
A tuple representation of the item.
"""
item = _normalize_slice_key(item)
index_tup = _normalize_subkey_types(item)
index_tup = _handle_newaxis_ellipses(index_tup, max_dim)
if alias_lookup:
index_tup = _handle_aliases(index_tup, alias_lookup)
return index_tup
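# For example (illustrative, not part of the original module): with max_dim=3,
#     unify_slice(1, 3)              -> (1, slice(None), slice(None))
#     unify_slice((Ellipsis, 2), 3)  -> (slice(None), slice(None), 2)
# i.e. ellipses and missing trailing dimensions are expanded to full slices.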
def _normalize_subkey_types(index_tup: Tuple) -> Tuple:
""" Casts subkeys into basic types such as int.
Args:
index_tup: Tuple of slicing subkeys that is passed within __getitem__.
Returns:
Tuple with subkeys cast to basic types.
"""
new_index_tup = [] # Gets casted to tuple at the end
np_int_types = {
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
}
for subkey in index_tup:
if _safe_isinstance(subkey, "numpy", np_int_types):
new_subkey = int(subkey)
elif _safe_isinstance(subkey, "numpy", "ndarray"):
if len(subkey.shape) == 1:
new_subkey = subkey.tolist()
else:
raise ValueError(f"Cannot use array of shape {subkey.shape} as subkey.")
else:
new_subkey = subkey
new_index_tup.append(new_subkey)
return tuple(new_index_tup)
def _normalize_slice_key(key: Any) -> Tuple:
""" Normalizes slice key into always being a top-level tuple.
Args:
key: Slicing key that is passed within __getitem__.
Returns:
Expanded slice as a tuple.
"""
if not isinstance(key, tuple):
return (key,)
else:
return key
def _handle_newaxis_ellipses(index_tup: Tuple, max_dim: int) -> Tuple:
""" Expands newaxis and ellipses within a slice for simplification.
This code is mostly adapted from: https://github.com/clbarnes/h5py_like/blob/master/h5py_like/shape_utils.py#L111
Args:
index_tup: Slicing key as a tuple.
max_dim: Maximum number of dimensions in the respective sliceable object.
Returns:
Expanded slice as a tuple.
"""
non_indexes = (None, Ellipsis)
concrete_indices = sum(idx not in non_indexes for idx in index_tup)
index_list = []
# newaxis_at = []
has_ellipsis = False
int_count = 0
for item in index_tup:
if isinstance(item, numbers.Number):
int_count += 1
# NOTE: If we need locations of new axis, re-enable this.
if item is None: # pragma: no cover
pass
# newaxis_at.append(len(index_list) + len(newaxis_at) - int_count)
elif item == Ellipsis:
if has_ellipsis: # pragma: no cover
raise IndexError("an index can only have a single ellipsis ('...')")
has_ellipsis = True
initial_len = len(index_list)
while len(index_list) + (concrete_indices - initial_len) < max_dim:
index_list.append(slice(None))
else:
index_list.append(item)
if len(index_list) > max_dim: # pragma: no cover
raise IndexError("too many indices for array")
while len(index_list) < max_dim:
index_list.append(slice(None))
# return index_list, newaxis_at
return tuple(index_list)
def _handle_aliases(index_tup: Tuple, alias_lookup) -> Tuple:
new_index_tup = []
def resolve(item, dim):
if isinstance(item, slice):
return item
# Replace element if in alias lookup, otherwise use original.
item = alias_lookup.get(dim, item, item)
return item
# Go through each element within the index and resolve if needed.
for dim, item in enumerate(index_tup):
if isinstance(item, list):
new_item = []
for sub_item in item:
new_item.append(resolve(sub_item, dim))
else:
new_item = resolve(item, dim)
new_index_tup.append(new_item)
return tuple(new_index_tup)
class Tracked(AtomicSlicer):
""" Tracked defines an object that slicer wraps."""
def __init__(self, o: Any, dim: Union[int, List, tuple, None, str] = "auto"):
""" Defines an object that will be wrapped by slicer.
Args:
o: Object that will be tracked for slicer.
dim: Target dimension(s) slicer will index on for this object.
"""
super().__init__(o)
# Protected attribute that can be overridden.
self._name = None
# Place dim into coordinate form.
if dim == "auto":
self.dim = list(range(self.max_dim))
elif dim is None:
self.dim = []
elif isinstance(dim, int):
self.dim = [dim]
elif isinstance(dim, list):
self.dim = dim
elif isinstance(dim, tuple):
self.dim = list(dim)
else: # pragma: no cover
raise ValueError(f"Cannot handle dim of type: {type(dim)}")
class Obj(Tracked):
""" An object that slicer wraps. """
def __init__(self, o, dim="auto"):
super().__init__(o, dim)
class Alias(Tracked):
""" Defines a tracked object as well as additional __getitem__ keys. """
def __init__(self, o, dim):
if not (
isinstance(dim, int) or (isinstance(dim, (list, tuple)) and len(dim) <= 1)
): # pragma: no cover
raise ValueError("Aliases must track a single dimension")
super().__init__(o, dim)
class AliasLookup:
def __init__(self, aliases):
self._lookup = {}
# Populate lookup and merge indexes.
for _, alias in aliases.items():
self.update(alias)
def update(self, alias):
if alias.dim is None or len(alias.dim) == 0:
return
dim = alias.dim[0]
if dim not in self._lookup:
self._lookup[dim] = {}
dim_lookup = self._lookup[dim]
# NOTE: Alias must be backed by either a list or dictionary.
itr = enumerate(alias.o) if isinstance(alias.o, list) else alias.o.items()
for i, x in itr:
if x not in dim_lookup:
dim_lookup[x] = set()
dim_lookup[x].add(i)
def delete(self, alias):
'''Delete an existing alias from the lookup.'''
dim = alias.dim[0]
dim_lookup = self._lookup[dim]
# NOTE: Alias must be backed by either a list or dictionary.
itr = enumerate(alias.o) if isinstance(alias.o, list) else alias.o.items()
for i, x in itr:
del dim_lookup[x]
def get(self, dim, target, default=None):
if dim not in self._lookup:
return default
indexes = self._lookup[dim].get(target, None)
if indexes is None:
return default
if len(indexes) == 1:
return next(iter(indexes))
else:
return list(indexes)
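# Illustrative example (not part of the original module): an Alias maps labels to
# positions along a single dimension, and AliasLookup resolves them:
#     lookup = AliasLookup({"columns": Alias(["age", "height", "weight"], 1)})
#     lookup.get(1, "height")                  # -> 1
#     lookup.get(1, "missing", default=None)   # -> None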
def resolve_dim(slicer_index: Tuple, slicer_dim: List) -> List:
""" Extracts new dim after applying slicing index and maps it back to the original index list. """
new_slicer_dim = []
reduced_mask = []
for _, curr_idx in enumerate(slicer_index):
if isinstance(curr_idx, (tuple, list, slice)):
reduced_mask.append(0)
else:
reduced_mask.append(1)
for curr_dim in slicer_dim:
if reduced_mask[curr_dim] == 0:
new_slicer_dim.append(curr_dim - sum(reduced_mask[:curr_dim]))
return new_slicer_dim
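# For example (illustrative, not part of the original module): after indexing
# dimension 0 with an integer, an object tracked on dimensions [0, 1] keeps only
# dimension 1, which shifts down to position 0:
#     resolve_dim((0, slice(None)), [0, 1])   # -> [0]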
def reduced_o(tracked: Tracked) -> Union[List, Any]:
os = [t.o for t in tracked]
os = os[0] if len(os) == 1 else os
return os
class BaseHandler:
@classmethod
@abstractmethod
def head_slice(cls, o, index_tup, max_dim):
raise NotImplementedError() # pragma: no cover
@classmethod
@abstractmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
raise NotImplementedError() # pragma: no cover
@classmethod
@abstractmethod
def max_dim(cls, o):
raise NotImplementedError() # pragma: no cover
@classmethod
def default_alias(cls, o):
return []
class SeriesHandler(BaseHandler):
@classmethod
def head_slice(cls, o, index_tup, max_dim):
head_index = index_tup[0]
is_element = True if isinstance(head_index, int) else False
sliced_o = o.iloc[head_index]
return is_element, sliced_o, 1
@classmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
# NOTE: Series only has one dimension,
# call slicer again to end the recursion.
return AtomicSlicer(o, max_dim=max_dim)[tail_index]
@classmethod
def max_dim(cls, o):
return len(o.shape)
@classmethod
def default_alias(cls, o):
index_alias = Alias(o.index.to_list(), 0)
index_alias._name = "index"
return [index_alias]
class DataFrameHandler(BaseHandler):
@classmethod
def head_slice(cls, o, index_tup, max_dim):
# NOTE: At head slice, we know there are two fixed dimensions.
cut_index = index_tup
is_element = True if isinstance(cut_index[-1], int) else False
sliced_o = o.iloc[cut_index]
return is_element, sliced_o, 2
@classmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
# NOTE: Dataframe has fixed dimensions,
# call slicer again to end the recursion.
return AtomicSlicer(o, max_dim=max_dim)[tail_index]
@classmethod
def max_dim(cls, o):
return len(o.shape)
@classmethod
def default_alias(cls, o):
index_alias = Alias(o.index.to_list(), 0)
index_alias._name = "index"
column_alias = Alias(o.columns.to_list(), 1)
column_alias._name = "columns"
return [index_alias, column_alias]
class ArrayHandler(BaseHandler):
@classmethod
def head_slice(cls, o, index_tup, max_dim):
# Check if head is string
head_index, tail_index = index_tup[0], index_tup[1:]
cut = 1
for sub_index in tail_index:
if isinstance(sub_index, str) or cut == len(o.shape):
break
cut += 1
# Process native array dimensions
cut_index = index_tup[:cut]
is_element = any([True if isinstance(x, int) else False for x in cut_index])
sliced_o = o[cut_index]
return is_element, sliced_o, cut
@classmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
if flatten:
# NOTE: If we're dealing with a scipy matrix,
# we have to manually flatten it ourselves
# to keep consistent to the rest of slicer's API.
if _safe_isinstance(o, "scipy.sparse.csc", "csc_matrix"):
return AtomicSlicer(o.toarray().flatten(), max_dim=max_dim)[tail_index]
elif _safe_isinstance(o, "scipy.sparse.csr", "csr_matrix"):
return AtomicSlicer(o.toarray().flatten(), max_dim=max_dim)[tail_index]
elif _safe_isinstance(o, "scipy.sparse.dok", "dok_matrix"):
return AtomicSlicer(o.toarray().flatten(), max_dim=max_dim)[tail_index]
elif _safe_isinstance(o, "scipy.sparse.lil", "lil_matrix"):
return AtomicSlicer(o.toarray().flatten(), max_dim=max_dim)[tail_index]
else:
return AtomicSlicer(o, max_dim=max_dim)[tail_index]
else:
inner = [AtomicSlicer(e, max_dim=max_dim)[tail_index] for e in o]
if _safe_isinstance(o, "numpy", "ndarray"):
import numpy
if len(inner) > 0 and hasattr(inner[0], "__len__"):
ragged = not all(len(x) == len(inner[0]) for x in inner)
else:
ragged = False
if ragged:
return numpy.array(inner, dtype=numpy.object)
else:
return numpy.array(inner)
elif _safe_isinstance(o, "torch", "Tensor"):
import torch
if len(inner) > 0 and isinstance(inner[0], torch.Tensor):
return torch.stack(inner)
else:
return torch.tensor(inner)
elif _safe_isinstance(o, "scipy.sparse.csc", "csc_matrix"):
from scipy.sparse import vstack
out = vstack(inner, format='csc')
return out
elif _safe_isinstance(o, "scipy.sparse.csr", "csr_matrix"):
from scipy.sparse import vstack
out = vstack(inner, format='csr')
return out
elif _safe_isinstance(o, "scipy.sparse.dok", "dok_matrix"):
from scipy.sparse import vstack
out = vstack(inner, format='dok')
return out
elif _safe_isinstance(o, "scipy.sparse.lil", "lil_matrix"):
from scipy.sparse import vstack
out = vstack(inner, format='lil')
return out
else:
raise ValueError(f"Cannot handle type {type(o)}.") # pragma: no cover
@classmethod
def max_dim(cls, o):
if _safe_isinstance(o, "numpy", "ndarray") and o.dtype == "object":
return max([UnifiedDataHandler.max_dim(x) for x in o], default=-1) + 1
else:
return len(o.shape)
class DictHandler(BaseHandler):
@classmethod
def head_slice(cls, o, index_tup, max_dim):
head_index = index_tup[0]
if isinstance(head_index, (tuple, list)) and len(index_tup) == 0:
return False, o, 1
if isinstance(head_index, (list, tuple)):
return (
False,
{
sub_index: AtomicSlicer(o, max_dim=max_dim)[sub_index]
for sub_index in head_index
},
1,
)
elif isinstance(head_index, slice):
if head_index == slice(None, None, None):
return False, o, 1
return False, o[head_index], 1
else:
return True, o[head_index], 1
@classmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
if flatten:
return AtomicSlicer(o, max_dim=max_dim)[tail_index]
else:
return {
k: AtomicSlicer(e, max_dim=max_dim)[tail_index] for k, e in o.items()
}
@classmethod
def max_dim(cls, o):
return max([UnifiedDataHandler.max_dim(x) for x in o.values()], default=-1) + 1
class ListTupleHandler(BaseHandler):
@classmethod
def head_slice(cls, o, index_tup, max_dim):
head_index = index_tup[0]
if isinstance(head_index, (tuple, list)) and len(index_tup) == 0:
return False, o, 1
if isinstance(head_index, (list, tuple)):
if len(head_index) == 0:
return False, o, 1
else:
results = [
AtomicSlicer(o, max_dim=max_dim)[sub_index]
for sub_index in head_index
]
results = tuple(results) if isinstance(o, tuple) else results
return False, results, 1
elif isinstance(head_index, slice):
return False, o[head_index], 1
elif isinstance(head_index, int):
return True, o[head_index], 1
else: # pragma: no cover
raise ValueError(f"Invalid key {head_index} for {o}")
@classmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
if flatten:
return AtomicSlicer(o, max_dim=max_dim)[tail_index]
else:
results = [AtomicSlicer(e, max_dim=max_dim)[tail_index] for e in o]
return tuple(results) if isinstance(o, tuple) else results
@classmethod
def max_dim(cls, o):
return max([UnifiedDataHandler.max_dim(x) for x in o], default=-1) + 1
class UnifiedDataHandler:
""" Registry that maps types to their unified slice calls."""
""" Class attribute that maps type to their unified slice calls."""
type_map = {
("builtins", "list"): ListTupleHandler,
("builtins", "tuple"): ListTupleHandler,
("builtins", "dict"): DictHandler,
("torch", "Tensor"): ArrayHandler,
("numpy", "ndarray"): ArrayHandler,
("scipy.sparse.csc", "csc_matrix"): ArrayHandler,
("scipy.sparse.csr", "csr_matrix"): ArrayHandler,
("scipy.sparse.dok", "dok_matrix"): ArrayHandler,
("scipy.sparse.lil", "lil_matrix"): ArrayHandler,
("pandas.core.frame", "DataFrame"): DataFrameHandler,
("pandas.core.series", "Series"): SeriesHandler,
}
@classmethod
def slice(cls, o, index_tup, max_dim):
# NOTE: Unified handles base cases such as empty tuples, which
# specialized handlers do not.
if isinstance(index_tup, (tuple, list)) and len(index_tup) == 0:
return o
# Slice as delegated by data handler.
o_type = _type_name(o)
head_slice = cls.type_map[o_type].head_slice
tail_slice = cls.type_map[o_type].tail_slice
is_element, sliced_o, cut = head_slice(o, index_tup, max_dim)
out = tail_slice(sliced_o, index_tup[cut:], max_dim - cut, is_element)
return out
@classmethod
def max_dim(cls, o):
o_type = _type_name(o)
if o_type not in cls.type_map:
return 0
return cls.type_map[o_type].max_dim(o)
@classmethod
def default_alias(cls, o):
o_type = _type_name(o)
if o_type not in cls.type_map:
return {}
return cls.type_map[o_type].default_alias(o)
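# Illustrative trace (not part of the original module): slicing a dict of lists,
#     AtomicSlicer({"a": [1, 2], "b": [3, 4]})["a", 1]
# dispatches the head index to DictHandler ("a" -> [1, 2]) and then recurses on
# the tail index (1,) through ListTupleHandler, returning 2.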
def _type_name(o: object) -> Tuple[str, str]:
return o.__class__.__module__, o.__class__.__name__
def _safe_isinstance(
o: object, module_name: str, type_name: Union[str, set, tuple]
) -> bool:
o_module, o_type = _type_name(o)
if isinstance(type_name, str):
return o_module == module_name and o_type == type_name
else:
return o_module == module_name and o_type in type_name
|
[
"scipy.sparse.vstack",
"numpy.array",
"torch.stack",
"torch.tensor"
] |
[((14349, 14387), 'numpy.array', 'numpy.array', (['inner'], {'dtype': 'numpy.object'}), '(inner, dtype=numpy.object)\n', (14360, 14387), False, 'import numpy\n'), ((14437, 14455), 'numpy.array', 'numpy.array', (['inner'], {}), '(inner)\n', (14448, 14455), False, 'import numpy\n'), ((14644, 14662), 'torch.stack', 'torch.stack', (['inner'], {}), '(inner)\n', (14655, 14662), False, 'import torch\n'), ((14712, 14731), 'torch.tensor', 'torch.tensor', (['inner'], {}), '(inner)\n', (14724, 14731), False, 'import torch\n'), ((14874, 14901), 'scipy.sparse.vstack', 'vstack', (['inner'], {'format': '"""csc"""'}), "(inner, format='csc')\n", (14880, 14901), False, 'from scipy.sparse import vstack\n'), ((15071, 15098), 'scipy.sparse.vstack', 'vstack', (['inner'], {'format': '"""csr"""'}), "(inner, format='csr')\n", (15077, 15098), False, 'from scipy.sparse import vstack\n'), ((15268, 15295), 'scipy.sparse.vstack', 'vstack', (['inner'], {'format': '"""dok"""'}), "(inner, format='dok')\n", (15274, 15295), False, 'from scipy.sparse import vstack\n'), ((15465, 15492), 'scipy.sparse.vstack', 'vstack', (['inner'], {'format': '"""lil"""'}), "(inner, format='lil')\n", (15471, 15492), False, 'from scipy.sparse import vstack\n')]
|
# Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
class BatchCreateBitableTableReqTable(object):
name: str = attr.ib(
default="", metadata={"req_type": "json", "key": "name"}
) # table name, example: "table1"
@attr.s
class BatchCreateBitableTableReq(object):
user_id_type: lark_type.IDType = attr.ib(
default=None, metadata={"req_type": "query", "key": "user_id_type"}
) # user ID type, example: "open_id"; possible values: `open_id`: the user's open id, `union_id`: the user's union id, `user_id`: the user's user id; default: `open_id`; when the value is `user_id`, required field permission: obtain the user's user ID
app_token: str = attr.ib(
default="", metadata={"req_type": "path", "key": "app_token"}
) # bitable app token, example: "<KEY>"
tables: typing.List[BatchCreateBitableTableReqTable] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "tables"}
) # tables
@attr.s
class BatchCreateBitableTableResp(object):
table_ids: typing.List[str] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "table_ids"}
) # table ids
def _gen_batch_create_bitable_table_req(request, options) -> RawRequestReq:
return RawRequestReq(
dataclass=BatchCreateBitableTableResp,
scope="Bitable",
api="BatchCreateBitableTable",
method="POST",
url="https://open.feishu.cn/open-apis/bitable/v1/apps/:app_token/tables/batch_create",
body=request,
method_option=_new_method_option(options),
need_tenant_access_token=True,
need_user_access_token=True,
)
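# Illustrative usage (not part of the generated file; the token and table name
# are made-up values):
#     req = BatchCreateBitableTableReq(
#         app_token="example-app-token",
#         tables=[BatchCreateBitableTableReqTable(name="table1")],
#     )
#     raw_req = _gen_batch_create_bitable_table_req(req, options)
# where `options` is whatever request-option object the surrounding pylark
# client normally passes in.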
|
[
"pylark.lark_request._new_method_option",
"attr.ib"
] |
[((289, 354), 'attr.ib', 'attr.ib', ([], {'default': '""""""', 'metadata': "{'req_type': 'json', 'key': 'name'}"}), "(default='', metadata={'req_type': 'json', 'key': 'name'})\n", (296, 354), False, 'import attr\n'), ((482, 558), 'attr.ib', 'attr.ib', ([], {'default': 'None', 'metadata': "{'req_type': 'query', 'key': 'user_id_type'}"}), "(default=None, metadata={'req_type': 'query', 'key': 'user_id_type'})\n", (489, 558), False, 'import attr\n'), ((751, 821), 'attr.ib', 'attr.ib', ([], {'default': '""""""', 'metadata': "{'req_type': 'path', 'key': 'app_token'}"}), "(default='', metadata={'req_type': 'path', 'key': 'app_token'})\n", (758, 821), False, 'import attr\n'), ((929, 1005), 'attr.ib', 'attr.ib', ([], {'factory': '(lambda : [])', 'metadata': "{'req_type': 'json', 'key': 'tables'}"}), "(factory=lambda : [], metadata={'req_type': 'json', 'key': 'tables'})\n", (936, 1005), False, 'import attr\n'), ((1116, 1195), 'attr.ib', 'attr.ib', ([], {'factory': '(lambda : [])', 'metadata': "{'req_type': 'json', 'key': 'table_ids'}"}), "(factory=lambda : [], metadata={'req_type': 'json', 'key': 'table_ids'})\n", (1123, 1195), False, 'import attr\n'), ((1599, 1626), 'pylark.lark_request._new_method_option', '_new_method_option', (['options'], {}), '(options)\n', (1617, 1626), False, 'from pylark.lark_request import RawRequestReq, _new_method_option\n')]
|
import random
import string
from selenium.webdriver.common.by import By
from dialog import DialogPage
from elements import ButtonElement, GridElement, TextElement, InputElement
from workflow import find_workflow_component_figures
from util import ArgsPrompt, NotifierPage
class ComponentPage(DialogPage):
""" Component editor page. """
inputs_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Inputs']"))
slots_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Slots']"))
outputs_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Outputs']"))
events_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Events']"))
inputs = GridElement((By.ID, 'Inputs_props'))
outputs = GridElement((By.ID, 'Outputs_props'))
def __init__(self, browser, port, locator):
super(ComponentPage, self).__init__(browser, port, locator)
# It takes a while for the full load to complete.
NotifierPage.wait(self)
def get_inputs(self):
""" Return inputs grid. """
self('inputs_tab').click()
return self.inputs
def set_input(self, name, value):
""" Set input `name` to `value`. """
self('inputs_tab').click()
grid = self.inputs
found = []
for row in grid.rows:
if row[0] == name:
row[2] = value
return
found.append(row[0])
raise RuntimeError('%r not found in inputs %s' % (name, found))
def get_events(self):
""" Return events grid. """
self('events_tab').click()
return self.events
def get_outputs(self):
""" Return outputs grid. """
self('outputs_tab').click()
return self.outputs
def show_slots(self):
"""switch to slots tab"""
self('slots_tab').click()
class DriverPage(ComponentPage):
""" Driver editor page. """
parameters_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Parameters']"))
workflow_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Workflow']"))
objectives_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Objectives']"))
constraints_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Constraints']"))
triggers_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Triggers']"))
parameters = GridElement((By.ID, 'Parameters_parms'))
objectives = GridElement((By.ID, 'Objectives_objectives'))
constraints = GridElement((By.ID, 'Constraints_constraints'))
triggers = GridElement((By.ID, 'Triggers_triggers'))
add_parameter = ButtonElement((By.XPATH, "//span[text()='Add Parameter']"))
add_objective = ButtonElement((By.XPATH, "//span[text()='Add Objective']"))
add_constraint = ButtonElement((By.XPATH, "//span[text()='Add Constraint']"))
add_trigger = ButtonElement((By.XPATH, "//span[text()='Add Event']"))
def get_parameters(self):
""" Return parameters grid. """
self('parameters_tab').click()
return self.parameters
def get_objectives(self):
""" Return objectives grid. """
self('objectives_tab').click()
return self.objectives
def get_constraints(self):
""" Return constraints grid. """
self('constraints_tab').click()
return self.constraints
def get_triggers(self):
""" Return triggers grid. """
self('triggers_tab').click()
return self.triggers
def new_parameter(self):
""" Return :class:`ParameterDialog`. """
self('add_parameter').click()
return ParameterDialog(self.browser, self.port,
(By.XPATH, "//div[@id='parameter-dialog']/.."))
def new_objective(self):
""" Return :class:`ObjectiveDialog`. """
self('add_objective').click()
return ObjectiveDialog(self.browser, self.port,
(By.XPATH, "//div[@id='objective-dialog']/.."))
def new_constraint(self):
""" Return :class:`ConstraintDialog`. """
self('add_constraint').click()
return ConstraintDialog(self.browser, self.port,
(By.XPATH, "//div[@id='constraint-dialog']/.."))
def new_trigger(self):
""" Return :class:`EventDialog`. """
self('add_trigger').click()
return EventDialog(self.browser, self.port,
(By.XPATH, "//div[@id='event-dialog']/.."))
def show_workflow(self):
"""switch to workflow tab"""
self('workflow_tab').click()
def get_workflow_component_figures(self):
""" Return workflow component figure elements. """
return find_workflow_component_figures(self)
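# Illustrative test usage (not part of this module; `browser`, `port`, and the
# locator are placeholders supplied by the test harness):
#     editor = DriverPage(browser, port, (By.XPATH, "//div[@id='driver-editor']"))
#     editor.get_parameters()           # switches to the Parameters tab, returns the grid
#     dialog = editor.new_parameter()   # opens the ParameterDialog defined below
#     dialog.target = 'comp.x'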
class ParameterDialog(DialogPage):
""" Dialog for adding a new parameter. """
target = InputElement((By.ID, 'parameter-target'))
low = InputElement((By.ID, 'parameter-low'))
high = InputElement((By.ID, 'parameter-high'))
scaler = InputElement((By.ID, 'parameter-scaler'))
adder = InputElement((By.ID, 'parameter-adder'))
name = InputElement((By.ID, 'parameter-name'))
ok = ButtonElement((By.ID, 'parameter-ok'))
cancel = ButtonElement((By.ID, 'parameter-cancel'))
class ObjectiveDialog(DialogPage):
""" Dialog for adding a new objective. """
expr = InputElement((By.ID, 'objective-expr'))
name = InputElement((By.ID, 'objective-name'))
ok = ButtonElement((By.ID, 'objective-ok'))
cancel = ButtonElement((By.ID, 'objective-cancel'))
class ConstraintDialog(DialogPage):
""" Dialog for adding a new constraint. """
expr = InputElement((By.ID, 'constraint-expr'))
scaler = InputElement((By.ID, 'constraint-scaler'))
adder = InputElement((By.ID, 'constraint-adder'))
name = InputElement((By.ID, 'constraint-name'))
ok = ButtonElement((By.ID, 'constraint-ok'))
cancel = ButtonElement((By.ID, 'constraint-cancel'))
class EventDialog(DialogPage):
""" Dialog for adding a new event. """
target = InputElement((By.ID, 'event-target'))
ok = ButtonElement((By.ID, 'event-ok'))
cancel = ButtonElement((By.ID, 'event-cancel'))
class AssemblyPage(ComponentPage):
""" Assembly editor page. """
dataflow_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Dataflow']"))
def show_dataflow(self):
self('dataflow_tab').element.click()
class PropertiesPage(DialogPage):
""" Component properties page. """
header = TextElement((By.XPATH, 'h3[1]'))
inputs = GridElement((By.ID, 'Inputs_props'))
outputs = GridElement((By.ID, 'Outputs_props'))
def set_input(self, name, value):
""" Set input `name` to `value`. """
self('inputs_tab').click()
grid = self.inputs
found = []
for row in grid.rows:
if row[0] == name:
row[1] = value
return
found.append(row[0])
raise RuntimeError('%r not found in inputs %s' % (name, found))
class NameInstanceDialog(ArgsPrompt):
""" Adds :meth:`create_and_dismiss` to :class:`ArgsPrompt`. """
def __init__(self, parent):
super(NameInstanceDialog, self).__init__(parent.browser, parent.port)
def create_and_dismiss(self, name=None):
"""Names the instance. Returns the name. Force a name with the name argument"""
chars = string.ascii_uppercase
name = name or ''.join(random.choice(chars).strip() for x in range(8))
self.name = name
self.click_ok()
return name
|
[
"elements.InputElement",
"random.choice",
"elements.GridElement",
"elements.ButtonElement",
"util.NotifierPage.wait",
"workflow.find_workflow_component_figures",
"elements.TextElement"
] |
[((363, 420), 'elements.ButtonElement', 'ButtonElement', (['(By.XPATH, "div/ul/li/a[text()=\'Inputs\']")'], {}), '((By.XPATH, "div/ul/li/a[text()=\'Inputs\']"))\n', (376, 420), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((439, 495), 'elements.ButtonElement', 'ButtonElement', (['(By.XPATH, "div/ul/li/a[text()=\'Slots\']")'], {}), '((By.XPATH, "div/ul/li/a[text()=\'Slots\']"))\n', (452, 495), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((514, 572), 'elements.ButtonElement', 'ButtonElement', (['(By.XPATH, "div/ul/li/a[text()=\'Outputs\']")'], {}), '((By.XPATH, "div/ul/li/a[text()=\'Outputs\']"))\n', (527, 572), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((591, 648), 'elements.ButtonElement', 'ButtonElement', (['(By.XPATH, "div/ul/li/a[text()=\'Events\']")'], {}), '((By.XPATH, "div/ul/li/a[text()=\'Events\']"))\n', (604, 648), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((664, 700), 'elements.GridElement', 'GridElement', (["(By.ID, 'Inputs_props')"], {}), "((By.ID, 'Inputs_props'))\n", (675, 700), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((715, 752), 'elements.GridElement', 'GridElement', (["(By.ID, 'Outputs_props')"], {}), "((By.ID, 'Outputs_props'))\n", (726, 752), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((1909, 1970), 'elements.ButtonElement', 'ButtonElement', (['(By.XPATH, "div/ul/li/a[text()=\'Parameters\']")'], {}), '((By.XPATH, "div/ul/li/a[text()=\'Parameters\']"))\n', (1922, 1970), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((1993, 2052), 'elements.ButtonElement', 'ButtonElement', (['(By.XPATH, "div/ul/li/a[text()=\'Workflow\']")'], {}), '((By.XPATH, "div/ul/li/a[text()=\'Workflow\']"))\n', (2006, 2052), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((2075, 2136), 'elements.ButtonElement', 'ButtonElement', (['(By.XPATH, "div/ul/li/a[text()=\'Objectives\']")'], {}), '((By.XPATH, "div/ul/li/a[text()=\'Objectives\']"))\n', (2088, 2136), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((2159, 2221), 'elements.ButtonElement', 'ButtonElement', (['(By.XPATH, "div/ul/li/a[text()=\'Constraints\']")'], {}), '((By.XPATH, "div/ul/li/a[text()=\'Constraints\']"))\n', (2172, 2221), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((2244, 2303), 'elements.ButtonElement', 'ButtonElement', (['(By.XPATH, "div/ul/li/a[text()=\'Triggers\']")'], {}), '((By.XPATH, "div/ul/li/a[text()=\'Triggers\']"))\n', (2257, 2303), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((2323, 2363), 'elements.GridElement', 'GridElement', (["(By.ID, 'Parameters_parms')"], {}), "((By.ID, 'Parameters_parms'))\n", (2334, 2363), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((2382, 2427), 'elements.GridElement', 'GridElement', (["(By.ID, 'Objectives_objectives')"], {}), "((By.ID, 'Objectives_objectives'))\n", (2393, 2427), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((2446, 2493), 'elements.GridElement', 'GridElement', (["(By.ID, 'Constraints_constraints')"], {}), "((By.ID, 'Constraints_constraints'))\n", (2457, 2493), False, 'from elements import ButtonElement, GridElement, 
TextElement, InputElement\n'), ((2512, 2553), 'elements.GridElement', 'GridElement', (["(By.ID, 'Triggers_triggers')"], {}), "((By.ID, 'Triggers_triggers'))\n", (2523, 2553), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((2576, 2635), 'elements.ButtonElement', 'ButtonElement', (['(By.XPATH, "//span[text()=\'Add Parameter\']")'], {}), '((By.XPATH, "//span[text()=\'Add Parameter\']"))\n', (2589, 2635), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((2657, 2716), 'elements.ButtonElement', 'ButtonElement', (['(By.XPATH, "//span[text()=\'Add Objective\']")'], {}), '((By.XPATH, "//span[text()=\'Add Objective\']"))\n', (2670, 2716), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((2738, 2798), 'elements.ButtonElement', 'ButtonElement', (['(By.XPATH, "//span[text()=\'Add Constraint\']")'], {}), '((By.XPATH, "//span[text()=\'Add Constraint\']"))\n', (2751, 2798), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((2820, 2875), 'elements.ButtonElement', 'ButtonElement', (['(By.XPATH, "//span[text()=\'Add Event\']")'], {}), '((By.XPATH, "//span[text()=\'Add Event\']"))\n', (2833, 2875), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((4792, 4833), 'elements.InputElement', 'InputElement', (["(By.ID, 'parameter-target')"], {}), "((By.ID, 'parameter-target'))\n", (4804, 4833), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((4848, 4886), 'elements.InputElement', 'InputElement', (["(By.ID, 'parameter-low')"], {}), "((By.ID, 'parameter-low'))\n", (4860, 4886), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((4901, 4940), 'elements.InputElement', 'InputElement', (["(By.ID, 'parameter-high')"], {}), "((By.ID, 'parameter-high'))\n", (4913, 4940), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((4955, 4996), 'elements.InputElement', 'InputElement', (["(By.ID, 'parameter-scaler')"], {}), "((By.ID, 'parameter-scaler'))\n", (4967, 4996), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((5011, 5051), 'elements.InputElement', 'InputElement', (["(By.ID, 'parameter-adder')"], {}), "((By.ID, 'parameter-adder'))\n", (5023, 5051), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((5066, 5105), 'elements.InputElement', 'InputElement', (["(By.ID, 'parameter-name')"], {}), "((By.ID, 'parameter-name'))\n", (5078, 5105), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((5119, 5157), 'elements.ButtonElement', 'ButtonElement', (["(By.ID, 'parameter-ok')"], {}), "((By.ID, 'parameter-ok'))\n", (5132, 5157), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((5171, 5213), 'elements.ButtonElement', 'ButtonElement', (["(By.ID, 'parameter-cancel')"], {}), "((By.ID, 'parameter-cancel'))\n", (5184, 5213), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((5313, 5352), 'elements.InputElement', 'InputElement', (["(By.ID, 'objective-expr')"], {}), "((By.ID, 'objective-expr'))\n", (5325, 5352), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((5367, 5406), 'elements.InputElement', 'InputElement', (["(By.ID, 'objective-name')"], {}), "((By.ID, 'objective-name'))\n", (5379, 5406), 
False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((5420, 5458), 'elements.ButtonElement', 'ButtonElement', (["(By.ID, 'objective-ok')"], {}), "((By.ID, 'objective-ok'))\n", (5433, 5458), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((5472, 5514), 'elements.ButtonElement', 'ButtonElement', (["(By.ID, 'objective-cancel')"], {}), "((By.ID, 'objective-cancel'))\n", (5485, 5514), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((5616, 5656), 'elements.InputElement', 'InputElement', (["(By.ID, 'constraint-expr')"], {}), "((By.ID, 'constraint-expr'))\n", (5628, 5656), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((5671, 5713), 'elements.InputElement', 'InputElement', (["(By.ID, 'constraint-scaler')"], {}), "((By.ID, 'constraint-scaler'))\n", (5683, 5713), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((5728, 5769), 'elements.InputElement', 'InputElement', (["(By.ID, 'constraint-adder')"], {}), "((By.ID, 'constraint-adder'))\n", (5740, 5769), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((5784, 5824), 'elements.InputElement', 'InputElement', (["(By.ID, 'constraint-name')"], {}), "((By.ID, 'constraint-name'))\n", (5796, 5824), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((5838, 5877), 'elements.ButtonElement', 'ButtonElement', (["(By.ID, 'constraint-ok')"], {}), "((By.ID, 'constraint-ok'))\n", (5851, 5877), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((5891, 5934), 'elements.ButtonElement', 'ButtonElement', (["(By.ID, 'constraint-cancel')"], {}), "((By.ID, 'constraint-cancel'))\n", (5904, 5934), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((6026, 6063), 'elements.InputElement', 'InputElement', (["(By.ID, 'event-target')"], {}), "((By.ID, 'event-target'))\n", (6038, 6063), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((6077, 6111), 'elements.ButtonElement', 'ButtonElement', (["(By.ID, 'event-ok')"], {}), "((By.ID, 'event-ok'))\n", (6090, 6111), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((6125, 6163), 'elements.ButtonElement', 'ButtonElement', (["(By.ID, 'event-cancel')"], {}), "((By.ID, 'event-cancel'))\n", (6138, 6163), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((6255, 6314), 'elements.ButtonElement', 'ButtonElement', (['(By.XPATH, "div/ul/li/a[text()=\'Dataflow\']")'], {}), '((By.XPATH, "div/ul/li/a[text()=\'Dataflow\']"))\n', (6268, 6314), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((6480, 6512), 'elements.TextElement', 'TextElement', (["(By.XPATH, 'h3[1]')"], {}), "((By.XPATH, 'h3[1]'))\n", (6491, 6512), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((6527, 6563), 'elements.GridElement', 'GridElement', (["(By.ID, 'Inputs_props')"], {}), "((By.ID, 'Inputs_props'))\n", (6538, 6563), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((6578, 6615), 'elements.GridElement', 'GridElement', (["(By.ID, 'Outputs_props')"], {}), "((By.ID, 'Outputs_props'))\n", (6589, 6615), False, 'from elements import ButtonElement, GridElement, TextElement, InputElement\n'), ((936, 959), 
'util.NotifierPage.wait', 'NotifierPage.wait', (['self'], {}), '(self)\n', (953, 959), False, 'from util import ArgsPrompt, NotifierPage\n'), ((4655, 4692), 'workflow.find_workflow_component_figures', 'find_workflow_component_figures', (['self'], {}), '(self)\n', (4686, 4692), False, 'from workflow import find_workflow_component_figures\n'), ((7424, 7444), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (7437, 7444), False, 'import random\n')]
|
# Generated by Django 3.2 on 2021-04-18 02:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('students', '0002_auto_20210418_0405'),
]
operations = [
migrations.AlterField(
model_name='student',
name='admitted_at',
field=models.DateField(verbose_name='Дата поступления'),
),
migrations.AlterField(
model_name='student',
name='due_at',
field=models.DateField(blank=True, null=True, verbose_name='Дата окончания'),
),
migrations.AlterField(
model_name='student',
name='excluded_at',
field=models.DateField(blank=True, null=True, verbose_name='Дата исключения'),
),
]
|
[
"django.db.models.DateField"
] |
[((341, 390), 'django.db.models.DateField', 'models.DateField', ([], {'verbose_name': '"""Дата поступления"""'}), "(verbose_name='Дата поступления')\n", (357, 390), False, 'from django.db import migrations, models\n'), ((513, 583), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Дата окончания"""'}), "(blank=True, null=True, verbose_name='Дата окончания')\n", (529, 583), False, 'from django.db import migrations, models\n'), ((711, 782), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Дата исключения"""'}), "(blank=True, null=True, verbose_name='Дата исключения')\n", (727, 782), False, 'from django.db import migrations, models\n')]
|
# This file is part of the GBI project.
# Copyright (C) 2013 Omniscale GmbH & Co. KG <http://omniscale.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import shapely
import datetime
from sqlalchemy.orm import backref
from geoalchemy2.types import Geometry
from geoalchemy2.shape import to_shape
from gbi_server.extensions import db
class Log(db.Model):
__tablename__ = 'logs'
id = db.Column(db.Integer, primary_key=True)
time = db.Column(db.DateTime, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
user = db.relationship('User', backref=backref('logs', cascade="all,delete,delete-orphan"))
action = db.Column(db.String(24), nullable=False)
geometry = db.Column(Geometry('MULTIPOLYGON', srid=4326))
format = db.Column(db.String)
srs = db.Column(db.String)
mapping = db.Column(db.String)
source = db.Column(db.String)
layer = db.Column(db.String)
zoom_level_start = db.Column(db.Integer)
zoom_level_end = db.Column(db.Integer)
refreshed = db.Column(db.Boolean)
@property
def geometry_as_geojson(self):
if self.geometry is not None:
geom = json.dumps(
shapely.geometry.mapping(to_shape(self.geometry))
)
return geom
return False
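# Illustrative usage (not part of the original file; assumes the usual
# Flask-SQLAlchemy query interface is available on db.Model):
#     log = Log.query.first()
#     log.geometry_as_geojson
#     # -> '{"type": "MultiPolygon", "coordinates": [...]}' or False if no geometry is set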
class SearchLog(db.Model):
__tablename__ = 'search_logs'
id = db.Column(db.Integer, primary_key=True)
time = db.Column(db.DateTime, default=datetime.datetime.utcnow, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
user = db.relationship('User', backref=backref('search_logs', cascade="all,delete,delete-orphan"))
class SearchLogGeometry(db.Model):
__tablename__ = 'search_log_geometries'
id = db.Column(db.Integer, primary_key=True)
search_log_id = db.Column(db.Integer, db.ForeignKey('search_logs.id'), nullable=False)
search_log = db.relationship('SearchLog', backref=backref('geometries', cascade="all,delete,delete-orphan"))
geometry = db.Column(Geometry('POLYGON', srid=3857))
identifier = db.Column(db.String)
|
[
"geoalchemy2.types.Geometry",
"gbi_server.extensions.db.Column",
"gbi_server.extensions.db.ForeignKey",
"geoalchemy2.shape.to_shape",
"gbi_server.extensions.db.String",
"sqlalchemy.orm.backref"
] |
[((909, 948), 'gbi_server.extensions.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (918, 948), False, 'from gbi_server.extensions import db\n'), ((960, 998), 'gbi_server.extensions.db.Column', 'db.Column', (['db.DateTime'], {'nullable': '(False)'}), '(db.DateTime, nullable=False)\n', (969, 998), False, 'from gbi_server.extensions import db\n'), ((1305, 1325), 'gbi_server.extensions.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (1314, 1325), False, 'from gbi_server.extensions import db\n'), ((1336, 1356), 'gbi_server.extensions.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (1345, 1356), False, 'from gbi_server.extensions import db\n'), ((1371, 1391), 'gbi_server.extensions.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (1380, 1391), False, 'from gbi_server.extensions import db\n'), ((1405, 1425), 'gbi_server.extensions.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (1414, 1425), False, 'from gbi_server.extensions import db\n'), ((1438, 1458), 'gbi_server.extensions.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (1447, 1458), False, 'from gbi_server.extensions import db\n'), ((1482, 1503), 'gbi_server.extensions.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (1491, 1503), False, 'from gbi_server.extensions import db\n'), ((1525, 1546), 'gbi_server.extensions.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (1534, 1546), False, 'from gbi_server.extensions import db\n'), ((1563, 1584), 'gbi_server.extensions.db.Column', 'db.Column', (['db.Boolean'], {}), '(db.Boolean)\n', (1572, 1584), False, 'from gbi_server.extensions import db\n'), ((1902, 1941), 'gbi_server.extensions.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (1911, 1941), False, 'from gbi_server.extensions import db\n'), ((1953, 2025), 'gbi_server.extensions.db.Column', 'db.Column', (['db.DateTime'], {'default': 'datetime.datetime.utcnow', 'nullable': '(False)'}), '(db.DateTime, default=datetime.datetime.utcnow, nullable=False)\n', (1962, 2025), False, 'from gbi_server.extensions import db\n'), ((2300, 2339), 'gbi_server.extensions.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (2309, 2339), False, 'from gbi_server.extensions import db\n'), ((2620, 2640), 'gbi_server.extensions.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (2629, 2640), False, 'from gbi_server.extensions import db\n'), ((1036, 1061), 'gbi_server.extensions.db.ForeignKey', 'db.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (1049, 1061), False, 'from gbi_server.extensions import db\n'), ((1199, 1212), 'gbi_server.extensions.db.String', 'db.String', (['(24)'], {}), '(24)\n', (1208, 1212), False, 'from gbi_server.extensions import db\n'), ((1255, 1290), 'geoalchemy2.types.Geometry', 'Geometry', (['"""MULTIPOLYGON"""'], {'srid': '(4326)'}), "('MULTIPOLYGON', srid=4326)\n", (1263, 1290), False, 'from geoalchemy2.types import Geometry\n'), ((2063, 2088), 'gbi_server.extensions.db.ForeignKey', 'db.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (2076, 2088), False, 'from gbi_server.extensions import db\n'), ((2383, 2414), 'gbi_server.extensions.db.ForeignKey', 'db.ForeignKey', (['"""search_logs.id"""'], {}), "('search_logs.id')\n", (2396, 2414), False, 'from gbi_server.extensions import db\n'), ((2571, 2601), 'geoalchemy2.types.Geometry', 'Geometry', 
(['"""POLYGON"""'], {'srid': '(3857)'}), "('POLYGON', srid=3857)\n", (2579, 2601), False, 'from geoalchemy2.types import Geometry\n'), ((1122, 1173), 'sqlalchemy.orm.backref', 'backref', (['"""logs"""'], {'cascade': '"""all,delete,delete-orphan"""'}), "('logs', cascade='all,delete,delete-orphan')\n", (1129, 1173), False, 'from sqlalchemy.orm import backref\n'), ((2149, 2207), 'sqlalchemy.orm.backref', 'backref', (['"""search_logs"""'], {'cascade': '"""all,delete,delete-orphan"""'}), "('search_logs', cascade='all,delete,delete-orphan')\n", (2156, 2207), False, 'from sqlalchemy.orm import backref\n'), ((2486, 2543), 'sqlalchemy.orm.backref', 'backref', (['"""geometries"""'], {'cascade': '"""all,delete,delete-orphan"""'}), "('geometries', cascade='all,delete,delete-orphan')\n", (2493, 2543), False, 'from sqlalchemy.orm import backref\n'), ((1745, 1768), 'geoalchemy2.shape.to_shape', 'to_shape', (['self.geometry'], {}), '(self.geometry)\n', (1753, 1768), False, 'from geoalchemy2.shape import to_shape\n')]
|
from torch import nn
class NeuralNetwork(nn.Module):
def __init__(self, h1, h2, num_classes=10, name='NN'):
super().__init__()
self.name = name
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(28*28, h1),
nn.ReLU(),
nn.Linear(h1, h2),
nn.ReLU(),
nn.Linear(h2, 10)
)
def forward(self, x):
x = self.flatten(x)
x = self.linear_relu_stack(x)
return x
def get_name(self): return self.name
def get_type(self): return 'NeuralNetwork'
def num_params(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
class CNN(nn.Module):
def __init__(self, h1=64, h2=128, input_size=28, num_classes=10, name='CNN'):
super().__init__()
self.name = name
self.conv1 = nn.Sequential(
nn.Conv2d(1, h1, 5, padding='same'),
nn.ReLU(),
nn.MaxPool2d(2, 2)
)
self.conv2 = nn.Sequential(
nn.Conv2d(h1, h2, 5, padding='same'),
nn.ReLU(),
nn.MaxPool2d(2, 2)
)
self.flatten = nn.Flatten()
num_neurons = h2 * (input_size // (2*2))**2
self.fc = nn.Linear(num_neurons, num_classes)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.flatten(x)
x = self.fc(x)
return x
def get_name(self): return self.name
def get_type(self): return 'CNN'
def num_params(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
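# Illustrative usage (not part of the original file): for 28x28 single-channel
# inputs, e.g. MNIST:
#     import torch
#     model = CNN(h1=64, h2=128)
#     logits = model(torch.randn(1, 1, 28, 28))   # shape (1, 10)
#     model.num_params(), model.get_name()        # parameter count and 'CNN'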
|
[
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.Flatten"
] |
[((168, 180), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (178, 180), False, 'from torch import nn\n'), ((979, 991), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (989, 991), False, 'from torch import nn\n'), ((1050, 1085), 'torch.nn.Linear', 'nn.Linear', (['num_neurons', 'num_classes'], {}), '(num_neurons, num_classes)\n', (1059, 1085), False, 'from torch import nn\n'), ((226, 248), 'torch.nn.Linear', 'nn.Linear', (['(28 * 28)', 'h1'], {}), '(28 * 28, h1)\n', (235, 248), False, 'from torch import nn\n'), ((252, 261), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (259, 261), False, 'from torch import nn\n'), ((266, 283), 'torch.nn.Linear', 'nn.Linear', (['h1', 'h2'], {}), '(h1, h2)\n', (275, 283), False, 'from torch import nn\n'), ((288, 297), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (295, 297), False, 'from torch import nn\n'), ((303, 320), 'torch.nn.Linear', 'nn.Linear', (['h2', '(10)'], {}), '(h2, 10)\n', (312, 320), False, 'from torch import nn\n'), ((769, 804), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'h1', '(5)'], {'padding': '"""same"""'}), "(1, h1, 5, padding='same')\n", (778, 804), False, 'from torch import nn\n'), ((809, 818), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (816, 818), False, 'from torch import nn\n'), ((823, 841), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (835, 841), False, 'from torch import nn\n'), ((879, 915), 'torch.nn.Conv2d', 'nn.Conv2d', (['h1', 'h2', '(5)'], {'padding': '"""same"""'}), "(h1, h2, 5, padding='same')\n", (888, 915), False, 'from torch import nn\n'), ((920, 929), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (927, 929), False, 'from torch import nn\n'), ((934, 952), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (946, 952), False, 'from torch import nn\n')]
|
import inspect
from typing import Callable, Any, Tuple, get_type_hints
from amino import Maybe, _, Just, Boolean, Lists, Nothing, Either, L, List, Nil, Map, Left
from amino.dat import Dat
from amino.state import StateT
from amino.util.tpe import first_type_arg, type_arg, is_subclass
from ribosome.rpc.data.nargs import Nargs
def analyse_state_type(tpe: type) -> Tuple[Either[str, type], Either[str, type]]:
return (
(first_type_arg(tpe), type_arg(tpe, 1))
if tpe is not None and is_subclass(tpe, StateT)
else (Left('not a StateT'), Left('not a StateT'))
)
def analyse_return_type(fun: Callable[..., Any], hints: Map[str, type]
) -> Tuple[type, Either[str, type], Either[str, type]]:
main_rettype = getattr(fun, 'tpe', hints.lift('return') | None)
state_type, return_type = analyse_state_type(main_rettype)
return main_rettype, state_type, return_type
def cons_params_spec(fun: Callable[..., Any]) -> 'ParamsSpec':
argspec = inspect.getfullargspec(fun)
hints = Map(get_type_hints(fun))
params = Lists.wrap(argspec.args)
defaults = Lists.wrap(argspec.defaults or ())
method = Boolean(params.head.contains('self'))
param_count = params.length - method.to_int
min = param_count - defaults.length
max = (~Boolean(argspec.varargs or argspec.varkw)).m(param_count)
nargs = Nargs.cons(min, max)
types = params.traverse(hints.lift, Maybe) | Nil
main_rettype, state_type, return_type = analyse_return_type(fun, hints)
return ParamsSpec(nargs, min, max, method, types, main_rettype, state_type, return_type | (lambda: main_rettype))
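# Illustrative example (not part of the original module):
#     def handler(a: int, b: str = 'x') -> None: ...
#     spec = cons_params_spec(handler)
# gives spec.min == 1, spec.max containing 2, and spec.types holding [int, str];
# the state/return type analysis only applies to functions returning a StateT.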
class ParamsSpec(Dat['ParamsSpec']):
@staticmethod
def from_function(fun: Callable[..., Any]) -> 'ParamsSpec':
f = getattr(fun, '__wrapped__', fun)
return cons_params_spec(f)
@staticmethod
def from_type(tpe: type) -> 'ParamsSpec':
return cons_params_spec(tpe.__init__)
def __init__(
self,
nargs: Nargs,
min: int,
max: Maybe[int],
method: Boolean,
types: List[type],
rettype: type,
state_type: Maybe[type],
return_type: type,
) -> None:
self.nargs = nargs
self.min = min
self.max = max
self.method = method
self.types = types
self.rettype = rettype
self.state_type = state_type
self.return_type = return_type
@property
def exact_count(self) -> Maybe[int]:
return Just(self.min) if self.max.contains(self.min) else Nothing
class ArgValidator(Dat['ArgValidator']):
def __init__(self, spec: ParamsSpec) -> None:
self.spec = spec
@property
def min(self) -> int:
return self.spec.min
@property
def max(self) -> Maybe[int]:
return self.spec.max
def validate(self, count: int) -> Boolean:
return Boolean(self.min <= count and not self.max.exists(_ < count))
def error(self, args: tuple, desc: str, name: str) -> str:
return f'argument count for {desc} `{name}` is {len(args)}, must be {self.count_spec} ({args})'
@property
def count_spec(self) -> str:
return (
self.spec.exact_count /
(lambda a: f'exactly {a}' if a > 0 else 'none') | (
self.max /
(lambda mx: f'between {self.min} and {mx}') |
f'at least {self.min}'
)
)
def either(self, args: tuple, desc: str, name: str) -> Either[str, None]:
return self.validate(len(args)).e(L(self.error)(args, desc, name), None)
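# Illustrative example (not part of the original module):
#     spec = ParamsSpec.from_function(lambda a, b=1: None)
#     validator = ArgValidator(spec)
#     validator.validate(1)   # true: one required argument, one optional
#     validator.validate(3)   # false: more than the maximum of two arguments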
__all__ = ('ArgValidator', 'ParamsSpec')
|
[
"amino.Left",
"inspect.getfullargspec",
"amino.Boolean",
"typing.get_type_hints",
"amino.Lists.wrap",
"amino.util.tpe.first_type_arg",
"amino.util.tpe.type_arg",
"amino.Just",
"ribosome.rpc.data.nargs.Nargs.cons",
"amino.util.tpe.is_subclass",
"amino.L"
] |
[((997, 1024), 'inspect.getfullargspec', 'inspect.getfullargspec', (['fun'], {}), '(fun)\n', (1019, 1024), False, 'import inspect\n'), ((1075, 1099), 'amino.Lists.wrap', 'Lists.wrap', (['argspec.args'], {}), '(argspec.args)\n', (1085, 1099), False, 'from amino import Maybe, _, Just, Boolean, Lists, Nothing, Either, L, List, Nil, Map, Left\n'), ((1115, 1149), 'amino.Lists.wrap', 'Lists.wrap', (['(argspec.defaults or ())'], {}), '(argspec.defaults or ())\n', (1125, 1149), False, 'from amino import Maybe, _, Just, Boolean, Lists, Nothing, Either, L, List, Nil, Map, Left\n'), ((1371, 1391), 'ribosome.rpc.data.nargs.Nargs.cons', 'Nargs.cons', (['min', 'max'], {}), '(min, max)\n', (1381, 1391), False, 'from ribosome.rpc.data.nargs import Nargs\n'), ((1041, 1060), 'typing.get_type_hints', 'get_type_hints', (['fun'], {}), '(fun)\n', (1055, 1060), False, 'from typing import Callable, Any, Tuple, get_type_hints\n'), ((504, 528), 'amino.util.tpe.is_subclass', 'is_subclass', (['tpe', 'StateT'], {}), '(tpe, StateT)\n', (515, 528), False, 'from amino.util.tpe import first_type_arg, type_arg, is_subclass\n'), ((434, 453), 'amino.util.tpe.first_type_arg', 'first_type_arg', (['tpe'], {}), '(tpe)\n', (448, 453), False, 'from amino.util.tpe import first_type_arg, type_arg, is_subclass\n'), ((455, 471), 'amino.util.tpe.type_arg', 'type_arg', (['tpe', '(1)'], {}), '(tpe, 1)\n', (463, 471), False, 'from amino.util.tpe import first_type_arg, type_arg, is_subclass\n'), ((543, 563), 'amino.Left', 'Left', (['"""not a StateT"""'], {}), "('not a StateT')\n", (547, 563), False, 'from amino import Maybe, _, Just, Boolean, Lists, Nothing, Either, L, List, Nil, Map, Left\n'), ((565, 585), 'amino.Left', 'Left', (['"""not a StateT"""'], {}), "('not a StateT')\n", (569, 585), False, 'from amino import Maybe, _, Just, Boolean, Lists, Nothing, Either, L, List, Nil, Map, Left\n'), ((2543, 2557), 'amino.Just', 'Just', (['self.min'], {}), '(self.min)\n', (2547, 2557), False, 'from amino import Maybe, _, Just, Boolean, Lists, Nothing, Either, L, List, Nil, Map, Left\n'), ((1301, 1342), 'amino.Boolean', 'Boolean', (['(argspec.varargs or argspec.varkw)'], {}), '(argspec.varargs or argspec.varkw)\n', (1308, 1342), False, 'from amino import Maybe, _, Just, Boolean, Lists, Nothing, Either, L, List, Nil, Map, Left\n'), ((3599, 3612), 'amino.L', 'L', (['self.error'], {}), '(self.error)\n', (3600, 3612), False, 'from amino import Maybe, _, Just, Boolean, Lists, Nothing, Either, L, List, Nil, Map, Left\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************************************************
# Copyright © 2017 jianglin
# File Name: jobs.py
# Author: jianglin
# Email: <EMAIL>
# Created: 2017-02-02 14:28:16 (CST)
# Last Update: Sunday 2018-09-30 17:50:05 (CST)
# By:
# Description:
# **************************************************************************
from time import time, sleep
def scheduler_vvv():
'''Print hello world'''
print('hello world')
def scheduler_kkk():
'''Print helloorld'''
print('helloorld')
def scheduler_time(a):
    '''
    Print the current time together with parameter a
    '''
print('{}{}'.format(a, time()))
def scheduler_vvvv():
'''
    sleep 10s
'''
print('sleep start')
sleep(10)
print('sleep end')
|
[
"time.time",
"time.sleep"
] |
[((755, 764), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (760, 764), False, 'from time import time, sleep\n'), ((663, 669), 'time.time', 'time', ([], {}), '()\n', (667, 669), False, 'from time import time, sleep\n')]
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='queryCitefile',
install_requires=[],
version='0.1',
description='Simple tool to process mwcites output',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/tarrow/queryCiteFile',
py_modules=['queryCiteFile']
)
|
[
"distutils.core.setup"
] |
[((57, 306), 'distutils.core.setup', 'setup', ([], {'name': '"""queryCitefile"""', 'install_requires': '[]', 'version': '"""0.1"""', 'description': '"""Simple tool to process mwcites output"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/tarrow/queryCiteFile"""', 'py_modules': "['queryCiteFile']"}), "(name='queryCitefile', install_requires=[], version='0.1', description\n ='Simple tool to process mwcites output', author='<NAME>', author_email\n ='<EMAIL>', url='https://github.com/tarrow/queryCiteFile', py_modules=[\n 'queryCiteFile'])\n", (62, 306), False, 'from distutils.core import setup\n')]
|
import fnmatch
import os
import posixpath
import re
from enum import IntEnum
from . import builtin
from ..file_types import File, Directory
from ..iterutils import iterate, listify
from ..backends.make import writer as make
from ..backends.ninja import writer as ninja
from ..backends.make.syntax import Writer, Syntax
from ..build_inputs import build_input
from ..path import Path, Root
from ..platforms import known_platforms
build_input('find_dirs')(lambda build_inputs, env: set())
depfile_name = '.bfg_find_deps'
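# Glob patterns excluded from find() by default (typically editor
# backup/autosave temporary files).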
exclude_globs = ['.*#', '*~', '#*#']
@builtin.function()
class FindResult(IntEnum):
include = 0
not_now = 1
exclude = 2
def write_depfile(env, path, output, seen_dirs, makeify=False):
with open(path.string(env.base_dirs), 'w') as f:
# Since this file is in the build dir, we can use relative dirs for
# deps also in the build dir.
roots = env.base_dirs.copy()
roots[Root.builddir] = None
out = Writer(f)
out.write(output.string(roots), Syntax.target)
out.write_literal(':')
for i in seen_dirs:
out.write_literal(' ')
out.write(i.string(roots), Syntax.dependency)
out.write_literal('\n')
if makeify:
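            # Also emit each seen directory as a bare target so that make
            # does not error out if one of them disappears later.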
for i in seen_dirs:
out.write(i.string(roots), Syntax.target)
out.write_literal(':\n')
def _listdir(path):
dirs, nondirs = [], []
try:
names = os.listdir(path)
for name in names:
# Use POSIX paths so that the result is platform-agnostic.
curpath = posixpath.join(path, name)
if os.path.isdir(curpath):
dirs.append((name, curpath))
else:
nondirs.append((name, curpath))
except Exception:
pass
return dirs, nondirs
def _walk_flat(top):
if os.path.exists(top):
yield (top,) + _listdir(top)
def _walk_recursive(top):
if not os.path.exists(top):
return
dirs, nondirs = _listdir(top)
yield top, dirs, nondirs
for name, path in dirs:
if not os.path.islink(path):
for i in _walk_recursive(path):
yield i
def _filter_from_glob(match_type, matches, extra, exclude):
matches = [re.compile(fnmatch.translate(i)) for i in iterate(matches)]
extra = [re.compile(fnmatch.translate(i)) for i in iterate(extra)]
exclude = [re.compile(fnmatch.translate(i)) for i in iterate(exclude)]
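    # Precedence inside the filter: an exclude pattern always wins, an explicit
    # match is included, an "extra" match is deferred (not_now), and anything
    # else (including non-matching types) is excluded.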
def fn(name, path, type):
if match_type in {type, '*'}:
if any(ex.match(name) for ex in exclude):
return FindResult.exclude
if any(ex.match(name) for ex in matches):
return FindResult.include
elif any(ex.match(name) for ex in extra):
return FindResult.not_now
return FindResult.exclude
return fn
def _find_files(paths, filter, flat, as_object):
# "Does the walker choose the path, or the path the walker?" - <NAME>
walker = _walk_flat if flat else _walk_recursive
results, dist_results, seen_dirs = [], [], []
filetype = File if isinstance(as_object, bool) else as_object
def do_filter(files, type):
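        # 'include' hits go into both the returned results and the dist set;
        # 'not_now' hits are kept only for dist (e.g. files for other platforms).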
cls = filetype if type == 'f' else lambda p: Directory(p, None)
for name, path in files:
fileobj = cls(Path(path, Root.srcdir))
matched = filter(name, path, type)
if matched == FindResult.include:
dist_results.append(fileobj)
results.append(fileobj if as_object else path)
elif matched == FindResult.not_now:
dist_results.append(fileobj)
do_filter(( (os.path.basename(p), p) for p in paths ), 'd')
for p in paths:
for base, dirs, files in walker(p):
seen_dirs.append(Path(base, Root.srcdir))
do_filter(dirs, 'd')
do_filter(files, 'f')
return results, dist_results, seen_dirs
def find(path='.', name='*', type='*', extra=None, exclude=exclude_globs,
flat=False):
glob_filter = _filter_from_glob(type, name, extra, exclude)
return _find_files(listify(path), glob_filter, flat, False)[0]
@builtin.function('env')
def filter_by_platform(env, name, path, type):
my_plat = set([env.target_platform.name, env.target_platform.flavor])
sub = '|'.join(re.escape(i) for i in known_platforms if i not in my_plat)
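    # Roughly: treat the path as platform-specific (not_now) when a foreign
    # platform name appears as a path component, after an underscore, or right
    # before the file extension.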
ex = r'(^|/|_)(' + sub + r')(\.[^\.]$|$|/)'
return FindResult.not_now if re.search(ex, path) else FindResult.include
@builtin.function('builtins', 'build_inputs', 'env')
def find_files(builtins, build_inputs, env, path='.', name='*', type='*',
extra=None, exclude=exclude_globs, filter=filter_by_platform,
flat=False, cache=True, dist=True, as_object=False):
glob_filter = _filter_from_glob(type, name, extra, exclude)
if filter:
if filter == filter_by_platform:
filter = builtins['filter_by_platform']
def final_filter(name, path, type):
return max(filter(name, path, type), glob_filter(name, path, type))
else:
final_filter = glob_filter
paths = [i.path.string(env.base_dirs) if isinstance(i, File) else i
for i in iterate(path)]
found, dist, seen_dirs = _find_files(paths, final_filter, flat, as_object)
if cache:
build_inputs['find_dirs'].update(seen_dirs)
build_inputs['regenerate'].depfile = depfile_name
if dist:
for i in dist:
build_inputs.add_source(i)
return found
@make.post_rule
def make_find_dirs(build_inputs, buildfile, env):
if build_inputs['find_dirs']:
write_depfile(env, Path(depfile_name), make.filepath,
build_inputs['find_dirs'], makeify=True)
buildfile.include(depfile_name)
@ninja.post_rule
def ninja_find_dirs(build_inputs, buildfile, env):
if build_inputs['find_dirs']:
write_depfile(env, Path(depfile_name), ninja.filepath,
build_inputs['find_dirs'])
|
[
"os.path.basename",
"os.path.isdir",
"os.path.exists",
"re.escape",
"posixpath.join",
"os.path.islink",
"fnmatch.translate",
"re.search",
"os.listdir"
] |
[((1853, 1872), 'os.path.exists', 'os.path.exists', (['top'], {}), '(top)\n', (1867, 1872), False, 'import os\n'), ((1449, 1465), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1459, 1465), False, 'import os\n'), ((1950, 1969), 'os.path.exists', 'os.path.exists', (['top'], {}), '(top)\n', (1964, 1969), False, 'import os\n'), ((4480, 4499), 're.search', 're.search', (['ex', 'path'], {}), '(ex, path)\n', (4489, 4499), False, 'import re\n'), ((1586, 1612), 'posixpath.join', 'posixpath.join', (['path', 'name'], {}), '(path, name)\n', (1600, 1612), False, 'import posixpath\n'), ((1628, 1650), 'os.path.isdir', 'os.path.isdir', (['curpath'], {}), '(curpath)\n', (1641, 1650), False, 'import os\n'), ((2092, 2112), 'os.path.islink', 'os.path.islink', (['path'], {}), '(path)\n', (2106, 2112), False, 'import os\n'), ((2270, 2290), 'fnmatch.translate', 'fnmatch.translate', (['i'], {}), '(i)\n', (2287, 2290), False, 'import fnmatch\n'), ((2343, 2363), 'fnmatch.translate', 'fnmatch.translate', (['i'], {}), '(i)\n', (2360, 2363), False, 'import fnmatch\n'), ((2416, 2436), 'fnmatch.translate', 'fnmatch.translate', (['i'], {}), '(i)\n', (2433, 2436), False, 'import fnmatch\n'), ((4340, 4352), 're.escape', 're.escape', (['i'], {}), '(i)\n', (4349, 4352), False, 'import re\n'), ((3666, 3685), 'os.path.basename', 'os.path.basename', (['p'], {}), '(p)\n', (3682, 3685), False, 'import os\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Automatic testing for worktree """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import py
import mock
import pytest
import qisys.sh
import qisys.worktree
def test_read_projects(tmpdir):
""" Test Read Projects """
tmpdir.mkdir("core").mkdir("naoqi")
tmpdir.mkdir("lib").mkdir("libqi")
xml_path = tmpdir.mkdir(".qi").join("worktree.xml")
xml_path.write("""
<worktree>
<project src="core/naoqi" />
<project src="lib/libqi" />
</worktree>
""")
worktree = qisys.worktree.WorkTree(tmpdir.strpath)
p_srcs = [p.src for p in worktree.projects]
assert p_srcs == ["core/naoqi", "lib/libqi"]
def test_normalize_path(tmpdir):
""" Test Nomalize Path """
worktree = qisys.worktree.WorkTree(tmpdir.strpath)
foo_abs_path = tmpdir.join("bar").join("foo").strpath
assert worktree.normalize_path(foo_abs_path) == "bar/foo"
assert worktree.normalize_path("bar/foo") == "bar/foo"
def test_add_project_simple(worktree):
""" Test Add Project Simple """
tmp = py.path.local(worktree.root) # pylint:disable=no-member
tmp.mkdir("foo")
worktree.add_project("foo")
assert len(worktree.projects) == 1
foo1 = worktree.get_project("foo")
assert foo1.src == "foo"
def test_fails_when_root_does_not_exists(tmpdir):
""" Test Fails When Root Does Not Exists """
non_existing = tmpdir.join("doesnotexist")
with pytest.raises(Exception) as e:
qisys.worktree.WorkTree(non_existing.strpath)
assert "does not exist" in str(e.value)
def test_ignore_src_dot(tmpdir):
""" Test Ignore Src Dot """
_foo_path = tmpdir.mkdir("foo")
tmpdir.join("foo", "qiproject.xml").write("""
<project>
<project src="." />
</project>
""")
worktree = qisys.worktree.WorkTree(tmpdir.strpath)
worktree.add_project("foo")
def test_remove_project(worktree):
""" Test Remove Project """
tmp = py.path.local(worktree.root) # pylint:disable=no-member
foo_src = tmp.mkdir("foo")
worktree.add_project("foo")
with pytest.raises(qisys.worktree.WorkTreeError) as e:
worktree.remove_project("bar")
assert "No project in 'bar'" in str(e)
worktree.remove_project("foo")
assert worktree.projects == list()
worktree.add_project("foo")
assert worktree.projects[0].src == "foo"
worktree.remove_project("foo", from_disk=True)
assert worktree.projects == list()
assert not os.path.exists(foo_src.strpath)
def test_nested_qiprojects(tmpdir):
""" Test Nested Project """
a_project = tmpdir.mkdir("a")
worktree_xml = tmpdir.mkdir(".qi").join("worktree.xml")
worktree_xml.write("""
<worktree>
<project src="a" />
</worktree>
""")
a_xml = a_project.join("qiproject.xml")
a_xml.write("""
<project name="a">
<project src="b" />
</project>
""")
b_project = a_project.mkdir("b")
b_xml = b_project.join("qiproject.xml")
b_xml.write("""
<project name="b">
<project src="c" />
</project>
""")
c_project = b_project.mkdir("c")
c_xml = c_project.join("qiproject.xml")
c_xml.write('<project name="c" />\n')
worktree = qisys.worktree.WorkTree(tmpdir.strpath)
assert len(worktree.projects) == 3
assert [p.src for p in worktree.projects] == ["a", "a/b", "a/b/c"]
def test_non_exiting_path_are_removed(tmpdir, interact):
""" All projects registered should exist """
wt = qisys.worktree.WorkTree(tmpdir.strpath)
a_path = tmpdir.mkdir("a")
wt.add_project(a_path.strpath)
a_path.remove()
wt2 = qisys.worktree.WorkTree(tmpdir.strpath)
assert wt2.projects == list()
def test_check_subprojects_exist(tmpdir):
""" Subprojets in qiproject.xml should exist """
wt = qisys.worktree.WorkTree(tmpdir.strpath)
a_path = tmpdir.mkdir("a")
a_qiproject = a_path.join("qiproject.xml")
a_qiproject.write(""" \
<project >
<project src="b" />
</project>
""")
with pytest.raises(qisys.worktree.WorkTreeError) as e:
wt.add_project("a")
assert "invalid sub project" in str(e.value)
def test_observers_are_notified(worktree):
""" Test Observers Are Notified """
mock_observer = mock.Mock()
worktree.register(mock_observer)
worktree.create_project("foo")
assert mock_observer.reload.called
def test_add_nested_projects(worktree):
""" Test Add Nested Project """
worktree.create_project("foo")
tmpdir = worktree.tmpdir
spam = tmpdir.mkdir("spam")
spam.join("qiproject.xml").write(""" \
<project>
<project src="eggs" />
</project>
""")
spam.mkdir("eggs")
worktree.add_project("spam")
assert [p.src for p in worktree.projects] == ["foo", "spam", "spam/eggs"]
worktree.remove_project("spam")
assert [p.src for p in worktree.projects] == ["foo"]
def test_warns_on_nested_worktrees(tmpdir, record_messages):
""" Test Warns On Nested WorkTrees """
work1 = tmpdir.mkdir("work1")
work1.mkdir(".qi")
work2 = work1.mkdir("work2")
work2.mkdir(".qi")
qisys.worktree.WorkTree(work2.strpath)
assert record_messages.find("Nested worktrees")
@pytest.mark.skip(reason="no way of currently testing this")
def test_non_ascii_path(tmpdir):
""" Test Non ASCII Path """
coffee_dir = tmpdir.mkdir("café")
qisys.worktree.WorkTree(coffee_dir.strpath)
|
[
"os.path.exists",
"pytest.raises",
"mock.Mock",
"py.path.local",
"pytest.mark.skip"
] |
[((5331, 5390), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""no way of currently testing this"""'}), "(reason='no way of currently testing this')\n", (5347, 5390), False, 'import pytest\n'), ((1294, 1322), 'py.path.local', 'py.path.local', (['worktree.root'], {}), '(worktree.root)\n', (1307, 1322), False, 'import py\n'), ((2164, 2192), 'py.path.local', 'py.path.local', (['worktree.root'], {}), '(worktree.root)\n', (2177, 2192), False, 'import py\n'), ((4396, 4407), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (4405, 4407), False, 'import mock\n'), ((1668, 1692), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1681, 1692), False, 'import pytest\n'), ((2293, 2336), 'pytest.raises', 'pytest.raises', (['qisys.worktree.WorkTreeError'], {}), '(qisys.worktree.WorkTreeError)\n', (2306, 2336), False, 'import pytest\n'), ((2681, 2712), 'os.path.exists', 'os.path.exists', (['foo_src.strpath'], {}), '(foo_src.strpath)\n', (2695, 2712), False, 'import os\n'), ((4164, 4207), 'pytest.raises', 'pytest.raises', (['qisys.worktree.WorkTreeError'], {}), '(qisys.worktree.WorkTreeError)\n', (4177, 4207), False, 'import pytest\n')]
|
"""
"""
## source.python imports
from commands import CommandReturn
from commands.client import ClientCommand
from commands.say import SayCommand
from listeners import OnClientFullyConnect
## warcraft.package imports
from warcraft.database import session
from warcraft.players import player_dict
## extension imports
from .config import *
from .database import *
from .menus import *
## __all__ declaration
__all__ = (
"levelbank_menu",
)
## handling new players
@OnClientFullyConnect
def _on_client_full_connect_setup_levelbank(index):
player = player_dict[index]
player_levelbank = session.query(Levelbank).filter(Levelbank.parent == player._dbinstance).first()
if not player_levelbank:
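        # First join for this player: create a bank seeded with the configured
        # starting amount and persist it.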
start_levels = levelbank_start_amount.cvar.get_int()
player_levelbank = Levelbank(levels=start_levels, parent=player._dbinstance)
session.add(player_levelbank)
session.commit()
## handling client/say commands
@ClientCommand(["levelbank", "wcsbank"])
@SayCommand(["levelbank", "wcsbank"])
def _levelbank_say_command(command, index, team_only=None):
levelbank_menu.send(index)
return CommandReturn.BLOCK
|
[
"commands.say.SayCommand",
"warcraft.database.session.commit",
"commands.client.ClientCommand",
"warcraft.database.session.add",
"warcraft.database.session.query"
] |
[((960, 999), 'commands.client.ClientCommand', 'ClientCommand', (["['levelbank', 'wcsbank']"], {}), "(['levelbank', 'wcsbank'])\n", (973, 999), False, 'from commands.client import ClientCommand\n'), ((1001, 1037), 'commands.say.SayCommand', 'SayCommand', (["['levelbank', 'wcsbank']"], {}), "(['levelbank', 'wcsbank'])\n", (1011, 1037), False, 'from commands.say import SayCommand\n'), ((870, 899), 'warcraft.database.session.add', 'session.add', (['player_levelbank'], {}), '(player_levelbank)\n', (881, 899), False, 'from warcraft.database import session\n'), ((908, 924), 'warcraft.database.session.commit', 'session.commit', ([], {}), '()\n', (922, 924), False, 'from warcraft.database import session\n'), ((607, 631), 'warcraft.database.session.query', 'session.query', (['Levelbank'], {}), '(Levelbank)\n', (620, 631), False, 'from warcraft.database import session\n')]
|
from opytimizer.optimizers.swarm import FFOA
# Creates a FFOA optimizer
o = FFOA()
|
[
"opytimizer.optimizers.swarm.FFOA"
] |
[((77, 83), 'opytimizer.optimizers.swarm.FFOA', 'FFOA', ([], {}), '()\n', (81, 83), False, 'from opytimizer.optimizers.swarm import FFOA\n')]
|
# coding=utf-8
"""
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from copy import deepcopy
import warnings as warnings
from collections import OrderedDict
import numpy as np
from pypint.solvers.i_iterative_time_solver import IIterativeTimeSolver
from pypint.solvers.i_parallel_solver import IParallelSolver
from pypint.communicators.message import Message
from pypint.integrators.integrator_base import IntegratorBase
from pypint.integrators.node_providers.gauss_lobatto_nodes import GaussLobattoNodes
from pypint.integrators.weight_function_providers.polynomial_weight_function import PolynomialWeightFunction
from pypint.problems import IInitialValueProblem, problem_has_exact_solution
from pypint.solvers.states.sdc_solver_state import SdcSolverState
from pypint.solvers.diagnosis import IDiagnosisValue
from pypint.solvers.diagnosis.norms import supremum_norm
from pypint.plugins.timers.timer_base import TimerBase
from pypint.utilities.threshold_check import ThresholdCheck
from pypint.utilities import assert_is_instance, assert_condition, func_name, assert_named_argument
from pypint.utilities.logging import *
# General Notes on Implementation
# ===============================
#
# Names and Meaning of Indices
# ----------------------------
# T_max (num_time_steps) | number of time steps
# N (num_nodes) | number of integration nodes per time step
# t | index of current time step; interval: [0, T_max)
# n | index of current node of current time step; interval: [1, N)
# | the current node is always the next node, i.e. the node we are
# | calculating the value for
# i | index of current point in continuous array of points
class ParallelSdc(IIterativeTimeSolver, IParallelSolver):
"""*Spectral Deferred Corrections* method for solving first order ODEs.
The *Spectral Deferred Corrections* (SDC) method is described in [Minion2003]_ (Equation 2.7)
Default Values:
* :py:class:`.ThresholdCheck`
* ``max_threshold``: 10
* ``min_threshold``: 1e-7
* ``conditions``: ``('residual', 'iterations')``
* :py:attr:`.num_time_steps`: 1
* :py:attr:`.num_nodes`: 3
Given the total number of time steps :math:`T_{max}`, number of integration nodes per time
step :math:`N`, current time step :math:`t \\in [0,T_{max})` and the next integration node
to consider :math:`n \\in [0, N)`.
Let :math:`[a,b]` be the total time interval to integrate over.
For :math:`T_{max}=3` and :math:`N=4`, this can be visualized as::
a b
| |
| . . | . . | . . |
t 0 0 0 0 1 1 1 2 2 2
n 0 1 2 3 1 2 3 1 2 3
i 0 1 2 3 4 5 6 7 8 9
In general, the value at :math:`a` (i.e. :math:`t=n=i=0`) is the initial value.
See Also
--------
:py:class:`.IIterativeTimeSolver` :
implemented interface
:py:class:`.IParallelSolver` :
mixed-in interface
"""
def __init__(self, **kwargs):
super(ParallelSdc, self).__init__(**kwargs)
IParallelSolver.__init__(self, **kwargs)
del self._state
self.threshold = ThresholdCheck(min_threshold=1e-7, max_threshold=10, conditions=("residual", "iterations"))
self.timer = TimerBase()
self._num_time_steps = 1
self._dt = 0.0
self._deltas = {
't': 0.0,
'n': np.zeros(0)
}
self._classic = True
self.__nodes_type = GaussLobattoNodes
self.__weights_type = PolynomialWeightFunction
self.__num_nodes = 3
self.__exact = np.zeros(0)
self.__time_points = {
'steps': np.zeros(0),
'nodes': np.zeros(0)
}
def init(self, problem, integrator, **kwargs):
"""Initializes SDC solver with given problem and integrator.
Parameters
----------
num_time_steps : :py:class:`int`
Number of time steps to be used within the time interval of the problem.
num_nodes : :py:class:`int`
            *(optional)*
number of nodes per time step
nodes_type : :py:class:`.INodes`
*(optional)*
Type of integration nodes to be used (class name, **NOT instance**).
weights_type : :py:class:`.IWeightFunction`
*(optional)*
Integration weights function to be used (class name, **NOT instance**).
classic : :py:class:`bool`
*(optional)*
Flag for specifying the type of the SDC sweep.
:py:class:`True`: *(default)* For the classic SDC as known from the literature;
:py:class:`False`: For the modified SDC as developed by <NAME>.
Raises
------
ValueError :
* if given problem is not an :py:class:`.IInitialValueProblem`
* if number of nodes per time step is not given; neither through ``num_nodes``, ``nodes_type`` nor
``integrator``
See Also
--------
:py:meth:`.IIterativeTimeSolver.init`
overridden method (with further parameters)
:py:meth:`.IParallelSolver.init`
mixed in overridden method (with further parameters)
"""
assert_is_instance(problem, IInitialValueProblem, descriptor="Initial Value Problem", checking_obj=self)
assert_condition(issubclass(integrator, IntegratorBase),
ValueError, message="Integrator must be an IntegratorBase: NOT %s"
% integrator.__mro__[-2].__name__,
checking_obj=self)
super(ParallelSdc, self).init(problem, integrator=integrator, **kwargs)
if 'num_time_steps' in kwargs:
self._num_time_steps = kwargs['num_time_steps']
if 'num_nodes' in kwargs:
self.__num_nodes = kwargs['num_nodes']
elif 'nodes_type' in kwargs and kwargs['nodes_type'].num_nodes is not None:
self.__num_nodes = kwargs['nodes_type'].num_nodes
elif integrator.nodes_type is not None and integrator.nodes_type.num_nodes is not None:
self.__num_nodes = integrator.nodes_type.num_nodes
else:
raise ValueError(func_name(self) + "Number of nodes per time step not given.")
        if 'nodes_type' in kwargs:
            self.__nodes_type = kwargs['nodes_type']
if 'weights_type' in kwargs:
self.__weights_type = kwargs['weights_type']
if 'classic' in kwargs:
assert_is_instance(kwargs['classic'], bool, descriptor="Classic Flag", checking_obj=self)
self._classic = kwargs['classic']
# TODO: need to store the exact solution somewhere else
self.__exact = np.zeros(self.num_time_steps * (self.__num_nodes - 1) + 1, dtype=np.object)
def run(self, core, **kwargs):
"""Applies SDC solver to the initialized problem setup.
Solves the given problem with the explicit SDC algorithm.
Parameters
----------
core : :py:class:`.SdcSolverCore`
core solver stepping method
dt : :py:class:`float`
            width of the interval to work on; this is divided into the number of given
time steps this solver has been initialized with
See Also
--------
:py:meth:`.IIterativeTimeSolver.run` : overridden method
"""
super(ParallelSdc, self).run(core, **kwargs)
assert_named_argument('dt', kwargs, types=float, descriptor="Width of Interval", checking_obj=self)
self._dt = kwargs['dt']
self._print_header()
# start iterations
# TODO: exact solution storage handling
self.__exact[0] = self.problem.initial_value
_has_work = True
_previous_flag = Message.SolverFlag.none
_current_flag = Message.SolverFlag.none
__work_loop_count = 1
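        # Work loop: receive a message from the preceding solver, initialize or
        # continue the current interval accordingly, run one SDC iteration, and
        # pass the resulting end value and status flag on to the next solver.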
while _has_work:
LOG.debug("Work Loop: %d" % __work_loop_count)
_previous_flag = _current_flag
_current_flag = Message.SolverFlag.none
# receive dedicated message
_msg = self._communicator.receive()
if _msg.flag == Message.SolverFlag.failed:
# previous solver failed
# --> pass on the failure and abort
_current_flag = Message.SolverFlag.failed
_has_work = False
LOG.debug("Previous Solver Failed")
else:
if _msg.flag == Message.SolverFlag.time_adjusted:
# the previous solver has adjusted its interval
# --> we need to recompute our interval
_current_flag = self._adjust_interval_width()
# we don't immediately start the computation of the newly computed interval
# but try to pass the new interval end to the next solver as soon as possible
# (this should avoid throwing away useless computation)
LOG.debug("Previous Solver Adjusted Time")
else:
if _previous_flag in \
[Message.SolverFlag.none, Message.SolverFlag.converged, Message.SolverFlag.finished,
Message.SolverFlag.time_adjusted]:
# we just started or finished our previous interval
# --> start a new interval
_has_work = self._init_new_interval(_msg.time_point)
if _has_work:
# set initial values
self.state.initial.solution.value = _msg.value.copy()
self.state.initial.solution.time_point = _msg.time_point
self.state.initial.done()
LOG.debug("New Interval Initialized")
# start logging output
self._print_interval_header()
# start global timing (per interval)
self.timer.start()
else:
# pass
LOG.debug("No New Interval Available")
elif _previous_flag == Message.SolverFlag.iterating:
LOG.debug("Next Iteration")
else:
LOG.warn("WARNING!!! Something went wrong here")
if _has_work:
# we are still on the same interval or have just successfully initialized a new interval
# --> do the real computation
LOG.debug("Starting New Solver Main Loop")
# initialize a new iteration state
self.state.proceed()
if _msg.time_point == self.state.initial.time_point:
if _previous_flag == Message.SolverFlag.iterating:
LOG.debug("Updating initial value")
# if the previous solver has a new initial value for us, we use it
self.state.current_iteration.initial.solution.value = _msg.value.copy()
_current_flag = self._main_solver_loop()
if _current_flag in \
[Message.SolverFlag.converged, Message.SolverFlag.finished, Message.SolverFlag.failed]:
_log_msgs = {'': OrderedDict()}
if self.state.last_iteration_index <= self.threshold.max_iterations:
_group = 'Converged after %d iteration(s)' % (self.state.last_iteration_index + 1)
_log_msgs[''][_group] = OrderedDict()
_log_msgs[''][_group] = self.threshold.has_reached(log=True)
_log_msgs[''][_group]['Final Residual'] = "{:.3e}"\
.format(supremum_norm(self.state.last_iteration.final_step.solution.residual))
_log_msgs[''][_group]['Solution Reduction'] = "{:.3e}"\
.format(supremum_norm(self.state.solution
.solution_reduction(self.state.last_iteration_index)))
if problem_has_exact_solution(self.problem, self):
_log_msgs[''][_group]['Error Reduction'] = "{:.3e}"\
.format(supremum_norm(self.state.solution
.error_reduction(self.state.last_iteration_index)))
else:
warnings.warn("{}: Did not converged: {:s}".format(self._core.name, self.problem))
_group = "FAILED: After maximum of {:d} iteration(s)"\
.format(self.state.last_iteration_index + 1)
_log_msgs[''][_group] = OrderedDict()
_log_msgs[''][_group]['Final Residual'] = "{:.3e}"\
.format(supremum_norm(self.state.last_iteration.final_step.solution.residual))
_log_msgs[''][_group]['Solution Reduction'] = "{:.3e}"\
.format(supremum_norm(self.state.solution
.solution_reduction(self.state.last_iteration_index)))
if problem_has_exact_solution(self.problem, self):
_log_msgs[''][_group]['Error Reduction'] = "{:.3e}"\
.format(supremum_norm(self.state.solution
.error_reduction(self.state.last_iteration_index)))
LOG.warn(" {} Failed: Maximum number iterations reached without convergence."
.format(self._core.name))
print_logging_message_tree(_log_msgs)
elif _previous_flag in [Message.SolverFlag.converged, Message.SolverFlag.finished]:
LOG.debug("Solver Finished.")
self.timer.stop()
self._print_footer()
else:
# something went wrong
# --> we failed
LOG.warn("Solver failed.")
_current_flag = Message.SolverFlag.failed
self._communicator.send(value=self.state.current_iteration.final_step.solution.value,
time_point=self.state.current_iteration.final_step.time_point,
flag=_current_flag)
__work_loop_count += 1
# end while:has_work is None
LOG.debug("Solver Main Loop Done")
return [_s.solution for _s in self._states]
@property
def state(self):
"""Read-only accessor for the sovler's state
Returns
-------
state : :py:class:`.ISolverState`
"""
if len(self._states) > 0:
return self._states[-1]
else:
return None
@property
def num_time_steps(self):
"""Accessor for the number of time steps within the interval.
Returns
-------
number_time_steps : :py:class:`int`
Number of time steps within the problem-given time interval.
"""
return self._num_time_steps
@property
def num_nodes(self):
"""Accessor for the number of integration nodes per time step.
Returns
-------
number_of_nodes : :py:class:`int`
Number of integration nodes used within one time step.
"""
return self.__num_nodes
@property
def classic(self):
"""Read-only accessor for the type of SDC
Returns
-------
is_classic : :py:class:`bool`
:py:class:`True` if it's the classic SDC as known from papers;
:py:class:`False` if it's the modified SDC by <NAME>
"""
return self._classic
def _init_new_state(self):
"""Initialize a new state for a work task
Usually, this starts a new work task.
The previous state, if applicable, is stored in a stack.
"""
if self.state:
# finalize the current state
self.state.finalize()
# initialize solver state
self._states.append(SdcSolverState(num_nodes=self.num_nodes - 1, num_time_steps=self.num_time_steps))
def _init_new_interval(self, start):
"""Initializes a new work interval
Parameters
----------
start : :py:class:`float`
start point of new interval
Returns
-------
has_work : :py:class:`bool`
            :py:class:`True` if a new interval has been initialized;
            :py:class:`False` if no new interval has been initialized (i.e. the new interval's end would exceed the end
            of the time interval given by the problem)
"""
assert_is_instance(start, float, descriptor="Time Point", checking_obj=self)
if start + self._dt > self.problem.time_end:
return False
if self.state and start == self.state.initial.time_point:
return False
self._init_new_state()
# set width of current interval
self.state.delta_interval = self._dt
# compute time step and node distances
self._deltas['t'] = self.state.delta_interval / self.num_time_steps # width of a single time step (equidistant)
# start time points of time steps
self.__time_points['steps'] = np.linspace(start, start + self._dt, self.num_time_steps + 1)
# initialize and transform integrator for time step width
self._integrator.init(self.__nodes_type, self.__num_nodes, self.__weights_type,
interval=np.array([self.__time_points['steps'][0], self.__time_points['steps'][1]],
dtype=np.float))
self.__time_points['nodes'] = np.zeros((self.num_time_steps, self.num_nodes), dtype=np.float)
_deltas_n = np.zeros(self.num_time_steps * (self.num_nodes - 1) + 1)
# copy the node provider so we do not alter the integrator's one
_nodes = deepcopy(self._integrator.nodes_type)
for _t in range(0, self.num_time_steps):
# transform Nodes (copy) onto new time step for retrieving actual integration nodes
_nodes.interval = np.array([self.__time_points['steps'][_t], self.__time_points['steps'][_t + 1]])
self.__time_points['nodes'][_t] = _nodes.nodes.copy()
for _n in range(0, self.num_nodes - 1):
_i = _t * (self.num_nodes - 1) + _n
_deltas_n[_i + 1] = _nodes.nodes[_n + 1] - _nodes.nodes[_n]
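        # Flattened node spacings across all time steps; entry i is the width
        # between node i and node i+1.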
self._deltas['n'] = _deltas_n[1:].copy()
return True
def _adjust_interval_width(self):
"""Adjust width of time interval
"""
raise NotImplementedError("Time Adaptivity not yet implemented.")
# return Message.SolverFlag.time_adjusted
def _main_solver_loop(self):
# initialize iteration timer of same type as global timer
_iter_timer = self.timer.__class__()
self._print_iteration(self.state.current_iteration_index + 1)
# iterate on time steps
_iter_timer.start()
for _current_time_step in self.state.current_iteration:
# run this time step
self._time_step()
if self.state.current_time_step_index < len(self.state.current_iteration) - 1:
self.state.current_iteration.proceed()
_iter_timer.stop()
# check termination criteria
self.threshold.check(self.state)
# log this iteration's summary
if self.state.is_first_iteration:
# on first iteration we do not have comparison values
self._print_iteration_end(None, None, None, _iter_timer.past())
else:
if problem_has_exact_solution(self.problem, self) and not self.state.is_first_iteration:
# we could compute the correct error of our current solution
self._print_iteration_end(self.state.solution.solution_reduction(self.state.current_iteration_index),
self.state.solution.error_reduction(self.state.current_iteration_index),
self.state.current_step.solution.residual,
_iter_timer.past())
else:
self._print_iteration_end(self.state.solution.solution_reduction(self.state.current_iteration_index),
None,
self.state.current_step.solution.residual,
_iter_timer.past())
# finalize this iteration (i.e. TrajectorySolutionData.finalize())
self.state.current_iteration.finalize()
_reason = self.threshold.has_reached()
if _reason is None:
# LOG.debug("solver main loop done: no reason")
return Message.SolverFlag.iterating
elif _reason == ['iterations']:
# LOG.debug("solver main loop done: iterations")
self.state.finalize()
return Message.SolverFlag.finished
else:
# LOG.debug("solver main loop done: other")
self.state.finalize()
return Message.SolverFlag.converged
def _time_step(self):
self.state.current_time_step.delta_time_step = self._deltas['t']
for _step in range(0, len(self.state.current_time_step)):
_node_index = self.state.current_time_step_index * (self.num_nodes - 1) + _step
self.state.current_time_step[_step].delta_tau = self._deltas['n'][_node_index]
self.state.current_time_step[_step].solution.time_point = \
self.__time_points['nodes'][self.state.current_time_step_index][_step + 1]
self._print_time_step(self.state.current_time_step_index + 1,
self.state.current_time_step.initial.time_point,
self.state.current_time_step.last.time_point,
self.state.current_time_step.delta_time_step)
# for classic SDC compute integral
_integral = 0.0
_integrate_values = None
if self.classic:
if not self.state.current_time_step.initial.rhs_evaluated:
self.state.current_time_step.initial.rhs = \
self.problem.evaluate_wrt_time(self.state.current_time_step.initial.time_point,
self.state.current_time_step.initial.value)
_integrate_values = np.array([self.state.current_time_step.initial.rhs], dtype=self.problem.numeric_type)
for _step_index in range(0, len(self.state.current_time_step)):
if self.state.is_first_iteration:
_integrate_values = \
np.append(_integrate_values,
np.array([self.state.current_time_step.initial.rhs], dtype=self.problem.numeric_type),
axis=0)
else:
_step = self.state.previous_iteration[self.state.current_time_step_index][_step_index]
if not _step.rhs_evaluated:
_step.rhs = self.problem.evaluate_wrt_time(_step.time_point, _step.value)
_integrate_values = \
np.append(_integrate_values,
np.array([_step.rhs], dtype=self.problem.numeric_type),
axis=0)
assert_condition(_integrate_values.shape[0] == self.num_nodes,
ValueError, message="Number of integration values not correct: {:d} != {:d}"
.format(_integrate_values.shape[0], self.num_nodes),
checking_obj=self)
_full_integral = 0.0
# do the actual SDC steps of this SDC sweep
for _step_index in range(0, len(self.state.current_time_step)):
_current_step = self.state.current_time_step[_step_index]
if self.classic:
_integral = self._integrator.evaluate(_integrate_values,
from_node=_step_index, target_node=_step_index + 1)
# we successively compute the full integral, which is used for the residual at the end
_full_integral += _integral
_current_step.integral = _integral.copy()
# do the SDC step of this sweep
self._sdc_step()
if self.state.current_step_index < len(self.state.current_time_step) - 1:
self.state.current_time_step.proceed()
del _integrate_values
# compute residual and print step details
for _step_index in range(0, len(self.state.current_time_step)):
_step = self.state.current_time_step[_step_index]
self._core.compute_residual(self.state, step=_step, integral=_full_integral)
# finalize this step (i.e. StepSolutionData.finalize())
_step.done()
if _step_index > 0:
_previous_time = self.state.current_time_step[_step_index - 1].time_point
else:
_previous_time = self.state.current_time_step.initial.time_point
if problem_has_exact_solution(self.problem, self):
self._print_step(_step_index + 2,
_previous_time,
_step.time_point,
supremum_norm(_step.value),
_step.solution.residual,
_step.solution.error)
else:
self._print_step(_step_index + 2,
_previous_time,
_step.time_point,
supremum_norm(_step.value),
_step.solution.residual,
None)
self._print_time_step_end()
# finalizing the current time step (i.e. TrajectorySolutionData.finalize)
self.state.current_time_step.finalize()
def _sdc_step(self):
# helper variables
_current_time_step_index = self.state.current_time_step_index
_current_step_index = self.state.current_step_index
# copy solution of previous iteration to this one
if self.state.is_first_iteration:
self.state.current_step.value = self.state.initial.value.copy()
else:
self.state.current_step.value = \
self.state.previous_iteration[_current_time_step_index][_current_step_index].value.copy()
# TODO: review the custom modification
# if not self.classic:
# # gather values for integration and evaluate problem at given points
# # initial value for this time step
# _integrate_values = \
# np.array(
# [self.problem.evaluate_wrt_time(self.state.current_time_step.initial.time_point,
# self.state.current_time_step.initial.value.copy())
# ], dtype=self.problem.numeric_type)
#
# if _current_step_index > 0:
# # values from this iteration (already calculated)
# _from_current_iteration_range = range(0, _current_step_index)
# for _index in _from_current_iteration_range:
# _integrate_values = \
# np.append(_integrate_values,
# np.array(
# [self.problem.evaluate_wrt_time(self.state.current_time_step[_index].solution.time_point,
# self.state.current_time_step[_index].solution.value.copy())
# ], dtype=self.problem.numeric_type
# ), axis=0)
#
# # values from previous iteration
# _from_previous_iteration_range = range(_current_step_index, self.num_nodes - 1)
# for _index in _from_previous_iteration_range:
# if self.state.is_first_iteration:
# _this_value = self.problem.initial_value
# else:
# _this_value = self.state.previous_iteration[_current_time_step_index][_index].solution.value.copy()
# _integrate_values = \
# np.append(_integrate_values,
# np.array(
# [self.problem.evaluate_wrt_time(self.state.current_time_step[_index].solution.time_point,
# _this_value)
# ], dtype=self.problem.numeric_type
# ), axis=0)
# assert_condition(_integrate_values.shape[0] == self.num_nodes,
# ValueError, message="Number of integration values not correct: {:d} != {:d}"
# .format(_integrate_values.shape[0], self.num_nodes),
# checking_obj=self)
#
# # integrate
# self.state.current_step.integral = self._integrator.evaluate(_integrate_values,
# from_node=_current_step_index,
# target_node=_current_step_index + 1)
# del _integrate_values
# # END if not self.classic
# compute step
self._core.run(self.state, problem=self.problem)
# calculate error
self._core.compute_error(self.state, problem=self.problem)
# step gets finalized after computation of residual
def print_lines_for_log(self):
_lines = super(ParallelSdc, self).print_lines_for_log()
if 'Number Nodes per Time Step' not in _lines['Integrator']:
_lines['Integrator']['Number Nodes per Time Step'] = "%d" % self.__num_nodes
if 'Number Time Steps' not in _lines['Integrator']:
_lines['Integrator']['Number Time Steps'] = "%d" % self._num_time_steps
return _lines
def _print_interval_header(self):
LOG.info("%s%s" % (VERBOSITY_LVL1, SEPARATOR_LVL3))
LOG.info("{} Interval: [{:.3f}, {:.3f}]"
.format(VERBOSITY_LVL1, self.state.initial.time_point, self.state.initial.time_point + self._dt))
self._print_output_tree_header()
def _print_output_tree_header(self):
LOG.info("%s iter" % VERBOSITY_LVL1)
LOG.info("%s \\" % VERBOSITY_LVL2)
LOG.info("%s |- time start end delta" % VERBOSITY_LVL2)
LOG.info("%s | \\" % VERBOSITY_LVL3)
LOG.info("%s | |- step t_0 t_1 phi(t_1) resid err" % VERBOSITY_LVL3)
LOG.info("%s | \\_" % VERBOSITY_LVL2)
LOG.info("%s \\_ sol r.red err r.red resid time" % VERBOSITY_LVL1)
def _print_iteration(self, _iter):
_iter = self._output_format(_iter, 'int', width=5)
LOG.info("%s %s" % (VERBOSITY_LVL1, _iter))
LOG.info("%s \\" % VERBOSITY_LVL2)
def _print_iteration_end(self, solred, errred, resid, time):
_solred = self._output_format(solred, 'exp')
_errred = self._output_format(errred, 'exp')
_resid = self._output_format(resid, 'exp')
_time = self._output_format(time, 'float', width=6.3)
LOG.info("%s \\_ %s %s %s %s" % (VERBOSITY_LVL1, _solred, _errred, _resid, _time))
def _print_time_step(self, time_step, start, end, delta):
_time_step = self._output_format(time_step, 'int', width=3)
_start = self._output_format(start, 'float', width=6.3)
_end = self._output_format(end, 'float', width=6.3)
_delta = self._output_format(delta, 'float', width=6.3)
LOG.info("%s |- %s %s %s %s" % (VERBOSITY_LVL2, _time_step, _start, _end, _delta))
LOG.info("%s | \\" % VERBOSITY_LVL3)
self._print_step(1, None, self.state.current_time_step.initial.time_point,
supremum_norm(self.state.current_time_step.initial.solution.value),
None, None)
def _print_time_step_end(self):
LOG.info("%s | \\_" % VERBOSITY_LVL2)
def _print_step(self, step, t0, t1, phi, resid, err):
_step = self._output_format(step, 'int', width=2)
_t0 = self._output_format(t0, 'float', width=6.3)
_t1 = self._output_format(t1, 'float', width=6.3)
_phi = self._output_format(phi, 'float', width=6.3)
_resid = self._output_format(resid, 'exp')
_err = self._output_format(err, 'exp')
LOG.info("%s | |- %s %s %s %s %s %s"
% (VERBOSITY_LVL3, _step, _t0, _t1, _phi, _resid, _err))
def _output_format(self, value, _type, width=None):
def _value_to_numeric(val):
if isinstance(val, (np.ndarray, IDiagnosisValue)):
return supremum_norm(val)
else:
return val
if _type and width is None:
if _type == 'float':
width = 10.3
elif _type == 'int':
width = 10
elif _type == 'exp':
width = 9.2
else:
width = 10
if value is None:
_outstr = "{: ^{width}s}".format('na', width=int(width))
else:
if _type == 'float':
_outstr = "{: {width}f}".format(_value_to_numeric(value), width=width)
elif _type == 'int':
_outstr = "{: {width}d}".format(_value_to_numeric(value), width=width)
elif _type == 'exp':
_outstr = "{: {width}e}".format(_value_to_numeric(value), width=width)
else:
_outstr = "{: >{width}s}".format(value, width=width)
return _outstr
__all__ = ['ParallelSdc']
|
[
"pypint.solvers.diagnosis.norms.supremum_norm",
"copy.deepcopy",
"pypint.solvers.states.sdc_solver_state.SdcSolverState",
"pypint.solvers.i_parallel_solver.IParallelSolver.__init__",
"pypint.plugins.timers.timer_base.TimerBase",
"pypint.utilities.assert_is_instance",
"numpy.zeros",
"pypint.utilities.assert_named_argument",
"pypint.problems.problem_has_exact_solution",
"pypint.utilities.threshold_check.ThresholdCheck",
"numpy.array",
"numpy.linspace",
"collections.OrderedDict",
"pypint.utilities.func_name"
] |
[((3328, 3368), 'pypint.solvers.i_parallel_solver.IParallelSolver.__init__', 'IParallelSolver.__init__', (['self'], {}), '(self, **kwargs)\n', (3352, 3368), False, 'from pypint.solvers.i_parallel_solver import IParallelSolver\n'), ((3419, 3516), 'pypint.utilities.threshold_check.ThresholdCheck', 'ThresholdCheck', ([], {'min_threshold': '(1e-07)', 'max_threshold': '(10)', 'conditions': "('residual', 'iterations')"}), "(min_threshold=1e-07, max_threshold=10, conditions=(\n 'residual', 'iterations'))\n", (3433, 3516), False, 'from pypint.utilities.threshold_check import ThresholdCheck\n'), ((3532, 3543), 'pypint.plugins.timers.timer_base.TimerBase', 'TimerBase', ([], {}), '()\n', (3541, 3543), False, 'from pypint.plugins.timers.timer_base import TimerBase\n'), ((3870, 3881), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3878, 3881), True, 'import numpy as np\n'), ((5505, 5614), 'pypint.utilities.assert_is_instance', 'assert_is_instance', (['problem', 'IInitialValueProblem'], {'descriptor': '"""Initial Value Problem"""', 'checking_obj': 'self'}), "(problem, IInitialValueProblem, descriptor=\n 'Initial Value Problem', checking_obj=self)\n", (5523, 5614), False, 'from pypint.utilities import assert_is_instance, assert_condition, func_name, assert_named_argument\n'), ((7021, 7096), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps * (self.__num_nodes - 1) + 1)'], {'dtype': 'np.object'}), '(self.num_time_steps * (self.__num_nodes - 1) + 1, dtype=np.object)\n', (7029, 7096), True, 'import numpy as np\n'), ((7738, 7842), 'pypint.utilities.assert_named_argument', 'assert_named_argument', (['"""dt"""', 'kwargs'], {'types': 'float', 'descriptor': '"""Width of Interval"""', 'checking_obj': 'self'}), "('dt', kwargs, types=float, descriptor=\n 'Width of Interval', checking_obj=self)\n", (7759, 7842), False, 'from pypint.utilities import assert_is_instance, assert_condition, func_name, assert_named_argument\n'), ((17641, 17717), 'pypint.utilities.assert_is_instance', 'assert_is_instance', (['start', 'float'], {'descriptor': '"""Time Point"""', 'checking_obj': 'self'}), "(start, float, descriptor='Time Point', checking_obj=self)\n", (17659, 17717), False, 'from pypint.utilities import assert_is_instance, assert_condition, func_name, assert_named_argument\n'), ((18257, 18318), 'numpy.linspace', 'np.linspace', (['start', '(start + self._dt)', '(self.num_time_steps + 1)'], {}), '(start, start + self._dt, self.num_time_steps + 1)\n', (18268, 18318), True, 'import numpy as np\n'), ((18692, 18755), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, self.num_nodes)'], {'dtype': 'np.float'}), '((self.num_time_steps, self.num_nodes), dtype=np.float)\n', (18700, 18755), True, 'import numpy as np\n'), ((18776, 18832), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps * (self.num_nodes - 1) + 1)'], {}), '(self.num_time_steps * (self.num_nodes - 1) + 1)\n', (18784, 18832), True, 'import numpy as np\n'), ((18924, 18961), 'copy.deepcopy', 'deepcopy', (['self._integrator.nodes_type'], {}), '(self._integrator.nodes_type)\n', (18932, 18961), False, 'from copy import deepcopy\n'), ((3665, 3676), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3673, 3676), True, 'import numpy as np\n'), ((3934, 3945), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3942, 3945), True, 'import numpy as np\n'), ((3968, 3979), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3976, 3979), True, 'import numpy as np\n'), ((6797, 6890), 'pypint.utilities.assert_is_instance', 'assert_is_instance', (["kwargs['classic']", 'bool'], 
{'descriptor': '"""Classic Flag"""', 'checking_obj': 'self'}), "(kwargs['classic'], bool, descriptor='Classic Flag',\n checking_obj=self)\n", (6815, 6890), False, 'from pypint.utilities import assert_is_instance, assert_condition, func_name, assert_named_argument\n'), ((17055, 17140), 'pypint.solvers.states.sdc_solver_state.SdcSolverState', 'SdcSolverState', ([], {'num_nodes': '(self.num_nodes - 1)', 'num_time_steps': 'self.num_time_steps'}), '(num_nodes=self.num_nodes - 1, num_time_steps=self.num_time_steps\n )\n', (17069, 17140), False, 'from pypint.solvers.states.sdc_solver_state import SdcSolverState\n'), ((19137, 19222), 'numpy.array', 'np.array', (["[self.__time_points['steps'][_t], self.__time_points['steps'][_t + 1]]"], {}), "([self.__time_points['steps'][_t], self.__time_points['steps'][_t + 1]]\n )\n", (19145, 19222), True, 'import numpy as np\n'), ((23475, 23565), 'numpy.array', 'np.array', (['[self.state.current_time_step.initial.rhs]'], {'dtype': 'self.problem.numeric_type'}), '([self.state.current_time_step.initial.rhs], dtype=self.problem.\n numeric_type)\n', (23483, 23565), True, 'import numpy as np\n'), ((26265, 26311), 'pypint.problems.problem_has_exact_solution', 'problem_has_exact_solution', (['self.problem', 'self'], {}), '(self.problem, self)\n', (26291, 26311), False, 'from pypint.problems import IInitialValueProblem, problem_has_exact_solution\n'), ((33389, 33455), 'pypint.solvers.diagnosis.norms.supremum_norm', 'supremum_norm', (['self.state.current_time_step.initial.solution.value'], {}), '(self.state.current_time_step.initial.solution.value)\n', (33402, 33455), False, 'from pypint.solvers.diagnosis.norms import supremum_norm\n'), ((18513, 18607), 'numpy.array', 'np.array', (["[self.__time_points['steps'][0], self.__time_points['steps'][1]]"], {'dtype': 'np.float'}), "([self.__time_points['steps'][0], self.__time_points['steps'][1]],\n dtype=np.float)\n", (18521, 18607), True, 'import numpy as np\n'), ((20659, 20705), 'pypint.problems.problem_has_exact_solution', 'problem_has_exact_solution', (['self.problem', 'self'], {}), '(self.problem, self)\n', (20685, 20705), False, 'from pypint.problems import IInitialValueProblem, problem_has_exact_solution\n'), ((34307, 34325), 'pypint.solvers.diagnosis.norms.supremum_norm', 'supremum_norm', (['val'], {}), '(val)\n', (34320, 34325), False, 'from pypint.solvers.diagnosis.norms import supremum_norm\n'), ((26496, 26522), 'pypint.solvers.diagnosis.norms.supremum_norm', 'supremum_norm', (['_step.value'], {}), '(_step.value)\n', (26509, 26522), False, 'from pypint.solvers.diagnosis.norms import supremum_norm\n'), ((26838, 26864), 'pypint.solvers.diagnosis.norms.supremum_norm', 'supremum_norm', (['_step.value'], {}), '(_step.value)\n', (26851, 26864), False, 'from pypint.solvers.diagnosis.norms import supremum_norm\n'), ((23816, 23906), 'numpy.array', 'np.array', (['[self.state.current_time_step.initial.rhs]'], {'dtype': 'self.problem.numeric_type'}), '([self.state.current_time_step.initial.rhs], dtype=self.problem.\n numeric_type)\n', (23824, 23906), True, 'import numpy as np\n'), ((24349, 24403), 'numpy.array', 'np.array', (['[_step.rhs]'], {'dtype': 'self.problem.numeric_type'}), '([_step.rhs], dtype=self.problem.numeric_type)\n', (24357, 24403), True, 'import numpy as np\n'), ((6506, 6521), 'pypint.utilities.func_name', 'func_name', (['self'], {}), '(self)\n', (6515, 6521), False, 'from pypint.utilities import assert_is_instance, assert_condition, func_name, assert_named_argument\n'), ((11836, 11849), 
'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11847, 11849), False, 'from collections import OrderedDict\n'), ((12119, 12132), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12130, 12132), False, 'from collections import OrderedDict\n'), ((12739, 12785), 'pypint.problems.problem_has_exact_solution', 'problem_has_exact_solution', (['self.problem', 'self'], {}), '(self.problem, self)\n', (12765, 12785), False, 'from pypint.problems import IInitialValueProblem, problem_has_exact_solution\n'), ((13450, 13463), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13461, 13463), False, 'from collections import OrderedDict\n'), ((13977, 14023), 'pypint.problems.problem_has_exact_solution', 'problem_has_exact_solution', (['self.problem', 'self'], {}), '(self.problem, self)\n', (14003, 14023), False, 'from pypint.problems import IInitialValueProblem, problem_has_exact_solution\n'), ((12354, 12423), 'pypint.solvers.diagnosis.norms.supremum_norm', 'supremum_norm', (['self.state.last_iteration.final_step.solution.residual'], {}), '(self.state.last_iteration.final_step.solution.residual)\n', (12367, 12423), False, 'from pypint.solvers.diagnosis.norms import supremum_norm\n'), ((13592, 13661), 'pypint.solvers.diagnosis.norms.supremum_norm', 'supremum_norm', (['self.state.last_iteration.final_step.solution.residual'], {}), '(self.state.last_iteration.final_step.solution.residual)\n', (13605, 13661), False, 'from pypint.solvers.diagnosis.norms import supremum_norm\n')]
|
import os
import shutil
from .storage_driver import StorageDriver
class FileStorageDriver(StorageDriver):
def __init__(self, path):
super().__init__(path)
os.makedirs(path, exist_ok=True)
def pull(self, remote_path, local_path=None, override_ok=False):
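        # Copy the remote file to local_path (returning True) when a local path
        # is given; otherwise return the raw file contents as bytes.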
if not self.exists(remote_path):
raise FileNotFoundError("That file doesn't exist")
path = os.path.join(self.url, remote_path)
if local_path:
if not override_ok and os.path.exists(local_path):
raise FileExistsError("File already exists at pull location!")
os.makedirs(os.path.dirname(local_path), exist_ok=True)
shutil.copyfile(path, local_path)
return True
with open(path, "rb") as f:
contents = f.read()
return contents
def push(self, remote_path, local_path=None, bytes_data=None, override_ok=False):
if not override_ok and self.exists(remote_path):
raise FileExistsError("This file already exists")
if not local_path and not bytes_data:
raise ValueError("local_path or bytes_data need to have values!")
if local_path and bytes_data:
raise ValueError("local_path and bytes_data are mutually exclusive!")
path = os.path.join(self.url, remote_path)
if local_path:
if not os.path.exists(local_path):
raise FileNotFoundError("Could not find the file to push!")
os.makedirs(os.path.dirname(path), exist_ok=True)
shutil.copyfile(local_path, path)
else:
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb+") as f:
f.write(bytes_data)
def delete(self, remote_path):
if not self.exists(remote_path):
raise FileNotFoundError("Could not find the file/folder to delete!")
path = os.path.join(self.url, remote_path)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
def exists(self, remote_path):
path = os.path.join(self.url, remote_path)
return os.path.exists(path)
def get_files(self, base_url=None):
results = []
path = self.url
if base_url:
path = os.path.join(self.url, base_url)
for root, _, files in os.walk(path):
for f in files:
results.append(os.path.relpath(os.path.join(root, f), path))
return results
|
[
"os.remove",
"os.makedirs",
"os.path.isdir",
"os.path.dirname",
"os.walk",
"os.path.exists",
"shutil.copyfile",
"shutil.rmtree",
"os.path.join"
] |
[((176, 208), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (187, 208), False, 'import os\n'), ((399, 434), 'os.path.join', 'os.path.join', (['self.url', 'remote_path'], {}), '(self.url, remote_path)\n', (411, 434), False, 'import os\n'), ((1305, 1340), 'os.path.join', 'os.path.join', (['self.url', 'remote_path'], {}), '(self.url, remote_path)\n', (1317, 1340), False, 'import os\n'), ((1924, 1959), 'os.path.join', 'os.path.join', (['self.url', 'remote_path'], {}), '(self.url, remote_path)\n', (1936, 1959), False, 'import os\n'), ((1972, 1991), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1985, 1991), False, 'import os\n'), ((2118, 2153), 'os.path.join', 'os.path.join', (['self.url', 'remote_path'], {}), '(self.url, remote_path)\n', (2130, 2153), False, 'import os\n'), ((2169, 2189), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2183, 2189), False, 'import os\n'), ((2381, 2394), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (2388, 2394), False, 'import os\n'), ((682, 715), 'shutil.copyfile', 'shutil.copyfile', (['path', 'local_path'], {}), '(path, local_path)\n', (697, 715), False, 'import shutil\n'), ((1563, 1596), 'shutil.copyfile', 'shutil.copyfile', (['local_path', 'path'], {}), '(local_path, path)\n', (1578, 1596), False, 'import shutil\n'), ((2005, 2024), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (2018, 2024), False, 'import shutil\n'), ((2051, 2066), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (2060, 2066), False, 'import os\n'), ((2317, 2349), 'os.path.join', 'os.path.join', (['self.url', 'base_url'], {}), '(self.url, base_url)\n', (2329, 2349), False, 'import os\n'), ((494, 520), 'os.path.exists', 'os.path.exists', (['local_path'], {}), '(local_path)\n', (508, 520), False, 'import os\n'), ((626, 653), 'os.path.dirname', 'os.path.dirname', (['local_path'], {}), '(local_path)\n', (641, 653), False, 'import os\n'), ((1384, 1410), 'os.path.exists', 'os.path.exists', (['local_path'], {}), '(local_path)\n', (1398, 1410), False, 'import os\n'), ((1513, 1534), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (1528, 1534), False, 'import os\n'), ((1635, 1656), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (1650, 1656), False, 'import os\n'), ((2471, 2492), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (2483, 2492), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
from odoo import models, fields
class MascotasMascotas(models.Model):
_name = "mascotas.mascotas"
    # List of the table's fields.
name = fields.Char(string="Nombre")
tipo_id = fields.Many2one("mascotas.tipos", string="Tipo")
raza_id = fields.Many2one("mascotas.razas", string="Raza")
fecha_nacimiento = fields.Date(string="Fec. Nac.")
sexo = fields.Selection([("m", "Macho"), ("h", "Hembra")], string="Sexo")
class MascotasRazas(models.Model):
_name = "mascotas.razas"
name = fields.Char(string="Nombre")
codigo = fields.Char(string="Código")
class MascotasTipos(models.Model):
_name = "mascotas.tipos"
name = fields.Char(string="Nombre")
codigo = fields.Char(string="Código")
|
[
"odoo.fields.Selection",
"odoo.fields.Many2one",
"odoo.fields.Date",
"odoo.fields.Char"
] |
[((176, 204), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Nombre"""'}), "(string='Nombre')\n", (187, 204), False, 'from odoo import models, fields\n'), ((219, 267), 'odoo.fields.Many2one', 'fields.Many2one', (['"""mascotas.tipos"""'], {'string': '"""Tipo"""'}), "('mascotas.tipos', string='Tipo')\n", (234, 267), False, 'from odoo import models, fields\n'), ((282, 330), 'odoo.fields.Many2one', 'fields.Many2one', (['"""mascotas.razas"""'], {'string': '"""Raza"""'}), "('mascotas.razas', string='Raza')\n", (297, 330), False, 'from odoo import models, fields\n'), ((354, 385), 'odoo.fields.Date', 'fields.Date', ([], {'string': '"""Fec. Nac."""'}), "(string='Fec. Nac.')\n", (365, 385), False, 'from odoo import models, fields\n'), ((397, 463), 'odoo.fields.Selection', 'fields.Selection', (["[('m', 'Macho'), ('h', 'Hembra')]"], {'string': '"""Sexo"""'}), "([('m', 'Macho'), ('h', 'Hembra')], string='Sexo')\n", (413, 463), False, 'from odoo import models, fields\n'), ((541, 569), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Nombre"""'}), "(string='Nombre')\n", (552, 569), False, 'from odoo import models, fields\n'), ((583, 611), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Código"""'}), "(string='Código')\n", (594, 611), False, 'from odoo import models, fields\n'), ((689, 717), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Nombre"""'}), "(string='Nombre')\n", (700, 717), False, 'from odoo import models, fields\n'), ((731, 759), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Código"""'}), "(string='Código')\n", (742, 759), False, 'from odoo import models, fields\n')]
|
"""
Get the timestamps of all claims and plot the cumulative number vs. time!
"""
import datetime
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import requests
import sqlite3
import time
def make_graph(mode, show=True):
"""
mode must be "claims" or "channels"
"""
if mode != "claims" and mode != "channels":
return
plt.close("all")
# Open the DB
db_file = "/home/brewer/local/lbry-sdk/lbry/lbryum-data/claims.db"
conn = sqlite3.connect(db_file)
c = conn.cursor()
# List for results
times = []
# Query
if mode == "claims":
x = "<>"
else:
x = "="
query = "SELECT creation_timestamp FROM claim\
WHERE claim_type {x} 2;".format(x=x)
# Iterate over query results
i = 0
for t in c.execute(query):
times.append(t)
i = i + 1
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
# Sort the times and convert to a numpy array
times = np.sort(np.array(times).flatten())
# Save some stats to JSON for Electron
now = time.time()
my_dict = {}
my_dict["unix_time"] = now
my_dict["human_time_utc"] = str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC"
my_dict["total_{mode}".format(mode=mode)] = int(\
len(times))
my_dict["new_{mode}_1_hour".format(mode=mode)] = int(\
np.sum(times > (now - 3600.0)))
my_dict["new_{mode}_24_hours".format(mode=mode)] = int(\
np.sum(times > (now - 86400.0)))
my_dict["new_{mode}_7_days".format(mode=mode)] = int(\
np.sum(times > (now - 7*86400.0)))
my_dict["new_{mode}_30_days".format(mode=mode)] = int(\
np.sum(times > (now - 30*86400.0)))
f = open("{mode}_stats.json".format(mode=mode), "w")
f.write(json.dumps(my_dict))
f.close()
# Count new claims this UTC day
count_today = np.sum(times > 86400.0*int(now/86400.0))
if mode == "claims":
string = "publications"
else:
string = "channels"
print("{K} {mode}, {n} from today so far (UTC). ".format(K=len(times), mode=string, n=count_today), end="", flush=True)
# Plotting stuff
plt.rcParams["font.family"] = "Liberation Sans"
plt.rcParams["font.size"] = 14
plt.style.use("dark_background")
plt.rcParams["axes.facecolor"] = "#3c3d3c"
plt.rcParams["savefig.facecolor"] = "#3c3d3c"
plt.figure(figsize=(15, 11))
plt.subplot(2, 1, 1)
times_in_days = (times - 1483228800)/86400.0
days = times_in_days.astype("int64")
plt.plot(times_in_days,
np.arange(len(times)), "w-", linewidth=1.5)
plt.ylabel("Cumulative number of {mode}".format(mode=string))
plt.title("Total number of {mode} = {n}.".format(n=len(times), mode=string))
plt.xlim([0.0, days.max() + 1])
plt.ylim(bottom=-100)
plt.gca().tick_params(labelright=True)
# Add vertical lines for new years (approximately)
new_years = np.arange(0, 5)*365.2425
for year in new_years:
plt.axvline(year, color="r", alpha=0.8, linestyle="--")
# Add text about years
year_names = [2017, 2018, 2019]
for i in range(len(year_names)):
year = new_years[i]
plt.text(year+5.0, 0.95*plt.gca().get_ylim()[1],
"{text} begins".format(text=year_names[i]),
fontsize=10)
# Add line and text about MH's video
plt.axvline(890.0, linestyle="dotted", linewidth=2, color="g")
plt.text(890.0, 0.2*plt.gca().get_ylim()[1],
"@MH video\n\'Why I Left YouTube\'\ngoes viral",
fontsize=10)
plt.subplot(2, 1, 2)
bin_width = 1.0
# Bin edges including right edge of last bin
bins = np.arange(0, np.max(days)+2) - 0.5*bin_width
color = "#6b95ef"
counts = plt.hist(days, bins, alpha=0.9, color=color, label="Raw",
width=bin_width, align="mid")[0]
# Compute 10-day moving average
moving_average = np.zeros(len(bins)-1)
for i in range(len(moving_average)):
subset = counts[0:(i+1)]
if len(subset) >= 10:
subset = subset[-10:]
moving_average[i] = np.mean(subset)
plt.plot(bins[0:-2] + 0.5*bin_width, moving_average[0:-1], "w-",
label="10-day moving average", linewidth=1.5)
plt.xlim([0.0, days.max() + 1])
plt.xlabel("Time (days since 2017-01-01)")
plt.ylabel("New {mode} added each day".format(mode=string))
subset = counts[-31:-1]
plt.title("Recent average rate (last 30 days) = {n} {mode} per day.".\
format(n=int(np.sum(time.time() - times <= 30.0*86400.0)/30.0),
mode=string))
plt.gca().tick_params(labelright=True)
# Year lines
for year in new_years:
plt.axvline(year, color="r", alpha=0.8, linestyle="--")
# MH line
plt.axvline(890.0, linestyle="dotted", linewidth=2, color="g")
# plt.gca().set_yticks([1.0, 10.0, 100.0, 1000.0, 10000.0])
# plt.gca().set_yticklabels(["1", "10", "100", "1000", "10000"])
plt.legend()
plt.savefig("{mode}.svg".format(mode=mode), bbox_inches="tight")
plt.savefig("{mode}.png".format(mode=mode), bbox_inches="tight", dpi=70)
print("Figure saved to {mode}.svg and {mode}.png.".format(mode=mode))
if show:
plt.show()
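# --- Illustrative alternative (not part of the original script) ---
# The 10-day moving average above is built with an explicit loop; the same
# "expanding window, then trailing 10 samples" average can be vectorised with
# a cumulative sum. Sketch only, shown here for comparison.
def trailing_mean(values, window=10):
    values = np.asarray(values, dtype=float)
    csum = np.cumsum(values)
    out = np.empty_like(values)
    head = min(window, len(values))
    # Expanding window for the first `window` samples...
    out[:head] = csum[:head] / np.arange(1, head + 1)
    # ...then a fixed trailing window of `window` samples.
    if len(values) > window:
        out[window:] = (csum[window:] - csum[:-window]) / window
    return out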
def aggregate_tips():
"""
Calculate tips over past X amount of time and write JSON output
"""
# The SQL query to perform
now = time.time()
print("Computing tip stats...", end="", flush=True)
labels = ["30_days", "7_days", "24_hours", "1_hour"]
windows = [30*86400.0, 7*86400.0, 1*86400.0, 3600.0]
result = {}
result["unix_time"] = now
result["human_time_utc"] = str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC"
# Agrees with old method, but should it be SUM(amount)?
query = "SELECT support_id, amount, time, claim_name, claim_id, is_nsfw, SUM(to_claim_address) tot FROM (SELECT support.id as support_id, support.support_amount amount,\
transaction.transaction_time time, claim.is_nsfw is_nsfw,\
claim.claim_id claim_id, claim.name claim_name,\
(CASE WHEN (output.address_list LIKE CONCAT('%25', claim_address, '%25')) THEN '1' ELSE '0' END) to_claim_address\
FROM claim\
INNER JOIN support ON support.supported_claim_id = claim.claim_id\
INNER JOIN transaction ON support.transaction_hash_id = transaction.hash\
INNER JOIN output ON transaction.hash = output.transaction_hash \
WHERE transaction.transaction_time > ({now} - {window})\
AND transaction.transaction_time <= {now}) AS result\
GROUP BY support_id, amount;".format(now=now, window=windows[0])
request = requests.get("https://chainquery.lbry.com/api/sql?query=" + query)
the_dict = request.json()
# Get tips into numpy array
times = []
tips = []
is_tip = []
links = []
is_nsfw = []
for row in the_dict["data"]:
times.append(float(row["time"]))
tips.append(float(row["amount"]))
links.append("https://open.lbry.com/" + str(row["claim_name"]) + ":"\
+ str(row["claim_id"]))
is_nsfw.append(row["is_nsfw"])
if row["tot"] > 0:
is_tip.append(True)
else:
is_tip.append(False)
times = np.array(times)
tips = np.array(tips)
is_tip = np.array(is_tip)
links = np.array(links)
is_nsfw = np.array(is_nsfw)
# Write tips
for i in range(len(labels)):
keep = (times > (now - windows[i])) & is_tip
_times = times[keep]
_tips = tips[keep]
_links = links[keep]
_is_nsfw = is_nsfw[keep]
result["num_tips_{label}".format(label=labels[i])] = len(_tips)
result["lbc_tipped_{label}".format(label=labels[i])] = float(_tips.sum())
maxtip = 0
maxtip_link = None
maxtip_is_nsfw = None
if len(_tips) > 0:
maxtip = float(_tips.max())
index = np.argmax(_tips)
maxtip_link = _links[index]
maxtip_is_nsfw = _is_nsfw[index]
result["biggest_tip_{label}".format(label=labels[i])] = maxtip
result["biggest_tip_{label}_link".format(label=labels[i])] = maxtip_link
result["biggest_tip_{label}_is_nsfw".format(label=labels[i])] = bool(maxtip_is_nsfw)
# Write supports
for i in range(len(labels)):
keep = (times > (now - windows[i])) & (~is_tip)
_times = times[keep]
_tips = tips[keep]
_links = links[keep]
_is_nsfw = is_nsfw[keep]
result["num_supports_{label}".format(label=labels[i])] = len(_tips)
result["lbc_supports_{label}".format(label=labels[i])] = float(_tips.sum())
maxtip = 0
maxtip_link = None
maxtip_is_nsfw = None
if len(_tips) > 0:
maxtip = float(_tips.max())
index = np.argmax(_tips)
maxtip_link = _links[index]
maxtip_is_nsfw = _is_nsfw[index]
result["biggest_support_{label}".format(label=labels[i])] = maxtip
result["biggest_support_{label}_link".format(label=labels[i])] = maxtip_link
result["biggest_support_{label}_is_nsfw".format(label=labels[i])] = bool(maxtip_is_nsfw)
f = open("tips_stats.json", "w")
f.write(json.dumps(result))
f.close()
print("done. ", flush=True, end="")
def publish_files():
"""
Publish files to somewhere on the internet.
"""
print("Publishing files to the internet...", end="", flush=True)
import subprocess
try:
subprocess.run("./upload.sh", timeout=120.0)
print("done.\n")
except:
print("failed.\n")
if __name__ == "__main__":
# Do it manually once then enter the infinite loop
now = time.time()
print("The time is " + str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC.")
make_graph("claims")
make_graph("channels")
try:
aggregate_tips()
except:
pass
import os
try:
publish_files()
except:
pass
import time
while True:
print("", flush=True)
time.sleep(530.0)
now = time.time()
print("The time is " + str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC.")
make_graph("claims", show=False)
make_graph("channels", show=False)
try:
aggregate_tips()
except:
pass
try:
publish_files()
except:
pass
|
[
"numpy.sum",
"numpy.argmax",
"json.dumps",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.mean",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.close",
"numpy.max",
"requests.get",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"time.sleep",
"sqlite3.connect",
"matplotlib.pyplot.subplot",
"subprocess.run",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hist",
"time.time",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((373, 389), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (382, 389), True, 'import matplotlib.pyplot as plt\n'), ((491, 515), 'sqlite3.connect', 'sqlite3.connect', (['db_file'], {}), '(db_file)\n', (506, 515), False, 'import sqlite3\n'), ((1203, 1214), 'time.time', 'time.time', ([], {}), '()\n', (1212, 1214), False, 'import time\n'), ((2409, 2441), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""dark_background"""'], {}), "('dark_background')\n", (2422, 2441), True, 'import matplotlib.pyplot as plt\n'), ((2544, 2572), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 11)'}), '(figsize=(15, 11))\n', (2554, 2572), True, 'import matplotlib.pyplot as plt\n'), ((2577, 2597), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (2588, 2597), True, 'import matplotlib.pyplot as plt\n'), ((2963, 2984), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(-100)'}), '(bottom=-100)\n', (2971, 2984), True, 'import matplotlib.pyplot as plt\n'), ((3545, 3607), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(890.0)'], {'linestyle': '"""dotted"""', 'linewidth': '(2)', 'color': '"""g"""'}), "(890.0, linestyle='dotted', linewidth=2, color='g')\n", (3556, 3607), True, 'import matplotlib.pyplot as plt\n'), ((3748, 3768), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (3759, 3768), True, 'import matplotlib.pyplot as plt\n'), ((4311, 4428), 'matplotlib.pyplot.plot', 'plt.plot', (['(bins[0:-2] + 0.5 * bin_width)', 'moving_average[0:-1]', '"""w-"""'], {'label': '"""10-day moving average"""', 'linewidth': '(1.5)'}), "(bins[0:-2] + 0.5 * bin_width, moving_average[0:-1], 'w-', label=\n '10-day moving average', linewidth=1.5)\n", (4319, 4428), True, 'import matplotlib.pyplot as plt\n'), ((4479, 4521), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (days since 2017-01-01)"""'], {}), "('Time (days since 2017-01-01)')\n", (4489, 4521), True, 'import matplotlib.pyplot as plt\n'), ((4977, 5039), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(890.0)'], {'linestyle': '"""dotted"""', 'linewidth': '(2)', 'color': '"""g"""'}), "(890.0, linestyle='dotted', linewidth=2, color='g')\n", (4988, 5039), True, 'import matplotlib.pyplot as plt\n'), ((5193, 5205), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5203, 5205), True, 'import matplotlib.pyplot as plt\n'), ((5608, 5619), 'time.time', 'time.time', ([], {}), '()\n', (5617, 5619), False, 'import time\n'), ((6996, 7062), 'requests.get', 'requests.get', (["('https://chainquery.lbry.com/api/sql?query=' + query)"], {}), "('https://chainquery.lbry.com/api/sql?query=' + query)\n", (7008, 7062), False, 'import requests\n'), ((7607, 7622), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (7615, 7622), True, 'import numpy as np\n'), ((7634, 7648), 'numpy.array', 'np.array', (['tips'], {}), '(tips)\n', (7642, 7648), True, 'import numpy as np\n'), ((7662, 7678), 'numpy.array', 'np.array', (['is_tip'], {}), '(is_tip)\n', (7670, 7678), True, 'import numpy as np\n'), ((7691, 7706), 'numpy.array', 'np.array', (['links'], {}), '(links)\n', (7699, 7706), True, 'import numpy as np\n'), ((7721, 7738), 'numpy.array', 'np.array', (['is_nsfw'], {}), '(is_nsfw)\n', (7729, 7738), True, 'import numpy as np\n'), ((10059, 10070), 'time.time', 'time.time', ([], {}), '()\n', (10068, 10070), False, 'import time\n'), ((1511, 1539), 'numpy.sum', 'np.sum', (['(times > now - 3600.0)'], {}), '(times > now - 3600.0)\n', (1517, 1539), True, 'import numpy as np\n'), ((1620, 1649), 'numpy.sum', 'np.sum', (['(times > now - 86400.0)'], {}), '(times > now - 86400.0)\n', (1626, 1649), True, 'import numpy as np\n'), ((1728, 1761), 'numpy.sum', 'np.sum', (['(times > now - 7 * 86400.0)'], {}), '(times > now - 7 * 86400.0)\n', (1734, 1761), True, 'import numpy as np\n'), ((1839, 1873), 'numpy.sum', 'np.sum', (['(times > now - 30 * 86400.0)'], {}), '(times > now - 30 * 86400.0)\n', (1845, 1873), True, 'import numpy as np\n'), ((1944, 1963), 'json.dumps', 'json.dumps', (['my_dict'], {}), '(my_dict)\n', (1954, 1963), False, 'import json\n'), ((3100, 3115), 'numpy.arange', 'np.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (3109, 3115), True, 'import numpy as np\n'), ((3160, 3215), 'matplotlib.pyplot.axvline', 'plt.axvline', (['year'], {'color': '"""r"""', 'alpha': '(0.8)', 'linestyle': '"""--"""'}), "(year, color='r', alpha=0.8, linestyle='--')\n", (3171, 3215), True, 'import matplotlib.pyplot as plt\n'), ((3930, 4021), 'matplotlib.pyplot.hist', 'plt.hist', (['days', 'bins'], {'alpha': '(0.9)', 'color': 'color', 'label': '"""Raw"""', 'width': 'bin_width', 'align': '"""mid"""'}), "(days, bins, alpha=0.9, color=color, label='Raw', width=bin_width,\n align='mid')\n", (3938, 4021), True, 'import matplotlib.pyplot as plt\n'), ((4291, 4306), 'numpy.mean', 'np.mean', (['subset'], {}), '(subset)\n', (4298, 4306), True, 'import numpy as np\n'), ((4902, 4957), 'matplotlib.pyplot.axvline', 'plt.axvline', (['year'], {'color': '"""r"""', 'alpha': '(0.8)', 'linestyle': '"""--"""'}), "(year, color='r', alpha=0.8, linestyle='--')\n", (4913, 4957), True, 'import matplotlib.pyplot as plt\n'), ((5448, 5458), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5456, 5458), True, 'import matplotlib.pyplot as plt\n'), ((9586, 9604), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (9596, 9604), False, 'import json\n'), ((9855, 9899), 'subprocess.run', 'subprocess.run', (['"""./upload.sh"""'], {'timeout': '(120.0)'}), "('./upload.sh', timeout=120.0)\n", (9869, 9899), False, 'import subprocess\n'), ((10413, 10430), 'time.sleep', 'time.sleep', (['(530.0)'], {}), '(530.0)\n', (10423, 10430), False, 'import time\n'), ((10446, 10457), 'time.time', 'time.time', ([], {}), '()\n', (10455, 10457), False, 'import time\n'), ((2989, 2998), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2996, 2998), True, 'import matplotlib.pyplot as plt\n'), ((4811, 4820), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4818, 4820), True, 'import matplotlib.pyplot as plt\n'), ((8278, 8294), 'numpy.argmax', 'np.argmax', (['_tips'], {}), '(_tips)\n', (8287, 8294), True, 'import numpy as np\n'), ((9177, 9193), 'numpy.argmax', 'np.argmax', (['_tips'], {}), '(_tips)\n', (9186, 9193), True, 'import numpy as np\n'), ((1122, 1137), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (1130, 1137), True, 'import numpy as np\n'), ((3863, 3875), 'numpy.max', 'np.max', (['days'], {}), '(days)\n', (3869, 3875), True, 'import numpy as np\n'), ((3632, 3641), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3639, 3641), True, 'import matplotlib.pyplot as plt\n'), ((3377, 3386), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3384, 3386), True, 'import matplotlib.pyplot as plt\n'), ((4725, 4736), 'time.time', 'time.time', ([], {}), '()\n', (4734, 4736), False, 'import time\n')]
|
from matplotlib import pyplot as pl
def factorial(n):
if n==0:
return 1
else:
return n*factorial(n-1)
# Binomial coefficient "n choose i"
def C(n,i):
return factorial(n)/(factorial(n-i)*factorial(i))
# Scale each control point by its binomial coefficient (the Bernstein weights)
def Spline(n,puntos):
coeficientesx = []
coeficientesy = []
for i in range(n+1):
Coef = C(n,i)
coeficientesx.append(Coef*puntos[i][0])
coeficientesy.append(Coef*puntos[i][1])
return [coeficientesx,coeficientesy]
# Evaluate the Bernstein-form polynomial of degree n at parameter t
def B(n,t,coef):
ans = 0
for i in range(n+1):
ans += coef[i]*((1-t)**(n-i))*(t**i)
return ans
# Plot the Bezier curve sampled at the parameter values in T
def graficar(n,T,coeficientes):
x = []
y = []
for t in T:
x.append(B(n,t,coeficientes[0]))
y.append(B(n,t,coeficientes[1]))
pl.plot(x,y)
pl.show()
return None
T = []
for i in range(100):
T.append(i/100.0)
puntos = [[1.67,4.33],[0.96,4.33],[0.38,4.23],[-0.23,4.22],[-0.69,3.88],[-0.99, 3.54],[-1,3],[-0.84, 2.66],[-0.48,2.43],[-0.04,2.30],[0.49,2.56],[1.09,2.31],[1.67,2.25],[2.14,1.97],[2.41,1.56],[2.43,1.06],[2.14,0.72],[1.63,0.62],[1.07,0.60],[0.52,0.58],[0.07,0.54],[-0.32,0.54],[-0.79,0.55]]
n = len(puntos)-1
coeficientes = Spline(n,puntos)
graficar(n,T,coeficientes)
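# --- Illustrative alternative (not part of the original script) ---
# De Casteljau's algorithm evaluates the same Bezier curve by repeated linear
# interpolation, which avoids the large factorials used above and is
# numerically more stable. Sketch only; `puntos` is assumed to be a list of
# [x, y] control points and 0 <= t <= 1.
def de_casteljau(t, puntos):
    pts = [list(p) for p in puntos]
    n = len(pts)
    for r in range(1, n):
        for i in range(n - r):
            pts[i][0] = (1 - t)*pts[i][0] + t*pts[i + 1][0]
            pts[i][1] = (1 - t)*pts[i][1] + t*pts[i + 1][1]
    return pts[0]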
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot"
] |
[((730, 743), 'matplotlib.pyplot.plot', 'pl.plot', (['x', 'y'], {}), '(x, y)\n', (737, 743), True, 'from matplotlib import pyplot as pl\n'), ((747, 756), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (754, 756), True, 'from matplotlib import pyplot as pl\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-12 00:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
import wagtail.wagtailcore.fields
from wapps.utils import get_image_model
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0028_merge'),
('taggit', '0002_auto_20150616_2121'),
('wagtailimages', '0013_make_rendition_upload_callable'),
('wapps', '0006_add_identity_logo_with_custom_image_model'),
]
operations = [
migrations.CreateModel(
name='StaticPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', models.TextField(blank=True, help_text='An optional introduction used as page heading and summary', null=True, verbose_name='Introduction')),
('body', wagtail.wagtailcore.fields.RichTextField(help_text='The main page content', verbose_name='Body')),
('image_full', models.BooleanField(default=False, help_text='Use the fully sized image', verbose_name='Fully sized image')),
('seo_type', models.CharField(choices=[('article', 'Article'), ('service', 'Service')], help_text='What does this page represents', max_length=10, verbose_name='Search engine type')),
('image', models.ForeignKey(blank=True, help_text='The main page image (seen when shared)', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=get_image_model())),
],
options={
'verbose_name': 'Static Page',
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StaticPageTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='wapps.StaticPage')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='wapps_staticpagetag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='staticpage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='wapps.StaticPageTag', to='taggit.Tag', verbose_name='Tags'),
),
]
|
[
"django.db.models.OneToOneField",
"django.db.models.TextField",
"wapps.utils.get_image_model",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField"
] |
[((747, 917), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'auto_created': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'parent_link': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""wagtailcore.Page"""'}), "(auto_created=True, on_delete=django.db.models.deletion\n .CASCADE, parent_link=True, primary_key=True, serialize=False, to=\n 'wagtailcore.Page')\n", (767, 917), False, 'from django.db import migrations, models\n'), ((936, 1084), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""An optional introduction used as page heading and summary"""', 'null': '(True)', 'verbose_name': '"""Introduction"""'}), "(blank=True, help_text=\n 'An optional introduction used as page heading and summary', null=True,\n verbose_name='Introduction')\n", (952, 1084), False, 'from django.db import migrations, models\n'), ((1233, 1344), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Use the fully sized image"""', 'verbose_name': '"""Fully sized image"""'}), "(default=False, help_text='Use the fully sized image',\n verbose_name='Fully sized image')\n", (1252, 1344), False, 'from django.db import migrations, models\n'), ((1372, 1549), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('article', 'Article'), ('service', 'Service')]", 'help_text': '"""What does this page represents"""', 'max_length': '(10)', 'verbose_name': '"""Search engine type"""'}), "(choices=[('article', 'Article'), ('service', 'Service')],\n help_text='What does this page represents', max_length=10, verbose_name\n ='Search engine type')\n", (1388, 1549), False, 'from django.db import migrations, models\n'), ((2011, 2104), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2027, 2104), False, 'from django.db import migrations, models\n'), ((2293, 2419), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""wapps_staticpagetag_items"""', 'to': '"""taggit.Tag"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='wapps_staticpagetag_items', to='taggit.Tag')\n", (2310, 2419), False, 'from django.db import migrations, models\n'), ((1729, 1746), 'wapps.utils.get_image_model', 'get_image_model', ([], {}), '()\n', (1744, 1746), False, 'from wapps.utils import get_image_model\n')]
|
import numpy as np
import scipy.sparse as sp
import datasets
import utils
import argparse
# argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='cora', help='Datasets: cora, email, ssets')
parser.add_argument('--version', default='1', help='version for ssets, default 1 for others')
args = parser.parse_args()
if __name__ == '__main__':
A, labels = datasets.load_graph(args.dataset, args.version)
# dense
if not isinstance(A, np.ndarray):
A = np.array(A.todense())
L = utils.laplacian(A)
N = utils.normalized_laplacian(A)
# sparse
A = sp.csr_matrix(A)
L = sp.csr_matrix(L)
N = sp.csr_matrix(N)
matrices = {
'A': A,
'L': L,
'N': N
}
for matrix_id in matrices:
matrix = matrices[matrix_id]
eig_val, eig_vec = np.linalg.eigh(matrix.todense())
path = f"{args.dataset}/embeddings/{matrix_id}_{args.dataset}_v{args.version}.npy"
np.save(path, eig_vec)
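# --- Illustrative alternative (not part of the original script) ---
# np.linalg.eigh above densifies the matrix and computes the full spectrum.
# For large graphs only the k smallest eigenpairs are usually needed, and
# scipy can compute them directly on the sparse matrix. Sketch only; `k` is a
# hypothetical parameter.
def k_smallest_eigenpairs(matrix, k=32):
    from scipy.sparse.linalg import eigsh
    eig_val, eig_vec = eigsh(matrix, k=k, which="SM")
    return eig_val, eig_vec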
|
[
"numpy.save",
"argparse.ArgumentParser",
"datasets.load_graph",
"scipy.sparse.csr_matrix",
"utils.laplacian",
"utils.normalized_laplacian"
] |
[((113, 138), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (136, 138), False, 'import argparse\n'), ((390, 437), 'datasets.load_graph', 'datasets.load_graph', (['args.dataset', 'args.version'], {}), '(args.dataset, args.version)\n', (409, 437), False, 'import datasets\n'), ((531, 549), 'utils.laplacian', 'utils.laplacian', (['A'], {}), '(A)\n', (546, 549), False, 'import utils\n'), ((558, 587), 'utils.normalized_laplacian', 'utils.normalized_laplacian', (['A'], {}), '(A)\n', (584, 587), False, 'import utils\n'), ((610, 626), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['A'], {}), '(A)\n', (623, 626), True, 'import scipy.sparse as sp\n'), ((635, 651), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['L'], {}), '(L)\n', (648, 651), True, 'import scipy.sparse as sp\n'), ((660, 676), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['N'], {}), '(N)\n', (673, 676), True, 'import scipy.sparse as sp\n'), ((976, 998), 'numpy.save', 'np.save', (['path', 'eig_vec'], {}), '(path, eig_vec)\n', (983, 998), True, 'import numpy as np\n')]
|
# https://teamtreehouse.com/library/everyone-loves-charlotte
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
site_links = []
def internal_links(linkURL):
html = urlopen('https://treehouse-projects.github.io/horse-land/{}'.format(linkURL))
soup = BeautifulSoup(html, 'html.parser')
return soup.find('a', href=re.compile('(.html)$'))
if __name__ == '__main__':
urls = internal_links('index.html')
    while urls is not None:  # soup.find() returns None once no further .html link is found
page = urls.attrs['href']
if page not in site_links:
site_links.append(page)
print(page)
print('\n==============\n')
urls = internal_links(page)
else:
break
|
[
"bs4.BeautifulSoup",
"re.compile"
] |
[((285, 319), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (298, 319), False, 'from bs4 import BeautifulSoup\n'), ((352, 374), 're.compile', 're.compile', (['"""(.html)$"""'], {}), "('(.html)$')\n", (362, 374), False, 'import re\n')]
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Filters for webassets."""
from __future__ import absolute_import, print_function
import json
import os
import re
from subprocess import PIPE, Popen
from babel.messages.pofile import read_po
from flask import current_app
from webassets.filter import Filter, register_filter
from webassets.filter.cleancss import CleanCSS
from webassets.filter.requirejs import RequireJSFilter as RequireJSFilterBase
__all__ = ('AngularGettextFilter', 'RequireJSFilter', 'CleanCSSFilter', )
class RequireJSFilter(RequireJSFilterBase):
"""Optimize AMD-style modularized JavaScript into a single asset.
Adds support for exclusion of files already in defined in other bundles.
"""
name = 'requirejsexclude'
def __init__(self, *args, **kwargs):
r"""Initialize filter.
:param \*args: Arguments are forwarded to parent class.
:param \**kwargs: Keyword arguments are forwarded to parent class
except the *exclude* keyword.
"""
self.excluded_bundles = kwargs.pop('exclude', [])
super(RequireJSFilter, self).__init__(*args, **kwargs)
def setup(self):
"""Setup filter (only called when filter is actually used)."""
super(RequireJSFilter, self).setup()
excluded_files = []
for bundle in self.excluded_bundles:
excluded_files.extend(
map(lambda f: os.path.splitext(f)[0],
bundle.contents)
)
if excluded_files:
self.argv.append(
'exclude={0}'.format(','.join(excluded_files))
)
class CleanCSSFilter(CleanCSS):
"""Minify CSS using cleancss.
Implements opener capable of rebasing relative CSS URLs against
``COLLECT_STATIC_ROOT`` using both cleancss v3 or v4.
"""
name = 'cleancssurl'
def setup(self):
"""Initialize filter just before it will be used."""
super(CleanCSSFilter, self).setup()
self.root = current_app.config.get('COLLECT_STATIC_ROOT')
@property
def rebase_opt(self):
"""Determine which option name to use."""
if not hasattr(self, '_rebase_opt'):
# out = b"MAJOR.MINOR.REVISION" // b"3.4.19" or b"4.0.0"
out, err = Popen(
['cleancss', '--version'], stdout=PIPE).communicate()
ver = int(out[:out.index(b'.')])
self._rebase_opt = ['--root', self.root] if ver == 3 else []
return self._rebase_opt
def input(self, _in, out, **kw):
"""Input filtering."""
args = [self.binary or 'cleancss'] + self.rebase_opt
if self.extra_args:
args.extend(self.extra_args)
self.subprocess(args, out, _in)
_re_language_code = re.compile(
r'"Language: (?P<language_code>[A-Za-z_]{2,}(_[A-Za-z]{2,})?)\\n"'
)
"""Match language code group in PO file."""
class AngularGettextFilter(Filter):
"""Compile GNU gettext messages to angular-gettext module."""
name = 'angular-gettext'
options = {
'catalog_name': None,
}
def output(self, _in, out, **kwargs):
"""Wrap translation in Angular module."""
out.write(
'angular.module("{0}", ["gettext"]).run('
'["gettextCatalog", function (gettextCatalog) {{'.format(
self.catalog_name
)
)
out.write(_in.read())
out.write('}]);')
def input(self, _in, out, **kwargs):
"""Process individual translation file."""
language_code = _re_language_code.search(_in.read()).group(
'language_code'
)
        _in.seek(0)  # move to the beginning after matching the language
catalog = read_po(_in)
out.write('gettextCatalog.setStrings("{0}", '.format(language_code))
out.write(json.dumps({
key: value.string for key, value in catalog._messages.items()
if key and value.string
}))
out.write(');')
# Register filters on webassets.
register_filter(AngularGettextFilter)
register_filter(CleanCSSFilter)
register_filter(RequireJSFilter)
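# --- Illustrative usage (not part of this module) ---
# A minimal sketch of referencing one of the registered filters by name in a
# webassets bundle. The asset file names are hypothetical.
def _demo_bundle():
    from webassets import Bundle
    return Bundle(
        'css/styles.css',
        filters='cleancssurl',
        output='gen/packed.%(version)s.css',
    )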
|
[
"subprocess.Popen",
"babel.messages.pofile.read_po",
"flask.current_app.config.get",
"webassets.filter.register_filter",
"os.path.splitext",
"re.compile"
] |
[((2959, 3038), 're.compile', 're.compile', (['""""Language: (?P<language_code>[A-Za-z_]{2,}(_[A-Za-z]{2,})?)\\\\\\\\n\\""""'], {}), '(\'"Language: (?P<language_code>[A-Za-z_]{2,}(_[A-Za-z]{2,})?)\\\\\\\\n"\')\n', (2969, 3038), False, 'import re\n'), ((4216, 4253), 'webassets.filter.register_filter', 'register_filter', (['AngularGettextFilter'], {}), '(AngularGettextFilter)\n', (4231, 4253), False, 'from webassets.filter import Filter, register_filter\n'), ((4254, 4285), 'webassets.filter.register_filter', 'register_filter', (['CleanCSSFilter'], {}), '(CleanCSSFilter)\n', (4269, 4285), False, 'from webassets.filter import Filter, register_filter\n'), ((4286, 4318), 'webassets.filter.register_filter', 'register_filter', (['RequireJSFilter'], {}), '(RequireJSFilter)\n', (4301, 4318), False, 'from webassets.filter import Filter, register_filter\n'), ((2197, 2242), 'flask.current_app.config.get', 'current_app.config.get', (['"""COLLECT_STATIC_ROOT"""'], {}), "('COLLECT_STATIC_ROOT')\n", (2219, 2242), False, 'from flask import current_app\n'), ((3914, 3926), 'babel.messages.pofile.read_po', 'read_po', (['_in'], {}), '(_in)\n', (3921, 3926), False, 'from babel.messages.pofile import read_po\n'), ((2471, 2516), 'subprocess.Popen', 'Popen', (["['cleancss', '--version']"], {'stdout': 'PIPE'}), "(['cleancss', '--version'], stdout=PIPE)\n", (2476, 2516), False, 'from subprocess import PIPE, Popen\n'), ((1611, 1630), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (1627, 1630), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
##############################################################################
#
# translation.py
# Module defining a bunch of functions to be used for i18n translation
#
# Copyright (c) 2010 Webcore Corp. All Rights Reserved.
#
##############################################################################
""" translation.py - Module defining bunch of function to be used for i18n
transration.
$Id: translation.py 629 2010-06-28 07:57:53Z ats $
"""
__author__ = '<NAME> <<EMAIL>>'
__docformat__ = 'plaintext'
__licence__ = 'BSD'
import os
import gettext
__all__ = ['get_i18ndir', 'get_gettextobject', 'get_languages']
def get_i18ndir():
"""
A function to obtain i18n directory
"""
udir = os.path.dirname(os.path.split(__file__)[0])
dir = os.path.join(udir, 'i18n')
return dir
def get_gettextobject(dimain = 'aha', languages = None):
"""
A function to obtain gettext object
"""
dir = get_i18ndir()
t = gettext.translation(domain = dimain,
languages = languages,
localedir = dir, fallback = True)
return t
def get_languages(s):
"""
A function to obtain language settings via Accept-Language header.
"""
langs = [''.join(x.split(';')[:1]) for x in s]
return langs
def main(): pass;
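# --- Illustrative usage (not part of the original module) ---
# A minimal sketch combining the helpers above, assuming the Accept-Language
# header has already been split into a list such as ['ja;q=0.9', 'en'].
def translate_example(accept_language):
    langs = get_languages(accept_language)
    t = get_gettextobject(languages=langs)
    return t.gettext('Hello')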
|
[
"gettext.translation",
"os.path.split",
"os.path.join"
] |
[((854, 880), 'os.path.join', 'os.path.join', (['udir', '"""i18n"""'], {}), "(udir, 'i18n')\n", (866, 880), False, 'import os\n'), ((1042, 1131), 'gettext.translation', 'gettext.translation', ([], {'domain': 'dimain', 'languages': 'languages', 'localedir': 'dir', 'fallback': '(True)'}), '(domain=dimain, languages=languages, localedir=dir,\n fallback=True)\n', (1061, 1131), False, 'import gettext\n'), ((816, 839), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (829, 839), False, 'import os\n')]
|
from contextlib import suppress
import pytest
from bocadillo import WebSocket, API, WebSocketDisconnect
from bocadillo.constants import WEBSOCKET_CLOSE_CODES
# Basic usage
def test_websocket_route(api: API):
@api.websocket_route("/chat")
async def chat(ws: WebSocket):
async with ws:
assert await ws.receive_text() == "ping"
await ws.send_text("pong")
with api.client.websocket_connect("/chat") as client:
client.send_text("ping")
assert client.receive_text() == "pong"
def test_websocket_route_parameters(api: API):
@api.websocket_route("/chat/{room}")
async def chat_room(ws: WebSocket, room: str):
async with ws:
await ws.send(room)
with api.client.websocket_connect("/chat/foo") as client:
assert client.receive_text() == "foo"
def test_if_route_parameter_fails_validation_then_403(api: API):
@api.websocket_route("/chat/{id:d}")
async def chat_room(ws: WebSocket, id: int):
pass
with pytest.raises(WebSocketDisconnect) as ctx:
with api.client.websocket_connect("/chat/foo"):
pass
assert ctx.value.code == 403
def test_non_existing_endpoint_returns_403_as_per_the_asgi_spec(api: API):
with pytest.raises(WebSocketDisconnect) as ctx:
with api.client.websocket_connect("/foo"):
pass
assert ctx.value.code == 403
def test_reject_closes_with_403(api: API):
@api.websocket_route("/foo")
async def foo(ws: WebSocket):
await ws.reject()
with pytest.raises(WebSocketDisconnect) as ctx:
with api.client.websocket_connect("/foo"):
pass
assert ctx.value.code == 403
def test_iter_websocket(api: API):
@api.websocket_route("/chat")
async def chat(ws: WebSocket):
async with ws:
async for message in ws:
await ws.send_text(f"You said: {message}")
with api.client.websocket_connect("/chat") as ws_client:
ws_client.send_text("ping")
assert ws_client.receive_text() == "You said: ping"
ws_client.send_text("pong")
assert ws_client.receive_text() == "You said: pong"
def test_can_close_within_context(api: API):
@api.websocket_route("/test")
async def test(ws: WebSocket):
async with ws:
await ws.close(4242)
with api.client.websocket_connect("/test") as client:
message = client.receive()
assert message == {"type": "websocket.close", "code": 4242}
def test_websocket_url(api: API):
@api.websocket_route("/test")
async def test(ws: WebSocket):
async with ws:
assert ws.url == "ws://testserver/test"
assert ws.url.path == "/test"
assert ws.url.port is None
assert ws.url.scheme == "ws"
assert ws.url.hostname == "testserver"
assert ws.url.query == ""
assert ws.url.is_secure is False
with api.client.websocket_connect("/test"):
pass
# Encoding / decoding of messages
@pytest.mark.parametrize(
"receive_type, example_message, expected_type",
[
("bytes", b"Hello", bytes),
("text", "Hello", str),
("json", {"message": "Hello"}, dict),
],
)
def test_receive_type(api: API, receive_type, example_message, expected_type):
@api.websocket_route("/chat", receive_type=receive_type)
async def chat(ws: WebSocket):
async with ws:
message = await ws.receive()
assert type(message) == expected_type
with api.client.websocket_connect("/chat") as client:
getattr(client, f"send_{receive_type}")(example_message)
@pytest.mark.parametrize(
"send_type, example_message, expected_type",
[
("bytes", b"Hello", bytes),
("text", "Hello", str),
("json", {"message": "Hello"}, dict),
],
)
def test_send_type(api: API, send_type, example_message, expected_type):
@api.websocket_route("/chat", send_type=send_type)
async def chat(ws: WebSocket):
async with ws:
await ws.send(example_message)
with api.client.websocket_connect("/chat") as client:
message = getattr(client, f"receive_{send_type}")()
assert type(message) == expected_type
assert message == example_message
@pytest.mark.parametrize(
"value_type, example_message, expected_type",
[
("bytes", b"Hello", bytes),
("text", "Hello", str),
("json", {"message": "Hello"}, dict),
],
)
def test_value_type(api: API, value_type, example_message, expected_type):
@api.websocket_route("/chat", value_type=value_type)
async def chat(ws: WebSocket):
async with ws:
message = await ws.receive()
assert type(message) == expected_type
await ws.send(example_message)
with api.client.websocket_connect("/chat") as client:
getattr(client, f"send_{value_type}")(example_message)
assert type(getattr(client, f"receive_{value_type}")()) == expected_type
def test_receive_and_send_event(api: API):
@api.websocket_route("/chat", value_type="event")
async def chat(ws: WebSocket):
async with ws:
message = await ws.receive()
assert message == {"type": "websocket.receive", "text": "ping"}
await ws.send({"type": "websocket.send", "text": "pong"})
with api.client.websocket_connect("/chat") as client:
client.send_text("ping")
assert client.receive_text() == "pong"
# Disconnect errors
@pytest.mark.parametrize(
"close_codes, code, expected_caught",
[
*((None, code, True) for code in (1000, 1001)),
*(
(None, code, False)
for code in WEBSOCKET_CLOSE_CODES
if code not in (1000, 1001)
),
((1000,), 1001, False),
((1000,), 1000, True),
*(((), code, False) for code in WEBSOCKET_CLOSE_CODES),
*((all, code, True) for code in WEBSOCKET_CLOSE_CODES),
],
)
def test_catch_disconnect(api: API, close_codes, code, expected_caught):
caught = False
@api.websocket_route("/chat", caught_close_codes=close_codes)
async def chat(ws: WebSocket):
nonlocal caught
try:
async with ws:
await ws.receive() # will never receive
caught = True
except WebSocketDisconnect as exc:
# The exception should have been raised only if we told the
# WebSocket route not to catch it.
assert exc.code not in ws.caught_close_codes
with api.client.websocket_connect("/chat") as client:
# Close immediately.
client.close(code)
assert caught is expected_caught
# Server error handling
class Oops(Exception):
pass
def test_if_exception_raised_in_context_then_closed_with_1011(api: API):
@api.websocket_route("/fail")
async def fail(ws: WebSocket):
async with ws:
raise Oops
with suppress(Oops):
with api.client.websocket_connect("/fail") as client:
message = client.receive()
assert message == {"type": "websocket.close", "code": 1011}
def test_accepted_and_exception_raised_then_closed_with_1011(api: API):
@api.websocket_route("/fail")
async def fail(ws: WebSocket):
await ws.accept()
raise Oops
with suppress(Oops):
with api.client.websocket_connect("/fail") as client:
message = client.receive()
assert message == {"type": "websocket.close", "code": 1011}
def test_if_not_accepted_and_exception_raised_then_closed_with_1011(api: API):
@api.websocket_route("/fail")
async def fail(_):
raise Oops
with pytest.raises(WebSocketDisconnect) as ctx:
with api.client.websocket_connect("/fail"):
pass
assert ctx.value.code == 1011
def test_context_does_not_silence_exceptions(api: API):
cleaned_up = False
@api.websocket_route("/fail")
async def fail(ws):
nonlocal cleaned_up
async with ws:
raise Oops
cleaned_up = True
with suppress(Oops):
with api.client.websocket_connect("/fail"):
pass
assert not cleaned_up
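# --- Illustrative, standalone sketch (not part of the test suite) ---
# An echo endpoint assembled only from the API exercised by the tests above.
# Constructing API() with no arguments, and how the app is ultimately served,
# are assumptions that depend on the bocadillo version.
def make_echo_app() -> API:
    api = API()
    @api.websocket_route("/echo")
    async def echo(ws: WebSocket):
        async with ws:
            async for message in ws:
                await ws.send_text(f"echo: {message}")
    return api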
|
[
"pytest.mark.parametrize",
"pytest.raises",
"contextlib.suppress"
] |
[((3041, 3213), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""receive_type, example_message, expected_type"""', "[('bytes', b'Hello', bytes), ('text', 'Hello', str), ('json', {'message':\n 'Hello'}, dict)]"], {}), "('receive_type, example_message, expected_type', [(\n 'bytes', b'Hello', bytes), ('text', 'Hello', str), ('json', {'message':\n 'Hello'}, dict)])\n", (3064, 3213), False, 'import pytest\n'), ((3663, 3832), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""send_type, example_message, expected_type"""', "[('bytes', b'Hello', bytes), ('text', 'Hello', str), ('json', {'message':\n 'Hello'}, dict)]"], {}), "('send_type, example_message, expected_type', [(\n 'bytes', b'Hello', bytes), ('text', 'Hello', str), ('json', {'message':\n 'Hello'}, dict)])\n", (3686, 3832), False, 'import pytest\n'), ((4305, 4475), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value_type, example_message, expected_type"""', "[('bytes', b'Hello', bytes), ('text', 'Hello', str), ('json', {'message':\n 'Hello'}, dict)]"], {}), "('value_type, example_message, expected_type', [(\n 'bytes', b'Hello', bytes), ('text', 'Hello', str), ('json', {'message':\n 'Hello'}, dict)])\n", (4328, 4475), False, 'import pytest\n'), ((5544, 5921), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""close_codes, code, expected_caught"""', '[*((None, code, True) for code in (1000, 1001)), *((None, code, False) for\n code in WEBSOCKET_CLOSE_CODES if code not in (1000, 1001)), ((1000,), \n 1001, False), ((1000,), 1000, True), *(((), code, False) for code in\n WEBSOCKET_CLOSE_CODES), *((all, code, True) for code in\n WEBSOCKET_CLOSE_CODES)]'], {}), "('close_codes, code, expected_caught', [*((None,\n code, True) for code in (1000, 1001)), *((None, code, False) for code in\n WEBSOCKET_CLOSE_CODES if code not in (1000, 1001)), ((1000,), 1001, \n False), ((1000,), 1000, True), *(((), code, False) for code in\n WEBSOCKET_CLOSE_CODES), *((all, code, True) for code in\n WEBSOCKET_CLOSE_CODES)])\n", (5567, 5921), False, 'import pytest\n'), ((1022, 1056), 'pytest.raises', 'pytest.raises', (['WebSocketDisconnect'], {}), '(WebSocketDisconnect)\n', (1035, 1056), False, 'import pytest\n'), ((1257, 1291), 'pytest.raises', 'pytest.raises', (['WebSocketDisconnect'], {}), '(WebSocketDisconnect)\n', (1270, 1291), False, 'import pytest\n'), ((1549, 1583), 'pytest.raises', 'pytest.raises', (['WebSocketDisconnect'], {}), '(WebSocketDisconnect)\n', (1562, 1583), False, 'import pytest\n'), ((6986, 7000), 'contextlib.suppress', 'suppress', (['Oops'], {}), '(Oops)\n', (6994, 7000), False, 'from contextlib import suppress\n'), ((7366, 7380), 'contextlib.suppress', 'suppress', (['Oops'], {}), '(Oops)\n', (7374, 7380), False, 'from contextlib import suppress\n'), ((7715, 7749), 'pytest.raises', 'pytest.raises', (['WebSocketDisconnect'], {}), '(WebSocketDisconnect)\n', (7728, 7749), False, 'import pytest\n'), ((8112, 8126), 'contextlib.suppress', 'suppress', (['Oops'], {}), '(Oops)\n', (8120, 8126), False, 'from contextlib import suppress\n')]
|
"""ARPANSA """
from cProfile import run
from multiprocessing.connection import Client
from bs4 import BeautifulSoup
import lxml
import aiohttp
import asyncio
from .const import ARPANSA_URL
class Arpansa:
"""Arpansa class fetches the latest measurements from the ARPANSA site"""
def __init__(
self,session: aiohttp.ClientSession
) -> None:
self._session = session
self.measurements = None
async def fetchLatestMeasurements(self):
"""Retrieve the latest data from the ARPANSA site."""
try:
async with self._session.get(ARPANSA_URL) as response:
t = await response.text()
self.measurements = BeautifulSoup(t,'xml')
if response.status != 200:
raise ApiError(f"Unexpected response from ARPANSA server: {response.status}")
except Exception as err:
raise ApiError from err
def getAllLocations(self) -> list:
"""Get the names of all locations."""
rs = self.measurements.find_all("location")
allLocations = []
for l in rs:
allLocations.append(l.get("id"))
return allLocations
def getAllLatest(self) -> list:
"""Get the latest measurements and details for all locations."""
rs = self.measurements.find_all("location")
allLocations = []
for l in rs:
thisLocation = extractInfo(l)
thisLocation["friendlyname"] = l.get("id")
allLocations.append(thisLocation)
return allLocations
def getLatest(self,name) -> dict:
"""Get the latest measurements and details for a specified location."""
rs = self.measurements.find("location", {"id": name})
info = extractInfo(rs)
info["friendlyname"] = name
return info
def extractInfo(rs) -> dict:
"""Convert a BeautifulSoup ResultSet into a dictionary."""
extracted = {}
for state in rs:
if state.name is not None:
extracted[state.name] = state.text
return extracted
class ApiError(Exception):
"""Raised when there is a problem accessing the ARPANSA data."""
pass
async def main():
"""Example usage of the class"""
async with aiohttp.ClientSession() as session:
arpansa = Arpansa(session)
await arpansa.fetchLatestMeasurements()
for measurement in arpansa.getAllLatest():
print(measurement)
location = arpansa.getLatest("Brisbane")
print(location)
if __name__ == "__main__":
import time
s = time.perf_counter()
asyncio.run(main())
elapsed = time.perf_counter() - s
print(f"{__file__} executed in {elapsed:0.2f} seconds.")
|
[
"aiohttp.ClientSession",
"time.perf_counter",
"bs4.BeautifulSoup"
] |
[((2581, 2600), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2598, 2600), False, 'import time\n'), ((2255, 2278), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (2276, 2278), False, 'import aiohttp\n'), ((2639, 2658), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2656, 2658), False, 'import time\n'), ((694, 717), 'bs4.BeautifulSoup', 'BeautifulSoup', (['t', '"""xml"""'], {}), "(t, 'xml')\n", (707, 717), False, 'from bs4 import BeautifulSoup\n')]
|
import os
import crud, models, schemas
from database import SessionLocal
from fastapi import FastAPI, Depends, HTTPException, Request, Form
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse
from sqlalchemy.orm import Session
from typing import List
app = FastAPI(root_path=os.environ['ROOT_PATH'])
origins = ['*']
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=['*'],
allow_headers=['*']
)
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
# MAIN
@app.get("/")
def root():
return {"message": "Welcome to Smart Inventory"}
# USERS
@app.get("/users/", response_model=List[schemas.User])
def read_all_users(db: Session = Depends(get_db)):
return crud.get_all_users(db)
@app.get("/user/{uid}/", response_model=schemas.User)
def read_user_by_uid(uid: str, db: Session = Depends(get_db)):
db_user = crud.get_user_by_uid(db, uid)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return db_user
@app.post("/user/", response_model=schemas.User)
def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):
db_user = crud.get_user_by_uid(db, user.uid)
if db_user:
raise HTTPException(status_code=400, detail="User already exists")
return crud.create_user(db=db, user=user)
@app.delete("/user/{uid}/")
def delete_user_by_uid(uid: str, db: Session = Depends(get_db)):
db_user = crud.get_user_by_uid(db, uid)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
db.delete(db_user)
db.commit()
return {'Deleted user with uid': uid}
# CABINETS
@app.get("/cabinets/", response_model=List[schemas.Cabinet])
def read_all_cabinets(db: Session = Depends(get_db)):
return crud.get_all_cabinets(db)
@app.get("/cabinet/{id}/", response_model=schemas.Cabinet)
def read_cabinet_by_id(id: str, db: Session = Depends(get_db)):
db_cabinet = crud.get_cabinet_by_id(db, id)
if db_cabinet is None:
raise HTTPException(status_code=404, detail="Cabinet not found")
return db_cabinet
@app.post("/cabinet/", response_model=schemas.Cabinet)
def create_cabinet(cabinet: schemas.CabinetCreate, db: Session = Depends(get_db)):
db_cabinet = crud.get_cabinet_by_id(db, cabinet.id)
if db_cabinet:
raise HTTPException(status_code=400, detail="Cabinet already exists")
return crud.create_cabinet(db, cabinet)
@app.delete("/cabinet/{id}/")
def delete_cabinet_by_id(id: str, db: Session = Depends(get_db)):
db_cabinet = crud.get_cabinet_by_id(db, id)
if db_cabinet is None:
raise HTTPException(status_code=404, detail="Cabinet not found")
db.delete(db_cabinet)
db.commit()
return {'Deleted cabinet with id': id}
# CATEGORIES
@app.get("/categories/", response_model=List[schemas.Category]) # reads all categories
def read_all_categories(db: Session = Depends(get_db)):
return crud.get_all_categories(db)
@app.get("/categories/root/", response_model=List[schemas.Category]) # reads all root categories
def read_root_categories(db: Session = Depends(get_db)):
return crud.get_root_categories(db)
@app.get("/category/{id}/", response_model=schemas.Category)
def read_category_by_id(id: str, db: Session = Depends(get_db)):
db_category = crud.get_category_by_id(db, id)
if db_category is None:
raise HTTPException(status_code=404, detail="Category not found")
return db_category
@app.get("/categories/subcategories/{parent_id}/", response_model=List[schemas.Category]) # reads all sub-categories of a category
def read_sub_categories(parent_id: int, db: Session = Depends(get_db)):
parent_category = crud.get_category_by_id(db, parent_id)
if not parent_category:
raise HTTPException(status_code=404, detail="Parent category not found")
return crud.get_sub_categories(db, parent_id)
@app.post("/category/", response_model=schemas.Category)
def create_category(category: schemas.CategoryCreate, db: Session = Depends(get_db)):
db_category = crud.get_category_by_title(db, category.title)
if db_category:
raise HTTPException(status_code=400, detail="Category already exists")
if category.parent_id is not None:
db_parent_category = crud.get_category_by_id(db, category.parent_id)
if db_parent_category is None:
raise HTTPException(status_code=404, detail="Parent category not found")
return crud.create_category(db, category)
@app.delete("/category/{id}/")
def delete_category_by_id(id: int, db: Session = Depends(get_db)):
db_category = crud.get_category_by_id(db, id)
if db_category is None:
raise HTTPException(status_code=404, detail="Category not found")
db.delete(db_category)
db.commit()
return {'Deleted category with id': id}
# ITEMS
@app.get("/items/", response_model=List[schemas.Item])
def read_all_items(db: Session = Depends(get_db)):
return crud.get_all_items(db)
@app.get("/item/{id}/", response_model=schemas.Item)
def read_item_by_id(id: int, db: Session = Depends(get_db)):
db_item = crud.get_item_by_id(db, id)
if db_item is None:
raise HTTPException(status_code=404, detail="Item not found")
return db_item
@app.get("/categories/{category_id}/items/", response_model=List[schemas.Item]) # reads all items under a category
def read_all_items(category_id: int, db: Session = Depends(get_db)):
category = crud.get_category_by_id(db, category_id)
if not category:
raise HTTPException(status_code=404, detail="Category not found")
return crud.get_items_by_category_id(db, category_id)
@app.post("/item/", response_model=schemas.Item)
def create_item(item: schemas.ItemCreate, db: Session = Depends(get_db)):
if item.category_id is not None:
db_category = crud.get_category_by_id(db, item.category_id)
if not db_category:
raise HTTPException(status_code=404, detail="Category not found")
db_item = crud.get_item_by_title(db, item.title)
if db_item:
raise HTTPException(status_code=400, detail="Item already exists")
return crud.create_item(db, item)
@app.delete("/item/{id}/")
def delete_item_by_id(id: int, db: Session = Depends(get_db)):
db_item = crud.get_item_by_id(db, id)
if db_item is None:
raise HTTPException(status_code=404, detail="Item not found")
db.delete(db_item)
db.commit()
return {'Deleted item with id': id}
# ORDER REQUESTS
@app.get("/order-requests/", response_model=List[schemas.OrderRequest])
def read_all_order_requests(db: Session = Depends(get_db)):
return crud.get_all_order_requests(db)
@app.get("/order-requests/item/{id}/", response_model=List[schemas.OrderRequest])
def read_order_requests_by_item_id(id: int, db: Session = Depends(get_db)):
db_item = crud.get_item_by_id(db, id)
if db_item is None:
raise HTTPException(status_code=404, detail="Item not found")
return crud.get_order_requests_by_item_id(db, id)
@app.get("/order-requests/user/{uid}/", response_model=List[schemas.OrderRequest])
def read_order_requests_by_user_id(uid: str, db: Session = Depends(get_db)):
db_user = crud.get_user_by_uid(db, uid)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return crud.get_order_requests_by_user_id(db, uid)
@app.get("/order-requests/state/{state}/", response_model=List[schemas.OrderRequest])
def read_order_requests_by_state(state: int, db: Session = Depends(get_db)):
return crud.get_order_requests_by_state(db, state)
@app.post("/order-request/", response_model=schemas.OrderRequest)
def create_order_request(order_request: schemas.OrderRequestCreate, db: Session = Depends(get_db)):
db_item = crud.get_item_by_id(db, order_request.item_id)
db_user = crud.get_user_by_uid(db, order_request.user_id)
if db_item is None or db_user is None:
raise HTTPException(status_code=404, detail="Item or user not found")
db_order_request = crud.get_order_requests_by_item_and_user_id(db, order_request.item_id, order_request.user_id)
if db_order_request:
raise HTTPException(status_code=400, detail="Order already requested by this user")
return crud.create_order_request(db, order_request)
@app.delete("/order-request/{id}/")
def delete_order_request_by_id(id: int, db: Session = Depends(get_db)):
db_order_request = crud.get_order_request_by_id(db, id)
if db_order_request is None:
raise HTTPException(status_code=404, detail="Order request not found")
db.delete(db_order_request)
db.commit()
return {'Deleted order request with id': id}
# STORAGE UNITS
@app.get("/storage-units/", response_model=List[schemas.StorageUnit])
def read_all_storage_units(db: Session = Depends(get_db)):
return crud.get_all_storage_units(db)
@app.get("/storage-unit/{id}/", response_model=schemas.StorageUnit)
def read_storage_unit_by_id(id: int, db: Session = Depends(get_db)):
db_storage_unit = crud.get_storage_unit_by_id(db, id)
if db_storage_unit is None:
raise HTTPException(status_code=404, detail="Storage unit not found")
return db_storage_unit
@app.get("/storage-units/cabinet/{cabinet_id}/", response_model=List[schemas.StorageUnit])
def read_storage_units_by_cabinet_id(cabinet_id: str, db: Session = Depends(get_db)):
db_cabinet = crud.get_cabinet_by_id(db, cabinet_id)
if db_cabinet is None:
raise HTTPException(status_code=404, detail="Cabinet not found")
return crud.get_storage_units_by_cabinet_id(db, cabinet_id)
@app.post("/storage-unit/", response_model=schemas.StorageUnit)
def create_storage_unit(storage_unit: schemas.StorageUnitCreate, db: Session = Depends(get_db)):
db_item = crud.get_item_by_id(db, storage_unit.item_id)
if db_item is None:
raise HTTPException(status_code=404, detail="Item not found")
if storage_unit.cabinet_id is not None:
db_cabinet = crud.get_cabinet_by_id(db, storage_unit.cabinet_id)
if db_cabinet is None:
raise HTTPException(status_code=404, detail="Cabinet not found")
db_storage_unit = crud.get_storage_unit_by_id(db, storage_unit.id)
if db_storage_unit:
raise HTTPException(status_code=400, detail="Storage unit ID already assigned")
return crud.create_storage_unit(db, storage_unit)
@app.delete("/storage-unit/{id}/")
def delete_storage_unit_by_id(id: int, db: Session = Depends(get_db)):
db_storage_unit = crud.get_storage_unit_by_id(db, id)
if db_storage_unit is None:
raise HTTPException(status_code=404, detail="Storage unit not found")
db.delete(db_storage_unit)
db.commit()
return {'Deleted storage unit with id': id}
# CABINETS UNLOCK ATTEMPTS
@app.get("/unlock-attempts/", response_model=List[schemas.CabinetUnlockAttempt])
def read_all_unlock_attempts(db: Session = Depends(get_db)):
return crud.get_all_unlock_attempts(db)
@app.get("/unlock-attempts/cabinet/{cabinet_id}/", response_model=List[schemas.CabinetUnlockAttempt])
def read_unlock_attempts_by_cabinet_id(cabinet_id: str, db: Session = Depends(get_db)):
db_cabinet = crud.get_cabinet_by_id(db, cabinet_id)
if db_cabinet is None:
raise HTTPException(status_code=404, detail="Cabinet not found")
return crud.get_unlock_attempts_by_cabinet_id(db, cabinet_id)
@app.get("/unlock-attempts/user/{uid}/", response_model=List[schemas.CabinetUnlockAttempt])
def read_unlock_attempts_by_user_id(uid: str, db: Session = Depends(get_db)):
db_user = crud.get_user_by_uid(db, uid)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return crud.get_unlock_attempts_by_user_id(db, uid)
@app.get("/unlock-attempts/cabinet/{cabinet_id}/user/{uid}/", response_model=List[schemas.CabinetUnlockAttempt])
def read_unlock_attempts_by_cabinet_and_user_id(cabinet_id: str, uid: str, db: Session = Depends(get_db)):
db_user = crud.get_user_by_uid(db, uid)
db_cabinet = crud.get_cabinet_by_id(db, cabinet_id)
if db_user is None or db_cabinet is None:
raise HTTPException(status_code=404, detail="User or cabinet not found")
return crud.get_unlock_attempts_by_cabinet_and_user_id(db, cabinet_id, uid)
@app.post("/unlock-attempt/", response_model=schemas.CabinetUnlockAttempt)
def create_unlock_attempt(unlock_attempt: schemas.CabinetUnlockAttemptCreate, db: Session = Depends(get_db)):
db_user = crud.get_user_by_uid(db, unlock_attempt.user_id)
db_cabinet = crud.get_cabinet_by_id(db, unlock_attempt.cabinet_id)
if db_user is None or db_cabinet is None:
raise HTTPException(status_code=404, detail="User or cabinet not found")
return crud.create_unlock_attempt(db, unlock_attempt)
@app.delete("/unlock-attempts/days/{n}/")
def delete_unlock_attempts_older_than(n: int, db: Session = Depends(get_db)):
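    # `n` is validated as an int by FastAPI, so the f-string interpolation below cannot inject
    # arbitrary SQL; a bound parameter would still be the more robust choice.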
db.execute(f"delete from cabinets_unlock_attempts where date < now() - interval '{n} days';")
db.commit()
return {'Deleted all cabinets unlock attempts older than number of days': n}
|
[
"crud.get_unlock_attempts_by_user_id",
"crud.create_item",
"crud.get_unlock_attempts_by_cabinet_id",
"crud.get_category_by_title",
"crud.get_user_by_uid",
"crud.create_storage_unit",
"crud.get_item_by_title",
"fastapi.Depends",
"crud.get_sub_categories",
"crud.get_storage_unit_by_id",
"crud.get_all_categories",
"crud.create_order_request",
"crud.get_root_categories",
"fastapi.FastAPI",
"crud.get_order_requests_by_user_id",
"crud.get_all_items",
"database.SessionLocal",
"crud.get_all_order_requests",
"crud.get_order_requests_by_item_id",
"crud.get_order_requests_by_state",
"crud.create_user",
"crud.create_unlock_attempt",
"crud.get_category_by_id",
"crud.get_storage_units_by_cabinet_id",
"crud.get_all_cabinets",
"crud.get_all_storage_units",
"crud.get_cabinet_by_id",
"crud.get_unlock_attempts_by_cabinet_and_user_id",
"crud.get_all_users",
"fastapi.HTTPException",
"crud.create_category",
"crud.get_order_request_by_id",
"crud.create_cabinet",
"crud.get_items_by_category_id",
"crud.get_order_requests_by_item_and_user_id",
"crud.get_item_by_id",
"crud.get_all_unlock_attempts"
] |
[((302, 344), 'fastapi.FastAPI', 'FastAPI', ([], {'root_path': "os.environ['ROOT_PATH']"}), "(root_path=os.environ['ROOT_PATH'])\n", (309, 344), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((533, 547), 'database.SessionLocal', 'SessionLocal', ([], {}), '()\n', (545, 547), False, 'from database import SessionLocal\n'), ((793, 808), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (800, 808), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((822, 844), 'crud.get_all_users', 'crud.get_all_users', (['db'], {}), '(db)\n', (840, 844), False, 'import crud, models, schemas\n'), ((945, 960), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (952, 960), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((977, 1006), 'crud.get_user_by_uid', 'crud.get_user_by_uid', (['db', 'uid'], {}), '(db, uid)\n', (997, 1006), False, 'import crud, models, schemas\n'), ((1226, 1241), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (1233, 1241), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((1258, 1292), 'crud.get_user_by_uid', 'crud.get_user_by_uid', (['db', 'user.uid'], {}), '(db, user.uid)\n', (1278, 1292), False, 'import crud, models, schemas\n'), ((1395, 1429), 'crud.create_user', 'crud.create_user', ([], {'db': 'db', 'user': 'user'}), '(db=db, user=user)\n', (1411, 1429), False, 'import crud, models, schemas\n'), ((1506, 1521), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (1513, 1521), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((1538, 1567), 'crud.get_user_by_uid', 'crud.get_user_by_uid', (['db', 'uid'], {}), '(db, uid)\n', (1558, 1567), False, 'import crud, models, schemas\n'), ((1853, 1868), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (1860, 1868), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((1882, 1907), 'crud.get_all_cabinets', 'crud.get_all_cabinets', (['db'], {}), '(db)\n', (1903, 1907), False, 'import crud, models, schemas\n'), ((2014, 2029), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (2021, 2029), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((2049, 2079), 'crud.get_cabinet_by_id', 'crud.get_cabinet_by_id', (['db', 'id'], {}), '(db, id)\n', (2071, 2079), False, 'import crud, models, schemas\n'), ((2323, 2338), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (2330, 2338), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((2358, 2396), 'crud.get_cabinet_by_id', 'crud.get_cabinet_by_id', (['db', 'cabinet.id'], {}), '(db, cabinet.id)\n', (2380, 2396), False, 'import crud, models, schemas\n'), ((2505, 2537), 'crud.create_cabinet', 'crud.create_cabinet', (['db', 'cabinet'], {}), '(db, cabinet)\n', (2524, 2537), False, 'import crud, models, schemas\n'), ((2617, 2632), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (2624, 2632), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((2652, 2682), 'crud.get_cabinet_by_id', 'crud.get_cabinet_by_id', (['db', 'id'], {}), '(db, id)\n', (2674, 2682), False, 'import crud, models, schemas\n'), ((3008, 3023), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (3015, 3023), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((3037, 3064), 'crud.get_all_categories', 'crud.get_all_categories', (['db'], {}), '(db)\n', 
(3060, 3064), False, 'import crud, models, schemas\n'), ((3202, 3217), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (3209, 3217), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((3231, 3259), 'crud.get_root_categories', 'crud.get_root_categories', (['db'], {}), '(db)\n', (3255, 3259), False, 'import crud, models, schemas\n'), ((3369, 3384), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (3376, 3384), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((3405, 3436), 'crud.get_category_by_id', 'crud.get_category_by_id', (['db', 'id'], {}), '(db, id)\n', (3428, 3436), False, 'import crud, models, schemas\n'), ((3748, 3763), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (3755, 3763), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((3788, 3826), 'crud.get_category_by_id', 'crud.get_category_by_id', (['db', 'parent_id'], {}), '(db, parent_id)\n', (3811, 3826), False, 'import crud, models, schemas\n'), ((3947, 3985), 'crud.get_sub_categories', 'crud.get_sub_categories', (['db', 'parent_id'], {}), '(db, parent_id)\n', (3970, 3985), False, 'import crud, models, schemas\n'), ((4112, 4127), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (4119, 4127), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((4148, 4194), 'crud.get_category_by_title', 'crud.get_category_by_title', (['db', 'category.title'], {}), '(db, category.title)\n', (4174, 4194), False, 'import crud, models, schemas\n'), ((4545, 4579), 'crud.create_category', 'crud.create_category', (['db', 'category'], {}), '(db, category)\n', (4565, 4579), False, 'import crud, models, schemas\n'), ((4661, 4676), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (4668, 4676), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((4697, 4728), 'crud.get_category_by_id', 'crud.get_category_by_id', (['db', 'id'], {}), '(db, id)\n', (4720, 4728), False, 'import crud, models, schemas\n'), ((5016, 5031), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (5023, 5031), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((5045, 5067), 'crud.get_all_items', 'crud.get_all_items', (['db'], {}), '(db)\n', (5063, 5067), False, 'import crud, models, schemas\n'), ((5165, 5180), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (5172, 5180), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((5197, 5224), 'crud.get_item_by_id', 'crud.get_item_by_id', (['db', 'id'], {}), '(db, id)\n', (5216, 5224), False, 'import crud, models, schemas\n'), ((5505, 5520), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (5512, 5520), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((5538, 5578), 'crud.get_category_by_id', 'crud.get_category_by_id', (['db', 'category_id'], {}), '(db, category_id)\n', (5561, 5578), False, 'import crud, models, schemas\n'), ((5685, 5731), 'crud.get_items_by_category_id', 'crud.get_items_by_category_id', (['db', 'category_id'], {}), '(db, category_id)\n', (5714, 5731), False, 'import crud, models, schemas\n'), ((5838, 5853), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (5845, 5853), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((6081, 6119), 'crud.get_item_by_title', 'crud.get_item_by_title', (['db', 'item.title'], {}), '(db, item.title)\n', (6103, 6119), 
False, 'import crud, models, schemas\n'), ((6222, 6248), 'crud.create_item', 'crud.create_item', (['db', 'item'], {}), '(db, item)\n', (6238, 6248), False, 'import crud, models, schemas\n'), ((6322, 6337), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (6329, 6337), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((6354, 6381), 'crud.get_item_by_id', 'crud.get_item_by_id', (['db', 'id'], {}), '(db, id)\n', (6373, 6381), False, 'import crud, models, schemas\n'), ((6688, 6703), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (6695, 6703), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((6717, 6748), 'crud.get_all_order_requests', 'crud.get_all_order_requests', (['db'], {}), '(db)\n', (6744, 6748), False, 'import crud, models, schemas\n'), ((6890, 6905), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (6897, 6905), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((6922, 6949), 'crud.get_item_by_id', 'crud.get_item_by_id', (['db', 'id'], {}), '(db, id)\n', (6941, 6949), False, 'import crud, models, schemas\n'), ((7055, 7097), 'crud.get_order_requests_by_item_id', 'crud.get_order_requests_by_item_id', (['db', 'id'], {}), '(db, id)\n', (7089, 7097), False, 'import crud, models, schemas\n'), ((7241, 7256), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (7248, 7256), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((7273, 7302), 'crud.get_user_by_uid', 'crud.get_user_by_uid', (['db', 'uid'], {}), '(db, uid)\n', (7293, 7302), False, 'import crud, models, schemas\n'), ((7408, 7451), 'crud.get_order_requests_by_user_id', 'crud.get_order_requests_by_user_id', (['db', 'uid'], {}), '(db, uid)\n', (7442, 7451), False, 'import crud, models, schemas\n'), ((7598, 7613), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (7605, 7613), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((7627, 7670), 'crud.get_order_requests_by_state', 'crud.get_order_requests_by_state', (['db', 'state'], {}), '(db, state)\n', (7659, 7670), False, 'import crud, models, schemas\n'), ((7820, 7835), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (7827, 7835), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((7852, 7898), 'crud.get_item_by_id', 'crud.get_item_by_id', (['db', 'order_request.item_id'], {}), '(db, order_request.item_id)\n', (7871, 7898), False, 'import crud, models, schemas\n'), ((7913, 7960), 'crud.get_user_by_uid', 'crud.get_user_by_uid', (['db', 'order_request.user_id'], {}), '(db, order_request.user_id)\n', (7933, 7960), False, 'import crud, models, schemas\n'), ((8105, 8202), 'crud.get_order_requests_by_item_and_user_id', 'crud.get_order_requests_by_item_and_user_id', (['db', 'order_request.item_id', 'order_request.user_id'], {}), '(db, order_request.item_id,\n order_request.user_id)\n', (8148, 8202), False, 'import crud, models, schemas\n'), ((8327, 8371), 'crud.create_order_request', 'crud.create_order_request', (['db', 'order_request'], {}), '(db, order_request)\n', (8352, 8371), False, 'import crud, models, schemas\n'), ((8463, 8478), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (8470, 8478), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((8504, 8540), 'crud.get_order_request_by_id', 'crud.get_order_request_by_id', (['db', 'id'], {}), '(db, id)\n', (8532, 8540), False, 'import crud, 
models, schemas\n'), ((8879, 8894), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (8886, 8894), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((8908, 8938), 'crud.get_all_storage_units', 'crud.get_all_storage_units', (['db'], {}), '(db)\n', (8934, 8938), False, 'import crud, models, schemas\n'), ((9059, 9074), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (9066, 9074), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((9099, 9134), 'crud.get_storage_unit_by_id', 'crud.get_storage_unit_by_id', (['db', 'id'], {}), '(db, id)\n', (9126, 9134), False, 'import crud, models, schemas\n'), ((9432, 9447), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (9439, 9447), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((9467, 9505), 'crud.get_cabinet_by_id', 'crud.get_cabinet_by_id', (['db', 'cabinet_id'], {}), '(db, cabinet_id)\n', (9489, 9505), False, 'import crud, models, schemas\n'), ((9617, 9669), 'crud.get_storage_units_by_cabinet_id', 'crud.get_storage_units_by_cabinet_id', (['db', 'cabinet_id'], {}), '(db, cabinet_id)\n', (9653, 9669), False, 'import crud, models, schemas\n'), ((9818, 9833), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (9825, 9833), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((9850, 9895), 'crud.get_item_by_id', 'crud.get_item_by_id', (['db', 'storage_unit.item_id'], {}), '(db, storage_unit.item_id)\n', (9869, 9895), False, 'import crud, models, schemas\n'), ((10237, 10285), 'crud.get_storage_unit_by_id', 'crud.get_storage_unit_by_id', (['db', 'storage_unit.id'], {}), '(db, storage_unit.id)\n', (10264, 10285), False, 'import crud, models, schemas\n'), ((10409, 10451), 'crud.create_storage_unit', 'crud.create_storage_unit', (['db', 'storage_unit'], {}), '(db, storage_unit)\n', (10433, 10451), False, 'import crud, models, schemas\n'), ((10541, 10556), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (10548, 10556), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((10581, 10616), 'crud.get_storage_unit_by_id', 'crud.get_storage_unit_by_id', (['db', 'id'], {}), '(db, id)\n', (10608, 10616), False, 'import crud, models, schemas\n'), ((10975, 10990), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (10982, 10990), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((11004, 11036), 'crud.get_all_unlock_attempts', 'crud.get_all_unlock_attempts', (['db'], {}), '(db)\n', (11032, 11036), False, 'import crud, models, schemas\n'), ((11210, 11225), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (11217, 11225), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((11245, 11283), 'crud.get_cabinet_by_id', 'crud.get_cabinet_by_id', (['db', 'cabinet_id'], {}), '(db, cabinet_id)\n', (11267, 11283), False, 'import crud, models, schemas\n'), ((11395, 11449), 'crud.get_unlock_attempts_by_cabinet_id', 'crud.get_unlock_attempts_by_cabinet_id', (['db', 'cabinet_id'], {}), '(db, cabinet_id)\n', (11433, 11449), False, 'import crud, models, schemas\n'), ((11603, 11618), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (11610, 11618), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((11635, 11664), 'crud.get_user_by_uid', 'crud.get_user_by_uid', (['db', 'uid'], {}), '(db, uid)\n', (11655, 11664), False, 'import crud, models, schemas\n'), 
((11770, 11814), 'crud.get_unlock_attempts_by_user_id', 'crud.get_unlock_attempts_by_user_id', (['db', 'uid'], {}), '(db, uid)\n', (11805, 11814), False, 'import crud, models, schemas\n'), ((12013, 12028), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (12020, 12028), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((12045, 12074), 'crud.get_user_by_uid', 'crud.get_user_by_uid', (['db', 'uid'], {}), '(db, uid)\n', (12065, 12074), False, 'import crud, models, schemas\n'), ((12092, 12130), 'crud.get_cabinet_by_id', 'crud.get_cabinet_by_id', (['db', 'cabinet_id'], {}), '(db, cabinet_id)\n', (12114, 12130), False, 'import crud, models, schemas\n'), ((12269, 12337), 'crud.get_unlock_attempts_by_cabinet_and_user_id', 'crud.get_unlock_attempts_by_cabinet_and_user_id', (['db', 'cabinet_id', 'uid'], {}), '(db, cabinet_id, uid)\n', (12316, 12337), False, 'import crud, models, schemas\n'), ((12507, 12522), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (12514, 12522), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((12539, 12587), 'crud.get_user_by_uid', 'crud.get_user_by_uid', (['db', 'unlock_attempt.user_id'], {}), '(db, unlock_attempt.user_id)\n', (12559, 12587), False, 'import crud, models, schemas\n'), ((12605, 12658), 'crud.get_cabinet_by_id', 'crud.get_cabinet_by_id', (['db', 'unlock_attempt.cabinet_id'], {}), '(db, unlock_attempt.cabinet_id)\n', (12627, 12658), False, 'import crud, models, schemas\n'), ((12797, 12843), 'crud.create_unlock_attempt', 'crud.create_unlock_attempt', (['db', 'unlock_attempt'], {}), '(db, unlock_attempt)\n', (12823, 12843), False, 'import crud, models, schemas\n'), ((12947, 12962), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (12954, 12962), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((1045, 1100), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""User not found"""'}), "(status_code=404, detail='User not found')\n", (1058, 1100), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((1323, 1383), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""User already exists"""'}), "(status_code=400, detail='User already exists')\n", (1336, 1383), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((1606, 1661), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""User not found"""'}), "(status_code=404, detail='User not found')\n", (1619, 1661), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((2121, 2179), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Cabinet not found"""'}), "(status_code=404, detail='Cabinet not found')\n", (2134, 2179), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((2430, 2493), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""Cabinet already exists"""'}), "(status_code=400, detail='Cabinet already exists')\n", (2443, 2493), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((2724, 2782), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Cabinet not found"""'}), "(status_code=404, detail='Cabinet not found')\n", (2737, 2782), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((3479, 3538), 'fastapi.HTTPException', 
'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Category not found"""'}), "(status_code=404, detail='Category not found')\n", (3492, 3538), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((3869, 3935), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Parent category not found"""'}), "(status_code=404, detail='Parent category not found')\n", (3882, 3935), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((4229, 4293), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""Category already exists"""'}), "(status_code=400, detail='Category already exists')\n", (4242, 4293), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((4362, 4409), 'crud.get_category_by_id', 'crud.get_category_by_id', (['db', 'category.parent_id'], {}), '(db, category.parent_id)\n', (4385, 4409), False, 'import crud, models, schemas\n'), ((4771, 4830), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Category not found"""'}), "(status_code=404, detail='Category not found')\n", (4784, 4830), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((5263, 5318), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Item not found"""'}), "(status_code=404, detail='Item not found')\n", (5276, 5318), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((5614, 5673), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Category not found"""'}), "(status_code=404, detail='Category not found')\n", (5627, 5673), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((5915, 5960), 'crud.get_category_by_id', 'crud.get_category_by_id', (['db', 'item.category_id'], {}), '(db, item.category_id)\n', (5938, 5960), False, 'import crud, models, schemas\n'), ((6150, 6210), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""Item already exists"""'}), "(status_code=400, detail='Item already exists')\n", (6163, 6210), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((6420, 6475), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Item not found"""'}), "(status_code=404, detail='Item not found')\n", (6433, 6475), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((6988, 7043), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Item not found"""'}), "(status_code=404, detail='Item not found')\n", (7001, 7043), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((7341, 7396), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""User not found"""'}), "(status_code=404, detail='User not found')\n", (7354, 7396), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((8018, 8081), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Item or user not found"""'}), "(status_code=404, detail='Item or user not found')\n", (8031, 8081), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((8238, 8315), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""Order already requested by this user"""'}), "(status_code=400, detail='Order already requested by this 
user')\n", (8251, 8315), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((8588, 8652), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Order request not found"""'}), "(status_code=404, detail='Order request not found')\n", (8601, 8652), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((9181, 9244), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Storage unit not found"""'}), "(status_code=404, detail='Storage unit not found')\n", (9194, 9244), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((9547, 9605), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Cabinet not found"""'}), "(status_code=404, detail='Cabinet not found')\n", (9560, 9605), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((9934, 9989), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Item not found"""'}), "(status_code=404, detail='Item not found')\n", (9947, 9989), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((10055, 10106), 'crud.get_cabinet_by_id', 'crud.get_cabinet_by_id', (['db', 'storage_unit.cabinet_id'], {}), '(db, storage_unit.cabinet_id)\n', (10077, 10106), False, 'import crud, models, schemas\n'), ((10324, 10397), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""Storage unit ID already assigned"""'}), "(status_code=400, detail='Storage unit ID already assigned')\n", (10337, 10397), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((10663, 10726), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Storage unit not found"""'}), "(status_code=404, detail='Storage unit not found')\n", (10676, 10726), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((11325, 11383), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Cabinet not found"""'}), "(status_code=404, detail='Cabinet not found')\n", (11338, 11383), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((11703, 11758), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""User not found"""'}), "(status_code=404, detail='User not found')\n", (11716, 11758), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((12191, 12257), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""User or cabinet not found"""'}), "(status_code=404, detail='User or cabinet not found')\n", (12204, 12257), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((12719, 12785), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""User or cabinet not found"""'}), "(status_code=404, detail='User or cabinet not found')\n", (12732, 12785), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((4467, 4533), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Parent category not found"""'}), "(status_code=404, detail='Parent category not found')\n", (4480, 4533), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((6007, 6066), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Category not found"""'}), 
"(status_code=404, detail='Category not found')\n", (6020, 6066), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n'), ((10156, 10214), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Cabinet not found"""'}), "(status_code=404, detail='Cabinet not found')\n", (10169, 10214), False, 'from fastapi import FastAPI, Depends, HTTPException, Request, Form\n')]
|
from unittest import TestCase
from mock import patch, ANY, DEFAULT, Mock, MagicMock
from django.test import RequestFactory
from django_auth_lti import const
from student_locations.views import index, lti_launch, main
@patch.multiple('student_locations.views', render=DEFAULT)
class TestMapView(TestCase):
longMessage = True
def setUp(self):
self.resource_link_id = '1234abcd'
self.section_id = 5678
self.sis_section_id = 8989
self.request = RequestFactory().get('/fake-path')
self.request.user = Mock(name='user_mock')
self.request.user.is_authenticated.return_value = True
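        # Fake an authenticated user and an LTI launch session carrying the instructor role.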
self.request.session = {
'LTI_LAUNCH': {
'resource_link_id': self.resource_link_id,
'roles': [const.INSTRUCTOR],
'user_id' : 'user123'
}
}
def getpostrequest(self):
request = RequestFactory().post('/fake-path')
request.user = Mock(name='user_mock')
request.user.is_authenticated.return_value = True
request.session = {
'LTI_LAUNCH': {
'resource_link_id': self.resource_link_id,
'roles': [const.INSTRUCTOR],
'user_id' : 'user123'
}
}
return request
# def get_render_context_value(self, render_mock):
# """ Returns the value of the context dictionary key associated with the render mock object """
# context = render_mock.call_args[0][2]
# return context.get(context_key)
def test_view_index(self, render):
""" test that the index view renders the index page """
request = self.request
index(request)
render.assert_called_with(request, 'student_locations/index.html')
@patch('student_locations.views.redirect')
def test_view_lti_launch_success(self, redirect_mock, render):
""" test that the lti_launch view renders the main view on success """
request = self.getpostrequest()
lti_launch(request)
redirect_mock.assert_called_with('sl:main')
@patch('student_locations.views.validaterequiredltiparams')
def test_view_lti_launch_user_not_authenticated(self, valid_lti_params_mock, render):
""" test that the lti_launch view renders the error page if the required
LTI params are not present in the session """
request = self.getpostrequest()
valid_lti_params_mock.return_value = False
lti_launch(request)
render.assert_called_with(request, 'student_locations/error.html', ANY)
def test_view_main(self, render):
""" test that the main view renders the map_view page """
request = self.request
main(request)
render.assert_called_with(request, 'student_locations/map_view.html', ANY)
|
[
"student_locations.views.main",
"student_locations.views.lti_launch",
"django.test.RequestFactory",
"mock.patch",
"mock.Mock",
"mock.patch.multiple",
"student_locations.views.index"
] |
[((219, 276), 'mock.patch.multiple', 'patch.multiple', (['"""student_locations.views"""'], {'render': 'DEFAULT'}), "('student_locations.views', render=DEFAULT)\n", (233, 276), False, 'from mock import patch, ANY, DEFAULT, Mock, MagicMock\n'), ((1787, 1828), 'mock.patch', 'patch', (['"""student_locations.views.redirect"""'], {}), "('student_locations.views.redirect')\n", (1792, 1828), False, 'from mock import patch, ANY, DEFAULT, Mock, MagicMock\n'), ((2101, 2159), 'mock.patch', 'patch', (['"""student_locations.views.validaterequiredltiparams"""'], {}), "('student_locations.views.validaterequiredltiparams')\n", (2106, 2159), False, 'from mock import patch, ANY, DEFAULT, Mock, MagicMock\n'), ((546, 568), 'mock.Mock', 'Mock', ([], {'name': '"""user_mock"""'}), "(name='user_mock')\n", (550, 568), False, 'from mock import patch, ANY, DEFAULT, Mock, MagicMock\n'), ((967, 989), 'mock.Mock', 'Mock', ([], {'name': '"""user_mock"""'}), "(name='user_mock')\n", (971, 989), False, 'from mock import patch, ANY, DEFAULT, Mock, MagicMock\n'), ((1691, 1705), 'student_locations.views.index', 'index', (['request'], {}), '(request)\n', (1696, 1705), False, 'from student_locations.views import index, lti_launch, main\n'), ((2023, 2042), 'student_locations.views.lti_launch', 'lti_launch', (['request'], {}), '(request)\n', (2033, 2042), False, 'from student_locations.views import index, lti_launch, main\n'), ((2488, 2507), 'student_locations.views.lti_launch', 'lti_launch', (['request'], {}), '(request)\n', (2498, 2507), False, 'from student_locations.views import index, lti_launch, main\n'), ((2732, 2745), 'student_locations.views.main', 'main', (['request'], {}), '(request)\n', (2736, 2745), False, 'from student_locations.views import index, lti_launch, main\n'), ((483, 499), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (497, 499), False, 'from django.test import RequestFactory\n'), ((908, 924), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (922, 924), False, 'from django.test import RequestFactory\n')]
|
from datetime import timedelta
from xml.etree import ElementTree
from django.test import TestCase
from django.urls import reverse
from django.utils.timezone import now
from exhaust.tests.factories import CategoryFactory, PostFactory
class SitemapsTestCase(TestCase):
def test_posts_sitemap(self):
PostFactory.create_batch(5, online=True)
# Check that offline ones aren't being shown.
PostFactory.create(online=False, date=now() - timedelta(minutes=1))
# ...and ones with a future publication date.
PostFactory.create(online=True, date=now() + timedelta(days=1))
response = self.client.get(reverse('django.contrib.sitemaps.views.sitemap'))
self.assertEqual(response.status_code, 200)
tree = ElementTree.fromstring(response.content.decode('utf-8'))
self.assertEqual(len(list(tree)), 5)
def test_categories_sitemap(self):
# Ensure unused categories are not shown in the sitemap, which
# includes those that are only assigned to a post that is offline.
used_category, unused_category, unused_category_2 = CategoryFactory.create_batch(3)
post = PostFactory.create(online=True)
post.categories.set([used_category])
offline_post = PostFactory.create(online=False)
offline_post.categories.set([unused_category_2])
response = self.client.get(reverse('django.contrib.sitemaps.views.sitemap'))
self.assertEqual(response.status_code, 200)
tree = ElementTree.fromstring(response.content.decode('utf-8'))
child_items = list(tree)
self.assertEqual(len(child_items), 2)
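        # Map the sitemap XML namespace to a prefix so ElementTree's find() can resolve 'sitemaps:loc' below.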
nsinfo = {'sitemaps': 'http://www.sitemaps.org/schemas/sitemap/0.9'}
for obj in [post, used_category]:
self.assertEqual(len([
True for child in child_items
if child.find('sitemaps:loc', nsinfo).text == f'http://testserver{obj.get_absolute_url()}'
]), 1)
for obj in [unused_category, unused_category_2]:
self.assertEqual(len([
True for child in child_items
if child.find('sitemaps:loc', nsinfo).text == f'http://testserver{obj.get_absolute_url()}'
]), 0)
|
[
"exhaust.tests.factories.CategoryFactory.create_batch",
"django.utils.timezone.now",
"exhaust.tests.factories.PostFactory.create_batch",
"exhaust.tests.factories.PostFactory.create",
"django.urls.reverse",
"datetime.timedelta"
] |
[((313, 353), 'exhaust.tests.factories.PostFactory.create_batch', 'PostFactory.create_batch', (['(5)'], {'online': '(True)'}), '(5, online=True)\n', (337, 353), False, 'from exhaust.tests.factories import CategoryFactory, PostFactory\n'), ((1114, 1145), 'exhaust.tests.factories.CategoryFactory.create_batch', 'CategoryFactory.create_batch', (['(3)'], {}), '(3)\n', (1142, 1145), False, 'from exhaust.tests.factories import CategoryFactory, PostFactory\n'), ((1162, 1193), 'exhaust.tests.factories.PostFactory.create', 'PostFactory.create', ([], {'online': '(True)'}), '(online=True)\n', (1180, 1193), False, 'from exhaust.tests.factories import CategoryFactory, PostFactory\n'), ((1263, 1295), 'exhaust.tests.factories.PostFactory.create', 'PostFactory.create', ([], {'online': '(False)'}), '(online=False)\n', (1281, 1295), False, 'from exhaust.tests.factories import CategoryFactory, PostFactory\n'), ((648, 696), 'django.urls.reverse', 'reverse', (['"""django.contrib.sitemaps.views.sitemap"""'], {}), "('django.contrib.sitemaps.views.sitemap')\n", (655, 696), False, 'from django.urls import reverse\n'), ((1389, 1437), 'django.urls.reverse', 'reverse', (['"""django.contrib.sitemaps.views.sitemap"""'], {}), "('django.contrib.sitemaps.views.sitemap')\n", (1396, 1437), False, 'from django.urls import reverse\n'), ((455, 460), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (458, 460), False, 'from django.utils.timezone import now\n'), ((463, 483), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (472, 483), False, 'from datetime import timedelta\n'), ((585, 590), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (588, 590), False, 'from django.utils.timezone import now\n'), ((593, 610), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (602, 610), False, 'from datetime import timedelta\n')]
|
"""Proof Verification Module for Exonum `ProofListIndex`."""
from typing import Dict, List, Tuple, Any, Callable
import itertools
from logging import getLogger
from exonum_client.crypto import Hash
from ..utils import is_field_hash, is_field_int, calculate_height
from ..hasher import Hasher
from .key import ProofListKey
from .errors import MalformedListProofError, ListProofVerificationError
# pylint: disable=C0103
logger = getLogger(__name__)
class HashedEntry:
""" Element of a proof with a key and a hash. """
def __init__(self, key: ProofListKey, entry_hash: Hash):
self.key = key
self.entry_hash = entry_hash
@classmethod
def parse(cls, data: Dict[Any, Any]) -> "HashedEntry":
""" Creates a HashedEntry object from the provided dict. """
if not isinstance(data, dict) or not is_field_hash(data, "hash"):
err = MalformedListProofError.parse_error(str(data))
logger.warning(
"Could not parse `hash` from dict, which is required for HashedEntry object creation. %s", str(err)
)
raise err
key = ProofListKey.parse(data)
return HashedEntry(key, Hash(bytes.fromhex(data["hash"])))
def __eq__(self, other: object) -> bool:
if not isinstance(other, HashedEntry):
raise TypeError("Attempt to compare HashedEntry with an object of a different type.")
return self.key == other.key and self.entry_hash == other.entry_hash
def _hash_layer(layer: List[HashedEntry], last_index: int) -> List[HashedEntry]:
""" Takes a layer as a list of hashed entries and the last index as an int and returns a new layer. """
new_len = (len(layer) + 1) // 2
new_layer: List[HashedEntry] = []
for i in range(new_len):
left_idx = 2 * i
right_idx = 2 * i + 1
# Check if there are both right and left indices in the layer:
if len(layer) > right_idx:
# Verify that entries are in the correct order:
if not layer[left_idx].key.is_left() or layer[right_idx].key.index != layer[left_idx].key.index + 1:
err = MalformedListProofError.missing_hash()
logger.warning(str(err))
raise err
left_hash = layer[left_idx].entry_hash
right_hash = layer[right_idx].entry_hash
new_entry = HashedEntry(layer[left_idx].key.parent(), Hasher.hash_node(left_hash, right_hash))
else:
# If there is an odd number of entries, the index of the last one should be equal to provided last_index:
full_layer_length = last_index + 1
if full_layer_length % 2 == 0 or layer[left_idx].key.index != last_index:
err = MalformedListProofError.missing_hash()
logger.warning(str(err))
raise err
left_hash = layer[left_idx].entry_hash
new_entry = HashedEntry(layer[left_idx].key.parent(), Hasher.hash_single_node(left_hash))
new_layer.append(new_entry)
return new_layer
class ListProof:
"""ListProof class provides an interface to parse and verify proofs for ProofListIndex retrieved
from the Exonum blockchain.
Example workflow:
>>> proof_json = {
>>> "proof": [
>>> {"index": 1, "height": 1, "hash": "eae60adeb5c681110eb5226a4ef95faa4f993c4a838d368b66f7c98501f2c8f9"}
>>> ],
>>> "entries": [[0, "6b70d869aeed2fe090e708485d9f4b4676ae6984206cf05efc136d663610e5c9"]],
>>> "length": 2,
>>> }
>>> expected_hash = "07df67b1a853551eb05470a03c9245483e5a3731b4b558e634908ff356b69857"
>>> proof = ListProof.parse(proof_json)
    >>> result = proof.validate(Hash(bytes.fromhex(expected_hash)))
    >>> assert result == [(0, "6b70d869aeed2fe090e708485d9f4b4676ae6984206cf05efc136d663610e5c9")]
"""
def __init__(
self,
proof: List[HashedEntry],
entries: List[Tuple[int, Any]],
length: int,
value_to_bytes: Callable[[Any], bytes],
):
"""
Constructor of the ListProof.
It is not intended to be used directly, use ListProof.Parse instead.
Parameters
----------
proof : List[HashedEntry]
Proof entries.
entries: List[Tuple[int, Any]]
Unhashed entries (leaves).
length: int
Length of the proof list.
        value_to_bytes: Callable[[Any], bytes]
A function that converts the stored value to bytes for hashing.
"""
self._proof = proof
self._entries = entries
self._length = length
self._value_to_bytes = value_to_bytes
@classmethod
def parse(cls, proof_dict: Dict[str, Any], value_to_bytes: Callable[[Any], bytes] = bytes.fromhex) -> "ListProof":
"""
Method to parse ListProof from the dict.
Expected dict format:
>>>
{
'proof': [
{'index': 1, 'height': 1, 'hash': 'eae60adeb5c681110eb5226a4ef95faa4f993c4a838d368b66f7c98501f2c8f9'}
],
'entries': [
[0, '6b70d869aeed2fe090e708485d9f4b4676ae6984206cf05efc136d663610e5c9']
],
'length': 2
}
        If no errors occurred during parsing, a ListProof object will be returned.
        However, successful parsing does not guarantee that the proof is well-formed (it only means that the provided
        dict structure matches the expected one).
        Actual checks of the proof contents are performed in the `validate` method.
        To convert a value to bytes, ListProof uses bytes.fromhex by default.
If your type should be converted to bytes using Protobuf, you can generate a converter function with the use of
`build_encoder_function` from encoder.py.
Otherwise, you have to implement the converter function by yourself.
Parameters
----------
proof_dict : Dict[str, Any]
Proof as a dict.
        value_to_bytes: Callable[[Any], bytes]
A function that converts the stored value to bytes for hashing.
By default, `bytes.fromhex` is used.
Raises
------
MalformedListProofError
If the structure of the provided dict does not match the expected one,
an exception `MalformedListProofError` is raised.
"""
if (
not isinstance(proof_dict.get("proof"), list)
or not isinstance(proof_dict.get("entries"), list)
or not is_field_int(proof_dict, "length")
):
err = MalformedListProofError.parse_error(str(proof_dict))
logger.warning("The structure of the provided dict does not match the expected one. %s", str(err))
raise err
proof = [HashedEntry.parse(entry) for entry in proof_dict["proof"]]
entries = [cls._parse_entry(entry) for entry in proof_dict["entries"]]
length = proof_dict["length"]
logger.debug("Successfully parsed ListProof from the dict.")
return ListProof(proof, entries, length, value_to_bytes)
def validate(self, expected_hash: Hash) -> List[Tuple[int, Any]]:
"""
This method validates the provided proof against the given expected hash.
Parameters
----------
expected_hash: Hash
Expected root hash.
Returns
-------
result: List[Tuple[int, Any]]
If the hash is correct, a list of the collected values with indices is returned.
Raises
------
ListProofVerificationError
If verification fails, an exception `ListProofVerificationError` is raised.
MalformedListProofError
If the proof is malformed, an exception `MalformedListProofError` is raised.
"""
if not isinstance(expected_hash, Hash):
raise TypeError("`expected_hash` should be of type Hash.")
tree_root = self._collect()
calculated_hash = Hasher.hash_list_node(self._length, tree_root)
if calculated_hash != expected_hash:
logger.warning("Provided root hash does not match the calculated one.")
raise ListProofVerificationError(expected_hash.value, calculated_hash.value)
logger.debug("Successfully validated the provided proof against the given expected hash.")
return self._entries
@staticmethod
def _parse_entry(data: List[Any]) -> Tuple[int, Any]:
if not isinstance(data, list) or not len(data) == 2:
err = MalformedListProofError.parse_error(str(data))
logger.warning("Could not parse a list. %s", err)
raise err
return data[0], data[1]
@staticmethod
def _tree_height_by_length(length: int) -> int:
if length == 0:
return 0
return calculate_height(length)
@staticmethod
def _check_duplicates(entries: List[Any]) -> None:
for idx in range(1, len(entries)):
if entries[idx][0] == entries[idx - 1][0]:
err = MalformedListProofError.duplicate_key()
logger.warning(str(err))
raise err
def _collect(self) -> Hash:
def _hash_entry(entry: Tuple[int, Any]) -> HashedEntry:
""" Creates a hash entry from the value. """
key = ProofListKey(1, entry[0])
entry_hash = Hasher.hash_leaf(self._value_to_bytes(entry[1]))
return HashedEntry(key, entry_hash)
def _split_hashes_by_height(
hashes: List[HashedEntry], height: int
) -> Tuple[List[HashedEntry], List[HashedEntry]]:
""" Splits a list of the hashed entries into two lists by the given height. """
current = list(itertools.takewhile(lambda x: x.key.height == height, hashes))
remaining = hashes[len(current) :]
return current, remaining
tree_height = self._tree_height_by_length(self._length)
# Check an edge case when the list contains no elements:
if tree_height == 0 and (not self._proof or not self._entries):
err = MalformedListProofError.non_empty_proof()
logger.warning(str(err))
raise err
# If there are no entries, the proof should contain only a single root hash:
if not self._entries:
if len(self._proof) != 1:
if self._proof:
err = MalformedListProofError.missing_hash()
logger.warning(str(err))
raise err
err = MalformedListProofError.unexpected_branch()
logger.warning(str(err))
raise err
if self._proof[0].key == ProofListKey(tree_height, 0):
return self._proof[0].entry_hash
err = MalformedListProofError.unexpected_branch()
logger.warning(str(err))
raise err
# Sort the entries and the proof:
self._entries.sort(key=lambda el: el[0])
self._proof.sort(key=lambda el: el.key)
# Check that there are no duplicates:
self._check_duplicates(self._entries)
self._check_duplicates(self._proof)
# Check that the hashes at each height have indices in the allowed range:
for entry in self._proof:
height = entry.key.height
if height == 0:
err = MalformedListProofError.unexpected_leaf()
logger.warning(str(err))
raise err
# self._length -1 is the index of the last element at `height = 1`.
# This index is divided by 2 with each new height:
if height >= tree_height or entry.key.index > (self._length - 1) >> (height - 1):
err = MalformedListProofError.unexpected_branch()
logger.warning(str(err))
raise err
# Create the first layer:
layer = list(map(_hash_entry, self._entries))
hashes = list(self._proof)
last_index = self._length - 1
for height in range(1, tree_height):
# Filter the hashes of the current height and the rest heights (to be processed later):
hashes, remaining_hashes = _split_hashes_by_height(hashes, height)
# Merge the current layer with the hashes that belong to this layer:
layer = sorted(layer + hashes, key=lambda x: x.key)
# Calculate a new layer:
layer = _hash_layer(layer, last_index)
# Size of the next layer is two times smaller:
last_index //= 2
# Make remaining_hashes hashes to be processed:
hashes = remaining_hashes
assert len(layer) == 1, "Result layer length is not 1"
return layer[0].entry_hash
|
[
"itertools.takewhile",
"logging.getLogger"
] |
[((430, 449), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (439, 449), False, 'from logging import getLogger\n'), ((9752, 9813), 'itertools.takewhile', 'itertools.takewhile', (['(lambda x: x.key.height == height)', 'hashes'], {}), '(lambda x: x.key.height == height, hashes)\n', (9771, 9813), False, 'import itertools\n')]
|
from scipy.stats import multivariate_normal
from scipy.signal import convolve2d
import matplotlib
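# Probe whether a working interactive backend is available; if figure creation
# fails (e.g. on a headless machine), fall back to the non-interactive Agg backend.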
try:
matplotlib.pyplot.figure()
matplotlib.pyplot.close()
except Exception:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
# the colormap should assign light colors to low values
TERRAIN_CMAP = 'Greens'
DEFAULT_PATH = '/tmp/mujoco_terrains'
STEP = 0.1
def generate_hills(width, height, nhills):
'''
@param width float, terrain width
@param height float, terrain height
    @param nhills int, number of hills to generate. The number actually generated is sqrt(nhills)^2
'''
# setup coordinate grid
xmin, xmax = -width/2.0, width/2.0
ymin, ymax = -height/2.0, height/2.0
x, y = np.mgrid[xmin:xmax:STEP, ymin:ymax:STEP]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x; pos[:, :, 1] = y
# generate hilltops
xm, ym = np.mgrid[xmin:xmax:width/np.sqrt(nhills), ymin:ymax:height/np.sqrt(nhills)]
mu = np.c_[xm.flat, ym.flat]
sigma = float(width*height)/(nhills*8)
for i in range(mu.shape[0]):
mu[i] = multivariate_normal.rvs(mean=mu[i], cov=sigma)
# generate hills
sigma = sigma + sigma*np.random.rand(mu.shape[0])
rvs = [ multivariate_normal(mu[i,:], cov=sigma[i]) for i in range(mu.shape[0]) ]
hfield = np.max([ rv.pdf(pos) for rv in rvs ], axis=0)
return x, y, hfield
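# Example (illustrative values): x, y, hfield = generate_hills(width=20.0, height=20.0, nhills=16)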
def clear_patch(hfield, box):
''' Clears a patch shaped like box, assuming robot is placed in center of hfield
@param box: rllab.spaces.Box-like
'''
if box.flat_dim > 2:
raise ValueError("Provide 2dim box")
# clear patch
h_center = int(0.5 * hfield.shape[0])
w_center = int(0.5 * hfield.shape[1])
fromrow, torow = w_center + int(box.low[0]/STEP), w_center + int(box.high[0] / STEP)
fromcol, tocol = h_center + int(box.low[1]/STEP), h_center + int(box.high[1] / STEP)
hfield[fromrow:torow, fromcol:tocol] = 0.0
# convolve to smoothen edges somewhat, in case hills were cut off
K = np.ones((10,10)) / 100.0
s = convolve2d(hfield[fromrow-9:torow+9, fromcol-9:tocol+9], K, mode='same', boundary='symm')
hfield[fromrow-9:torow+9, fromcol-9:tocol+9] = s
return hfield
def _checkpath(path_):
if path_ is None:
path_ = DEFAULT_PATH
if not os.path.exists(path_):
os.makedirs(path_)
return path_
def save_heightfield(x, y, hfield, fname, path=None):
'''
@param path, str (optional). If not provided, DEFAULT_PATH is used. Make sure the path + fname match the <file> attribute
of the <asset> element in the env XML where the height field is defined
'''
path = _checkpath(path)
plt.figure()
plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP) # terrain_cmap is necessary to make sure tops get light color
plt.savefig(os.path.join(path, fname), bbox_inches='tight')
plt.close()
def save_texture(x, y, hfield, fname, path=None):
'''
@param path, str (optional). If not provided, DEFAULT_PATH is used. Make sure this matches the <texturedir> of the
<compiler> element in the env XML
'''
path = _checkpath(path)
plt.figure()
plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP)
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
# for some reason plt.grid does not work here, so generate gridlines manually
for i in np.arange(xmin,xmax,0.5):
plt.plot([i,i], [ymin,ymax], 'k', linewidth=0.1)
for i in np.arange(ymin,ymax,0.5):
plt.plot([xmin,xmax],[i,i], 'k', linewidth=0.1)
plt.savefig(os.path.join(path, fname), bbox_inches='tight')
plt.close()
|
[
"scipy.stats.multivariate_normal.rvs",
"scipy.signal.convolve2d",
"os.makedirs",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.empty",
"os.path.exists",
"scipy.stats.multivariate_normal",
"numpy.ones",
"matplotlib.pyplot.figure",
"matplotlib.use",
"matplotlib.pyplot.contourf",
"numpy.arange",
"numpy.random.rand",
"os.path.join",
"numpy.sqrt"
] |
[((107, 133), 'matplotlib.pyplot.figure', 'matplotlib.pyplot.figure', ([], {}), '()\n', (131, 133), False, 'import matplotlib\n'), ((138, 163), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', ([], {}), '()\n', (161, 163), False, 'import matplotlib\n'), ((789, 813), 'numpy.empty', 'np.empty', (['(x.shape + (2,))'], {}), '(x.shape + (2,))\n', (797, 813), True, 'import numpy as np\n'), ((2071, 2173), 'scipy.signal.convolve2d', 'convolve2d', (['hfield[fromrow - 9:torow + 9, fromcol - 9:tocol + 9]', 'K'], {'mode': '"""same"""', 'boundary': '"""symm"""'}), "(hfield[fromrow - 9:torow + 9, fromcol - 9:tocol + 9], K, mode=\n 'same', boundary='symm')\n", (2081, 2173), False, 'from scipy.signal import convolve2d\n'), ((2711, 2723), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2721, 2723), True, 'import matplotlib.pyplot as plt\n'), ((2728, 2779), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', '(-hfield)', '(100)'], {'cmap': 'TERRAIN_CMAP'}), '(x, y, -hfield, 100, cmap=TERRAIN_CMAP)\n', (2740, 2779), True, 'import matplotlib.pyplot as plt\n'), ((2910, 2921), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2919, 2921), True, 'import matplotlib.pyplot as plt\n'), ((3182, 3194), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3192, 3194), True, 'import matplotlib.pyplot as plt\n'), ((3199, 3250), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', '(-hfield)', '(100)'], {'cmap': 'TERRAIN_CMAP'}), '(x, y, -hfield, 100, cmap=TERRAIN_CMAP)\n', (3211, 3250), True, 'import matplotlib.pyplot as plt\n'), ((3414, 3440), 'numpy.arange', 'np.arange', (['xmin', 'xmax', '(0.5)'], {}), '(xmin, xmax, 0.5)\n', (3423, 3440), True, 'import numpy as np\n'), ((3510, 3536), 'numpy.arange', 'np.arange', (['ymin', 'ymax', '(0.5)'], {}), '(ymin, ymax, 0.5)\n', (3519, 3536), True, 'import numpy as np\n'), ((3660, 3671), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3669, 3671), True, 'import matplotlib.pyplot as plt\n'), ((186, 207), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (200, 207), False, 'import matplotlib\n'), ((1096, 1142), 'scipy.stats.multivariate_normal.rvs', 'multivariate_normal.rvs', ([], {'mean': 'mu[i]', 'cov': 'sigma'}), '(mean=mu[i], cov=sigma)\n', (1119, 1142), False, 'from scipy.stats import multivariate_normal\n'), ((1235, 1278), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['mu[i, :]'], {'cov': 'sigma[i]'}), '(mu[i, :], cov=sigma[i])\n', (1254, 1278), False, 'from scipy.stats import multivariate_normal\n'), ((2038, 2055), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (2045, 2055), True, 'import numpy as np\n'), ((2327, 2348), 'os.path.exists', 'os.path.exists', (['path_'], {}), '(path_)\n', (2341, 2348), False, 'import os\n'), ((2358, 2376), 'os.makedirs', 'os.makedirs', (['path_'], {}), '(path_)\n', (2369, 2376), False, 'import os\n'), ((2858, 2883), 'os.path.join', 'os.path.join', (['path', 'fname'], {}), '(path, fname)\n', (2870, 2883), False, 'import os\n'), ((3448, 3498), 'matplotlib.pyplot.plot', 'plt.plot', (['[i, i]', '[ymin, ymax]', '"""k"""'], {'linewidth': '(0.1)'}), "([i, i], [ymin, ymax], 'k', linewidth=0.1)\n", (3456, 3498), True, 'import matplotlib.pyplot as plt\n'), ((3544, 3594), 'matplotlib.pyplot.plot', 'plt.plot', (['[xmin, xmax]', '[i, i]', '"""k"""'], {'linewidth': '(0.1)'}), "([xmin, xmax], [i, i], 'k', linewidth=0.1)\n", (3552, 3594), True, 'import matplotlib.pyplot as plt\n'), ((3608, 3633), 'os.path.join', 'os.path.join', (['path', 'fname'], {}), 
'(path, fname)\n', (3620, 3633), False, 'import os\n'), ((1195, 1222), 'numpy.random.rand', 'np.random.rand', (['mu.shape[0]'], {}), '(mu.shape[0])\n', (1209, 1222), True, 'import numpy as np\n'), ((920, 935), 'numpy.sqrt', 'np.sqrt', (['nhills'], {}), '(nhills)\n', (927, 935), True, 'import numpy as np\n'), ((954, 969), 'numpy.sqrt', 'np.sqrt', (['nhills'], {}), '(nhills)\n', (961, 969), True, 'import numpy as np\n')]
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
import frida
import sys
import os
vid_index = 0
aud_index = 0
def on_message(message, data):
global vid_index
global aud_index
print(message)
session = frida.attach("avconferenced")
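# Attach to the already-running "avconferenced" process by name, then inject the
# JavaScript instrumentation payload from replay.js via a Frida script.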
with open('replay.js', 'r') as f:
    code = f.read()
script = session.create_script(code)
script.on("message", on_message)
script.load()
print("Press Ctrl-C to quit")
sys.stdin.read()
|
[
"frida.attach",
"sys.stdin.read"
] |
[((427, 456), 'frida.attach', 'frida.attach', (['"""avconferenced"""'], {}), "('avconferenced')\n", (439, 456), False, 'import frida\n'), ((610, 626), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (624, 626), False, 'import sys\n')]
|
# Copyright 2020 <NAME> <<EMAIL>>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ---- END OF LICENSE TEXT ----
import unittest
from datetime import datetime, timezone, timedelta
from ..electricity_provider import ElectricityProvider
class ElectricityProviderTest(unittest.TestCase):
def setUp(self):
config = {
"price_per_kWh_sold": 0.147,
"price_per_kWh_bought": 0.10
}
p = ElectricityProvider()
p.load(config)
self._p = p
self._now = datetime.now(timezone.utc)
self._dt = timedelta(1)
def test_not_loaded(self):
"""
Ensures that an error is raised when trying to use an
electricity provider that hasn't been configured.
This doesn't need the setUp method
"""
p = ElectricityProvider()
when = datetime.now(timezone.utc)
dt = timedelta(1)
with self.assertRaises(RuntimeError):
p.consume_optional(when, dt, 100)
with self.assertRaises(RuntimeError):
p.consume_required(when, dt)
with self.assertRaises(RuntimeError):
p.produce_always(when, dt)
with self.assertRaises(RuntimeError):
p.produce_on_demand(when, dt, 100)
def test_produce_always(self):
self.assertEquals(0, self._p.produce_always(self._now, self._dt))
def test_produce_on_demand(self):
res = self._p.produce_on_demand(self._now, self._dt, 100.0)
self.assertEquals(res[0], 100.0)
self.assertEquals(res[1], 100.0 * 0.147)
def test_consume_required(self):
self.assertEquals(0, self._p.consume_required(self._now, self._dt))
def test_consume_optional(self):
res = self._p.consume_optional(self._now, self._dt, 100.0)
self.assertEquals(res[0], 100.0)
self.assertEquals(res[1], 100.0 * 0.10)
|
[
"datetime.datetime.now",
"datetime.timedelta"
] |
[((1525, 1551), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (1537, 1551), False, 'from datetime import datetime, timezone, timedelta\n'), ((1571, 1583), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (1580, 1583), False, 'from datetime import datetime, timezone, timedelta\n'), ((1852, 1878), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (1864, 1878), False, 'from datetime import datetime, timezone, timedelta\n'), ((1892, 1904), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (1901, 1904), False, 'from datetime import datetime, timezone, timedelta\n')]
|
from datetime import datetime, timedelta
from pyclarify import APIClient
import orchest
import pandas as pd
import numpy as np
from merlion.utils import TimeSeries
from merlion.models.forecast.prophet import Prophet, ProphetConfig
from merlion.transform.base import Identity
def pipeline_data(times, values, new_id,new_name, original_id, original_name):
labels = {"source":["Orchest pipelines"], "original_id":[original_id]}
var_name = "clfy_"+new_id
data = {
"name" : new_name,
"labels" : labels,
"times" : times,
"series" : values,
"kargs" : {"sourceType" : "prediction",
"data-source": ["Orchest"],
"description" : f"Forecast for {original_name}"
}
}
return {var_name : data }
def generate_future_timestamps(n_future, timestamps, start):
deltas = [x-timestamps[0] for x in timestamps]
avg_delta=np.mean(deltas)
future = [(i+1)*avg_delta+start for i in range(n_future)]
return future
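# Illustrative example (hypothetical values, not part of the original pipeline):
# for timestamps [0, 3600, 7200] (seconds) the average delta is 3600, so
# generate_future_timestamps(2, timestamps, start=7200) returns [10800, 14400].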
client = APIClient("./clarify-credentials.json")
inputs = orchest.get_inputs()
invars = [x for x in inputs.keys() if x.startswith("read_config_forecast")]
print(invars)
output_dict={}
for name in invars:
item_id = inputs[name]['item_id']
days = inputs[name]['lag_days']
test_lag = inputs[name]['time_split']
future = inputs[name]['future']
data_params = {
"items": {
"include": True,
"filter": {
"id": {
"$in": [
item_id
]
}
}
},
"data": {
"include": True,
"notBefore": (datetime.now() - timedelta(days=days)).astimezone().isoformat()
}
}
response = client.select_items(data_params)
signal_name = list(response.result.items.values())[0].name
print(f"Name {signal_name} and id {item_id}")
times = response.result.data.times
series = response.result.data.series
df = pd.DataFrame(series)
df.index = [time.replace(tzinfo=None) for time in times]
if len(times) > 0:
tzinfo = times[0].tzinfo
test_data = TimeSeries.from_pd(df[-test_lag:])
train_data = TimeSeries.from_pd(df[0:-test_lag])
config = ProphetConfig(max_forecast_steps=test_lag, add_seasonality="auto", transform=Identity())
model = Prophet(config)
model.train(train_data=train_data)
test_times = test_data.time_stamps
if future > 0:
test_times=test_times+generate_future_timestamps(future, test_data.time_stamps, start=test_data.time_stamps[-1])
test_pred, test_err = model.forecast(time_stamps=test_times)
col = test_pred.names[0]
col_err = test_err.names[0]
forecast_name=col+"_pred"
forecast_name_upper=col+"_upper"
forecast_name_lower=col+"_lower"
forecast_values = test_pred.univariates[col].values
forecast_upper_values= [x+y for x,y in zip(test_pred.univariates[col].values, test_err.univariates[col_err].values)]
forecast_lower_values= [x-y for x,y in zip(test_pred.univariates[col].values, test_err.univariates[col_err].values)]
output_dict.update(pipeline_data(test_pred.time_stamps,forecast_values, forecast_name, f"Forecast {signal_name}", col, signal_name ))
output_dict.update(pipeline_data(test_err.time_stamps,forecast_upper_values, forecast_name_upper, f"Forecast {signal_name} upper bound", col, signal_name ))
output_dict.update(pipeline_data(test_err.time_stamps,forecast_lower_values, forecast_name_lower, f"Forecast {signal_name} lower bound", col, signal_name ))
orchest.output(output_dict, "clfy_dict")
|
[
"pandas.DataFrame",
"orchest.output",
"orchest.get_inputs",
"merlion.utils.TimeSeries.from_pd",
"datetime.datetime.now",
"merlion.models.forecast.prophet.Prophet",
"numpy.mean",
"datetime.timedelta",
"merlion.transform.base.Identity",
"pyclarify.APIClient"
] |
[((1072, 1111), 'pyclarify.APIClient', 'APIClient', (['"""./clarify-credentials.json"""'], {}), "('./clarify-credentials.json')\n", (1081, 1111), False, 'from pyclarify import APIClient\n'), ((1122, 1142), 'orchest.get_inputs', 'orchest.get_inputs', ([], {}), '()\n', (1140, 1142), False, 'import orchest\n'), ((3609, 3649), 'orchest.output', 'orchest.output', (['output_dict', '"""clfy_dict"""'], {}), "(output_dict, 'clfy_dict')\n", (3623, 3649), False, 'import orchest\n'), ((965, 980), 'numpy.mean', 'np.mean', (['deltas'], {}), '(deltas)\n', (972, 980), True, 'import numpy as np\n'), ((2010, 2030), 'pandas.DataFrame', 'pd.DataFrame', (['series'], {}), '(series)\n', (2022, 2030), True, 'import pandas as pd\n'), ((2169, 2203), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['df[-test_lag:]'], {}), '(df[-test_lag:])\n', (2187, 2203), False, 'from merlion.utils import TimeSeries\n'), ((2221, 2256), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['df[0:-test_lag]'], {}), '(df[0:-test_lag])\n', (2239, 2256), False, 'from merlion.utils import TimeSeries\n'), ((2372, 2387), 'merlion.models.forecast.prophet.Prophet', 'Prophet', (['config'], {}), '(config)\n', (2379, 2387), False, 'from merlion.models.forecast.prophet import Prophet, ProphetConfig\n'), ((2347, 2357), 'merlion.transform.base.Identity', 'Identity', ([], {}), '()\n', (2355, 2357), False, 'from merlion.transform.base import Identity\n'), ((1679, 1693), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1691, 1693), False, 'from datetime import datetime, timedelta\n'), ((1696, 1716), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (1705, 1716), False, 'from datetime import datetime, timedelta\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from driver import bag, compare, err, err_regex, partial, uuid
class PythonTestDriverTest(unittest.TestCase):
def compare(self, expected, result, options=None):
self.assertTrue(compare(expected, result, options=options))
def compareFalse(self, expected, result, options=None):
self.assertFalse(compare(expected, result, options=options))
def test_string(self):
# simple
self.compare('a', 'a')
self.compare('á', 'á')
self.compare('something longer\nwith two lines', 'something longer\nwith two lines')
self.compareFalse('a', 'b')
self.compareFalse('a', 1)
self.compareFalse('a', [])
self.compareFalse('a', None)
self.compareFalse('a', ['a'])
self.compareFalse('a', {'a': 1})
def test_array(self):
# simple pass
self.compare([1, 2, 3], [1, 2, 3])
# out of order
self.compareFalse([1, 2, 3], [1, 3, 2])
        # totally mismatched lists
self.compareFalse([1, 2, 3], [3, 4, 5])
# missing items
self.compareFalse([1, 2, 3], [1, 2])
self.compareFalse([1, 2, 3], [1, 3])
# extra items
self.compareFalse([1, 2, 3], [1, 2, 3, 4])
# empty array
self.compare([], [])
self.compareFalse([1], [])
self.compareFalse([], [1])
self.compareFalse([], None)
# strings
self.compare(['a', 'b'], ['a', 'b'])
self.compareFalse(['a', 'c'], ['a', 'b'])
# multiple of a single value
self.compare([1, 2, 2, 3, 3, 3], [1, 2, 2, 3, 3, 3])
self.compareFalse([1, 2, 2, 3, 3, 3], [1, 2, 3])
self.compareFalse([1, 2, 3], [1, 2, 2, 3, 3, 3])
def test_array_partial(self):
'''note that these are all in-order'''
# simple
self.compare(partial([1]), [1, 2, 3])
self.compare(partial([2]), [1, 2, 3])
self.compare(partial([3]), [1, 2, 3])
self.compare(partial([1, 2]), [1, 2, 3])
self.compare(partial([1, 3]), [1, 2, 3])
self.compare(partial([1, 2, 3]), [1, 2, 3])
self.compareFalse(partial([4]), [1, 2, 3])
# ordered
self.compareFalse(partial([3, 2, 1], ordered=True), [1, 2, 3])
self.compareFalse(partial([1, 3, 2], ordered=True), [1, 2, 3])
# empty array
self.compare(partial([]), [1, 2, 3])
        # multiples of a single item
self.compare(partial([1, 2, 2]), [1, 2, 2, 3, 3, 3])
self.compareFalse(partial([1, 2, 2, 2]), [1, 2, 2, 3, 3, 3])
def test_array_unordered(self):
# simple
self.compare(bag([1, 2]), [1, 2])
self.compare(bag([2, 1]), [1, 2])
self.compareFalse(bag([1, 2]), [1, 2, 3])
self.compareFalse(bag([1, 3]), [1, 2, 3])
self.compareFalse(bag([3, 1]), [1, 2, 3])
# empty array
self.compare(bag([]), [])
def test_dict(self):
# simple
self.compare({'a': 1, 'b': 2, 'c': 3}, {'a': 1, 'b': 2, 'c': 3})
self.compare({'a': 1, 'b': 2, 'c': 3}, {'c': 3, 'a': 1, 'b': 2})
self.compareFalse({'a': 1, 'b': 2, 'c': 3}, {'a': 1})
self.compareFalse({'a': 1}, {'a': 1, 'b': 2, 'c': 3})
# empty
self.compare({}, {})
self.compareFalse({}, {'a': 1})
self.compareFalse({'a': 1}, {})
def test_dict_partial(self):
# simple
self.compare(partial({'a': 1}), {'a': 1})
self.compare(partial({'a': 1}), {'a': 1, 'b': 2})
self.compareFalse(partial({'a': 2}), {'a': 1, 'b': 2})
self.compareFalse(partial({'c': 1}), {'a': 1, 'b': 2})
self.compareFalse(partial({'a': 1, 'b': 2}), {'b': 2})
# empty
self.compare(partial({}), {})
self.compare(partial({}), {'a': 1})
self.compareFalse(partial({'a': 1}), {})
def test_compare_dict_in_array(self):
# simple
self.compare([{'a': 1}], [{'a': 1}])
self.compare([{'a': 1, 'b': 2}], [{'a': 1, 'b': 2}])
self.compare([{'a': 1}, {'b': 2}], [{'a': 1}, {'b': 2}])
self.compareFalse([{'a': 1}], [{'a': 1, 'b': 2}])
self.compareFalse([{'a': 2, 'b': 2}], [{'a': 1, 'b': 2}])
self.compareFalse([{'a': 2, 'c': 3}], [{'a': 1, 'b': 2}])
self.compareFalse([{'a': 2, 'c': 3}], [{'a': 1}])
self.compareFalse([{'a': 1}, {'b': 2}], [{'a': 1, 'b': 2}])
# order
self.compareFalse([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
# partial
self.compare(partial([{}]), [{'a': 1, 'b': 2}])
self.compare(partial([{}]), [{'a': 1, 'b': 2}])
self.compare(partial([{'a': 1}]), [{'a': 1, 'b': 2}])
self.compare(partial([{'a': 1, 'b': 2}]), [{'a': 1, 'b': 2}])
self.compare(partial([{'a': 1}, {'b': 2}]), [{'a': 1}, {'b': 2}, {'c': 3}])
self.compareFalse(partial([{'a': 2}]), [{'a': 1, 'b': 2}])
self.compareFalse(partial([{'a': 1, 'b': 2}]), [{'a': 1}])
# partial order
self.compareFalse(partial([{'a': 1}, {'b': 2}], ordered=True), [{'b': 2}, {'a': 1}])
# partial unordered
self.compare(partial([{'a': 1}, {'b': 2}]), [{'b': 2}, {'a': 1}])
self.compare(partial([{'a': 1}, {'b': 2}], ordered=False), [{'b': 2}, {'a': 1}])
def test_compare_partial_items_in_array(self):
self.compare([{'a': 1, 'b': 1}, partial({'a': 2})], [{'a': 1, 'b': 1}, {'a': 2, 'b': 2}])
def test_compare_array_in_dict(self):
pass
def test_exception(self):
# class only
self.compare(KeyError, KeyError())
self.compare(KeyError(), KeyError())
self.compare(err('KeyError'), KeyError())
self.compare(err(KeyError), KeyError())
self.compareFalse(KeyError, NameError())
self.compareFalse(KeyError(), NameError())
self.compareFalse(err('KeyError'), NameError())
self.compareFalse(err(KeyError), NameError())
# subclass
self.compare(LookupError, KeyError())
self.compare(LookupError(), KeyError())
self.compare(err('LookupError'), KeyError())
self.compare(err(LookupError), KeyError())
self.compareFalse(KeyError, LookupError())
self.compareFalse(KeyError(), LookupError())
self.compareFalse(err('KeyError'), LookupError())
self.compareFalse(err(KeyError), LookupError())
# message
self.compare(err(KeyError), KeyError('alpha'))
self.compare(err(KeyError, 'alpha'), KeyError('alpha'))
self.compareFalse(err(KeyError, 'alpha'), KeyError('beta'))
# regex message
self.compare(err(KeyError), KeyError('alpha'))
# regex message with debug/assertion text
self.compare(err_regex(KeyError, 'alpha'), KeyError('alpha'))
self.compare(err_regex(KeyError, 'alp'), KeyError('alpha'))
self.compare(err_regex(KeyError, '.*pha'), KeyError('alpha'))
self.compareFalse(err_regex(KeyError, 'beta'), KeyError('alpha'))
# ToDo: frames (when/if we support them)
def test_compare_uuid(self):
# simple
self.compare(uuid(), '4e9e5bc2-9b11-4143-9aa1-75c10e7a193a')
self.compareFalse(uuid(), '4')
self.compareFalse(uuid(), '*')
self.compareFalse(uuid(), None)
def test_numbers(self):
# simple
self.compare(1, 1)
self.compare(1, 1.0)
self.compare(1.0, 1)
self.compare(1.0, 1.0)
self.compareFalse(1, 2)
self.compareFalse(1, 2.0)
self.compareFalse(1.0, 2)
self.compareFalse(1.0, 2.0)
# precision
precision = {'precision': 0.5}
self.compare(1, 1.4, precision)
self.compare(1.0, 1.4, precision)
self.compareFalse(1, 2, precision)
self.compareFalse(1, 1.6, precision)
self.compareFalse(1.0, 2, precision)
self.compareFalse(1.0, 1.6, precision)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"driver.partial",
"driver.bag",
"driver.err_regex",
"driver.uuid",
"driver.compare",
"driver.err"
] |
[((7980, 7995), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7993, 7995), False, 'import unittest\n'), ((256, 298), 'driver.compare', 'compare', (['expected', 'result'], {'options': 'options'}), '(expected, result, options=options)\n', (263, 298), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((386, 428), 'driver.compare', 'compare', (['expected', 'result'], {'options': 'options'}), '(expected, result, options=options)\n', (393, 428), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((1896, 1908), 'driver.partial', 'partial', (['[1]'], {}), '([1])\n', (1903, 1908), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((1942, 1954), 'driver.partial', 'partial', (['[2]'], {}), '([2])\n', (1949, 1954), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((1988, 2000), 'driver.partial', 'partial', (['[3]'], {}), '([3])\n', (1995, 2000), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((2035, 2050), 'driver.partial', 'partial', (['[1, 2]'], {}), '([1, 2])\n', (2042, 2050), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((2084, 2099), 'driver.partial', 'partial', (['[1, 3]'], {}), '([1, 3])\n', (2091, 2099), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((2134, 2152), 'driver.partial', 'partial', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2141, 2152), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((2192, 2204), 'driver.partial', 'partial', (['[4]'], {}), '([4])\n', (2199, 2204), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((2262, 2294), 'driver.partial', 'partial', (['[3, 2, 1]'], {'ordered': '(True)'}), '([3, 2, 1], ordered=True)\n', (2269, 2294), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((2333, 2365), 'driver.partial', 'partial', (['[1, 3, 2]'], {'ordered': '(True)'}), '([1, 3, 2], ordered=True)\n', (2340, 2365), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((2422, 2433), 'driver.partial', 'partial', (['[]'], {}), '([])\n', (2429, 2433), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((2505, 2523), 'driver.partial', 'partial', (['[1, 2, 2]'], {}), '([1, 2, 2])\n', (2512, 2523), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((2571, 2592), 'driver.partial', 'partial', (['[1, 2, 2, 2]'], {}), '([1, 2, 2, 2])\n', (2578, 2592), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((2689, 2700), 'driver.bag', 'bag', (['[1, 2]'], {}), '([1, 2])\n', (2692, 2700), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((2731, 2742), 'driver.bag', 'bag', (['[2, 1]'], {}), '([2, 1])\n', (2734, 2742), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((2779, 2790), 'driver.bag', 'bag', (['[1, 2]'], {}), '([1, 2])\n', (2782, 2790), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((2829, 2840), 'driver.bag', 'bag', (['[1, 3]'], {}), '([1, 3])\n', (2832, 2840), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((2879, 2890), 'driver.bag', 'bag', (['[3, 1]'], {}), '([3, 1])\n', (2882, 2890), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((2947, 2954), 'driver.bag', 'bag', (['[]'], {}), '([])\n', (2950, 2954), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), 
((3472, 3489), 'driver.partial', 'partial', (["{'a': 1}"], {}), "({'a': 1})\n", (3479, 3489), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((3522, 3539), 'driver.partial', 'partial', (["{'a': 1}"], {}), "({'a': 1})\n", (3529, 3539), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((3586, 3603), 'driver.partial', 'partial', (["{'a': 2}"], {}), "({'a': 2})\n", (3593, 3603), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((3649, 3666), 'driver.partial', 'partial', (["{'c': 1}"], {}), "({'c': 1})\n", (3656, 3666), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((3712, 3737), 'driver.partial', 'partial', (["{'a': 1, 'b': 2}"], {}), "({'a': 1, 'b': 2})\n", (3719, 3737), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((3787, 3798), 'driver.partial', 'partial', (['{}'], {}), '({})\n', (3794, 3798), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((3825, 3836), 'driver.partial', 'partial', (['{}'], {}), '({})\n', (3832, 3836), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((3874, 3891), 'driver.partial', 'partial', (["{'a': 1}"], {}), "({'a': 1})\n", (3881, 3891), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((4572, 4585), 'driver.partial', 'partial', (['[{}]'], {}), '([{}])\n', (4579, 4585), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((4628, 4641), 'driver.partial', 'partial', (['[{}]'], {}), '([{}])\n', (4635, 4641), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((4684, 4703), 'driver.partial', 'partial', (["[{'a': 1}]"], {}), "([{'a': 1}])\n", (4691, 4703), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((4746, 4773), 'driver.partial', 'partial', (["[{'a': 1, 'b': 2}]"], {}), "([{'a': 1, 'b': 2}])\n", (4753, 4773), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((4816, 4845), 'driver.partial', 'partial', (["[{'a': 1}, {'b': 2}]"], {}), "([{'a': 1}, {'b': 2}])\n", (4823, 4845), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((4906, 4925), 'driver.partial', 'partial', (["[{'a': 2}]"], {}), "([{'a': 2}])\n", (4913, 4925), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((4973, 5000), 'driver.partial', 'partial', (["[{'a': 1, 'b': 2}]"], {}), "([{'a': 1, 'b': 2}])\n", (4980, 5000), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((5065, 5108), 'driver.partial', 'partial', (["[{'a': 1}, {'b': 2}]"], {'ordered': '(True)'}), "([{'a': 1}, {'b': 2}], ordered=True)\n", (5072, 5108), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((5182, 5211), 'driver.partial', 'partial', (["[{'a': 1}, {'b': 2}]"], {}), "([{'a': 1}, {'b': 2}])\n", (5189, 5211), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((5256, 5300), 'driver.partial', 'partial', (["[{'a': 1}, {'b': 2}]"], {'ordered': '(False)'}), "([{'a': 1}, {'b': 2}], ordered=False)\n", (5263, 5300), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((5691, 5706), 'driver.err', 'err', (['"""KeyError"""'], {}), "('KeyError')\n", (5694, 5706), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((5741, 5754), 'driver.err', 'err', (['KeyError'], {}), '(KeyError)\n', (5744, 5754), False, 'from driver import bag, compare, err, 
err_regex, partial, uuid\n'), ((5895, 5910), 'driver.err', 'err', (['"""KeyError"""'], {}), "('KeyError')\n", (5898, 5910), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((5951, 5964), 'driver.err', 'err', (['KeyError'], {}), '(KeyError)\n', (5954, 5964), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((6114, 6132), 'driver.err', 'err', (['"""LookupError"""'], {}), "('LookupError')\n", (6117, 6132), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((6167, 6183), 'driver.err', 'err', (['LookupError'], {}), '(LookupError)\n', (6170, 6183), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((6328, 6343), 'driver.err', 'err', (['"""KeyError"""'], {}), "('KeyError')\n", (6331, 6343), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((6386, 6399), 'driver.err', 'err', (['KeyError'], {}), '(KeyError)\n', (6389, 6399), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((6456, 6469), 'driver.err', 'err', (['KeyError'], {}), '(KeyError)\n', (6459, 6469), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((6511, 6533), 'driver.err', 'err', (['KeyError', '"""alpha"""'], {}), "(KeyError, 'alpha')\n", (6514, 6533), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((6581, 6603), 'driver.err', 'err', (['KeyError', '"""alpha"""'], {}), "(KeyError, 'alpha')\n", (6584, 6603), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((6669, 6682), 'driver.err', 'err', (['KeyError'], {}), '(KeyError)\n', (6672, 6682), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((6775, 6803), 'driver.err_regex', 'err_regex', (['KeyError', '"""alpha"""'], {}), "(KeyError, 'alpha')\n", (6784, 6803), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((6845, 6871), 'driver.err_regex', 'err_regex', (['KeyError', '"""alp"""'], {}), "(KeyError, 'alp')\n", (6854, 6871), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((6913, 6941), 'driver.err_regex', 'err_regex', (['KeyError', '""".*pha"""'], {}), "(KeyError, '.*pha')\n", (6922, 6941), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((6989, 7016), 'driver.err_regex', 'err_regex', (['KeyError', '"""beta"""'], {}), "(KeyError, 'beta')\n", (6998, 7016), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((7159, 7165), 'driver.uuid', 'uuid', ([], {}), '()\n', (7163, 7165), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((7233, 7239), 'driver.uuid', 'uuid', ([], {}), '()\n', (7237, 7239), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((7272, 7278), 'driver.uuid', 'uuid', ([], {}), '()\n', (7276, 7278), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((7311, 7317), 'driver.uuid', 'uuid', ([], {}), '()\n', (7315, 7317), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n'), ((5416, 5433), 'driver.partial', 'partial', (["{'a': 2}"], {}), "({'a': 2})\n", (5423, 5433), False, 'from driver import bag, compare, err, err_regex, partial, uuid\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import argparse
import os.path
from collections import namedtuple
from HostsTools import hosts_tools
class HostList(namedtuple('HostList', 'filename set')):
pass
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument('filename_a', type=str, help='First list to compare')
parser.add_argument('filename_b', type=str, help='Second list to compare')
parser.add_argument('--diff', default=False, action='store_true',
help='Show a full diff of the lists')
args = parser.parse_args()
if not (args.filename_a and args.filename_b):
parser.print_help()
exit(1)
validate_filename_args(args)
return args
def validate_filename_args(args) -> None:
if not os.path.isfile(args.filename_a):
raise Exception('Invalid host file: ', args.filename_a)
if not os.path.isfile(args.filename_b):
raise Exception('Invalid host file: ', args.filename_b)
def main() -> None:
args = parse_args()
filename_a = args.filename_a
filename_b = args.filename_b
set_a = hosts_tools.load_domains_from_list(filename_a)
set_b = hosts_tools.load_domains_from_list(filename_b)
list_a = HostList(filename_a, set_a)
list_b = HostList(filename_b, set_b)
print()
print_list_size(list_a, list_b)
print()
print_list_difference(list_a, list_b)
if args.diff:
print()
print_list_diff(list_a, list_b)
def print_list_size(list_a: HostList, list_b: HostList) -> None:
size_a = len(list_a.set)
size_b = len(list_b.set)
difference = size_a - size_b
print('Number of unique host entries: %s' % difference)
print_list_fact(list_a.filename, size_a)
print_list_fact(list_b.filename, size_b)
def print_list_difference(list_a: HostList, list_b: HostList) -> None:
unique_list_a = list_a.set - list_b.set
size_unique_a = len(unique_list_a)
percentage_unique_a = round((size_unique_a / len(list_a.set)) * 100, 2)
unique_list_b = list_b.set - list_a.set
size_unique_b = len(unique_list_b)
percentage_unique_b = round((size_unique_b / len(list_b.set)) * 100, 2)
print('Number of unique hosts not in the other list:')
print_list_fact(list_a.filename, f'{size_unique_a} ({percentage_unique_a}%)')
print_list_fact(list_b.filename, f'{size_unique_b} ({percentage_unique_b}%)')
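# Illustrative reading (hypothetical lists): if list A contains 3 hosts and shares
# 2 of them with list B, then 1 host is unique to A and is reported as "1 (33.33%)".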
def print_list_fact(list_name, fact) -> None:
print('{:<30}{:<30}'.format(list_name, fact))
def print_list_diff(list_a: HostList, list_b: HostList) -> None:
full_set = list_a.set.union(list_b.set)
full_set_sorted = hosts_tools.sort_domains(list(full_set))
print('Lists Diff:')
print('{:<50}{:<50}'.format(list_a.filename, list_b.filename))
for domain in full_set_sorted:
list_a_value = domain if domain in list_a.set else ''
list_b_value = domain if domain in list_b.set else ''
if list_a_value != list_b_value:
print('{:<50}{:<50}'.format(list_a_value, list_b_value))
if __name__ == "__main__":
main()
|
[
"HostsTools.hosts_tools.load_domains_from_list",
"collections.namedtuple",
"argparse.ArgumentParser"
] |
[((177, 215), 'collections.namedtuple', 'namedtuple', (['"""HostList"""', '"""filename set"""'], {}), "('HostList', 'filename set')\n", (187, 215), False, 'from collections import namedtuple\n'), ((272, 297), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (295, 297), False, 'import argparse\n'), ((1145, 1191), 'HostsTools.hosts_tools.load_domains_from_list', 'hosts_tools.load_domains_from_list', (['filename_a'], {}), '(filename_a)\n', (1179, 1191), False, 'from HostsTools import hosts_tools\n'), ((1204, 1250), 'HostsTools.hosts_tools.load_domains_from_list', 'hosts_tools.load_domains_from_list', (['filename_b'], {}), '(filename_b)\n', (1238, 1250), False, 'from HostsTools import hosts_tools\n')]
|
from solari import Leona
from solari.stats import ChampionPickrate, ChampionWinrate, ChampionPickCount, ChampionBanrate, ChampionPresenceRate, ChampionBanCount
def test_champion_pickrate(match_set_2):
l = Leona([
ChampionPickrate()
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
# Samira got picked 5 times out of 20 games
assert stats["Pickrate"].loc[777] == 5/20
def test_champion_winrate(match_set_2):
l = Leona([
ChampionWinrate()
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
# Samira won 4 times out of 5 games
assert stats["Winrate"].loc[777] == 4/5
def test_champion_banrate(match_set_2):
l = Leona([
ChampionBanrate()
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
# Samira was banned in 9 out of 19 games
assert stats["Banrate"].loc[777] == 9/19
def test_champion_banrate_teamwise(match_set_2):
l = Leona([
ChampionBanrate(team_wise=True)
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
    # Samira was banned 10 times across 19 games
assert stats["Banrate"].loc[777] == 10/19
def test_champion_pick_count(match_set_2):
l = Leona([
ChampionPickCount()
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
# Samira was picked 5 times
assert stats["Pick Count"].loc[777] == 5
def test_champion_ban_count(match_set_2):
l = Leona([
ChampionBanCount()
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
# Samira was banned in 9 games
assert stats["Ban Count"].loc[777] == 9
def test_champion_ban_count_teamwise(match_set_2):
l = Leona([
ChampionBanCount(team_wise=True)
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
# Samira was banned 10 times
assert stats["Ban Count"].loc[777] == 10
def test_champion_presence(match_set_2):
l = Leona([
ChampionPickrate(),
ChampionBanrate(),
ChampionPresenceRate()
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
# Samira was banned in 9 games and picked in 5 games out of 20
assert stats["Presence"].loc[777] == (5 + 9) / 20
|
[
"solari.stats.ChampionBanrate",
"solari.stats.ChampionPickCount",
"solari.stats.ChampionPickrate",
"solari.stats.ChampionPresenceRate",
"solari.stats.ChampionWinrate",
"solari.stats.ChampionBanCount"
] |
[((226, 244), 'solari.stats.ChampionPickrate', 'ChampionPickrate', ([], {}), '()\n', (242, 244), False, 'from solari.stats import ChampionPickrate, ChampionWinrate, ChampionPickCount, ChampionBanrate, ChampionPresenceRate, ChampionBanCount\n'), ((510, 527), 'solari.stats.ChampionWinrate', 'ChampionWinrate', ([], {}), '()\n', (525, 527), False, 'from solari.stats import ChampionPickrate, ChampionWinrate, ChampionPickCount, ChampionBanrate, ChampionPresenceRate, ChampionBanCount\n'), ((788, 805), 'solari.stats.ChampionBanrate', 'ChampionBanrate', ([], {}), '()\n', (803, 805), False, 'from solari.stats import ChampionPickrate, ChampionWinrate, ChampionPickCount, ChampionBanrate, ChampionPresenceRate, ChampionBanCount\n'), ((1081, 1112), 'solari.stats.ChampionBanrate', 'ChampionBanrate', ([], {'team_wise': '(True)'}), '(team_wise=True)\n', (1096, 1112), False, 'from solari.stats import ChampionPickrate, ChampionWinrate, ChampionPickCount, ChampionBanrate, ChampionPresenceRate, ChampionBanCount\n'), ((1381, 1400), 'solari.stats.ChampionPickCount', 'ChampionPickCount', ([], {}), '()\n', (1398, 1400), False, 'from solari.stats import ChampionPickrate, ChampionWinrate, ChampionPickCount, ChampionBanrate, ChampionPresenceRate, ChampionBanCount\n'), ((1651, 1669), 'solari.stats.ChampionBanCount', 'ChampionBanCount', ([], {}), '()\n', (1667, 1669), False, 'from solari.stats import ChampionPickrate, ChampionWinrate, ChampionPickCount, ChampionBanrate, ChampionPresenceRate, ChampionBanCount\n'), ((1931, 1963), 'solari.stats.ChampionBanCount', 'ChampionBanCount', ([], {'team_wise': '(True)'}), '(team_wise=True)\n', (1947, 1963), False, 'from solari.stats import ChampionPickrate, ChampionWinrate, ChampionPickCount, ChampionBanrate, ChampionPresenceRate, ChampionBanCount\n'), ((2219, 2237), 'solari.stats.ChampionPickrate', 'ChampionPickrate', ([], {}), '()\n', (2235, 2237), False, 'from solari.stats import ChampionPickrate, ChampionWinrate, ChampionPickCount, ChampionBanrate, ChampionPresenceRate, ChampionBanCount\n'), ((2247, 2264), 'solari.stats.ChampionBanrate', 'ChampionBanrate', ([], {}), '()\n', (2262, 2264), False, 'from solari.stats import ChampionPickrate, ChampionWinrate, ChampionPickCount, ChampionBanrate, ChampionPresenceRate, ChampionBanCount\n'), ((2274, 2296), 'solari.stats.ChampionPresenceRate', 'ChampionPresenceRate', ([], {}), '()\n', (2294, 2296), False, 'from solari.stats import ChampionPickrate, ChampionWinrate, ChampionPickCount, ChampionBanrate, ChampionPresenceRate, ChampionBanCount\n')]
|
"""Gets properties from all decorations a .ini database and generates code to be used in the
UnitTypeDefaultValues library. This code is copied to the clipboard and can be pasted in a text
editor or inside the World Editor.
"""
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
#______________________________________________________________________________________________
from myconfigparser import MyConfigParser, load_unit_data, get_decorations
import pyperclip
keys = ['modelScale','red','green','blue','animProps','maxRoll']
def do(file_path='../../development/table/unit.ini'):
with open(file_path) as f:
unit_data = load_unit_data(f)
result = []
decos = get_decorations(unit_data)
for deco in decos:
for key in keys:
if key in unit_data[deco]:
if key == 'animProps':
result.append(" set UnitTypeDefaultValues('{}').{} = {}".format(deco,
key,
unit_data[deco][key].replace(',', ' ')))
elif key != 'maxRoll' or float(unit_data[deco]['maxRoll']) < 0:
result.append(" set UnitTypeDefaultValues('{}').{} = {}".format(deco,
key,
unit_data[deco][key]))
pyperclip.copy('\n'.join(result))
|
[
"myconfigparser.get_decorations",
"os.path.join",
"myconfigparser.load_unit_data"
] |
[((268, 299), 'os.path.join', 'os.path.join', (['sys.path[0]', '""".."""'], {}), "(sys.path[0], '..')\n", (280, 299), False, 'import os\n'), ((710, 736), 'myconfigparser.get_decorations', 'get_decorations', (['unit_data'], {}), '(unit_data)\n', (725, 736), False, 'from myconfigparser import MyConfigParser, load_unit_data, get_decorations\n'), ((663, 680), 'myconfigparser.load_unit_data', 'load_unit_data', (['f'], {}), '(f)\n', (677, 680), False, 'from myconfigparser import MyConfigParser, load_unit_data, get_decorations\n')]
|
import pytest
from pyecore.ecore import *
from pyecore.utils import DynamicEPackage
@pytest.fixture(scope='module')
def simplemm():
A = EClass('A')
B = EClass('B')
Root = EClass('Root')
pack = EPackage('pack', nsURI='http://pack/1.0', nsPrefix='pack')
pack.eClassifiers.extend([Root, A, B])
return pack
@pytest.fixture(scope='module')
def complexmm():
A = EClass('A')
B = EClass('B')
Root = EClass('Root')
pack = EPackage('pack', nsURI='http://pack/1.0', nsPrefix='pack')
pack.eClassifiers.extend([Root, A, B])
innerpackage = EPackage('inner', nsURI='http://inner', nsPrefix='inner')
C = EClass('C')
D = EClass('D')
innerpackage.eClassifiers.extend([C, D])
pack.eSubpackages.append(innerpackage)
return pack
def test_dynamic_access_eclasses(simplemm):
SimpleMM = DynamicEPackage(simplemm)
assert SimpleMM.A
assert SimpleMM.B
def test_dynamic_access_innerpackage(complexmm):
ComplexMM = DynamicEPackage(complexmm)
assert ComplexMM.A
assert ComplexMM.B
assert ComplexMM.inner.C
assert ComplexMM.inner.D
def test_dynamic_addition_eclasses(complexmm):
ComplexMM = DynamicEPackage(complexmm)
E = EClass('E')
complexmm.eClassifiers.append(E)
assert ComplexMM.E
F = EClass('F')
complexmm.eSubpackages[0].eClassifiers.append(F)
assert ComplexMM.inner.F
G = EClass('G')
H = EClass('H')
complexmm.eClassifiers.extend([G, H])
assert ComplexMM.G
assert ComplexMM.H
def test_dynamic_removal_eclasses(complexmm):
ComplexMM = DynamicEPackage(complexmm)
assert ComplexMM.Root
complexmm.eClassifiers.remove(ComplexMM.Root)
with pytest.raises(AttributeError):
ComplexMM.Root
assert ComplexMM.A
complexmm.eClassifiers[0].delete()
with pytest.raises(AttributeError):
ComplexMM.A
|
[
"pytest.raises",
"pytest.fixture",
"pyecore.utils.DynamicEPackage"
] |
[((87, 117), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (101, 117), False, 'import pytest\n'), ((332, 362), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (346, 362), False, 'import pytest\n'), ((842, 867), 'pyecore.utils.DynamicEPackage', 'DynamicEPackage', (['simplemm'], {}), '(simplemm)\n', (857, 867), False, 'from pyecore.utils import DynamicEPackage\n'), ((979, 1005), 'pyecore.utils.DynamicEPackage', 'DynamicEPackage', (['complexmm'], {}), '(complexmm)\n', (994, 1005), False, 'from pyecore.utils import DynamicEPackage\n'), ((1175, 1201), 'pyecore.utils.DynamicEPackage', 'DynamicEPackage', (['complexmm'], {}), '(complexmm)\n', (1190, 1201), False, 'from pyecore.utils import DynamicEPackage\n'), ((1578, 1604), 'pyecore.utils.DynamicEPackage', 'DynamicEPackage', (['complexmm'], {}), '(complexmm)\n', (1593, 1604), False, 'from pyecore.utils import DynamicEPackage\n'), ((1691, 1720), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (1704, 1720), False, 'import pytest\n'), ((1817, 1846), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (1830, 1846), False, 'import pytest\n')]
|
import tensorflow as tf
import numpy as np
class RBFolution(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size=(1, 3, 3, 1), padding="VALID", strides=(1, 1, 1, 1), name="RBFolution",
dilation_rate=(1,1),
ccs_initializer=tf.keras.initializers.RandomUniform(0,1),
beta_initilizer=tf.keras.initializers.RandomUniform(0,1)):
super(RBFolution, self).__init__(name=name)
self.padding = padding
self.strides = strides
self.filters = filters
self.kernel_size = kernel_size
self.ccs_initializer = ccs_initializer
self.beta_initilizer = beta_initilizer
self.dilation_rate = dilation_rate
def build(self, input_shape):
self.input_s = input_shape
self.output_s = self.compute_output_shape(input_shape)
patch_dim = np.prod(self.kernel_size[1:])
self.ccs_tensor = self.add_weight("cluster_centers", shape=(patch_dim, self.filters), dtype="float32", initializer=self.ccs_initializer)
self.beta = self.add_weight("beta", shape=[self.filters], dtype="float32", initializer=self.beta_initilizer)
def call(self, input, **kwargs):
return tf.reshape(self.__rbfolution(input), self.output_s)
def compute_output_shape(self, input_shape):
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = RBFolution.conv_output_length(
space[i],
self.kernel_size[1:-1][i],
padding=self.padding.lower(),
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim.value)
return (-1,) + tuple(new_space) + (self.filters,)
def __rbfolution(self, inputs):
batch_size = tf.shape(inputs)[0]
patch_dim = np.prod(self.kernel_size[1:])
# Patches extracted from the images (convolution-like).
# shape=[batch_size, new_height, new_width, patch_dim] (i. e. individual
# patches are flattened)
# tf.extract_image_patches "Only supports ksizes across space" -> we change
# kernel_size[3] to 1.
patches = tf.extract_image_patches(
inputs,
ksizes=list(self.kernel_size[:3]) + [1],
strides=self.strides,
rates=[1, 1, 1, 1],
padding=self.padding
)
patches_shape = tf.shape(patches)
new_height = patches_shape[1]
new_width = patches_shape[2]
# shape=[batch_size, num_patches, patch_dim]
reshaped_patches = tf.reshape(patches, [batch_size, -1, patch_dim])
# all_scores[i,j,k] = sum_{l=0}^{patch_dim-1} (
# (ccs_tensor[l,k] - reshaped_patches[i,j,l]) ** 2
# )
# shape=[batch_size, num_patches, filters]
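        # Explanatory note (assumed reading of the code below): this is the batched
        # expansion ||x - c||^2 = ||x||^2 - 2*x.c + ||c||^2 over every patch x and
        # cluster center c; e.g. x=[1, 2], c=[3, 0] gives 5 - 6 + 9 = 8.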
all_scores = (
tf.reduce_sum(tf.square(reshaped_patches), 2, keepdims=True) -
2 * tf.einsum("aij,jk->aik", reshaped_patches, self.ccs_tensor) +
tf.reduce_sum(tf.square(self.ccs_tensor), 0, keepdims=True)
)
res = tf.reshape(
tf.exp(tf.multiply(-self.beta, all_scores)),
[batch_size, new_height, new_width, self.filters],
name="rbfolution_activation"
)
return res
@staticmethod
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
"""Determines output length of a convolution given input length.
Arguments:
input_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full", "causal"
stride: integer.
dilation: dilation rate, integer.
Returns:
The output length (integer).
"""
if input_length is None:
return None
assert padding in {'same', 'valid', 'full', 'causal'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if padding in ['same', 'causal']:
output_length = input_length
elif padding == 'valid':
output_length = input_length - dilated_filter_size + 1
elif padding == 'full':
output_length = input_length + dilated_filter_size - 1
return (output_length + stride - 1) // stride
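# Worked example (illustrative): with input_length=32, filter_size=3, stride=1 and
# padding='valid', the dilated filter size is 3 and the output length is
# 32 - 3 + 1 = 30; with padding='same' and stride=2 it is (32 + 2 - 1) // 2 = 16.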
|
[
"tensorflow.einsum",
"tensorflow.reshape",
"tensorflow.multiply",
"tensorflow.keras.initializers.RandomUniform",
"tensorflow.shape",
"tensorflow.square",
"numpy.prod"
] |
[((273, 314), 'tensorflow.keras.initializers.RandomUniform', 'tf.keras.initializers.RandomUniform', (['(0)', '(1)'], {}), '(0, 1)\n', (308, 314), True, 'import tensorflow as tf\n'), ((348, 389), 'tensorflow.keras.initializers.RandomUniform', 'tf.keras.initializers.RandomUniform', (['(0)', '(1)'], {}), '(0, 1)\n', (383, 389), True, 'import tensorflow as tf\n'), ((866, 895), 'numpy.prod', 'np.prod', (['self.kernel_size[1:]'], {}), '(self.kernel_size[1:])\n', (873, 895), True, 'import numpy as np\n'), ((1863, 1892), 'numpy.prod', 'np.prod', (['self.kernel_size[1:]'], {}), '(self.kernel_size[1:])\n', (1870, 1892), True, 'import numpy as np\n'), ((2438, 2455), 'tensorflow.shape', 'tf.shape', (['patches'], {}), '(patches)\n', (2446, 2455), True, 'import tensorflow as tf\n'), ((2612, 2660), 'tensorflow.reshape', 'tf.reshape', (['patches', '[batch_size, -1, patch_dim]'], {}), '(patches, [batch_size, -1, patch_dim])\n', (2622, 2660), True, 'import tensorflow as tf\n'), ((1823, 1839), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (1831, 1839), True, 'import tensorflow as tf\n'), ((3046, 3072), 'tensorflow.square', 'tf.square', (['self.ccs_tensor'], {}), '(self.ccs_tensor)\n', (3055, 3072), True, 'import tensorflow as tf\n'), ((3148, 3183), 'tensorflow.multiply', 'tf.multiply', (['(-self.beta)', 'all_scores'], {}), '(-self.beta, all_scores)\n', (3159, 3183), True, 'import tensorflow as tf\n'), ((2893, 2920), 'tensorflow.square', 'tf.square', (['reshaped_patches'], {}), '(reshaped_patches)\n', (2902, 2920), True, 'import tensorflow as tf\n'), ((2958, 3017), 'tensorflow.einsum', 'tf.einsum', (['"""aij,jk->aik"""', 'reshaped_patches', 'self.ccs_tensor'], {}), "('aij,jk->aik', reshaped_patches, self.ccs_tensor)\n", (2967, 3017), True, 'import tensorflow as tf\n')]
|
import os
from .preprocessor import Preprocessor
from traitlets import Unicode
from shutil import move
import glob
class CreateFolderStructure(Preprocessor):
directory = Unicode('restructured', help='Subfolder where processed files go')
def __init__(self):
super(CreateFolderStructure, self).__init__()
def mkdir(self, path):
if not os.path.exists(path):
os.makedirs(path)
def preprocess_student(self, student, resources):
self.init_logging('Create Folder Structure')
src = os.path.join(self.src, student)
dst = os.path.join(self.dst, student, resources['assignment'])
self.mkdir(os.path.join(self.dst, student))
move(src, dst)
self.log.info('Moved submission to subfolder {}'.format(resources['assignment']))
self.terminate_logging(os.path.join(self.dst, student, resources['assignment'], self.logname))
return student, resources
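# Resulting layout (illustrative): a submission found at <src>/<student>/ is moved
# to <dst>/<student>/<assignment>/, and the preprocessor log file is written inside
# that assignment subfolder.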
|
[
"os.makedirs",
"os.path.exists",
"traitlets.Unicode",
"shutil.move",
"os.path.join"
] |
[((176, 242), 'traitlets.Unicode', 'Unicode', (['"""restructured"""'], {'help': '"""Subfolder where processed files go"""'}), "('restructured', help='Subfolder where processed files go')\n", (183, 242), False, 'from traitlets import Unicode\n'), ((560, 591), 'os.path.join', 'os.path.join', (['self.src', 'student'], {}), '(self.src, student)\n', (572, 591), False, 'import os\n'), ((606, 662), 'os.path.join', 'os.path.join', (['self.dst', 'student', "resources['assignment']"], {}), "(self.dst, student, resources['assignment'])\n", (618, 662), False, 'import os\n'), ((740, 754), 'shutil.move', 'move', (['src', 'dst'], {}), '(src, dst)\n', (744, 754), False, 'from shutil import move\n'), ((373, 393), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (387, 393), False, 'import os\n'), ((407, 424), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (418, 424), False, 'import os\n'), ((691, 722), 'os.path.join', 'os.path.join', (['self.dst', 'student'], {}), '(self.dst, student)\n', (703, 722), False, 'import os\n'), ((885, 955), 'os.path.join', 'os.path.join', (['self.dst', 'student', "resources['assignment']", 'self.logname'], {}), "(self.dst, student, resources['assignment'], self.logname)\n", (897, 955), False, 'import os\n')]
|
#!/bin/false
# -*- coding: utf-8 -*-
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class Device(Base):
__tablename__ = 'device'
id = Column(Integer, primary_key=True)
name = Column(String(255), unique=True)
bdf = Column(String(255), unique=True)
class Mapping(Base):
__tablename__ = 'mapping'
id = Column(Integer, primary_key=True)
iova = Column(String(255))
phys_addr = Column(String(255))
size = Column(Integer)
device_id = Column(Integer, ForeignKey('device.id'))
device = relationship('Device', backref='mapping')
engine = create_engine('sqlite:///iommu.db')
Base.metadata.create_all(engine)
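# Illustrative usage sketch (not part of the original script; values are hypothetical):
# how a device and one of its IOMMU mappings could be recorded.
if __name__ == '__main__':
    from sqlalchemy.orm import sessionmaker
    Session = sessionmaker(bind=engine)
    session = Session()
    dev = Device(name='nic0', bdf='0000:01:00.0')
    # The relationship kwarg also adds `dev` to the session via cascade.
    session.add(Mapping(iova='0xfff00000', phys_addr='0x80000000', size=4096, device=dev))
    session.commit()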
|
[
"sqlalchemy.String",
"sqlalchemy.ForeignKey",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.orm.relationship",
"sqlalchemy.Column",
"sqlalchemy.create_engine"
] |
[((267, 285), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (283, 285), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((851, 886), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///iommu.db"""'], {}), "('sqlite:///iommu.db')\n", (864, 886), False, 'from sqlalchemy import create_engine\n'), ((356, 389), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (362, 389), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Boolean\n'), ((568, 601), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (574, 601), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Boolean\n'), ((702, 717), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (708, 717), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Boolean\n'), ((799, 840), 'sqlalchemy.orm.relationship', 'relationship', (['"""Device"""'], {'backref': '"""mapping"""'}), "('Device', backref='mapping')\n", (811, 840), False, 'from sqlalchemy.orm import relationship\n'), ((417, 428), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (423, 428), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Boolean\n'), ((470, 481), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (476, 481), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Boolean\n'), ((629, 640), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (635, 640), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Boolean\n'), ((669, 680), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (675, 680), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Boolean\n'), ((754, 777), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""device.id"""'], {}), "('device.id')\n", (764, 777), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Boolean\n')]
|
import pytest
from . import db
from .db import database
from tagtrain import data
def test_unknown_owner(database):
with pytest.raises(data.Group.DoesNotExist):
group = data.by_owner.remove_user_from_group('non-existent', db.GROUP_NAME, 'doesnt-matter')
def test_unknown_group(database):
with pytest.raises(data.Group.DoesNotExist):
group = data.by_owner.remove_user_from_group(db.OWNER_NAME, 'non-existent', 'doesnt-matter')
def test_unknown_member(database):
with pytest.raises(data.Member.DoesNotExist):
group = data.by_owner.remove_user_from_group(db.OWNER_NAME, db.GROUP_NAME, 'non-existent')
def test_good_non_empty(database):
group = data.by_owner.find_group(db.OWNER_NAME, db.GROUP_NAME)
assert group.member_count == 4
assert len(list(group.members)) == 4
group = data.by_owner.remove_user_from_group(db.OWNER_NAME, db.GROUP_NAME, 'one')
assert group.name == db.GROUP_NAME
assert group.reddit_name == db.OWNER_NAME
assert group.member_count == 3
assert len(list(group.members)) == 3
assert group.members[0].reddit_name == 'two'
group = data.by_owner.find_group(db.OWNER_NAME, db.GROUP_NAME)
assert group.member_count == 3
assert len(list(group.members)) == 3
def test_good_empty(database):
group = data.by_owner.find_group(db.OWNER_NAME, db.GROUP_NAME)
assert group.member_count == 4
assert len(list(group.members)) == 4
members_to_delete = [m.reddit_name for m in group.members]
for m in members_to_delete:
group = data.by_owner.remove_user_from_group(db.OWNER_NAME, db.GROUP_NAME, m)
assert group.name == db.GROUP_NAME
assert group.reddit_name == db.OWNER_NAME
assert group.member_count == 0
assert len(list(group.members)) == 0
group = data.by_owner.find_group(db.OWNER_NAME, db.GROUP_NAME)
assert group.member_count == 0
assert len(list(group.members)) == 0
|
[
"pytest.raises",
"tagtrain.data.by_owner.remove_user_from_group",
"tagtrain.data.by_owner.find_group"
] |
[((690, 744), 'tagtrain.data.by_owner.find_group', 'data.by_owner.find_group', (['db.OWNER_NAME', 'db.GROUP_NAME'], {}), '(db.OWNER_NAME, db.GROUP_NAME)\n', (714, 744), False, 'from tagtrain import data\n'), ((835, 908), 'tagtrain.data.by_owner.remove_user_from_group', 'data.by_owner.remove_user_from_group', (['db.OWNER_NAME', 'db.GROUP_NAME', '"""one"""'], {}), "(db.OWNER_NAME, db.GROUP_NAME, 'one')\n", (871, 908), False, 'from tagtrain import data\n'), ((1133, 1187), 'tagtrain.data.by_owner.find_group', 'data.by_owner.find_group', (['db.OWNER_NAME', 'db.GROUP_NAME'], {}), '(db.OWNER_NAME, db.GROUP_NAME)\n', (1157, 1187), False, 'from tagtrain import data\n'), ((1310, 1364), 'tagtrain.data.by_owner.find_group', 'data.by_owner.find_group', (['db.OWNER_NAME', 'db.GROUP_NAME'], {}), '(db.OWNER_NAME, db.GROUP_NAME)\n', (1334, 1364), False, 'from tagtrain import data\n'), ((1800, 1854), 'tagtrain.data.by_owner.find_group', 'data.by_owner.find_group', (['db.OWNER_NAME', 'db.GROUP_NAME'], {}), '(db.OWNER_NAME, db.GROUP_NAME)\n', (1824, 1854), False, 'from tagtrain import data\n'), ((128, 166), 'pytest.raises', 'pytest.raises', (['data.Group.DoesNotExist'], {}), '(data.Group.DoesNotExist)\n', (141, 166), False, 'import pytest\n'), ((184, 272), 'tagtrain.data.by_owner.remove_user_from_group', 'data.by_owner.remove_user_from_group', (['"""non-existent"""', 'db.GROUP_NAME', '"""doesnt-matter"""'], {}), "('non-existent', db.GROUP_NAME,\n 'doesnt-matter')\n", (220, 272), False, 'from tagtrain import data\n'), ((314, 352), 'pytest.raises', 'pytest.raises', (['data.Group.DoesNotExist'], {}), '(data.Group.DoesNotExist)\n', (327, 352), False, 'import pytest\n'), ((370, 458), 'tagtrain.data.by_owner.remove_user_from_group', 'data.by_owner.remove_user_from_group', (['db.OWNER_NAME', '"""non-existent"""', '"""doesnt-matter"""'], {}), "(db.OWNER_NAME, 'non-existent',\n 'doesnt-matter')\n", (406, 458), False, 'from tagtrain import data\n'), ((501, 540), 'pytest.raises', 'pytest.raises', (['data.Member.DoesNotExist'], {}), '(data.Member.DoesNotExist)\n', (514, 540), False, 'import pytest\n'), ((558, 644), 'tagtrain.data.by_owner.remove_user_from_group', 'data.by_owner.remove_user_from_group', (['db.OWNER_NAME', 'db.GROUP_NAME', '"""non-existent"""'], {}), "(db.OWNER_NAME, db.GROUP_NAME,\n 'non-existent')\n", (594, 644), False, 'from tagtrain import data\n'), ((1555, 1624), 'tagtrain.data.by_owner.remove_user_from_group', 'data.by_owner.remove_user_from_group', (['db.OWNER_NAME', 'db.GROUP_NAME', 'm'], {}), '(db.OWNER_NAME, db.GROUP_NAME, m)\n', (1591, 1624), False, 'from tagtrain import data\n')]
|
'''
Action policy methods for sampling actions
Algorithm provides a `calc_pdparam` which takes a state and does a forward pass through its net,
and the pdparam is used to construct an action probability distribution appropriate for the action type indicated by the body.
Then the prob. dist. is used to sample an action.
The default form looks like:
```
ActionPD, pdparam, body = init_action_pd(state, algorithm, body)
action, action_pd = sample_action_pd(ActionPD, pdparam, body)
```
We can also augment pdparam before sampling - as in the case of Boltzmann sampling,
or do epsilon-greedy to use pdparam-sampling or random sampling.
'''
from slm_lab.env.wrapper import LazyFrames
from slm_lab.lib import logger, math_util, util
from torch import distributions
import numpy as np
import pydash as ps
import torch
logger = logger.get_logger(__name__)
# probability distributions constraints for different action types; the first in the list is the default
ACTION_PDS = {
'continuous': ['Normal', 'Beta', 'Gumbel', 'LogNormal'],
'multi_continuous': ['MultivariateNormal'],
'discrete': ['Categorical', 'Argmax'],
'multi_discrete': ['MultiCategorical'],
'multi_binary': ['Bernoulli'],
}
class Argmax(distributions.Categorical):
'''
Special distribution class for argmax sampling, where probability is always 1 for the argmax.
NOTE although argmax is not a sampling distribution, this implementation is for API consistency.
'''
def __init__(self, probs=None, logits=None, validate_args=None):
if probs is not None:
new_probs = torch.zeros_like(probs, dtype=torch.float)
new_probs[torch.argmax(probs, dim=0)] = 1.0
probs = new_probs
elif logits is not None:
new_logits = torch.full_like(logits, -1e8, dtype=torch.float)
max_idx = torch.argmax(logits, dim=0)
new_logits[max_idx] = logits[max_idx]
logits = new_logits
super(Argmax, self).__init__(probs=probs, logits=logits, validate_args=validate_args)
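# Illustrative behaviour (assumed values): Argmax(probs=torch.tensor([0.1, 0.7, 0.2]))
# always samples index 1, since the constructor moves all probability mass onto the argmax.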
class MultiCategorical(distributions.Categorical):
'''MultiCategorical as collection of Categoricals'''
def __init__(self, probs=None, logits=None, validate_args=None):
self.categoricals = []
if probs is None:
probs = [None] * len(logits)
elif logits is None:
logits = [None] * len(probs)
else:
raise ValueError('Either probs or logits must be None')
for sub_probs, sub_logits in zip(probs, logits):
categorical = distributions.Categorical(probs=sub_probs, logits=sub_logits, validate_args=validate_args)
self.categoricals.append(categorical)
@property
def logits(self):
return [cat.logits for cat in self.categoricals]
@property
def probs(self):
return [cat.probs for cat in self.categoricals]
@property
def param_shape(self):
return [cat.param_shape for cat in self.categoricals]
@property
def mean(self):
return torch.stack([cat.mean for cat in self.categoricals])
@property
def variance(self):
return torch.stack([cat.variance for cat in self.categoricals])
def sample(self, sample_shape=torch.Size()):
return torch.stack([cat.sample(sample_shape=sample_shape) for cat in self.categoricals])
def log_prob(self, value):
return torch.stack([cat.log_prob(value[idx]) for idx, cat in enumerate(self.categoricals)])
def entropy(self):
return torch.stack([cat.entropy() for cat in self.categoricals])
def enumerate_support(self):
return [cat.enumerate_support() for cat in self.categoricals]
setattr(distributions, 'Argmax', Argmax)
setattr(distributions, 'MultiCategorical', MultiCategorical)
# base methods
def try_preprocess(state, algorithm, body, append=True):
'''Try calling preprocess as implemented in body's memory to use for net input'''
if isinstance(state, LazyFrames):
state = state.__array__() # from global env preprocessor
if hasattr(body.memory, 'preprocess_state'):
state = body.memory.preprocess_state(state, append=append)
# as float, and always as minibatch for net input
state = torch.from_numpy(state).float().unsqueeze(dim=0)
return state
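# Illustrative shape change (assuming 84x84x4 image states): a single preprocessed
# state of shape (84, 84, 4) leaves try_preprocess as a float tensor of shape
# (1, 84, 84, 4), i.e. a minibatch of one ready for the net.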
def cond_squeeze(out):
    '''Helper to squeeze output depending on whether it is a tensor (discrete pdparam) or a list of tensors (continuous pdparam of loc and scale)'''
if isinstance(out, list):
return [out_t.squeeze(dim=0) for out_t in out]
else:
return out.squeeze(dim=0)
def init_action_pd(state, algorithm, body, append=True):
'''
Build the proper action prob. dist. to use for action sampling.
state is passed through algorithm's net via calc_pdparam, which the algorithm must implement using its proper net.
This will return body, ActionPD and pdparam to allow augmentation, e.g. applying temperature tau to pdparam for boltzmann.
    Then, the outputs must be passed to sample_action_pd(ActionPD, pdparam, body) to sample an action.
@returns {cls, tensor, *} ActionPD, pdparam, body
'''
pdtypes = ACTION_PDS[body.action_type]
assert body.action_pdtype in pdtypes, f'Pdtype {body.action_pdtype} is not compatible/supported with action_type {body.action_type}. Options are: {ACTION_PDS[body.action_type]}'
ActionPD = getattr(distributions, body.action_pdtype)
state = try_preprocess(state, algorithm, body, append=append)
state = state.to(algorithm.net.device)
pdparam = algorithm.calc_pdparam(state, evaluate=False)
return ActionPD, pdparam, body
def sample_action_pd(ActionPD, pdparam, body):
'''
    This uses the outputs from init_action_pd and an optionally augmented pdparam to construct an action_pd for sampling an action
@returns {tensor, distribution} action, action_pd A sampled action, and the prob. dist. used for sampling to enable calculations like kl, entropy, etc. later.
'''
pdparam = cond_squeeze(pdparam)
if body.is_discrete:
action_pd = ActionPD(logits=pdparam)
else: # continuous outputs a list, loc and scale
assert len(pdparam) == 2, pdparam
        # scale (stdev) must be > 0, so apply softplus; skipped for large values where softplus(x) ~= x
if pdparam[1] < 5:
pdparam[1] = torch.log(1 + torch.exp(pdparam[1])) + 1e-8
action_pd = ActionPD(*pdparam)
action = action_pd.sample()
return action, action_pd
# interface action sampling methods
def default(state, algorithm, body):
'''Plain policy by direct sampling using outputs of net as logits and constructing ActionPD as appropriate'''
ActionPD, pdparam, body = init_action_pd(state, algorithm, body)
action, action_pd = sample_action_pd(ActionPD, pdparam, body)
return action, action_pd
def random(state, algorithm, body):
'''Random action sampling that returns the same data format as default(), but without forward pass. Uses gym.space.sample()'''
state = try_preprocess(state, algorithm, body, append=True) # for consistency with init_action_pd inner logic
if body.action_type == 'discrete':
action_pd = distributions.Categorical(logits=torch.ones(body.action_space.high, device=algorithm.net.device))
elif body.action_type == 'continuous':
        # Possibly this should also have a 'device' set
action_pd = distributions.Uniform(
low=torch.tensor(body.action_space.low).float(),
high=torch.tensor(body.action_space.high).float())
elif body.action_type == 'multi_discrete':
action_pd = distributions.Categorical(
logits=torch.ones(body.action_space.high.size, body.action_space.high[0], device=algorithm.net.device))
elif body.action_type == 'multi_continuous':
raise NotImplementedError
elif body.action_type == 'multi_binary':
raise NotImplementedError
else:
raise NotImplementedError
sample = body.action_space.sample()
action = torch.tensor(sample, device=algorithm.net.device)
return action, action_pd
def epsilon_greedy(state, algorithm, body):
'''Epsilon-greedy policy: with probability epsilon, do random action, otherwise do default sampling.'''
epsilon = body.explore_var
if epsilon > np.random.rand():
return random(state, algorithm, body)
else:
return default(state, algorithm, body)
def boltzmann(state, algorithm, body):
'''
Boltzmann policy: adjust pdparam with temperature tau; the higher the more randomness/noise in action.
'''
tau = body.explore_var
ActionPD, pdparam, body = init_action_pd(state, algorithm, body)
pdparam /= tau
action, action_pd = sample_action_pd(ActionPD, pdparam, body)
return action, action_pd
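# Illustrative note (added, not from the original source): dividing the logits by tau
# before the softmax flattens or sharpens the distribution, e.g. for logits [2.0, 1.0, 0.0]:
#   tau = 1.0 -> softmax ~ [0.67, 0.24, 0.09]
#   tau = 5.0 -> softmax ~ [0.40, 0.33, 0.27]  (closer to uniform, i.e. more exploration)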
# multi-body policy with a single forward pass to calc pdparam
def multi_default(states, algorithm, body_list, pdparam):
'''
Apply default policy body-wise
Note, for efficiency, do a single forward pass to calculate pdparam, then call this policy like:
@example
pdparam = self.calc_pdparam(state, evaluate=False)
    action_a, action_pd_a = self.action_policy(states, self, body_list, pdparam)
'''
pdparam = pdparam.squeeze(dim=0)
# assert pdparam has been chunked
assert len(pdparam.shape) > 1 and len(pdparam) == len(body_list), f'pdparam shape: {pdparam.shape}, bodies: {len(body_list)}'
action_list, action_pd_a = [], []
for idx, sub_pdparam in enumerate(pdparam):
body = body_list[idx]
try_preprocess(states[idx], algorithm, body, append=True) # for consistency with init_action_pd inner logic
ActionPD = getattr(distributions, body.action_pdtype)
action, action_pd = sample_action_pd(ActionPD, sub_pdparam, body)
action_list.append(action)
action_pd_a.append(action_pd)
action_a = torch.tensor(action_list, device=algorithm.net.device).unsqueeze(dim=1)
return action_a, action_pd_a
def multi_random(states, algorithm, body_list, pdparam):
'''Apply random policy body-wise.'''
pdparam = pdparam.squeeze(dim=0)
action_list, action_pd_a = [], []
    for idx, body in enumerate(body_list):
action, action_pd = random(states[idx], algorithm, body)
action_list.append(action)
action_pd_a.append(action_pd)
action_a = torch.tensor(action_list, device=algorithm.net.device).unsqueeze(dim=1)
return action_a, action_pd_a
def multi_epsilon_greedy(states, algorithm, body_list, pdparam):
'''Apply epsilon-greedy policy body-wise'''
assert len(pdparam) > 1 and len(pdparam) == len(body_list), f'pdparam shape: {pdparam.shape}, bodies: {len(body_list)}'
action_list, action_pd_a = [], []
for idx, sub_pdparam in enumerate(pdparam):
body = body_list[idx]
epsilon = body.explore_var
if epsilon > np.random.rand():
action, action_pd = random(states[idx], algorithm, body)
else:
try_preprocess(states[idx], algorithm, body, append=True) # for consistency with init_action_pd inner logic
ActionPD = getattr(distributions, body.action_pdtype)
action, action_pd = sample_action_pd(ActionPD, sub_pdparam, body)
action_list.append(action)
action_pd_a.append(action_pd)
action_a = torch.tensor(action_list, device=algorithm.net.device).unsqueeze(dim=1)
return action_a, action_pd_a
def multi_boltzmann(states, algorithm, body_list, pdparam):
'''Apply Boltzmann policy body-wise'''
assert len(pdparam) > 1 and len(pdparam) == len(body_list), f'pdparam shape: {pdparam.shape}, bodies: {len(body_list)}'
action_list, action_pd_a = [], []
for idx, sub_pdparam in enumerate(pdparam):
body = body_list[idx]
try_preprocess(states[idx], algorithm, body, append=True) # for consistency with init_action_pd inner logic
tau = body.explore_var
sub_pdparam /= tau
ActionPD = getattr(distributions, body.action_pdtype)
action, action_pd = sample_action_pd(ActionPD, sub_pdparam, body)
action_list.append(action)
action_pd_a.append(action_pd)
action_a = torch.tensor(action_list, device=algorithm.net.device).unsqueeze(dim=1)
return action_a, action_pd_a
# action policy update methods
class VarScheduler:
'''
Variable scheduler for decaying variables such as explore_var (epsilon, tau) and entropy
e.g. spec
"explore_var_spec": {
"name": "linear_decay",
"start_val": 1.0,
"end_val": 0.1,
"start_step": 0,
"end_step": 800,
},
'''
def __init__(self, var_decay_spec=None):
self._updater_name = 'no_decay' if var_decay_spec is None else var_decay_spec['name']
self._updater = getattr(math_util, self._updater_name)
util.set_attr(self, dict(
start_val=np.nan,
))
util.set_attr(self, var_decay_spec, [
'start_val',
'end_val',
'start_step',
'end_step',
])
if not getattr(self, 'end_val', None):
self.end_val = self.start_val
def update(self, algorithm, clock):
'''Get an updated value for var'''
if (util.in_eval_lab_modes()) or self._updater_name == 'no_decay':
return self.end_val
step = clock.get(clock.max_tick_unit)
val = self._updater(self.start_val, self.end_val, self.start_step, self.end_step, step)
return val
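# Illustrative sketch (added; spec values from the docstring above, `algorithm` and `clock` are assumed handles):
#   scheduler = VarScheduler({'name': 'linear_decay', 'start_val': 1.0, 'end_val': 0.1,
#                             'start_step': 0, 'end_step': 800})
#   body.explore_var = scheduler.update(algorithm, clock)  # decays 1.0 -> 0.1 over 800 steps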
# misc calc methods
def guard_multi_pdparams(pdparams, body):
'''Guard pdparams for multi action'''
action_dim = body.action_dim
is_multi_action = ps.is_iterable(action_dim)
if is_multi_action:
assert ps.is_list(pdparams)
pdparams = [t.clone() for t in pdparams] # clone for grad safety
assert len(pdparams) == len(action_dim), pdparams
# transpose into (batch_size, [action_dims])
pdparams = [list(torch.split(t, action_dim, dim=0)) for t in torch.cat(pdparams, dim=1)]
return pdparams
def calc_log_probs(algorithm, net, body, batch):
'''
Method to calculate log_probs fresh from batch data
Body already stores log_prob from self.net. This is used for PPO where log_probs needs to be recalculated.
'''
states, actions = batch['states'], batch['actions']
action_dim = body.action_dim
is_multi_action = ps.is_iterable(action_dim)
# construct log_probs for each state-action
pdparams = algorithm.calc_pdparam(states, net=net)
pdparams = guard_multi_pdparams(pdparams, body)
assert len(pdparams) == len(states), f'batch_size of pdparams: {len(pdparams)} vs states: {len(states)}'
pdtypes = ACTION_PDS[body.action_type]
ActionPD = getattr(distributions, body.action_pdtype)
log_probs = []
for idx, pdparam in enumerate(pdparams):
if not is_multi_action: # already cloned for multi_action above
pdparam = pdparam.clone() # clone for grad safety
_action, action_pd = sample_action_pd(ActionPD, pdparam, body)
log_probs.append(action_pd.log_prob(actions[idx].float()).sum(dim=0))
log_probs = torch.stack(log_probs)
assert not torch.isnan(log_probs).any(), f'log_probs: {log_probs}, \npdparams: {pdparams} \nactions: {actions}'
logger.debug(f'log_probs: {log_probs}')
return log_probs
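# Context sketch (added, not from the original source): PPO forms an importance ratio between
# the current policy and the policy used at rollout time, which is why log_probs are
# recomputed fresh here, roughly:
#   new_log_probs = calc_log_probs(algorithm, algorithm.net, body, batch)
#   ratio = torch.exp(new_log_probs - old_log_probs)  # old_log_probs assumed stored during rollout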
def update_online_stats(body, state):
'''
Method to calculate the running mean and standard deviation of the state space.
See https://www.johndcook.com/blog/standard_deviation/ for more details
for n >= 1
M_n = M_n-1 + (state - M_n-1) / n
S_n = S_n-1 + (state - M_n-1) * (state - M_n)
variance = S_n / (n - 1)
std_dev = sqrt(variance)
'''
logger.debug(f'mean: {body.state_mean}, std: {body.state_std_dev}, num examples: {body.state_n}')
# Assumes only one state is given
if ('Atari' in util.get_class_name(body.memory)):
assert state.ndim == 3
elif getattr(body.memory, 'raw_state_dim', False):
assert state.size == body.memory.raw_state_dim
else:
assert state.size == body.state_dim or state.shape == body.state_dim
mean = body.state_mean
body.state_n += 1
if np.isnan(mean).any():
assert np.isnan(body.state_std_dev_int)
assert np.isnan(body.state_std_dev)
body.state_mean = state
body.state_std_dev_int = 0
body.state_std_dev = 0
else:
assert body.state_n > 1
body.state_mean = mean + (state - mean) / body.state_n
body.state_std_dev_int = body.state_std_dev_int + (state - mean) * (state - body.state_mean)
body.state_std_dev = np.sqrt(body.state_std_dev_int / (body.state_n - 1))
# Guard against very small std devs
if (body.state_std_dev < 1e-8).any():
body.state_std_dev[np.where(body.state_std_dev < 1e-8)] += 1e-8
logger.debug(f'new mean: {body.state_mean}, new std: {body.state_std_dev}, num examples: {body.state_n}')
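# Worked check of the recurrence above (added, illustrative numbers): for states 1.0, 2.0, 3.0
#   n=1: M=1.0,  S=0.0
#   n=2: M=1.0 + (2-1.0)/2 = 1.5,  S=0.0 + (2-1.0)*(2-1.5) = 0.5
#   n=3: M=1.5 + (3-1.5)/3 = 2.0,  S=0.5 + (3-1.5)*(3-2.0) = 2.0
# giving variance = 2.0/(3-1) = 1.0 and std_dev = 1.0, matching the direct calculation.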
def normalize_state(body, state):
'''
Normalizes one or more states using a running mean and standard deviation
Details of the normalization from Deep RL Bootcamp, L6
https://www.youtube.com/watch?v=8EcdaCk9KaQ&feature=youtu.be
'''
same_shape = False if type(state) == list else state.shape == body.state_mean.shape
has_preprocess = getattr(body.memory, 'preprocess_state', False)
if ('Atari' in util.get_class_name(body.memory)):
# never normalize atari, it has its own normalization step
logger.debug('skipping normalizing for Atari, already handled by preprocess')
return state
elif ('Replay' in util.get_class_name(body.memory)) and has_preprocess:
# normalization handled by preprocess_state function in the memory
logger.debug('skipping normalizing, already handled by preprocess')
return state
elif same_shape:
# if not atari, always normalize the state the first time we see it during act
# if the shape is not transformed in some way
if np.sum(body.state_std_dev) == 0:
return np.clip(state - body.state_mean, -10, 10)
else:
return np.clip((state - body.state_mean) / body.state_std_dev, -10, 10)
else:
# broadcastable sample from an un-normalized memory so we should normalize
logger.debug('normalizing sample from memory')
if np.sum(body.state_std_dev) == 0:
return np.clip(state - body.state_mean, -10, 10)
else:
return np.clip((state - body.state_mean) / body.state_std_dev, -10, 10)
# TODO Not currently used, this will crash for more exotic memory structures
# def unnormalize_state(body, state):
# '''
# Un-normalizes one or more states using a running mean and new_std_dev
# '''
# return state * body.state_mean + body.state_std_dev
def update_online_stats_and_normalize_state(body, state):
'''
Convenience combination function for updating running state mean and std_dev and normalizing the state in one go.
'''
logger.debug(f'state: {state}')
update_online_stats(body, state)
state = normalize_state(body, state)
logger.debug(f'normalized state: {state}')
return state
def normalize_states_and_next_states(body, batch, episodic_flag=None):
'''
Convenience function for normalizing the states and next states in a batch of data
'''
logger.debug(f'states: {batch["states"]}')
logger.debug(f'next states: {batch["next_states"]}')
episodic = episodic_flag if episodic_flag is not None else body.memory.is_episodic
logger.debug(f'Episodic: {episodic}, episodic_flag: {episodic_flag}, body.memory: {body.memory.is_episodic}')
if episodic:
normalized = []
for epi in batch['states']:
normalized.append(normalize_state(body, epi))
batch['states'] = normalized
normalized = []
for epi in batch['next_states']:
normalized.append(normalize_state(body, epi))
batch['next_states'] = normalized
else:
batch['states'] = normalize_state(body, batch['states'])
batch['next_states'] = normalize_state(body, batch['next_states'])
logger.debug(f'normalized states: {batch["states"]}')
logger.debug(f'normalized next states: {batch["next_states"]}')
return batch
|
[
"slm_lab.lib.util.get_class_name",
"torch.distributions.Categorical",
"slm_lab.lib.logger.debug",
"numpy.sum",
"torch.argmax",
"torch.cat",
"numpy.isnan",
"numpy.clip",
"slm_lab.lib.util.set_attr",
"torch.isnan",
"torch.ones",
"pydash.is_list",
"slm_lab.lib.logger.get_logger",
"torch.exp",
"torch.zeros_like",
"torch.split",
"torch.Size",
"torch.from_numpy",
"torch.full_like",
"torch.stack",
"slm_lab.lib.util.in_eval_lab_modes",
"pydash.is_iterable",
"numpy.where",
"numpy.random.rand",
"torch.tensor",
"numpy.sqrt"
] |
[((824, 851), 'slm_lab.lib.logger.get_logger', 'logger.get_logger', (['__name__'], {}), '(__name__)\n', (841, 851), False, 'from slm_lab.lib import logger, math_util, util\n'), ((7960, 8009), 'torch.tensor', 'torch.tensor', (['sample'], {'device': 'algorithm.net.device'}), '(sample, device=algorithm.net.device)\n', (7972, 8009), False, 'import torch\n'), ((13584, 13610), 'pydash.is_iterable', 'ps.is_iterable', (['action_dim'], {}), '(action_dim)\n', (13598, 13610), True, 'import pydash as ps\n'), ((14318, 14344), 'pydash.is_iterable', 'ps.is_iterable', (['action_dim'], {}), '(action_dim)\n', (14332, 14344), True, 'import pydash as ps\n'), ((15078, 15100), 'torch.stack', 'torch.stack', (['log_probs'], {}), '(log_probs)\n', (15089, 15100), False, 'import torch\n'), ((15221, 15260), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""log_probs: {log_probs}"""'], {}), "(f'log_probs: {log_probs}')\n", (15233, 15260), False, 'from slm_lab.lib import logger, math_util, util\n'), ((15679, 15786), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""mean: {body.state_mean}, std: {body.state_std_dev}, num examples: {body.state_n}"""'], {}), "(\n f'mean: {body.state_mean}, std: {body.state_std_dev}, num examples: {body.state_n}'\n )\n", (15691, 15786), False, 'from slm_lab.lib import logger, math_util, util\n'), ((16823, 16938), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""new mean: {body.state_mean}, new std: {body.state_std_dev}, num examples: {body.state_n}"""'], {}), "(\n f'new mean: {body.state_mean}, new std: {body.state_std_dev}, num examples: {body.state_n}'\n )\n", (16835, 16938), False, 'from slm_lab.lib import logger, math_util, util\n'), ((19001, 19032), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""state: {state}"""'], {}), "(f'state: {state}')\n", (19013, 19032), False, 'from slm_lab.lib import logger, math_util, util\n'), ((19115, 19157), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""normalized state: {state}"""'], {}), "(f'normalized state: {state}')\n", (19127, 19157), False, 'from slm_lab.lib import logger, math_util, util\n'), ((19355, 19397), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""states: {batch[\'states\']}"""'], {}), '(f"states: {batch[\'states\']}")\n', (19367, 19397), False, 'from slm_lab.lib import logger, math_util, util\n'), ((19402, 19454), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""next states: {batch[\'next_states\']}"""'], {}), '(f"next states: {batch[\'next_states\']}")\n', (19414, 19454), False, 'from slm_lab.lib import logger, math_util, util\n'), ((19546, 19665), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""Episodic: {episodic}, episodic_flag: {episodic_flag}, body.memory: {body.memory.is_episodic}"""'], {}), "(\n f'Episodic: {episodic}, episodic_flag: {episodic_flag}, body.memory: {body.memory.is_episodic}'\n )\n", (19558, 19665), False, 'from slm_lab.lib import logger, math_util, util\n'), ((20147, 20200), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""normalized states: {batch[\'states\']}"""'], {}), '(f"normalized states: {batch[\'states\']}")\n', (20159, 20200), False, 'from slm_lab.lib import logger, math_util, util\n'), ((20205, 20268), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""normalized next states: {batch[\'next_states\']}"""'], {}), '(f"normalized next states: {batch[\'next_states\']}")\n', (20217, 20268), False, 'from slm_lab.lib import logger, math_util, util\n'), ((3047, 3099), 'torch.stack', 'torch.stack', (['[cat.mean for cat in self.categoricals]'], {}), '([cat.mean for cat in 
self.categoricals])\n', (3058, 3099), False, 'import torch\n'), ((3154, 3210), 'torch.stack', 'torch.stack', (['[cat.variance for cat in self.categoricals]'], {}), '([cat.variance for cat in self.categoricals])\n', (3165, 3210), False, 'import torch\n'), ((3246, 3258), 'torch.Size', 'torch.Size', ([], {}), '()\n', (3256, 3258), False, 'import torch\n'), ((8241, 8257), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8255, 8257), True, 'import numpy as np\n'), ((12834, 12925), 'slm_lab.lib.util.set_attr', 'util.set_attr', (['self', 'var_decay_spec', "['start_val', 'end_val', 'start_step', 'end_step']"], {}), "(self, var_decay_spec, ['start_val', 'end_val', 'start_step',\n 'end_step'])\n", (12847, 12925), False, 'from slm_lab.lib import logger, math_util, util\n'), ((13650, 13670), 'pydash.is_list', 'ps.is_list', (['pdparams'], {}), '(pdparams)\n', (13660, 13670), True, 'import pydash as ps\n'), ((15834, 15866), 'slm_lab.lib.util.get_class_name', 'util.get_class_name', (['body.memory'], {}), '(body.memory)\n', (15853, 15866), False, 'from slm_lab.lib import logger, math_util, util\n'), ((16190, 16222), 'numpy.isnan', 'np.isnan', (['body.state_std_dev_int'], {}), '(body.state_std_dev_int)\n', (16198, 16222), True, 'import numpy as np\n'), ((16238, 16266), 'numpy.isnan', 'np.isnan', (['body.state_std_dev'], {}), '(body.state_std_dev)\n', (16246, 16266), True, 'import numpy as np\n'), ((16600, 16652), 'numpy.sqrt', 'np.sqrt', (['(body.state_std_dev_int / (body.state_n - 1))'], {}), '(body.state_std_dev_int / (body.state_n - 1))\n', (16607, 16652), True, 'import numpy as np\n'), ((17359, 17391), 'slm_lab.lib.util.get_class_name', 'util.get_class_name', (['body.memory'], {}), '(body.memory)\n', (17378, 17391), False, 'from slm_lab.lib import logger, math_util, util\n'), ((17469, 17546), 'slm_lab.lib.logger.debug', 'logger.debug', (['"""skipping normalizing for Atari, already handled by preprocess"""'], {}), "('skipping normalizing for Atari, already handled by preprocess')\n", (17481, 17546), False, 'from slm_lab.lib import logger, math_util, util\n'), ((1589, 1631), 'torch.zeros_like', 'torch.zeros_like', (['probs'], {'dtype': 'torch.float'}), '(probs, dtype=torch.float)\n', (1605, 1631), False, 'import torch\n'), ((2566, 2661), 'torch.distributions.Categorical', 'distributions.Categorical', ([], {'probs': 'sub_probs', 'logits': 'sub_logits', 'validate_args': 'validate_args'}), '(probs=sub_probs, logits=sub_logits, validate_args\n =validate_args)\n', (2591, 2661), False, 'from torch import distributions\n'), ((9816, 9870), 'torch.tensor', 'torch.tensor', (['action_list'], {'device': 'algorithm.net.device'}), '(action_list, device=algorithm.net.device)\n', (9828, 9870), False, 'import torch\n'), ((10281, 10335), 'torch.tensor', 'torch.tensor', (['action_list'], {'device': 'algorithm.net.device'}), '(action_list, device=algorithm.net.device)\n', (10293, 10335), False, 'import torch\n'), ((10797, 10813), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (10811, 10813), True, 'import numpy as np\n'), ((11251, 11305), 'torch.tensor', 'torch.tensor', (['action_list'], {'device': 'algorithm.net.device'}), '(action_list, device=algorithm.net.device)\n', (11263, 11305), False, 'import torch\n'), ((12100, 12154), 'torch.tensor', 'torch.tensor', (['action_list'], {'device': 'algorithm.net.device'}), '(action_list, device=algorithm.net.device)\n', (12112, 12154), False, 'import torch\n'), ((13166, 13190), 'slm_lab.lib.util.in_eval_lab_modes', 'util.in_eval_lab_modes', ([], {}), '()\n', 
(13188, 13190), False, 'from slm_lab.lib import logger, math_util, util\n'), ((16153, 16167), 'numpy.isnan', 'np.isnan', (['mean'], {}), '(mean)\n', (16161, 16167), True, 'import numpy as np\n'), ((17727, 17794), 'slm_lab.lib.logger.debug', 'logger.debug', (['"""skipping normalizing, already handled by preprocess"""'], {}), "('skipping normalizing, already handled by preprocess')\n", (17739, 17794), False, 'from slm_lab.lib import logger, math_util, util\n'), ((1654, 1680), 'torch.argmax', 'torch.argmax', (['probs'], {'dim': '(0)'}), '(probs, dim=0)\n', (1666, 1680), False, 'import torch\n'), ((1776, 1832), 'torch.full_like', 'torch.full_like', (['logits', '(-100000000.0)'], {'dtype': 'torch.float'}), '(logits, -100000000.0, dtype=torch.float)\n', (1791, 1832), False, 'import torch\n'), ((1847, 1874), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(0)'}), '(logits, dim=0)\n', (1859, 1874), False, 'import torch\n'), ((7160, 7223), 'torch.ones', 'torch.ones', (['body.action_space.high'], {'device': 'algorithm.net.device'}), '(body.action_space.high, device=algorithm.net.device)\n', (7170, 7223), False, 'import torch\n'), ((13881, 13914), 'torch.split', 'torch.split', (['t', 'action_dim'], {'dim': '(0)'}), '(t, action_dim, dim=0)\n', (13892, 13914), False, 'import torch\n'), ((13925, 13951), 'torch.cat', 'torch.cat', (['pdparams'], {'dim': '(1)'}), '(pdparams, dim=1)\n', (13934, 13951), False, 'import torch\n'), ((15116, 15138), 'torch.isnan', 'torch.isnan', (['log_probs'], {}), '(log_probs)\n', (15127, 15138), False, 'import torch\n'), ((16774, 16810), 'numpy.where', 'np.where', (['(body.state_std_dev < 1e-08)'], {}), '(body.state_std_dev < 1e-08)\n', (16782, 16810), True, 'import numpy as np\n'), ((17590, 17622), 'slm_lab.lib.util.get_class_name', 'util.get_class_name', (['body.memory'], {}), '(body.memory)\n', (17609, 17622), False, 'from slm_lab.lib import logger, math_util, util\n'), ((18282, 18328), 'slm_lab.lib.logger.debug', 'logger.debug', (['"""normalizing sample from memory"""'], {}), "('normalizing sample from memory')\n", (18294, 18328), False, 'from slm_lab.lib import logger, math_util, util\n'), ((4242, 4265), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (4258, 4265), False, 'import torch\n'), ((17989, 18015), 'numpy.sum', 'np.sum', (['body.state_std_dev'], {}), '(body.state_std_dev)\n', (17995, 18015), True, 'import numpy as np\n'), ((18041, 18082), 'numpy.clip', 'np.clip', (['(state - body.state_mean)', '(-10)', '(10)'], {}), '(state - body.state_mean, -10, 10)\n', (18048, 18082), True, 'import numpy as np\n'), ((18116, 18180), 'numpy.clip', 'np.clip', (['((state - body.state_mean) / body.state_std_dev)', '(-10)', '(10)'], {}), '((state - body.state_mean) / body.state_std_dev, -10, 10)\n', (18123, 18180), True, 'import numpy as np\n'), ((18340, 18366), 'numpy.sum', 'np.sum', (['body.state_std_dev'], {}), '(body.state_std_dev)\n', (18346, 18366), True, 'import numpy as np\n'), ((18392, 18433), 'numpy.clip', 'np.clip', (['(state - body.state_mean)', '(-10)', '(10)'], {}), '(state - body.state_mean, -10, 10)\n', (18399, 18433), True, 'import numpy as np\n'), ((18467, 18531), 'numpy.clip', 'np.clip', (['((state - body.state_mean) / body.state_std_dev)', '(-10)', '(10)'], {}), '((state - body.state_mean) / body.state_std_dev, -10, 10)\n', (18474, 18531), True, 'import numpy as np\n'), ((6299, 6320), 'torch.exp', 'torch.exp', (['pdparam[1]'], {}), '(pdparam[1])\n', (6308, 6320), False, 'import torch\n'), ((7604, 7704), 'torch.ones', 'torch.ones', 
(['body.action_space.high.size', 'body.action_space.high[0]'], {'device': 'algorithm.net.device'}), '(body.action_space.high.size, body.action_space.high[0], device=\n algorithm.net.device)\n', (7614, 7704), False, 'import torch\n'), ((7383, 7418), 'torch.tensor', 'torch.tensor', (['body.action_space.low'], {}), '(body.action_space.low)\n', (7395, 7418), False, 'import torch\n'), ((7445, 7481), 'torch.tensor', 'torch.tensor', (['body.action_space.high'], {}), '(body.action_space.high)\n', (7457, 7481), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-29 08:39
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('warehouse', '0002_domainstagingtable_groupstagingtable'),
]
operations = [
migrations.CreateModel(
name='UserStagingTable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.CharField(max_length=255)),
('username', models.CharField(max_length=150)),
('first_name', models.CharField(max_length=30, null=True)),
('last_name', models.CharField(max_length=30, null=True)),
('email', models.CharField(max_length=255, null=True)),
('doc_type', models.CharField(max_length=100)),
('base_doc', models.CharField(max_length=100)),
('is_active', models.BooleanField()),
('is_staff', models.BooleanField()),
('is_superuser', models.BooleanField()),
('last_login', models.DateTimeField(null=True)),
('date_joined', models.DateTimeField()),
('user_last_modified', models.DateTimeField(null=True)),
],
options={
'abstract': False,
},
),
]
|
[
"django.db.models.CharField",
"django.db.models.DateTimeField",
"django.db.models.BooleanField",
"django.db.models.AutoField"
] |
[((464, 557), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (480, 557), False, 'from django.db import migrations, models\n'), ((584, 616), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (600, 616), False, 'from django.db import migrations, models\n'), ((648, 680), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (664, 680), False, 'from django.db import migrations, models\n'), ((714, 756), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'null': '(True)'}), '(max_length=30, null=True)\n', (730, 756), False, 'from django.db import migrations, models\n'), ((789, 831), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'null': '(True)'}), '(max_length=30, null=True)\n', (805, 831), False, 'from django.db import migrations, models\n'), ((860, 903), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)'}), '(max_length=255, null=True)\n', (876, 903), False, 'from django.db import migrations, models\n'), ((935, 967), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (951, 967), False, 'from django.db import migrations, models\n'), ((999, 1031), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1015, 1031), False, 'from django.db import migrations, models\n'), ((1064, 1085), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (1083, 1085), False, 'from django.db import migrations, models\n'), ((1117, 1138), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (1136, 1138), False, 'from django.db import migrations, models\n'), ((1174, 1195), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (1193, 1195), False, 'from django.db import migrations, models\n'), ((1229, 1260), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (1249, 1260), False, 'from django.db import migrations, models\n'), ((1295, 1317), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1315, 1317), False, 'from django.db import migrations, models\n'), ((1359, 1390), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (1379, 1390), False, 'from django.db import migrations, models\n')]
|
"""Setup File."""
from pathlib import Path
from setuptools import setup
from setuptools.command.install import install
# ✓ PACKAGE_NAME = 'common_app'
# ✓ PACKAGE_NAME = 'cn_smtp_sink_server'
# ✓ PACKAGE_NAME = 'common_bootstrap'
# ✓ PACKAGE_NAME = 'common_dash'
# ✓ PACKAGE_NAME = 'common_img'
# ✓ PACKAGE_NAME = 'common_inst'
# ✓ PACKAGE_NAME = 'common_notifier'
# ✓ PACKAGE_NAME = 'common_prstub'
# ✓ PACKAGE_NAME = 'common_tracker'
PACKAGE_NAME = None
"""Modify the package name here which is to be seen on PyPi."""
VERSION = '0.0.0a1'
AUTHOR = '<NAME>'
AUTHOR_EMAIL = '<EMAIL>'
package_init = Path(PACKAGE_NAME).resolve() / '__init__.py'
package_init.parent.mkdir(exist_ok=True)
package_init.write_text('"""Do nothing."""\n')
# --------------------------------------------------------------------------------------
class WrongPackageInstalledError(RuntimeError):
"""More specific error."""
pass
class RaiseErrorPreInstall(install):
"""Customized setuptools install command - prints a friendly greeting."""
def run(self):
raise WrongPackageInstalledError(f"""
\n\n
'{PACKAGE_NAME}' was downloaded from the public pypi.org repository, but is only available on an internal repository
Please update your installer's configuration and download from the proper index-url
\n\n
""")
if __name__ == '__main__':
setup(
name=PACKAGE_NAME,
version=VERSION,
packages=[PACKAGE_NAME],
        description='Reserved package name',
        long_description=Path('README.md').read_text(),
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url='https://github.com/KyleKing/not-on-pypi',
        license='Unlicense',
classifiers=['License :: Public Domain'],
cmdclass={
'install': RaiseErrorPreInstall,
},
# repository = 'https://github.com/KyleKing/not-on-pypi',
# documentation = 'https://github.com/KyleKing/not-on-pypi',
# 'Bug Tracker' = 'https://github.com/KyleKing/not-on-pypi/issues',
# include = [ 'LICENSE.md',],
# scripts = [],
)
|
[
"pathlib.Path"
] |
[((603, 621), 'pathlib.Path', 'Path', (['PACKAGE_NAME'], {}), '(PACKAGE_NAME)\n', (607, 621), False, 'from pathlib import Path\n'), ((1517, 1534), 'pathlib.Path', 'Path', (['"""README.md"""'], {}), "('README.md')\n", (1521, 1534), False, 'from pathlib import Path\n')]
|
from django.urls import path, re_path
from . import views
urlpatterns = [
re_path(r'^$', views.index, name='homepage'),
re_path(r'^ajax/subscription/$', views.subscription, name='subscription'),
re_path(r'^search/', views.search_results, name='search_results'),
re_path(r'^accomodation/(\d+)', views.accomodation, name='accomodation'),
re_path(r'^new/accomodation$', views.new_accom, name='new_accomodation'),
re_path(r'^profile/(\d+)$', views.profile, name='profile'),
re_path(r'^edit/profile', views.edit_profile, name='edit_profile'),
re_path(r'^api/accomodations$', views.AccomodationList.as_view()),
re_path(r'api/accomodation/accom-id/(\d+)$',
views.AccomodationDetails.as_view()),
re_path(r'^api/profiles$', views.ProfileList.as_view()),
re_path(r'^api/profile/profile-id/(\d+)$', views.ProfileDetails.as_view()),
]
|
[
"django.urls.re_path"
] |
[((79, 122), 'django.urls.re_path', 're_path', (['"""^$"""', 'views.index'], {'name': '"""homepage"""'}), "('^$', views.index, name='homepage')\n", (86, 122), False, 'from django.urls import path, re_path\n'), ((129, 201), 'django.urls.re_path', 're_path', (['"""^ajax/subscription/$"""', 'views.subscription'], {'name': '"""subscription"""'}), "('^ajax/subscription/$', views.subscription, name='subscription')\n", (136, 201), False, 'from django.urls import path, re_path\n'), ((208, 272), 'django.urls.re_path', 're_path', (['"""^search/"""', 'views.search_results'], {'name': '"""search_results"""'}), "('^search/', views.search_results, name='search_results')\n", (215, 272), False, 'from django.urls import path, re_path\n'), ((279, 351), 'django.urls.re_path', 're_path', (['"""^accomodation/(\\\\d+)"""', 'views.accomodation'], {'name': '"""accomodation"""'}), "('^accomodation/(\\\\d+)', views.accomodation, name='accomodation')\n", (286, 351), False, 'from django.urls import path, re_path\n'), ((357, 428), 'django.urls.re_path', 're_path', (['"""^new/accomodation$"""', 'views.new_accom'], {'name': '"""new_accomodation"""'}), "('^new/accomodation$', views.new_accom, name='new_accomodation')\n", (364, 428), False, 'from django.urls import path, re_path\n'), ((435, 493), 'django.urls.re_path', 're_path', (['"""^profile/(\\\\d+)$"""', 'views.profile'], {'name': '"""profile"""'}), "('^profile/(\\\\d+)$', views.profile, name='profile')\n", (442, 493), False, 'from django.urls import path, re_path\n'), ((499, 564), 'django.urls.re_path', 're_path', (['"""^edit/profile"""', 'views.edit_profile'], {'name': '"""edit_profile"""'}), "('^edit/profile', views.edit_profile, name='edit_profile')\n", (506, 564), False, 'from django.urls import path, re_path\n')]
|
import praw
import elasticsearch
import string
import random
from nltk import word_tokenize, pos_tag
class GradiusNlp:
def __init__(self):
self.elastic = elasticsearch.Elasticsearch()
def get_reddit_data(self, subreddit, post_count):
r = praw.Reddit(user_agent="https://github.com/gradiuscypher/internetmademe")
sub = r.get_subreddit(subreddit)
top = sub.get_top_from_day(limit=post_count)
#process post_count top posts
for post in top:
comments = post.comments
#process top comment
for c in comments:
if type(c) is praw.objects.Comment:
tokens = word_tokenize(c.body)
tagged_tokens = pos_tag(tokens)
for tag in tagged_tokens:
print(tag)
if not tag[1] in string.punctuation:
es_index = tag[1].lower()
q_txt = 'word: ' + '"' + tag[0].lower() + '"'
if self.elastic.indices.exists(es_index):
if not (self.elastic.search(index=es_index, q=q_txt)['hits']['total'] > 0):
self.elastic.index(index=es_index, doc_type='word', body={'word': tag[0].lower()})
else:
self.elastic.index(index=es_index, doc_type='word', body={'word': tag[0].lower()})
#process comment replies one tree down
for r in c.replies:
if type(r) is praw.objects.Comment:
tokens = word_tokenize(r.body)
tagged_tokens = pos_tag(tokens)
for tag in tagged_tokens:
print(tag)
if not tag[1] in string.punctuation:
es_index = tag[1].lower()
q_txt = 'word: ' + '"' + tag[0].lower() + '"'
if self.elastic.indices.exists(es_index):
if not (self.elastic.search(index=es_index, q=q_txt)['hits']['total'] > 0):
self.elastic.index(index=es_index, doc_type='word', body={'word': tag[0].lower()})
else:
self.elastic.index(index=es_index, doc_type='word', body={'word': tag[0].lower()})
def reform_sentence(self, sentence, force_replace_count=1):
sentence_tokens = word_tokenize(sentence)
replace_count = random.randint(force_replace_count, len(sentence_tokens))
print(replace_count)
#Ensure at least force_replace_count words are being replaced
for x in range(0, replace_count):
tagged_tokens = pos_tag(sentence_tokens)
choice = random.choice(tagged_tokens)
while choice[0] in string.punctuation:
choice = random.choice(tagged_tokens)
new_word = self.replace_pos(choice)
sentence_tokens[sentence_tokens.index(choice[0])] = new_word
return ' '.join(sentence_tokens)
def replace_pos(self, pos_tuple):
es_index = pos_tuple[1].lower()
results = self.elastic.search(index=es_index, body={"query": {
"function_score": {
"query": {"wildcard": {"word": "*"}},
"random_score": {}
}}})
return random.choice(results['hits']['hits'])['_source']['word']
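# Hedged usage sketch (added; assumes a local Elasticsearch instance and NLTK tokenizer/tagger data):
#   nlp = GradiusNlp()
#   nlp.get_reddit_data('python', post_count=10)        # index words from top posts by POS tag
#   nlp.reform_sentence('The quick brown fox jumps.')   # replace random words with same-POS words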
|
[
"elasticsearch.Elasticsearch",
"random.choice",
"nltk.pos_tag",
"praw.Reddit",
"nltk.word_tokenize"
] |
[((169, 198), 'elasticsearch.Elasticsearch', 'elasticsearch.Elasticsearch', ([], {}), '()\n', (196, 198), False, 'import elasticsearch\n'), ((266, 339), 'praw.Reddit', 'praw.Reddit', ([], {'user_agent': '"""https://github.com/gradiuscypher/internetmademe"""'}), "(user_agent='https://github.com/gradiuscypher/internetmademe')\n", (277, 339), False, 'import praw\n'), ((2640, 2663), 'nltk.word_tokenize', 'word_tokenize', (['sentence'], {}), '(sentence)\n', (2653, 2663), False, 'from nltk import word_tokenize, pos_tag\n'), ((2917, 2941), 'nltk.pos_tag', 'pos_tag', (['sentence_tokens'], {}), '(sentence_tokens)\n', (2924, 2941), False, 'from nltk import word_tokenize, pos_tag\n'), ((2963, 2991), 'random.choice', 'random.choice', (['tagged_tokens'], {}), '(tagged_tokens)\n', (2976, 2991), False, 'import random\n'), ((3069, 3097), 'random.choice', 'random.choice', (['tagged_tokens'], {}), '(tagged_tokens)\n', (3082, 3097), False, 'import random\n'), ((3566, 3604), 'random.choice', 'random.choice', (["results['hits']['hits']"], {}), "(results['hits']['hits'])\n", (3579, 3604), False, 'import random\n'), ((681, 702), 'nltk.word_tokenize', 'word_tokenize', (['c.body'], {}), '(c.body)\n', (694, 702), False, 'from nltk import word_tokenize, pos_tag\n'), ((739, 754), 'nltk.pos_tag', 'pos_tag', (['tokens'], {}), '(tokens)\n', (746, 754), False, 'from nltk import word_tokenize, pos_tag\n'), ((1669, 1690), 'nltk.word_tokenize', 'word_tokenize', (['r.body'], {}), '(r.body)\n', (1682, 1690), False, 'from nltk import word_tokenize, pos_tag\n'), ((1735, 1750), 'nltk.pos_tag', 'pos_tag', (['tokens'], {}), '(tokens)\n', (1742, 1750), False, 'from nltk import word_tokenize, pos_tag\n')]
|
"""Example case for particle travel times in a straight channel."""
import numpy as np
import matplotlib.pyplot as plt
import dorado.particle_track as pt
# fix the random seed so it stays the same as weights change
np.random.seed(1)
# create synthetic domain and flow field
domain = np.zeros((100, 50))
depth = np.zeros_like(domain)
stage = np.zeros_like(domain)
u = np.zeros_like(domain)
v = np.zeros_like(domain)
dx = 50.
Np_tracer = 500
seed_xloc = [10]
seed_yloc = [25]
# set up straight channel
depth[:, 10:40] = 1.0
stage[:, 10:40] = 1.0
v[:, 10:40] = -10.0
# choose number of iterations for particle to route
num_iter = 100
# define your 'known' or 'expected' travel time for this simple geometry
# picking expected time from location x=10 to x=70
# (really the boundary of row 70, so 1/2 a cell)
# 59.5 cells * 50 m/cell / 10 m/s = 297.5 seconds
target_row = 70
expected_time = 297.5
# assign particle parameters
params = pt.modelParams()
params.depth = depth
params.stage = stage
params.u = u
params.v = v
params.dx = dx
# set-up figure
plt.figure()
plt.imshow(np.sqrt(u**2 + v**2))
plt.colorbar()
plt.scatter(seed_yloc, seed_xloc, c='k', marker='o', s=5)
# plot the target line where time is measured
plt.plot(np.linspace(0, 50, 100), np.ones(100)*target_row, c='red')
plt.title('Velocity Field')
plt.legend(labels=['Target Row to Measure Times',
'Particle Seeding Location'],
loc='best')
plt.tight_layout()
plt.show()
# do the routing twice, once without any diffusivity added to the travel times
# (diff_coeff==0) then a second time with significant diffusion (diff_coeff==1)
for dc in list(range(0, 2)):
# set diff_coeff
if dc == 0:
params.diff_coeff = 0.0
else:
params.diff_coeff = 1.0
# make particle
particle = pt.Particles(params)
# walk it
particle.generate_particles(Np_tracer, seed_xloc, seed_yloc)
for i in list(range(0, num_iter)):
walk_data = particle.run_iteration()
# get travel times associated with particles when they are at coord x=70
# use the exposure_time function to measure this
roi = np.zeros_like(depth, dtype='int')
roi[0:target_row, :] = 1
target_times = pt.exposure_time(walk_data, roi)
# plot histogram
plt.subplot(1, 2, dc+1)
n, bins, _ = plt.hist(target_times, bins=100, range=(200, 400),
histtype='bar', density=True,
color=[0.5, 0.5, 1, 0.5])
# plot expected travel time to row 70
plt.scatter(expected_time, np.max(n),
s=75, c='green', marker='x', linewidths=20)
plt.legend(['Expected Travel Time',
'Histogram of Final Travel Times'], ncol=2,
loc='upper left', bbox_to_anchor=(0.0, -0.06), fontsize=16)
plt.title('Travel Time Distribution at Target Row \n'
'Diffusion Coefficient : ' + str(params.diff_coeff), fontsize=20)
plt.xlabel('Travel Time at Target Row [s]', fontsize=16)
plt.ylabel('Probability Density', fontsize=16)
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.random.seed",
"dorado.particle_track.Particles",
"numpy.ones",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.zeros_like",
"matplotlib.pyplot.colorbar",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"dorado.particle_track.exposure_time",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"dorado.particle_track.modelParams",
"numpy.sqrt"
] |
[((216, 233), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (230, 233), True, 'import numpy as np\n'), ((285, 304), 'numpy.zeros', 'np.zeros', (['(100, 50)'], {}), '((100, 50))\n', (293, 304), True, 'import numpy as np\n'), ((313, 334), 'numpy.zeros_like', 'np.zeros_like', (['domain'], {}), '(domain)\n', (326, 334), True, 'import numpy as np\n'), ((343, 364), 'numpy.zeros_like', 'np.zeros_like', (['domain'], {}), '(domain)\n', (356, 364), True, 'import numpy as np\n'), ((369, 390), 'numpy.zeros_like', 'np.zeros_like', (['domain'], {}), '(domain)\n', (382, 390), True, 'import numpy as np\n'), ((395, 416), 'numpy.zeros_like', 'np.zeros_like', (['domain'], {}), '(domain)\n', (408, 416), True, 'import numpy as np\n'), ((936, 952), 'dorado.particle_track.modelParams', 'pt.modelParams', ([], {}), '()\n', (950, 952), True, 'import dorado.particle_track as pt\n'), ((1053, 1065), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1063, 1065), True, 'import matplotlib.pyplot as plt\n'), ((1099, 1113), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1111, 1113), True, 'import matplotlib.pyplot as plt\n'), ((1114, 1171), 'matplotlib.pyplot.scatter', 'plt.scatter', (['seed_yloc', 'seed_xloc'], {'c': '"""k"""', 'marker': '"""o"""', 's': '(5)'}), "(seed_yloc, seed_xloc, c='k', marker='o', s=5)\n", (1125, 1171), True, 'import matplotlib.pyplot as plt\n'), ((1286, 1313), 'matplotlib.pyplot.title', 'plt.title', (['"""Velocity Field"""'], {}), "('Velocity Field')\n", (1295, 1313), True, 'import matplotlib.pyplot as plt\n'), ((1314, 1409), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labels': "['Target Row to Measure Times', 'Particle Seeding Location']", 'loc': '"""best"""'}), "(labels=['Target Row to Measure Times',\n 'Particle Seeding Location'], loc='best')\n", (1324, 1409), True, 'import matplotlib.pyplot as plt\n'), ((1436, 1454), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1452, 1454), True, 'import matplotlib.pyplot as plt\n'), ((1455, 1465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1463, 1465), True, 'import matplotlib.pyplot as plt\n'), ((3042, 3052), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3050, 3052), True, 'import matplotlib.pyplot as plt\n'), ((1077, 1101), 'numpy.sqrt', 'np.sqrt', (['(u ** 2 + v ** 2)'], {}), '(u ** 2 + v ** 2)\n', (1084, 1101), True, 'import numpy as np\n'), ((1227, 1250), 'numpy.linspace', 'np.linspace', (['(0)', '(50)', '(100)'], {}), '(0, 50, 100)\n', (1238, 1250), True, 'import numpy as np\n'), ((1802, 1822), 'dorado.particle_track.Particles', 'pt.Particles', (['params'], {}), '(params)\n', (1814, 1822), True, 'import dorado.particle_track as pt\n'), ((2128, 2161), 'numpy.zeros_like', 'np.zeros_like', (['depth'], {'dtype': '"""int"""'}), "(depth, dtype='int')\n", (2141, 2161), True, 'import numpy as np\n'), ((2210, 2242), 'dorado.particle_track.exposure_time', 'pt.exposure_time', (['walk_data', 'roi'], {}), '(walk_data, roi)\n', (2226, 2242), True, 'import dorado.particle_track as pt\n'), ((2269, 2294), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(dc + 1)'], {}), '(1, 2, dc + 1)\n', (2280, 2294), True, 'import matplotlib.pyplot as plt\n'), ((2310, 2421), 'matplotlib.pyplot.hist', 'plt.hist', (['target_times'], {'bins': '(100)', 'range': '(200, 400)', 'histtype': '"""bar"""', 'density': '(True)', 'color': '[0.5, 0.5, 1, 0.5]'}), "(target_times, bins=100, range=(200, 400), histtype='bar', density=\n True, color=[0.5, 0.5, 1, 0.5])\n", (2318, 2421), 
True, 'import matplotlib.pyplot as plt\n'), ((2619, 2762), 'matplotlib.pyplot.legend', 'plt.legend', (["['Expected Travel Time', 'Histogram of Final Travel Times']"], {'ncol': '(2)', 'loc': '"""upper left"""', 'bbox_to_anchor': '(0.0, -0.06)', 'fontsize': '(16)'}), "(['Expected Travel Time', 'Histogram of Final Travel Times'],\n ncol=2, loc='upper left', bbox_to_anchor=(0.0, -0.06), fontsize=16)\n", (2629, 2762), True, 'import matplotlib.pyplot as plt\n'), ((2933, 2989), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Travel Time at Target Row [s]"""'], {'fontsize': '(16)'}), "('Travel Time at Target Row [s]', fontsize=16)\n", (2943, 2989), True, 'import matplotlib.pyplot as plt\n'), ((2994, 3040), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability Density"""'], {'fontsize': '(16)'}), "('Probability Density', fontsize=16)\n", (3004, 3040), True, 'import matplotlib.pyplot as plt\n'), ((1252, 1264), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (1259, 1264), True, 'import numpy as np\n'), ((2543, 2552), 'numpy.max', 'np.max', (['n'], {}), '(n)\n', (2549, 2552), True, 'import numpy as np\n')]
|
import numpy as np
import esutil as eu
def randcap(*,
rng,
nrand,
ra,
dec,
radius,
get_radius=False,
dorot=False):
"""
    Generate random points in a spherical cap
parameters
----------
nrand:
The number of random points
ra,dec:
The center of the cap in degrees. The ra should be within [0,360) and
dec from [-90,90]
radius: float
radius of the cap, same units as ra,dec
get_radius: bool, optional
if true, return radius of each point in radians
dorot: bool
If dorot is True, generate the points on the equator and rotate them to
be centered at the desired location. This is the default when the dec
is within 0.1 degrees of the pole, to avoid calculation issues
"""
# generate uniformly in r**2
if dec >= 89.9 or dec <= -89.9:
dorot = True
if dorot:
tra, tdec = 90.0, 0.0
rand_ra, rand_dec, rand_r = randcap(
rng=rng,
nrand=nrand,
ra=90.0,
dec=0.0,
radius=radius,
get_radius=True,
)
rand_ra, rand_dec = eu.coords.rotate(
0.0,
dec-tdec,
0.0,
rand_ra,
rand_dec,
)
rand_ra, rand_dec = eu.coords.rotate(
ra-tra,
0.0,
0.0,
rand_ra,
rand_dec,
)
else:
rand_r = rng.uniform(size=nrand)
rand_r = np.sqrt(rand_r)*radius
    # convert from degrees to radians
np.deg2rad(rand_r, rand_r)
# generate position angle uniformly 0, 2*PI
rand_posangle = rng.uniform(low=0, high=2*np.pi, size=nrand)
theta = np.array(dec, dtype='f8', ndmin=1, copy=True)
phi = np.array(ra, dtype='f8', ndmin=1, copy=True)
theta += 90
np.deg2rad(theta, theta)
np.deg2rad(phi, phi)
sintheta = np.sin(theta)
costheta = np.cos(theta)
sinr = np.sin(rand_r)
cosr = np.cos(rand_r)
cospsi = np.cos(rand_posangle)
costheta2 = costheta*cosr + sintheta*sinr*cospsi
np.clip(costheta2, -1, 1, costheta2)
# gives [0,pi)
theta2 = np.arccos(costheta2)
sintheta2 = np.sin(theta2)
cos_dphi = (cosr - costheta*costheta2)/(sintheta*sintheta2)
np.clip(cos_dphi, -1, 1, cos_dphi)
dphi = np.arccos(cos_dphi)
# note fancy usage of where
phi2 = np.where(rand_posangle > np.pi, phi+dphi, phi-dphi)
np.rad2deg(phi2, phi2)
np.rad2deg(theta2, theta2)
rand_ra = phi2
rand_dec = theta2-90.0
eu.coords.atbound(rand_ra, 0.0, 360.0)
if get_radius:
np.rad2deg(rand_r, rand_r)
return rand_ra, rand_dec, rand_r
else:
return rand_ra, rand_dec
def randsphere(rng, num, ra_range=None, dec_range=None):
"""Generate random points on the sphere, possibly on a subset of it.
Routine due to Erin Sheldon.
Parameters
----------
num: integer
The number of randoms to generate
ra_range: list, optional
Should be within range [0,360]. Default [0,360]
dec_range: list, optional
Should be within range [-90,90]. Default [-90,90]
Returns
-------
ra : array-like
ra values for the random points
dec : array-like
dec values for the random points
"""
ra_range = _check_range(ra_range, [0.0, 360.0])
dec_range = _check_range(dec_range, [-90.0, 90.0])
ra = rng.uniform(
size=num,
low=ra_range[0],
high=ra_range[1],
)
cosdec_min = np.cos(np.radians(90.0+dec_range[0]))
cosdec_max = np.cos(np.radians(90.0+dec_range[1]))
v = rng.uniform(
size=num,
low=cosdec_min,
high=cosdec_max,
)
np.clip(v, -1.0, 1.0, v)
# Now this generates on [0,pi)
dec = np.arccos(v)
# convert to degrees
np.degrees(dec, dec)
# now in range [-90,90.0)
dec -= 90.0
return ra, dec
def _check_range(rng, allowed):
if rng is None:
rng = allowed
else:
if not hasattr(rng, '__len__'):
raise ValueError("range input does not have len() method")
if rng[0] < allowed[0] or rng[1] > allowed[1]:
raise ValueError("%s should be within %s" % (rng, allowed))
return rng
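# Hedged usage sketch (added; assumes numpy's Generator API, e.g. np.random.default_rng):
#   rng = np.random.default_rng(42)
#   ra, dec = randsphere(rng, 1000, ra_range=[0, 90], dec_range=[-30, 30])
#   cap_ra, cap_dec = randcap(rng=rng, nrand=500, ra=200.0, dec=-45.0, radius=1.5)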
|
[
"numpy.radians",
"esutil.coords.rotate",
"numpy.degrees",
"numpy.deg2rad",
"numpy.clip",
"numpy.rad2deg",
"numpy.sin",
"numpy.array",
"numpy.where",
"numpy.cos",
"esutil.coords.atbound",
"numpy.arccos",
"numpy.sqrt"
] |
[((3910, 3934), 'numpy.clip', 'np.clip', (['v', '(-1.0)', '(1.0)', 'v'], {}), '(v, -1.0, 1.0, v)\n', (3917, 3934), True, 'import numpy as np\n'), ((3981, 3993), 'numpy.arccos', 'np.arccos', (['v'], {}), '(v)\n', (3990, 3993), True, 'import numpy as np\n'), ((4024, 4044), 'numpy.degrees', 'np.degrees', (['dec', 'dec'], {}), '(dec, dec)\n', (4034, 4044), True, 'import numpy as np\n'), ((1220, 1277), 'esutil.coords.rotate', 'eu.coords.rotate', (['(0.0)', '(dec - tdec)', '(0.0)', 'rand_ra', 'rand_dec'], {}), '(0.0, dec - tdec, 0.0, rand_ra, rand_dec)\n', (1236, 1277), True, 'import esutil as eu\n'), ((1375, 1430), 'esutil.coords.rotate', 'eu.coords.rotate', (['(ra - tra)', '(0.0)', '(0.0)', 'rand_ra', 'rand_dec'], {}), '(ra - tra, 0.0, 0.0, rand_ra, rand_dec)\n', (1391, 1430), True, 'import esutil as eu\n'), ((1626, 1652), 'numpy.deg2rad', 'np.deg2rad', (['rand_r', 'rand_r'], {}), '(rand_r, rand_r)\n', (1636, 1652), True, 'import numpy as np\n'), ((1792, 1837), 'numpy.array', 'np.array', (['dec'], {'dtype': '"""f8"""', 'ndmin': '(1)', 'copy': '(True)'}), "(dec, dtype='f8', ndmin=1, copy=True)\n", (1800, 1837), True, 'import numpy as np\n'), ((1852, 1896), 'numpy.array', 'np.array', (['ra'], {'dtype': '"""f8"""', 'ndmin': '(1)', 'copy': '(True)'}), "(ra, dtype='f8', ndmin=1, copy=True)\n", (1860, 1896), True, 'import numpy as np\n'), ((1926, 1950), 'numpy.deg2rad', 'np.deg2rad', (['theta', 'theta'], {}), '(theta, theta)\n', (1936, 1950), True, 'import numpy as np\n'), ((1959, 1979), 'numpy.deg2rad', 'np.deg2rad', (['phi', 'phi'], {}), '(phi, phi)\n', (1969, 1979), True, 'import numpy as np\n'), ((2000, 2013), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2006, 2013), True, 'import numpy as np\n'), ((2033, 2046), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2039, 2046), True, 'import numpy as np\n'), ((2063, 2077), 'numpy.sin', 'np.sin', (['rand_r'], {}), '(rand_r)\n', (2069, 2077), True, 'import numpy as np\n'), ((2093, 2107), 'numpy.cos', 'np.cos', (['rand_r'], {}), '(rand_r)\n', (2099, 2107), True, 'import numpy as np\n'), ((2126, 2147), 'numpy.cos', 'np.cos', (['rand_posangle'], {}), '(rand_posangle)\n', (2132, 2147), True, 'import numpy as np\n'), ((2214, 2250), 'numpy.clip', 'np.clip', (['costheta2', '(-1)', '(1)', 'costheta2'], {}), '(costheta2, -1, 1, costheta2)\n', (2221, 2250), True, 'import numpy as np\n'), ((2292, 2312), 'numpy.arccos', 'np.arccos', (['costheta2'], {}), '(costheta2)\n', (2301, 2312), True, 'import numpy as np\n'), ((2333, 2347), 'numpy.sin', 'np.sin', (['theta2'], {}), '(theta2)\n', (2339, 2347), True, 'import numpy as np\n'), ((2426, 2460), 'numpy.clip', 'np.clip', (['cos_dphi', '(-1)', '(1)', 'cos_dphi'], {}), '(cos_dphi, -1, 1, cos_dphi)\n', (2433, 2460), True, 'import numpy as np\n'), ((2476, 2495), 'numpy.arccos', 'np.arccos', (['cos_dphi'], {}), '(cos_dphi)\n', (2485, 2495), True, 'import numpy as np\n'), ((2548, 2603), 'numpy.where', 'np.where', (['(rand_posangle > np.pi)', '(phi + dphi)', '(phi - dphi)'], {}), '(rand_posangle > np.pi, phi + dphi, phi - dphi)\n', (2556, 2603), True, 'import numpy as np\n'), ((2609, 2631), 'numpy.rad2deg', 'np.rad2deg', (['phi2', 'phi2'], {}), '(phi2, phi2)\n', (2619, 2631), True, 'import numpy as np\n'), ((2640, 2666), 'numpy.rad2deg', 'np.rad2deg', (['theta2', 'theta2'], {}), '(theta2, theta2)\n', (2650, 2666), True, 'import numpy as np\n'), ((2730, 2768), 'esutil.coords.atbound', 'eu.coords.atbound', (['rand_ra', '(0.0)', '(360.0)'], {}), '(rand_ra, 0.0, 360.0)\n', (2747, 2768), True, 'import esutil as 
eu\n'), ((2797, 2823), 'numpy.rad2deg', 'np.rad2deg', (['rand_r', 'rand_r'], {}), '(rand_r, rand_r)\n', (2807, 2823), True, 'import numpy as np\n'), ((3725, 3756), 'numpy.radians', 'np.radians', (['(90.0 + dec_range[0])'], {}), '(90.0 + dec_range[0])\n', (3735, 3756), True, 'import numpy as np\n'), ((3780, 3811), 'numpy.radians', 'np.radians', (['(90.0 + dec_range[1])'], {}), '(90.0 + dec_range[1])\n', (3790, 3811), True, 'import numpy as np\n'), ((1569, 1584), 'numpy.sqrt', 'np.sqrt', (['rand_r'], {}), '(rand_r)\n', (1576, 1584), True, 'import numpy as np\n')]
|
import os
from datetime import datetime, timedelta
import numpy as np
import xarray as xr
import parcels
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import cartopy
import cartopy.util
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
u_filename = '/home/alir/hawaii_npac/0000969408_U_10800.8150.1_1080.3720.90'
v_filename = '/home/alir/hawaii_npac/0000969408_V_10800.8150.1_1080.3720.90'
level = 0
with open(u_filename, 'rb') as f:
    nx, ny = 1080, 3720  # grid dimensions; the f.seek below advances the file pointer to the requested level's data
record_length = 4 # [bytes]
f.seek(level * record_length * nx*ny, os.SEEK_SET)
u_data = np.fromfile(f, dtype='>f4', count=nx*ny)
u_array = np.reshape(u_data, [ny, nx], order='F')
with open(v_filename, 'rb') as f:
    nx, ny = 1080, 3720  # grid dimensions; the f.seek below advances the file pointer to the requested level's data
record_length = 4 # [bytes]
f.seek(level * record_length * nx*ny, os.SEEK_SET)
v_data = np.fromfile(f, dtype='>f4', count=nx*ny)
v_array = np.reshape(v_data, [ny, nx], order='F')
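# Note (added comment): dtype '>f4' reads big-endian 32-bit floats, and order='F' interprets the
# flat record in column-major (Fortran) order before it is used as a (ny, nx) field.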
u_data = u_array
v_data = v_array
# u_data = np.ma.masked_where(u_array == 0, u_array)
# v_data = np.ma.masked_where(v_array == 0, v_array)
lats = np.arange(ny)/48
lons = np.arange(nx)/48
depth = np.array([0.0])
u_field = parcels.field.Field(name='U', data=u_data,
lon=lons, lat=lats, depth=depth, mesh='spherical')
v_field = parcels.field.Field(name='V', data=v_data,
lon=lons, lat=lats, depth=depth, mesh='spherical')
u_magnitude = np.sqrt(u_data*u_data + v_data*v_data)
fieldset = parcels.fieldset.FieldSet(u_field, v_field)
# fieldset.U.show()
lats_pset = np.tile(np.linspace(5, 70, 11), 11)
lons_pset = np.repeat(np.linspace(5, 15, 11), 11)
# species_field = -1 * np.ones((11,11), dtype=np.int32)
# for i, lat in enumerate(np.linspace(10, 50, 11)):
# for j, lon in enumerate(np.linspace(-170, -130, 11)):
# pass
# species_pfield = parcels.field.Field(name='species', data=species_field,
# lat=np.linspace(10, 50, 11), lon=np.linspace(-170, -130, 11), depth=depth, mesh='spherical')
class MicrobeParticle(parcels.JITParticle):
species = parcels.Variable('species', dtype=np.int32, initial=-1)
pset = parcels.ParticleSet.from_list(fieldset=fieldset, pclass=MicrobeParticle,
lon=lons_pset, lat=lats_pset)
for i, particle in enumerate(pset):
if 37.5 <= particle.lat <= 52.5 and -172.5 <= particle.lon <= -157.5:
particle.species = 1
elif 37.5 <= particle.lat <= 52.5 and -157.5 <= particle.lon <= -142.5:
particle.species = 2
elif 37.5 <= particle.lat <= 52.5 and -142.5 <= particle.lon <= -127.5:
particle.species = 3
elif 22.5 <= particle.lat <= 37.5 and -172.5 <= particle.lon <= -157.5:
particle.species = 3
elif 22.5 <= particle.lat <= 37.5 and -157.5 <= particle.lon <= -142.5:
particle.species = 1
elif 22.5 <= particle.lat <= 37.5 and -142.5 <= particle.lon <= -127.5:
particle.species = 2
elif 7.5 <= particle.lat <= 22.5 and -172.5 <= particle.lon <= -157.5:
particle.species = 2
elif 7.5 <= particle.lat <= 22.5 and -157.5 <= particle.lon <= -142.5:
particle.species = 3
elif 7.5 <= particle.lat <= 22.5 and -142.5 <= particle.lon <= -127.5:
particle.species = 1
    particle.species = 1  # NOTE: runs for every particle and overrides the branches above; none of them match the 5-15 degree seeding longitudes, so all particles end up as species 1
print("Particle {:03d} @({:.2f},{:.2f}) [species={:d}]".format(i, particle.lat, particle.lon, particle.species))
def rock_paper_scissors_type(n):
if n == 1:
return "rock"
elif n == 2:
return "paper"
elif n == 3:
return "scissors"
return None
vector_crs = ccrs.PlateCarree()
land_50m = cartopy.feature.NaturalEarthFeature('physical', 'land', '50m',
edgecolor='face',facecolor='dimgray', linewidth=0)
t = datetime(2017, 1, 1)
dt = timedelta(hours=1)
for n in range(1):
print("Advecting: {:} -> {:}".format(t, t+dt))
nc_filename = "advected_microbes_" + str(n).zfill(4) + ".nc"
pset.execute(parcels.AdvectionRK4, runtime=dt, dt=dt, verbose_progress=True,
output_file=pset.ParticleFile(name=nc_filename, outputdt=dt))
# print("Computing microbe interactions...")
# N = len(pset)
# for i, p1 in enumerate(pset):
# for j, p2 in enumerate(pset[i+1:]):
# if np.abs(p1.lat - p2.lat) < 1 and np.abs(p1.lon - p2.lon) < 1:
# p1_type = rock_paper_scissors_type(p1.species)
# p2_type = rock_paper_scissors_type(p2.species)
# winner = None
# if p1_type == "rock" and p2_type == "scissors":
# winner = p1
# elif p1_type == "rock" and p2_type == "paper":
# winner = p2
# elif p1_type == "paper" and p2_type == "rock":
# winner = p1
# elif p1_type == "paper" and p2_type == "scissors":
# winner = p2
# elif p1_type == "scissors" and p2_type == "rock":
# winner = p2
# elif p1_type == "scissors" and p2_type == "paper":
# winner = p1
# else:
# winner = None
# if winner == p1:
# p2.species = p1.species
# print("[{:s}#{:d}] @({:.2f}, {:.2f}) vs. [{:s}#{:d}] @({:.2f}, {:.2f}): #{:d} wins!"
# .format(p1_type, i, p1.lat, p1.lon, p2_type, j+i, p2.lat, p2.lon, i))
# elif winner == p2:
# p1.species = p2.species
# print("[{:s}#{:d}] @({:.2f}, {:.2f}) vs. [{:s}#{:d}] @({:.2f}, {:.2f}): #{:d} wins!"
# .format(p1_type, i, p1.lat, p1.lon, p2_type, j+i, p2.lat, p2.lon, j+i))
# for i, p in enumerate(pset):
# if p.lat >= 59 or p.lat <= 1 or p.lon <= -179 or p.lon >= -121:
# print("Removing particle #{:d} @({:.2f},{:.2f}). Too close to boundary"
# .format(i, p.lat, p.lon))
# pset.remove(i)
t = t+dt
print("Plotting figure...")
fig = plt.figure(figsize=(16, 9))
matplotlib.rcParams.update({'font.size': 10})
crs_sps = ccrs.PlateCarree(central_longitude=-150)
crs_sps._threshold = 1000.0 # This solves https://github.com/SciTools/cartopy/issues/363
ax = plt.subplot(111, projection=crs_sps)
ax.add_feature(land_50m)
ax.set_extent([0, 22.5, 0, 77.5], ccrs.PlateCarree())
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=1, color='black',
alpha=0.8, linestyle='--')
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlocator = mticker.FixedLocator([0, 7.5, 15, 22.5])
gl.ylocator = mticker.FixedLocator([0, 15.5, 31, 46.5, 62, 77.5])
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
im = ax.pcolormesh(lons, lats, u_magnitude, transform=vector_crs, vmin=0, vmax=1, cmap='Blues_r')
clb = fig.colorbar(im, ax=ax, extend='max', fraction=0.046, pad=0.1)
clb.ax.set_title(r'm/s')
rock_lats, rock_lons = [], []
paper_lats, paper_lons = [], []
scissors_lats, scissors_lons = [], []
for microbe in pset:
if microbe.species == 1:
rock_lats.append(microbe.lat)
rock_lons.append(microbe.lon)
elif microbe.species == 2:
paper_lats.append(microbe.lat)
paper_lons.append(microbe.lon)
elif microbe.species == 3:
scissors_lats.append(microbe.lat)
scissors_lons.append(microbe.lon)
# ax.plot(rock_lons, rock_lats, marker='o', linestyle='', color='red', ms=4, label='Rocks', transform=vector_crs)
# ax.plot(paper_lons, paper_lats, marker='o', linestyle='', color='lime', ms=4, label='Papers', transform=vector_crs)
# ax.plot(scissors_lons, scissors_lats, marker='o', linestyle='', color='cyan', ms=4, label='Scissors', transform=vector_crs)
plt.title(str(t))
    ax.legend()  # NOTE: the labelled particle plots above are commented out, so this legend has nothing to show
# plt.show()
png_filename = "advected_microbes_" + str(n).zfill(4) + ".png"
print("Saving figure: {:s}".format(png_filename))
plt.savefig(png_filename, dpi=300, format='png', transparent=False)
plt.close('all')
|
[
"cartopy.feature.NaturalEarthFeature",
"matplotlib.pyplot.figure",
"parcels.field.Field",
"numpy.arange",
"matplotlib.pyplot.close",
"matplotlib.rcParams.update",
"matplotlib.ticker.FixedLocator",
"datetime.timedelta",
"numpy.reshape",
"numpy.linspace",
"parcels.ParticleSet.from_list",
"datetime.datetime",
"parcels.Variable",
"matplotlib.pyplot.subplot",
"parcels.fieldset.FieldSet",
"numpy.fromfile",
"numpy.array",
"cartopy.crs.PlateCarree",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((1328, 1343), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (1336, 1343), True, 'import numpy as np\n'), ((1355, 1452), 'parcels.field.Field', 'parcels.field.Field', ([], {'name': '"""U"""', 'data': 'u_data', 'lon': 'lons', 'lat': 'lats', 'depth': 'depth', 'mesh': '"""spherical"""'}), "(name='U', data=u_data, lon=lons, lat=lats, depth=depth,\n mesh='spherical')\n", (1374, 1452), False, 'import parcels\n'), ((1463, 1560), 'parcels.field.Field', 'parcels.field.Field', ([], {'name': '"""V"""', 'data': 'v_data', 'lon': 'lons', 'lat': 'lats', 'depth': 'depth', 'mesh': '"""spherical"""'}), "(name='V', data=v_data, lon=lons, lat=lats, depth=depth,\n mesh='spherical')\n", (1482, 1560), False, 'import parcels\n'), ((1576, 1618), 'numpy.sqrt', 'np.sqrt', (['(u_data * u_data + v_data * v_data)'], {}), '(u_data * u_data + v_data * v_data)\n', (1583, 1618), True, 'import numpy as np\n'), ((1627, 1670), 'parcels.fieldset.FieldSet', 'parcels.fieldset.FieldSet', (['u_field', 'v_field'], {}), '(u_field, v_field)\n', (1652, 1670), False, 'import parcels\n'), ((2266, 2372), 'parcels.ParticleSet.from_list', 'parcels.ParticleSet.from_list', ([], {'fieldset': 'fieldset', 'pclass': 'MicrobeParticle', 'lon': 'lons_pset', 'lat': 'lats_pset'}), '(fieldset=fieldset, pclass=MicrobeParticle,\n lon=lons_pset, lat=lats_pset)\n', (2295, 2372), False, 'import parcels\n'), ((3677, 3695), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3693, 3695), True, 'import cartopy.crs as ccrs\n'), ((3707, 3826), 'cartopy.feature.NaturalEarthFeature', 'cartopy.feature.NaturalEarthFeature', (['"""physical"""', '"""land"""', '"""50m"""'], {'edgecolor': '"""face"""', 'facecolor': '"""dimgray"""', 'linewidth': '(0)'}), "('physical', 'land', '50m', edgecolor=\n 'face', facecolor='dimgray', linewidth=0)\n", (3742, 3826), False, 'import cartopy\n'), ((3830, 3850), 'datetime.datetime', 'datetime', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (3838, 3850), False, 'from datetime import datetime, timedelta\n'), ((3856, 3874), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (3865, 3874), False, 'from datetime import datetime, timedelta\n'), ((731, 773), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '""">f4"""', 'count': '(nx * ny)'}), "(f, dtype='>f4', count=nx * ny)\n", (742, 773), True, 'import numpy as np\n'), ((786, 825), 'numpy.reshape', 'np.reshape', (['u_data', '[ny, nx]'], {'order': '"""F"""'}), "(u_data, [ny, nx], order='F')\n", (796, 825), True, 'import numpy as np\n'), ((1035, 1077), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '""">f4"""', 'count': '(nx * ny)'}), "(f, dtype='>f4', count=nx * ny)\n", (1046, 1077), True, 'import numpy as np\n'), ((1090, 1129), 'numpy.reshape', 'np.reshape', (['v_data', '[ny, nx]'], {'order': '"""F"""'}), "(v_data, [ny, nx], order='F')\n", (1100, 1129), True, 'import numpy as np\n'), ((1279, 1292), 'numpy.arange', 'np.arange', (['ny'], {}), '(ny)\n', (1288, 1292), True, 'import numpy as np\n'), ((1303, 1316), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (1312, 1316), True, 'import numpy as np\n'), ((1712, 1734), 'numpy.linspace', 'np.linspace', (['(5)', '(70)', '(11)'], {}), '(5, 70, 11)\n', (1723, 1734), True, 'import numpy as np\n'), ((1762, 1784), 'numpy.linspace', 'np.linspace', (['(5)', '(15)', '(11)'], {}), '(5, 15, 11)\n', (1773, 1784), True, 'import numpy as np\n'), ((2202, 2257), 'parcels.Variable', 'parcels.Variable', (['"""species"""'], {'dtype': 'np.int32', 'initial': '(-1)'}), "('species', dtype=np.int32, 
initial=-1)\n", (2218, 2257), False, 'import parcels\n'), ((6134, 6161), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (6144, 6161), True, 'import matplotlib.pyplot as plt\n'), ((6166, 6211), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 10}"], {}), "({'font.size': 10})\n", (6192, 6211), False, 'import matplotlib\n'), ((6231, 6271), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {'central_longitude': '(-150)'}), '(central_longitude=-150)\n', (6247, 6271), True, 'import cartopy.crs as ccrs\n'), ((6376, 6412), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'projection': 'crs_sps'}), '(111, projection=crs_sps)\n', (6387, 6412), True, 'import matplotlib.pyplot as plt\n'), ((6701, 6741), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['[0, 7.5, 15, 22.5]'], {}), '([0, 7.5, 15, 22.5])\n', (6721, 6741), True, 'import matplotlib.ticker as mticker\n'), ((6760, 6811), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['[0, 15.5, 31, 46.5, 62, 77.5]'], {}), '([0, 15.5, 31, 46.5, 62, 77.5])\n', (6780, 6811), True, 'import matplotlib.ticker as mticker\n'), ((8159, 8226), 'matplotlib.pyplot.savefig', 'plt.savefig', (['png_filename'], {'dpi': '(300)', 'format': '"""png"""', 'transparent': '(False)'}), "(png_filename, dpi=300, format='png', transparent=False)\n", (8170, 8226), True, 'import matplotlib.pyplot as plt\n'), ((8236, 8252), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (8245, 8252), True, 'import matplotlib.pyplot as plt\n'), ((6480, 6498), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (6496, 6498), True, 'import cartopy.crs as ccrs\n'), ((6527, 6545), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (6543, 6545), True, 'import cartopy.crs as ccrs\n')]
|
# %% =======================================================================
# import libraries
#===========================================================================
# default
import os
import datetime
import hashlib
# conda
import pandas as pd
import PySimpleGUI as sg
# user
from b01_schedule_class_base import ScheduleManageBase
from b01_schedule_class import ScheduleManage
# %% =======================================================================
# pysimple gui settings
#===========================================================================
# window layout is defined in b01_schedule_class_base
# button or other functions are difined in b01_schedule_class
sch_m = ScheduleManage()
sch_m.create_window()
sch_m.window.read(timeout=1)
# %% =======================================================================
# simple gui event loop
#===========================================================================
# Event Loop to process "events" and get the "values" of the inputs
while True:
event, pos, item, eid = sch_m.parse_event()
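    # parse_event() (defined in ScheduleManage) appears to split the raw
    # PySimpleGUI event into the original event string, a position tag
    # ("hd" header, "lt"/"l0"-"l3" left tabs, "r1"-"r5" right tabs), an item
    # kind ("btn", "cbx", "rdi", "grp", "tbl", "inp", "mul", ...) and an
    # integer element id; the branches below dispatch on those four values.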
# if event and "MV" not in event:
# print(event, pos, item, eid)
if event == sg.WIN_CLOSED: # if user closes window or clicks cancel
break
# %% =======================================================================
# header
#===========================================================================
if pos == "hd": # header
if item == "btn": # click button
if eid == 0: # all
sch_m.header_all_button_pressed()
sch_m.header_checkbox_changed()
if eid == 1: # clear
sch_m.header_clear_button_pressed()
sch_m.header_checkbox_changed()
if eid == 2: # drawchart
sch_m.header_refresh_button_pressed()
if eid == 3: # upload
sch_m.header_upload_button_pressed()
if eid == 4: # download
sch_m.header_reload_button_pressed()
if item == "cbx": # check box was updated
sch_m.header_checkbox_changed([eid])
if item == "rdi": # radio button
sch_m.header_member_raido_button_changed()
continue
# %% =======================================================================
# left tab
#===========================================================================
if pos == "lt":
if item == "grp":
sch_m.l1_chart_draw()
continue
if pos == "l1": # left tab1
if item[:3] == "grp":
if len(item) == 3:
sch_m.l1_graph_area_clicked()
if "MV" in item:
sch_m.l1_graphs_capture_mouse_motion(eid)
if "RC" in item:
sch_m.l1_graph_right_click_menu_selected(event, eid)
continue
if pos == "l2": # left tab2
continue
if pos == "l3": # left tab2
if item == "btn":
if eid < 4:
sch_m.l3_priority_updown_button_pressed(eid)
if eid == 4:
sch_m.l3_priority_auto_button_pressed()
if "tbl" in item:
sch_m.l3_table_selected_ticket_changed()
continue
if pos == "l0":
if item == "btn":
if eid == 0:
sch_m.l0_settings_save_and_restart_button_pressed()
# %% =======================================================================
# right tab
#===========================================================================
if pos == "r1": # right tab1
if item[:3] == "inp":
if len(item) == 6:
if item[4:6] == "LC":
sch_m.r1_input_date_box_selected(eid)
sch_m.r1_input_check()
# sch_m._r1_pre_next_ticket_table_update()
if item[:3] == "btn":
if eid == 0:
sch_m.r1_apply_button_pressed()
if eid == 1:
sch_m.r1_delete_button_pressed()
if item == "right_menu":
sch_m.r1_right_click_menu_clicked(event)
continue
if pos == "r2": # right tab2
if item == "btn":
# if eid == 0:
# sch_m.r2_save_plan_button_pressed()
if eid == 1:
sch_m.r2_save_record_button_pressed()
if eid == 4:
sch_m.r2_delete_button_pressed()
if item == "txt":
if eid == 2:
sch_m.r2_date_txt_pressed()
if item == "inp":
sch_m.r2_information_box_inputed()
continue
if pos == "r3": # right tab3
if item == "btn":
if eid == 0:
sch_m.r3_mail_button_pressed()
if eid == 1:
sch_m.r3_folder_button_pressed()
if eid == 2:
sch_m.r3_memo_button_pressed()
continue
if pos == "r4": # right tab4
continue
if pos == "r5": # right tab5
if item == "btn":
sch_m.r5_arrow_button_pressed(eid)
if item == "mul":
sch_m.r5_df_from_multiline(eid)
sch_m.window.close()
|
[
"b01_schedule_class.ScheduleManage"
] |
[((692, 708), 'b01_schedule_class.ScheduleManage', 'ScheduleManage', ([], {}), '()\n', (706, 708), False, 'from b01_schedule_class import ScheduleManage\n')]
|
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
class Sensor_DB:
def __init__(self, host="127.0.0.1", database="postgres", user="postgres", password=""):
self.__DBconnection = psycopg2.connect(
host=host, database="postgres", user=user, password=password
)
self.__DBconnection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
self.__DBcursor = self.__DBconnection.cursor()
self.__DBcursor.execute("SELECT 1 FROM pg_catalog.pg_database WHERE datname = %s", (database,),)
self.__DBexists = self.__DBcursor.fetchone()
if not self.__DBexists:
self.__DBcursor.execute("CREATE DATABASE " + database)
self.__DBcursor.close()
self.__DBconnection.close()
###############################################
self.__DBconnection = psycopg2.connect(
host=host, database=database, user=user, password=password
)
self.__DBcursor = self.__DBconnection.cursor()
self.__DBtables = set()
def __createTable(self, tableID):
self.__DBcursor.execute(
"CREATE TABLE IF NOT EXISTS sensor%s ("
" id_entry SERIAL PRIMARY KEY,"
" temperature DOUBLE PRECISION,"
" humidity INT,"
" lux INT,"
" latitude DOUBLE PRECISION,"
" longitude DOUBLE PRECISION,"
" dt TIMESTAMP"
")",
(int(tableID),),
)
self.__DBconnection.commit()
self.__DBtables.add(tableID)
def saveData(self, sensorID, temperature, humidity, lux, latitude, longitude, date, time):
if sensorID not in self.__DBtables:
self.__createTable(sensorID)
self.__DBcursor.execute(
"INSERT INTO sensor%s (temperature, humidity, lux, latitude, longitude, dt) VALUES(%s, %s, %s, %s, %s, %s)",
(
int(sensorID),
temperature,
humidity,
lux,
latitude,
longitude,
date + " " + time,
),
)
self.__DBconnection.commit()
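# A minimal usage sketch (illustrative only; the host, database name and
# credentials below are assumptions, not values from this module):
#
#   db = Sensor_DB(host="127.0.0.1", database="sensors", user="postgres",
#                  password="secret")
#   db.saveData(sensorID=1, temperature=21.5, humidity=40, lux=300,
#               latitude=45.07, longitude=7.69, date="2021-01-01", time="12:00:00")
#
# Note that the table name is built by interpolating int(sensorID) into
# "sensor%s"; this only stays safe because the value is forced to an integer.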
|
[
"psycopg2.connect"
] |
[((217, 295), 'psycopg2.connect', 'psycopg2.connect', ([], {'host': 'host', 'database': '"""postgres"""', 'user': 'user', 'password': 'password'}), "(host=host, database='postgres', user=user, password=password)\n", (233, 295), False, 'import psycopg2\n'), ((865, 941), 'psycopg2.connect', 'psycopg2.connect', ([], {'host': 'host', 'database': 'database', 'user': 'user', 'password': 'password'}), '(host=host, database=database, user=user, password=password)\n', (881, 941), False, 'import psycopg2\n')]
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`refex.python.syntactic_template`
--------------------------------------
Syntax-aware Python substitution templates, as described in
:doc:`/guide/patterns_templates`.
The core problem with lexical or textual substitution of Python code, as with
e.g. :class:`refex.formatting.ShTemplate`, is that the substitution can be
unintentionally wrong. For example:
If you replace ``f($x)`` with ``$x``, what if ``$x`` contains a newline?
If you replace ``$a`` with ``$a * 2``, what if ``$a`` is ``1 + 2``?
The template classes here aim to make replacements that match the intended
syntax -- i.e. the structure of the template -- and will parenthesize as
necessary.
.. autoclass:: PythonExprTemplate
.. autoclass:: PythonStmtTemplate
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import tokenize
from typing import Text
from absl import logging
import attr
import cached_property
import six
from refex import formatting
from refex.python import matcher
from refex.python import python_pattern
from refex.python.matchers import ast_matchers
from refex.python.matchers import base_matchers
from refex.python.matchers import syntax_matchers
@attr.s(frozen=True)
class _LexicalTemplate(object):
"""Lexically-aware Python templates.
$variables are only used for replacements if they occur inside Python, not
inside of e.g. a string literal or a comment. So "a = $x" has an x variable
in the template, but "a = '$x'" does not.
"""
template = attr.ib()
_tokens = attr.ib(init=False, repr=False)
_var_to_i = attr.ib(init=False, repr=False)
variables = attr.ib(init=False, repr=False)
def __attrs_post_init__(self):
# Because we have frozen=True, creating values for _tokens and _var_to_i
# is a little complex, and requires invoking object.__setattr__.
tokenized, metavar_indices = python_pattern.token_pattern(self.template)
var_to_i = {}
for i in metavar_indices:
var = tokenized[i][1]
var_to_i.setdefault(var, []).append(i)
object.__setattr__(self, '_tokens', tokenized)
object.__setattr__(self, '_var_to_i', var_to_i)
object.__setattr__(self, 'variables', six.viewkeys(var_to_i))
def substitute(self, replacements):
"""Lexically-aware substitution.
Args:
replacements: A map from metavariable name to new content.
Returns:
Substituted Python code.
Raises:
KeyError: A variable in the template was not specified in the
        replacements. This is to be compatible with string.Template.
"""
if not self.template:
# Work around: if u'' is untokenized and then retokenized,
# it comes back out as b'' on Python 2!
return self.template
# Take a copy of tokens to modify the target slots.
tokens = list(self._tokens)
free_vars = set(self._var_to_i)
logging.debug('Applying %r to tokens %r for substitution of %r', free_vars,
tokens, replacements)
for var, new in six.iteritems(replacements):
try:
all_i = self._var_to_i[var]
except KeyError:
# overspecified: a replacement for a variable not in the template.
continue
free_vars.remove(var)
for i in all_i:
tok = list(tokens[i])
tok[1] = new
tokens[i] = tuple(tok)
if free_vars:
raise KeyError(next(iter(free_vars)))
return tokenize.untokenize(tokens)
@attr.s(frozen=True)
class _BasePythonTemplate(formatting.Template):
"""Base class for syntax-aware templates.
The templates use ShTemplate/string.Template style templates. e.g. "$x + $y".
Subclasses must override _pattern_factory to provide a matcher for the
template.
Attributes:
template: The source template
"""
template = attr.ib(type=Text)
_lexical_template = attr.ib(repr=False, init=False, type=_LexicalTemplate)
_ast_matcher = attr.ib(repr=False, init=False, type=matcher.Matcher)
def __attrs_post_init__(self):
if not isinstance(self.template, six.text_type):
raise TypeError('Expected text, got: {}'.format(
type(self.template).__name__))
@_lexical_template.default
def _lexical_template_default(self):
return _LexicalTemplate(self.template)
@_ast_matcher.default
def _ast_matcher_default(self):
return self._pattern_factory(self.template)
def substitute_match(self, parsed, match, matches):
"""Syntax-aware substitution, parsing the template using _pattern_factory.
Args:
parsed: The ParsedFile being substituted into.
match: The match being replaced.
matches: The matches used for formatting.
Returns:
Substituted Python code.
"""
replacement, _ = self._parenthesized(
_matchers_for_matches(matches),
formatting.stringify_matches(matches),
)
if not isinstance(parsed, matcher.PythonParsedFile):
# This file is not (known to be) a Python file, so we can't (efficiently)
# check the replacement into the parent AST node.
# TODO: It would help if we could automatically obtain a
# PythonParsedFile for any given ParsedFile. This also lets us compose
# arbitrary searchers together, some of which take a PythonParsedFile, and
# others which don't.
return replacement
# replacement is now a safely-interpolated string: all of the substitutions
# are parenthesized where needed. The only thing left is to ensure that when
# replacement itself is substituted in, it itself is parenthesized
# correctly.
#
# To do this, we essentially repeat the same safe substitution algorithm,
# except using the context of the replacement as a fake pattern.
#
# Note that this whole process is only valid if we are matching an actual
# AST node, and an expr node at that.
# For example, it is _not_ valid to replace b'foo' with b('foo'), or 'foo'
# with 'f(o)o'.
if not isinstance(match, matcher.LexicalASTMatch):
# The match wasn't even for an AST node. We'll assume they know what
# they're doing and pass thru verbatim. Non-AST matches can't be
# automatically parenthesized -- for example, b'foo' vs b('foo')
return replacement
if not isinstance(match.matched, ast.expr):
# Only expressions need to be parenthesized, so this is fine as-is.
return replacement
parent = parsed.nav.get_parent(match.matched)
if isinstance(parent, ast.Expr):
# the matched object is already a top-level expression, and will never
# need extra parentheses.
# We are assuming here that the template does not contain comments,
# which is not enforced. We also assume that it doesn't contain raw
# newlines, but this is enforced by the template and substitution both
# needing to parse on their own.
return replacement
if isinstance(parent, ast.stmt) and hasattr(parent, 'body'):
# TODO(b/139758169): re-enable reparsing of statements in templating.
# Multi-line statements can't be reparsed due to issues with
# indentation. We can usually safely assume that it doesn't need parens,
# although exceptions exist. (Including, in an ironic twist, "except".)
return replacement
# keep navigating up until we reach a lexical AST node.
# e.g. skip over lists.
while True:
parent_span = matcher.create_match(parsed, parent).span
if parent_span is not None and isinstance(parent, (ast.expr, ast.stmt)):
# We need a parent node which has a known source span,
# and which is by itself parseable (i.e. is an expr or stmt).
break
next_parent = parsed.nav.get_parent(parent)
if isinstance(next_parent, ast.stmt) and hasattr(next_parent, 'body'):
# TODO(b/139758169): re-enable reparsing of statements in templating.
# We encountered no reparseable parents between here and a
# non-reparseable statement, so, as before, we must fall back to
# returning the replacement verbatim and hoping it isn't broken.
# (For example, replacing T with T1, T2 in "class A(T)" is incorrect.)
return replacement
parent = next_parent
else:
raise formatting.RewriteError(
"Bug in Python formatter? Couldn't find parent of %r" % match)
# Re-apply the safe substitution, but on the parent.
context_start, context_end = parent_span
match_start, match_end = match.span
prefix = parsed.text[context_start:match_start]
suffix = parsed.text[match_end:context_end]
if isinstance(parent, ast.expr):
# expressions can occur in a multiline context, but now that we've
# removed it from its context for reparsing, we're in a single-line
# unparenthesized context. We need to add parens to make sure this
# parses correctly.
prefix = '(' + prefix
suffix += ')'
parent_pseudotemplate = PythonTemplate(prefix + u'$current_expr' + suffix)
parsed_replacement = ast.parse(replacement)
if len(parsed_replacement.body) != 1 or not isinstance(
parsed_replacement.body[0], ast.Expr):
raise formatting.RewriteError(
"Non-expression template can't be used in expression context: %s" %
self.template)
current_matcher = syntax_matchers.ast_matchers_matcher(
parsed_replacement.body[0].value)
_, safe_mapping = parent_pseudotemplate._parenthesized( # pylint: disable=protected-access
{u'current_expr': current_matcher}, {u'current_expr': replacement})
return safe_mapping[u'current_expr']
def _parenthesized(self, matchers, stringified_matches):
"""Parenthesizes a substitution for a template.
Args:
matchers: Dict mapping {var: Matcher that must match this variable}
stringified_matches: Dict mapping variable -> string for match.
Returns:
A tuple of two elements:
0: The full safely parenthesized substitution.
        1: A dict mapping var -> parenthesized string, for each match.
"""
safe_mapping = {}
unparenthesized_mapping = {}
for k, v in stringified_matches.items():
raw_string = stringified_matches[k]
if k in matchers:
safe_mapping[k] = '(%s)' % raw_string
unparenthesized_mapping[k] = raw_string
else:
# Non-expressions cannot be parenthesized and must be inserted verbatim.
safe_mapping[k] = raw_string
# We start parenthesized and try dropping parentheses to see if things
# still match the same way.
# First, build up the canonical replacement:
replacement = self._lexical_template.substitute(safe_mapping)
# Now let's parse the produced canonical replacement and make sure that it
# looks "correct" -- it is structurally the same as our template is,
# and the substituted in values are identical.
try:
parsed_template = matcher.parse_ast(replacement,
'<_BasePythonTemplate pattern>')
except matcher.ParseError as e:
raise formatting.RewriteError(
'Bug in Python formatter? Failed to parse formatted Python string as '
'Python: template=%r, substitute(matches for %r) -> %r: %s' %
(self.template, stringified_matches, replacement, e))
m = self._ast_matcher.match(
matcher.MatchContext(parsed_template), parsed_template.tree)
if m is None:
raise formatting.RewriteError(
'Bug in Python formatter? Even "safe" formatting of Python template '
'produced an incorrect and different AST, so it must be discarded: '
' template=%r, substitute(matches for %r) -> %r' %
(self.template, stringified_matches, replacement ))
for k, bound in m.bindings.items():
v = bound.value
if not matchers[k].match(
matcher.MatchContext(parsed_template), v.matched):
raise formatting.RewriteError(
'Bug in Python formatter? Even "safe" formatting of Python template'
' produced an incorrect and different AST, so it must be discarded '
'[variable %s=%r was corrupted -- %r]: '
'template=%r, substitute(matches for %r) -> %r' %
(k, matchers[k], v, self.template, stringified_matches,
replacement))
# The preliminaries are done: safe templating worked, and all that's left is
# to try to make the substitutions less over-parenthesized.
candidate_matcher = syntax_matchers.ast_matchers_matcher(
parsed_template.tree)
for k, unparenthesized in unparenthesized_mapping.items():
parenthesized = safe_mapping[k]
safe_mapping[k] = unparenthesized
try:
alt_replacement = self._lexical_template.substitute(safe_mapping)
alt_parsed = matcher.parse_ast(
alt_replacement, '<_BasePythonTemplate alternate proposal>')
except matcher.ParseError as e:
pass
else:
if candidate_matcher.match(
matcher.MatchContext(alt_parsed), alt_parsed.tree):
replacement = alt_replacement
continue
# if we made it this far, the replacement was not a success.
safe_mapping[k] = parenthesized
return replacement, safe_mapping
@cached_property.cached_property
def variables(self):
return self._lexical_template.variables
def _matchers_for_matches(matches):
"""Returns AST matchers for all expressions in `matches`.
Args:
matches: A mapping of variable name -> match
Returns:
A mapping of <variable name> -> <matcher that must match>.
Only variables that can be parenthesized are in this mapping, and the
matcher must match where those variables are substituted in.
"""
matchers = {}
for k, v in matches.items():
if (isinstance(v, matcher.LexicalASTMatch) and
isinstance(v.matched, ast.expr)):
matchers[k] = syntax_matchers.ast_matchers_matcher(v.matched)
else:
# as a fallback, treat it as a black box, and assume that the rest of the
# expression will catch things.
matchers[k] = base_matchers.Anything()
return matchers
class PythonTemplate(_BasePythonTemplate):
_pattern_factory = syntax_matchers.ModulePattern
class PythonExprTemplate(_BasePythonTemplate):
"""A template for a Python expression."""
@staticmethod
def _pattern_factory(pattern):
return ast_matchers.Module(
body=base_matchers.ItemsAre(
[ast_matchers.Expr(value=syntax_matchers.ExprPattern(pattern))]))
class PythonStmtTemplate(_BasePythonTemplate):
"""A template for a single Python statement."""
@staticmethod
def _pattern_factory(pattern):
return ast_matchers.Module(
body=base_matchers.ItemsAre([syntax_matchers.StmtPattern(pattern)]))
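# A minimal usage sketch (hypothetical values; in practice these templates are
# driven by refex's search-and-replace machinery rather than called directly):
#
#   template = PythonExprTemplate(u'$a * 2')
#   # If $a matched the expression `1 + 2`, the syntax-aware substitution is
#   # expected to yield '(1 + 2) * 2' rather than the incorrect '1 + 2 * 2'.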
|
[
"refex.python.matchers.syntax_matchers.ast_matchers_matcher",
"refex.python.matchers.base_matchers.Anything",
"six.viewkeys",
"attr.s",
"absl.logging.debug",
"attr.ib",
"refex.python.matcher.parse_ast",
"refex.python.matchers.syntax_matchers.ExprPattern",
"refex.formatting.RewriteError",
"refex.python.matcher.create_match",
"refex.python.matchers.syntax_matchers.StmtPattern",
"refex.python.python_pattern.token_pattern",
"refex.python.matcher.MatchContext",
"tokenize.untokenize",
"refex.formatting.stringify_matches",
"ast.parse",
"six.iteritems"
] |
[((1804, 1823), 'attr.s', 'attr.s', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1810, 1823), False, 'import attr\n'), ((4021, 4040), 'attr.s', 'attr.s', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4027, 4040), False, 'import attr\n'), ((2115, 2124), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2122, 2124), False, 'import attr\n'), ((2137, 2168), 'attr.ib', 'attr.ib', ([], {'init': '(False)', 'repr': '(False)'}), '(init=False, repr=False)\n', (2144, 2168), False, 'import attr\n'), ((2183, 2214), 'attr.ib', 'attr.ib', ([], {'init': '(False)', 'repr': '(False)'}), '(init=False, repr=False)\n', (2190, 2214), False, 'import attr\n'), ((2229, 2260), 'attr.ib', 'attr.ib', ([], {'init': '(False)', 'repr': '(False)'}), '(init=False, repr=False)\n', (2236, 2260), False, 'import attr\n'), ((4368, 4386), 'attr.ib', 'attr.ib', ([], {'type': 'Text'}), '(type=Text)\n', (4375, 4386), False, 'import attr\n'), ((4409, 4463), 'attr.ib', 'attr.ib', ([], {'repr': '(False)', 'init': '(False)', 'type': '_LexicalTemplate'}), '(repr=False, init=False, type=_LexicalTemplate)\n', (4416, 4463), False, 'import attr\n'), ((4481, 4534), 'attr.ib', 'attr.ib', ([], {'repr': '(False)', 'init': '(False)', 'type': 'matcher.Matcher'}), '(repr=False, init=False, type=matcher.Matcher)\n', (4488, 4534), False, 'import attr\n'), ((2474, 2517), 'refex.python.python_pattern.token_pattern', 'python_pattern.token_pattern', (['self.template'], {}), '(self.template)\n', (2502, 2517), False, 'from refex.python import python_pattern\n'), ((3458, 3559), 'absl.logging.debug', 'logging.debug', (['"""Applying %r to tokens %r for substitution of %r"""', 'free_vars', 'tokens', 'replacements'], {}), "('Applying %r to tokens %r for substitution of %r', free_vars,\n tokens, replacements)\n", (3471, 3559), False, 'from absl import logging\n'), ((3594, 3621), 'six.iteritems', 'six.iteritems', (['replacements'], {}), '(replacements)\n', (3607, 3621), False, 'import six\n'), ((3990, 4017), 'tokenize.untokenize', 'tokenize.untokenize', (['tokens'], {}), '(tokens)\n', (4009, 4017), False, 'import tokenize\n'), ((9571, 9593), 'ast.parse', 'ast.parse', (['replacement'], {}), '(replacement)\n', (9580, 9593), False, 'import ast\n'), ((9864, 9934), 'refex.python.matchers.syntax_matchers.ast_matchers_matcher', 'syntax_matchers.ast_matchers_matcher', (['parsed_replacement.body[0].value'], {}), '(parsed_replacement.body[0].value)\n', (9900, 9934), False, 'from refex.python.matchers import syntax_matchers\n'), ((13031, 13089), 'refex.python.matchers.syntax_matchers.ast_matchers_matcher', 'syntax_matchers.ast_matchers_matcher', (['parsed_template.tree'], {}), '(parsed_template.tree)\n', (13067, 13089), False, 'from refex.python.matchers import syntax_matchers\n'), ((2784, 2806), 'six.viewkeys', 'six.viewkeys', (['var_to_i'], {}), '(var_to_i)\n', (2796, 2806), False, 'import six\n'), ((5365, 5402), 'refex.formatting.stringify_matches', 'formatting.stringify_matches', (['matches'], {}), '(matches)\n', (5393, 5402), False, 'from refex import formatting\n'), ((8795, 8886), 'refex.formatting.RewriteError', 'formatting.RewriteError', (['("Bug in Python formatter? Couldn\'t find parent of %r" % match)'], {}), '(\n "Bug in Python formatter? 
Couldn\'t find parent of %r" % match)\n', (8818, 8886), False, 'from refex import formatting\n'), ((9713, 9828), 'refex.formatting.RewriteError', 'formatting.RewriteError', (['("Non-expression template can\'t be used in expression context: %s" % self.\n template)'], {}), '(\n "Non-expression template can\'t be used in expression context: %s" %\n self.template)\n', (9736, 9828), False, 'from refex import formatting\n'), ((11458, 11521), 'refex.python.matcher.parse_ast', 'matcher.parse_ast', (['replacement', '"""<_BasePythonTemplate pattern>"""'], {}), "(replacement, '<_BasePythonTemplate pattern>')\n", (11475, 11521), False, 'from refex.python import matcher\n'), ((11896, 11933), 'refex.python.matcher.MatchContext', 'matcher.MatchContext', (['parsed_template'], {}), '(parsed_template)\n', (11916, 11933), False, 'from refex.python import matcher\n'), ((11987, 12255), 'refex.formatting.RewriteError', 'formatting.RewriteError', (['(\'Bug in Python formatter? Even "safe" formatting of Python template produced an incorrect and different AST, so it must be discarded: template=%r, substitute(matches for %r) -> %r\'\n % (self.template, stringified_matches, replacement))'], {}), '(\n \'Bug in Python formatter? Even "safe" formatting of Python template produced an incorrect and different AST, so it must be discarded: template=%r, substitute(matches for %r) -> %r\'\n % (self.template, stringified_matches, replacement))\n', (12010, 12255), False, 'from refex import formatting\n'), ((14444, 14491), 'refex.python.matchers.syntax_matchers.ast_matchers_matcher', 'syntax_matchers.ast_matchers_matcher', (['v.matched'], {}), '(v.matched)\n', (14480, 14491), False, 'from refex.python.matchers import syntax_matchers\n'), ((14640, 14664), 'refex.python.matchers.base_matchers.Anything', 'base_matchers.Anything', ([], {}), '()\n', (14662, 14664), False, 'from refex.python.matchers import base_matchers\n'), ((7954, 7990), 'refex.python.matcher.create_match', 'matcher.create_match', (['parsed', 'parent'], {}), '(parsed, parent)\n', (7974, 7990), False, 'from refex.python import matcher\n'), ((11612, 11829), 'refex.formatting.RewriteError', 'formatting.RewriteError', (["('Bug in Python formatter? Failed to parse formatted Python string as Python: template=%r, substitute(matches for %r) -> %r: %s'\n % (self.template, stringified_matches, replacement, e))"], {}), "(\n 'Bug in Python formatter? Failed to parse formatted Python string as Python: template=%r, substitute(matches for %r) -> %r: %s'\n % (self.template, stringified_matches, replacement, e))\n", (11635, 11829), False, 'from refex import formatting\n'), ((12464, 12787), 'refex.formatting.RewriteError', 'formatting.RewriteError', (['(\'Bug in Python formatter? Even "safe" formatting of Python template produced an incorrect and different AST, so it must be discarded [variable %s=%r was corrupted -- %r]: template=%r, substitute(matches for %r) -> %r\'\n % (k, matchers[k], v, self.template, stringified_matches, replacement))'], {}), '(\n \'Bug in Python formatter? 
Even "safe" formatting of Python template produced an incorrect and different AST, so it must be discarded [variable %s=%r was corrupted -- %r]: template=%r, substitute(matches for %r) -> %r\'\n % (k, matchers[k], v, self.template, stringified_matches, replacement))\n', (12487, 12787), False, 'from refex import formatting\n'), ((13347, 13425), 'refex.python.matcher.parse_ast', 'matcher.parse_ast', (['alt_replacement', '"""<_BasePythonTemplate alternate proposal>"""'], {}), "(alt_replacement, '<_BasePythonTemplate alternate proposal>')\n", (13364, 13425), False, 'from refex.python import matcher\n'), ((12399, 12436), 'refex.python.matcher.MatchContext', 'matcher.MatchContext', (['parsed_template'], {}), '(parsed_template)\n', (12419, 12436), False, 'from refex.python import matcher\n'), ((13550, 13582), 'refex.python.matcher.MatchContext', 'matcher.MatchContext', (['alt_parsed'], {}), '(alt_parsed)\n', (13570, 13582), False, 'from refex.python import matcher\n'), ((15287, 15323), 'refex.python.matchers.syntax_matchers.StmtPattern', 'syntax_matchers.StmtPattern', (['pattern'], {}), '(pattern)\n', (15314, 15323), False, 'from refex.python.matchers import syntax_matchers\n'), ((15028, 15064), 'refex.python.matchers.syntax_matchers.ExprPattern', 'syntax_matchers.ExprPattern', (['pattern'], {}), '(pattern)\n', (15055, 15064), False, 'from refex.python.matchers import syntax_matchers\n')]
|
from subprocess import check_output
from os import system
system ("wp post delete 1")
system("wp post create --post_title='A first post' --post_status=publish --post_date='2014-10-01 07:00:00' --post_content=\"This is the content\" --post_author=1")
system("wp post create --post_title='A second post' --post_status=publish --post_date='2014-10-02 07:00:00' --post_content=\"This is the second post content\" --post_author=1 ")
system("wp post create --post_title='A third post' --post_status=publish --post_date='2014-10-10 07:00:00' --post_content=\"This is the third post content\" --post_author=1 ")
system("wp post create --post_title='A fourth post' --post_status=publish --post_date='2014-10-15 07:00:00' --post_content=\"This is the fourth post content\" --post_author=1 ")
system("wp post create --post_title='A sports post' --post_status=publish --post_date='2014-10-20 07:00:00' --post_content=\"This is the sports post content\" --post_author=1")
system("wp post create --post_type=page --post_title='A first page' --post_status=publish --post_date='2014-10-15 07:00:00' --post_content=\"This is the first page content\" --post_author=1")
system("wp option update permalink_structure '/%year%/%monthnum%/%postname%/' ")
system("wp plugin activate --all")
system("wp user update 1 --display_name='<NAME>' --first_name='Ira' --last_name='Rubel' ")
system("wp eval-file create_password.php")
(p5, p4, p3, p2, p1) = check_output(["wp","post","list","--field=ID"]).split()
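# `wp post list --field=ID` is assumed here to list the newest post first (its
# default date ordering), hence the reversed unpacking into p5..p1.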
system("wp post term add %s post_tag tag1" % p1)
system("wp post term add %s post_tag tag1" % p2)
system("wp post term add %s post_tag tag2" % p2)
system("wp post term add %s post_tag tag1" % p3)
system("wp post term add %s category cat1" % p1)
system("wp post term add %s departments sports" % p5)
|
[
"subprocess.check_output",
"os.system"
] |
[((59, 85), 'os.system', 'system', (['"""wp post delete 1"""'], {}), "('wp post delete 1')\n", (65, 85), False, 'from os import system\n'), ((88, 263), 'os.system', 'system', (['"""wp post create --post_title=\'A first post\' --post_status=publish --post_date=\'2014-10-01 07:00:00\' --post_content="This is the content" --post_author=1"""'], {}), '(\n \'wp post create --post_title=\\\'A first post\\\' --post_status=publish --post_date=\\\'2014-10-01 07:00:00\\\' --post_content="This is the content" --post_author=1\'\n )\n', (94, 263), False, 'from os import system\n'), ((252, 441), 'os.system', 'system', (['"""wp post create --post_title=\'A second post\' --post_status=publish --post_date=\'2014-10-02 07:00:00\' --post_content="This is the second post content" --post_author=1 """'], {}), '(\n \'wp post create --post_title=\\\'A second post\\\' --post_status=publish --post_date=\\\'2014-10-02 07:00:00\\\' --post_content="This is the second post content" --post_author=1 \'\n )\n', (258, 441), False, 'from os import system\n'), ((430, 617), 'os.system', 'system', (['"""wp post create --post_title=\'A third post\' --post_status=publish --post_date=\'2014-10-10 07:00:00\' --post_content="This is the third post content" --post_author=1 """'], {}), '(\n \'wp post create --post_title=\\\'A third post\\\' --post_status=publish --post_date=\\\'2014-10-10 07:00:00\\\' --post_content="This is the third post content" --post_author=1 \'\n )\n', (436, 617), False, 'from os import system\n'), ((606, 795), 'os.system', 'system', (['"""wp post create --post_title=\'A fourth post\' --post_status=publish --post_date=\'2014-10-15 07:00:00\' --post_content="This is the fourth post content" --post_author=1 """'], {}), '(\n \'wp post create --post_title=\\\'A fourth post\\\' --post_status=publish --post_date=\\\'2014-10-15 07:00:00\\\' --post_content="This is the fourth post content" --post_author=1 \'\n )\n', (612, 795), False, 'from os import system\n'), ((784, 972), 'os.system', 'system', (['"""wp post create --post_title=\'A sports post\' --post_status=publish --post_date=\'2014-10-20 07:00:00\' --post_content="This is the sports post content" --post_author=1"""'], {}), '(\n \'wp post create --post_title=\\\'A sports post\\\' --post_status=publish --post_date=\\\'2014-10-20 07:00:00\\\' --post_content="This is the sports post content" --post_author=1\'\n )\n', (790, 972), False, 'from os import system\n'), ((961, 1164), 'os.system', 'system', (['"""wp post create --post_type=page --post_title=\'A first page\' --post_status=publish --post_date=\'2014-10-15 07:00:00\' --post_content="This is the first page content" --post_author=1"""'], {}), '(\n \'wp post create --post_type=page --post_title=\\\'A first page\\\' --post_status=publish --post_date=\\\'2014-10-15 07:00:00\\\' --post_content="This is the first page content" --post_author=1\'\n )\n', (967, 1164), False, 'from os import system\n'), ((1153, 1238), 'os.system', 'system', (['"""wp option update permalink_structure \'/%year%/%monthnum%/%postname%/\' """'], {}), '("wp option update permalink_structure \'/%year%/%monthnum%/%postname%/\' "\n )\n', (1159, 1238), False, 'from os import system\n'), ((1234, 1268), 'os.system', 'system', (['"""wp plugin activate --all"""'], {}), "('wp plugin activate --all')\n", (1240, 1268), False, 'from os import system\n'), ((1269, 1369), 'os.system', 'system', (['"""wp user update 1 --display_name=\'<NAME>\' --first_name=\'Ira\' --last_name=\'Rubel\' """'], {}), '(\n "wp user update 1 --display_name=\'<NAME>\' 
--first_name=\'Ira\' --last_name=\'Rubel\' "\n )\n', (1275, 1369), False, 'from os import system\n'), ((1360, 1402), 'os.system', 'system', (['"""wp eval-file create_password.php"""'], {}), "('wp eval-file create_password.php')\n", (1366, 1402), False, 'from os import system\n'), ((1484, 1532), 'os.system', 'system', (["('wp post term add %s post_tag tag1' % p1)"], {}), "('wp post term add %s post_tag tag1' % p1)\n", (1490, 1532), False, 'from os import system\n'), ((1533, 1581), 'os.system', 'system', (["('wp post term add %s post_tag tag1' % p2)"], {}), "('wp post term add %s post_tag tag1' % p2)\n", (1539, 1581), False, 'from os import system\n'), ((1582, 1630), 'os.system', 'system', (["('wp post term add %s post_tag tag2' % p2)"], {}), "('wp post term add %s post_tag tag2' % p2)\n", (1588, 1630), False, 'from os import system\n'), ((1631, 1679), 'os.system', 'system', (["('wp post term add %s post_tag tag1' % p3)"], {}), "('wp post term add %s post_tag tag1' % p3)\n", (1637, 1679), False, 'from os import system\n'), ((1680, 1728), 'os.system', 'system', (["('wp post term add %s category cat1' % p1)"], {}), "('wp post term add %s category cat1' % p1)\n", (1686, 1728), False, 'from os import system\n'), ((1729, 1782), 'os.system', 'system', (["('wp post term add %s departments sports' % p5)"], {}), "('wp post term add %s departments sports' % p5)\n", (1735, 1782), False, 'from os import system\n'), ((1427, 1477), 'subprocess.check_output', 'check_output', (["['wp', 'post', 'list', '--field=ID']"], {}), "(['wp', 'post', 'list', '--field=ID'])\n", (1439, 1477), False, 'from subprocess import check_output\n')]
|
"""
CLI utilities.
"""
import click
def running_command_name() -> str:
"""
Returns the current CLI command name as a space-separated string, or
``id3c`` if not running under any command.
"""
appname = None
    context = click.get_current_context(silent=True)
if context:
appname = context.command_path
if not appname:
appname = "id3c"
return appname
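# Illustrative behaviour (hypothetical command names): when running a nested
# command such as `id3c etl enroll`, context.command_path is expected to be
# "id3c etl enroll" and is returned as-is; with no active click context (for
# example, when called from library code or tests) the fallback "id3c" is used.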
|
[
"click.get_current_context"
] |
[((242, 280), 'click.get_current_context', 'click.get_current_context', ([], {'silent': '(True)'}), '(silent=True)\n', (267, 280), False, 'import click\n')]
|
#!/usr/bin/env python
from os.path import join, dirname
from cloudify import ctx
ctx.download_resource(
join('components', 'utils.py'),
join(dirname(__file__), 'utils.py'))
import utils # NOQA
runtime_props = ctx.instance.runtime_properties
SERVICE_NAME = runtime_props['service_name']
HOME_DIR = runtime_props['home_dir']
@utils.retry(ValueError)
def check_worker_running():
"""Use `celery status` to check if the worker is running."""
work_dir = join(HOME_DIR, 'work')
celery_path = join(HOME_DIR, 'env', 'bin', 'celery')
result = utils.sudo([
'CELERY_WORK_DIR={0}'.format(work_dir),
celery_path,
'--config=cloudify.broker_config',
'status'
], ignore_failures=True)
if result.returncode != 0:
raise ValueError('celery status: worker not running')
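# `utils.retry(ValueError)` is assumed to re-invoke check_worker_running while
# it keeps raising ValueError, so the ValueError above acts as a "worker not
# ready yet" signal rather than a fatal error.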
ctx.logger.info('Starting Management Worker Service...')
utils.start_service(SERVICE_NAME)
utils.systemd.verify_alive(SERVICE_NAME)
try:
check_worker_running()
except ValueError:
ctx.abort_operation('Celery worker failed to start')
|
[
"utils.start_service",
"os.path.dirname",
"cloudify.ctx.logger.info",
"cloudify.ctx.abort_operation",
"utils.retry",
"os.path.join",
"utils.systemd.verify_alive"
] |
[((339, 362), 'utils.retry', 'utils.retry', (['ValueError'], {}), '(ValueError)\n', (350, 362), False, 'import utils\n'), ((830, 886), 'cloudify.ctx.logger.info', 'ctx.logger.info', (['"""Starting Management Worker Service..."""'], {}), "('Starting Management Worker Service...')\n", (845, 886), False, 'from cloudify import ctx\n'), ((887, 920), 'utils.start_service', 'utils.start_service', (['SERVICE_NAME'], {}), '(SERVICE_NAME)\n', (906, 920), False, 'import utils\n'), ((922, 962), 'utils.systemd.verify_alive', 'utils.systemd.verify_alive', (['SERVICE_NAME'], {}), '(SERVICE_NAME)\n', (948, 962), False, 'import utils\n'), ((111, 141), 'os.path.join', 'join', (['"""components"""', '"""utils.py"""'], {}), "('components', 'utils.py')\n", (115, 141), False, 'from os.path import join, dirname\n'), ((471, 493), 'os.path.join', 'join', (['HOME_DIR', '"""work"""'], {}), "(HOME_DIR, 'work')\n", (475, 493), False, 'from os.path import join, dirname\n'), ((512, 550), 'os.path.join', 'join', (['HOME_DIR', '"""env"""', '"""bin"""', '"""celery"""'], {}), "(HOME_DIR, 'env', 'bin', 'celery')\n", (516, 550), False, 'from os.path import join, dirname\n'), ((152, 169), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (159, 169), False, 'from os.path import join, dirname\n'), ((1019, 1071), 'cloudify.ctx.abort_operation', 'ctx.abort_operation', (['"""Celery worker failed to start"""'], {}), "('Celery worker failed to start')\n", (1038, 1071), False, 'from cloudify import ctx\n')]
|
from typing import Any, List, Literal, TypedDict
from .FHIR_Attachment import FHIR_Attachment
from .FHIR_ClaimResponse_AddItem import FHIR_ClaimResponse_AddItem
from .FHIR_ClaimResponse_Adjudication import FHIR_ClaimResponse_Adjudication
from .FHIR_ClaimResponse_Error import FHIR_ClaimResponse_Error
from .FHIR_ClaimResponse_Insurance import FHIR_ClaimResponse_Insurance
from .FHIR_ClaimResponse_Item import FHIR_ClaimResponse_Item
from .FHIR_ClaimResponse_Payment import FHIR_ClaimResponse_Payment
from .FHIR_ClaimResponse_ProcessNote import FHIR_ClaimResponse_ProcessNote
from .FHIR_ClaimResponse_Total import FHIR_ClaimResponse_Total
from .FHIR_code import FHIR_code
from .FHIR_CodeableConcept import FHIR_CodeableConcept
from .FHIR_dateTime import FHIR_dateTime
from .FHIR_Element import FHIR_Element
from .FHIR_id import FHIR_id
from .FHIR_Identifier import FHIR_Identifier
from .FHIR_Meta import FHIR_Meta
from .FHIR_Narrative import FHIR_Narrative
from .FHIR_Period import FHIR_Period
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
from .FHIR_uri import FHIR_uri
# This resource provides the adjudication details from the processing of a Claim resource.
FHIR_ClaimResponse = TypedDict(
"FHIR_ClaimResponse",
{
# This is a ClaimResponse resource
"resourceType": Literal["ClaimResponse"],
# The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes.
"id": FHIR_id,
# The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource.
"meta": FHIR_Meta,
# A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc.
"implicitRules": FHIR_uri,
# Extensions for implicitRules
"_implicitRules": FHIR_Element,
# The base language in which the resource is written.
"language": FHIR_code,
# Extensions for language
"_language": FHIR_Element,
# A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety.
"text": FHIR_Narrative,
# These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope.
"contained": List[Any],
# May be used to represent additional information that is not part of the basic definition of the resource. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# A unique identifier assigned to this claim response.
"identifier": List[FHIR_Identifier],
# The status of the resource instance.
"status": FHIR_code,
# Extensions for status
"_status": FHIR_Element,
# A finer grained suite of claim type codes which may convey additional information such as Inpatient vs Outpatient and/or a specialty service.
"type": FHIR_CodeableConcept,
# A finer grained suite of claim type codes which may convey additional information such as Inpatient vs Outpatient and/or a specialty service.
"subType": FHIR_CodeableConcept,
# A code to indicate whether the nature of the request is: to request adjudication of products and services previously rendered; or requesting authorization and adjudication for provision in the future; or requesting the non-binding adjudication of the listed products and services which could be provided in the future.
"use": FHIR_code,
# Extensions for use
"_use": FHIR_Element,
        # The party to whom the professional services and/or products have been supplied or are being considered and for whom actual or forecast reimbursement is sought.
"patient": FHIR_Reference,
# The date this resource was created.
"created": FHIR_dateTime,
# Extensions for created
"_created": FHIR_Element,
# The party responsible for authorization, adjudication and reimbursement.
"insurer": FHIR_Reference,
# The provider which is responsible for the claim, predetermination or preauthorization.
"requestor": FHIR_Reference,
# Original request resource reference.
"request": FHIR_Reference,
# The outcome of the claim, predetermination, or preauthorization processing.
"outcome": FHIR_code,
# Extensions for outcome
"_outcome": FHIR_Element,
# A human readable description of the status of the adjudication.
"disposition": FHIR_string,
# Extensions for disposition
"_disposition": FHIR_Element,
# Reference from the Insurer which is used in later communications which refers to this adjudication.
"preAuthRef": FHIR_string,
# Extensions for preAuthRef
"_preAuthRef": FHIR_Element,
# The time frame during which this authorization is effective.
"preAuthPeriod": FHIR_Period,
# Type of Party to be reimbursed: subscriber, provider, other.
"payeeType": FHIR_CodeableConcept,
# A claim line. Either a simple (a product or service) or a 'group' of details which can also be a simple items or groups of sub-details.
"item": List[FHIR_ClaimResponse_Item],
# The first-tier service adjudications for payor added product or service lines.
"addItem": List[FHIR_ClaimResponse_AddItem],
# The adjudication results which are presented at the header level rather than at the line-item or add-item levels.
"adjudication": List[FHIR_ClaimResponse_Adjudication],
# Categorized monetary totals for the adjudication.
"total": List[FHIR_ClaimResponse_Total],
# Payment details for the adjudication of the claim.
"payment": FHIR_ClaimResponse_Payment,
# A code, used only on a response to a preauthorization, to indicate whether the benefits payable have been reserved and for whom.
"fundsReserve": FHIR_CodeableConcept,
# A code for the form to be used for printing the content.
"formCode": FHIR_CodeableConcept,
# The actual form, by reference or inclusion, for printing the content or an EOB.
"form": FHIR_Attachment,
# A note that describes or explains adjudication results in a human readable form.
"processNote": List[FHIR_ClaimResponse_ProcessNote],
# Request for additional supporting or authorizing information.
"communicationRequest": List[FHIR_Reference],
# Financial instruments for reimbursement for the health care products and services specified on the claim.
"insurance": List[FHIR_ClaimResponse_Insurance],
# Errors encountered during the processing of the adjudication.
"error": List[FHIR_ClaimResponse_Error],
},
total=False,
)
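# A minimal illustrative instance (not taken from the FHIR specification).
# Because the TypedDict is declared with total=False, every key is optional,
# so a partial dict such as the following is accepted by type checkers:
#
#   example_response: FHIR_ClaimResponse = {
#       "resourceType": "ClaimResponse",
#       "status": "active",
#       "use": "claim",
#       "outcome": "complete",
#   }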
|
[
"typing.TypedDict"
] |
[((1218, 2721), 'typing.TypedDict', 'TypedDict', (['"""FHIR_ClaimResponse"""', "{'resourceType': Literal['ClaimResponse'], 'id': FHIR_id, 'meta': FHIR_Meta,\n 'implicitRules': FHIR_uri, '_implicitRules': FHIR_Element, 'language':\n FHIR_code, '_language': FHIR_Element, 'text': FHIR_Narrative,\n 'contained': List[Any], 'extension': List[Any], 'modifierExtension':\n List[Any], 'identifier': List[FHIR_Identifier], 'status': FHIR_code,\n '_status': FHIR_Element, 'type': FHIR_CodeableConcept, 'subType':\n FHIR_CodeableConcept, 'use': FHIR_code, '_use': FHIR_Element, 'patient':\n FHIR_Reference, 'created': FHIR_dateTime, '_created': FHIR_Element,\n 'insurer': FHIR_Reference, 'requestor': FHIR_Reference, 'request':\n FHIR_Reference, 'outcome': FHIR_code, '_outcome': FHIR_Element,\n 'disposition': FHIR_string, '_disposition': FHIR_Element, 'preAuthRef':\n FHIR_string, '_preAuthRef': FHIR_Element, 'preAuthPeriod': FHIR_Period,\n 'payeeType': FHIR_CodeableConcept, 'item': List[FHIR_ClaimResponse_Item\n ], 'addItem': List[FHIR_ClaimResponse_AddItem], 'adjudication': List[\n FHIR_ClaimResponse_Adjudication], 'total': List[\n FHIR_ClaimResponse_Total], 'payment': FHIR_ClaimResponse_Payment,\n 'fundsReserve': FHIR_CodeableConcept, 'formCode': FHIR_CodeableConcept,\n 'form': FHIR_Attachment, 'processNote': List[\n FHIR_ClaimResponse_ProcessNote], 'communicationRequest': List[\n FHIR_Reference], 'insurance': List[FHIR_ClaimResponse_Insurance],\n 'error': List[FHIR_ClaimResponse_Error]}"], {'total': '(False)'}), "('FHIR_ClaimResponse', {'resourceType': Literal['ClaimResponse'],\n 'id': FHIR_id, 'meta': FHIR_Meta, 'implicitRules': FHIR_uri,\n '_implicitRules': FHIR_Element, 'language': FHIR_code, '_language':\n FHIR_Element, 'text': FHIR_Narrative, 'contained': List[Any],\n 'extension': List[Any], 'modifierExtension': List[Any], 'identifier':\n List[FHIR_Identifier], 'status': FHIR_code, '_status': FHIR_Element,\n 'type': FHIR_CodeableConcept, 'subType': FHIR_CodeableConcept, 'use':\n FHIR_code, '_use': FHIR_Element, 'patient': FHIR_Reference, 'created':\n FHIR_dateTime, '_created': FHIR_Element, 'insurer': FHIR_Reference,\n 'requestor': FHIR_Reference, 'request': FHIR_Reference, 'outcome':\n FHIR_code, '_outcome': FHIR_Element, 'disposition': FHIR_string,\n '_disposition': FHIR_Element, 'preAuthRef': FHIR_string, '_preAuthRef':\n FHIR_Element, 'preAuthPeriod': FHIR_Period, 'payeeType':\n FHIR_CodeableConcept, 'item': List[FHIR_ClaimResponse_Item], 'addItem':\n List[FHIR_ClaimResponse_AddItem], 'adjudication': List[\n FHIR_ClaimResponse_Adjudication], 'total': List[\n FHIR_ClaimResponse_Total], 'payment': FHIR_ClaimResponse_Payment,\n 'fundsReserve': FHIR_CodeableConcept, 'formCode': FHIR_CodeableConcept,\n 'form': FHIR_Attachment, 'processNote': List[\n FHIR_ClaimResponse_ProcessNote], 'communicationRequest': List[\n FHIR_Reference], 'insurance': List[FHIR_ClaimResponse_Insurance],\n 'error': List[FHIR_ClaimResponse_Error]}, total=False)\n", (1227, 2721), False, 'from typing import Any, List, Literal, TypedDict\n')]
|
from .quadpack import quad, dblquad, tplquad, nquad
excluded = ['excluded', 'quadpack']
__all__ = [s for s in dir() if not ((s in excluded) or s.startswith('_'))]
from labugr.testing.utils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
[
"labugr.testing.utils.PytestTester"
] |
[((218, 240), 'labugr.testing.utils.PytestTester', 'PytestTester', (['__name__'], {}), '(__name__)\n', (230, 240), False, 'from labugr.testing.utils import PytestTester\n')]
|
from __future__ import annotations
import itertools
from collections.abc import Iterator, Sequence
import more_itertools
from tqdm import trange
Arrows = dict[int, int]
def main():
# arrangement = [3, 8, 9, 1, 2, 5, 4, 6, 7]
arrangement = [4, 5, 9, 6, 7, 2, 8, 1, 3]
# Part 1
arrows = build_circular_arrows(arrangement)
crab_modify_arrows(arrows, arrangement[0], plucks=3, repeat=100)
final_arrangement = list(nodes_in_circle(arrows, start=1))
p1_answer = ''.join(str(n) for n in final_arrangement[1:])
print(p1_answer)
# Part 2
arrangement = arrangement + list(range(10, 1_000_001))
arrows = build_circular_arrows(arrangement)
crab_modify_arrows(arrows, arrangement[0], plucks=3, repeat=10_000_000)
fst, snd, trd = more_itertools.take(3, nodes_in_circle(arrows, start=1))
p2_answer = snd * trd
print(p2_answer)
def build_circular_arrows(arrangement: Sequence[int]) -> Arrows:
"""
Builds a circular graph as a dictionary mapping from one node label to the next.
"""
looped_arrangement = itertools.chain(arrangement, arrangement[:1])
arrows = {u: v for u, v in more_itertools.windowed(looped_arrangement, n=2)}
return arrows
def nodes_in_circle(arrows: Arrows, start: int) -> Iterator[int]:
"""
Obtains a sequence of node labels around the arrows graph
starting from the given `start` label until it reaches back to start.
"""
current = start
while True:
yield current
current = arrows[current]
if current == start:
break
def crab_modify_arrows(arrows: Arrows, current: int, plucks: int, repeat: int = 1) -> int:
"""
    Modifies the arrows graph in-place according to the crab's rules,
    starting at the given current node label.
    It returns the node label to use as *current* in the next step.
"""
for _ in trange(repeat):
plucked = more_itertools.take(plucks, nodes_in_circle(arrows, arrows[current]))
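        # Destination cup: count down from current - 1 (wrapping within 1..len(arrows))
        # and take the first label that was not just plucked.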
candidates = count_in_modulus(current - 1, -1, modulo=len(arrows))
dest = more_itertools.first_true(candidates, pred=lambda v: v not in plucked)
rear = plucked[-1]
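        # Splice in O(1): current skips past the plucked run, the run's rear points to
        # dest's old successor, and dest points to the head of the plucked run.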
arrows[current], arrows[rear], arrows[dest] = arrows[rear], arrows[dest], arrows[current]
current = arrows[current]
return current
def count_in_modulus(start: int, step: int = 1, *, modulo: int) -> Iterator[int]:
"""
Produces an arithmetic sequence of numbers under the given modulus
with 1-indexing (so remainder 0 would actually yield the modulus itself).
"""
for value in itertools.count(start, step):
yield value % modulo or modulo
if __name__ == '__main__':
main()
|
[
"more_itertools.windowed",
"tqdm.trange",
"itertools.count",
"itertools.chain",
"more_itertools.first_true"
] |
[((1074, 1119), 'itertools.chain', 'itertools.chain', (['arrangement', 'arrangement[:1]'], {}), '(arrangement, arrangement[:1])\n', (1089, 1119), False, 'import itertools\n'), ((1885, 1899), 'tqdm.trange', 'trange', (['repeat'], {}), '(repeat)\n', (1891, 1899), False, 'from tqdm import trange\n'), ((2594, 2622), 'itertools.count', 'itertools.count', (['start', 'step'], {}), '(start, step)\n', (2609, 2622), False, 'import itertools\n'), ((2079, 2149), 'more_itertools.first_true', 'more_itertools.first_true', (['candidates'], {'pred': '(lambda v: v not in plucked)'}), '(candidates, pred=lambda v: v not in plucked)\n', (2104, 2149), False, 'import more_itertools\n'), ((1151, 1199), 'more_itertools.windowed', 'more_itertools.windowed', (['looped_arrangement'], {'n': '(2)'}), '(looped_arrangement, n=2)\n', (1174, 1199), False, 'import more_itertools\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 15:44:11 2021
@author: lihuanyu
"""
import torch.nn as nn
import torch
import math
import torchsummary as summary
from torchstat import stat
import torch.nn.functional as F
from blurpool import BlurPool
__all__ = ["seedsortnet","seedsortnet75"]
class SubSpace_SFSAM(nn.Module):
def __init__(self, nin):
super(SubSpace_SFSAM, self).__init__()
self.conv_7x7 = nn.Conv2d(2, 1, kernel_size=7, stride=1, padding=3, groups=1)
self.bn_point = nn.BatchNorm2d(1, momentum=0.9)
self.relu_point = nn.ReLU(inplace=False)
self.softmax = nn.Softmax(dim=2)
def forward(self, x):
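        # Spatial attention: pool channel statistics (mean and max) along dim=1, fuse them
        # with a 7x7 conv, softmax over the flattened spatial positions, and use the result
        # as a mask that rescales x (with a residual add of x at the end).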
out_mean = torch.mean(x, dim=1, keepdim=True)
out_max, _ = torch.max(x, dim=1, keepdim=True)
out = [out_max, out_mean]
out = torch.cat(out,dim=1)
out = self.conv_7x7(out)
out = self.bn_point(out)
out = self.relu_point(out)
m, n, p, q = out.shape
out = self.softmax(out.view(m, n, -1))
out = out.view(m, n, p, q)
out = out.expand(x.shape[0], x.shape[1], x.shape[2], x.shape[3])
out = torch.mul(out, x)
out = out + x
return out
class SFSAM(nn.Module):
def __init__(self, nin, nout, h, w, num_splits):
super(SFSAM, self).__init__()
assert nin % num_splits == 0
self.nin = nin
self.nout = nout
self.h = h
self.w = w
self.num_splits = num_splits
self.subspaces = nn.ModuleList(
[SubSpace_SFSAM(int(self.nin / self.num_splits)) for i in range(self.num_splits)]
)
def forward(self, x):
group_size = int(self.nin / self.num_splits)
sub_feat = torch.chunk(x, self.num_splits, dim=1)
out = []
for idx, l in enumerate(self.subspaces):
out.append(self.subspaces[idx](sub_feat[idx]))
out = torch.cat(out, dim=1)
return out
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels)
self.relu6 = nn.ReLU6(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return self.relu6(x)
def _make_divisible(v, divisor, min_value=None):
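    # Round v to the nearest multiple of divisor (but not below min_value); if rounding
    # would drop the value by more than ~10%, bump it up by one extra divisor.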
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
class subsampled(nn.Module):
def __init__(self,in_channels,out_channels,filter_size=2,**kwargs):
super(subsampled, self).__init__()
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=1)
self.blurpool=BlurPool(in_channels, filt_size=filter_size, stride=2)
def forward(self, x):
x = self.maxpool(x)
x = self.blurpool(x)
return x
class Root_module(nn.Module):
def __init__(self, in_channels,ch3x3_conv,ch1x1_first,ch3x3,pool_proj):
super(Root_module, self).__init__()
self.conv1 = BasicConv2d(in_channels, ch3x3_conv, kernel_size=3, stride=1, padding=1)
self.branch1 = nn.Sequential(
BasicConv2d(ch3x3_conv, ch1x1_first, kernel_size=3,padding=1,stride=2),
BasicConv2d(ch1x1_first, ch3x3, kernel_size=1)
)
self.branch2 = nn.Sequential(
subsampled(16,16)
)
def forward(self, x):
x = self.conv1(x)
branch1 = self.branch1(x)
branch2 = self.branch2(x)
outputs = [branch1, branch2]
return torch.cat(outputs, 1)
class shield_block(nn.Module):
def __init__(self, inp, oup, expand_ratio,expand_channel):
self.identity_map = False
super(shield_block, self).__init__()
hidden_dim = inp // expand_ratio
if hidden_dim < oup / 6.:
hidden_dim = math.ceil(oup / 6.)
hidden_dim = _make_divisible(hidden_dim, 16)
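        # When inp == oup the output channels are split: the bottleneck path produces oup1
        # channels, branch1 produces oup2 = oup - oup1, and their concatenation (plus the
        # identity) restores oup channels.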
oup1 = math.ceil((oup/6.) * expand_channel)
oup2 = oup - oup1
if inp != oup:
self.conv = nn.Sequential(
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True),
)
if inp == oup:
self.identity_map = True
self.conv = nn.Sequential(
nn.Conv2d(inp, inp, 3, 1, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU6(inplace=True),
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.Conv2d(hidden_dim, oup1, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup1),
nn.ReLU6(inplace=True),
nn.Conv2d(oup1, oup1, 3, 1, 1, groups=oup1, bias=False),
nn.BatchNorm2d(oup1),
)
self.branch1 = nn.Sequential(
nn.Conv2d(inp, inp, 3, 1, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU6(inplace=True),
nn.Conv2d(inp, oup2, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup2),
nn.ReLU6(inplace=True),
)
def forward(self, x):
out = self.conv(x)
if self.identity_map == True:
identity = x
branch1 = self.branch1(x)
out = [out, branch1]
out = torch.cat(out, 1)
out += identity
return out
class Seedsortnet(nn.Module):
def __init__(self, num_classes=2, width=1,groups=4,expand_channel=4,init_weights=True):
super(Seedsortnet, self).__init__()
self.root_module = Root_module(3,16,32,16,16) # [-1, 32, 112, 112]
out1 = int(64*width)
out2 = int(128 *width)
out3 = int(192*width)
out4 = int(256 *width)
self.stage1_up = shield_block(32,out1,2,1)
self.stage1_1 = shield_block(out1,out1,6,expand_channel)
self.sfsam1 = SFSAM(out1,out1,112,112, groups)
self.translayer1 = subsampled(out1,out1)
self.stage2_up = shield_block(out1,out2,2,1)
self.stage2_1 = shield_block(out2,out2,6,expand_channel)
self.stage2_2 = shield_block(out2,out2,6,expand_channel)
self.stage2_3 = shield_block(out2,out2,6,expand_channel)
self.sfsam2 = SFSAM(out2,out2,56,56, groups)
self.translayer2 = subsampled(out2,out2)
self.stage3_up = shield_block(out2,out3,2,1)
self.stage3_1 = shield_block(out3,out3,6,expand_channel)
self.stage3_2 = shield_block(out3,out3,6,expand_channel)
self.stage3_3 = shield_block(out3,out3,6,expand_channel)
self.stage3_4 = shield_block(out3,out3,6,expand_channel)
self.sfsam3 = SFSAM(out3,out3,28,28,groups)
self.translayer3 = subsampled(out3,out3)
self.stage4_up = shield_block(out3,out4,2,1)
self.stage4_1 = shield_block(out4,out4,6,expand_channel)
self.stage4_2 = shield_block(out4,out4,6,expand_channel)
self.stage4_3 = shield_block(out4,out4,6,expand_channel)
self.sfsam4 = SFSAM(out4,out4,14,14,groups)
self.translayer4 = subsampled(out4,out4)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(out4, num_classes))
        if init_weights:
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
import scipy.stats as stats
X = stats.truncnorm(-2, 2, scale=0.01)
values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
values = values.view(m.weight.size())
with torch.no_grad():
m.weight.copy_(values)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.root_module(x)
x = self.stage1_up(x)
x = self.stage1_1(x)
x = self.sfsam1(x)
x = self.translayer1(x)
x = self.stage2_up(x)
x = self.stage2_1(x)
x = self.stage2_2(x)
x = self.stage2_3(x)
x = self.sfsam2(x)
x = self.translayer2(x)
x = self.stage3_up(x)
x = self.stage3_1(x)
x = self.stage3_2(x)
x = self.stage3_3(x)
x = self.stage3_4(x)
x = self.sfsam3(x)
x = self.translayer3(x)
x = self.stage4_up(x)
x = self.stage4_1(x)
x = self.stage4_2(x)
x = self.stage4_3(x)
x = self.sfsam4(x)
x = self.translayer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def seedsortnet(**kwargs):
"""
Constructs a Seedsortnet model
"""
return Seedsortnet(**kwargs)
def seedsortnet75(**kwargs):
"""
Constructs a Seedsortnet model
"""
return Seedsortnet(width=0.75,**kwargs)
if __name__=='__main__':
model = seedsortnet(groups=4)
model.eval()
print(model)
stat(model,(3, 224, 224))
|
[
"torch.nn.Dropout",
"scipy.stats.truncnorm",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.Softmax",
"torch.no_grad",
"torchstat.stat",
"torch.nn.Linear",
"torch.mean",
"math.ceil",
"torch.nn.Conv2d",
"torch.mul",
"torch.nn.BatchNorm2d",
"torch.max",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU",
"torch.nn.ReLU6",
"blurpool.BlurPool",
"torch.chunk"
] |
[((10182, 10208), 'torchstat.stat', 'stat', (['model', '(3, 224, 224)'], {}), '(model, (3, 224, 224))\n', (10186, 10208), False, 'from torchstat import stat\n'), ((458, 519), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2)', '(1)'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'groups': '(1)'}), '(2, 1, kernel_size=7, stride=1, padding=3, groups=1)\n', (467, 519), True, 'import torch.nn as nn\n'), ((545, 576), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1)'], {'momentum': '(0.9)'}), '(1, momentum=0.9)\n', (559, 576), True, 'import torch.nn as nn\n'), ((603, 625), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (610, 625), True, 'import torch.nn as nn\n'), ((650, 667), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(2)'}), '(dim=2)\n', (660, 667), True, 'import torch.nn as nn\n'), ((716, 750), 'torch.mean', 'torch.mean', (['x'], {'dim': '(1)', 'keepdim': '(True)'}), '(x, dim=1, keepdim=True)\n', (726, 750), False, 'import torch\n'), ((775, 808), 'torch.max', 'torch.max', (['x'], {'dim': '(1)', 'keepdim': '(True)'}), '(x, dim=1, keepdim=True)\n', (784, 808), False, 'import torch\n'), ((874, 895), 'torch.cat', 'torch.cat', (['out'], {'dim': '(1)'}), '(out, dim=1)\n', (883, 895), False, 'import torch\n'), ((1202, 1219), 'torch.mul', 'torch.mul', (['out', 'x'], {}), '(out, x)\n', (1211, 1219), False, 'import torch\n'), ((1813, 1851), 'torch.chunk', 'torch.chunk', (['x', 'self.num_splits'], {'dim': '(1)'}), '(x, self.num_splits, dim=1)\n', (1824, 1851), False, 'import torch\n'), ((1993, 2014), 'torch.cat', 'torch.cat', (['out'], {'dim': '(1)'}), '(out, dim=1)\n', (2002, 2014), False, 'import torch\n'), ((2195, 2253), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'bias': '(False)'}), '(in_channels, out_channels, bias=False, **kwargs)\n', (2204, 2253), True, 'import torch.nn as nn\n'), ((2272, 2300), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (2286, 2300), True, 'import torch.nn as nn\n'), ((2322, 2344), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2330, 2344), True, 'import torch.nn as nn\n'), ((2872, 2909), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(1)'}), '(kernel_size=2, stride=1)\n', (2884, 2909), True, 'import torch.nn as nn\n'), ((2932, 2986), 'blurpool.BlurPool', 'BlurPool', (['in_channels'], {'filt_size': 'filter_size', 'stride': '(2)'}), '(in_channels, filt_size=filter_size, stride=2)\n', (2940, 2986), False, 'from blurpool import BlurPool\n'), ((3882, 3903), 'torch.cat', 'torch.cat', (['outputs', '(1)'], {}), '(outputs, 1)\n', (3891, 3903), False, 'import torch\n'), ((4296, 4333), 'math.ceil', 'math.ceil', (['(oup / 6.0 * expand_channel)'], {}), '(oup / 6.0 * expand_channel)\n', (4305, 4333), False, 'import math\n'), ((8105, 8133), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1, 1)'], {}), '((1, 1))\n', (8125, 8133), True, 'import torch.nn as nn\n'), ((4195, 4215), 'math.ceil', 'math.ceil', (['(oup / 6.0)'], {}), '(oup / 6.0)\n', (4204, 4215), False, 'import math\n'), ((5939, 5956), 'torch.cat', 'torch.cat', (['out', '(1)'], {}), '(out, 1)\n', (5948, 5956), False, 'import torch\n'), ((8184, 8199), 'torch.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (8194, 8199), True, 'import torch.nn as nn\n'), ((8209, 8237), 'torch.nn.Linear', 'nn.Linear', (['out4', 'num_classes'], {}), '(out4, num_classes)\n', (8218, 8237), True, 'import torch.nn as nn\n'), ((4452, 4499), 'torch.nn.Conv2d', 'nn.Conv2d', 
(['inp', 'hidden_dim', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(inp, hidden_dim, 1, 1, 0, bias=False)\n', (4461, 4499), True, 'import torch.nn as nn\n'), ((4517, 4543), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['hidden_dim'], {}), '(hidden_dim)\n', (4531, 4543), True, 'import torch.nn as nn\n'), ((4561, 4608), 'torch.nn.Conv2d', 'nn.Conv2d', (['hidden_dim', 'oup', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(hidden_dim, oup, 1, 1, 0, bias=False)\n', (4570, 4608), True, 'import torch.nn as nn\n'), ((4626, 4645), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['oup'], {}), '(oup)\n', (4640, 4645), True, 'import torch.nn as nn\n'), ((4663, 4685), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4671, 4685), True, 'import torch.nn as nn\n'), ((4818, 4870), 'torch.nn.Conv2d', 'nn.Conv2d', (['inp', 'inp', '(3)', '(1)', '(1)'], {'groups': 'inp', 'bias': '(False)'}), '(inp, inp, 3, 1, 1, groups=inp, bias=False)\n', (4827, 4870), True, 'import torch.nn as nn\n'), ((4888, 4907), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['inp'], {}), '(inp)\n', (4902, 4907), True, 'import torch.nn as nn\n'), ((4925, 4947), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4933, 4947), True, 'import torch.nn as nn\n'), ((4965, 5012), 'torch.nn.Conv2d', 'nn.Conv2d', (['inp', 'hidden_dim', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(inp, hidden_dim, 1, 1, 0, bias=False)\n', (4974, 5012), True, 'import torch.nn as nn\n'), ((5030, 5056), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['hidden_dim'], {}), '(hidden_dim)\n', (5044, 5056), True, 'import torch.nn as nn\n'), ((5076, 5124), 'torch.nn.Conv2d', 'nn.Conv2d', (['hidden_dim', 'oup1', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(hidden_dim, oup1, 1, 1, 0, bias=False)\n', (5085, 5124), True, 'import torch.nn as nn\n'), ((5142, 5162), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['oup1'], {}), '(oup1)\n', (5156, 5162), True, 'import torch.nn as nn\n'), ((5180, 5202), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5188, 5202), True, 'import torch.nn as nn\n'), ((5220, 5275), 'torch.nn.Conv2d', 'nn.Conv2d', (['oup1', 'oup1', '(3)', '(1)', '(1)'], {'groups': 'oup1', 'bias': '(False)'}), '(oup1, oup1, 3, 1, 1, groups=oup1, bias=False)\n', (5229, 5275), True, 'import torch.nn as nn\n'), ((5293, 5313), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['oup1'], {}), '(oup1)\n', (5307, 5313), True, 'import torch.nn as nn\n'), ((5404, 5456), 'torch.nn.Conv2d', 'nn.Conv2d', (['inp', 'inp', '(3)', '(1)', '(1)'], {'groups': 'inp', 'bias': '(False)'}), '(inp, inp, 3, 1, 1, groups=inp, bias=False)\n', (5413, 5456), True, 'import torch.nn as nn\n'), ((5478, 5497), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['inp'], {}), '(inp)\n', (5492, 5497), True, 'import torch.nn as nn\n'), ((5519, 5541), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5527, 5541), True, 'import torch.nn as nn\n'), ((5584, 5625), 'torch.nn.Conv2d', 'nn.Conv2d', (['inp', 'oup2', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(inp, oup2, 1, 1, 0, bias=False)\n', (5593, 5625), True, 'import torch.nn as nn\n'), ((5647, 5667), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['oup2'], {}), '(oup2)\n', (5661, 5667), True, 'import torch.nn as nn\n'), ((5689, 5711), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5697, 5711), True, 'import torch.nn as nn\n'), ((8513, 8547), 'scipy.stats.truncnorm', 'stats.truncnorm', (['(-2)', '(2)'], {'scale': '(0.01)'}), '(-2, 2, 
scale=0.01)\n', (8528, 8547), True, 'import scipy.stats as stats\n'), ((8711, 8726), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8724, 8726), False, 'import torch\n'), ((8835, 8865), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (8852, 8865), True, 'import torch.nn as nn\n'), ((8882, 8910), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (8899, 8910), True, 'import torch.nn as nn\n')]
|
import ast
import Numberjack
from data.logic import _dimension_factory, _expr_transformer, _model, \
_predicates, _reference, dsl
from spec.mamba import *
with description('_expr_transformer.ExprTransformer'):
with it('instantiates'):
expect(calling(_expr_transformer.ExprTransformer, None)).not_to(raise_error)
with description('compile'):
with before.each:
self.factory = _dimension_factory._DimensionFactory()
self.model = _model._Model(self.factory)
self.transformer = _expr_transformer.ExprTransformer(self.model)
self.andy, self.bob = self.name = self.factory(name=['andy', 'bob'])
self.cherries, self.dates = self.fruit = self.factory(
fruit=['cherries', 'dates'])
self._10, self._11 = self.age = self.factory(age=[10, 11])
with it('resolves names'):
node = ast.Name(id='name["andy"].fruit["cherries"]', ctx=ast.Load())
transformed = self.transformer.visit(node)
expect(transformed).to(be_a(_reference.Reference))
expect(transformed._constraints).to(equal({
'name': 'andy',
'fruit': 'cherries'
}))
with it('resolves numbers'):
node = ast.Num(n=10)
transformed = self.transformer.visit(node)
expect(transformed).to(be_a(_reference.Reference))
expect(transformed._constraints).to(equal({'age': 10}))
with it('resolves strings'):
node = ast.Str(s='cherries')
transformed = self.transformer.visit(node)
expect(transformed).to(be_a(_reference.Reference))
expect(transformed._constraints).to(equal({'fruit': 'cherries'}))
with it('fails to visit unsupported nodes'):
expect(calling(self.transformer.compile, ast.Await())).to(
raise_error(NotImplementedError))
expect(calling(self.transformer.visit, ast.Await())).to(
raise_error(NotImplementedError))
expect(calling(self.transformer.generic_visit, ast.Await())).to(
raise_error(NotImplementedError))
with it('supports precise (2d) assignment'):
expr = self.name['andy'].fruit == self.fruit['cherries']
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal(
'(name["andy"].fruit["cherries"] == True)'))
with it('supports OR operation'):
expr = (self.name['andy'].fruit['cherries'] |
self.fruit['cherries'].name['bob'])
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal(
'(name["andy"].fruit["cherries"] or name["bob"].fruit["cherries"])'))
with it('supports XOR operation'):
expr = (self.name['andy'].fruit['cherries'] ^
self.fruit['cherries'].name['bob'])
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal(
'((name["andy"].fruit["cherries"] +'
' name["bob"].fruit["cherries"]) == 1)'))
with it('supports + operation, int on right'):
expr = self.name['andy'].age + 2
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal('(name["andy"].age in {10,11} + 2)'))
with it('supports + operation, int on left'):
expr = 2 + self.name['andy'].age
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal('(2 + name["andy"].age in {10,11})'))
with it('supports - operation, int on right'):
expr = self.name['andy'].age - 2
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal('(name["andy"].age in {10,11} - 2)'))
with it('supports - operation, int on left'):
expr = 2 - self.name['andy'].age
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal('(2 - name["andy"].age in {10,11})'))
with it('supports * operation, int on right'):
expr = self.name['andy'].age[10] * 10
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal('((name["andy"].age == 10) * 10)'))
with it('supports * operation, int on left'):
expr = 10 * self.name['andy'].age[10]
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
# For some reason(?) the operations are switched here.
expect(str(compiled)).to(equal('((name["andy"].age == 10) * 10)'))
with it('supports & operation'):
expr = self.andy[10] & self.bob[11]
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal(
'((name["andy"].age == 10) & (name["bob"].age == 11))'))
with it('supports ~ operation'):
expr = ~self.andy[10]
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal(
'((1 - (name["andy"].age == 10)) == True)'))
with it('supports call expressions'):
expr = dsl.abs(self.andy.age - self.bob.age)
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
# For some reason(?) the operations are switched here.
s = str(compiled).replace(' in {0,1}', '')
expect(s).to(equal(
'Abs((name["andy"].age in {10,11} -'
' name["bob"].age in {10,11}))'
))
with it('supports naked _DimensionSlice expressions'):
expr = self.name['andy'].age[10]
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal('((name["andy"].age == 10) == True)'))
with it('supports Call with builtin functions'):
expr = ast.parse('max(1, 3)').body[0]
compiled = self.transformer.compile(expr)
expect(str(compiled)).to(equal('3'))
with it('supports Call with function pointers'):
fn = mock.Mock(return_value=3)
expr = ast.Call(
func=fn,
args=[],
keywords=[],
)
compiled = self.transformer.compile(expr)
expect(fn).to(have_been_called)
expect(str(compiled)).to(equal('3'))
with description('regression tests'):
with before.each:
self.model = Numberjack.Model()
with it('resolves values before Numberjack uses them'):
a = dsl.variable('a')
b = dsl.variable('b')
c = a * b
expr = c == 1462
compiled = self.transformer.compile(expr)
expect(str(compiled)).to(equal('((a * b) == 1462)'))
expect(calling(self.model.add, compiled)).not_to(raise_error)
|
[
"ast.Num",
"data.logic.dsl.abs",
"ast.Load",
"ast.Call",
"Numberjack.Model",
"data.logic.dsl.variable",
"data.logic._expr_transformer.ExprTransformer",
"ast.Await",
"data.logic._dimension_factory._DimensionFactory",
"data.logic._model._Model",
"ast.parse",
"ast.Str"
] |
[((392, 430), 'data.logic._dimension_factory._DimensionFactory', '_dimension_factory._DimensionFactory', ([], {}), '()\n', (428, 430), False, 'from data.logic import _dimension_factory, _expr_transformer, _model, _predicates, _reference, dsl\n'), ((448, 475), 'data.logic._model._Model', '_model._Model', (['self.factory'], {}), '(self.factory)\n', (461, 475), False, 'from data.logic import _dimension_factory, _expr_transformer, _model, _predicates, _reference, dsl\n'), ((499, 544), 'data.logic._expr_transformer.ExprTransformer', '_expr_transformer.ExprTransformer', (['self.model'], {}), '(self.model)\n', (532, 544), False, 'from data.logic import _dimension_factory, _expr_transformer, _model, _predicates, _reference, dsl\n'), ((1129, 1142), 'ast.Num', 'ast.Num', ([], {'n': '(10)'}), '(n=10)\n', (1136, 1142), False, 'import ast\n'), ((1348, 1369), 'ast.Str', 'ast.Str', ([], {'s': '"""cherries"""'}), "(s='cherries')\n", (1355, 1369), False, 'import ast\n'), ((5125, 5162), 'data.logic.dsl.abs', 'dsl.abs', (['(self.andy.age - self.bob.age)'], {}), '(self.andy.age - self.bob.age)\n', (5132, 5162), False, 'from data.logic import _dimension_factory, _expr_transformer, _model, _predicates, _reference, dsl\n'), ((6033, 6072), 'ast.Call', 'ast.Call', ([], {'func': 'fn', 'args': '[]', 'keywords': '[]'}), '(func=fn, args=[], keywords=[])\n', (6041, 6072), False, 'import ast\n'), ((6309, 6327), 'Numberjack.Model', 'Numberjack.Model', ([], {}), '()\n', (6325, 6327), False, 'import Numberjack\n'), ((6399, 6416), 'data.logic.dsl.variable', 'dsl.variable', (['"""a"""'], {}), "('a')\n", (6411, 6416), False, 'from data.logic import _dimension_factory, _expr_transformer, _model, _predicates, _reference, dsl\n'), ((6427, 6444), 'data.logic.dsl.variable', 'dsl.variable', (['"""b"""'], {}), "('b')\n", (6439, 6444), False, 'from data.logic import _dimension_factory, _expr_transformer, _model, _predicates, _reference, dsl\n'), ((868, 878), 'ast.Load', 'ast.Load', ([], {}), '()\n', (876, 878), False, 'import ast\n'), ((5817, 5839), 'ast.parse', 'ast.parse', (['"""max(1, 3)"""'], {}), "('max(1, 3)')\n", (5826, 5839), False, 'import ast\n'), ((1635, 1646), 'ast.Await', 'ast.Await', ([], {}), '()\n', (1644, 1646), False, 'import ast\n'), ((1738, 1749), 'ast.Await', 'ast.Await', ([], {}), '()\n', (1747, 1749), False, 'import ast\n'), ((1849, 1860), 'ast.Await', 'ast.Await', ([], {}), '()\n', (1858, 1860), False, 'import ast\n')]
|
'''
GraphPy: Python module for graph-based learning algorithms. Efficient implementations of modern methods for graph-based semi-supervised learning and graph clustering.
See README.md file for usage.
Author: <NAME>, 2020
'''
import numpy as np
import datetime
import matplotlib.pyplot as plt
import matplotlib
import scipy.spatial as spatial
import scipy.optimize as opt
import numpy.random as random
import scipy.sparse as sparse
import scipy.sparse.linalg as splinalg
import scipy.sparse.csgraph as csgraph
import sklearn.cluster as cluster
from sklearn.decomposition import PCA
import sys, getopt, time, csv, torch, os, multiprocessing
from joblib import Parallel, delayed
from utils.non_neg_qpsolver import non_negative_qpsolver
clustering_algorithms = ['incres', 'spectral', 'spectralshimalik', 'spectralngjordanweiss']
# Print iterations progress
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='\r')
# Print New Line on Complete
if iteration == total:
print()
def load_mbo_eig(dataset, metric, k):
# Load eigenvector data if MBO selected
try:
location = os.path.dirname(os.path.realpath(__file__))
dataFile = dataset + "_" + metric + "_k%d" % k + "_spectrum.npz"
dataFile_path = os.path.join(location, 'MBOdata', dataFile)
M = np.load(dataFile_path, allow_pickle=True)
eigvals = M['eigenvalues']
eigvecs = M['eigenvectors']
except:
print("Could not find MBOdata/" + dataset + "_" + metric + "_k%d" % k + "_spectrum.npz")
print('You need to run ComputeEigenvectorsMBO.py first.')
sys.exit(2)
return eigvals, eigvecs
def load_label_permutation(dataset, label_perm='', t='-1'):
location = os.path.dirname(os.path.realpath(__file__))
dataFile = dataset + label_perm + "_permutations.npz"
dataFile_path = os.path.join(location, 'LabelPermutations', dataFile)
# Load label permutation
try:
M = np.load(dataFile_path, allow_pickle=True)
perm = M['perm']
except:
print('Cannot find ' + dataFile)
print('You need to run CreateLabelPermutation.py first.')
sys.exit(2)
# Restrict trials
t = [int(e) for e in t.split(',')]
if t[0] > -1:
if len(t) == 1:
perm = perm[0:t[0]]
else:
perm = perm[(t[0] - 1):t[1]]
return perm
def load_dataset(dataset, metric='L2'):
# For variational autoencoder the vae data, e.g., Data/MNIST_vae.npz must exist.
if metric[0:3] == 'vae' or metric[0:3] == 'aet':
dataFile = dataset + "_" + metric + ".npz"
else:
dataFile = dataset + "_raw.npz"
location = os.path.dirname(os.path.realpath(__file__))
dataFile_path = os.path.join(location, 'Data', dataFile)
# Try to Load data
try:
M = np.load(dataFile_path, allow_pickle=True)
data = M['data']
except:
print('Cannot find ' + dataFile + '.')
sys.exit(2)
return data
def load_labels(dataset):
location = os.path.dirname(os.path.realpath(__file__))
dataFile = dataset + "_labels.npz"
dataFile_path = os.path.join(location, 'Data', dataFile)
# Load labels
try:
M = np.load(dataFile_path, allow_pickle=True)
labels = M['labels']
except:
print('Cannot find dataset Data/' + dataFile)
sys.exit(2)
return labels
def load_kNN_data(dataset, metric='L2'):
location = os.path.dirname(os.path.realpath(__file__))
dataFile = dataset + "_" + metric + ".npz"
dataFile_path = os.path.join(location, 'kNNData', dataFile)
# Load kNN data
try:
M = np.load(dataFile_path, allow_pickle=True)
I = M['I']
J = M['J']
D = M['D']
except:
print('Cannot find ' + dataFile)
print('You need to run ComputeKNN.py.')
sys.exit(2)
return I, J, D
# Compute sizes of each class
def label_proportions(labels):
L = np.unique(labels)
L = L[L >= 0]
k = len(L)
# n = len(labels)
n = np.sum(labels >= 0)
beta = np.zeros((k,))
for i in range(k):
beta[i] = np.sum(labels == L[i]) / n
return beta
# Constructs a weight matrix for graph on mxn grid with NSEW neighbors
# def grid_graph(m, n):
# X, Y = np.mgrid[:m, :n]
#
# return W
# Reweights the graph to use self-tuning weights
def self_tuning(W, D, alpha):
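    # Self-tuning weights: rescale w_ij by (d_i * d_j)^(-alpha), where d_i is the distance
    # from x_i to its k-th nearest neighbor; alpha = 0 leaves W unchanged.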
if alpha != 0:
n = D.shape[0]
k = D.shape[1]
d = D[:, k - 1]
d = sparse.spdiags(d ** (-alpha), 0, n, n)
W = d * W * d
return W
# Reweights the graph based on a clustering prior
def cluster_prior(W, cluster_labels):
n = W.shape[0]
I, J, V = sparse.find(W)
K = cluster_labels[I] == cluster_labels[J]
V[K] = V[K] * 10
V = V / np.max(V)
W = sparse.coo_matrix((V, (I, J)), shape=(n, n)).tocsr()
return W
# Computes scattering transform of depth 2 of I
# Bruna, Joan, and <NAME>. "Invariant scattering convolution networks." IEEE transactions on pattern analysis and machine intelligence 35.8 (2013): 1872-1886.
def scattering_transform(I, n, m, depth=2):
from kymatio import Scattering2D
num_pts = I.shape[0]
K = torch.from_numpy(I.reshape((num_pts, n, m))).float().contiguous()
scattering = Scattering2D(J=depth, shape=(n, m))
Z = scattering(K).numpy()
l = Z.shape[1] * Z.shape[2] * Z.shape[3]
return Z.reshape((num_pts, l))
# Label permutations
# labels = labels
# T = number of trials
# r = label rate in (0,1)
def create_label_permutations_rate(labels, T, R):
perm = list()
n = labels.shape[0]
labelvals = np.unique(labels)
labelvals = labelvals[labelvals >= 0]
num_labels = len(labelvals)
num = np.zeros((num_labels,))
for i in range(num_labels):
num[i] = np.sum(labels == labelvals[i])
J = np.arange(n).astype(int)
for k in range(T):
for r in R:
L = []
for i in range(num_labels):
l = labelvals[i]
I = labels == l
K = J[I]
m = round(num[i] * r / 100)
L = L + random.choice(K, size=m.astype(int), replace=False).tolist()
L = np.array(L)
perm.append(L)
return perm
# Label permutations
# labels = labels
# T = number of trials
# m = vector of number of labels
def create_label_permutations(labels, T, m, multiplier=None):
# Find all unique labels >= 0
# Negative numbers indicate unlabeled nodes
unique_labels = np.unique(labels)
unique_labels = unique_labels[unique_labels >= 0]
perm = list()
n = labels.shape[0]
J = np.arange(n).astype(int)
for k in range(T):
for i in m:
L = []
ind = 0
for l in unique_labels:
I = labels == l
K = J[I]
if multiplier is None:
L = L + random.choice(K, size=i, replace=False).tolist()
else:
sze = int(np.round(i * multiplier[ind]))
L = L + random.choice(K, size=sze, replace=False).tolist()
ind = ind + 1
L = np.array(L)
perm.append(L)
return perm
# Randomly choose m labels per class
def randomize_labels(L, m):
perm = create_label_permutations(L, 1, [m])
return perm[0]
# Default function
def exp_weight(x):
return np.exp(-x)
# Pointwise max of non-negative sparse matrices A and B
def sparse_max(A, B):
I = (A + B) > 0
IB = B > A
IA = I - IB
return A.multiply(IA) + B.multiply(IB)
# Compute degrees of weight matrix W
def degrees(W):
return np.squeeze(np.array(np.sum(W, axis=1)))
# Multiply diagonal of matrix by degree
def diag_multiply(W, b):
n = W.shape[0] # Number of points
D = sparse.spdiags(W.diagonal(), 0, n, n)
return W - (1 - b) * D
# Compute degrees of weight matrix W
# Returns sparse matrix with degrees on diagonal
def degree_matrix(W, p=1):
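    # p = 1 gives D, p = -1 gives D^{-1}, and p = -1/2 gives D^{-1/2} (used by the
    # normalized Laplacians below).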
n = W.shape[0] # Number of points
# Construct sparse degree matrix
d = degrees(W)
D = sparse.spdiags(d ** p, 0, n, n)
return D.tocsr()
# Construct robin boundary condition matrix
def robin_bc_matrix(X, nu, eps, gamma):
n = X.shape[0]
Xtree = spatial.cKDTree(X)
_, nn_ind = Xtree.query(X + eps * nu)
# nn_dist = np.linalg.norm(X - X[nn_ind,:],axis=1)
nn_dist = eps * np.ones((n,))
# Robin matrix
A = sparse.spdiags(gamma + (1 - gamma) / nn_dist, 0, n, n)
B = sparse.coo_matrix(((1 - gamma) / nn_dist, (range(n), nn_ind)), shape=(n, n))
R = (A - B).tocsr()
return R
# Laplace matrix
# W = weight matrix
# norm = type of normalization
# Options: none, randomwalk, normalized
def graph_laplacian(W, norm="none"):
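    # norm="none": L = D - W;  "randomwalk1": D^{-1}(D - W);  "randomwalk2": (D - W)D^{-1};
    # "normalized": D^{-1/2}(D - W)D^{-1/2}.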
D = degree_matrix(W)
if norm == "none":
L = D - W
elif norm == "randomwalk1":
Dinv = degree_matrix(W, p=-1)
L = Dinv * (D - W)
elif norm == "randomwalk2":
Dinv = degree_matrix(W, p=-1)
L = (D - W) * Dinv
elif norm == "normalized":
Dinv2 = degree_matrix(W, p=-1 / 2)
L = Dinv2 * (D - W) * Dinv2
else:
print("Invalid option for graph Laplacian normalization. Returning unnormalized Laplacian.")
L = D - W
return L.tocsr()
# Graph infinity Laplacian
# W = sparse weight matrix
# u = function on graph
def graph_phi_laplacian(W, u, phi, I=None, J=None, V=None):
n = W.shape[0]
if I is None or J is None:
I, J, V = sparse.find(W)
w = u[J] - u[I]
a = np.absolute(w)
pa = phi(a)
m = pa / (a + 1e-13)
M = sparse.coo_matrix((V * pa / (a + 1e-13), (I, J)), shape=(n, n)).tocsr()
m = degrees(M)
M = sparse.coo_matrix((V * pa * np.sign(w), (I, J)), shape=(n, n)).tocsr()
M = np.squeeze(np.array(np.sum(M, axis=1)))
return M, m
# Graph infinity Laplacian
# W = sparse weight matrix
# u = function on graph
def graph_infinity_laplacian(W, u, I=None, J=None, V=None):
n = W.shape[0]
if I is None or J is None:
I, J, V = sparse.find(W)
M = sparse.coo_matrix((V * (u[J] - u[I]), (I, J)), shape=(n, n)).tocsr()
M = M.min(axis=1) + M.max(axis=1)
return M.toarray().flatten()
# Construct epsilon-graph sparse weight matrix
def eps_weight_matrix(X, eps, f=exp_weight):
n = X.shape[0] # Number of points
# Rangesearch to find nearest neighbors
Xtree = spatial.cKDTree(X)
M = Xtree.query_pairs(eps)
M = np.array(list(M))
# Differences between points and neighbors
V = X[M[:, 0], :] - X[M[:, 1], :]
D = np.sum(V * V, axis=1)
# Weights
D = f(4 * D / (eps * eps))
# Symmetrize weights and add diagonal entries
D = np.concatenate((D, D, f(0) * np.ones(n, )))
M1 = np.concatenate((M[:, 0], M[:, 1], np.arange(0, n)))
M2 = np.concatenate((M[:, 1], M[:, 0], np.arange(0, n)))
# Construct sparse matrix and convert to Compressed Sparse Row (CSR) format
W = sparse.coo_matrix((D, (M1, M2)), shape=(n, n))
return W.tocsr()
# Exact knnsearch
def knnsearch(X, k):
# KDtree to find nearest neighbors
n = X.shape[0]
Xtree = spatial.cKDTree(X)
D, J = Xtree.query(X, k=k)
I = np.ones((n, k), dtype=int) * J[:, 0][:, None]
return I, J, D
# Perform approximate nearest neighbor search, returning indices I,J of neighbors, and distance D
# Metric can be "angular", "euclidean", "manhattan", "hamming", or "dot".
def knnsearch_annoy(X, k, similarity='euclidean'):
from annoy import AnnoyIndex
n = X.shape[0] # Number of points
dim = X.shape[1] # Dimension
print('kNN search with Annoy approximate nearest neighbor package...')
printProgressBar(0, n, prefix='Progress:', suffix='Complete', length=50)
u = AnnoyIndex(dim, similarity) # Length of item vector that will be indexed
for i in range(n):
u.add_item(i, X[i, :])
u.build(10) # 10 trees
D = []
I = []
J = []
for i in range(n):
printProgressBar(i + 1, n, prefix='Progress:', suffix='Complete', length=50)
A = u.get_nns_by_item(i, k, include_distances=True, search_k=-1)
I.append([i] * k)
J.append(A[0])
D.append(A[1])
I = np.array(I)
J = np.array(J)
D = np.array(D)
return I, J, D
# Compute weight matrix from nearest neighbor indices I,J and distances D
def weight_matrix_selftuning(I, J, D):
n = I.shape[0]
k = I.shape[1]
# Distance to kth nearest neighbor as a matrix
sigma = D[:, k - 1]
sigma = sparse.spdiags(1 / sigma, 0, n, n)
sigma = sigma.tocsr()
# Flatten
I = I.flatten()
J = J.flatten()
D = D.flatten()
# Symmetrize and remove redundant entries
M1 = np.vstack((I, J, D))
M2 = np.vstack((J, I, D))
M = np.concatenate((M1, M2), axis=1)
M = np.unique(M, axis=1)
# Construct sparse matrix and convert to Compressed Sparse Row (CSR) format
I = M[0, :]
J = M[1, :]
D = M[2, :]
dist = sparse.coo_matrix((D, (I, J)), shape=(n, n)).tocsr()
B = sparse.coo_matrix((np.ones(len(D), ), (I, J)), shape=(n, n)).tocsr() # Ones in all entries
# Self-tuning weights
E = -4 * sigma * (dist ** 2) * sigma
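    # expm1 keeps the computation sparse: adding B (ones on the stored entries) afterwards
    # recovers exp(-4 * d_ij^2 / (sigma_i * sigma_j)) exactly on the k-NN entries.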
W = E.expm1()
W = W.multiply(B) + B
return W
# Compute weight matrix from nearest neighbor indices I,J and distances D
# k = number of neighbors
# Chooses k neighbors at random from the I.shape[1] nearest neighbors
def weight_matrix_homogenized(I, J, D, k, f=exp_weight):
# I = I[:,:10]
# J = J[:,:10]
# D = D[:,:10]
# Restrict I,J,D to k neighbors
k = np.minimum(I.shape[1], k)
n = I.shape[0]
for i in range(n):
ind = random.choice(I.shape[1], k, replace=False)
I[i, :k] = I[i, ind]
J[i, :k] = J[i, ind]
D[i, :k] = 1
n = I.shape[0]
k = I.shape[1]
D = D * D
eps = D[:, k - 1] / 4
D = f(D / eps[:, None])
# Flatten
I = I.flatten()
J = J.flatten()
D = D.flatten()
# Construct sparse matrix and convert to Compressed Sparse Row (CSR) format
W = sparse.coo_matrix((D, (I, J)), shape=(n, n)).tocsr()
return W
# Compute distance matrix from nearest neighbor indices I,J and distances D
# k = number of neighbors
def dist_matrix(I, J, D, k):
# Restrict I,J,D to k neighbors
k = np.minimum(I.shape[1], k)
I = I[:, :k]
J = J[:, :k]
D = D[:, :k]
n = I.shape[0]
k = I.shape[1]
# Flatten
I = I.flatten()
J = J.flatten()
D = D.flatten()
# Construct sparse matrix and convert to Compressed Sparse Row (CSR) format
W = sparse.coo_matrix((D, (I, J)), shape=(n, n)).tocsr()
return W
# Adds weights to an adjacency matrix W using similarity in data X
def add_weights(W, X, labels):
n = W.shape[0]
# pca = PCA(n_components=20)
# X = pca.fit_transform(X)
# print(X.shape)
I, J, V = sparse.find(W)
# Dot products
Y = X[I, :] - X[J, :]
Y = np.sum(Y * Y, axis=1)
W = sparse.coo_matrix((Y, (I, J)), shape=(n, n)).tocsr()
max_dist = np.reshape(np.max(W, axis=1).todense().tolist(), (n,))
D = sparse.spdiags((max_dist + 1e-10) ** (-1), 0, n, n).tocsr()
W = D * W
I, J, V = sparse.find(W)
V = np.exp(-2 * V)
W = sparse.coo_matrix((V, (I, J)), shape=(n, n)).tocsr()
return W
# Finds largest connected component of the graph represented by adjacency matrix W
# Returns the weighted adjacency matrix, along with a boolean mask indicating the
# vertices from the input matrix that were selected
def largest_conn_component(W):
ncomp, labels = csgraph.connected_components(W, directed=False)
num_verts = np.zeros((ncomp,))
for i in range(ncomp):
num_verts[i] = np.sum(labels == i)
i_max = np.argmax(num_verts)
ind = labels == i_max
A = W[ind, :]
A = A[:, ind]
print("Found %d" % ncomp + " connected components.")
print("Returning component with %d" % num_verts[i_max] + " vertices out of %d" % W.shape[0] + " total vertices.")
return A, ind
# Compute weight matrix from nearest neighbor indices I,J and distances D
# k = number of neighbors
def weight_matrix(I, J, D, k, f=exp_weight, symmetrize=True):
# Restrict I,J,D to k neighbors
k = np.minimum(I.shape[1], k)
I = I[:, :k]
J = J[:, :k]
D = D[:, :k]
n = I.shape[0]
k = I.shape[1]
D = D * D
eps = D[:, k - 1] / 4
D = f(D / eps[:, None])
# Flatten
I = I.flatten()
J = J.flatten()
D = D.flatten()
# Construct sparse matrix and convert to Compressed Sparse Row (CSR) format
W = sparse.coo_matrix((D, (I, J)), shape=(n, n)).tocsr()
if symmetrize:
        W = (W + W.transpose()) / 2
return W
def nnk_weight_matrix(dataset, metric, mask, knn_param, reg=1e-10, symmetrize=True):
# Try to Load data
X = load_dataset(dataset=dataset, metric=metric)
X_normalized = X / np.linalg.norm(X, axis=1, keepdims=True)
num_of_nodes = mask.shape[0]
neighbor_indices = np.zeros((num_of_nodes, knn_param))
weight_values = np.zeros((num_of_nodes, knn_param))
error_values = np.ones((num_of_nodes, knn_param))
for node_i in range(num_of_nodes):
non_zero_index = np.array(mask[node_i, :])
non_zero_index = np.delete(non_zero_index, np.where(non_zero_index == node_i))
if len(non_zero_index) > knn_param:
non_zero_index = non_zero_index[:knn_param]
x_neighbors = X_normalized[non_zero_index]
g_i = 0.5 + np.dot(x_neighbors, X_normalized[node_i]) / 2
G_i = 0.5 + np.dot(x_neighbors, x_neighbors.T) / 2
# x_opt, check = non_negative_qpsolver(G_i, g_i, g_i, reg)
# error_values[node_i, :] = 1 - 2 * np.dot(x_opt, g_i) + np.dot(x_opt, np.dot(G_i, x_opt))
x_opt = g_i
weight_values[node_i, :] = x_opt / np.sum(x_opt)
neighbor_indices[node_i, :] = non_zero_index
row_indices = np.expand_dims(np.arange(0, num_of_nodes), 1)
row_indices = np.tile(row_indices, [1, knn_param])
adjacency = sparse.coo_matrix((weight_values.ravel(), (row_indices.ravel(), neighbor_indices.ravel())),
shape=(num_of_nodes, num_of_nodes))
if symmetrize:
error = sparse.coo_matrix((error_values.ravel(), (row_indices.ravel(), neighbor_indices.ravel())),
shape=(num_of_nodes, num_of_nodes))
# Alternate way of doing: error_index = sparse.find(error > error.T); adjacency[error_index[0], error_index[
# 1]] = 0
adjacency = adjacency.multiply(error < error.T)
adjacency = adjacency.maximum(adjacency.T)
adjacency.eliminate_zeros()
error_values = error_values[:, 0]
return adjacency.tocsr(), error_values
# Compute boundary points
# k = number of neighbors to use
def boundary_points_new(X, k, I=None, J=None, D=None, ReturnNormals=False):
if (I is None) or (J is None) or (D is None):
n = X.shape[0]
d = X.shape[1]
if d <= 5:
I, J, D = knnsearch(X, k)
else:
I, J, D = knnsearch_annoy(X, k)
# Restrict I,J,D to k neighbors
k = np.minimum(I.shape[1], k)
n = X.shape[0]
I = I[:, :k]
J = J[:, :k]
D = D[:, :k]
W = weight_matrix(I, J, D, k, f=lambda x: np.ones_like(x), symmetrize=False)
L = graph_laplacian(W)
# Estimates of normal vectors
nu = -L * X
nu = np.transpose(nu)
norms = np.sqrt(np.sum(nu * nu, axis=0))
nu = nu / norms
nu = np.transpose(nu)
print(nu.shape)
# Boundary test
NN = X[J]
NN = np.swapaxes(NN[:, 1:, :], 0, 1) # This is kxnxd
V = NN - X # This is x^i-x^0 kxnxd array
NN_nu = nu[J]
W = (np.swapaxes(NN_nu[:, 1:, :], 0, 1) + nu) / 2
xd = np.sum(V * W, axis=2) # dist to boundary
Y = np.max(-xd, axis=0)
if ReturnNormals:
return Y, nu
else:
return Y
# Compute boundary points
# k = number of neighbors to use
def boundary_points(X, k, I=None, J=None, D=None, ReturnNormals=False, R=np.inf):
if (I is None) or (J is None) or (D is None):
n = X.shape[0]
d = X.shape[1]
if d <= 5:
I, J, D = knnsearch(X, k)
else:
I, J, D = knnsearch_annoy(X, k)
# Restrict I,J,D to k neighbors
k = np.minimum(I.shape[1], k)
n = X.shape[0]
I = I[:, :k]
J = J[:, :k]
D = D[:, :k]
W = weight_matrix(I, J, D, k, f=lambda x: np.ones_like(x), symmetrize=False)
L = graph_laplacian(W)
# Estimates of normal vectors
nu = -L * X
nu = np.transpose(nu)
norms = np.sqrt(np.sum(nu * nu, axis=0))
nu = nu / norms
nu = np.transpose(nu)
# Boundary test
NN = X[J]
NN = np.swapaxes(NN[:, 1:, :], 0, 1) # This is kxnxd
V = NN - X # This is x^i-x^0 kxnxd array
xd = np.sum(V * nu, axis=2) # xd coordinate (kxn)
sqdist = np.sum(V * V, axis=2)
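    # Boundary statistic: the max over neighbors of (x_d^2 - |x - x_0|^2)/(2R) - x_d,
    # where x_d is the neighbor's offset along the estimated normal; thresholding Y
    # separates boundary points from interior points.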
Y = np.max((xd * xd - sqdist) / (2 * R) - xd, axis=0)
if ReturnNormals:
return Y, nu
else:
return Y
# Construct k-nn sparse distance matrix
# Note: Matrix is not symmetric
def knn_weight_matrix(X, k, f=exp_weight):
I, J, D = knnsearch_annoy(X, k)
W = weight_matrix(I, J, D, k, f=f)
return W
# Solves Lx=f subject to Rx=g at ind points
def gmres_bc_solve(L, f, R, g, ind):
# Mix matrices based on boundary points
A = L.copy()
A = A.tolil()
A[ind, :] = R[ind, :]
A = A.tocsr()
# Right hand side
b = f.copy()
b[ind] = g[ind]
# Preconditioner
m = A.shape[0]
M = A.diagonal()
M = sparse.spdiags(1 / M, 0, m, m).tocsr()
# GMRES solver
# start_time = time.time()
u, info = sparse.linalg.gmres(A, b, M=M)
# print("--- %s seconds ---" % (time.time() - start_time))
# print('gmres_err = %f'%np.max(np.absolute(A*u-b)))
return u
# Poisson solve
# Solves Lu = f with preconditioned conjugate gradient
def pcg_solve(L, f, x0=None, tol=1e-10):
# start_time = time.time()
L = L.tocsr()
# Conjugate gradient with Jacobi preconditioner
m = L.shape[0]
M = L.diagonal()
M = sparse.spdiags(1 / M, 0, m, m).tocsr()
if x0 is None:
u, i = splinalg.cg(L, f, tol=tol, M=M)
else:
u, i = splinalg.cg(L, f, x0=x0, tol=tol, M=M)
# print("--- %s seconds ---" % (time.time() - start_time))
return u
# Finds k Dirichlet eigenvectors
# Solves Lu = lambda u subject to u(I)=0
def dirichlet_eigenvectors(L, I, k):
L = L.tocsr()
n = L.shape[0]
# Locations of labels
idx = np.full((n,), True, dtype=bool)
idx[I] = False
# Left hand side matrix
A = L[idx, :]
A = A[:, idx]
# Eigenvector solver
vals, vec = sparse.linalg.eigs(A, k=k, which='SM')
vec = vec.real
vals = vals.real
# Add labels back into array
u = np.zeros((n, k))
u[idx, :] = vec
if k == 1:
u = u.flatten()
return u, vals
# Constrained linear solve
# Solves Lu = f subject to u(I)=g
def constrained_solve(L, I, g, f=None, x0=None, tol=1e-10):
L = L.tocsr()
n = L.shape[0]
# Locations of labels
idx = np.full((n,), True, dtype=bool)
idx[I] = False
# Right hand side
b = -L[:, I] * g
b = b[idx]
if f is not None:
b = b + f[idx]
# Left hand side matrix
A = L[idx, :]
A = A[:, idx]
# start_time = time.time()
# Conjugate gradient with Jacobi preconditioner
m = A.shape[0]
M = A.diagonal()
M = sparse.spdiags(1 / (M + 1e-10), 0, m, m).tocsr()
if x0 is None:
v, i = splinalg.cg(A, b, tol=tol, M=M)
else:
v, i = splinalg.cg(A, b, x0=x0[idx], tol=tol, M=M)
# print("--- %s seconds ---" % (time.time() - start_time))
# Add labels back into array
u = np.ones((n,))
u[idx] = v
u[I] = g
return u
# Returns n random points in R^d
def rand(n, d):
return random.rand(n, d)
# Returns n random points in annulus (r1,r2)
def rand_annulus(n, d, r1, r2):
N = 0
X = np.zeros((1, d))
while X.shape[0] <= n:
Y = r2 * (2 * rand(n, d) - 1)
dist2 = np.sum(Y * Y, axis=1)
I = (dist2 < r2 * r2) & (dist2 > r1 * r1)
Y = Y[I, :]
X = np.vstack((X, Y))
X = X[1:(n + 1)]
return X
# Returns n random points in unit ball in R^d
def rand_ball(n, d):
N = 0
X = np.zeros((1, d))
while X.shape[0] <= n:
Y = 2 * rand(n, d) - 1
I = np.sum(Y * Y, axis=1) < 1
Y = Y[I, :]
X = np.vstack((X, Y))
X = X[1:(n + 1)]
return X
def randn(n, d):
X = np.zeros((n, d))
for i in range(d):
X[:, i] = np.random.normal(0, 1, n)
return X
def bean_data(n, h):
# n = number of points
# h = height of bridge (h=0.2)
a = -1
b = 1
    x = a + (b - a) * random.rand(3 * n)
    c = -0.6
    d = 0.6
    y = c + (d - c) * random.rand(3 * n)
X = np.transpose(np.vstack((x, y)))
dist_from_x_axis = 0.4 * np.sqrt(1 - x ** 2) * (1 + h - np.cos(3 * x))
in_bean = abs(y) <= dist_from_x_axis
X = X[in_bean, :]
if X.shape[0] < n:
        print('Not enough samples')
else:
X = X[:n, :]
return X
def mesh(X):
    T = spatial.Delaunay(X[:, :2])
return T.simplices
def box_mesh(X, u=None):
n = X.shape[0]
d = X.shape[1]
if d > 2:
X = X[:, 0:2]
x1 = X[:, 0].min()
x2 = X[:, 0].max()
y1 = X[:, 1].min()
y2 = X[:, 1].max()
corners = np.array([[x1, y1], [x2, y2], [x1, y2], [x2, y1]])
X = np.append(X, corners, axis=0)
Tri = mesh(X)
if u is not None:
u = np.append(u, [0, 0, 0, 0])
for i in range(n, n + 4):
I = (Tri[:, 0] == i) | (Tri[:, 1] == i) | (Tri[:, 2] == i)
nn_tri = Tri[I, :].flatten()
nn_tri = np.unique(nn_tri[nn_tri < n])
u[i] = np.mean(u[nn_tri])
# u[i] = np.max(u[nn_tri])
return X, Tri, u
else:
return X, Tri
# Triangulation of domain
def improved_mesh(X):
n = X.shape[0]
d = X.shape[1]
if d > 2:
X = X[:, 0:2]
# Normalize data to unit box
x1 = X[:, 0].min()
x2 = X[:, 0].max()
y1 = X[:, 1].min()
y2 = X[:, 1].max()
X = X - [x1, y1]
X[:, 0] = X[:, 0] / (x2 - x1)
X[:, 1] = X[:, 1] / (y2 - y1)
# Add padding data around
pad = 10 / np.sqrt(n)
m = int(pad * n)
Y = rand(m, 2)
Y[:, 0] = Y[:, 0] * pad - pad
Z = np.vstack((X, Y))
Y = rand(m, 2)
Y[:, 0] = Y[:, 0] * pad + 1
Z = np.vstack((Z, Y))
Y = rand(m, 2)
Y[:, 1] = Y[:, 1] * pad - pad
Z = np.vstack((Z, Y))
Y = rand(m, 2)
Y[:, 1] = Y[:, 1] * pad + 1
Z = np.vstack((Z, Y))
# Delaunay triangulation
    T = spatial.Delaunay(Z)
    Tri = T.simplices
    J = np.sum(Tri >= n, axis=1) == 0
Tri = Tri[J, :]
return Tri
def plot(X, u):
Tri = mesh(X)
import mayavi.mlab as mlab
mlab.triangular_mesh(X[:, 0], X[:, 1], u, Tri)
mlab.view(azimuth=-45, elevation=60)
# Laplace learning
# Zhu, Xiaojin, <NAME>, and <NAME>. "Semi-supervised learning using gaussian fields and harmonic functions." Proceedings of the 20th International conference on Machine learning (ICML-03). 2003.
def laplace_solve(W, I, g, norm="none"):
L = graph_laplacian(W, norm=norm)
return constrained_solve(L, I, g)
# Shift trick
# W = Weight matrix
# I = indices of labels
# g = +1/-1 values of labels
def shift_solve(W, I, g):
# Laplace learning
u = laplace_solve(W, I, g)
# Shift solution
s = degrees(W)
c = np.sum(s[I] * g) / sum(s[I])
u = u - c
u = u - np.mean(u)
return u
# Shift trick by mean
# W = Weight matrix
# I = indices of labels
# g = +1/-1 values of labels
def meanshift_solve(W, I, g):
# Laplace learning
u = laplace_solve(W, I, g)
# Center solution
u = u - np.mean(u)
return u
# Reweights the weight matrix for WNLL
def wnll(W, I):
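    # Reweight the rows and columns of the m labeled nodes by n/m (the inverse label rate),
    # so the labels are not washed out at very low label rates.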
n = W.shape[0]
m = len(I)
a = np.ones((n,))
a[I] = n / m
D = sparse.spdiags(a, 0, n, n).tocsr()
W = D * W + W * D
return W
# Weighted nonlocal Laplacian
# Shi, Zuoqiang, <NAME>, and <NAME>. "Weighted nonlocal laplacian on interpolation from sparse data." Journal of Scientific Computing 73.2-3 (2017): 1164-1177.
def wnll_solve(W, I, g):
n = W.shape[0]
W = wnll(W, I)
L = graph_laplacian(W, norm="none")
return constrained_solve(L, I, g)
# Properly weighted Laplacian
# Calder, Jeff, and <NAME>. "Properly-weighted graph Laplacian for semi-supervised learning." arXiv preprint arXiv:1810.04351 (2018).
def properlyweighted_solve(W, I, g, X, alpha, zeta, r):
n = W.shape[0]
rzeta = r / (zeta - 1) ** (1 / alpha)
Xtree = spatial.cKDTree(X[I, :])
D, J = Xtree.query(X)
D[D < rzeta] = rzeta
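    # gamma = 1 + (r / dist)^alpha grows near labeled points, so edges close to labels get
    # extra weight; distances are floored at rzeta to keep gamma bounded.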
gamma = 1 + (r / D) ** alpha
D = sparse.spdiags(gamma, 0, n, n).tocsr()
L = graph_laplacian(D * W + W * D, norm="none")
return constrained_solve(L, I, g)
# Game theoretic p-Laplace learning
# Rios, <NAME>, <NAME>, and <NAME>. "Algorithms for $\ell_p$-based semi-supervised learning on graphs." arXiv preprint arXiv:1901.05031 (2019).
def plaplace_solve(W, I, g, p, sol_method="SemiImplicit", norm="none"):
# start_time = time.time()
n = W.shape[0]
W = W / W.max()
if p == float("inf"):
alpha = 0
delta = 1
else:
alpha = 1 / p
delta = 1 - 2 / p
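    # Game-theoretic p-Laplacian: drive alpha * (L u)/d + delta * L_inf u to zero at
    # unlabeled points, with alpha = 1/p and delta = 1 - 2/p interpolating between the
    # 2-Laplacian and the infinity-Laplacian.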
dx = degrees(W)
theta = 1.2 * (2 * alpha + np.max(dx) * delta)
if p == float("inf"):
beta = 1
gamma = 1 / theta
else:
beta = (theta * p - 2) / (theta * p)
gamma = (p - 2) / (theta * p - 2)
if norm == "normalized":
deg = dx[I] ** (1 / 2)
g = g / deg
L = graph_laplacian(W)
u = constrained_solve(L, I, g)
uu = np.max(g) * np.ones((n,))
ul = np.min(g) * np.ones((n,))
WI, WJ, WV = sparse.find(W)
# Set labels
u[I] = g
uu[I] = g
ul[I] = g
# Time step for gradient descent
dt = 0.9 / (alpha + 2 * delta)
if sol_method == "GradientDescentCcode":
try:
import cmodules.cgraphpy as cgp
except:
print("cgraphpy cmodule not found. You may just need to compile it.")
sys.exit()
# Type casting and memory blocking
uu = np.ascontiguousarray(uu, dtype=np.float64)
ul = np.ascontiguousarray(ul, dtype=np.float64)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WJ = np.ascontiguousarray(WJ, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float64)
I = np.ascontiguousarray(I, dtype=np.int32)
g = np.ascontiguousarray(g, dtype=np.float64)
cgp.lp_iterate(uu, ul, WI, WJ, WV, I, g, p, 1e6, 1e-1, 0.0)
u = (uu + ul) / 2
# Check residual
L2uu = -L * uu
LIuu = graph_infinity_laplacian(W, uu, I=WI, J=WJ, V=WV)
resu = alpha * L2uu / dx + delta * LIuu
resu[I] = 0
L2ul = -L * ul
LIul = graph_infinity_laplacian(W, ul, I=WI, J=WJ, V=WV)
resl = alpha * L2ul / dx + delta * LIul
resl[I] = 0
# print('Upper residual = %f' % np.max(np.absolute(resu)))
# print('Lower residual = %f' % np.max(np.absolute(resl)))
else:
err = 1e6
i = 0
while err > 1e-1:
i += 1
# Graph laplacians
L2u = -L * u
LIu = graph_infinity_laplacian(W, u, I=WI, J=WJ, V=WV)
# Residual error
res = alpha * L2u / dx + delta * LIu
res[I] = 0
# err = np.max(np.absolute(res))
# print("Residual error = "+str(err))
# Update
if sol_method == "GradientDescent":
L2uu = -L * uu
LIuu = graph_infinity_laplacian(W, uu, I=WI, J=WJ, V=WV)
res = alpha * L2uu / dx + delta * LIuu
res[I] = 0
uu = uu + dt * res
err = np.max(np.absolute(res))
# print("Upper residual = "+str(err))
L2ul = -L * ul
LIul = graph_infinity_laplacian(W, ul, I=WI, J=WJ, V=WV)
res = alpha * L2ul / dx + delta * LIul
res[I] = 0
ul = ul + dt * res
err = np.max(np.absolute(res))
# print("Lower residual = "+str(err))
err1 = np.max(uu - ul)
err2 = np.min(uu - ul)
# print("Residual error = "+str(err1)+","+str(err2))
err = err1
u = (uu + ul) / 2
elif sol_method == "SemiImplicit":
rhs = beta * (2 * gamma * dx * LIu - L2u)
u = constrained_solve(L, I, g, f=rhs, x0=u, tol=err / 100)
else:
print("Invalid p-Laplace solution method.")
sys.exit()
if norm == "normalized":
deg = dx ** (1 / 2)
u = u * deg
# print("--- %s seconds ---" % (time.time() - start_time))
return u
# Gradient of function on graph
# W = sparse weight matrix
# u = function on graph
def graph_gradient(W, u, I=None, J=None, V=None):
n = W.shape[0]
if I is None or J is None:
I, J, V = sparse.find(W)
G = sparse.coo_matrix((V * (u[J] - u[I]), (I, J)), shape=(n, n)).tocsr()
return G
# Divergence of vector field F (F should be skew-symmetric)
# F = sparse matrix representing vector field
def graph_divergence(F, W):
F = F.multiply(W)
return 2 * np.squeeze(np.array(np.sum(F, axis=1)))
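# Note (sketch): composing the two operators above gives a Laplacian of the squared weights.
# Since graph_gradient produces G_ij = w_ij*(u_j - u_i) and graph_divergence computes
# 2*sum_j w_ij*F_ij, one gets graph_divergence(graph_gradient(W, u), W) = -L2 @ u with
# L2 = graph_laplacian(2*W.multiply(W), norm="none"), assuming graph_laplacian(A, norm="none")
# is the unnormalized Laplacian D_A - A defined earlier in this file.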
# Random-walk SSL
# Zhou, Dengyong, et al. "Learning with local and global consistency." Advances in neural information processing systems. 2004.
def randomwalk_solve(W, I, g, epsilon):
n = W.shape[0]
# Zero diagonals
W = W - sparse.spdiags(W.diagonal(), 0, n, n)
# Construct Laplacian matrix
Dinv2 = degree_matrix(W, p=-1 / 2)
    L = sparse.identity(n) - (1 - epsilon) * Dinv2 * W * Dinv2
# Format right hand side
b = np.zeros((n,))
b[I] = g
return pcg_solve(L, b)
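# Example (sketch): randomwalk_solve solves the Zhou et al. linear system
#   (I - (1 - epsilon) * D^{-1/2} W D^{-1/2}) u = b,
# with b supported on the labeled points, for a single binary label vector g; graph_ssl below
# assembles a multi-class prediction by calling it once per class (one-vs-rest).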
# Computes accuracy of labeling
# m = number of labeled points used
def accuracy(L, L_true, m):
# Remove unlabeled nodes
I = L_true >= 0
L = L[I]
L_true = L_true[I]
# Compute accuracy
return 100 * np.maximum(np.sum(L == L_true) - m, 0) / (len(L) - m)
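# Note (sketch): the m labeled points are counted as correct and then excluded, so this reports
# accuracy over the unlabeled nodes only.  For example, with 1000 labeled ground-truth nodes,
# m = 100 given labels and 950 correct predictions overall, accuracy returns
# 100*(950-100)/(1000-100), approximately 94.4.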
# Projects all columns of (kxn) matrix X onto k-simplex
def ProjectToSimplex(X):
n = X.shape[1]
k = X.shape[0]
Xs = -np.sort(-X, axis=0) # Sort descending
A = np.tril(np.ones((k, k)))
Sum = A @ Xs
Max = np.transpose((np.transpose(Sum) - 1) / (np.arange(k) + 1))
Xs[:-1, :] = Xs[1:, :]
Xs[-1, :] = (Sum[k - 1, :] - 1) / k
I = np.argmax(Max >= Xs, axis=0)
X = np.maximum(X - Max[I, range(n)], 0)
return X
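# Example (sketch): ProjectToSimplex performs the standard sorting-based Euclidean projection of
# each column of X onto the probability simplex {x : x_i >= 0, sum_i x_i = 1}; e.g. the column
# [0.9, 0.9] projects to [0.5, 0.5] and [2.0, 0.0] projects to [1.0, 0.0].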
# Takes list of labels and converts to vertices of simplex format
def LabelsToVec(L):
n = L.shape[0]
labels = np.unique(L)
k = len(labels)
for i in range(k):
L[L == labels[i]] = i
L = L.astype(int)
X = np.zeros((k, n))
X[L, range(n)] = 1
return X, labels
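# Example (sketch): LabelsToVec(np.array([0, 2, 2])) returns the one-hot matrix
#   [[1, 0, 0],
#    [0, 1, 1]]
# together with the unique labels [0, 2]; note the input array is relabeled in place to
# consecutive integers 0,...,k-1 before the encoding is built.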
# Projects all rows of (nxk) matrix X to closest vertex of the simplex
# Assume X already lives in the simplex, e.g., is the output of ProjectToSimplex
def ClosestVertex(X):
n = X.shape[1]
k = X.shape[0]
L = np.argmax(X, axis=0)
X = np.zeros((k, n))
X[L, range(n)] = 1
return X
# Threshold with temperature to closest vertex
def ClosestVertexTemp(X, T=0.01):
n = X.shape[1]
k = X.shape[0]
beta = 1 / T
Y = np.exp(beta * X)
Ysum = np.sum(Y, axis=0)
Y = Y / Ysum
X[0, :] = Y[0, :]
for i in range(1, k):
X[i, :] = X[i - 1, :] + Y[i, :]
R = random.rand(n, 1)
L = np.sum(R.flatten() > X, axis=0)
X = np.zeros((k, n))
X[L, range(n)] = 1
return X
# Volume MBO, initialized with Poisson
def poisson_volumeMBO(W, I, g, dataset, beta, T, volume_mult):
# Set diagonal entries to zero
W = diag_multiply(W, 0)
try:
import cmodules.cgraphpy as cgp
except:
print("cgraphpy cmodule not found. You may just need to compile it.")
sys.exit()
# Solve Poisson problem and compute labels
u, _ = poisson(W, I, g)
max_locations = np.argmax(u, axis=0)
u = (np.unique(g))[max_locations]
n = W.shape[0]
k = len(np.unique(g))
WI, WJ, WV = sparse.find(W)
# Class counts
ClassCounts = (n * beta).astype(int)
# Type casting and memory blocking
u = np.ascontiguousarray(u, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WJ = np.ascontiguousarray(WJ, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float32)
I = np.ascontiguousarray(I, dtype=np.int32)
g = np.ascontiguousarray(g, dtype=np.int32)
ClassCounts = np.ascontiguousarray(ClassCounts, dtype=np.int32)
cgp.volume_mbo(u, WI, WJ, WV, I, g, ClassCounts, k, 0.0, T, volume_mult)
# Set given labels and convert to vector format
u[I] = g
u, _ = LabelsToVec(u)
return u
# Volume MBO (Jacobs, et al.)
def volumeMBO(W, I, g, dataset, beta, T, volume_mult):
# Set diagonal entries to zero
W = diag_multiply(W, 0)
try:
import cmodules.cgraphpy as cgp
except:
print("cgraphpy cmodule not found. You may just need to compile it.")
sys.exit()
n = W.shape[0]
k = len(np.unique(g))
u = np.zeros((n,))
WI, WJ, WV = sparse.find(W)
# Class counts
ClassCounts = (n * beta).astype(int)
# Type casting and memory blocking
u = np.ascontiguousarray(u, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WJ = np.ascontiguousarray(WJ, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float32)
I = np.ascontiguousarray(I, dtype=np.int32)
g = np.ascontiguousarray(g, dtype=np.int32)
ClassCounts = np.ascontiguousarray(ClassCounts, dtype=np.int32)
cgp.volume_mbo(u, WI, WJ, WV, I, g, ClassCounts, k, 1.0, T, volume_mult)
# Set given labels and convert to vector format
u[I] = g
u, _ = LabelsToVec(u)
return u
# Multiclass MBO
# Garcia-Cardona, Cristina, et al. "Multiclass data segmentation using diffuse interface methods on graphs." IEEE transactions on pattern analysis and machine intelligence 36.8 (2014): 1600-1613.
def multiclassMBO(W, I, g, eigvals, eigvecs, dataset, true_labels=None):
n = W.shape[0]
k = len(np.unique(g))
Ns = 6
if dataset == 'MNIST' or dataset == 'FashionMNIST' or dataset == 'cifar':
dt = 0.15
mu = 50
elif dataset == 'WEBKB':
dt = 1
mu = 4
else:
print('Dataset not supported by MBO...')
sys.exit(2)
# Load eigenvalues and eigenvectors
X = eigvecs
num_eig = len(eigvals)
# Form matrices
V = np.diag(1 / (1 + (dt / Ns) * eigvals))
Y = X @ V
Xt = np.transpose(X)
# Random initial labeling
u = random.rand(k, n)
u = ProjectToSimplex(u)
# Set initial known labels
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
u = Kg + (1 - J) * u
# Maximum number of iterations
T = 10
for i in range(T):
for s in range(Ns):
Z = (u - (dt / Ns) * mu * J * (u - Kg)) @ Y
u = Z @ Xt
# Projection step
u = ProjectToSimplex(u)
u = ClosestVertex(u)
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return u
# Poisson MBO
def poissonMBO(W, I, g, dataset, beta, true_labels=None, temp=0, use_cuda=False, Ns=40, mu=1, T=50):
n = W.shape[0]
unique_labels = np.unique(g)
k = len(unique_labels)
num_labels = np.zeros((k,))
for i in range(k):
num_labels[i] = np.sum(g == unique_labels[i])
W = diag_multiply(W, 0)
if dataset == 'WEBKB':
mu = 1000
Ns = 8
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Poisson source term
c = np.sum(Kg, axis=1) / len(I)
b = np.transpose(Kg)
b[I, :] = b[I, :] - c
b = np.transpose(b)
L = graph_laplacian(W, norm='none')
# Initialize u via Poisson learning
# u = np.zeros((k,n))
# for j in range(k):
# u[j,:] = pcg_solve(L,b[j,:])
# u = mu*u
# u = np.transpose(np.transpose(u) - np.mean(u,axis=1))
u, mix_time = poisson(W, I, g, use_cuda=use_cuda, beta=beta)
# Ns = int(mix_time/4)
u = ProjectToSimplex(u)
u = ClosestVertex(u)
# Time step for stability
dt = 1 / np.max(degrees(W))
P = sparse.identity(n) - dt * L
Db = mu * dt * b
if use_cuda:
Pt = torch_sparse(P).cuda()
Dbt = torch.from_numpy(np.transpose(Db)).float().cuda()
for i in range(T):
if use_cuda:
# Put on GPU and run heat equation
ut = torch.from_numpy(np.transpose(u)).float().cuda()
for s in range(Ns):
# u = u*P + Db
ut = torch.sparse.addmm(Dbt, Pt, ut)
# Put back on CPU
u = np.transpose(ut.cpu().numpy())
else: # Use CPU
for s in range(Ns):
# u = u + dt*(mu*b - u*L)
u = u * P + Db
# Projection step
# u = np.diag(beta/num_labels)@u
u = ProjectToSimplex(u)
u = ClosestVertex(u)
u = np.transpose(np.transpose(u) - np.mean(u, axis=1) + beta)
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return u
def torch_sparse(A):
A = A.tocoo()
values = A.data
indices = np.vstack((A.row, A.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
shape = A.shape
return torch.sparse.FloatTensor(i, v, torch.Size(shape))
# Sparse Label Propagation
def SparseLabelPropagation(W, I, g, true_labels=None):
n = W.shape[0]
k = len(np.unique(g))
WI, WJ, WV = sparse.find(W)
B = sparse.coo_matrix((np.ones(len(WV), ), (WI, WJ)), shape=(n, n)).tocsr() # Ones in all entries
# Construct matrix 1/2W and 1/deg
lam = 2 * W - (1 - 1e-10) * B
lam = -lam.log1p()
lam = lam.expm1() + B
Id = sparse.identity(n)
gamma = degree_matrix(W + 1e-10 * Id, p=-1)
# Random initial labeling
# u = random.rand(k,n)
u = np.zeros((k, n))
# Set initial known labels
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Initialization
Y = list()
for j in range(k):
Gu = graph_gradient(W, u[j, :], I=WI, J=WJ, V=WV)
Y.append(Gu)
# Main loop for sparse label propagation
T = 100
for i in range(T):
u_prev = np.copy(u)
# Compute div
for j in range(k):
div = graph_divergence(Y[j], W)
u[j, :] = u_prev[j, :] - gamma * div
u[j, I] = Kg[j, I] # Set labels
u_tilde = 2 * u[j, :] - u_prev[j, :]
Gu = -graph_gradient(W, u_tilde, I=WI, J=WJ, V=WV)
Y[j] = Y[j] + Gu.multiply(lam)
ind1 = B.multiply(abs(Y[j]) > 1)
ind2 = B - ind1
Y[j] = ind1.multiply(Y[j].sign()) + ind2.multiply(Y[j])
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return u
# Dynamic Label Propagation
def DynamicLabelPropagation(W, I, g, alpha=0.05, lam=0.1, true_labels=None):
n = W.shape[0]
k = len(np.unique(g))
W = diag_multiply(W, 0)
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
u, _ = LabelsToVec(K)
u = u * J
# Set initial known labels
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = np.transpose(Kg * J)
u = np.copy(Kg)
if n > 5000:
print("Cannot use Dynamic Label Propagation on large datasets.")
else:
# Setup matrices
Id = sparse.identity(n)
D = degree_matrix(W, p=-1)
P = D * W
P = np.array(P.todense())
Pt = np.copy(P)
T = 2
for i in range(T):
v = P @ u
u = Pt @ u
u[I, :] = Kg[I, :]
Pt = P @ Pt @ np.transpose(P) + alpha * v @ np.transpose(v) + lam * Id
# Compute accuracy if all labels are provided
if true_labels is not None:
u = np.array(u)
max_locations = np.argmax(u, axis=1)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('i:%d' % i + ',Accuracy = %.2f' % acc)
u = np.transpose(np.array(u))
return u
# Centered and Iterated Centered Kernel of Mai/Coulliet 2018
def CenteredKernel(W, I, g, true_labels=None):
n = W.shape[0]
k = len(np.unique(g))
W = diag_multiply(W, 0)
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = np.transpose(Kg * J)
# Center labels
c = np.sum(Kg, axis=0) / len(I)
Kg[I, :] = Kg[I, :] - c
u = np.copy(Kg)
v = np.ones((n, 1))
vt = np.ones((1, n))
e = np.random.rand(n, 1)
for i in range(100):
y = W * (e - (1 / n) * v @ (vt @ e))
w = y - (1 / n) * v @ (vt @ y) # =Ae
l = abs(np.transpose(e) @ w / (np.transpose(e) @ e))
e = w / np.linalg.norm(w)
# Number of iterations
# alpha = 5*l/4
alpha = 105 * l / 100
T = 1000
err = 1
while err > 1e-10:
y = W * (u - (1 / n) * v @ (vt @ u))
w = (1 / alpha) * (y - (1 / n) * v @ (vt @ y)) - u # Laplacian
w[I, :] = 0
err = np.max(np.absolute(w))
u = u + w
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=1)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return np.transpose(u)
def vec_acc(u, I, g, true_labels):
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
return acc
# def volume_label_projection(u,beta,s=None):
#
# k = u.shape[0]
# n = u.shape[1]
# if s is None:
# s = np.ones((k,))
# for i in range(100):
# grad = beta - np.sum(ClosestVertex(np.diag(s)@u),axis=1)/n
# err0 = np.max(np.absolute(grad))
#
# dt = 1
# snew = s + dt*grad
# gradnew = beta - np.sum(ClosestVertex(np.diag(snew)@u),axis=1)/n
# err = err0
# newerr = np.max(np.absolute(gradnew))
# while newerr < err:
# print(dt)
# dt = 2*dt
# snew = s + dt*grad
# gradnew = beta - np.sum(ClosestVertex(np.diag(snew)@u),axis=1)/n
# err = newerr
# newerr = np.max(np.absolute(gradnew))
# dt = dt/2
# snew = s + dt*grad
# gradnew = beta - np.sum(ClosestVertex(np.diag(snew)@u),axis=1)/n
# newerr = np.max(np.absolute(gradnew))
# while newerr >= err:
# print(dt)
# dt = dt/2
# snew = s + dt*grad
# gradnew = beta - np.sum(ClosestVertex(np.diag(snew)@u),axis=1)/n
# newerr = np.max(np.absolute(gradnew))
# if dt < 1:
# dt = dt/2
#
# s = s + dt*grad
#
# print(err)
# if err == 0:
# print(i)
# break
#
# #s = s + dt*(beta - beta_u)
#
# return ClosestVertex(np.diag(s)@u),s
def volume_label_projection(u, beta, s=None, dt=None):
k = u.shape[0]
n = u.shape[1]
if s is None:
s = np.ones((k,))
if dt is None:
dt = 10
# print(np.around(100*beta,decimals=1))
# print(np.around(100*np.sum(ClosestVertex(np.diag(s)@u),axis=1)/n,decimals=1))
for i in range(100):
class_size = np.sum(ClosestVertex(np.diag(s) @ u), axis=1) / n
grad = beta - class_size
# print(np.around(100*class_size,decimals=1))
# err = np.max(np.absolute(grad))
# if err == 0:
# break
s = np.clip(s + dt * grad, 0.5, 2)
# print(np.around(100*beta,decimals=1))
# print(np.around(100*np.sum(ClosestVertex(np.diag(s)@u),axis=1)/n,decimals=1))
# print(np.around(100*beta - 100*np.sum(ClosestVertex(np.diag(s)@u),axis=1)/n,decimals=4))
return ClosestVertex(np.diag(s) @ u), s
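# Note (sketch): volume_label_projection performs a fixed number of gradient-ascent steps on the
# per-class scaling s, nudging the class sizes of ClosestVertex(np.diag(s) @ u) toward the target
# fractions beta; s is clipped to [0.5, 2] so no class can be scaled away entirely.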
# Poisson MBO with volume constraints
def poissonMBO_volume(W, I, g, dataset, beta, true_labels=None, temp=0, use_cuda=False, Ns=40, mu=1, T=20):
n = W.shape[0]
k = len(np.unique(g))
W = diag_multiply(W, 0)
if dataset == 'WEBKB':
mu = 1000
Ns = 8
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Poisson source term
c = np.sum(Kg, axis=1) / len(I)
b = np.transpose(Kg)
b[I, :] = b[I, :] - c
b = np.transpose(b)
D = degree_matrix(W)
# L = graph_laplacian(W,norm='none')
L = D - W.transpose()
# Initialize u via Poisson learning
u, _ = poisson(W, I, g, true_labels=true_labels, use_cuda=use_cuda, beta=beta)
u = mu * u
# Time step for stability
dt = 1 / np.max(degrees(W))
P = sparse.identity(n) - dt * L
Db = mu * dt * b
if use_cuda:
Pt = torch_sparse(P).cuda()
Dbt = torch.from_numpy(np.transpose(Db)).float().cuda()
for i in range(T):
# Heat equation step
if use_cuda:
# Put on GPU and run heat equation
ut = torch.from_numpy(np.transpose(u)).float().cuda()
for j in range(Ns):
ut = torch.sparse.addmm(Dbt, Pt, ut)
# Put back on CPU
u = np.transpose(ut.cpu().numpy())
else: # Use CPU
for j in range(Ns):
u = u * P + Db
# Projection step
u, s = volume_label_projection(u, beta)
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return u
# Poisson Volume
def PoissonVolume(W, I, g, true_labels=None, use_cuda=False, training_balance=True, beta=None, min_iter=50):
# Run Poisson learning
u, _ = poisson(W, I, g, true_labels=true_labels, use_cuda=use_cuda, training_balance=training_balance, beta=beta)
# Volume constraints
_, s = volume_label_projection(u, beta)
return np.diag(s) @ u
def original_poisson(W, I, g, true_labels=None, use_cuda=False, training_balance=True, beta=None, min_iter=50):
n = W.shape[0]
unique_labels = np.unique(g)
k = len(unique_labels)
# Zero out diagonal for faster convergence
W = diag_multiply(W, 0)
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Poisson source term
c = np.sum(Kg, axis=1) / len(I)
b = np.transpose(Kg)
b[I, :] = b[I, :] - c
# Setup matrices
D = degree_matrix(W + 1e-10 * sparse.identity(n), p=-1)
# L = graph_laplacian(W,norm='none')
# P = sparse.identity(n) - D*L #Line below is equivalent when W symmetric
P = D * W.transpose()
Db = D * b
v = np.max(Kg, axis=0)
v = v / np.sum(v)
vinf = degrees(W) / np.sum(degrees(W))
RW = W.transpose() * D
u = np.zeros((n, k))
# vals, vec = sparse.linalg.eigs(RW,k=1,which='LM')
# vinf = np.absolute(vec.flatten())
# vinf = vinf/np.sum(vinf)
# Number of iterations
T = 0
if use_cuda:
Pt = torch_sparse(P).cuda()
ut = torch.from_numpy(u).float().cuda()
Dbt = torch.from_numpy(Db).float().cuda()
# start_time = time.time()
while (T < min_iter or np.max(np.absolute(v - vinf)) > 1 / n) and (T < 1000):
ut = torch.sparse.addmm(Dbt, Pt, ut)
v = RW * v
T = T + 1
# print("--- %s seconds ---" % (time.time() - start_time))
# Transfer to CPU and convert to numpy
u = ut.cpu().numpy()
else: # Use CPU
# start_time = time.time()
while (T < min_iter or np.max(np.absolute(v - vinf)) > 1 / n) and (T < 1000):
uold = u.copy()
u = Db + P * u
v = RW * v
T = T + 1
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=1)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('%d,Accuracy = %.2f' % (T, acc))
# print("--- %s seconds ---" % (time.time() - start_time))
# Balancing for training data/class size discrepancy
if training_balance:
if beta is None:
u = u @ np.diag(1 / c)
else:
u = u @ np.diag(beta / c)
return np.transpose(u), T
# Poisson learning
def poisson(W, I, g, true_labels=None, use_cuda=False, training_balance=True, beta=None, min_iter=50, error=None):
n = W.shape[0]
unique_labels = np.unique(g)
k = len(unique_labels)
if error is None:
error = np.ones(n, dtype=np.float32)
else:
error = error.reshape((n,)) / np.max(error)
# Zero out diagonal for faster convergence
W = diag_multiply(W, 0)
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Poisson source term
c = np.sum(Kg, axis=1) / len(I)
b = np.transpose(Kg)
b[I, :] = 2 * b[I, :] - 1
# Setup matrices
# D = degree_matrix(W + 1e-10 * sparse.identity(n), p=-1)
# L = graph_laplacian(W,norm='none')
# P = sparse.identity(n) - D*L #Line below is equivalent when W symmetric
v_prev = np.random.random(size=(n, 1))
residue_energy = 1
u = np.zeros((n, k))
confidence_gain = W.transpose() #* sparse.spdiags(np.power(1 + error, -1), 0, n, n)
# vals, vec = sparse.linalg.eigs(RW,k=1,which='LM')
# vinf = np.absolute(vec.flatten())
# vinf = vinf/np.sum(vinf)
# Number of iterations
T = 0
if use_cuda:
Wt = torch_sparse(confidence_gain).cuda()
ut = torch.from_numpy(u).float().cuda()
bt = torch.from_numpy(b).float().cuda()
# start_time = time.time()
while (T < min_iter or residue_energy > 1e-10) and (T < 1000):
ut = torch.sparse.addmm(bt, Wt, ut)
v = W.transpose() * v_prev
residue_energy = np.linalg.norm(v - v_prev)
v_prev = v
T = T + 1
# print("--- %s seconds ---" % (time.time() - start_time))
# Transfer to CPU and convert to numpy
u = ut.cpu().numpy()
else: # Use CPU
# start_time = time.time()
while (T < min_iter or residue_energy > 1e-6) and (T < 1000):
u = np.clip(b + confidence_gain * u, a_min=-1, a_max=1)
v = W.transpose() * v_prev
residue_energy = np.linalg.norm(v - v_prev)
v_prev = v
T = T + 1
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=1)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('%d,Accuracy = %.2f' % (T, acc))
# print("--- %s seconds ---" % (time.time() - start_time))
print(f"T: {T}, residue: {residue_energy}")
# Balancing for training data/class size discrepancy
if training_balance:
if beta is None:
u = u @ np.diag(1 / c)
else:
u = u @ np.diag(beta / c)
return np.transpose(u), T
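# Example usage (sketch; W a sparse weight matrix, I the labeled indices, g their labels):
#   u, num_iter = poisson(W, I, g)                   # u has shape (num_classes, n)
#   pred = np.unique(g)[np.argmax(u, axis=0)]        # predicted label per node
#   pred[I] = g                                      # keep the given labels fixed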
# Poisson L1 based on Split Bregman Method
# Does not work as well as PoissonMBO
def poissonL1(W, I, g, dataset, norm="none", lam=100, mu=1000, Nouter=30, Ninner=6, true_labels=None):
n = W.shape[0]
k = len(np.unique(g))
# mu = mu*W.count_nonzero()/len(g) #Normalize constants
gamma = 1 / lam
WI, WJ, WV = sparse.find(W)
B = sparse.coo_matrix((np.ones(len(WV), ), (WI, WJ)), shape=(n, n)).tocsr() # Ones in all entries
L = graph_laplacian(2 * W.multiply(W), norm=norm)
deg = degrees(W)
dt = 1 / np.max(deg)
# Random initial labeling
# u = random.rand(k,n)
# u = ProjectToSimplex(u)
u = np.zeros((k, n))
# Set initial known labels
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Poisson parameters
c = np.sum(Kg, axis=1) / len(I)
b = np.transpose(Kg)
b[I, :] = b[I, :] - c
b = (mu / lam) * np.transpose(b)
# Initialize u via Poisson learning
u = np.zeros((k, n))
L = graph_laplacian(W, norm='none')
for j in range(k):
u[j, :] = pcg_solve(L, b[j, :])
u = np.transpose(np.transpose(u) - np.mean(u, axis=1))
# Initialization
V = list()
R = list()
gradu = list()
for j in range(k):
Gu = graph_gradient(W, u[j, :], I=WI, J=WJ, V=WV)
gradu.append(Gu)
V.append(Gu)
R.append(Gu)
# Main loop for Split Bregman iteration
for i in range(Nouter):
print('Outer:%d' % i)
for s in range(Ninner):
normV = 0 * W
for j in range(k):
divVR = graph_divergence(R[j] - V[j], W)
u[j, :] = pcg_solve(L, b[j, :] + divVR, x0=u[j, :], tol=1e-10)
# for s in range(100):
# u[j,:] = u[j,:] + dt*(b[j,:] + divVR - u[j,:]*L)
gradu[j] = graph_gradient(W, u[j, :], I=WI, J=WJ, V=WV)
V[j] = gradu[j] + R[j]
normV = normV + V[j].multiply(V[j])
normV = normV.sqrt()
# Shrinkage operation
# normV^{-1} for nonzero entries (tricky to do in sparse format)
# normV.eliminate_zeros(X)
normVinv = normV - (1 - 1e-10) * B
normVinv = -normVinv.log1p()
normVinv = normVinv.expm1() + B
C = normV.multiply(normVinv)
# print(np.sum(C>0))
# print(np.sum(C>0.9999))
# Compute shrinkage factor
# print(np.sum(normV>0))
shrink = normV - gamma * B
shrink = shrink.maximum(0)
# print(np.sum(shrink>0))
shrink = shrink.multiply(normVinv)
# Apply shrinkage
for j in range(k):
V[j] = V[j].multiply(shrink)
for j in range(k):
R[j] = R[j] + gradu[j] - V[j]
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return u
# Heap functions
# d = values in heap (indexed by graph vertex)
# h = heap (contains indices of graph elements in heap)
# p = pointers from graph back to heap (are updated with heap operations)
# s = number of elements in heap
# Sift up
# i = heap index of element to be sifted up
def SiftUp(d, h, s, p, i):
pi = int(i / 2) # Parent index in heap
while pi != 0:
if d[h[pi]] > d[h[i]]: # If parent larger, then swap
# Swap in heap
tmp = h[pi]
h[pi] = h[i]
h[i] = tmp
# Update pointers to heap
p[h[i]] = i
p[h[pi]] = pi
# Update parent/child indices
i = pi
pi = int(i / 2)
else:
pi = 0
# Sift down
# i = heap index of element to be sifted down
def SiftDown(d, h, s, p, i):
ci = 2 * i # child index in heap
while ci <= s:
        if ci + 1 <= s and d[h[ci + 1]] < d[h[ci]]:  # Choose smallest child (check bounds before indexing)
ci = ci + 1
if d[h[ci]] < d[h[i]]: # If child smaller, then swap
# Swap in heap
tmp = h[ci]
h[ci] = h[i]
h[i] = tmp
# Update pointers to heap
p[h[i]] = i
p[h[ci]] = ci
# Update parent/child indices
i = ci
ci = 2 * i
else:
ci = s + 1
# Pop smallest off of heap
# Returns index of smallest and size of new heap
def PopHeap(d, h, s, p):
# Index of smallest in heap
i = h[1]
# Put last element on top of heap
h[1] = h[s]
# Update pointer
p[h[1]] = 1
# Sift down the heap
SiftDown(d, h, s - 1, p, 1)
return i, s - 1
# Push element onto heap
# i = Graph index to add to heap
def PushHeap(d, h, s, p, i):
h[s + 1] = i # add to heap at end
p[i] = s + 1 # Update pointer to heap
SiftUp(d, h, s + 1, p, s + 1)
return s + 1
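# Note (sketch): the routines above implement a 1-indexed binary min-heap keyed by the value
# array d; h stores graph indices (h[0] is unused), p maps each graph vertex back to its heap
# slot, and s is the current heap size.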
def stencil_solver(ui, u, w=None):
if w is None:
w = np.ones((len(u),))
m = len(u)
# Sort neighbors
I = np.argsort(u)
u = u[I]
w = w[I]
f = np.zeros((m + 1,))
for i in range(m):
f[i] = np.sum(np.maximum(u[i] - u, 0) ** 2)
f[m] = np.maximum(1, f[m - 1])
k = np.argmin(f < 1)
b = np.sum(u[:k])
c = np.sum(u[:k] ** 2)
t = (b + np.sqrt(b * b - k * c + k)) / k
check = np.sum(np.maximum(t - u, 0) ** 2)
if (abs(check - 1) > 1e-5):
print("Error")
return t
# return np.min(u) + 1
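# Note (sketch): stencil_solver returns the value t solving the local eikonal update
#   sum_i max(t - u_i, 0)^2 = 1
# over the neighboring values u; with k active neighbors this reduces to the quadratic
# k*t^2 - 2*b*t + (c - 1) = 0 solved above, where b = sum(u[:k]) and c = sum(u[:k]**2).
# The weight argument w is currently sorted along with u but not otherwise used.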
# C code version of dijkstra
def cDijkstra(W, I, g, WI=None, WJ=None, WV=None, K=None):
n = W.shape[0]
k = len(I)
u = np.ones((n,)) * 1e10 # HJ Solver
l = -np.ones((n,), dtype=int) # Index of closest label
    if (WI is None) or (WV is None) or (K is None):
# Reformat weight matrix W into form more useful for Dijkstra
WI, WJ, WV = sparse.find(W)
K = np.array((WJ[1:] - WJ[:-1]).nonzero()) + 1
K = np.append(0, np.append(K, len(WJ)))
try: # Try to use fast C version, if compiled
import cmodules.cgraphpy as cgp
# Type casting and memory blocking
u = np.ascontiguousarray(u, dtype=np.float64)
l = np.ascontiguousarray(l, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float64)
K = np.ascontiguousarray(K, dtype=np.int32)
I = np.ascontiguousarray(I, dtype=np.int32)
g = np.ascontiguousarray(g, dtype=np.float64)
cgp.dijkstra(u, l, WI, K, WV, I, g, 1.0)
except:
print("You need to compile the cmodules!")
sys.exit(2)
return u
# Solve a general HJ equation with fast marching
def HJsolver(W, I, g, WI=None, WJ=None, WV=None, K=None, p=1):
n = W.shape[0]
k = len(I)
u = np.ones((n,)) * 1e10 # HJ Solver
l = -np.ones((n,), dtype=int) # Index of closest label
    if (WI is None) or (WV is None) or (K is None):
# Reformat weight matrix W into form more useful for Dijkstra
WI, WJ, WV = sparse.find(W)
K = np.array((WJ[1:] - WJ[:-1]).nonzero()) + 1
K = np.append(0, np.append(K, len(WJ)))
try: # Try to use fast C version, if compiled
import cmodules.cgraphpy as cgp
# Type casting and memory blocking
u = np.ascontiguousarray(u, dtype=np.float64)
l = np.ascontiguousarray(l, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float64)
K = np.ascontiguousarray(K, dtype=np.int32)
I = np.ascontiguousarray(I, dtype=np.int32)
g = np.ascontiguousarray(g, dtype=np.int32)
cgp.HJsolver(u, l, WI, K, WV, I, g, 1.0, p, 1.0)
except:
# Initialization
s = 0 # Size of heap
h = -np.ones((n + 1,), dtype=int) # Active points heap (indices of active points)
A = np.zeros((n,), dtype=bool) # Active flag
p = -np.ones((n,), dtype=int) # Pointer back to heap
V = np.zeros((n,), dtype=bool) # Finalized flag
l = -np.ones((n,), dtype=int) # Index of closest label
# Build active points heap and set distance = 0 for initial points
for i in range(k):
s = PushHeap(u, h, s, p, I[i])
u[I[i]] = g[i] # Initialize distance to zero
A[I[i]] = True # Set active flag to true
l[I[i]] = I[i] # Set index of closest label
# Dijkstra's algorithm
while s > 0:
i, s = PopHeap(u, h, s, p) # Pop smallest element off of heap
# Finalize this point
V[i] = True # Mark as finalized
A[i] = False # Set active flag to false
            # Update neighbors (warning: the code below is likely wrong; compare against the C implementation at some point)
for j in WI[K[i]:K[i + 1]]:
if j != i and V[j] == False:
nn_ind = WI[K[j]:K[j + 1]]
w_vals = WV[K[j]:K[j + 1]]
u_vals = u[nn_ind]
u_tmp = stencil_solver(u[j], u_vals, w=w_vals)
if A[j]: # If j is already active
if u_tmp < u[j]: # Need to update heap
u[j] = u_tmp
SiftUp(u, h, s, p, p[j])
l[j] = l[i]
else: # If j is not active
# Add to heap and initialize distance, active flag, and label index
s = PushHeap(u, h, s, p, j)
u[j] = u_tmp
A[j] = True
l[j] = l[i]
return u
# eikonal classifier
def eikonalSSL(W, I, g, p=2, beta=None):
k = len(I) # Number of labels
n = W.shape[0] # Number of datapoints
d = np.zeros((n,)) # Distance function
l = -np.ones((n,), dtype=int) # Index of closest label
# Reformat weight matrix W into form more useful for Dijkstra
WI, WJ, WV = sparse.find(W)
K = np.array((WJ[1:] - WJ[:-1]).nonzero()) + 1
K = np.append(0, np.append(K, len(WJ)))
c_code = False
try: # Try to use fast C version, if compiled
import cmodules.cgraphpy as cgp
# Type casting and memory blocking
d = np.ascontiguousarray(d, dtype=np.float64)
l = np.ascontiguousarray(l, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float64)
K = np.ascontiguousarray(K, dtype=np.int32)
I = np.ascontiguousarray(I, dtype=np.int32)
c_code = True
except:
c_code = False
labels = np.unique(g)
numl = len(labels)
u = np.zeros((numl, n))
for i in range(numl):
ind = I[g == labels[i]]
lab = np.zeros((len(ind),))
if c_code:
ind = np.ascontiguousarray(ind, dtype=np.int32)
lab = np.ascontiguousarray(lab, dtype=np.int32)
cgp.HJsolver(d, l, WI, K, WV, ind, lab, 1.0, p, 0.0)
u[i, :] = -d
else:
u[i, :] = -HJsolver(W, ind, lab, WI=WI, WV=WV, K=K, p=p)
if beta is not None:
_, s = volume_label_projection(u, beta, dt=-0.5)
u = np.diag(s) @ u
return u
# Nearest neighbor classifier (graph geodesic distance)
def nearestneighbor(W, I, g):
k = len(I) # Number of labels
n = W.shape[0] # Number of datapoints
d = np.ones((n,)) * 1e10 # Distance function
l = -np.ones((n,), dtype=int) # Index of closest label
# Reformat weight matrix W into form more useful for Dijkstra
WI, WJ, WV = sparse.find(W)
K = np.array((WJ[1:] - WJ[:-1]).nonzero()) + 1
K = np.append(0, np.append(K, len(WJ)))
try: # Try to use fast C version of dijkstra, if compiled
import cmodules.cgraphpy as cgp
# Type casting and memory blocking
d = np.ascontiguousarray(d, dtype=np.float64)
l = np.ascontiguousarray(l, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float64)
K = np.ascontiguousarray(K, dtype=np.int32)
I = np.ascontiguousarray(I, dtype=np.int32)
init = np.ascontiguousarray(np.zeros_like(I), dtype=np.float64)
cgp.dijkstra(d, l, WI, K, WV, I, init, 1.0)
except: # Use python version, which is slower
# Initialization
s = 0 # Size of heap
h = -np.ones((n + 1,), dtype=int) # Active points heap (indices of active points)
A = np.zeros((n,), dtype=bool) # Active flag
p = -np.ones((n,), dtype=int) # Pointer back to heap
V = np.zeros((n,), dtype=bool) # Finalized flag
# Build active points heap and set distance = 0 for initial points
for i in range(k):
d[I[i]] = 0 # Initialize distance to zero
A[I[i]] = True # Set active flag to true
l[I[i]] = I[i] # Set index of closest label
s = PushHeap(d, h, s, p, I[i])
# Dijkstra's algorithm
while s > 0:
i, s = PopHeap(d, h, s, p) # Pop smallest element off of heap
# Finalize this point
V[i] = True # Mark as finalized
A[i] = False # Set active flag to false
# Update neighbors
# for j in WI[K[i]:K[i+1]]:
for jj in range(K[i], K[i + 1]):
j = WI[jj]
if j != i and V[j] == False:
if A[j]: # If j is already active
tmp_dist = d[i] + WV[jj]
if tmp_dist < d[j]: # Need to update heap
d[j] = tmp_dist
SiftUp(d, h, s, p, p[j])
l[j] = l[i]
else: # If j is not active
# Add to heap and initialize distance, active flag, and label index
d[j] = d[i] + WV[jj]
A[j] = True
l[j] = l[i]
s = PushHeap(d, h, s, p, j)
# Set labels based on nearest neighbor
u = np.zeros((n,))
u[I] = g
u, _ = LabelsToVec(u[l])
return u
# Computes accuracy of clustering
def clustering_accuracy(L, L_true):
unique_classes = np.unique(L_true)
num_classes = len(unique_classes)
C = np.zeros((num_classes, num_classes), dtype=float)
for i in range(num_classes):
for j in range(num_classes):
C[i][j] = np.sum((L == i) & (L_true != j))
row_ind, col_ind = opt.linear_sum_assignment(C)
return 100 * (1 - C[row_ind, col_ind].sum() / len(L))
# Spectral embedding
# Projects the graph to R^k via spectral projection
# Method can be 'unnormalized', 'ShiMalik', or 'NgJordanWeiss'
def spectral_embedding(W, k, method='NgJordanWeiss'):
n = W.shape[0]
if method == 'unnormalized':
L = graph_laplacian(W, norm='none')
vals, vec = sparse.linalg.eigs(L, k=k, which='SM')
vec = vec.real
vals = vals.real
elif method == 'ShiMalik':
D = degree_matrix(W)
L = graph_laplacian(W, norm='none')
vals, vec = sparse.linalg.eigs(L, M=D, k=k, which='SM')
vec = vec.real
vals = vals.real
elif method == 'NgJordanWeiss':
L = graph_laplacian(W, norm='normalized')
vals, vec = sparse.linalg.eigs(L, k=k, which='SM')
vec = vec.real
vals = vals.real
norms = np.sum(vec * vec, axis=1)
T = sparse.spdiags(norms ** (-1 / 2), 0, n, n)
vec = T @ vec # Normalize rows
return vec
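# Example usage (sketch):
#   V = spectral_embedding(W, 10)      # n x 10 spectral coordinates
#   labels = kmeans(V, 10)             # cluster the embedded points
# which is essentially what spectral_cluster below does (with optional extra dimensions).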
def kmeans(X, k):
KM = cluster.KMeans(n_clusters=k).fit(X)
return KM.labels_
# Spectral Clustering
def spectral_cluster(W, k, method='NgJordanWeiss', extra_dim=0):
V = spectral_embedding(W, k + extra_dim, method=method)
kmeans = cluster.KMeans(n_clusters=k).fit(V)
# V = spectral_embedding(W,k,method=method)
# kmeans = cluster.KMeans(n_clusters=k).fit(V)
return kmeans.labels_
# INCRES clustering
# Bresson, Xavier, et al. "An incremental reseeding strategy for clustering." International Conference on Imaging, Vision and Learning based on Optimization and PDEs. Spr<NAME>, 2016.
# W = weight matrix
def incres_cluster(W, k, speed, T, labels):
n = W.shape[0]
# Increment
Dm = np.maximum(int(speed * 1e-4 * n / k), 1)
# Random initial labeling
u = random.randint(0, k, size=n)
# Initialization
F = np.zeros((n, k))
J = np.arange(n).astype(int)
# Random walk transition
D = degree_matrix(W, p=-1)
P = W * D
m = int(1)
for i in range(T):
# Plant
F.fill(0)
for r in range(k):
I = u == r
ind = J[I]
F[ind[random.choice(np.sum(I), m)], r] = 1
# Grow
while np.min(F) == 0:
F = P * F
# Harvest
u = np.argmax(F, axis=1)
# Increment
m = m + Dm
# Compute accuracy
if labels is not None:
acc = clustering_accuracy(u, labels)
print("Iteration " + str(i) + ": Accuracy = %.2f" % acc + "%%, #seeds= %d" % m)
return u
# Check if graph is connected
def isconnected(W):
num_comp, comp = csgraph.connected_components(W)
if num_comp == 1:
return True
else:
return False
# Graph-based clustering
# W = sparse weight matrix describing graph
# method = SSL method
# Options: incres
def graph_clustering(W, k, true_labels=None, method="incres", speed=5, T=100, extra_dim=0):
n = W.shape[0]
# Symmetrize W, if not already symmetric
W = (W + W.transpose()) / 2
# Check if connected
if not isconnected(W):
print('Warning: Graph is not connected!')
# Clustering
if method == "incres":
labels = incres_cluster(W, k, speed, T, true_labels)
elif method == "spectral":
labels = spectral_cluster(W, k, method="unnormalized", extra_dim=extra_dim)
elif method == "spectralshimalik":
labels = spectral_cluster(W, k, method="ShiMalik", extra_dim=extra_dim)
elif method == "spectralngjordanweiss":
labels = spectral_cluster(W, k, method="NgJordanWeiss", extra_dim=extra_dim)
else:
print("Invalid choice of clustering method.")
sys.exit()
return labels
# Graph-based semi-supervised learning
# W = sparse weight matrix describing graph
# I = indices of labeled datapoints
# g = values of labels
# method = SSL method
# Options: laplace, poisson, poisson_nodeg, wnll, properlyweighted, plaplace, randomwalk
def graph_ssl(W, I, g, D=None, Ns=40, mu=1, numT=50, beta=None, method="laplace", p=3, volume_mult=0.5, alpha=2,
zeta=1e7, r=0.1, epsilon=0.05, X=None, plaplace_solver="GradientDescentCcode", norm="none",
true_labels=None, eigvals=None, eigvecs=None, dataset=None, T=0, use_cuda=False, return_vector=False,
poisson_training_balance=True, symmetrize=True, error=None):
one_shot_methods = ["mbo", "poisson", "poissonbalanced", "poissonvolume", "poissonmbo_volume", "poissonmbo",
"poissonl1", "nearestneighbor", "poissonmbobalanced", "volumembo", "poissonvolumembo",
"dynamiclabelpropagation", "sparselabelpropagation", "centeredkernel", "eikonal"]
n = W.shape[0]
method = method.lower()
if beta is None:
beta = np.ones((len(np.unique(g)),))
# Symmetrize D,W, if not already symmetric
if symmetrize:
W = (W + W.transpose()) / 2
if D is not None:
D = sparse_max(D, D.transpose())
if not isconnected(W):
print('Warning: Graph is not connected!')
# One shot methods
if method in one_shot_methods:
if method == "mbo":
u = multiclassMBO(W, I, g, eigvals, eigvecs, dataset, true_labels=true_labels)
elif method == "volumembo":
u = volumeMBO(W, I, g, dataset, beta, T, volume_mult)
elif method == "poissonvolumembo":
u = poisson_volumeMBO(W, I, g, dataset, beta, T, volume_mult)
elif method == "poissonmbo_old":
u = poissonMBO(W, I, g, dataset, np.ones_like(beta), true_labels=true_labels, temp=T, use_cuda=use_cuda,
Ns=Ns, mu=mu, T=numT)
elif method == "poissonmbobalanced":
u = poissonMBO(W, I, g, dataset, beta, true_labels=true_labels, temp=T, use_cuda=use_cuda, Ns=Ns, mu=mu,
T=numT)
elif method == "poissonl1":
u = poissonL1(W, I, g, dataset, true_labels=true_labels)
elif method == "poisson":
u, _ = poisson(W, I, g, true_labels=true_labels, use_cuda=use_cuda,
training_balance=poisson_training_balance, error=error)
elif method == "poissonbalanced":
u, _ = poisson(W, I, g, true_labels=true_labels, use_cuda=use_cuda,
training_balance=poisson_training_balance, beta=beta)
elif method == "poissonvolume":
u = PoissonVolume(W, I, g, true_labels=true_labels, use_cuda=use_cuda,
training_balance=poisson_training_balance, beta=beta)
elif method == "poissonmbo":
u = poissonMBO_volume(W, I, g, dataset, beta, true_labels=true_labels, temp=T, use_cuda=use_cuda, Ns=Ns,
mu=mu)
elif method == "dynamiclabelpropagation":
u = DynamicLabelPropagation(W, I, g, true_labels=true_labels)
elif method == "sparselabelpropagation":
u = SparseLabelPropagation(W, I, g, true_labels=true_labels)
elif method == "centeredkernel":
u = CenteredKernel(W, I, g, true_labels=true_labels)
elif method == "nearestneighbor":
# Use distance matrix if provided, instead of weight matrix
if D is None:
u = nearestneighbor(W, I, g)
else:
u = nearestneighbor(D, I, g)
elif method == "eikonal":
# Use distance matrix if provided, instead of weight matrix
if D is None:
u = eikonalSSL(W, I, g, p=p, beta=beta)
else:
                u = eikonalSSL(D, I, g, p=p, beta=beta)
else: # One vs rest methods
k = len(np.unique(g)) # Number of labels
u = np.zeros((k, n))
i = 0
for l in np.unique(g):
h = g == l
# Solve binary classification problem
if method == "laplace":
v = laplace_solve(W, I, h, norm=norm)
elif method == "shift":
v = shift_solve(W, I, h)
elif method == "meanshift":
v = meanshift_solve(W, I, h)
elif method == "wnll":
v = wnll_solve(W, I, h)
elif method == "properlyweighted":
if X is None:
print("Must supply raw data points for properly weighted Laplacian.")
sys.exit()
v = properlyweighted_solve(W, I, h, X, alpha, zeta, r)
elif method == "plaplace":
v = plaplace_solve(W, I, h, p, sol_method=plaplace_solver, norm=norm)
elif method == "randomwalk":
v = randomwalk_solve(W, I, h, epsilon)
else:
print("Invalid choice of SSL method.")
sys.exit()
# Update labels
u[i, :] = v
i = i + 1
if return_vector:
labels = np.transpose(u)
else:
# Select labels
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
# Make sure to set labels at labeled points
labels[I] = g
return labels
    # confidence = usort[0, :] - usort[1, :]  # dead code: unreachable after the return above and usort is never defined
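# Example usage of graph_ssl (sketch; W a sparse weight matrix, train_ind/train_labels and
# all_labels are hypothetical variable names for the labeled subset and the full ground truth):
#   pred = graph_ssl(W, train_ind, train_labels, method="poisson")
#   acc = accuracy(pred, all_labels, len(train_ind))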
# Read numerical data from csv file
def csvread(filename):
X = []
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
n = 0
for row in csv_reader:
if not row[0] == 'Date/Time':
X += [float(i) for i in row]
m = len(row)
n += 1
return np.array(X).reshape((n, m))
# Compute average and standard deviation of accuracy over many trials
# Reads data from csv file filename
# Returns accuracy (acc), standard deviation (stddev) and number of labeled points (N)
def accuracy_statistics(filename):
X = csvread(filename)
N = np.unique(X[:, 0])
acc = []
stddev = []
quant = []
for n in N:
Y = X[X[:, 0] == n, 1]
Y = np.sort(Y)
acc += [np.mean(Y)]
quant += [Y[int(3 * len(Y) / 4)]]
stddev += [np.std(Y)]
# print("%.1f (%.1f)"%(np.mean(Y),np.std(Y)), end="&")
num_trials = len(X[:, 0]) / len(N)
return acc, stddev, N, quant, num_trials
# Makes an accuracy table to be included in LaTeX documents
# dataset = name of dataset
# ssl_methods = list of names of methods to compare
def accuracy_table_icml(dataset, ssl_method_list, legend_list, num_of_classes, testerror=False, savefile='tables.tex',
title='', quantile=False, append=False, directory='Results', fontsize='small', small_caps=True,
two_column=True):
# Retrieve number of different label rates m
accfile = directory + "/" + dataset + "_" + ssl_method_list[0] + "_accuracy.csv"
acc, stddev, N, quant, num_trials = accuracy_statistics(accfile)
m = len(N)
# Determine best algorithm at each label rate
best = [None] * m
best_score = [0] * m
i = 0
for ssl_method in ssl_method_list:
accfile = directory + "/" + dataset + "_" + ssl_method + "_accuracy.csv"
acc, stddev, N, quant, num_trials = accuracy_statistics(accfile)
if quantile:
acc = quant
for j in range(m):
if acc[j] > best_score[j]:
best_score[j] = acc[j]
best[j] = i
i += 1
if append:
f = open(savefile, "r")
lines = f.readlines()
f.close()
f = open(savefile, "w")
f.writelines([item for item in lines[:-1]])
else:
f = open(savefile, "w")
f.write("\\documentclass{article}\n")
f.write("\\usepackage[T1]{fontenc}\n")
f.write("\\usepackage{booktabs}\n")
f.write("\\usepackage[margin=1in]{geometry}\n")
f.write("\\begin{document}\n")
f.write("\n\n\n")
if two_column:
f.write("\\begin{table*}[t!]\n")
else:
f.write("\\begin{table}[t!]\n")
f.write("\\vspace{-3mm}\n")
f.write(
"\\caption{" + title + ": Average (standard deviation) classification accuracy over %d trials.}\n" % num_trials)
f.write("\\vspace{-3mm}\n")
f.write("\\label{tab:" + title + "}\n")
f.write("\\vskip 0.15in\n")
f.write("\\begin{center}\n")
f.write("\\begin{" + fontsize + "}\n")
if small_caps:
f.write("\\begin{sc}\n")
f.write("\\begin{tabular}{l")
for i in range(m):
f.write("l")
f.write("}\n")
f.write("\\toprule\n")
f.write("\\# Labels per class")
for i in range(m):
f.write("&\\textbf{%d}" % int(N[i] / num_of_classes))
f.write("\\\\\n")
f.write("\\midrule\n")
i = 0
for ssl_method in ssl_method_list:
f.write(legend_list[i].ljust(15))
accfile = directory + "/" + dataset + "_" + ssl_method + "_accuracy.csv"
acc, stddev, N, quant, num_trials = accuracy_statistics(accfile)
for j in range(m):
if best[j] == i:
f.write("&{\\bf %.1f" % acc[j] + " (%.1f)}" % stddev[j])
# f.write("&${\\bf %.1f"%acc[j]+"\\pm %.1f}$"%stddev[j])
else:
f.write("&%.1f" % acc[j] + " (%.1f) " % stddev[j])
# f.write("&$%.1f"%acc[j]+"\\pm %.1f$ "%stddev[j])
f.write("\\\\\n")
i += 1
f.write("\\bottomrule\n")
f.write("\\end{tabular}\n")
if small_caps:
f.write("\\end{sc}\n")
f.write("\\end{" + fontsize + "}\n")
f.write("\\end{center}\n")
f.write("\\vskip -0.1in\n")
if two_column:
f.write("\\end{table*}")
else:
f.write("\\end{table}")
f.write("\n\n\n")
f.write("\\end{document}\n")
f.close()
def plot_graph(X, W, l=None):
# Other colormaps, coolwarm, winter, Set3, tab20b, rainbow
# plt.ion()
colors = np.array([[1.0, 0, 0], [0, 0.9, 0]])
plt.rcParams['figure.facecolor'] = 'navy'
n = W.shape[0]
I, J, V = sparse.find(W)
for i in range(len(I)):
xval = [X[I[i], 0], X[J[i], 0]]
yval = [X[I[i], 1], X[J[i], 1]]
# plt.plot(xval,yval, color='black', linewidth=0.15, markersize=0)
plt.plot(xval, yval, color=[0.5, 0.5, 0.5], linewidth=0.5, markersize=0)
if l is None:
# plt.scatter(X[:,0],X[:,1], s=30, cmap='Paired')
plt.scatter(X[:, 0], X[:, 1], s=8, zorder=3)
else:
# plt.scatter(X[:,0],X[:,1], s=30, c=l, cmap='Paired')
plt.scatter(X[:, 0], X[:, 1], s=8, c=colors[l, :], zorder=3)
plt.axis("off")
# plot average and standard deviation of accuracy over many trials
# dataset = name of dataset
# ssl_methods = list of names of methods to compare
def accuracy_plot(dataset, ssl_method_list, legend_list, num_of_classes, title=None, errorbars=False, testerror=False,
savefile=None, loglog=False, log_dirs=None, directed_graph=False):
if log_dirs is None:
log_dirs = ["Results/"]
# plt.ion()
plt.figure()
if errorbars:
matplotlib.rcParams.update({'errorbar.capsize': 5})
matplotlib.rcParams.update({'font.size': 16})
styles = ['^b-', 'or-', 'dg-', 'sk-', 'pm-', 'xc-', '*y-']
i = 0
for log in log_dirs:
for ssl_method in ssl_method_list:
accfile = os.path.join(log, dataset + "_" + ssl_method)
if directed_graph:
accfile += "_directed"
accfile += "_accuracy.csv"
acc, stddev, N, quant, num_trials = accuracy_statistics(accfile)
if testerror:
                acc = 100 - np.array(acc)
# z = np.polyfit(np.log(N),np.log(acc),1)
# print(z[0])
if errorbars:
plt.errorbar(N / num_of_classes, acc, fmt=styles[i], yerr=stddev, label=legend_list[i])
else:
if loglog:
plt.loglog(N / num_of_classes, acc, styles[i], label=legend_list[i])
else:
plt.plot(N / num_of_classes, acc, styles[i], label=legend_list[i])
i += 1
plt.xlabel('Number of labels per class')
if testerror:
plt.ylabel('Test error (%)')
plt.legend(loc='upper right')
else:
plt.ylabel('Accuracy (%)')
plt.legend(loc='lower right')
if title is not None:
plt.title(title)
plt.tight_layout()
plt.grid(True)
if savefile is not None:
plt.savefig(savefile)
else:
plt.show()
# Select labels based on a ranking
# Produces a label permutation with one trial, with the same variation of #labels per class as the label permutation perm provided as input
def SelectLabels(labels, permold, rank):
perm = permold
# Number of classes
L = np.unique(labels)
k = len(L)
n = len(labels)
m = len(permold)
num = np.zeros((m,))
for i in range(m):
num[i] = len(permold[i])
num, unique_perm = np.unique(num, return_index=True)
perm = list()
for i in unique_perm:
p = permold[i]
pl = labels[p]
ind = []
for l in L:
numl = np.sum(pl == l)
K = labels == l
c = np.argsort(-rank[K])
j = np.arange(0, n)[K]
ind = ind + j[c[:numl]].tolist()
ind = np.array(ind)
perm.append(ind)
return perm
# PageRank algorithm
def PageRank(W, alpha):
n = W.shape[0]
u = np.ones((n,))
v = np.ones((n,))
D = degree_matrix(W, p=-1)
P = np.transpose(D * W)
err = 1
while err > 1e-10:
w = alpha * P * u + (1 - alpha) * v
err = np.max(np.absolute(w - u))
u = w
return u
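# Note (sketch): PageRank runs the fixed-point iteration u <- alpha*P*u + (1-alpha)*v, where P is
# the transpose of the random-walk matrix D^{-1}W and v is the all-ones teleportation vector, so
# the returned u satisfies u = alpha*P*u + (1-alpha)*v up to the 1e-10 tolerance.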
# Print help
def print_help():
print('========================================================')
print('GraphLearning: Python package for graph-based learning. ')
print('========================================================')
print('========================================================')
print('Graph-based Clustering & Semi-Supervised Learning')
print('========================================================')
print(' ')
print('Options:')
print(' -d (--dataset=): MNIST, FashionMNIST, WEBKB, cifar (default=MNIST)')
print(' -m (--metric=): Metric for computing similarities (default=L2)')
print(' Choices: vae, scatter, L2, aet')
print(' -a (--algorithm=): Learning algorithm (default=Laplace)')
print(' -k (--knn=): Number of nearest neighbors (default=10)')
print(' -t (--num_trials=): Number of trial permutations to run (default=all)')
print(
' -l (--label_perm=): Choice of label permutation file (format=dataset<label_perm>_permutations.npz). (default is empty).')
print(' -p (--p=): Value of p for plaplace method (default=3)')
print(' -n (--normalization=): Laplacian normalization (default=none)')
print(' Choices: none, normalized')
print(' -N (--num_classes): Number of clusters if choosing clustering algorithm (default=10)')
print(' -s (--speed=): Speed in INCRES method (1--10) (default=2)')
print(' -i (--num_iter=): Number of iterations for iterative methods (default=1000)')
print(' -x (--extra_dim=): Number of extra dimensions in spectral clustering (default=0)')
print(' -c (--cuda): Use GPU acceleration (when available)')
print(' -T (--temperature): Temperature for volume constrained MBO (default=0)')
print(' -v (--volume_constraint=): Volume constraint for MBO (default=0.5)')
print(' -j (--num_cores=): Number of cores to use in parallel processing (default=1)')
print(' -r (--results): Turns off automatic saving of results to .csv file')
print(' -b (--verbose): Turns on verbose mode (displaying more intermediate steps).')
# Default settings
def default_dataset(): return 'MNIST'
def default_metric(): return 'L2'
def default_algorithm(): return 'laplace'
def default_k(): return 10
def default_t(): return '-1'
def default_label_perm(): return ''
def default_p(): return 3
def default_norm(): return "none"
def default_use_cuda(): return False
def default_T(): return 0
def default_num_cores(): return 1
def default_results(): return True
def default_num_classes(): return 10
def default_speed(): return 2
def default_num_iter(): return 1000
def default_extra_dim(): return 0
def default_volume_constraint(): return 0.5
def default_verbose(): return False
def default_poisson_training_balance(): return True
def default_directed_graph(): return False
# Main subroutine. Callable from other scripts as graphlearning.main(...)
def main(dataset=default_dataset(), metric=default_metric(), algorithm=default_algorithm(), k=default_k(),
t=default_t(), label_perm=default_label_perm(), p=default_p(), norm=default_norm(),
use_cuda=default_use_cuda(), T=default_T(), num_cores=default_num_cores(), results=default_results(),
num_classes=default_num_classes(), speed=default_speed(), num_iter=default_num_iter(),
extra_dim=default_extra_dim(), volume_constraint=default_volume_constraint(), verbose=default_verbose(),
poisson_training_balance=default_poisson_training_balance(), directed_graph=default_directed_graph()):
# Load labels
labels = load_labels(dataset)
# Load nearest neighbor data
I, J, D = load_kNN_data(dataset, metric=metric)
    # Construct weight matrix and distance matrix
W, error = nnk_weight_matrix(dataset, metric, mask=J, knn_param=k, symmetrize=not directed_graph)
Wdist = None # dist_matrix(I, J, D, k)
# Load label permutation (including restrictions in t)
perm = load_label_permutation(dataset, label_perm=label_perm, t=t)
# Load eigenvector data if MBO selected
if algorithm == 'mbo':
eigvals, eigvecs = load_mbo_eig(dataset, metric, k)
else:
eigvals = None
eigvecs = None
# Output file
outfile = "Results/" + dataset + label_perm + "_" + metric + "_k%d" % k
if algorithm == 'plaplace':
outfile = outfile + "_p%.1f" % p + algorithm[1:] + "_" + norm
elif algorithm == 'eikonal':
outfile = outfile + "_p%.1f" % p + algorithm
else:
outfile = outfile + "_" + algorithm
if algorithm == 'volumembo' or algorithm == 'poissonvolumembo':
outfile = outfile + "_T%.3f" % T
outfile = outfile + "_V%.3f" % volume_constraint
if algorithm == 'poisson' and poisson_training_balance == False:
outfile = outfile + "_NoBal"
if directed_graph:
outfile += "_directed"
outfile = outfile + "_accuracy.csv"
# Print basic info
print('========================================================')
print('GraphLearning: Python package for graph-based learning. ')
print('========================================================')
print('========================================================')
print('Graph-based Clustering & Semi-Supervised Learning')
print('========================================================')
print(' ')
print('Dataset: ' + dataset)
print('Metric: ' + metric)
print('Number of neighbors: %d' % k)
print('Learning algorithm: ' + algorithm)
print('Laplacian normalization: ' + norm)
if algorithm == 'plaplace' or algorithm == 'eikonal':
print("p-Laplace/eikonal value p=%.2f" % p)
if algorithm in clustering_algorithms:
print('Number of clusters: %d' % num_classes)
if algorithm == 'INCRES':
print('INCRES speed: %.2f' % speed)
print('Number of iterations: %d' % num_iter)
if algorithm[:8] == 'Spectral':
print('Number of extra dimensions: %d' % extra_dim)
else:
print('Number of trial permutations: %d' % len(perm))
print('Permutations file: LabelPermutations/' + dataset + label_perm + '_permutations.npz')
if algorithm == 'volumembo' or algorithm == 'poissonvolumembo':
print("Using temperature=%.3f" % T)
print("Volume constraints = [%.3f,%.3f]" % (volume_constraint, 2 - volume_constraint))
# If output file selected
if results:
print('Output file: ' + outfile)
print(' ')
print('========================================================')
print(' ')
true_labels = None
if verbose:
true_labels = labels
# If clustering algorithm was chosen
if algorithm in clustering_algorithms:
# Clustering
u = graph_clustering(W, num_classes, labels, method=algorithm, T=num_iter, speed=speed, extra_dim=extra_dim)
# Compute accuracy
acc = clustering_accuracy(u, labels)
# Print to terminal
print("Accuracy: %.2f" % acc + "%")
# If semi-supervised algorithms chosen
else:
# If output file selected
if results:
# Check if Results directory exists
if not os.path.exists('Results'):
os.makedirs('Results')
now = datetime.datetime.now()
# Add time stamp to output file
f = open(outfile, "a+")
f.write("Date/Time, " + now.strftime("%Y-%m-%d_%H:%M") + "\n")
f.close()
# Loop over label permutations
print("Number of labels, Accuracy")
def one_trial(label_ind):
# Number of labels
m = len(label_ind)
            # Label proportions (used by some algorithms)
beta = label_proportions(labels)
start_time = time.time()
# Graph-based semi-supervised learning
u = graph_ssl(W, label_ind, labels[label_ind], D=Wdist, beta=beta, method=algorithm, epsilon=0.3, p=p,
norm=norm, eigvals=eigvals, eigvecs=eigvecs, dataset=dataset, T=T, use_cuda=use_cuda,
volume_mult=volume_constraint, true_labels=true_labels,
poisson_training_balance=poisson_training_balance, symmetrize=not directed_graph, error=error)
print("--- %s seconds ---" % (time.time() - start_time))
# Compute accuracy
acc = accuracy(u, labels, m)
# Print to terminal
print("%d" % m + ",%.2f" % acc)
# Write to file
if results:
f = open(outfile, "a+")
f.write("%d" % m + ",%.2f\n" % acc)
f.close()
# Number of cores for parallel processing
num_cores = min(multiprocessing.cpu_count(), num_cores)
Parallel(n_jobs=num_cores)(delayed(one_trial)(label_ind) for label_ind in perm)
if __name__ == '__main__':
# Default settings
dataset = default_dataset()
metric = default_metric()
algorithm = default_algorithm()
k = default_k()
t = default_t()
label_perm = default_label_perm()
p = default_p()
norm = default_norm()
use_cuda = default_use_cuda()
T = default_T()
num_cores = default_num_cores()
results = default_results()
num_classes = default_num_classes()
speed = default_speed()
num_iter = default_num_iter()
extra_dim = default_extra_dim()
volume_constraint = default_volume_constraint()
verbose = default_verbose()
poisson_training_balance = default_poisson_training_balance()
directed_graph = default_directed_graph()
# Read command line arguments
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:m:k:a:p:n:v:N:s:i:x:t:cl:T:j:rboz",
["dataset=", "metric=", "knn=", "algorithm=", "p=", "normalization=",
"volume_constraint=", "num_classes=", "speed=", "num_iter=", "extra_dim=",
"num_trials=", "cuda", "label_perm=", "temperature=", "num_cores=", "results",
"verbose", "poisson_training_balance", "directed"])
except getopt.GetoptError:
print_help()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_help()
sys.exit()
elif opt in ("-d", "--dataset"):
dataset = arg
elif opt in ("-m", "--metric"):
metric = arg
elif opt in ("-k", "--knn"):
k = int(arg)
elif opt in ("-a", "--algorithm"):
algorithm = arg.lower()
elif opt in ("-p", "--p"):
p = float(arg)
elif opt in ("-n", "--normalization"):
norm = arg
elif opt in ("-v", "--volume_constraint"):
volume_constraint = float(arg)
elif opt in ("-N", "--num_classes"):
num_classes = int(arg)
elif opt in ("-s", "--speed"):
speed = float(arg)
elif opt in ("-i", "--num_iter"):
num_iter = int(arg)
elif opt in ("-x", "--extra_dim"):
extra_dim = int(arg)
elif opt in ("-t", "--num_trials"):
t = arg
elif opt in ("-c", "--cuda"):
use_cuda = True
elif opt in ("-l", "--label_perm"):
label_perm = arg
elif opt in ("-T", "--temperature"):
T = float(arg)
elif opt in ("-j", "--num_cores"):
num_cores = int(arg)
elif opt in ("-r", "--results"):
results = False
elif opt in ("-b", "--verbose"):
verbose = True
elif opt in ("-o", "--poisson_training_balance"):
poisson_training_balance = False
elif opt in ("-z", "--directed"):
directed_graph = True
# Call main subroutine
main(dataset=dataset, metric=metric, algorithm=algorithm, k=k, t=t, label_perm=label_perm, p=p, norm=norm,
use_cuda=use_cuda, T=T, num_cores=num_cores, results=results, num_classes=num_classes, speed=speed,
num_iter=num_iter, extra_dim=extra_dim, volume_constraint=volume_constraint, verbose=verbose,
poisson_training_balance=poisson_training_balance, directed_graph=directed_graph)
|
(57071, 57083), True, 'import numpy as np\n'), ((58975, 58990), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (58987, 58990), True, 'import numpy as np\n'), ((58993, 59011), 'numpy.mean', 'np.mean', (['u'], {'axis': '(1)'}), '(u, axis=1)\n', (59000, 59011), True, 'import numpy as np\n'), ((60798, 60818), 'numpy.argmax', 'np.argmax', (['u'], {'axis': '(0)'}), '(u, axis=0)\n', (60807, 60818), True, 'import numpy as np\n'), ((63318, 63344), 'numpy.sqrt', 'np.sqrt', (['(b * b - k * c + k)'], {}), '(b * b - k * c + k)\n', (63325, 63344), True, 'import numpy as np\n'), ((63370, 63390), 'numpy.maximum', 'np.maximum', (['(t - u)', '(0)'], {}), '(t - u, 0)\n', (63380, 63390), True, 'import numpy as np\n'), ((64605, 64616), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (64613, 64616), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((65873, 65899), 'numpy.zeros', 'np.zeros', (['(n,)'], {'dtype': 'bool'}), '((n,), dtype=bool)\n', (65881, 65899), True, 'import numpy as np\n'), ((65989, 66015), 'numpy.zeros', 'np.zeros', (['(n,)'], {'dtype': 'bool'}), '((n,), dtype=bool)\n', (65997, 66015), True, 'import numpy as np\n'), ((68798, 68839), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['ind'], {'dtype': 'np.int32'}), '(ind, dtype=np.int32)\n', (68818, 68839), True, 'import numpy as np\n'), ((68858, 68899), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['lab'], {'dtype': 'np.int32'}), '(lab, dtype=np.int32)\n', (68878, 68899), True, 'import numpy as np\n'), ((68912, 68964), 'cmodules.cgraphpy.HJsolver', 'cgp.HJsolver', (['d', 'l', 'WI', 'K', 'WV', 'ind', 'lab', '(1.0)', 'p', '(0.0)'], {}), '(d, l, WI, K, WV, ind, lab, 1.0, p, 0.0)\n', (68924, 68964), True, 'import cmodules.cgraphpy as cgp\n'), ((69168, 69178), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (69175, 69178), True, 'import numpy as np\n'), ((70171, 70187), 'numpy.zeros_like', 'np.zeros_like', (['I'], {}), '(I)\n', (70184, 70187), True, 'import numpy as np\n'), ((70471, 70497), 'numpy.zeros', 'np.zeros', (['(n,)'], {'dtype': 'bool'}), '((n,), dtype=bool)\n', (70479, 70497), True, 'import numpy as np\n'), ((70587, 70613), 'numpy.zeros', 'np.zeros', (['(n,)'], {'dtype': 'bool'}), '((n,), dtype=bool)\n', (70595, 70613), True, 'import numpy as np\n'), ((72434, 72466), 'numpy.sum', 'np.sum', (['((L == i) & (L_true != j))'], {}), '((L == i) & (L_true != j))\n', (72440, 72466), True, 'import numpy as np\n'), ((73098, 73141), 'scipy.sparse.linalg.eigs', 'sparse.linalg.eigs', (['L'], {'M': 'D', 'k': 'k', 'which': '"""SM"""'}), "(L, M=D, k=k, which='SM')\n", (73116, 73141), True, 'import scipy.sparse as sparse\n'), ((73565, 73593), 'sklearn.cluster.KMeans', 'cluster.KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (73579, 73593), True, 'import sklearn.cluster as cluster\n'), ((73785, 73813), 'sklearn.cluster.KMeans', 'cluster.KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (73799, 73813), True, 'import sklearn.cluster as cluster\n'), ((74425, 74437), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (74434, 74437), True, 'import numpy as np\n'), ((74756, 74765), 'numpy.min', 'np.min', (['F'], {}), '(F)\n', (74762, 74765), True, 'import numpy as np\n'), ((80241, 80253), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (80250, 80253), True, 'import numpy as np\n'), ((81568, 81580), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (81577, 81580), True, 'import numpy as np\n'), ((82096, 82107), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (82104, 82107), True, 'import numpy as np\n'), 
((82538, 82548), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (82545, 82548), True, 'import numpy as np\n'), ((82611, 82620), 'numpy.std', 'np.std', (['Y'], {}), '(Y)\n', (82617, 82620), True, 'import numpy as np\n'), ((87767, 87812), 'os.path.join', 'os.path.join', (['log', "(dataset + '_' + ssl_method)"], {}), "(log, dataset + '_' + ssl_method)\n", (87779, 87812), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((89563, 89578), 'numpy.sum', 'np.sum', (['(pl == l)'], {}), '(pl == l)\n', (89569, 89578), True, 'import numpy as np\n'), ((89623, 89643), 'numpy.argsort', 'np.argsort', (['(-rank[K])'], {}), '(-rank[K])\n', (89633, 89643), True, 'import numpy as np\n'), ((90066, 90084), 'numpy.absolute', 'np.absolute', (['(w - u)'], {}), '(w - u)\n', (90077, 90084), True, 'import numpy as np\n'), ((97712, 97735), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (97733, 97735), False, 'import datetime\n'), ((98226, 98237), 'time.time', 'time.time', ([], {}), '()\n', (98235, 98237), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((99184, 99211), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (99209, 99211), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((99232, 99258), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_cores'}), '(n_jobs=num_cores)\n', (99240, 99258), False, 'from joblib import Parallel, delayed\n'), ((100655, 100666), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (100663, 100666), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((100754, 100764), 'sys.exit', 'sys.exit', ([], {}), '()\n', (100762, 100764), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((11548, 11558), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (11555, 11558), True, 'import numpy as np\n'), ((18294, 18335), 'numpy.dot', 'np.dot', (['x_neighbors', 'X_normalized[node_i]'], {}), '(x_neighbors, X_normalized[node_i])\n', (18300, 18335), True, 'import numpy as np\n'), ((18360, 18394), 'numpy.dot', 'np.dot', (['x_neighbors', 'x_neighbors.T'], {}), '(x_neighbors, x_neighbors.T)\n', (18366, 18394), True, 'import numpy as np\n'), ((20081, 20096), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (20093, 20096), True, 'import numpy as np\n'), ((21235, 21250), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (21247, 21250), True, 'import numpy as np\n'), ((30238, 30248), 'numpy.max', 'np.max', (['dx'], {}), '(dx)\n', (30244, 30248), True, 'import numpy as np\n'), ((31017, 31027), 'sys.exit', 'sys.exit', ([], {}), '()\n', (31025, 31027), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((33171, 33186), 'numpy.max', 'np.max', (['(uu - ul)'], {}), '(uu - ul)\n', (33177, 33186), True, 'import numpy as np\n'), ((33210, 33225), 'numpy.min', 'np.min', (['(uu - ul)'], {}), '(uu - ul)\n', (33216, 33225), True, 'import numpy as np\n'), ((34301, 34318), 'numpy.sum', 'np.sum', (['F'], {'axis': '(1)'}), '(F, axis=1)\n', (34307, 34318), True, 'import numpy as np\n'), ((35354, 35371), 'numpy.transpose', 'np.transpose', (['Sum'], {}), '(Sum)\n', (35366, 35371), True, 'import numpy as np\n'), ((35380, 35392), 'numpy.arange', 'np.arange', (['k'], {}), '(k)\n', (35389, 35392), True, 'import numpy as np\n'), ((40331, 40343), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (40340, 40343), True, 'import numpy as np\n'), ((42070, 42101), 'torch.sparse.addmm', 'torch.sparse.addmm', (['Dbt', 'Pt', 'ut'], {}), '(Dbt, Pt, ut)\n', (42088, 
42101), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((42672, 42684), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (42681, 42684), True, 'import numpy as np\n'), ((44684, 44696), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (44693, 44696), True, 'import numpy as np\n'), ((45968, 45979), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (45976, 45979), True, 'import numpy as np\n'), ((46012, 46032), 'numpy.argmax', 'np.argmax', (['u'], {'axis': '(1)'}), '(u, axis=1)\n', (46021, 46032), True, 'import numpy as np\n'), ((47526, 47538), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (47535, 47538), True, 'import numpy as np\n'), ((50148, 50158), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (50155, 50158), True, 'import numpy as np\n'), ((51464, 51495), 'torch.sparse.addmm', 'torch.sparse.addmm', (['Dbt', 'Pt', 'ut'], {}), '(Dbt, Pt, ut)\n', (51482, 51495), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((51900, 51912), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (51909, 51912), True, 'import numpy as np\n'), ((53040, 53058), 'scipy.sparse.identity', 'sparse.identity', (['n'], {}), '(n)\n', (53055, 53058), True, 'import scipy.sparse as sparse\n'), ((54424, 54444), 'numpy.argmax', 'np.argmax', (['u'], {'axis': '(1)'}), '(u, axis=1)\n', (54433, 54444), True, 'import numpy as np\n'), ((54841, 54855), 'numpy.diag', 'np.diag', (['(1 / c)'], {}), '(1 / c)\n', (54848, 54855), True, 'import numpy as np\n'), ((54890, 54907), 'numpy.diag', 'np.diag', (['(beta / c)'], {}), '(beta / c)\n', (54897, 54907), True, 'import numpy as np\n'), ((57260, 57280), 'numpy.argmax', 'np.argmax', (['u'], {'axis': '(1)'}), '(u, axis=1)\n', (57269, 57280), True, 'import numpy as np\n'), ((57724, 57738), 'numpy.diag', 'np.diag', (['(1 / c)'], {}), '(1 / c)\n', (57731, 57738), True, 'import numpy as np\n'), ((57773, 57790), 'numpy.diag', 'np.diag', (['(beta / c)'], {}), '(beta / c)\n', (57780, 57790), True, 'import numpy as np\n'), ((60841, 60853), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (60850, 60853), True, 'import numpy as np\n'), ((63164, 63187), 'numpy.maximum', 'np.maximum', (['(u[i] - u)', '(0)'], {}), '(u[i] - u, 0)\n', (63174, 63187), True, 'import numpy as np\n'), ((65783, 65811), 'numpy.ones', 'np.ones', (['(n + 1,)'], {'dtype': 'int'}), '((n + 1,), dtype=int)\n', (65790, 65811), True, 'import numpy as np\n'), ((65928, 65952), 'numpy.ones', 'np.ones', (['(n,)'], {'dtype': 'int'}), '((n,), dtype=int)\n', (65935, 65952), True, 'import numpy as np\n'), ((66047, 66071), 'numpy.ones', 'np.ones', (['(n,)'], {'dtype': 'int'}), '((n,), dtype=int)\n', (66054, 66071), True, 'import numpy as np\n'), ((70381, 70409), 'numpy.ones', 'np.ones', (['(n + 1,)'], {'dtype': 'int'}), '((n + 1,), dtype=int)\n', (70388, 70409), True, 'import numpy as np\n'), ((70526, 70550), 'numpy.ones', 'np.ones', (['(n,)'], {'dtype': 'int'}), '((n,), dtype=int)\n', (70533, 70550), True, 'import numpy as np\n'), ((73296, 73334), 'scipy.sparse.linalg.eigs', 'sparse.linalg.eigs', (['L'], {'k': 'k', 'which': '"""SM"""'}), "(L, k=k, which='SM')\n", (73314, 73334), True, 'import scipy.sparse as sparse\n'), ((73399, 73424), 'numpy.sum', 'np.sum', (['(vec * vec)'], {'axis': '(1)'}), '(vec * vec, axis=1)\n', (73405, 73424), True, 'import numpy as np\n'), ((73437, 73479), 'scipy.sparse.spdiags', 'sparse.spdiags', (['(norms ** (-1 / 2))', '(0)', 'n', 'n'], {}), '(norms ** (-1 / 2), 0, n, n)\n', (73451, 73479), True, 'import scipy.sparse as sparse\n'), ((88187, 88279), 
'matplotlib.pyplot.errorbar', 'plt.errorbar', (['(N / num_of_classes)', 'acc'], {'fmt': 'styles[i]', 'yerr': 'stddev', 'label': 'legend_list[i]'}), '(N / num_of_classes, acc, fmt=styles[i], yerr=stddev, label=\n legend_list[i])\n', (88199, 88279), True, 'import matplotlib.pyplot as plt\n'), ((89660, 89675), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (89669, 89675), True, 'import numpy as np\n'), ((97627, 97652), 'os.path.exists', 'os.path.exists', (['"""Results"""'], {}), "('Results')\n", (97641, 97652), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((97670, 97692), 'os.makedirs', 'os.makedirs', (['"""Results"""'], {}), "('Results')\n", (97681, 97692), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((32753, 32769), 'numpy.absolute', 'np.absolute', (['res'], {}), '(res)\n', (32764, 32769), True, 'import numpy as np\n'), ((33076, 33092), 'numpy.absolute', 'np.absolute', (['res'], {}), '(res)\n', (33087, 33092), True, 'import numpy as np\n'), ((33632, 33642), 'sys.exit', 'sys.exit', ([], {}), '()\n', (33640, 33642), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((35066, 35085), 'numpy.sum', 'np.sum', (['(L == L_true)'], {}), '(L == L_true)\n', (35072, 35085), True, 'import numpy as np\n'), ((42465, 42480), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (42477, 42480), True, 'import numpy as np\n'), ((42483, 42501), 'numpy.mean', 'np.mean', (['u'], {'axis': '(1)'}), '(u, axis=1)\n', (42490, 42501), True, 'import numpy as np\n'), ((46059, 46071), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (46068, 46071), True, 'import numpy as np\n'), ((46971, 46986), 'numpy.transpose', 'np.transpose', (['e'], {}), '(e)\n', (46983, 46986), True, 'import numpy as np\n'), ((46994, 47009), 'numpy.transpose', 'np.transpose', (['e'], {}), '(e)\n', (47006, 47009), True, 'import numpy as np\n'), ((54471, 54483), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (54480, 54483), True, 'import numpy as np\n'), ((57307, 57319), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (57316, 57319), True, 'import numpy as np\n'), ((76224, 76234), 'sys.exit', 'sys.exit', ([], {}), '()\n', (76232, 76234), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((77350, 77362), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (77359, 77362), True, 'import numpy as np\n'), ((88340, 88408), 'matplotlib.pyplot.loglog', 'plt.loglog', (['(N / num_of_classes)', 'acc', 'styles[i]'], {'label': 'legend_list[i]'}), '(N / num_of_classes, acc, styles[i], label=legend_list[i])\n', (88350, 88408), True, 'import matplotlib.pyplot as plt\n'), ((88451, 88517), 'matplotlib.pyplot.plot', 'plt.plot', (['(N / num_of_classes)', 'acc', 'styles[i]'], {'label': 'legend_list[i]'}), '(N / num_of_classes, acc, styles[i], label=legend_list[i])\n', (88459, 88517), True, 'import matplotlib.pyplot as plt\n'), ((99259, 99277), 'joblib.delayed', 'delayed', (['one_trial'], {}), '(one_trial)\n', (99266, 99277), False, 'from joblib import Parallel, delayed\n'), ((7818, 7847), 'numpy.round', 'np.round', (['(i * multiplier[ind])'], {}), '(i * multiplier[ind])\n', (7826, 7847), True, 'import numpy as np\n'), ((10550, 10560), 'numpy.sign', 'np.sign', (['w'], {}), '(w)\n', (10557, 10560), True, 'import numpy as np\n'), ((15864, 15881), 'numpy.max', 'np.max', (['W'], {'axis': '(1)'}), '(W, axis=1)\n', (15870, 15881), True, 'import numpy as np\n'), ((45792, 45807), 'numpy.transpose', 'np.transpose', (['P'], {}), '(P)\n', (45804, 45807), True, 
'import numpy as np\n'), ((45822, 45837), 'numpy.transpose', 'np.transpose', (['v'], {}), '(v)\n', (45834, 45837), True, 'import numpy as np\n'), ((49655, 49665), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (49662, 49665), True, 'import numpy as np\n'), ((53604, 53623), 'torch.from_numpy', 'torch.from_numpy', (['u'], {}), '(u)\n', (53620, 53623), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((53653, 53673), 'torch.from_numpy', 'torch.from_numpy', (['Db'], {}), '(Db)\n', (53669, 53673), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((53763, 53784), 'numpy.absolute', 'np.absolute', (['(v - vinf)'], {}), '(v - vinf)\n', (53774, 53784), True, 'import numpy as np\n'), ((54145, 54166), 'numpy.absolute', 'np.absolute', (['(v - vinf)'], {}), '(v - vinf)\n', (54156, 54166), True, 'import numpy as np\n'), ((56271, 56290), 'torch.from_numpy', 'torch.from_numpy', (['u'], {}), '(u)\n', (56287, 56290), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((56319, 56338), 'torch.from_numpy', 'torch.from_numpy', (['b'], {}), '(b)\n', (56335, 56338), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((98761, 98772), 'time.time', 'time.time', ([], {}), '()\n', (98770, 98772), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((41793, 41809), 'numpy.transpose', 'np.transpose', (['Db'], {}), '(Db)\n', (41805, 41809), True, 'import numpy as np\n'), ((51189, 51205), 'numpy.transpose', 'np.transpose', (['Db'], {}), '(Db)\n', (51201, 51205), True, 'import numpy as np\n'), ((74703, 74712), 'numpy.sum', 'np.sum', (['I'], {}), '(I)\n', (74709, 74712), True, 'import numpy as np\n'), ((78103, 78121), 'numpy.ones_like', 'np.ones_like', (['beta'], {}), '(beta)\n', (78115, 78121), True, 'import numpy as np\n'), ((7717, 7756), 'numpy.random.choice', 'random.choice', (['K'], {'size': 'i', 'replace': '(False)'}), '(K, size=i, replace=False)\n', (7730, 7756), True, 'import numpy.random as random\n'), ((7877, 7918), 'numpy.random.choice', 'random.choice', (['K'], {'size': 'sze', 'replace': '(False)'}), '(K, size=sze, replace=False)\n', (7890, 7918), True, 'import numpy.random as random\n'), ((41954, 41969), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (41966, 41969), True, 'import numpy as np\n'), ((51379, 51394), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (51391, 51394), True, 'import numpy as np\n'), ((80937, 80947), 'sys.exit', 'sys.exit', ([], {}), '()\n', (80945, 80947), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((81329, 81339), 'sys.exit', 'sys.exit', ([], {}), '()\n', (81337, 81339), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n')]
|
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import collections
import functools
import hashlib
import signal
import sys
import time
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_service import systemd
import six
from six import moves
from neutron._i18n import _, _LE, _LI, _LW
from neutron.agent.common import ip_lib
from neutron.agent.common import ovs_lib
from neutron.agent.common import polling
from neutron.agent.common import utils
from neutron.agent.l2.extensions import manager as ext_manager
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import dvr_rpc
from neutron.common import config
from neutron.common import constants as n_const
from neutron.common import ipv6_utils as ipv6
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.extensions import portbindings
from neutron.plugins.common import constants as p_const
from neutron.plugins.common import utils as p_utils
from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants
from neutron.plugins.ml2.drivers.openvswitch.agent \
import ovs_agent_extension_api as ovs_ext_api
from neutron.plugins.ml2.drivers.openvswitch.agent \
import ovs_dvr_neutron_agent
from neutron.plugins.ml2.drivers.openvswitch.agent \
import ovs_neutron_agent
from networking_odl.ovsdb import impl_idl_ovn
from networking_odl.ovsdb import ovsdb_monitor
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
'agent.common.config')
cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
'common.config')
class OVSPluginApi(agent_rpc.PluginApi):
pass
class DBOVSNeutronAgent(ovs_neutron_agent.OVSNeutronAgent):
'''Implements OVS-based tunneling, VLANs and flat networks.
Two local bridges are created: an integration bridge (defaults to
'br-int') and a tunneling bridge (defaults to 'br-tun'). An
additional bridge is created for each physical network interface
used for VLANs and/or flat networks.
All VM VIFs are plugged into the integration bridge. VM VIFs on a
given virtual network share a common "local" VLAN (i.e. not
propagated externally). The VLAN id of this local VLAN is mapped
to the physical networking details realizing that virtual network.
For virtual networks realized as GRE tunnels, a Logical Switch
(LS) identifier is used to differentiate tenant traffic on
inter-HV tunnels. A mesh of tunnels is created to other
Hypervisors in the cloud. These tunnels originate and terminate on
the tunneling bridge of each hypervisor. Port patching is done to
connect local VLANs on the integration bridge to inter-hypervisor
tunnels on the tunnel bridge.
For each virtual network realized as a VLAN or flat network, a
veth or a pair of patch ports is used to connect the local VLAN on
the integration bridge with the physical network bridge, with flow
rules adding, modifying, or stripping VLAN tags as necessary.
'''
# history
# 1.0 Initial version
# 1.1 Support Security Group RPC
# 1.2 Support DVR (Distributed Virtual Router) RPC
# 1.3 Added param devices_to_update to security_groups_provider_updated
# 1.4 Added support for network_update
target = oslo_messaging.Target(version='1.4')
def __init__(self, bridge_classes, conf=None):
'''Constructor.
:param bridge_classes: a dict for bridge classes.
:param conf: an instance of ConfigOpts
'''
super(DBOVSNeutronAgent, self).__init__(bridge_classes, conf)
def setup_rpc(self):
self.plugin_rpc = OVSPluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.dvr_plugin_rpc = dvr_rpc.DVRServerRpcApi(topics.PLUGIN)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
# RPC network init
self.context = context.get_admin_context_without_session()
# Define the listening consumers for the agent
consumers = [[constants.TUNNEL, topics.UPDATE],
[constants.TUNNEL, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE],
[topics.DVR, topics.UPDATE],
[topics.NETWORK, topics.UPDATE]]
if self.l2_pop:
consumers.append([topics.L2POPULATION, topics.UPDATE])
self.connection = agent_rpc.create_consumers([self],
topics.AGENT,
consumers,
start_listening=False)
# NOTE(rtheis): This will initialize all workers (API, RPC,
# plugin service and OVN) with OVN IDL connections.
trigger = ovsdb_monitor.OvnWorker
self._nb_ovn, self._sb_ovn = impl_idl_ovn.get_ovn_idls(self, trigger)
def validate_local_ip(local_ip):
"""Verify if the ip exists on the agent's host."""
if not ip_lib.IPWrapper().get_device_by_ip(local_ip):
LOG.error(_LE("Tunneling can't be enabled with invalid local_ip '%s'."
" IP couldn't be found on this host's interfaces."),
local_ip)
raise SystemExit(1)
def validate_tunnel_config(tunnel_types, local_ip):
"""Verify local ip and tunnel config if tunneling is enabled."""
if not tunnel_types:
return
validate_local_ip(local_ip)
for tun in tunnel_types:
if tun not in constants.TUNNEL_NETWORK_TYPES:
LOG.error(_LE('Invalid tunnel type specified: %s'), tun)
raise SystemExit(1)
def prepare_xen_compute():
is_xen_compute_host = 'rootwrap-xen-dom0' in cfg.CONF.AGENT.root_helper
if is_xen_compute_host:
# Force ip_lib to always use the root helper to ensure that ip
# commands target xen dom0 rather than domU.
cfg.CONF.register_opts(ip_lib.OPTS)
cfg.CONF.set_default('ip_lib_force_root', True)
def main(bridge_classes):
prepare_xen_compute()
validate_tunnel_config(cfg.CONF.AGENT.tunnel_types, cfg.CONF.OVS.local_ip)
try:
agent = DBOVSNeutronAgent(bridge_classes, cfg.CONF)
except (RuntimeError, ValueError) as e:
LOG.error(_LE("%s Agent terminated!"), e)
sys.exit(1)
agent.daemon_loop()
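# Hedged usage sketch (not part of the original module): main() expects a dict of
# bridge classes keyed 'br_int', 'br_phys' and 'br_tun', as in the upstream OVS
# agent entry point. The names below are hypothetical placeholders for whatever
# concrete bridge implementations (native/ryu or ovs-ofctl) a deployment wires in.
#
# main({'br_int': MyIntBridge, 'br_phys': MyPhysBridge, 'br_tun': MyTunBridge})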
|
[
"oslo_config.cfg.CONF.set_default",
"neutron.api.rpc.handlers.dvr_rpc.DVRServerRpcApi",
"oslo_log.log.getLogger",
"neutron.context.get_admin_context_without_session",
"neutron._i18n._LE",
"neutron.agent.common.ip_lib.IPWrapper",
"oslo_messaging.Target",
"oslo_config.cfg.CONF.import_group",
"neutron.agent.rpc.create_consumers",
"networking_odl.ovsdb.impl_idl_ovn.get_ovn_idls",
"oslo_config.cfg.CONF.register_opts",
"neutron.agent.rpc.PluginReportStateAPI",
"sys.exit",
"neutron.agent.securitygroups_rpc.SecurityGroupServerRpcApi"
] |
[((2376, 2403), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2393, 2403), True, 'from oslo_log import log as logging\n'), ((2405, 2502), 'oslo_config.cfg.CONF.import_group', 'cfg.CONF.import_group', (['"""AGENT"""', '"""neutron.plugins.ml2.drivers.openvswitch.agent.common.config"""'], {}), "('AGENT',\n 'neutron.plugins.ml2.drivers.openvswitch.agent.common.config')\n", (2426, 2502), False, 'from oslo_config import cfg\n'), ((2526, 2621), 'oslo_config.cfg.CONF.import_group', 'cfg.CONF.import_group', (['"""OVS"""', '"""neutron.plugins.ml2.drivers.openvswitch.agent.common.config"""'], {}), "('OVS',\n 'neutron.plugins.ml2.drivers.openvswitch.agent.common.config')\n", (2547, 2621), False, 'from oslo_config import cfg\n'), ((4377, 4413), 'oslo_messaging.Target', 'oslo_messaging.Target', ([], {'version': '"""1.4"""'}), "(version='1.4')\n", (4398, 4413), False, 'import oslo_messaging\n'), ((4799, 4846), 'neutron.agent.securitygroups_rpc.SecurityGroupServerRpcApi', 'sg_rpc.SecurityGroupServerRpcApi', (['topics.PLUGIN'], {}), '(topics.PLUGIN)\n', (4831, 4846), True, 'from neutron.agent import securitygroups_rpc as sg_rpc\n'), ((4878, 4916), 'neutron.api.rpc.handlers.dvr_rpc.DVRServerRpcApi', 'dvr_rpc.DVRServerRpcApi', (['topics.PLUGIN'], {}), '(topics.PLUGIN)\n', (4901, 4916), False, 'from neutron.api.rpc.handlers import dvr_rpc\n'), ((4943, 4989), 'neutron.agent.rpc.PluginReportStateAPI', 'agent_rpc.PluginReportStateAPI', (['topics.REPORTS'], {}), '(topics.REPORTS)\n', (4973, 4989), True, 'from neutron.agent import rpc as agent_rpc\n'), ((5044, 5087), 'neutron.context.get_admin_context_without_session', 'context.get_admin_context_without_session', ([], {}), '()\n', (5085, 5087), False, 'from neutron import context\n'), ((5546, 5633), 'neutron.agent.rpc.create_consumers', 'agent_rpc.create_consumers', (['[self]', 'topics.AGENT', 'consumers'], {'start_listening': '(False)'}), '([self], topics.AGENT, consumers, start_listening\n =False)\n', (5572, 5633), True, 'from neutron.agent import rpc as agent_rpc\n'), ((6004, 6044), 'networking_odl.ovsdb.impl_idl_ovn.get_ovn_idls', 'impl_idl_ovn.get_ovn_idls', (['self', 'trigger'], {}), '(self, trigger)\n', (6029, 6044), False, 'from networking_odl.ovsdb import impl_idl_ovn\n'), ((7077, 7112), 'oslo_config.cfg.CONF.register_opts', 'cfg.CONF.register_opts', (['ip_lib.OPTS'], {}), '(ip_lib.OPTS)\n', (7099, 7112), False, 'from oslo_config import cfg\n'), ((7122, 7169), 'oslo_config.cfg.CONF.set_default', 'cfg.CONF.set_default', (['"""ip_lib_force_root"""', '(True)'], {}), "('ip_lib_force_root', True)\n", (7142, 7169), False, 'from oslo_config import cfg\n'), ((6217, 6331), 'neutron._i18n._LE', '_LE', (['"""Tunneling can\'t be enabled with invalid local_ip \'%s\'. IP couldn\'t be found on this host\'s interfaces."""'], {}), '("Tunneling can\'t be enabled with invalid local_ip \'%s\'. 
IP couldn\'t be found on this host\'s interfaces."\n )\n', (6220, 6331), False, 'from neutron._i18n import _, _LE, _LI, _LW\n'), ((7486, 7497), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7494, 7497), False, 'import sys\n'), ((6151, 6169), 'neutron.agent.common.ip_lib.IPWrapper', 'ip_lib.IPWrapper', ([], {}), '()\n', (6167, 6169), False, 'from neutron.agent.common import ip_lib\n'), ((6724, 6764), 'neutron._i18n._LE', '_LE', (['"""Invalid tunnel type specified: %s"""'], {}), "('Invalid tunnel type specified: %s')\n", (6727, 6764), False, 'from neutron._i18n import _, _LE, _LI, _LW\n'), ((7445, 7472), 'neutron._i18n._LE', '_LE', (['"""%s Agent terminated!"""'], {}), "('%s Agent terminated!')\n", (7448, 7472), False, 'from neutron._i18n import _, _LE, _LI, _LW\n')]
|
from elasticsearch_dsl import Document, Integer, Percolator, Text
from admin.models.articles_es import SUPPORTED_LANGUAGES_ANALYZER_MAPPING
class TagRuleEs(Document):
tag_id = Integer()
query = Percolator()
title = Text(required=False, fields={
lang: Text(analyzer=analyzer)
for lang, analyzer in SUPPORTED_LANGUAGES_ANALYZER_MAPPING.items()
})
content = Text(required=False, fields={
lang: Text(analyzer=analyzer)
for lang, analyzer in SUPPORTED_LANGUAGES_ANALYZER_MAPPING.items()
})
class Index:
name = 'tag_rules'
settings = {
"number_of_shards": 2,
}
|
[
"elasticsearch_dsl.Percolator",
"admin.models.articles_es.SUPPORTED_LANGUAGES_ANALYZER_MAPPING.items",
"elasticsearch_dsl.Text",
"elasticsearch_dsl.Integer"
] |
[((183, 192), 'elasticsearch_dsl.Integer', 'Integer', ([], {}), '()\n', (190, 192), False, 'from elasticsearch_dsl import Document, Integer, Percolator, Text\n'), ((205, 217), 'elasticsearch_dsl.Percolator', 'Percolator', ([], {}), '()\n', (215, 217), False, 'from elasticsearch_dsl import Document, Integer, Percolator, Text\n'), ((275, 298), 'elasticsearch_dsl.Text', 'Text', ([], {'analyzer': 'analyzer'}), '(analyzer=analyzer)\n', (279, 298), False, 'from elasticsearch_dsl import Document, Integer, Percolator, Text\n'), ((439, 462), 'elasticsearch_dsl.Text', 'Text', ([], {'analyzer': 'analyzer'}), '(analyzer=analyzer)\n', (443, 462), False, 'from elasticsearch_dsl import Document, Integer, Percolator, Text\n'), ((329, 373), 'admin.models.articles_es.SUPPORTED_LANGUAGES_ANALYZER_MAPPING.items', 'SUPPORTED_LANGUAGES_ANALYZER_MAPPING.items', ([], {}), '()\n', (371, 373), False, 'from admin.models.articles_es import SUPPORTED_LANGUAGES_ANALYZER_MAPPING\n'), ((493, 537), 'admin.models.articles_es.SUPPORTED_LANGUAGES_ANALYZER_MAPPING.items', 'SUPPORTED_LANGUAGES_ANALYZER_MAPPING.items', ([], {}), '()\n', (535, 537), False, 'from admin.models.articles_es import SUPPORTED_LANGUAGES_ANALYZER_MAPPING\n')]
|
import datetime
import numpy as np
import garminmanager.RawDataC
import garminmanager.utils.JsonEncDecC
import garminmanager.utils.FileWriterC
from garminmanager.enumerators.EnumHealthTypeC import EnumHealtTypeC
def test_encode_decode():
raw_data = garminmanager.RawDataC.RawDataC()
my_dates1 = {
datetime.datetime(2019,4,11,1,00) : 100,
datetime.datetime(2019,4,11,2,00) : np.nan,
datetime.datetime(2019,4,11,3,00) : 100
}
for key, value in my_dates1.items():
raw_data.add_x(key)
raw_data.add_y(value)
raw_data.set_data_type(EnumHealtTypeC.heartrate)
json_enc_dec = garminmanager.utils.JsonEncDecC.JsonEncDecC()
json_enc_dec.set_input_data(raw_data)
json_string = json_enc_dec.encode()
json_enc_dec.set_input_json(json_string)
file_writer = garminmanager.utils.FileWriterC.FileWriterC()
file_writer.set_filename('test.json')
file_writer.set_text(json_string)
file_writer.write_text_to_file()
d = file_writer.read_json()
json_enc_dec.set_input_json(d)
json_enc_dec.decode()
raw_data_output = json_enc_dec.get_data()
x = raw_data_output.get_x()
y = raw_data_output.get_y()
org_x = raw_data.get_x()
org_y = raw_data.get_y()
y[np.isnan(y)] = -100
org_y[np.isnan(org_y)] = -100
y[np.isnan(y)] = -100
org_y[np.isnan(org_y)] = -100
raw_data.set_y(y)
raw_data_output.set_y(org_y)
assert raw_data == raw_data_output
|
[
"numpy.isnan",
"datetime.datetime"
] |
[((289, 325), 'datetime.datetime', 'datetime.datetime', (['(2019)', '(4)', '(11)', '(1)', '(0)'], {}), '(2019, 4, 11, 1, 0)\n', (306, 325), False, 'import datetime\n'), ((338, 374), 'datetime.datetime', 'datetime.datetime', (['(2019)', '(4)', '(11)', '(2)', '(0)'], {}), '(2019, 4, 11, 2, 0)\n', (355, 374), False, 'import datetime\n'), ((390, 426), 'datetime.datetime', 'datetime.datetime', (['(2019)', '(4)', '(11)', '(3)', '(0)'], {}), '(2019, 4, 11, 3, 0)\n', (407, 426), False, 'import datetime\n'), ((1240, 1251), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (1248, 1251), True, 'import numpy as np\n'), ((1270, 1285), 'numpy.isnan', 'np.isnan', (['org_y'], {}), '(org_y)\n', (1278, 1285), True, 'import numpy as np\n'), ((1301, 1312), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (1309, 1312), True, 'import numpy as np\n'), ((1331, 1346), 'numpy.isnan', 'np.isnan', (['org_y'], {}), '(org_y)\n', (1339, 1346), True, 'import numpy as np\n')]
|
import os
import sql_query as sql
from styling import color
import password_checker
import password_generator
# Login Main class
class Login:
def __init__(self):
self.username = ''
self.password = ''
self.is_login = False
self.website = ''
self.account = ''
try:
os.system('clear')
print(color.BOLD + color.RED + 'User Login'.center(30) + color.END)
print('')
self.username = input('Enter Username : ')
self.password = input('Enter Password : ')
auth = sql.get_user_Password(self.username)
if auth == self.password:
self.is_login = True
else:
self.is_login = False
except:
print('An Error Occurred!')
return
def get_Website(self, arr):
for n, web in enumerate(arr):
print(f'{n + 1}. {web}')
if len(arr) > 0:
n = input('Select Website : ')
if n.isnumeric() and len(n) == 1:
self.website = arr[int(n) - 1]
else:
self.website = n
return True
else:
print('No password is saved')
return False
def get_Account(self, arr):
for n, acc in enumerate(arr):
print(f'{n + 1}. {acc}')
n = input('Select Account : ')
if n.isnumeric() and len(n) == 1:
self.account = arr[int(n) - 1]
else:
self.account = n
def select(self):
websites = sql.all_websites(self.username)
present = self.get_Website(websites)
if present:
accounts = sql.all_accounts(self.username, self.website)
self.get_Account(accounts)
return True
return False
def add_password(self):
os.system('clear')
print(color.BOLD + color.RED + 'Add Password'.center(30) + color.END + '\n')
url = input('Website/Url : ')
identifier = input('Account/Username/Identifier : ')
password = input('Password : ')
sql.add_password(url, identifier, password, self.username)
def delete_password(self):
os.system('clear')
        print(color.BOLD + color.RED + 'Delete Password'.center(30) + color.END)
print('')
if self.select():
sql.delete(self.username, self.website, self.account)
def modify_password(self):
os.system('clear')
print(color.BOLD + color.RED + 'Modify Password'.center(30) + color.END)
print('')
if self.select():
if input('Want to generate Password (Y/N): ').lower() == 'y':
password_generator.main()
new_pass = input('New Password : ')
sql.modify(self.username, self.website, self.account, new_pass)
def show_password(self):
os.system('clear')
print(color.BOLD + color.RED + 'Show Password'.center(30) + color.END)
print('')
if self.select():
sql.show_password(self.username, self.website, self.account)
return
def main(self):
while True:
os.system('clear')
print(color.BOLD + color.RED + self.username.center(30) + color.END)
print('')
print('1. Add Password')
print('2. Delete a Password')
print('3. Modify a Password')
print('4. Show Password')
print('5. Generate a strong password')
print('6. Check my password if secure')
print('7. Log out')
ch = input().lower()
if ch == '1' or ch == 'add':
self.add_password()
elif ch == '2' or ch == 'delete':
self.delete_password()
elif ch == '3' or ch == 'modify':
self.modify_password()
elif ch == '4' or ch == 'show':
self.show_password()
elif ch == '5' or ch == 'generate':
password_generator.main()
elif ch == '6' or ch == 'check':
password_checker.main()
elif ch == '7' or ch == 'exit':
return
else:
print('Invalid Choice!')
en = input("'Y' to Log out, or press Enter....").lower()
if en == 'y':
return
else:
pass
def user_login():
user = Login()
if user.is_login:
user.main()
else:
        print('Login failure: wrong password or username.')
return
def user_signup():
try:
os.system('clear')
print(color.BOLD + color.RED + 'User Sign Up'.center(30) + color.END)
print('')
username = input('Enter Username : ')
password = input('Enter Password : ')
if password == input('Confirm Password : '):
sql.sign_up(username, password)
else:
print('Wrong Password...\nSign up failure!')
return
except:
print('An Error Occurred!')
return
|
[
"password_generator.main",
"os.system",
"sql_query.get_user_Password",
"sql_query.all_accounts",
"sql_query.sign_up",
"sql_query.delete",
"sql_query.all_websites",
"password_checker.main",
"sql_query.show_password",
"sql_query.modify",
"sql_query.add_password"
] |
[((1571, 1602), 'sql_query.all_websites', 'sql.all_websites', (['self.username'], {}), '(self.username)\n', (1587, 1602), True, 'import sql_query as sql\n'), ((1858, 1876), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (1867, 1876), False, 'import os\n'), ((2111, 2169), 'sql_query.add_password', 'sql.add_password', (['url', 'identifier', 'password', 'self.username'], {}), '(url, identifier, password, self.username)\n', (2127, 2169), True, 'import sql_query as sql\n'), ((2210, 2228), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (2219, 2228), False, 'import os\n'), ((2460, 2478), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (2469, 2478), False, 'import os\n'), ((2884, 2902), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (2893, 2902), False, 'import os\n'), ((4615, 4633), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (4624, 4633), False, 'import os\n'), ((331, 349), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (340, 349), False, 'import os\n'), ((582, 618), 'sql_query.get_user_Password', 'sql.get_user_Password', (['self.username'], {}), '(self.username)\n', (603, 618), True, 'import sql_query as sql\n'), ((1691, 1736), 'sql_query.all_accounts', 'sql.all_accounts', (['self.username', 'self.website'], {}), '(self.username, self.website)\n', (1707, 1736), True, 'import sql_query as sql\n'), ((2366, 2419), 'sql_query.delete', 'sql.delete', (['self.username', 'self.website', 'self.account'], {}), '(self.username, self.website, self.account)\n', (2376, 2419), True, 'import sql_query as sql\n'), ((2782, 2845), 'sql_query.modify', 'sql.modify', (['self.username', 'self.website', 'self.account', 'new_pass'], {}), '(self.username, self.website, self.account, new_pass)\n', (2792, 2845), True, 'import sql_query as sql\n'), ((3039, 3099), 'sql_query.show_password', 'sql.show_password', (['self.username', 'self.website', 'self.account'], {}), '(self.username, self.website, self.account)\n', (3056, 3099), True, 'import sql_query as sql\n'), ((3170, 3188), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (3179, 3188), False, 'import os\n'), ((4887, 4918), 'sql_query.sign_up', 'sql.sign_up', (['username', 'password'], {}), '(username, password)\n', (4898, 4918), True, 'import sql_query as sql\n'), ((2695, 2720), 'password_generator.main', 'password_generator.main', ([], {}), '()\n', (2718, 2720), False, 'import password_generator\n'), ((4013, 4038), 'password_generator.main', 'password_generator.main', ([], {}), '()\n', (4036, 4038), False, 'import password_generator\n'), ((4100, 4123), 'password_checker.main', 'password_checker.main', ([], {}), '()\n', (4121, 4123), False, 'import password_checker\n')]
|
# Copyright 2012 Google Inc. All Rights Reserved.
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts a given string from Django template to HTML and back."""
__author__ = ('<EMAIL> (<NAME>)')
import re
UNTRANSLATABLE_BEGIN = r'<!--DO_NOT_TRANSLATE_BLOCK>'
UNTRANSLATABLE_END = r'</DO_NOT_TRANSLATE_BLOCK-->'
CONTENT_BEGIN = """
<!--CONTENT_BLOCK ***********************************************************-->
"""
CONTENT_END = """
<!--/END_CONTENT_BLOCK ******************************************************-->
"""
class TextProcessor(object):
"""Translates text from Django template format to l10nable HTML and back.
Properties:
    django: The Django template representation of the text.
html: The HTML representation of the text.
"""
def __init__(self, django='', html=''):
self._django = django
self._html = html
@property
def django(self):
if not self._django:
self._django = self.__HtmlToDjango(self._html)
return self._django
@property
def html(self):
if not self._html:
self._html = self.__DjangoToHtml(self._django)
return self._html
def __DjangoToHtml(self, text):
"""Given a Django template's content, return HTML suitable for l10n.
Args:
text: The text to convert from Django to HTML.
Returns:
A string containing the newly HTMLized content.
* Django tags like `{% tag %}` will be rendered inside an HTML comment:
`<!--DO_NOT_TRANSLATE_BLOCK>{% tag %}</DO_NOT_TRANSLATE_BLOCK-->`.
* `pre`, `script`, and `style` tags' content will be likewise wrapped:
`<pre><!--DO_NOT_TRANSLATE_BLOCK>Text!</DO_NOT_TRANSLATE_BLOCK-->`.
* The article's content will be wrapped:
<!--CONTENT_BLOCK ***************************************************-->
Content goes here!
<!--END_CONTENT_BLOCK ***********************************************-->
"""
django_tag_before = r'(?P<tag>{%.+?%})'
django_tag_after = r'%s\g<tag>%s' % (UNTRANSLATABLE_BEGIN,
UNTRANSLATABLE_END)
open_notranslate_before = r'(?P<tag><(?:pre|script|style)[^>]*?>)'
open_notranslate_after = r'\g<tag>%s' % UNTRANSLATABLE_BEGIN
close_notranslate_before = r'(?P<tag></(?:pre|script|style)[^>]*?>)'
close_notranslate_after = r'%s\g<tag>' % UNTRANSLATABLE_END
open_content = r'{% block content %}'
close_content = r'{% endblock %}'
# Walk the given text line by line
to_return = []
in_content = False
for line in text.splitlines(True):
# Process Django tags
line = re.sub(django_tag_before, django_tag_after, line)
# Preprocess script/pre/style blocks
line = re.sub(open_notranslate_before, open_notranslate_after, line)
line = re.sub(close_notranslate_before, close_notranslate_after, line)
# Preprocess content block
if re.search(open_content, line):
line = CONTENT_BEGIN
in_content = True
elif re.search(close_content, line) and in_content:
line = CONTENT_END
in_content = False
to_return.append(line)
return ''.join(to_return)
def __HtmlToDjango(self, text):
"""Given localized HTML, return text formatted as a Django template.
Args:
text: The text to convert from HTML to Django.
Returns:
A string containing the newly Djangoized content, stripped of leading
and trailing whitespace.
See the documentation for `django_to_html` and imagine the inverse. :)
"""
# Strip UNTRANSLATABLE_BEGIN and UNTRANSLATABLE_END comments.
text = text.replace(UNTRANSLATABLE_BEGIN, '')
text = text.replace(UNTRANSLATABLE_END, '')
# Replace CONTENT_BEGIN with `{% block content %}` and CONTENT_END with
# `{% endblock %}`.
text = text.replace(CONTENT_BEGIN, '{% block content %}\n')
text = text.replace(CONTENT_END, '{% endblock %}')
    # Return the result, stripped of leading/trailing whitespace.
return text.strip()
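# Illustrative usage (not part of the original module): a pre block gets its body
# fenced off from translation exactly as described in __DjangoToHtml above.
#
# tp = TextProcessor(django='<pre>code {{ var }}</pre>')
# tp.html
# # -> '<pre><!--DO_NOT_TRANSLATE_BLOCK>code {{ var }}</DO_NOT_TRANSLATE_BLOCK--></pre>'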
|
[
"re.sub",
"re.search"
] |
[((3121, 3170), 're.sub', 're.sub', (['django_tag_before', 'django_tag_after', 'line'], {}), '(django_tag_before, django_tag_after, line)\n', (3127, 3170), False, 'import re\n'), ((3227, 3288), 're.sub', 're.sub', (['open_notranslate_before', 'open_notranslate_after', 'line'], {}), '(open_notranslate_before, open_notranslate_after, line)\n', (3233, 3288), False, 'import re\n'), ((3302, 3365), 're.sub', 're.sub', (['close_notranslate_before', 'close_notranslate_after', 'line'], {}), '(close_notranslate_before, close_notranslate_after, line)\n', (3308, 3365), False, 'import re\n'), ((3408, 3437), 're.search', 're.search', (['open_content', 'line'], {}), '(open_content, line)\n', (3417, 3437), False, 'import re\n'), ((3505, 3535), 're.search', 're.search', (['close_content', 'line'], {}), '(close_content, line)\n', (3514, 3535), False, 'import re\n')]
|
# -*- coding: utf-8 -*-
"""
@date: 2020/2/29 7:31 PM
@file: util.py
@author: zj
@description:
"""
import numpy as np
import torch
import sys
def error(msg):
print(msg)
sys.exit(0)
def get_device():
return torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def iou(pred_box, target_box):
"""
    Compute the IoU between a candidate proposal and the annotated bounding boxes
    :param pred_box: shape [4]
    :param target_box: shape [N, 4]
    :return: [N]
"""
if len(target_box.shape) == 1:
target_box = target_box[np.newaxis, :]
xA = np.maximum(pred_box[0], target_box[:, 0])
yA = np.maximum(pred_box[1], target_box[:, 1])
xB = np.minimum(pred_box[2], target_box[:, 2])
yB = np.minimum(pred_box[3], target_box[:, 3])
    # intersection area
intersection = np.maximum(0.0, xB - xA + 1) * np.maximum(0.0, yB - yA + 1)
    # areas of the two bounding boxes
boxAArea = (pred_box[2] - pred_box[0] + 1) * (pred_box[3] - pred_box[1] + 1)
boxBArea = (target_box[:, 2] - target_box[:, 0] + 1) * (target_box[:, 3] - target_box[:, 1] + 1)
scores = intersection / (boxAArea + boxBArea - intersection)
return scores
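# Quick worked example (illustrative values only): a proposal that matches the
# first annotation exactly and misses the second one entirely.
# iou(np.array([0, 0, 9, 9]), np.array([[0, 0, 9, 9], [20, 20, 29, 29]]))
# # -> [1.0, 0.0]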
def compute_ious(rects, bndboxs):
iou_list = list()
for rect in rects:
scores = iou(rect, bndboxs)
iou_list.append(max(scores))
return iou_list
def parse_output(outputs, S, B, C):
"""
    For each grid cell, keep the detection bounding box with the highest confidence
:param outputs: (N, S*S, B*5+C)
:return: cates, probs, bboxs
cates: (N, S*S)
probs: (N, S*S)
bboxs: (N, S*S, 4)
"""
N = outputs.shape[0]
# (N*S*S, C)
probs = outputs[:, :, :C].reshape(-1, C)
# (N*S*S, B)
confidences = outputs[:, :, C:(C + B)].reshape(-1, B)
# (N*S*S, 4*B)
bboxs = outputs[:, :, (C + B):].reshape(-1, 4 * B)
    # class assigned to each grid cell (N*S*S)
cates = torch.argmax(probs, dim=1)
    # index of the highest-confidence box in each grid cell (N*S*S)
idxs = torch.argmax(confidences, dim=1)
    # class probability = class score * box confidence (N*S*S)
cate_probs = probs[range(len(cates)), cates] * confidences[range(len(idxs)), idxs]
    # coordinates of the selected box in each grid cell (N*S*S, 4)
    obj_boxs = bboxs.reshape(-1, B, 4)[range(len(idxs)), idxs]
    return cates.reshape(N, S * S), cate_probs.reshape(N, S * S), obj_boxs.reshape(N, S * S, 4)
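# Shape check (hypothetical sizes, not from the original file): with N=2 images,
# S=7, B=2, C=20 the input is (2, 49, 30) and parse_output returns tensors of
# shape (2, 49), (2, 49) and (2, 49, 4).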
def bbox_corner_to_center(bboxs):
"""
[xmin, ymin, xmax, ymax] -> [x_center, y_center, w, h]
:param bboxs: [N, 4]
"""
assert len(bboxs.shape) == 2
tmp = np.zeros(bboxs.shape)
# w
tmp[:, 2] = bboxs[:, 2] - bboxs[:, 0] + 1
# h
tmp[:, 3] = bboxs[:, 3] - bboxs[:, 1] + 1
# x_center
tmp[:, 0] = bboxs[:, 0] + tmp[:, 2] / 2
# y_center
tmp[:, 1] = bboxs[:, 1] + tmp[:, 3] / 2
return tmp
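# Worked example (illustrative): a box covering pixels 0..9 horizontally and
# 0..19 vertically, i.e. width 10 and height 20 under the +1 convention used above.
# bbox_corner_to_center(np.array([[0, 0, 9, 19]]))
# # -> [[5.0, 10.0, 10.0, 20.0]]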
def bbox_center_to_corner(bboxs):
"""
[x_center, y_center, w, h] -> [xmin, ymin, xmax, ymax]
:param bboxs: [N, 4]
"""
assert len(bboxs.shape) == 2
tmp = np.zeros(bboxs.shape)
# xmin
tmp[:, 0] = bboxs[:, 0] - bboxs[:, 2] / 2
# ymin
tmp[:, 1] = bboxs[:, 1] - bboxs[:, 3] / 2
# xmax
tmp[:, 2] = bboxs[:, 0] + bboxs[:, 2] / 2
# ymax
tmp[:, 3] = bboxs[:, 1] + bboxs[:, 3] / 2
return tmp
def deform_bboxs(pred_bboxs, data_dict, S):
"""
    Map grid-relative predictions back to pixel coordinates in the source image.
    :param pred_bboxs: [S*S, 4]
    :return: [S*S, 4] integer (xmin, ymin, xmax, ymax) boxes in source-image pixels
"""
scale_h, scale_w = data_dict['scale_size']
grid_w = scale_w / S
grid_h = scale_h / S
bboxs = np.zeros(pred_bboxs.shape)
for i in range(S * S):
row = int(i / S)
col = int(i % S)
x_center, y_center, box_w, box_h = pred_bboxs[i]
bboxs[i, 0] = (col + x_center) * grid_w
bboxs[i, 1] = (row + y_center) * grid_h
bboxs[i, 2] = box_w * scale_w
bboxs[i, 3] = box_h * scale_h
# (x_center, y_center, w, h) -> (xmin, ymin, xmax, ymax)
bboxs = bbox_center_to_corner(bboxs)
ratio_h, ratio_w = data_dict['ratio']
bboxs[:, 0] /= ratio_w
bboxs[:, 1] /= ratio_h
bboxs[:, 2] /= ratio_w
bboxs[:, 3] /= ratio_h
    # clamp to the source image bounds
h, w = data_dict['src_size']
bboxs[:, 0] = np.maximum(bboxs[:, 0], 0)
bboxs[:, 1] = np.maximum(bboxs[:, 1], 0)
bboxs[:, 2] = np.minimum(bboxs[:, 2], w)
bboxs[:, 3] = np.minimum(bboxs[:, 3], h)
return bboxs.astype(int)
def nms(rect_list, score_list, cate_list, thresh=0.3):
"""
    Non-maximum suppression
    :param rect_list: list, shape [N, 4]
    :param score_list: list, shape [N]
    :param cate_list: list, shape [N]
"""
nms_rects = list()
nms_scores = list()
nms_cates = list()
rect_array = np.array(rect_list)
score_array = np.array(score_list)
cate_array = np.array(cate_list)
    # a single sort up front is enough
    # sort by class probability in descending order
idxs = np.argsort(score_array)[::-1]
rect_array = rect_array[idxs]
score_array = score_array[idxs]
cate_array = cate_array[idxs]
while len(score_array) > 0:
        # keep the box with the highest class probability
nms_rects.append(rect_array[0])
nms_scores.append(score_array[0])
nms_cates.append(cate_array[0])
rect_array = rect_array[1:]
score_array = score_array[1:]
cate_array = cate_array[1:]
length = len(score_array)
if length <= 0:
break
        # compute IoU against the remaining boxes
iou_scores = iou(np.array(nms_rects[len(nms_rects) - 1]), rect_array)
# print(iou_scores)
        # drop boxes whose overlap is >= thresh
idxs = np.where(iou_scores < thresh)[0]
rect_array = rect_array[idxs]
score_array = score_array[idxs]
cate_array = cate_array[idxs]
return nms_rects, nms_scores, nms_cates
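# Worked example (illustrative values): the two heavily overlapping boxes collapse
# to the higher-scoring one, while the far-away third box survives.
# rects  = [[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]]
# scores = [0.9, 0.8, 0.7]
# cates  = [1, 1, 2]
# nms(rects, scores, cates)  # -> keeps the boxes scored 0.9 and 0.7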
|
[
"numpy.minimum",
"numpy.maximum",
"torch.argmax",
"numpy.zeros",
"numpy.argsort",
"numpy.where",
"numpy.array",
"torch.cuda.is_available",
"sys.exit"
] |
[((180, 191), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (188, 191), False, 'import sys\n'), ((526, 567), 'numpy.maximum', 'np.maximum', (['pred_box[0]', 'target_box[:, 0]'], {}), '(pred_box[0], target_box[:, 0])\n', (536, 567), True, 'import numpy as np\n'), ((577, 618), 'numpy.maximum', 'np.maximum', (['pred_box[1]', 'target_box[:, 1]'], {}), '(pred_box[1], target_box[:, 1])\n', (587, 618), True, 'import numpy as np\n'), ((628, 669), 'numpy.minimum', 'np.minimum', (['pred_box[2]', 'target_box[:, 2]'], {}), '(pred_box[2], target_box[:, 2])\n', (638, 669), True, 'import numpy as np\n'), ((679, 720), 'numpy.minimum', 'np.minimum', (['pred_box[3]', 'target_box[:, 3]'], {}), '(pred_box[3], target_box[:, 3])\n', (689, 720), True, 'import numpy as np\n'), ((1752, 1778), 'torch.argmax', 'torch.argmax', (['probs'], {'dim': '(1)'}), '(probs, dim=1)\n', (1764, 1778), False, 'import torch\n'), ((1816, 1848), 'torch.argmax', 'torch.argmax', (['confidences'], {'dim': '(1)'}), '(confidences, dim=1)\n', (1828, 1848), False, 'import torch\n'), ((2310, 2331), 'numpy.zeros', 'np.zeros', (['bboxs.shape'], {}), '(bboxs.shape)\n', (2318, 2331), True, 'import numpy as np\n'), ((2754, 2775), 'numpy.zeros', 'np.zeros', (['bboxs.shape'], {}), '(bboxs.shape)\n', (2762, 2775), True, 'import numpy as np\n'), ((3238, 3264), 'numpy.zeros', 'np.zeros', (['pred_bboxs.shape'], {}), '(pred_bboxs.shape)\n', (3246, 3264), True, 'import numpy as np\n'), ((3889, 3915), 'numpy.maximum', 'np.maximum', (['bboxs[:, 0]', '(0)'], {}), '(bboxs[:, 0], 0)\n', (3899, 3915), True, 'import numpy as np\n'), ((3934, 3960), 'numpy.maximum', 'np.maximum', (['bboxs[:, 1]', '(0)'], {}), '(bboxs[:, 1], 0)\n', (3944, 3960), True, 'import numpy as np\n'), ((3979, 4005), 'numpy.minimum', 'np.minimum', (['bboxs[:, 2]', 'w'], {}), '(bboxs[:, 2], w)\n', (3989, 4005), True, 'import numpy as np\n'), ((4024, 4050), 'numpy.minimum', 'np.minimum', (['bboxs[:, 3]', 'h'], {}), '(bboxs[:, 3], h)\n', (4034, 4050), True, 'import numpy as np\n'), ((4359, 4378), 'numpy.array', 'np.array', (['rect_list'], {}), '(rect_list)\n', (4367, 4378), True, 'import numpy as np\n'), ((4397, 4417), 'numpy.array', 'np.array', (['score_list'], {}), '(score_list)\n', (4405, 4417), True, 'import numpy as np\n'), ((4435, 4454), 'numpy.array', 'np.array', (['cate_list'], {}), '(cate_list)\n', (4443, 4454), True, 'import numpy as np\n'), ((753, 781), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xB - xA + 1)'], {}), '(0.0, xB - xA + 1)\n', (763, 781), True, 'import numpy as np\n'), ((784, 812), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yB - yA + 1)'], {}), '(0.0, yB - yA + 1)\n', (794, 812), True, 'import numpy as np\n'), ((4499, 4522), 'numpy.argsort', 'np.argsort', (['score_array'], {}), '(score_array)\n', (4509, 4522), True, 'import numpy as np\n'), ((248, 273), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (271, 273), False, 'import torch\n'), ((5166, 5195), 'numpy.where', 'np.where', (['(iou_scores < thresh)'], {}), '(iou_scores < thresh)\n', (5174, 5195), True, 'import numpy as np\n')]
|
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from pita import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL)
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
comic_patterns = [
path('<slug:slug>/<int:number>', views.view_comic, name='comic_page'),
path('<slug:slug>', views.view_comic, name='comic'),
path('', views.comic_index, name='comic_index'),
]
urlpatterns += [
path('', views.index, name='index'),
path('contact/', views.ContactView.as_view(), name='contact'),
path('comics/', include(comic_patterns)),
url(r'^(?P<slug>[a-z0-9]+(?:-[a-z0-9]+)*)$', views.page, name='page'),
]
|
[
"django.urls.path",
"django.conf.urls.url",
"django.urls.include",
"pita.views.ContactView.as_view",
"django.conf.urls.static.static"
] |
[((225, 256), 'django.conf.urls.url', 'url', (['"""^admin/"""', 'admin.site.urls'], {}), "('^admin/', admin.site.urls)\n", (228, 256), False, 'from django.conf.urls import url\n'), ((300, 327), 'django.conf.urls.static.static', 'static', (['settings.STATIC_URL'], {}), '(settings.STATIC_URL)\n', (306, 327), False, 'from django.conf.urls.static import static\n'), ((348, 409), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (354, 409), False, 'from django.conf.urls.static import static\n'), ((460, 529), 'django.urls.path', 'path', (['"""<slug:slug>/<int:number>"""', 'views.view_comic'], {'name': '"""comic_page"""'}), "('<slug:slug>/<int:number>', views.view_comic, name='comic_page')\n", (464, 529), False, 'from django.urls import include, path\n'), ((535, 586), 'django.urls.path', 'path', (['"""<slug:slug>"""', 'views.view_comic'], {'name': '"""comic"""'}), "('<slug:slug>', views.view_comic, name='comic')\n", (539, 586), False, 'from django.urls import include, path\n'), ((593, 640), 'django.urls.path', 'path', (['""""""', 'views.comic_index'], {'name': '"""comic_index"""'}), "('', views.comic_index, name='comic_index')\n", (597, 640), False, 'from django.urls import include, path\n'), ((666, 701), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (670, 701), False, 'from django.urls import include, path\n'), ((820, 888), 'django.conf.urls.url', 'url', (['"""^(?P<slug>[a-z0-9]+(?:-[a-z0-9]+)*)$"""', 'views.page'], {'name': '"""page"""'}), "('^(?P<slug>[a-z0-9]+(?:-[a-z0-9]+)*)$', views.page, name='page')\n", (823, 888), False, 'from django.conf.urls import url\n'), ((724, 751), 'pita.views.ContactView.as_view', 'views.ContactView.as_view', ([], {}), '()\n', (749, 751), False, 'from pita import views\n'), ((790, 813), 'django.urls.include', 'include', (['comic_patterns'], {}), '(comic_patterns)\n', (797, 813), False, 'from django.urls import include, path\n')]
|
# rsinc : two-way / bi-directional sync for rclone
import argparse
import os
import subprocess
import logging
import re
from datetime import datetime
import ujson
import halo
from pyfiglet import Figlet
from .sync import sync, calc_states
from .rclone import make_dirs, lsl
from .packed import pack, merge, unpack, get_branch, empty
from .classes import Flat
from .colors import grn, ylw, red
from .config import config_cli
from .__init__ import __version__
SPIN = halo.Halo(spinner="dots", placement="right", color="yellow")
CONFIG_FILE = os.path.expanduser("~/.rsinc/config.json") # Default config path
custom_fig = Figlet(font="graffiti")
print(custom_fig.renderText("Rsinc"))
print("Copyright 2019 <NAME> (CHURCHILL COLLEGE)")
print("This is free software with ABSOLUTELY NO WARRANTY")
def qt(string):
return '"' + string + '"'
def read(file):
"""Reads json do dict and returns dict."""
try:
with open(file, "r") as fp:
d = ujson.load(fp)
if not isinstance(d, dict):
raise ValueError("old file format")
except Exception as e:
emsg = "{} is corrupt ({}). ".format(file, e)
if file.endswith("master.json"):
emsg += "Delete it and restart rsinc to rebuild it."
raise TypeError(emsg)
return d
def write(file, d):
"""Writes dict to json"""
with open(file, "w") as fp:
ujson.dump(d, fp, sort_keys=True, indent=2)
def strtobool(string):
return string.lower() in STB
def escape(string):
tmp = []
for char in string:
tmp.append(ESCAPE.get(char, char))
return "".join(tmp)
# ****************************************************************************
# * Set-up/Parse *
# ****************************************************************************
def formatter(prog):
return argparse.HelpFormatter(prog, max_help_position=52)
parser = argparse.ArgumentParser(formatter_class=formatter)
parser.add_argument("folders", help="Folders to sync", nargs="*")
parser.add_argument("-d", "--dry", action="store_true", help="Do a dry run")
parser.add_argument(
"-c", "--clean", action="store_true", help="Clean directories"
)
parser.add_argument(
"-D", "--default", help="Sync defaults", action="store_true"
)
parser.add_argument(
"-r", "--recovery", action="store_true", help="Enter recovery mode"
)
parser.add_argument(
"-a", "--auto", help="Don't ask permissions", action="store_true"
)
parser.add_argument(
"-p", "--purge", help="Reset history for all folders", action="store_true"
)
parser.add_argument(
"-i", "--ignore", help="Find .rignore files", action="store_true"
)
parser.add_argument(
"-v", "--version", action="version", version=f"rsinc version: {__version__}"
)
parser.add_argument(
"--config", action="store_true", help="Enter interactive CLI configurer"
)
parser.add_argument(
"--config_path", help="Path to config file (default ~/.rsinc/config.json)"
)
parser.add_argument(
"args",
nargs=argparse.REMAINDER,
help="Global flags to pass to rclone commands",
)
args = parser.parse_args()
# ****************************************************************************
# * Configuration *
# ****************************************************************************
# Read config and assign variables.
if args.config_path is None:
config_path = CONFIG_FILE
else:
config_path = args.config_path
if not os.path.isfile(config_path) or args.config:
config_cli(config_path)
config = read(config_path)
CASE_INSENSATIVE = config["CASE_INSENSATIVE"]
DEFAULT_DIRS = config["DEFAULT_DIRS"]
LOG_FOLDER = config["LOG_FOLDER"]
HASH_NAME = config["HASH_NAME"]
TEMP_FILE = config["TEMP_FILE"]
MASTER = config["MASTER"]
BASE_R = config["BASE_R"]
BASE_L = config["BASE_L"]
FAST_SAVE = config["FAST_SAVE"]
# Set up logging.
logging.basicConfig(
filename=LOG_FOLDER + datetime.now().strftime("%Y-%m-%d"),
level=logging.DEBUG,
datefmt="%H:%M:%S",
format="%(asctime)s %(levelname)s: %(message)s",
)
# ****************************************************************************
# * Main Program *
# ****************************************************************************
def main():
# Entry point for 'rsinc' as terminal command.
recover = args.recovery
dry_run = args.dry
auto = args.auto
# Decide which folder(s) to sync.
if args.default:
tmp = DEFAULT_DIRS
elif len(args.folders) == 0:
tmp = [os.getcwd()]
else:
tmp = []
for f in args.folders:
if os.path.isabs(f):
tmp.append(os.path.normpath(f))
else:
tmp.append(os.path.abspath(f))
folders = []
for f in tmp:
if BASE_L not in f:
print(ylw("Rejecting:"), f, "not in", BASE_L)
elif not os.path.isdir(f):
if strtobool(
input(
ylw("WARN: ")
+ f"{f} does not exist in local, sync anyway? "
)
):
folders.append(os.path.relpath(f, BASE_L))
else:
folders.append(os.path.relpath(f, BASE_L))
# Get & read master.
if args.purge or not os.path.exists(MASTER):
print(ylw("WARN:"), MASTER, "missing, this must be your first run")
write(MASTER, {'history':[], 'ignores':[], 'nest':empty()})
master = read(MASTER)
history = master['history']
ignores = master['ignores']
nest = master['nest']
history = set(history)
# Find all the ignore files in lcl and save them.
if args.ignore:
ignores = []
for dirpath, dirnames, filenames in os.walk(BASE_L, followlinks=False):
for name in filenames:
if name == '.rignore':
ignores.append(os.path.join(dirpath, name))
print("Found:", ignores)
write(MASTER, {'history':list(history), 'ignores':ignores, 'nest':nest})
# Detect crashes.
if os.path.exists(TEMP_FILE):
corrupt = read(TEMP_FILE)["folder"]
if corrupt in folders:
folders.remove(corrupt)
folders.insert(0, corrupt)
recover = True
print(red("ERROR") + ", detected a crash, recovering", corrupt)
logging.warning("Detected crash, recovering %s", corrupt)
# Main loop.
for folder in folders:
print("")
path_lcl = os.path.join(BASE_L, folder)
path_rmt = os.path.join(BASE_R, folder)
# Determine if first run.
if os.path.join(BASE_L, folder) in history:
print(grn("Have:"), qt(folder) + ", entering sync & merge mode")
else:
print(ylw("Don't have:"), qt(folder) + ", entering first_sync mode")
recover = True
# Build relative regular expressions
rmt_regexs, lcl_regexs, plain = build_regexs(
BASE_L, BASE_R, path_lcl, ignores
)
print("Ignore:", plain)
# Scan directories.
SPIN.start(("Crawling: ") + qt(folder))
lcl = lsl(path_lcl, HASH_NAME)
rmt = lsl(path_rmt, HASH_NAME)
old = Flat(path_lcl)
SPIN.stop_and_persist(symbol="✔")
lcl.tag_ignore(lcl_regexs)
rmt.tag_ignore(rmt_regexs)
# First run & recover mode.
if recover:
print("Running", ylw("recover/first_sync"), "mode")
else:
print("Reading last state")
branch = get_branch(nest, folder)
unpack(branch, old)
calc_states(old, lcl)
calc_states(old, rmt)
print(grn("Dry pass:"))
total, new_dirs, _, _ = sync(
lcl,
rmt,
old,
recover,
dry_run=True,
case=CASE_INSENSATIVE,
flags=args.args,
)
print("Found:", total, "job(s)")
print("With:", len(new_dirs), "folder(s) to make")
if not dry_run and (
auto or total == 0 or strtobool(input("Execute? "))
):
if total != 0 or recover:
print(grn("Live pass:"))
write(TEMP_FILE, {"folder": folder})
make_dirs(new_dirs)
_, _, lcl, rmt, = sync(
lcl,
rmt,
old,
recover,
total=total,
case=CASE_INSENSATIVE,
dry_run=dry_run,
flags=args.args,
)
SPIN.start(grn("Saving: ") + qt(folder))
# Get post sync state
if total == 0:
print("Skipping crawl as no jobs")
now = lcl
elif FAST_SAVE:
print("Skipping crawl as FAST_SAVE")
now = lcl
else:
now = lsl(path_lcl, HASH_NAME)
now.tag_ignore(lcl_regexs)
now.rm_ignore()
# Merge into history.
history.add(os.path.join(BASE_L, folder))
history.update(d for d in now.dirs)
# Merge into nest
merge(nest, folder, pack(now))
write(MASTER, {'history':list(history), 'ignores':ignores, 'nest':nest})
subprocess.run(["rm", TEMP_FILE])
SPIN.stop_and_persist(symbol="✔")
if args.clean:
SPIN.start(grn("Pruning: ") + qt(folder))
subprocess.run(["rclone", "rmdirs", path_rmt])
subprocess.run(["rclone", "rmdirs", path_lcl])
SPIN.stop_and_persist(symbol="✔")
recover = args.recovery
print("")
print(grn("All synced!"))
def build_regexs(BASE_L, BASE_R, path_lcl, files):
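    """Compile the .rignore patterns that apply to path_lcl into regexes for
    both the local and the remote base, and also return the plain patterns."""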
lcl_regex = []
rmt_regex = []
plain = []
for file in files:
for f_char, p_char in zip(os.path.dirname(file), path_lcl):
if f_char != p_char:
break
else:
if os.path.exists(file):
with open(file, "r") as fp:
for line in fp:
if line.rstrip() == "":
continue
mid = os.path.dirname(file)
mid = mid[len(BASE_L) + 1:]
mid = os.path.join(escape(mid), line.rstrip())
lcl = os.path.join(escape(BASE_L), mid)
rmt = os.path.join(escape(BASE_R), mid)
plain.append(mid)
lcl_regex.append(re.compile(lcl))
rmt_regex.append(re.compile(rmt))
return rmt_regex, lcl_regex, plain
STB = (
"yes",
"ye",
"y",
"1",
"t",
"true",
"",
"go",
"please",
"fire away",
"punch it",
"sure",
"ok",
"hell yes",
)
ESCAPE = {
"\\": "\\\\",
".": "\\.",
"^": "\\^",
"$": "\\$",
"*": "\\*",
"+": "\\+",
"?": "\\?",
"|": "\\|",
"(": "\\(",
")": "\\)",
"{": "\\{",
"}": "\\}",
"[": "\\[",
"]": "\\]",
}
|
[
"argparse.ArgumentParser",
"ujson.dump",
"os.walk",
"halo.Halo",
"os.path.isfile",
"os.path.join",
"os.path.abspath",
"logging.warning",
"os.path.dirname",
"argparse.HelpFormatter",
"os.path.exists",
"os.path.normpath",
"datetime.datetime.now",
"ujson.load",
"re.compile",
"subprocess.run",
"os.path.isabs",
"os.getcwd",
"pyfiglet.Figlet",
"os.path.isdir",
"os.path.relpath",
"os.path.expanduser"
] |
[((469, 529), 'halo.Halo', 'halo.Halo', ([], {'spinner': '"""dots"""', 'placement': '"""right"""', 'color': '"""yellow"""'}), "(spinner='dots', placement='right', color='yellow')\n", (478, 529), False, 'import halo\n'), ((544, 586), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.rsinc/config.json"""'], {}), "('~/.rsinc/config.json')\n", (562, 586), False, 'import os\n'), ((624, 647), 'pyfiglet.Figlet', 'Figlet', ([], {'font': '"""graffiti"""'}), "(font='graffiti')\n", (630, 647), False, 'from pyfiglet import Figlet\n'), ((1962, 2012), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'formatter'}), '(formatter_class=formatter)\n', (1985, 2012), False, 'import argparse\n'), ((1900, 1950), 'argparse.HelpFormatter', 'argparse.HelpFormatter', (['prog'], {'max_help_position': '(52)'}), '(prog, max_help_position=52)\n', (1922, 1950), False, 'import argparse\n'), ((6173, 6198), 'os.path.exists', 'os.path.exists', (['TEMP_FILE'], {}), '(TEMP_FILE)\n', (6187, 6198), False, 'import os\n'), ((1399, 1442), 'ujson.dump', 'ujson.dump', (['d', 'fp'], {'sort_keys': '(True)', 'indent': '(2)'}), '(d, fp, sort_keys=True, indent=2)\n', (1409, 1442), False, 'import ujson\n'), ((3553, 3580), 'os.path.isfile', 'os.path.isfile', (['config_path'], {}), '(config_path)\n', (3567, 3580), False, 'import os\n'), ((5854, 5888), 'os.walk', 'os.walk', (['BASE_L'], {'followlinks': '(False)'}), '(BASE_L, followlinks=False)\n', (5861, 5888), False, 'import os\n'), ((6450, 6507), 'logging.warning', 'logging.warning', (['"""Detected crash, recovering %s"""', 'corrupt'], {}), "('Detected crash, recovering %s', corrupt)\n", (6465, 6507), False, 'import logging\n'), ((6590, 6618), 'os.path.join', 'os.path.join', (['BASE_L', 'folder'], {}), '(BASE_L, folder)\n', (6602, 6618), False, 'import os\n'), ((6638, 6666), 'os.path.join', 'os.path.join', (['BASE_R', 'folder'], {}), '(BASE_R, folder)\n', (6650, 6666), False, 'import os\n'), ((970, 984), 'ujson.load', 'ujson.load', (['fp'], {}), '(fp)\n', (980, 984), False, 'import ujson\n'), ((5401, 5423), 'os.path.exists', 'os.path.exists', (['MASTER'], {}), '(MASTER)\n', (5415, 5423), False, 'import os\n'), ((6713, 6741), 'os.path.join', 'os.path.join', (['BASE_L', 'folder'], {}), '(BASE_L, folder)\n', (6725, 6741), False, 'import os\n'), ((9690, 9736), 'subprocess.run', 'subprocess.run', (["['rclone', 'rmdirs', path_rmt]"], {}), "(['rclone', 'rmdirs', path_rmt])\n", (9704, 9736), False, 'import subprocess\n'), ((9749, 9795), 'subprocess.run', 'subprocess.run', (["['rclone', 'rmdirs', path_lcl]"], {}), "(['rclone', 'rmdirs', path_lcl])\n", (9763, 9795), False, 'import subprocess\n'), ((10084, 10105), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (10099, 10105), False, 'import os\n'), ((10202, 10222), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (10216, 10222), False, 'import os\n'), ((4664, 4675), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4673, 4675), False, 'import os\n'), ((4750, 4766), 'os.path.isabs', 'os.path.isabs', (['f'], {}), '(f)\n', (4763, 4766), False, 'import os\n'), ((5020, 5036), 'os.path.isdir', 'os.path.isdir', (['f'], {}), '(f)\n', (5033, 5036), False, 'import os\n'), ((9515, 9548), 'subprocess.run', 'subprocess.run', (["['rm', TEMP_FILE]"], {}), "(['rm', TEMP_FILE])\n", (9529, 9548), False, 'import subprocess\n'), ((4012, 4026), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4024, 4026), False, 'from datetime import datetime\n'), ((5322, 5348), 'os.path.relpath', 'os.path.relpath', 
(['f', 'BASE_L'], {}), '(f, BASE_L)\n', (5337, 5348), False, 'import os\n'), ((9245, 9273), 'os.path.join', 'os.path.join', (['BASE_L', 'folder'], {}), '(BASE_L, folder)\n', (9257, 9273), False, 'import os\n'), ((4795, 4814), 'os.path.normpath', 'os.path.normpath', (['f'], {}), '(f)\n', (4811, 4814), False, 'import os\n'), ((4861, 4879), 'os.path.abspath', 'os.path.abspath', (['f'], {}), '(f)\n', (4876, 4879), False, 'import os\n'), ((5253, 5279), 'os.path.relpath', 'os.path.relpath', (['f', 'BASE_L'], {}), '(f, BASE_L)\n', (5268, 5279), False, 'import os\n'), ((5999, 6026), 'os.path.join', 'os.path.join', (['dirpath', 'name'], {}), '(dirpath, name)\n', (6011, 6026), False, 'import os\n'), ((10419, 10440), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (10434, 10440), False, 'import os\n'), ((10777, 10792), 're.compile', 're.compile', (['lcl'], {}), '(lcl)\n', (10787, 10792), False, 'import re\n'), ((10835, 10850), 're.compile', 're.compile', (['rmt'], {}), '(rmt)\n', (10845, 10850), False, 'import re\n')]
|
'''
Module to preprocess flickr8k image data
'''
import cv2
import numpy as np
import os
from _pickle import dump, load
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Flatten
from keras.models import load_model
from keras.preprocessing import image
from keras.applications.inception_v3 import preprocess_input
from keras.models import Model
from PIL import Image
def load_images_as_arrays(directory):
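    """Open every image in `directory` with PIL and return a dict mapping
    file name (without extension) to its numpy array."""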
img_array_dict = {}
for img_file in os.listdir(directory):
img_path = directory + '/' + img_file
img = Image.open(img_path)
x = np.array(img)
img_array_dict[os.path.splitext(img_file)[0]] = x
return img_array_dict
def extract_features(directory):
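    """Read every image in `directory`, resize it to resizeDim, scale the
    pixels to [0, 1] and return a dict with the image ids and the matrices."""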
# base_model = InceptionV3(weights='imagenet')
# model = Model(inputs=base_model.input, outputs=base_model.get_layer('avg_pool').output)
#model = load_model('./preprocessing/CNN_encoder_100epoch.h5')
#top = Flatten()(model.output)
#model = Model(inputs=model.input, outputs=top)
#print(model.summary())
img_id = []
img_matrices = []
i = 0
for img_file in os.listdir(directory):
print(i, ":", i > 1999 and i < 8000 or i > 8999)
'''if (i > 1999 and i < 8000 or i > 8999):
i += 1
continue'''
img_path = directory + '/' + img_file
resizeDim = (256, 512)
img = cv2.imread(img_path)
img = cv2.resize(img, resizeDim, interpolation=cv2.INTER_AREA)
img = img.astype('float16') / 255
#x = img.reshape(img.shape + (1,))
img_id.append(os.path.splitext(img_file)[0])
img_matrices.append(img)
i += 1
img_matrices = np.array(img_matrices)
#img_features = model.predict(img_matrices, verbose=1)
return {'ids': img_id, 'features': img_matrices}
def extract_feature_from_image(file_dir):
img = image.load_img(file_dir, target_size=(299, 299))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# base_model = InceptionV3(weights='imagenet')
# model = Model(inputs=base_model.input, outputs=base_model.get_layer('avg_pool').output)
model = load_model('CNN_encoder_100epoch.h5')
return model.predict(x)
def load_features(dict_dir, dataset_dir, repeat_times=1):
assert (repeat_times >= 1)
img_ids = []
with open(dataset_dir, 'r') as f:
for line in f.readlines():
img_ids.append(os.path.splitext(line)[0])
features_dict = load(open(dict_dir, 'rb'))
#features_dict = extract_features('./datasets/Flickr8k_Dataset')
dataset_features = []
for img_id in img_ids:
fidx = features_dict['ids'].index(img_id)
dataset_features.append(np.vstack([features_dict['features'][fidx, :]] * repeat_times))
#dataset_features = np.vstack(dataset_features)
return np.array(dataset_features)
if __name__ == "__main__":
# pre-extract image features from Inception Net
image_directory = './datasets/Flickr8k_Dataset'
features_dict = extract_features(image_directory)
dump(features_dict, open('./datasets/features_dict2.pkl', 'wb'),protocol=4)
|
[
"keras.models.load_model",
"numpy.expand_dims",
"PIL.Image.open",
"keras.applications.inception_v3.preprocess_input",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.load_img",
"cv2.imread",
"numpy.array",
"os.path.splitext",
"numpy.vstack",
"os.listdir",
"cv2.resize"
] |
[((483, 504), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (493, 504), False, 'import os\n'), ((1130, 1151), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1140, 1151), False, 'import os\n'), ((1693, 1715), 'numpy.array', 'np.array', (['img_matrices'], {}), '(img_matrices)\n', (1701, 1715), True, 'import numpy as np\n'), ((1884, 1932), 'keras.preprocessing.image.load_img', 'image.load_img', (['file_dir'], {'target_size': '(299, 299)'}), '(file_dir, target_size=(299, 299))\n', (1898, 1932), False, 'from keras.preprocessing import image\n'), ((1941, 1964), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (1959, 1964), False, 'from keras.preprocessing import image\n'), ((1973, 1998), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1987, 1998), True, 'import numpy as np\n'), ((2007, 2026), 'keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (2023, 2026), False, 'from keras.applications.inception_v3 import preprocess_input\n'), ((2185, 2222), 'keras.models.load_model', 'load_model', (['"""CNN_encoder_100epoch.h5"""'], {}), "('CNN_encoder_100epoch.h5')\n", (2195, 2222), False, 'from keras.models import load_model\n'), ((2869, 2895), 'numpy.array', 'np.array', (['dataset_features'], {}), '(dataset_features)\n', (2877, 2895), True, 'import numpy as np\n'), ((567, 587), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (577, 587), False, 'from PIL import Image\n'), ((600, 613), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (608, 613), True, 'import numpy as np\n'), ((1395, 1415), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1405, 1415), False, 'import cv2\n'), ((1430, 1486), 'cv2.resize', 'cv2.resize', (['img', 'resizeDim'], {'interpolation': 'cv2.INTER_AREA'}), '(img, resizeDim, interpolation=cv2.INTER_AREA)\n', (1440, 1486), False, 'import cv2\n'), ((2740, 2802), 'numpy.vstack', 'np.vstack', (["([features_dict['features'][fidx, :]] * repeat_times)"], {}), "([features_dict['features'][fidx, :]] * repeat_times)\n", (2749, 2802), True, 'import numpy as np\n'), ((638, 664), 'os.path.splitext', 'os.path.splitext', (['img_file'], {}), '(img_file)\n', (654, 664), False, 'import os\n'), ((1594, 1620), 'os.path.splitext', 'os.path.splitext', (['img_file'], {}), '(img_file)\n', (1610, 1620), False, 'import os\n'), ((2461, 2483), 'os.path.splitext', 'os.path.splitext', (['line'], {}), '(line)\n', (2477, 2483), False, 'import os\n')]
|
import subprocess
import psycopg2
import multiprocessing
import threading
import os
import pathlib
import sendgrid
import datetime
import shutil
import boto3
from botocore.exceptions import ClientError
from boto3.dynamodb.conditions import Key, Attr
from sendgrid.helpers.mail import *
MAX_WORKERS = 1
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VOICES_ROOT = os.path.join(PROJECT_ROOT, 'static')
dynamodb = boto3.resource('dynamodb')
def ConvertAudio(fileImput):
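    # Copy the recording if it is already an MP3, otherwise transcode it with
    # ffmpeg; on success mark it processed and notify the author by e-mail.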
    # validate fileImput
inFile = GetOrginalPath(fileImput[1])
outputFileName = VOICES_ROOT + '/voices/processed/' + GetFileName(inFile) + '.mp3'
if IsMp3(fileImput[1]):
print("--mp3Detected--")
shutil.copy(inFile, outputFileName)
PostProcessFile(fileImput[0], outputFileName, fileImput[2], fileImput[3])
return True
else:
print("--NonMp3Detected--")
result = subprocess.run(['ffmpeg', '-i', inFile, '-acodec', 'libmp3lame', outputFileName])
#command = "ffmpeg -i {0} -acodec libmp3lame {1}".format(fileImput, outputFileName)
#result = os.system(command)
        if result.returncode == 0:
PostProcessFile(fileImput[0], outputFileName, fileImput[2], fileImput[3])
return True
else:
return False
def PostProcessFile(fileId, outFile, mail, url):
UpdateProcessedFile(fileId, outFile)
#SendEmailSendgrid(mail, url)
SendEmailSeS(mail, url)
def GetOrginalPath(relativepath):
file = pathlib.PurePath(relativepath).name
return VOICES_ROOT + '/voices/original/' + file
def IsMp3(filePath):
file_extension = pathlib.PurePath(filePath).suffix
if file_extension == '.mp3':
return True
else:
return False
def GetFileName(filePath):
return pathlib.PurePath(filePath).stem
def UpdateProcessedFile(fileId, filePath):
print("updatefile--" + str(fileId))
conn = None
with threading.Lock():
try:
outputFileDB = 'voices/processed/' + GetFileName(filePath) + '.mp3'
table = dynamodb.Table('grabacion')
table.update_item(
Key={
'Archivo_Original': fileId
},
UpdateExpression='SET Estado_Archivo=:val1, Archivo_Final=:val2',
ExpressionAttributeValues={
':val1': 1,
':val2': outputFileDB
}
)
except(Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def SendEmailSendgrid(mail, url):
print("usendmail--" + mail)
sg = sendgrid.SendGridAPIClient(
apikey=os.environ.get('SENDGRID_API_KEY')
)
from_email = Email("<EMAIL>")
to_email = Email(mail)
subject = "La voz ya esta disponible"
WS_IP = os.environ.get('IP_HOST')
content = Content(
"text/plain", "La voz ya se encuentra disponible en la página principal del concurso " +
WS_IP + "/concursar/" + url
)
mail = Mail(from_email, subject, to_email, content)
response = sg.client.mail.send.post(request_body=mail.get())
def SendEmailSeS(mail, url):
print("usendmai2--" + mail)
client = boto3.client('ses')
WS_IP = os.environ.get('IP_HOST') + '/concursar/' + url
mensaje = '<html><head></head><body><p>La voz ya se encuentra disponible en la página principal del ' +\
'concurso, visite</p> <a href="' + WS_IP + '">Supervoices</a> ' +\
'<p>para mas informacion</p></body></html>'
# Try to send the email.
try:
# Provide the contents of the email.
response = client.send_email(
Destination={
'ToAddresses': [
mail,
],
},
Message={
'Body': {
'Html': {
'Charset': 'UTF8',
'Data': mensaje,
},
},
'Subject': {
'Charset': 'UTF8',
'Data': 'La voz ya esta disponible',
},
},
Source='<EMAIL>',
#ConfigurationSetName='UTF8',
)
# Display an error if something goes wrong.
except ClientError as e:
print("catchSeS" + mail)
print(e.response['Error']['Message'])
else:
print("Email sent! Message ID:"),
print(response['MessageId'])
def GetPendingFiles():
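    # Fetch the recordings whose Estado_Archivo is 0 (not yet transcoded) from DynamoDB.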
conn = None
files = None
try:
table = dynamodb.Table('grabacion')
response = table.query(
KeyConditionExpression=Key('Estado_Archivo').eq(0)
)
items = response['Items']
#cur.execute("""SELECT gr.id, gr."Archivo_Original", gr."Mail_Autor", co."Url" FROM api_grabacion gr, api_concurso co WHERE gr."Estado_Archivo" = 0 and gr."Concurso_id" = co.id""")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return items
def UpdateLogTable(startTime, endTime, totalFiles):
conn = None
try:
table = dynamodb.Table('grabacion')
table.put_item(
Item={
'startDate': startTime,
'endDate': endTime,
'totalFiles': totalFiles
}
)
except(Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def StartJob():
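    # Transcode every pending recording in a worker pool, then log the start/end
    # times and the number of files processed.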
startTime = datetime.datetime.now(datetime.timezone.utc)
pendingFiles = GetPendingFiles()
if pendingFiles is not None:
with multiprocessing.Pool(MAX_WORKERS) as pool:
results = pool.imap(ConvertAudio, pendingFiles)
totalFiles = sum(results)
endTime = datetime.datetime.now(datetime.timezone.utc)
UpdateLogTable(startTime, endTime, totalFiles)
else:
UpdateLogTable(startTime, startTime, 0)
print("No files to Tansform")
if __name__ == '__main__':
StartJob()
|
[
"subprocess.run",
"os.path.abspath",
"boto3.client",
"boto3.dynamodb.conditions.Key",
"datetime.datetime.now",
"os.environ.get",
"threading.Lock",
"boto3.resource",
"pathlib.PurePath",
"multiprocessing.Pool",
"os.path.join",
"shutil.copy"
] |
[((392, 428), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""static"""'], {}), "(PROJECT_ROOT, 'static')\n", (404, 428), False, 'import os\n'), ((440, 466), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {}), "('dynamodb')\n", (454, 466), False, 'import boto3\n'), ((2899, 2924), 'os.environ.get', 'os.environ.get', (['"""IP_HOST"""'], {}), "('IP_HOST')\n", (2913, 2924), False, 'import os\n'), ((3298, 3317), 'boto3.client', 'boto3.client', (['"""ses"""'], {}), "('ses')\n", (3310, 3317), False, 'import boto3\n'), ((5643, 5687), 'datetime.datetime.now', 'datetime.datetime.now', (['datetime.timezone.utc'], {}), '(datetime.timezone.utc)\n', (5664, 5687), False, 'import datetime\n'), ((350, 375), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (365, 375), False, 'import os\n'), ((720, 755), 'shutil.copy', 'shutil.copy', (['inFile', 'outputFileName'], {}), '(inFile, outputFileName)\n', (731, 755), False, 'import shutil\n'), ((921, 1006), 'subprocess.run', 'subprocess.run', (["['ffmpeg', '-i', inFile, '-acodec', 'libmp3lame', outputFileName]"], {}), "(['ffmpeg', '-i', inFile, '-acodec', 'libmp3lame',\n outputFileName])\n", (935, 1006), False, 'import subprocess\n'), ((1517, 1547), 'pathlib.PurePath', 'pathlib.PurePath', (['relativepath'], {}), '(relativepath)\n', (1533, 1547), False, 'import pathlib\n'), ((1649, 1675), 'pathlib.PurePath', 'pathlib.PurePath', (['filePath'], {}), '(filePath)\n', (1665, 1675), False, 'import pathlib\n'), ((1807, 1833), 'pathlib.PurePath', 'pathlib.PurePath', (['filePath'], {}), '(filePath)\n', (1823, 1833), False, 'import pathlib\n'), ((1949, 1965), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1963, 1965), False, 'import threading\n'), ((2743, 2777), 'os.environ.get', 'os.environ.get', (['"""SENDGRID_API_KEY"""'], {}), "('SENDGRID_API_KEY')\n", (2757, 2777), False, 'import os\n'), ((3330, 3355), 'os.environ.get', 'os.environ.get', (['"""IP_HOST"""'], {}), "('IP_HOST')\n", (3344, 3355), False, 'import os\n'), ((5771, 5804), 'multiprocessing.Pool', 'multiprocessing.Pool', (['MAX_WORKERS'], {}), '(MAX_WORKERS)\n', (5791, 5804), False, 'import multiprocessing\n'), ((5934, 5978), 'datetime.datetime.now', 'datetime.datetime.now', (['datetime.timezone.utc'], {}), '(datetime.timezone.utc)\n', (5955, 5978), False, 'import datetime\n'), ((4735, 4756), 'boto3.dynamodb.conditions.Key', 'Key', (['"""Estado_Archivo"""'], {}), "('Estado_Archivo')\n", (4738, 4756), False, 'from boto3.dynamodb.conditions import Key, Attr\n')]
|
from features.friend.entities import Friendship
from features.friend.models import FriendshipOutput
def map_friendship_to_friendship_output(*, friendship: Friendship) -> FriendshipOutput:
return FriendshipOutput(
client_id=friendship.client_id,
friend_id=friendship.friend_id,
requested=friendship.requested,
approved=friendship.approved
)
|
[
"features.friend.models.FriendshipOutput"
] |
[((201, 348), 'features.friend.models.FriendshipOutput', 'FriendshipOutput', ([], {'client_id': 'friendship.client_id', 'friend_id': 'friendship.friend_id', 'requested': 'friendship.requested', 'approved': 'friendship.approved'}), '(client_id=friendship.client_id, friend_id=friendship.\n friend_id, requested=friendship.requested, approved=friendship.approved)\n', (217, 348), False, 'from features.friend.models import FriendshipOutput\n')]
|
from train import models_initializers
model_name = 'basic_nn'
model = models_initializers.get(model_name)()
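# Print each layer's name, input shape and output shape as a quick architecture check.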
for l in model.layers:
print(f'{l.name} -- {l.input_shape} -- {l.output_shape}')
|
[
"train.models_initializers.get"
] |
[((72, 107), 'train.models_initializers.get', 'models_initializers.get', (['model_name'], {}), '(model_name)\n', (95, 107), False, 'from train import models_initializers\n')]
|
from bson.json_util import dumps
from flask import request, render_template
from app import Carrello, app
from complements.db import SearchIntoDb, SearchviaAttributesCASE
from complements.forms import Searchfor, CaseSelect
@app.route('/case', methods=['POST', 'GET'])
def case():
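    # CASE category page: free-text search ("submit"), adding the selected item
    # to the Carrello ("val"), and brand/model/price filtering ("submitf").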
form1 = Searchfor()
form2 = CaseSelect()
qir = list()
if request.method == 'POST':
if request.form.get('submit'):
query = SearchIntoDb("CASE", request.form.get('search')).findquery()
for x in query:
qir.insert(1, [dumps(x['name']), dumps(x['marca']), dumps(x['COSTO']), dumps(x['_id'])])
return render_template("case.html", form=form1, form2=form2, queri=qir)
if request.form.get('val'):
x = str(request.form.get('val'))
x = x.split('"$oid": "', 1)[1]
x = x.split('"', 1)[0]
Carrello.Insert(x, 6, "CASE")
if request.form.get("submitf"):
marche = list()
model = list()
if request.form.get("Col"):
marche.append("Cooler Master")
if request.form.get("Shark"):
marche.append("Sharkoon")
if request.form.get("Therm"):
marche.append("Thermaltake")
if request.form.get("ATX"):
model.append("ATX")
if request.form.get("mATX"):
model.append("mATX")
if request.form.get('minmonet'):
min = request.form.get('minmonet')
else:
min = "0"
if request.form.get('maxmonet'):
max = request.form.get('maxmonet')
else:
max = "10000"
query = SearchviaAttributesCASE("CASE", " ".join(marche), min, max, " ".join(model)
).findqueryattr()
for x in query:
qir.insert(1, [dumps(x['name']), dumps(x['marca']), dumps(x['COSTO']), dumps(x['_id'])])
return render_template("case.html", form=form1, form2=form2, queri=qir)
return render_template("case.html", form=form1, form2=form2)
|
[
"app.app.route",
"flask.request.form.get",
"flask.render_template",
"complements.forms.CaseSelect",
"bson.json_util.dumps",
"complements.forms.Searchfor",
"app.Carrello.Insert"
] |
[((227, 270), 'app.app.route', 'app.route', (['"""/case"""'], {'methods': "['POST', 'GET']"}), "('/case', methods=['POST', 'GET'])\n", (236, 270), False, 'from app import Carrello, app\n'), ((295, 306), 'complements.forms.Searchfor', 'Searchfor', ([], {}), '()\n', (304, 306), False, 'from complements.forms import Searchfor, CaseSelect\n'), ((319, 331), 'complements.forms.CaseSelect', 'CaseSelect', ([], {}), '()\n', (329, 331), False, 'from complements.forms import Searchfor, CaseSelect\n'), ((2100, 2153), 'flask.render_template', 'render_template', (['"""case.html"""'], {'form': 'form1', 'form2': 'form2'}), "('case.html', form=form1, form2=form2)\n", (2115, 2153), False, 'from flask import request, render_template\n'), ((393, 419), 'flask.request.form.get', 'request.form.get', (['"""submit"""'], {}), "('submit')\n", (409, 419), False, 'from flask import request, render_template\n'), ((730, 753), 'flask.request.form.get', 'request.form.get', (['"""val"""'], {}), "('val')\n", (746, 753), False, 'from flask import request, render_template\n'), ((931, 958), 'flask.request.form.get', 'request.form.get', (['"""submitf"""'], {}), "('submitf')\n", (947, 958), False, 'from flask import request, render_template\n'), ((654, 718), 'flask.render_template', 'render_template', (['"""case.html"""'], {'form': 'form1', 'form2': 'form2', 'queri': 'qir'}), "('case.html', form=form1, form2=form2, queri=qir)\n", (669, 718), False, 'from flask import request, render_template\n'), ((890, 919), 'app.Carrello.Insert', 'Carrello.Insert', (['x', '(6)', '"""CASE"""'], {}), "(x, 6, 'CASE')\n", (905, 919), False, 'from app import Carrello, app\n'), ((1030, 1053), 'flask.request.form.get', 'request.form.get', (['"""Col"""'], {}), "('Col')\n", (1046, 1053), False, 'from flask import request, render_template\n'), ((1117, 1142), 'flask.request.form.get', 'request.form.get', (['"""Shark"""'], {}), "('Shark')\n", (1133, 1142), False, 'from flask import request, render_template\n'), ((1201, 1226), 'flask.request.form.get', 'request.form.get', (['"""Therm"""'], {}), "('Therm')\n", (1217, 1226), False, 'from flask import request, render_template\n'), ((1289, 1312), 'flask.request.form.get', 'request.form.get', (['"""ATX"""'], {}), "('ATX')\n", (1305, 1312), False, 'from flask import request, render_template\n'), ((1365, 1389), 'flask.request.form.get', 'request.form.get', (['"""mATX"""'], {}), "('mATX')\n", (1381, 1389), False, 'from flask import request, render_template\n'), ((1444, 1472), 'flask.request.form.get', 'request.form.get', (['"""minmonet"""'], {}), "('minmonet')\n", (1460, 1472), False, 'from flask import request, render_template\n'), ((1584, 1612), 'flask.request.form.get', 'request.form.get', (['"""maxmonet"""'], {}), "('maxmonet')\n", (1600, 1612), False, 'from flask import request, render_template\n'), ((2023, 2087), 'flask.render_template', 'render_template', (['"""case.html"""'], {'form': 'form1', 'form2': 'form2', 'queri': 'qir'}), "('case.html', form=form1, form2=form2, queri=qir)\n", (2038, 2087), False, 'from flask import request, render_template\n'), ((775, 798), 'flask.request.form.get', 'request.form.get', (['"""val"""'], {}), "('val')\n", (791, 798), False, 'from flask import request, render_template\n'), ((1496, 1524), 'flask.request.form.get', 'request.form.get', (['"""minmonet"""'], {}), "('minmonet')\n", (1512, 1524), False, 'from flask import request, render_template\n'), ((1636, 1664), 'flask.request.form.get', 'request.form.get', (['"""maxmonet"""'], {}), "('maxmonet')\n", (1652, 1664), False, 
'from flask import request, render_template\n'), ((462, 488), 'flask.request.form.get', 'request.form.get', (['"""search"""'], {}), "('search')\n", (478, 488), False, 'from flask import request, render_template\n'), ((561, 577), 'bson.json_util.dumps', 'dumps', (["x['name']"], {}), "(x['name'])\n", (566, 577), False, 'from bson.json_util import dumps\n'), ((579, 596), 'bson.json_util.dumps', 'dumps', (["x['marca']"], {}), "(x['marca'])\n", (584, 596), False, 'from bson.json_util import dumps\n'), ((598, 615), 'bson.json_util.dumps', 'dumps', (["x['COSTO']"], {}), "(x['COSTO'])\n", (603, 615), False, 'from bson.json_util import dumps\n'), ((617, 632), 'bson.json_util.dumps', 'dumps', (["x['_id']"], {}), "(x['_id'])\n", (622, 632), False, 'from bson.json_util import dumps\n'), ((1930, 1946), 'bson.json_util.dumps', 'dumps', (["x['name']"], {}), "(x['name'])\n", (1935, 1946), False, 'from bson.json_util import dumps\n'), ((1948, 1965), 'bson.json_util.dumps', 'dumps', (["x['marca']"], {}), "(x['marca'])\n", (1953, 1965), False, 'from bson.json_util import dumps\n'), ((1967, 1984), 'bson.json_util.dumps', 'dumps', (["x['COSTO']"], {}), "(x['COSTO'])\n", (1972, 1984), False, 'from bson.json_util import dumps\n'), ((1986, 2001), 'bson.json_util.dumps', 'dumps', (["x['_id']"], {}), "(x['_id'])\n", (1991, 2001), False, 'from bson.json_util import dumps\n')]
|
from secedgar.filings import Filing, FilingType
# Download the 10-K filings for every company listed in the CIK/ticker CSV below.
import pandas as pd
path = '/Users/schen/sec-scraper/data/cik_ticker.csv'
df = pd.read_csv(path, sep='|')
def run(df):
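    # For each CIK shorter than 10 digits, zero-pad it to 10, download its 10-K
    # filings and save them; a ValueError (no filings) is caught and reported.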
cik = list(df['CIK'])
names = list(df['Name'])
for c, n in zip(cik, names):
if len(str(c)) < 10:
missing = 10 - len(str(c))
temp = ("0" * missing) + str(c)
print("SCRAPING {} ...".format(temp))
            my_filings = Filing(cik=temp, filing_type=FilingType.FILING_10K) # 10-K filings for this CIK
try:
                my_filings.save('./filings/') # Saves the fetched 10-K reports to ./filings/
except ValueError:
print("No {}".format(n))
run(df)
|
[
"pandas.read_csv",
"secedgar.filings.Filing"
] |
[((189, 215), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': '"""|"""'}), "(path, sep='|')\n", (200, 215), True, 'import pandas as pd\n'), ((501, 552), 'secedgar.filings.Filing', 'Filing', ([], {'cik': 'temp', 'filing_type': 'FilingType.FILING_10K'}), '(cik=temp, filing_type=FilingType.FILING_10K)\n', (507, 552), False, 'from secedgar.filings import Filing, FilingType\n')]
|
import unittest
import codesmith.CodeCommit.PipelineTrigger.pipline_trigger as pt
import json
import io
import zipfile
COMMIT_REFERENCE = {
"commit": "5<PASSWORD>",
"ref": "refs/heads/master"
}
TAG_REFERENCE = {
"commit": "<PASSWORD>",
"ref": "refs/tags/v1.1.0"
}
class PipelineTriggerTest(unittest.TestCase):
def test_extract_repository_name(self):
self.assertEqual('my-repo', pt.extract_repository_name('arn:aws:codecommit:eu-west-1:123456789012:my-repo'))
self.assertEqual('', pt.extract_repository_name(''))
self.assertEqual('anything', pt.extract_repository_name('anything'))
def test_is_commit(self):
self.assertTrue(pt.is_commit(COMMIT_REFERENCE))
self.assertFalse(pt.is_commit(TAG_REFERENCE))
def test_is_tag(self):
self.assertTrue(pt.is_tag(TAG_REFERENCE))
self.assertFalse(pt.is_tag(COMMIT_REFERENCE))
def test_extract_tag(self):
self.assertEqual('v1.1.0', pt.extract_tag(TAG_REFERENCE))
def test_event(self):
with open('code_commit_event.json') as f:
event = json.load(f)
pipeline_trigger = pt.derive_trigger(event['Records'][0])
self.assertEqual('eu-west-1', pipeline_trigger.aws_region)
self.assertEqual('my-repo', pipeline_trigger.repository)
self.assertEqual('git checkout 5<PASSWORD>0<PASSWORD>', pipeline_trigger.checkout_command)
buf = io.BytesIO(pipeline_trigger.generate_zip_file())
with zipfile.ZipFile(buf) as zf:
given_files = [file.filename for file in zf.filelist]
expected_files = ['buildspec.yaml', 'chechkout.sh']
self.assertEqual(expected_files, given_files)
given_checkout_text = pipeline_trigger.generate_files()['chechkout.sh']
expected_checkout_text = '''#!/bin/bash
git config --global credential.helper '!aws codecommit credential-helper $@'
git config --global credential.UseHttpPath true
git clone --shallow-submodules https://git-codecommit.eu-west-1.amazonaws.com/v1/repos/my-repo repo
cd repo
git checkout 5<PASSWORD>
cd
'''
self.assertEqual(expected_checkout_text, given_checkout_text)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"codesmith.CodeCommit.PipelineTrigger.pipline_trigger.derive_trigger",
"codesmith.CodeCommit.PipelineTrigger.pipline_trigger.is_commit",
"codesmith.CodeCommit.PipelineTrigger.pipline_trigger.extract_tag",
"json.load",
"zipfile.ZipFile",
"codesmith.CodeCommit.PipelineTrigger.pipline_trigger.is_tag",
"codesmith.CodeCommit.PipelineTrigger.pipline_trigger.extract_repository_name"
] |
[((2202, 2217), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2215, 2217), False, 'import unittest\n'), ((1139, 1177), 'codesmith.CodeCommit.PipelineTrigger.pipline_trigger.derive_trigger', 'pt.derive_trigger', (["event['Records'][0]"], {}), "(event['Records'][0])\n", (1156, 1177), True, 'import codesmith.CodeCommit.PipelineTrigger.pipline_trigger as pt\n'), ((410, 489), 'codesmith.CodeCommit.PipelineTrigger.pipline_trigger.extract_repository_name', 'pt.extract_repository_name', (['"""arn:aws:codecommit:eu-west-1:123456789012:my-repo"""'], {}), "('arn:aws:codecommit:eu-west-1:123456789012:my-repo')\n", (436, 489), True, 'import codesmith.CodeCommit.PipelineTrigger.pipline_trigger as pt\n'), ((521, 551), 'codesmith.CodeCommit.PipelineTrigger.pipline_trigger.extract_repository_name', 'pt.extract_repository_name', (['""""""'], {}), "('')\n", (547, 551), True, 'import codesmith.CodeCommit.PipelineTrigger.pipline_trigger as pt\n'), ((590, 628), 'codesmith.CodeCommit.PipelineTrigger.pipline_trigger.extract_repository_name', 'pt.extract_repository_name', (['"""anything"""'], {}), "('anything')\n", (616, 628), True, 'import codesmith.CodeCommit.PipelineTrigger.pipline_trigger as pt\n'), ((685, 715), 'codesmith.CodeCommit.PipelineTrigger.pipline_trigger.is_commit', 'pt.is_commit', (['COMMIT_REFERENCE'], {}), '(COMMIT_REFERENCE)\n', (697, 715), True, 'import codesmith.CodeCommit.PipelineTrigger.pipline_trigger as pt\n'), ((742, 769), 'codesmith.CodeCommit.PipelineTrigger.pipline_trigger.is_commit', 'pt.is_commit', (['TAG_REFERENCE'], {}), '(TAG_REFERENCE)\n', (754, 769), True, 'import codesmith.CodeCommit.PipelineTrigger.pipline_trigger as pt\n'), ((823, 847), 'codesmith.CodeCommit.PipelineTrigger.pipline_trigger.is_tag', 'pt.is_tag', (['TAG_REFERENCE'], {}), '(TAG_REFERENCE)\n', (832, 847), True, 'import codesmith.CodeCommit.PipelineTrigger.pipline_trigger as pt\n'), ((874, 901), 'codesmith.CodeCommit.PipelineTrigger.pipline_trigger.is_tag', 'pt.is_tag', (['COMMIT_REFERENCE'], {}), '(COMMIT_REFERENCE)\n', (883, 901), True, 'import codesmith.CodeCommit.PipelineTrigger.pipline_trigger as pt\n'), ((971, 1000), 'codesmith.CodeCommit.PipelineTrigger.pipline_trigger.extract_tag', 'pt.extract_tag', (['TAG_REFERENCE'], {}), '(TAG_REFERENCE)\n', (985, 1000), True, 'import codesmith.CodeCommit.PipelineTrigger.pipline_trigger as pt\n'), ((1099, 1111), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1108, 1111), False, 'import json\n'), ((1487, 1507), 'zipfile.ZipFile', 'zipfile.ZipFile', (['buf'], {}), '(buf)\n', (1502, 1507), False, 'import zipfile\n')]
|
# Copyright (c) 2017, <NAME>
from datetime import datetime
import time
def convert(date):
"""Convert a date string into a datetime instance. Assumes date string
    is RFC 3339 format."""
time_s = time.strptime(date, '%Y-%m-%dT%H:%M:%S')
return datetime.fromtimestamp(time.mktime(time_s))
|
[
"time.mktime",
"time.strptime"
] |
[((208, 248), 'time.strptime', 'time.strptime', (['date', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(date, '%Y-%m-%dT%H:%M:%S')\n", (221, 248), False, 'import time\n'), ((283, 302), 'time.mktime', 'time.mktime', (['time_s'], {}), '(time_s)\n', (294, 302), False, 'import time\n')]
|
# Omid55
# Test module for network_utils.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import networkx as nx
import pandas as pd
import numpy as np
import unittest
import datetime
import re
from parameterized import parameterized
import utils
import network_utils
class MyTestClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.triad_map, cls.triad_list = (
network_utils.generate_all_possible_sparse_triads())
@classmethod
def tearDownClass(cls):
del cls.triad_map
del cls.triad_list
# =========================================================================
# ==================== extract_graph ======================================
# =========================================================================
@parameterized.expand([
["latest_multiple_edge_weight", False],
["sum_of_multiple_edge_weights", True]])
def test_extract_graph(self, name, sum_multiple_edge):
matrix_edges = [
[1, 2, +1, datetime.datetime(2017, 1, 1)],
[1, 2, +5, datetime.datetime(2017, 1, 2)],
[2, 3, +3, datetime.datetime(2017, 1, 4)],
[3, 1, +1, datetime.datetime(2017, 2, 5)],
[2, 3, -2, datetime.datetime(2017, 1, 6)],
[1, 4, -1, datetime.datetime(2017, 2, 13)],
[4, 3, -5, datetime.datetime(2017, 2, 22)],
[4, 3, -5, datetime.datetime(2017, 2, 24)]]
sample_edge_list = pd.DataFrame(
matrix_edges, columns=['source', 'target', 'weight', 'edge_date'])
expected = nx.DiGraph()
expected.add_nodes_from([1, 2, 3, 4])
if sum_multiple_edge:
expected.add_edge(1, 2, weight=6)
else:
expected.add_edge(1, 2, weight=5)
if sum_multiple_edge:
expected.add_edge(2, 3, weight=1)
else:
expected.add_edge(2, 3, weight=-2)
expected.add_edge(3, 1, weight=1)
expected.add_edge(1, 4, weight=-1)
if sum_multiple_edge:
expected.add_edge(4, 3, weight=-10)
else:
expected.add_edge(4, 3, weight=-5)
computed = network_utils.extract_graph(
sample_edge_list, sum_multiple_edge=sum_multiple_edge)
self.assertTrue(
utils.graph_equals(
expected,
computed,
weight_column_name='weight'))
# =========================================================================
# ==================== extract_graphs =====================================
# =========================================================================
def test_extract_graphs_raises_with_missing_columns(self):
sample_edge_list = pd.DataFrame({'source': [1, 2], 'target': [5, 6]})
with self.assertRaises(ValueError):
network_utils.extract_graphs(edge_list=sample_edge_list)
@parameterized.expand(
[["seperated graphs", False],
["accumulative graphs", True]])
def test_extract_graphs(self, name, accumulative):
# source, target, weight, edge_date
matrix_edges = [[1, 2, +1, datetime.datetime(2017, 1, 1)],
[2, 3, +3, datetime.datetime(2017, 1, 4)],
[3, 1, +1, datetime.datetime(2017, 2, 5)],
[1, 4, -1, datetime.datetime(2017, 2, 13)],
[4, 3, -5, datetime.datetime(2017, 2, 24)],
[-1, -1, -1, datetime.datetime(2017, 2, 28)]]
# The last one is going to be ignored because fall into another period
# which is neglected.
sample_edge_list = pd.DataFrame(
matrix_edges, columns=['source', 'target', 'weight', 'edge_date'])
g1 = nx.DiGraph()
g1.add_nodes_from([1, 2, 3])
g1.add_edge(1, 2, weight=1)
g1.add_edge(2, 3, weight=3)
g2 = nx.DiGraph()
g2.add_nodes_from([1, 3, 4])
g2.add_edge(3, 1, weight=1)
g2.add_edge(1, 4, weight=-1)
g2.add_edge(4, 3, weight=-5)
g3 = nx.DiGraph()
g3.add_nodes_from([1, 2, 3, 4])
g3.add_edge(1, 2, weight=1)
g3.add_edge(2, 3, weight=3)
g3.add_edge(3, 1, weight=1)
g3.add_edge(1, 4, weight=-1)
g3.add_edge(4, 3, weight=-5)
if not accumulative:
expected = [g1, g2]
else:
expected = [g1, g3]
computed = network_utils.extract_graphs(
edge_list=sample_edge_list, weeks=4, accumulative=accumulative)
for expected_graph, computed_graph in zip(expected, computed):
self.assertTrue(
utils.graph_equals(
expected_graph,
computed_graph,
weight_column_name='weight'))
# =========================================================================
# ====================== get_all_degrees ==================================
# =========================================================================
def test_get_all_degrees(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 1, weight=6)
dg.add_edge(1, 2, weight=1)
dg.add_edge(1, 4, weight=-5)
dg.add_edge(2, 2, weight=-1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-4)
dg.add_edge(3, 2, weight=4)
dg.add_edge(4, 4, weight=-10)
computed = network_utils.get_all_degrees(dg)
expected = (
{1: {'self': 6, 'out': -4, 'in': -4},
2: {'self': -1, 'out': 1, 'in': 5},
3: {'self': 0, 'out': 0, 'in': 1},
4: {'self': -10, 'out': 0, 'in': -5},
5: {'self': 0, 'out': 0, 'in': 0}})
self.assertDictEqual(computed, expected)
# =========================================================================
# ===================== get_just_periods ==================================
# =========================================================================
def test_get_just_periods(self):
matrix_edges = [[1, 2, +1, datetime.datetime(2017, 1, 1)],
[2, 3, +3, datetime.datetime(2017, 1, 4)],
[3, 1, +1, datetime.datetime(2017, 2, 5)],
[1, 4, -1, datetime.datetime(2017, 2, 13)],
[4, 3, -5, datetime.datetime(2017, 2, 24)],
[-1, -1, -1, datetime.datetime(2017, 2, 28)]]
sample_edge_list = pd.DataFrame(
matrix_edges, columns=['source', 'target', 'weight', 'edge_date'])
expected = [['2017-01-01', '2017-01-29'], ['2017-01-29', '2017-02-26']]
computed = network_utils.get_just_periods(
sample_edge_list, weeks=4, accumulative=False)
self.assertEqual(expected, computed)
# =========================================================================
# ==================== get_metrics_for_network ============================
# =========================================================================
def test_get_metrics_for_network(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-1)
dg.add_edge(1, 3, weight=-1)
computed = network_utils.get_metrics_for_network(dg)
expected = {
'#edges': 4,
'#edges/#nodes': 1,
'#gcc edges': 3,
'#gcc neg edges': 1,
'#gcc nodes': 3,
'#gcc pos edges': 2,
'#neg edges': 2,
'#nodes': 4,
'#pos edges': 2,
'algebraic connectivity': 0,
'average (und) clustering coefficient': 0.75,
'average betweenness': 0.0833,
'average closeness': 0.3888,
'average degree': 2,
'average eigenvector': 0.4222,
'average harmonic': 1.25,
'average in degree': 1,
'average w in degree': 0,
'average w out degree': 0,
'average load': 0.0833,
'average out degree': 1,
'gcc algebraic connectivity': 2.9999,
'gcc diameter': 1,
'unbalanced cycles 3 ratio': 1,
'weights max': 1,
'weights average': 0,
'weights min': -1,
'weights std': 1
}
# utils.print_dict_pretty(computed)
# self.assertDictEqual(computed, expected)
for key, value in expected.items():
self.assertAlmostEqual(value, computed[key], places=3)
# =========================================================================
# ====================== cartwright_harary_balance_ratio ==================
# =========================================================================
def test_cartwright_harary_balance_ratio_notbalanced_graph1(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-1)
self.assertEqual(network_utils.cartwright_harary_balance_ratio(dg), 0)
def test_cartwright_harary_balance_ratio_notbalanced_graph2(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=-1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
self.assertEqual(network_utils.cartwright_harary_balance_ratio(dg), 0)
def test_cartwright_harary_balance_ratio_balanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
self.assertEqual(network_utils.cartwright_harary_balance_ratio(dg), 1)
def test_cartwright_harary_balance_ratio_halfbalanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
dg.add_edge(3, 4, weight=-1)
dg.add_edge(4, 1, weight=-1)
dg.add_edge(1, 5, weight=1)
dg.add_edge(5, 1, weight=-1)
dg.add_edge(2, 1, weight=1)
self.assertEqual(
network_utils.cartwright_harary_balance_ratio(dg), 0.5)
# =========================================================================
# ========================= sprase_balance_ratio ==========================
# =========================================================================
def test_sparse_balance_ratio_raises_when_incorrect_balance_type(self):
with self.assertRaises(ValueError):
network_utils.sprase_balance_ratio(
dgraph=nx.DiGraph(),
balance_type=0)
@parameterized.expand([
['CartwrightHarary', 1, [0.3, 3, 7]],
['Clustering', 2, [0.5, 5, 5]],
['Transitivity', 3, [0.9, 9, 1]]])
def test_sprase_balance_ratio(
self,
name,
balance_type,
expected_values):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 2, weight=5)
dg.add_edge(2, 3, weight=-4)
dg.add_edge(3, 1, weight=-7)
dg.add_edge(3, 4, weight=-1)
dg.add_edge(4, 1, weight=-2)
dg.add_edge(1, 5, weight=9)
dg.add_edge(5, 1, weight=-11)
dg.add_edge(2, 1, weight=100)
computed = network_utils.sprase_balance_ratio(
dgraph=dg,
balance_type=balance_type)
np.testing.assert_array_almost_equal(
computed, expected_values, decimal=2)
# =========================================================================
# ======================= fullyconnected_balance_ratio ====================
# =========================================================================
def test_fullyconnected_balance_ratio_raises_when_incorrect_balance_type(
self):
with self.assertRaises(ValueError):
network_utils.fullyconnected_balance_ratio(
dgraph=nx.DiGraph(),
balance_type=0)
def test_fullyconnected_balance_ratio_raises_when_negative_in_dgraph(self):
with self.assertRaises(ValueError):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=-1)
network_utils.fullyconnected_balance_ratio(
dgraph=dg,
balance_type=1)
@parameterized.expand([
['Classical', 1, [0.4, 4, 6]],
['Clustering', 2, [0.7, 7, 3]],
['Transitivity', 3, [0.8, 8, 2]]])
def test_fullyconnected_balance_ratio(
self,
name,
balance_type,
expected_values):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(5, 1, weight=1)
dg.add_edge(1, 5, weight=1)
dg.add_edge(5, 2, weight=1)
dg.add_edge(2, 5, weight=1)
dg.add_edge(5, 3, weight=1)
dg.add_edge(2, 3, weight=1)
computed = network_utils.fullyconnected_balance_ratio(
dgraph=dg,
balance_type=balance_type)
np.testing.assert_array_almost_equal(
computed, expected_values, decimal=2)
# =========================================================================
# ====================== count_different_signed_edges =====================
# =========================================================================
def test_count_different_signed_edges(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(3, 1, weight=-5)
dg.add_edge(1, 3, weight=-2)
self.assertEqual(network_utils.count_different_signed_edges(dg), 0)
def test_count_different_signed_edges1(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=3)
dg.add_edge(2, 1, weight=4)
dg.add_edge(3, 1, weight=1)
dg.add_edge(1, 3, weight=-1)
self.assertEqual(network_utils.count_different_signed_edges(dg), 1)
def test_count_different_signed_edges2(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=-1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(3, 1, weight=9)
dg.add_edge(1, 3, weight=-2)
self.assertEqual(network_utils.count_different_signed_edges(dg), 2)
# =========================================================================
# ==================== terzi_sprase_balance_ratio =========================
# =========================================================================
def test_terzi_sprase_balance_ratio_notbalanced_graph1(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-1)
expected = 0
computed = network_utils.terzi_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_terzi_sprase_balance_ratio_notbalanced_graph2(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=-1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
expected = 0
computed = network_utils.terzi_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_terzi_sprase_balance_ratio_balanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
expected = 1
computed = network_utils.terzi_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_terzi_sprase_balance_ratio_halfbalanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
dg.add_edge(3, 4, weight=-1)
dg.add_edge(4, 1, weight=-1)
dg.add_edge(1, 5, weight=1)
dg.add_edge(5, 1, weight=-1)
dg.add_edge(2, 1, weight=1)
expected = 0.5
computed = network_utils.terzi_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
# =========================================================================
# ================= kunegis_sprase_balance_ratio ==========================
# =========================================================================
def test_kunegis_sprase_balance_ratio_notbalanced_graph1(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-1)
expected = 0
computed = network_utils.kunegis_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_kunegis_sprase_balance_ratio_notbalanced_graph2(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=-1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
expected = 0
computed = network_utils.kunegis_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_kunegis_sprase_balance_ratio_balanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
expected = 1
computed = network_utils.kunegis_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_kunegis_sprase_balance_ratio_halfbalanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
dg.add_edge(3, 4, weight=-1)
dg.add_edge(4, 1, weight=-1)
dg.add_edge(1, 5, weight=1)
dg.add_edge(5, 1, weight=-1)
dg.add_edge(2, 1, weight=1)
expected = 0.6
computed = network_utils.kunegis_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected, decimal=1)
# =========================================================================
# ====================== compute_vanderijt_edge_balance ===================
# =========================================================================
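    # For each reported edge, '#nodes3' appears to count the third nodes that
    # close a triad with that edge, and '#balanced_node3' how many of those
    # closures are balanced (see the expected dictionaries below).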
def test_compute_vanderijt_edge_balance_small_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(2, 3, weight=-5)
dg.add_edge(3, 1, weight=-2)
expected = {(2, 1): {'#nodes3': 1, '#balanced_node3': 1}}
computed = network_utils.compute_vanderijt_edge_balance(dg)
self.assertDictEqual(computed, expected)
def test_compute_vanderijt_edge_balance_allnegative_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=-1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(1, 3, weight=-1)
dg.add_edge(2, 4, weight=-1)
dg.add_edge(4, 2, weight=-1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(3, 2, weight=1)
dg.add_edge(3, 1, weight=-1)
dg.add_edge(4, 1, weight=-5)
dg.add_edge(1, 4, weight=-5)
dg.add_edge(4, 3, weight=-2)
dg.add_edge(3, 4, weight=1)
expected = {
(1, 2): {'#balanced_node3': 1, '#nodes3': 2},
(3, 2): {'#balanced_node3': 1, '#nodes3': 2},
(1, 3): {'#balanced_node3': 0, '#nodes3': 2},
(3, 4): {'#balanced_node3': 1, '#nodes3': 2},
(3, 1): {'#balanced_node3': 1, '#nodes3': 2},
(1, 4): {'#balanced_node3': 1, '#nodes3': 2},
(2, 3): {'#balanced_node3': 1, '#nodes3': 2},
(2, 1): {'#balanced_node3': 2, '#nodes3': 2},
(4, 3): {'#balanced_node3': 0, '#nodes3': 2},
(4, 2): {'#balanced_node3': 1, '#nodes3': 2},
(4, 1): {'#balanced_node3': 1, '#nodes3': 2},
(2, 4): {'#balanced_node3': 2, '#nodes3': 2}}
computed = network_utils.compute_vanderijt_edge_balance(dg)
self.assertDictEqual(computed, expected)
# @parameterized.expand(
# [["no_isomorph_cycles", False], ["no_isomorph_cycles", True]])
# def test_compute_vanderijt_edge_balance_small_graph(
# self, name, no_isomorph_cycles):
# dg = nx.DiGraph()
# dg.add_nodes_from([1, 2, 3])
# dg.add_edge(1, 2, weight=1)
# dg.add_edge(2, 1, weight=1)
# dg.add_edge(2, 3, weight=-5)
# dg.add_edge(3, 1, weight=-2)
# if no_isomorph_cycles:
# expected = {
# (1, 2): {
# '#balanced': 1,
# '#cycle3': 1,
# 'weight_distance': 9,
# 'as_expected_sign': True}}
# else:
# expected = {
# (1, 2): {
# '#balanced': 1,
# '#cycle3': 1,
# 'weight_distance': 9,
# 'as_expected_sign': True},
# (3, 1): {
# '#balanced': 1,
# '#cycle3': 1,
# 'weight_distance': 3,
# 'as_expected_sign': True},
# (2, 3): {
# '#balanced': 1,
# '#cycle3': 1,
# 'weight_distance': 3,
# 'as_expected_sign': True}}
# computed = network_utils.compute_vanderijt_edge_balance(
# dg, no_isomorph_cycles=no_isomorph_cycles)
# self.assertDictEqual(computed, expected)
# @parameterized.expand(
# [["no_isomorph_cycles", False],
# ["no_isomorph_cycles", True]])
# def test_compute_vanderijt_edge_balance_allnegative_graph(
# self, name, no_isomorph_cycles):
# dg = nx.DiGraph()
# dg.add_nodes_from([1, 2, 3, 4])
# dg.add_edge(1, 2, weight=-1)
# dg.add_edge(2, 3, weight=-1)
# dg.add_edge(3, 1, weight=-1)
# dg.add_edge(1, 4, weight=-5)
# dg.add_edge(4, 3, weight=-2)
# if no_isomorph_cycles:
# expected = {
# (1, 2): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 2,
# 'as_expected_sign': False},
# (1, 4): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 7,
# 'as_expected_sign': False}}
# else:
# expected = {
# (1, 2): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 2,
# 'as_expected_sign': False},
# (1, 4): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 7,
# 'as_expected_sign': False},
# (2, 3): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 2,
# 'as_expected_sign': False},
# (3, 1): {
# '#balanced': 0,
# '#cycle3': 2,
# 'weight_distance': 13,
# 'as_expected_sign': False},
# (4, 3): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 7,
# 'as_expected_sign': False}}
# computed = network_utils.compute_vanderijt_edge_balance(
# dg, no_isomorph_cycles=no_isomorph_cycles)
# self.assertDictEqual(computed, expected)
# =========================================================================
# ====================== compute_fairness_goodness ========================
# =========================================================================
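    # Fairness/goodness (presumably in the sense of Kumar et al.): goodness of
    # a node is roughly the fairness-weighted average of its incoming edge
    # weights, and fairness measures how closely a node's outgoing ratings
    # match the goodness of their targets.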
def test_compute_fairness_goodness(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=1.0)
dg.add_edge(2, 3, weight=1.0)
dg.add_edge(3, 1, weight=1.0)
dg.add_edge(1, 4, weight=2.0)
dg.add_edge(4, 3, weight=-1.0)
expected = {'fairness': {1: 1.0, 2: 0.95, 3: 1.0, 4: 0.95},
'goodness': {1: 1.0, 2: 1.0, 3: 0.0, 4: 2.0}}
computed = network_utils.compute_fairness_goodness(dg, verbose=False)
self.assertDictEqual(computed, expected)
# =========================================================================
# ====================== is_sparsely_transitive_balanced ==================
# =========================================================================
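    # Triad labels follow the MAN census naming (e.g. 120U, 030T, 021C);
    # suffixes such as 'Z', '2neg' and 'posneg' appear to mark variants with
    # absent (zero-weight) or negative edges in the sparse signed encoding.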
def test_is_sparsely_transitive_balanced_raises_when_self_loops(self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_sparsely_transitive_balanced(triad_with_self_loop)
@parameterized.expand([
["120U", np.array(
[[0, 1, 1],
[1, 0, 1],
[-1, -1, 0]]), True],
["120D", np.array(
[[0, 1, -1],
[1, 0, -1],
[1, 1, 0]]), True],
["0122Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]]), True],
["030TZ", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), True],
["003", np.array(
[[0, -1, -1],
[-1, 0, -1],
[-1, -1, 0]]), True],
["0032Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[-1, -1, 0]]), True],
["030T", np.array(
[[0, 1, 1],
[-1, 0, 1],
[-1, -1, 0]]), True],
["021C", np.array(
[[0, 1, -1],
[-1, 0, 1],
[-1, -1, 0]]), False],
["030T2negZ", np.array(
[[0, 1, -1],
[0, 0, -1],
[0, 0, 0]]), True],
["021UnegZ", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, -1, 0]]), True],
["021DZ", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), True],
["210", np.array(
[[0, 1, -1],
[1, 0, 1],
[1, 1, 0]]), False],
["210Z", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False],
["003Z", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["102Z", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["102negZ", np.array(
[[0, -1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["102posnegZ", np.array(
[[0, 1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["012Z", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["012", np.array(
[[0, 1, -1],
[-1, 0, -1],
[-1, -1, 0]]), True]]
)
def test_is_sparsely_transitive_balanced(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_sparsely_transitive_balanced(triad),
expected_balance)
# =========================================================================
# ====================== is_sparsely_cartwright_harary_balanced ===========
# =========================================================================
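    # Cartwright-Harary balance is stricter than the transitive notion above:
    # e.g. 120U, 120D, the all-negative 003 and 012 are balanced there but
    # unbalanced here.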
def test_is_sparsely_cartwright_harary_balanced_raises_when_self_loops(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_sparsely_cartwright_harary_balanced(
triad_with_self_loop)
@parameterized.expand([
["120U", np.array(
[[0, 1, 1],
[1, 0, 1],
[-1, -1, 0]]), False],
["120D", np.array(
[[0, 1, -1],
[1, 0, -1],
[1, 1, 0]]), False],
["0122Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]]), False],
["030TZ", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), True],
["003", np.array(
[[0, -1, -1],
[-1, 0, -1],
[-1, -1, 0]]), False],
["0032Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[-1, -1, 0]]), False],
["030T", np.array(
[[0, 1, 1],
[-1, 0, 1],
[-1, -1, 0]]), False],
["021C", np.array(
[[0, 1, -1],
[-1, 0, 1],
[-1, -1, 0]]), False],
["030T2negZ", np.array(
[[0, 1, -1],
[0, 0, -1],
[0, 0, 0]]), True],
["021UnegZ", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, -1, 0]]), True],
["021DZ", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), True],
["210", np.array(
[[0, 1, -1],
[1, 0, 1],
[1, 1, 0]]), False],
["210Z", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False],
["003Z", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["102Z", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["102negZ", np.array(
[[0, -1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["102posnegZ", np.array(
[[0, 1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["012Z", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["012", np.array(
[[0, 1, -1],
[-1, 0, -1],
[-1, -1, 0]]), False]]
)
def test_is_sparsely_cartwright_harary_balanced(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_sparsely_cartwright_harary_balanced(triad),
expected_balance)
# =========================================================================
# ====================== is_sparsely_clustering_balanced ==================
# =========================================================================
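    # Clustering balance again admits all-negative triads, so 003 and 012 are
    # balanced here even though they fail the Cartwright-Harary test above.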
def test_is_sparsely_clustering_balanced_raises_when_self_loops(self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_sparsely_clustering_balanced(
triad_with_self_loop)
@parameterized.expand([
["120U", np.array(
[[0, 1, 1],
[1, 0, 1],
[-1, -1, 0]]), False],
["120D", np.array(
[[0, 1, -1],
[1, 0, -1],
[1, 1, 0]]), False],
["0122Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]]), True],
["030TZ", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), True],
["003", np.array(
[[0, -1, -1],
[-1, 0, -1],
[-1, -1, 0]]), True],
["0032Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[-1, -1, 0]]), True],
["030T", np.array(
[[0, 1, 1],
[-1, 0, 1],
[-1, -1, 0]]), False],
["021C", np.array(
[[0, 1, -1],
[-1, 0, 1],
[-1, -1, 0]]), False],
["030T2negZ", np.array(
[[0, 1, -1],
[0, 0, -1],
[0, 0, 0]]), True],
["021UnegZ", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, -1, 0]]), True],
["021DZ", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), True],
["210", np.array(
[[0, 1, -1],
[1, 0, 1],
[1, 1, 0]]), False],
["210Z", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False],
["003Z", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["102Z", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["102negZ", np.array(
[[0, -1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["102posnegZ", np.array(
[[0, 1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["012Z", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["012", np.array(
[[0, 1, -1],
[-1, 0, -1],
[-1, -1, 0]]), True]]
)
def test_is_sparsely_clustering_balanced(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_sparsely_clustering_balanced(triad),
expected_balance)
# =========================================================================
# ========= is_fullyconnected_cartwright_harary_balance ===================
# =========================================================================
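    # The fully-connected variants expect an unsigned 0/1 adjacency matrix
    # (negative entries raise ValueError) and classify the 16 MAN census triad
    # types; under Cartwright-Harary balance only 300 and 102 qualify.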
def test_is_fullyconnected_cartwright_harary_balance_raises_when_selfloops(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_fullyconnected_cartwright_harary_balance(
triad_with_self_loop)
def test_is_fullyconnected_cartwright_harary_balance_raises_when_negative(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[-1, 0, 0]])
network_utils.is_fullyconnected_cartwright_harary_balance(
triad_with_self_loop)
@parameterized.expand([
["300", np.array(
[[0, 1, 1],
[1, 0, 1],
[1, 1, 0]]), True],
["102", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["003", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), False],
["120D", np.array(
[[0, 0, 1],
[1, 0, 1],
[1, 0, 0]]), False],
["120U", np.array(
[[0, 1, 1],
[0, 0, 0],
[1, 1, 0]]), False],
["030T", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), False],
["021D", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), False],
["021U", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 1, 0]]), False],
["012", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), False],
["021C", np.array(
[[0, 1, 0],
[0, 0, 1],
[0, 0, 0]]), False],
["111U", np.array(
[[0, 1, 0],
[1, 0, 1],
[0, 0, 0]]), False],
["111D", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 1, 0]]), False],
["030C", np.array(
[[0, 1, 0],
[0, 0, 1],
[1, 0, 0]]), False],
["201", np.array(
[[0, 1, 1],
[1, 0, 0],
[1, 0, 0]]), False],
["120C", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 0, 0]]), False],
["210", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False]]
)
def test_is_fullyconnected_cartwright_harary_balance(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_fullyconnected_cartwright_harary_balance(triad),
expected_balance)
# =========================================================================
# =============== is_fullyconnected_clustering_balanced ===================
# =========================================================================
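    # The clustering criterion additionally accepts the sparser 003 and 012
    # triad types as balanced.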
def test_is_fullyconnected_clustering_balanced_raises_when_self_loops(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_fullyconnected_clustering_balanced(
triad_with_self_loop)
def test_is_fullyconnected_clustering_balanced_raises_when_negative(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[-1, 0, 0]])
network_utils.is_fullyconnected_clustering_balanced(
triad_with_self_loop)
@parameterized.expand([
["300", np.array(
[[0, 1, 1],
[1, 0, 1],
[1, 1, 0]]), True],
["102", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["003", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["120D", np.array(
[[0, 0, 1],
[1, 0, 1],
[1, 0, 0]]), False],
["120U", np.array(
[[0, 1, 1],
[0, 0, 0],
[1, 1, 0]]), False],
["030T", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), False],
["021D", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), False],
["021U", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 1, 0]]), False],
["012", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["021C", np.array(
[[0, 1, 0],
[0, 0, 1],
[0, 0, 0]]), False],
["111U", np.array(
[[0, 1, 0],
[1, 0, 1],
[0, 0, 0]]), False],
["111D", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 1, 0]]), False],
["030C", np.array(
[[0, 1, 0],
[0, 0, 1],
[1, 0, 0]]), False],
["201", np.array(
[[0, 1, 1],
[1, 0, 0],
[1, 0, 0]]), False],
["120C", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 0, 0]]), False],
["210", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False]]
)
def test_is_fullyconnected_clustering_balanced(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_fullyconnected_clustering_balanced(triad),
expected_balance)
# =========================================================================
# ============= is_fullyconnected_transitivity_balanced ===================
# =========================================================================
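    # Transitivity balance is the most permissive of the three fully-connected
    # criteria: only the seven triad types containing an intransitive pattern
    # (021C, 111U, 111D, 030C, 201, 120C, 210) are unbalanced below.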
def test_is_fullyconnected_transitivity_balanced_raises_when_self_loops(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_fullyconnected_transitivity_balanced(
triad_with_self_loop)
def test_is_fullyconnected_transitivity_balanced_raises_when_negative(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[-1, 0, 0]])
network_utils.is_fullyconnected_transitivity_balanced(
triad_with_self_loop)
@parameterized.expand([
["300", np.array(
[[0, 1, 1],
[1, 0, 1],
[1, 1, 0]]), True],
["102", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["003", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["120D", np.array(
[[0, 0, 1],
[1, 0, 1],
[1, 0, 0]]), True],
["120U", np.array(
[[0, 1, 1],
[0, 0, 0],
[1, 1, 0]]), True],
["030T", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), True],
["021D", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), True],
["021U", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 1, 0]]), True],
["012", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["021C", np.array(
[[0, 1, 0],
[0, 0, 1],
[0, 0, 0]]), False],
["111U", np.array(
[[0, 1, 0],
[1, 0, 1],
[0, 0, 0]]), False],
["111D", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 1, 0]]), False],
["030C", np.array(
[[0, 1, 0],
[0, 0, 1],
[1, 0, 0]]), False],
["201", np.array(
[[0, 1, 1],
[1, 0, 0],
[1, 0, 0]]), False],
["120C", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 0, 0]]), False],
["210", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False]]
)
def test_is_fullyconnected_transitivity_balanced(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_fullyconnected_transitivity_balanced(triad),
expected_balance)
# # =======================================================================
# # =================== is_sparsely_ranked_clustering_balanced ============
# # =======================================================================
# def test_is_sparsely_ranked_clustering_balanced_raises_when_self_loops(
# self):
# with self.assertRaises(ValueError):
# triad_with_self_loop = np.array(
# [[0, 1, 0],
# [0, 1, 1],
# [0, 0, 0]])
# network_utils.is_sparsely_ranked_clustering_balanced(
# triad_with_self_loop)
# @parameterized.expand([
# ["120U", np.array(
# [[0, 1, 1],
# [1, 0, 1],
# [-1, -1, 0]]), True],
# ["120D", np.array(
# [[0, 1, -1],
# [1, 0, -1],
# [1, 1, 0]]), True],
# ["0122Z", np.array(
# [[0, 0, -1],
# [-1, 0, 0],
# [1, -1, 0]]), True],
# ["030TZ", np.array(
# [[0, 1, 1],
# [0, 0, 1],
# [0, 0, 0]]), True],
# ["003", np.array(
# [[0, -1, -1],
# [-1, 0, -1],
# [-1, -1, 0]]), True],
# ["0032Z", np.array(
# [[0, 0, -1],
# [-1, 0, 0],
# [-1, -1, 0]]), True],
# ["030T", np.array(
# [[0, 1, 1],
# [-1, 0, 1],
# [-1, -1, 0]]), False],
# ["021C", np.array(
# [[0, 1, -1],
# [-1, 0, 1],
# [-1, -1, 0]]), False],
# ["030T2negZ", np.array(
# [[0, 1, -1],
# [0, 0, -1],
# [0, 0, 0]]), True],
# ["021UnegZ", np.array(
# [[0, 1, 0],
# [0, 0, 0],
# [0, -1, 0]]), True],
# ["021DZ", np.array(
# [[0, 0, 0],
# [1, 0, 1],
# [0, 0, 0]]), True],
# ["210", np.array(
# [[0, 1, -1],
# [1, 0, 1],
# [1, 1, 0]]), False],
# ["210Z", np.array(
# [[0, 1, 0],
# [1, 0, 1],
# [1, 1, 0]]), False],
# ["003Z", np.array(
# [[0, 0, 0],
# [0, 0, 0],
# [0, 0, 0]]), True],
# ["102Z", np.array(
# [[0, 1, 0],
# [1, 0, 0],
# [0, 0, 0]]), True],
# ["102negZ", np.array(
# [[0, -1, 0],
# [-1, 0, 0],
# [0, 0, 0]]), True],
# ["102posnegZ", np.array(
# [[0, 1, 0],
# [-1, 0, 0],
# [0, 0, 0]]), True],
# ["012Z", np.array(
# [[0, 1, 0],
# [0, 0, 0],
# [0, 0, 0]]), True],
# ["012", np.array(
# [[0, 1, -1],
# [-1, 0, -1],
# [-1, -1, 0]]), True]]
# )
# def test_is_sparsely_ranked_clustering_balanced(
# self, name, triad, expected_balance):
# self.assertEqual(
# network_utils.is_sparsely_ranked_clustering_balanced(triad),
# expected_balance)
# =========================================================================
# ====================== get_all_triad_permutations =======================
# =========================================================================
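    # The six expected strings are the 3! node relabelings (simultaneous
    # row/column permutations) of the triad adjacency matrix, apparently
    # serialized as numpy array strings.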
def test_get_all_triad_permutations(self):
triad_adj_matrix = np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]])
expected = set([
'[[0 1 1]\n [1 0 1]\n [1 0 0]]',
'[[0 0 1]\n [1 0 1]\n [1 1 0]]',
'[[0 1 1]\n [0 0 1]\n [1 1 0]]',
'[[0 1 1]\n [1 0 1]\n [0 1 0]]',
'[[0 1 1]\n [1 0 0]\n [1 1 0]]',
'[[0 1 0]\n [1 0 1]\n [1 1 0]]'])
computed = network_utils._get_all_triad_permutations(triad_adj_matrix)
self.assertEqual(expected, computed)
# =========================================================================
# ====================== generate_all_possible_sparse_triads ==============
# =========================================================================
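    # generate_all_possible_sparse_triads returns (triad_map, triad_list): a
    # list of the 138 canonical sparse signed triads, plus a map from the
    # string form of a triad (apparently including its permutations) to its
    # index in that list.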
def test_generate_all_possible_sparse_triads(self):
computed_triad_map, computed_triad_list = (
network_utils.generate_all_possible_sparse_triads())
# Testing triad_list
self.assertTrue(
len(computed_triad_list) == 138,
'Length of triad_list is not correct.')
np.testing.assert_array_equal(
computed_triad_list[0], np.array(
[[0, 0, 0],
[0, 0, 0],
                 [0, 0, 0]]), 'First element of triad_list is incorrect.')
np.testing.assert_array_equal(
computed_triad_list[-1], np.array(
[[0, -1, -1],
[-1, 0, -1],
                 [-1, -1, 0]]), 'Last element of triad_list is incorrect.')
np.testing.assert_array_equal(
computed_triad_list[69], np.array(
[[0, 0, 1],
[1, 0, -1],
                 [1, 0, 0]]), 'Middle element of triad_list is incorrect.')
# Testing triad_map.
expected_key1 = '[[0 0 0]\n [1 0 0]\n [0 0 0]]'
expected_value1 = 1
expected_key2 = '[[ 0 1 1]\n [-1 0 1]\n [-1 -1 0]]'
expected_value2 = 129
self.assertTrue(
expected_key1 in computed_triad_map,
'First key was not found in computed_triad_map.')
self.assertTrue(
expected_key2 in computed_triad_map,
'Second key was not found in computed_triad_map.')
self.assertEqual(
computed_triad_map[expected_key1], expected_value1,
            'First value in computed_triad_map is incorrect.')
self.assertEqual(
computed_triad_map[expected_key2], expected_value2,
            'Second value in computed_triad_map is incorrect.')
# =========================================================================
# ====================== detect_triad_type_for_all_subgraph3 ==============
# =========================================================================
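    # Each 3-node subgraph is keyed by its sorted node tuple and mapped to the
    # index of its triad type in triad_list (the commented matrices below show
    # the detected canonical form).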
def test_detect_triad_type_for_all_subgraph3(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=1)
dg.add_edge(1, 4, weight=2)
dg.add_edge(4, 3, weight=-5)
expected = {
'(1, 2, 3)': 55,
# [[0, 0, 1],
# [1, 0, 0],
# [0, 1, 0]]
'(1, 2, 4)': 3,
# [[0, 0, 0],
# [0, 0, 0],
# [1, 1, 0]]
'(1, 3, 4)': 56,
# [[0, 0, 1],
# [1, 0, 0],
# [0,-1, 0]]
'(2, 3, 4)': 24
# [[0, 0, 0],
# [1, 0, 0],
# [-1, 0, 0]]
}
computed = network_utils._detect_triad_type_for_all_subgraph3(
dgraph=dg, triad_map=self.triad_map)
self.assertDictEqual(expected, computed)
def test_detect_triad_type_for_all_subgraph3_nodes_with_str_name(self):
dg = nx.DiGraph()
dg.add_nodes_from(['b', 'c', 'a', 'd'])
dg.add_edge('b', 'c', weight=1)
dg.add_edge('c', 'a', weight=1)
dg.add_edge('a', 'b', weight=1)
dg.add_edge('b', 'd', weight=2)
dg.add_edge('d', 'a', weight=-5)
expected = {
"('a', 'b', 'c')": 55,
"('a', 'b', 'd')": 56,
"('a', 'c', 'd')": 24,
"('b', 'c', 'd')": 3
}
computed = network_utils._detect_triad_type_for_all_subgraph3(
dgraph=dg, triad_map=self.triad_map)
self.assertDictEqual(expected, computed)
def test_detect_triad_type_for_all_subgraph3_has_unique_keys(self):
dg = nx.DiGraph()
dg.add_nodes_from(['b', 'c', 'a', 'd'])
dg.add_edge('b', 'c', weight=1)
dg.add_edge('c', 'a', weight=1)
dg.add_edge('a', 'b', weight=1)
dg.add_edge('b', 'd', weight=2)
dg.add_edge('d', 'a', weight=-5)
computed = network_utils._detect_triad_type_for_all_subgraph3(
dgraph=dg, triad_map=self.triad_map)
truncated_keys = []
for key in list(computed.keys()):
key = re.sub(r'[^\w]', ' ', key)
key = key.replace(" ", "")
truncated_keys.append(''.join(sorted(key)))
self.assertEqual(len(truncated_keys), len(np.unique(truncated_keys)))
# =========================================================================
# ====================== compute_transition_matrix ========================
# =========================================================================
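    # Entry [i, j] of a transition matrix counts triads that are of type i in
    # one snapshot and of type j in the next, e.g. triad (1, 2, 3) moves from
    # type 76 in dg1 to type 63 in dg2.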
def test_compute_transition_matrix(self):
dg1 = nx.DiGraph()
dg1.add_nodes_from([1, 2, 3, 4])
dg1.add_edge(1, 2, weight=1)
dg1.add_edge(2, 1, weight=1)
dg1.add_edge(2, 3, weight=1)
dg1.add_edge(3, 1, weight=-1)
dg1.add_edge(3, 4, weight=1)
dg2 = nx.DiGraph()
dg2.add_nodes_from([1, 2, 3, 4])
dg2.add_edge(1, 2, weight=1)
dg2.add_edge(1, 3, weight=1)
dg2.add_edge(2, 1, weight=1)
dg2.add_edge(2, 3, weight=1)
dg2.add_edge(2, 4, weight=1)
dg2.add_edge(3, 1, weight=1)
dg2.add_edge(3, 4, weight=1)
dg2.add_edge(4, 1, weight=1)
dgraphs = [dg1, dg2]
triads_types = [
{'(1, 2, 3)': 76,
'(1, 2, 4)': 6,
'(1, 3, 4)': 4,
'(2, 3, 4)': 8},
{'(1, 2, 3)': 63,
'(1, 2, 4)': 57,
'(1, 3, 4)': 57,
'(2, 3, 4)': 22}]
n = len(self.triad_list)
transition_matrix = np.zeros((n, n))
transition_matrix[76, 63] = 1
transition_matrix[6, 57] = 1
transition_matrix[4, 57] = 1
transition_matrix[8, 22] = 1
computed = network_utils.compute_transition_matrix(
dgraphs=dgraphs,
unique_triad_num=n,
triad_map=self.triad_map)
# self.assertDictEqual(expected, computed)
self.assertTrue(
'triads_types' in computed,
'triads_types was not found in computed transition matrix.')
self.assertTrue(
'transition_matrices' in computed,
'transition_matrices was not found in computed transition matrix.')
self.assertEqual(
triads_types,
computed['triads_types'],
'Triad types were different.')
np.testing.assert_array_equal(
transition_matrix,
computed['transition_matrices'][0],
'Transition matrices were different.')
# =========================================================================
# ====================== get_stationary_distribution ======================
# =========================================================================
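    # A stationary distribution pi of a row-stochastic matrix P satisfies
    # pi @ P == pi; aperiodic_irreducible_eps apparently smooths P (and
    # non-stochastic rows get renormalized) so that such a pi is unique. For
    # the full matrix below the exact solution is [8, 10, 11] / 29.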
def test_get_stationary_distribution_simple(self):
transition_matrix = np.array(
[[0, 0, 1],
[0, 0, 1],
[0, 0, 1]], dtype=float)
expected = np.array([0, 0, 1])
computed = network_utils.get_stationary_distribution(
transition_matrix, aperiodic_irreducible_eps=0.0)
np.testing.assert_array_almost_equal(expected, computed, decimal=4)
def test_get_stationary_distribution_full_matrix(self):
transition_matrix = np.array(
[[0.6, 0.1, 0.3],
[0.1, 0.7, 0.2],
[0.2, 0.2, 0.6]], dtype=float)
expected = np.array([0.2759, 0.3448, 0.3793])
computed = network_utils.get_stationary_distribution(
transition_matrix, aperiodic_irreducible_eps=0.0)
np.testing.assert_array_almost_equal(expected, computed, decimal=4)
def test_get_stationary_distribution_not_row_stochastic(self):
transition_matrix = np.array(
[[0, 0, 0],
[9, 0, 1],
[1, 0, 3]], dtype=float)
expected = np.array([0.3571, 0.1191, 0.5238])
computed = network_utils.get_stationary_distribution(
transition_matrix, aperiodic_irreducible_eps=0.0001)
np.testing.assert_array_almost_equal(expected, computed, decimal=4)
def test_get_stationary_distribution(self):
transition_matrix = np.array(
[[0, 0, 0],
[0.9, 0, 0.1],
[0.25, 0, 0.75]], dtype=float)
expected = np.array([0.3571, 0.1191, 0.5238])
computed = network_utils.get_stationary_distribution(
transition_matrix, aperiodic_irreducible_eps=0.0001)
np.testing.assert_array_almost_equal(expected, computed, decimal=4)
# =========================================================================
# ====================== get_mixing_time_range ============================
# =========================================================================
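    # The mixing time is (roughly) the number of steps needed before the
    # chain's distribution comes within distance_from_stationary_eps of the
    # stationary distribution.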
def test_get_mixing_time_range(self):
transition_matrix = np.array(
[[0, 0, 0],
[0.9, 0, 0.1],
[0.25, 0, 0.75]], dtype=float)
expected = 13.7081
computed = network_utils.get_mixing_time_range(
transition_matrix,
aperiodic_irreducible_eps=0.0001,
distance_from_stationary_eps=0.01)
self.assertEqual(np.round(expected, 4), np.round(computed, 4))
# =========================================================================
# ====================== _randomize_network ===============================
# =========================================================================
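    # _randomize_network presumably performs degree-preserving edge switching
    # (double-edge swaps); the tests only check that the node set and the
    # degree sequence of the original graph are preserved.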
def test_randomize_network_with_unweighted_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5, 6])
dg.add_edge(1, 2)
dg.add_edge(2, 1)
dg.add_edge(2, 3)
dg.add_edge(3, 1)
dg.add_edge(3, 4)
dg.add_edge(4, 5)
dg.add_edge(5, 4)
dg.add_edge(1, 6)
dg.add_edge(6, 1)
dg.add_edge(6, 5)
computed = network_utils._randomize_network(dg, switching_count_coef=2)
self.assertEqual(
sorted(dict(dg.degree()).values()),
sorted(dict(computed.degree()).values()))
self.assertEqual(
sorted(dg.nodes()),
sorted(computed.nodes()))
def test_randomize_network_with_all_positive_weighted_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5, 6])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=2)
dg.add_edge(3, 4, weight=5)
dg.add_edge(4, 5, weight=9)
dg.add_edge(5, 4, weight=6)
dg.add_edge(1, 6, weight=9)
dg.add_edge(6, 1, weight=1)
dg.add_edge(6, 5, weight=16)
computed = network_utils._randomize_network(dg, switching_count_coef=2)
self.assertEqual(
sorted(dict(dg.degree()).values()),
sorted(dict(computed.degree()).values()))
self.assertEqual(
sorted(dg.nodes()),
sorted(computed.nodes()))
def test_randomize_network_with_signed_weighted_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5, 6])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-2)
dg.add_edge(3, 4, weight=5)
dg.add_edge(4, 5, weight=9)
dg.add_edge(5, 4, weight=-6)
dg.add_edge(1, 6, weight=-9)
dg.add_edge(6, 1, weight=1)
dg.add_edge(6, 5, weight=-16)
computed = network_utils._randomize_network(dg, switching_count_coef=2)
self.assertEqual(
sorted(dict(dg.degree()).values()),
sorted(dict(computed.degree()).values()))
self.assertEqual(
sorted(dg.nodes()),
sorted(computed.nodes()))
# =========================================================================
# ================== get_robustness_of_transitions ========================
# =========================================================================
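    # Compares consecutive transition matrices (and their stationary
    # distributions), reporting the L2-norm distance from the average and
    # Pearson correlations in the dataframe columns listed below.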
def test_get_robustness_of_transitions(self):
transition_matrices = [
np.array(
[[0.9, 0.1, 0],
[0.6, 0.2, 0.2],
[0.7, 0.1, 0.2]]),
np.array(
[[0.1, 0.8, 0.1],
[0, 0.9, 0.1],
[0.1, 0.1, 0.8]])
]
# Expected dataframe.
columns = [
'Transitions',
'Matrix L2-Norm Dist. from Average',
'Matrix Pearson r-value',
'Matrix Pearson p-value',
'Stationary Dist. L2-Norm Dist. from Average',
'Stationary Dist. Pearson r-value',
'Stationary Dist. Pearson p-value']
expected_df = pd.DataFrame({
columns[0]: ['Period 1 to Period 2', 'Period 2 to Period 3'],
columns[1]: [0.8444, 0.8083],
columns[2]: [0.4256, 0.6522],
columns[3]: [0.2534, 0.0569],
columns[4]: [0.5833, 0.4404],
columns[5]: [0.4637, 0.1319],
columns[6]: [0.6930, 0.9156],
},
columns=columns)
expected_df = pd.DataFrame(
expected_df, columns=columns)
# Computed dataframe.
computed_df = network_utils.get_robustness_of_transitions(
transition_matrices, lnorm=2)
# Comparing computed with expected.
pd.testing.assert_frame_equal(
expected_df, computed_df, check_less_precise=2)
# =========================================================================
# ================== generate_converted_graphs ============================
# =========================================================================
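    # generate_converted_graphs converts `percentage` percent of the edges
    # whose weight equals convert_from into weight convert_to; with
    # convert_from=0 this effectively adds new edges among the currently
    # non-adjacent node pairs.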
def test_generate_converted_graphs_raises_when_wrong_percentage(self):
with self.assertRaises(ValueError):
network_utils.generate_converted_graphs(
dgraph=nx.DiGraph(),
percentage=-1)
def test_generate_converted_graphs_when_it_adds_edges(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=1)
dg.add_edge(1, 3, weight=2)
dg.add_edge(2, 3, weight=5)
dg.add_edge(3, 1, weight=1)
percentage = 25
computed_graphs = network_utils.generate_converted_graphs(
dgraph=dg,
convert_from=0,
convert_to=1,
percentage=percentage,
how_many_to_generate=5)
for computed in computed_graphs:
# It should contain all nodes.
self.assertEqual(dg.nodes(), computed.nodes())
# It should contain all dg's edges.
self.assertEqual(len(nx.difference(dg, computed).edges()), 0)
# It should contain percentage% more edges.
remaining_edges_count = 4 * 3 - 4
self.assertEqual(
len(nx.difference(computed, dg).edges()),
int(percentage*remaining_edges_count/100))
def test_generate_converted_graphs_when_all_edges_exist(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=2)
dg.add_edge(1, 3, weight=-5)
dg.add_edge(2, 3, weight=-2)
dg.add_edge(3, 1, weight=2)
dg.add_edge(4, 1, weight=2)
dg.add_edge(4, 3, weight=2)
percentage = 25
computed_graphs = network_utils.generate_converted_graphs(
dgraph=dg,
convert_from=2,
convert_to=3,
percentage=percentage,
how_many_to_generate=2)
for computed in computed_graphs:
converted_cnt = 0
# It should contain all nodes.
self.assertEqual(dg.nodes(), computed.nodes())
# It should contain all dg's edges.
self.assertEqual(dg.edges(), computed.edges())
# Checking every edge weight.
for edge in dg.edges():
w1 = dg.get_edge_data(edge[0], edge[1])['weight']
w2 = computed.get_edge_data(edge[0], edge[1])['weight']
if w1 == w2:
continue
if w1 != w2 and w1 == 2 and w2 == 3 and converted_cnt == 0:
converted_cnt += 1
else:
self.assertTrue(
                        False, 'Found more converted edges than expected.')
def test_generate_converted_graphs(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
percentage = 10
computed_graphs = network_utils.generate_converted_graphs(
dgraph=dg,
convert_from=0,
convert_to=1,
percentage=percentage,
how_many_to_generate=2)
for computed in computed_graphs:
# It should contain all nodes.
self.assertEqual(dg.nodes(), computed.nodes())
# It should contain percentage extra edges.
self.assertEqual(
len(computed.edges()), int(4 * 3 * percentage / 100))
def test_generate_converted_graphs_for_large_networks(self):
n = 100
m = 300
dgraph = nx.gnm_random_graph(n=n, m=m, directed=True)
percentage = 5
computed_graphs = network_utils.generate_converted_graphs(
dgraph=dgraph,
convert_from=0,
convert_to=1,
percentage=percentage,
how_many_to_generate=6)
for computed in computed_graphs:
# It should contain all nodes.
self.assertEqual(dgraph.nodes(), computed.nodes())
# It should contain percentage extra edges.
self.assertEqual(
len(computed.edges()), m + int(
(n * (n-1) - m) * percentage / 100))
if __name__ == '__main__':
unittest.main()
'(5)'], {}), '(2017, 2, 5)\n', (1274, 1286), False, 'import datetime\n'), ((1312, 1341), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(6)'], {}), '(2017, 1, 6)\n', (1329, 1341), False, 'import datetime\n'), ((1367, 1397), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(13)'], {}), '(2017, 2, 13)\n', (1384, 1397), False, 'import datetime\n'), ((1423, 1453), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(22)'], {}), '(2017, 2, 22)\n', (1440, 1453), False, 'import datetime\n'), ((1479, 1509), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(24)'], {}), '(2017, 2, 24)\n', (1496, 1509), False, 'import datetime\n'), ((3213, 3242), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (3230, 3242), False, 'import datetime\n'), ((3280, 3309), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(4)'], {}), '(2017, 1, 4)\n', (3297, 3309), False, 'import datetime\n'), ((3347, 3376), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(5)'], {}), '(2017, 2, 5)\n', (3364, 3376), False, 'import datetime\n'), ((3414, 3444), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(13)'], {}), '(2017, 2, 13)\n', (3431, 3444), False, 'import datetime\n'), ((3482, 3512), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(24)'], {}), '(2017, 2, 24)\n', (3499, 3512), False, 'import datetime\n'), ((3552, 3582), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(28)'], {}), '(2017, 2, 28)\n', (3569, 3582), False, 'import datetime\n'), ((4721, 4800), 'utils.graph_equals', 'utils.graph_equals', (['expected_graph', 'computed_graph'], {'weight_column_name': '"""weight"""'}), "(expected_graph, computed_graph, weight_column_name='weight')\n", (4739, 4800), False, 'import utils\n'), ((6185, 6214), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (6202, 6214), False, 'import datetime\n'), ((6252, 6281), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(4)'], {}), '(2017, 1, 4)\n', (6269, 6281), False, 'import datetime\n'), ((6319, 6348), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(5)'], {}), '(2017, 2, 5)\n', (6336, 6348), False, 'import datetime\n'), ((6386, 6416), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(13)'], {}), '(2017, 2, 13)\n', (6403, 6416), False, 'import datetime\n'), ((6454, 6484), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(24)'], {}), '(2017, 2, 24)\n', (6471, 6484), False, 'import datetime\n'), ((6524, 6554), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(28)'], {}), '(2017, 2, 28)\n', (6541, 6554), False, 'import datetime\n'), ((26141, 26186), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, 1], [1, 0, 1], [-1, -1, 0]])\n', (26149, 26186), True, 'import numpy as np\n'), ((26251, 26296), 'numpy.array', 'np.array', (['[[0, 1, -1], [1, 0, -1], [1, 1, 0]]'], {}), '([[0, 1, -1], [1, 0, -1], [1, 1, 0]])\n', (26259, 26296), True, 'import numpy as np\n'), ((26362, 26408), 'numpy.array', 'np.array', (['[[0, 0, -1], [-1, 0, 0], [1, -1, 0]]'], {}), '([[0, 0, -1], [-1, 0, 0], [1, -1, 0]])\n', (26370, 26408), True, 'import numpy as np\n'), ((26474, 26517), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 1], [0, 0, 1], [0, 0, 0]])\n', (26482, 26517), True, 'import numpy as np\n'), ((26581, 26630), 'numpy.array', 'np.array', (['[[0, -1, -1], [-1, 0, -1], [-1, -1, 0]]'], {}), 
'([[0, -1, -1], [-1, 0, -1], [-1, -1, 0]])\n', (26589, 26630), True, 'import numpy as np\n'), ((26696, 26743), 'numpy.array', 'np.array', (['[[0, 0, -1], [-1, 0, 0], [-1, -1, 0]]'], {}), '([[0, 0, -1], [-1, 0, 0], [-1, -1, 0]])\n', (26704, 26743), True, 'import numpy as np\n'), ((26808, 26854), 'numpy.array', 'np.array', (['[[0, 1, 1], [-1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, 1], [-1, 0, 1], [-1, -1, 0]])\n', (26816, 26854), True, 'import numpy as np\n'), ((26919, 26966), 'numpy.array', 'np.array', (['[[0, 1, -1], [-1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, -1], [-1, 0, 1], [-1, -1, 0]])\n', (26927, 26966), True, 'import numpy as np\n'), ((27037, 27082), 'numpy.array', 'np.array', (['[[0, 1, -1], [0, 0, -1], [0, 0, 0]]'], {}), '([[0, 1, -1], [0, 0, -1], [0, 0, 0]])\n', (27045, 27082), True, 'import numpy as np\n'), ((27151, 27195), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, -1, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, -1, 0]])\n', (27159, 27195), True, 'import numpy as np\n'), ((27261, 27304), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 0, 0], [1, 0, 1], [0, 0, 0]])\n', (27269, 27304), True, 'import numpy as np\n'), ((27368, 27412), 'numpy.array', 'np.array', (['[[0, 1, -1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, -1], [1, 0, 1], [1, 1, 0]])\n', (27376, 27412), True, 'import numpy as np\n'), ((27478, 27521), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 1, 0]])\n', (27486, 27521), True, 'import numpy as np\n'), ((27587, 27630), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (27595, 27630), True, 'import numpy as np\n'), ((27695, 27738), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (27703, 27738), True, 'import numpy as np\n'), ((27806, 27851), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 0, 0], [0, 0, 0]]'], {}), '([[0, -1, 0], [-1, 0, 0], [0, 0, 0]])\n', (27814, 27851), True, 'import numpy as np\n'), ((27922, 27966), 'numpy.array', 'np.array', (['[[0, 1, 0], [-1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [-1, 0, 0], [0, 0, 0]])\n', (27930, 27966), True, 'import numpy as np\n'), ((28031, 28074), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n', (28039, 28074), True, 'import numpy as np\n'), ((28138, 28186), 'numpy.array', 'np.array', (['[[0, 1, -1], [-1, 0, -1], [-1, -1, 0]]'], {}), '([[0, 1, -1], [-1, 0, -1], [-1, -1, 0]])\n', (28146, 28186), True, 'import numpy as np\n'), ((29122, 29167), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, 1], [1, 0, 1], [-1, -1, 0]])\n', (29130, 29167), True, 'import numpy as np\n'), ((29233, 29278), 'numpy.array', 'np.array', (['[[0, 1, -1], [1, 0, -1], [1, 1, 0]]'], {}), '([[0, 1, -1], [1, 0, -1], [1, 1, 0]])\n', (29241, 29278), True, 'import numpy as np\n'), ((29345, 29391), 'numpy.array', 'np.array', (['[[0, 0, -1], [-1, 0, 0], [1, -1, 0]]'], {}), '([[0, 0, -1], [-1, 0, 0], [1, -1, 0]])\n', (29353, 29391), True, 'import numpy as np\n'), ((29458, 29501), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 1], [0, 0, 1], [0, 0, 0]])\n', (29466, 29501), True, 'import numpy as np\n'), ((29565, 29614), 'numpy.array', 'np.array', (['[[0, -1, -1], [-1, 0, -1], [-1, -1, 0]]'], {}), '([[0, -1, -1], [-1, 0, -1], [-1, -1, 0]])\n', (29573, 29614), True, 'import numpy as np\n'), ((29681, 29728), 
'numpy.array', 'np.array', (['[[0, 0, -1], [-1, 0, 0], [-1, -1, 0]]'], {}), '([[0, 0, -1], [-1, 0, 0], [-1, -1, 0]])\n', (29689, 29728), True, 'import numpy as np\n'), ((29794, 29840), 'numpy.array', 'np.array', (['[[0, 1, 1], [-1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, 1], [-1, 0, 1], [-1, -1, 0]])\n', (29802, 29840), True, 'import numpy as np\n'), ((29906, 29953), 'numpy.array', 'np.array', (['[[0, 1, -1], [-1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, -1], [-1, 0, 1], [-1, -1, 0]])\n', (29914, 29953), True, 'import numpy as np\n'), ((30024, 30069), 'numpy.array', 'np.array', (['[[0, 1, -1], [0, 0, -1], [0, 0, 0]]'], {}), '([[0, 1, -1], [0, 0, -1], [0, 0, 0]])\n', (30032, 30069), True, 'import numpy as np\n'), ((30138, 30182), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, -1, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, -1, 0]])\n', (30146, 30182), True, 'import numpy as np\n'), ((30248, 30291), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 0, 0], [1, 0, 1], [0, 0, 0]])\n', (30256, 30291), True, 'import numpy as np\n'), ((30355, 30399), 'numpy.array', 'np.array', (['[[0, 1, -1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, -1], [1, 0, 1], [1, 1, 0]])\n', (30363, 30399), True, 'import numpy as np\n'), ((30465, 30508), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 1, 0]])\n', (30473, 30508), True, 'import numpy as np\n'), ((30574, 30617), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (30582, 30617), True, 'import numpy as np\n'), ((30682, 30725), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (30690, 30725), True, 'import numpy as np\n'), ((30793, 30838), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 0, 0], [0, 0, 0]]'], {}), '([[0, -1, 0], [-1, 0, 0], [0, 0, 0]])\n', (30801, 30838), True, 'import numpy as np\n'), ((30909, 30953), 'numpy.array', 'np.array', (['[[0, 1, 0], [-1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [-1, 0, 0], [0, 0, 0]])\n', (30917, 30953), True, 'import numpy as np\n'), ((31018, 31061), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n', (31026, 31061), True, 'import numpy as np\n'), ((31125, 31173), 'numpy.array', 'np.array', (['[[0, 1, -1], [-1, 0, -1], [-1, -1, 0]]'], {}), '([[0, 1, -1], [-1, 0, -1], [-1, -1, 0]])\n', (31133, 31173), True, 'import numpy as np\n'), ((32097, 32142), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, 1], [1, 0, 1], [-1, -1, 0]])\n', (32105, 32142), True, 'import numpy as np\n'), ((32208, 32253), 'numpy.array', 'np.array', (['[[0, 1, -1], [1, 0, -1], [1, 1, 0]]'], {}), '([[0, 1, -1], [1, 0, -1], [1, 1, 0]])\n', (32216, 32253), True, 'import numpy as np\n'), ((32320, 32366), 'numpy.array', 'np.array', (['[[0, 0, -1], [-1, 0, 0], [1, -1, 0]]'], {}), '([[0, 0, -1], [-1, 0, 0], [1, -1, 0]])\n', (32328, 32366), True, 'import numpy as np\n'), ((32432, 32475), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 1], [0, 0, 1], [0, 0, 0]])\n', (32440, 32475), True, 'import numpy as np\n'), ((32539, 32588), 'numpy.array', 'np.array', (['[[0, -1, -1], [-1, 0, -1], [-1, -1, 0]]'], {}), '([[0, -1, -1], [-1, 0, -1], [-1, -1, 0]])\n', (32547, 32588), True, 'import numpy as np\n'), ((32654, 32701), 'numpy.array', 'np.array', (['[[0, 0, -1], [-1, 0, 0], [-1, -1, 0]]'], {}), '([[0, 0, -1], [-1, 0, 0], [-1, -1, 
0]])\n', (32662, 32701), True, 'import numpy as np\n'), ((32766, 32812), 'numpy.array', 'np.array', (['[[0, 1, 1], [-1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, 1], [-1, 0, 1], [-1, -1, 0]])\n', (32774, 32812), True, 'import numpy as np\n'), ((32878, 32925), 'numpy.array', 'np.array', (['[[0, 1, -1], [-1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, -1], [-1, 0, 1], [-1, -1, 0]])\n', (32886, 32925), True, 'import numpy as np\n'), ((32996, 33041), 'numpy.array', 'np.array', (['[[0, 1, -1], [0, 0, -1], [0, 0, 0]]'], {}), '([[0, 1, -1], [0, 0, -1], [0, 0, 0]])\n', (33004, 33041), True, 'import numpy as np\n'), ((33110, 33154), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, -1, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, -1, 0]])\n', (33118, 33154), True, 'import numpy as np\n'), ((33220, 33263), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 0, 0], [1, 0, 1], [0, 0, 0]])\n', (33228, 33263), True, 'import numpy as np\n'), ((33327, 33371), 'numpy.array', 'np.array', (['[[0, 1, -1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, -1], [1, 0, 1], [1, 1, 0]])\n', (33335, 33371), True, 'import numpy as np\n'), ((33437, 33480), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 1, 0]])\n', (33445, 33480), True, 'import numpy as np\n'), ((33546, 33589), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (33554, 33589), True, 'import numpy as np\n'), ((33654, 33697), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (33662, 33697), True, 'import numpy as np\n'), ((33765, 33810), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 0, 0], [0, 0, 0]]'], {}), '([[0, -1, 0], [-1, 0, 0], [0, 0, 0]])\n', (33773, 33810), True, 'import numpy as np\n'), ((33881, 33925), 'numpy.array', 'np.array', (['[[0, 1, 0], [-1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [-1, 0, 0], [0, 0, 0]])\n', (33889, 33925), True, 'import numpy as np\n'), ((33990, 34033), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n', (33998, 34033), True, 'import numpy as np\n'), ((34097, 34145), 'numpy.array', 'np.array', (['[[0, 1, -1], [-1, 0, -1], [-1, -1, 0]]'], {}), '([[0, 1, -1], [-1, 0, -1], [-1, -1, 0]])\n', (34105, 34145), True, 'import numpy as np\n'), ((35472, 35515), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (35480, 35515), True, 'import numpy as np\n'), ((35579, 35622), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (35587, 35622), True, 'import numpy as np\n'), ((35686, 35729), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (35694, 35729), True, 'import numpy as np\n'), ((35795, 35838), 'numpy.array', 'np.array', (['[[0, 0, 1], [1, 0, 1], [1, 0, 0]]'], {}), '([[0, 0, 1], [1, 0, 1], [1, 0, 0]])\n', (35803, 35838), True, 'import numpy as np\n'), ((35904, 35947), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 0], [1, 1, 0]]'], {}), '([[0, 1, 1], [0, 0, 0], [1, 1, 0]])\n', (35912, 35947), True, 'import numpy as np\n'), ((36013, 36056), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 1], [0, 0, 1], [0, 0, 0]])\n', (36021, 36056), True, 'import numpy as np\n'), ((36122, 36165), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 
0, 0], [1, 0, 1], [0, 0, 0]])\n', (36130, 36165), True, 'import numpy as np\n'), ((36231, 36274), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 1, 0]])\n', (36239, 36274), True, 'import numpy as np\n'), ((36339, 36382), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n', (36347, 36382), True, 'import numpy as np\n'), ((36448, 36491), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 1], [0, 0, 0]])\n', (36456, 36491), True, 'import numpy as np\n'), ((36557, 36600), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [0, 0, 0]])\n', (36565, 36600), True, 'import numpy as np\n'), ((36666, 36709), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 1, 0]])\n', (36674, 36709), True, 'import numpy as np\n'), ((36775, 36818), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 1], [1, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n', (36783, 36818), True, 'import numpy as np\n'), ((36883, 36926), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 0], [1, 0, 0]]'], {}), '([[0, 1, 1], [1, 0, 0], [1, 0, 0]])\n', (36891, 36926), True, 'import numpy as np\n'), ((36992, 37035), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 0, 0]])\n', (37000, 37035), True, 'import numpy as np\n'), ((37100, 37143), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 1, 0]])\n', (37108, 37143), True, 'import numpy as np\n'), ((38476, 38519), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (38484, 38519), True, 'import numpy as np\n'), ((38583, 38626), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (38591, 38626), True, 'import numpy as np\n'), ((38690, 38733), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (38698, 38733), True, 'import numpy as np\n'), ((38798, 38841), 'numpy.array', 'np.array', (['[[0, 0, 1], [1, 0, 1], [1, 0, 0]]'], {}), '([[0, 0, 1], [1, 0, 1], [1, 0, 0]])\n', (38806, 38841), True, 'import numpy as np\n'), ((38907, 38950), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 0], [1, 1, 0]]'], {}), '([[0, 1, 1], [0, 0, 0], [1, 1, 0]])\n', (38915, 38950), True, 'import numpy as np\n'), ((39016, 39059), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 1], [0, 0, 1], [0, 0, 0]])\n', (39024, 39059), True, 'import numpy as np\n'), ((39125, 39168), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 0, 0], [1, 0, 1], [0, 0, 0]])\n', (39133, 39168), True, 'import numpy as np\n'), ((39234, 39277), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 1, 0]])\n', (39242, 39277), True, 'import numpy as np\n'), ((39342, 39385), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n', (39350, 39385), True, 'import numpy as np\n'), ((39450, 39493), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 1], [0, 0, 0]])\n', (39458, 39493), True, 'import numpy as np\n'), ((39559, 39602), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 
1], [0, 0, 0]])\n', (39567, 39602), True, 'import numpy as np\n'), ((39668, 39711), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 1, 0]])\n', (39676, 39711), True, 'import numpy as np\n'), ((39777, 39820), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 1], [1, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n', (39785, 39820), True, 'import numpy as np\n'), ((39885, 39928), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 0], [1, 0, 0]]'], {}), '([[0, 1, 1], [1, 0, 0], [1, 0, 0]])\n', (39893, 39928), True, 'import numpy as np\n'), ((39994, 40037), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 0, 0]])\n', (40002, 40037), True, 'import numpy as np\n'), ((40102, 40145), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 1, 0]])\n', (40110, 40145), True, 'import numpy as np\n'), ((41474, 41517), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (41482, 41517), True, 'import numpy as np\n'), ((41581, 41624), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (41589, 41624), True, 'import numpy as np\n'), ((41688, 41731), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (41696, 41731), True, 'import numpy as np\n'), ((41796, 41839), 'numpy.array', 'np.array', (['[[0, 0, 1], [1, 0, 1], [1, 0, 0]]'], {}), '([[0, 0, 1], [1, 0, 1], [1, 0, 0]])\n', (41804, 41839), True, 'import numpy as np\n'), ((41904, 41947), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 0], [1, 1, 0]]'], {}), '([[0, 1, 1], [0, 0, 0], [1, 1, 0]])\n', (41912, 41947), True, 'import numpy as np\n'), ((42012, 42055), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 1], [0, 0, 1], [0, 0, 0]])\n', (42020, 42055), True, 'import numpy as np\n'), ((42120, 42163), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 0, 0], [1, 0, 1], [0, 0, 0]])\n', (42128, 42163), True, 'import numpy as np\n'), ((42228, 42271), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 1, 0]])\n', (42236, 42271), True, 'import numpy as np\n'), ((42335, 42378), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n', (42343, 42378), True, 'import numpy as np\n'), ((42443, 42486), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 1], [0, 0, 0]])\n', (42451, 42486), True, 'import numpy as np\n'), ((42552, 42595), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [0, 0, 0]])\n', (42560, 42595), True, 'import numpy as np\n'), ((42661, 42704), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 1, 0]])\n', (42669, 42704), True, 'import numpy as np\n'), ((42770, 42813), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 1], [1, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n', (42778, 42813), True, 'import numpy as np\n'), ((42878, 42921), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 0], [1, 0, 0]]'], {}), '([[0, 1, 1], [1, 0, 0], [1, 0, 0]])\n', (42886, 42921), True, 'import numpy as np\n'), ((42987, 43030), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 0, 
0]])\n', (42995, 43030), True, 'import numpy as np\n'), ((43095, 43138), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 1, 0]])\n', (43103, 43138), True, 'import numpy as np\n'), ((52011, 52036), 'numpy.unique', 'np.unique', (['truncated_keys'], {}), '(truncated_keys)\n', (52020, 52036), True, 'import numpy as np\n'), ((10871, 10883), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (10881, 10883), True, 'import networkx as nx\n'), ((12241, 12253), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (12251, 12253), True, 'import networkx as nx\n'), ((61691, 61703), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (61701, 61703), True, 'import networkx as nx\n'), ((62475, 62502), 'networkx.difference', 'nx.difference', (['dg', 'computed'], {}), '(dg, computed)\n', (62488, 62502), True, 'import networkx as nx\n'), ((62668, 62695), 'networkx.difference', 'nx.difference', (['computed', 'dg'], {}), '(computed, dg)\n', (62681, 62695), True, 'import networkx as nx\n')]
|
#!/usr/bin/python
import sys
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtGui import QPainter, QColor, QFont,QPalette,QPolygonF,QBrush
from PyQt5.QtCore import Qt , QPointF,QRectF,QObject,pyqtSignal,QRunnable,pyqtSlot,QThreadPool,QTimer
import traceback
from time import sleep
class WorkerSignals(QObject):
""" defines the signals available from a running worker thread
supported signals are:
finished
No data
error
tuple( exctype ,value ,traceback.format_exc() )
result
object data returned from processing , anything
"""
finished = pyqtSignal()
error= pyqtSignal(tuple)
result = pyqtSignal(object)
class Worker(QRunnable):
"""
Worker thread
    inherits from QRunnable to handle worker thread setup, signals and wrap-up.
    :param callback: The function to run on this worker thread. Supplied args and
                    kwargs will be passed through to the runner.
    :type callback: function
    :param args: Arguments to pass to the callback function
    :param kwargs: Keyword arguments to pass to the callback function
"""
def __init__(self,fn,*args,**kwargs):
super(Worker, self).__init__()
self.fn =fn
self.args= args
self.kwargs=kwargs
self.signals=WorkerSignals()
@pyqtSlot()
def run(self):
"""
initialise the runner function with passed args and kwargs
"""
try:
result =self.fn(*self.args,**self.kwargs)
except:
traceback.print_exc()
exctype,value = sys.exc_info()[:2]
self.signals.error.emit((exctype, value, traceback.format_exc() ))
else:
self.signals.result.emit(result)
finally:
self.signals.finished.emit()
class LoadingBar(QWidget):
def __init__(self):
super().__init__()
self.threadpool=QThreadPool()
self.initUI()
#position of colored part in loading bar 0 -100
self.position=20
        self.loading_increasing=True
        self.startWorker(self.move_loading_bar)
# self.timer=QTimer()
# self.timer.timeout.connect(self.move_loading_bar)
# self.timer.start(500)
def move_loading_bar(self):
""" move the loading bar back and forth by changing the value of self.position """
while True:
# print('moving loading bar',self.position)
sleep(0.015)
if self.position ==100:
self.loading_increasing=False
elif self.position==0:
self.loading_increasing=True
if self.loading_increasing:
self.position+=1
else:
self.position-=1
qp=QPainter()
            # update() may raise RuntimeError if the LoadingBar widget has been deleted, so catch that
try:
self.update()
except RuntimeError:
pass
def startWorker(self,fn,*args,**kwargs):
        worker = Worker(fn, *args, **kwargs)
self.threadpool.start(worker)
def initUI(self):
self.setGeometry(300, 300, 350, 300)
self.setWindowTitle('loading please wait')
self.show()
def paintEvent(self, event):
qp = QPainter()
qp.begin(self)
self.drawText(event, qp)
qp.end()
def drawText(self, event, qp):
width = self.width()
height = self.height()
self.widget_height= 6
        # the horizontal distance the progressed chunk travels (track width minus chunk width)
reduce_amount = width*0.6
top_left =QPointF(int(width*0.1),int(height/2-self.widget_height/2))
bottom_right =QPointF(int(width*0.9)-reduce_amount ,int(height/2 +self.widget_height/2))
bigger_bottom_right =QPointF(int(width*0.9) ,int(height/2+self.widget_height/2) )
recty =QRectF(QPointF(top_left.x()+self.position/100*reduce_amount,top_left.y()),
QPointF(bottom_right.x()+self.position/100*reduce_amount,bottom_right.y()))
bigger_recty=QRectF(top_left,bigger_bottom_right)
#non progressed part (bigger rounded rect)
qp.setPen(QPalette().color(QPalette.Disabled,QPalette.Text))
qp.setBrush(QBrush(QPalette().color(QPalette.Active,QPalette.Button)))
qp.drawRoundedRect(bigger_recty,3,3)
#progressed part
qp.setBrush(QBrush(QPalette().color(QPalette().Inactive,QPalette().Highlight)))
qp.setPen(QPalette().color(QPalette().Active,QPalette().Highlight))
qp.drawRoundedRect(recty,2,2)
def main():
app = QApplication(sys.argv)
ex = LoadingBar()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
[
"PyQt5.QtCore.pyqtSignal",
"PyQt5.QtGui.QPainter",
"traceback.print_exc",
"PyQt5.QtCore.QThreadPool",
"PyQt5.QtGui.QPalette",
"time.sleep",
"PyQt5.QtCore.QRectF",
"traceback.format_exc",
"sys.exc_info",
"PyQt5.QtCore.pyqtSlot",
"PyQt5.QtWidgets.QApplication"
] |
[((643, 655), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (653, 655), False, 'from PyQt5.QtCore import Qt, QPointF, QRectF, QObject, pyqtSignal, QRunnable, pyqtSlot, QThreadPool, QTimer\n'), ((667, 684), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['tuple'], {}), '(tuple)\n', (677, 684), False, 'from PyQt5.QtCore import Qt, QPointF, QRectF, QObject, pyqtSignal, QRunnable, pyqtSlot, QThreadPool, QTimer\n'), ((698, 716), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['object'], {}), '(object)\n', (708, 716), False, 'from PyQt5.QtCore import Qt, QPointF, QRectF, QObject, pyqtSignal, QRunnable, pyqtSlot, QThreadPool, QTimer\n'), ((1364, 1374), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (1372, 1374), False, 'from PyQt5.QtCore import Qt, QPointF, QRectF, QObject, pyqtSignal, QRunnable, pyqtSlot, QThreadPool, QTimer\n'), ((4861, 4883), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (4873, 4883), False, 'from PyQt5.QtWidgets import QWidget, QApplication\n'), ((1978, 1991), 'PyQt5.QtCore.QThreadPool', 'QThreadPool', ([], {}), '()\n', (1989, 1991), False, 'from PyQt5.QtCore import Qt, QPointF, QRectF, QObject, pyqtSignal, QRunnable, pyqtSlot, QThreadPool, QTimer\n'), ((3421, 3431), 'PyQt5.QtGui.QPainter', 'QPainter', ([], {}), '()\n', (3429, 3431), False, 'from PyQt5.QtGui import QPainter, QColor, QFont, QPalette, QPolygonF, QBrush\n'), ((4284, 4321), 'PyQt5.QtCore.QRectF', 'QRectF', (['top_left', 'bigger_bottom_right'], {}), '(top_left, bigger_bottom_right)\n', (4290, 4321), False, 'from PyQt5.QtCore import Qt, QPointF, QRectF, QObject, pyqtSignal, QRunnable, pyqtSlot, QThreadPool, QTimer\n'), ((2546, 2558), 'time.sleep', 'sleep', (['(0.015)'], {}), '(0.015)\n', (2551, 2558), False, 'from time import sleep\n'), ((2874, 2884), 'PyQt5.QtGui.QPainter', 'QPainter', ([], {}), '()\n', (2882, 2884), False, 'from PyQt5.QtGui import QPainter, QColor, QFont, QPalette, QPolygonF, QBrush\n'), ((1599, 1620), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1618, 1620), False, 'import traceback\n'), ((1649, 1663), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1661, 1663), False, 'import sys\n'), ((4408, 4418), 'PyQt5.QtGui.QPalette', 'QPalette', ([], {}), '()\n', (4416, 4418), False, 'from PyQt5.QtGui import QPainter, QColor, QFont, QPalette, QPolygonF, QBrush\n'), ((4740, 4750), 'PyQt5.QtGui.QPalette', 'QPalette', ([], {}), '()\n', (4748, 4750), False, 'from PyQt5.QtGui import QPainter, QColor, QFont, QPalette, QPolygonF, QBrush\n'), ((4757, 4767), 'PyQt5.QtGui.QPalette', 'QPalette', ([], {}), '()\n', (4765, 4767), False, 'from PyQt5.QtGui import QPainter, QColor, QFont, QPalette, QPolygonF, QBrush\n'), ((4775, 4785), 'PyQt5.QtGui.QPalette', 'QPalette', ([], {}), '()\n', (4783, 4785), False, 'from PyQt5.QtGui import QPainter, QColor, QFont, QPalette, QPolygonF, QBrush\n'), ((1721, 1743), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1741, 1743), False, 'import traceback\n'), ((4486, 4496), 'PyQt5.QtGui.QPalette', 'QPalette', ([], {}), '()\n', (4494, 4496), False, 'from PyQt5.QtGui import QPainter, QColor, QFont, QPalette, QPolygonF, QBrush\n'), ((4661, 4671), 'PyQt5.QtGui.QPalette', 'QPalette', ([], {}), '()\n', (4669, 4671), False, 'from PyQt5.QtGui import QPainter, QColor, QFont, QPalette, QPolygonF, QBrush\n'), ((4678, 4688), 'PyQt5.QtGui.QPalette', 'QPalette', ([], {}), '()\n', (4686, 4688), False, 'from PyQt5.QtGui import QPainter, QColor, QFont, QPalette, QPolygonF, QBrush\n'), ((4698, 4708), 
'PyQt5.QtGui.QPalette', 'QPalette', ([], {}), '()\n', (4706, 4708), False, 'from PyQt5.QtGui import QPainter, QColor, QFont, QPalette, QPolygonF, QBrush\n')]
|
import time
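# Tiny demo script: print a counter that increments once per second, then a few parting messages.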
c = 1
print(c)
c += 1
print(c)
time.sleep(1)
c += 1
print(c)
time.sleep(1)
c += 1
print(c)
time.sleep(1)
c += 1
print(c)
time.sleep(1)
c += 1
print(c)
time.sleep(1)
print("jak by co")
time.sleep(0.5)
print("to kod jest z książki")
time.sleep(3)
print("nie kradziony")
time.sleep(2)
|
[
"time.sleep"
] |
[((43, 56), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (53, 56), False, 'import time\n'), ((73, 86), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (83, 86), False, 'import time\n'), ((103, 116), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (113, 116), False, 'import time\n'), ((133, 146), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (143, 146), False, 'import time\n'), ((163, 176), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (173, 176), False, 'import time\n'), ((196, 211), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (206, 211), False, 'import time\n'), ((243, 256), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (253, 256), False, 'import time\n'), ((280, 293), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (290, 293), False, 'import time\n')]
|
"""
DB Model for Posts and
relevant junction tables
"""
import datetime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import and_, select
from app.main import db
from app.main.models.base import Base
from app.main.models.comments import Comment
from app.main.models.movies import Movie
from app.main.models.postSearches import SearchableMixin
class Post(Base, SearchableMixin):
"""
    Description of Post model.
Columns
-----------
:id: int [pk]
:title: Text [not NULL]
:author_id: int [Foreign Key]
:creation_time: DateTime [not NULL]
:last_edit_time: DateTime [not NULL]
:post_body: Text
# Relationships
:comments: Relationship -> Comments (one to many)
"""
# Columns
id = db.Column(db.Integer, db.ForeignKey("base.id"), primary_key=True)
post_id = db.Column(db.Integer, autoincrement=True,
primary_key=True, unique=True)
title = db.Column(db.Text, nullable=False)
post_movie = db.Column(db.String(20))
tags = db.Column(db.JSON)
__searchable__ = ['title', 'body', 'tags']
__mapper_args__ = {
'polymorphic_identity': 'post',
'inherit_condition': (id == Base.id)
}
comments = db.relationship('Comment', primaryjoin="(Post.post_id == Comment.parent_post_id)",
backref=db.backref('post'), lazy='dynamic')
def __init__(self, author_id, post_movie, title, post_body, tags):
super().__init__(author_id, post_body, "post")
self.title = title
self.post_movie = post_movie
self.tags = tags
db.session.add(self)
db.session.commit()
def add_comment(self, author_id, comment_body):
parent_post_id = self.id
comment = Comment(author_id, parent_post_id, comment_body)
self.comments.append(comment)
db.session.commit()
return comment.id
def update_col(self, key, value):
setattr(self, key, value)
db.session.commit()
def delete_post(self, post_id):
post = Post.query.filter_by(id=post_id).delete()
db.session.commit()
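# Usage sketch (illustrative values only; assumes a valid author id exists):
#   post = Post(author_id=1, post_movie='Inception', title='First post',
#               post_body='Hello world', tags=['intro'])
#   post.add_comment(author_id=2, comment_body='Nice post!')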
|
[
"app.main.db.ForeignKey",
"app.main.db.backref",
"app.main.db.session.add",
"app.main.models.comments.Comment",
"app.main.db.session.commit",
"app.main.db.String",
"app.main.db.Column"
] |
[((847, 919), 'app.main.db.Column', 'db.Column', (['db.Integer'], {'autoincrement': '(True)', 'primary_key': '(True)', 'unique': '(True)'}), '(db.Integer, autoincrement=True, primary_key=True, unique=True)\n', (856, 919), False, 'from app.main import db\n'), ((956, 990), 'app.main.db.Column', 'db.Column', (['db.Text'], {'nullable': '(False)'}), '(db.Text, nullable=False)\n', (965, 990), False, 'from app.main import db\n'), ((1046, 1064), 'app.main.db.Column', 'db.Column', (['db.JSON'], {}), '(db.JSON)\n', (1055, 1064), False, 'from app.main import db\n'), ((789, 813), 'app.main.db.ForeignKey', 'db.ForeignKey', (['"""base.id"""'], {}), "('base.id')\n", (802, 813), False, 'from app.main import db\n'), ((1019, 1032), 'app.main.db.String', 'db.String', (['(20)'], {}), '(20)\n', (1028, 1032), False, 'from app.main import db\n'), ((1627, 1647), 'app.main.db.session.add', 'db.session.add', (['self'], {}), '(self)\n', (1641, 1647), False, 'from app.main import db\n'), ((1656, 1675), 'app.main.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1673, 1675), False, 'from app.main import db\n'), ((1780, 1828), 'app.main.models.comments.Comment', 'Comment', (['author_id', 'parent_post_id', 'comment_body'], {}), '(author_id, parent_post_id, comment_body)\n', (1787, 1828), False, 'from app.main.models.comments import Comment\n'), ((1875, 1894), 'app.main.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1892, 1894), False, 'from app.main import db\n'), ((2003, 2022), 'app.main.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2020, 2022), False, 'from app.main import db\n'), ((2125, 2144), 'app.main.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2142, 2144), False, 'from app.main import db\n'), ((1367, 1385), 'app.main.db.backref', 'db.backref', (['"""post"""'], {}), "('post')\n", (1377, 1385), False, 'from app.main import db\n')]
|
from django.conf import settings
import requests
import json
from NER.models import GoogleLocationCache
GOOGLE_GEOCODE_URL =\
'https://maps.googleapis.com/maps/api/geocode/json?key={}&address={}'
def get_google_geocode_url(location):
return GOOGLE_GEOCODE_URL.format(
getattr(settings, 'GOOGLE_API_KEY', ''),
location,
)
def get_location_info(location):
# First query the database, if not found, make a call
try:
cache = GoogleLocationCache.objects.get(_location=location.lower())
return cache.location_info
except GoogleLocationCache.DoesNotExist:
pass
r = requests.get(get_google_geocode_url(location))
try:
info = json.loads(r.text)
location_info = info.get('results')[0]
# save to database
GoogleLocationCache.objects.create(
location=location,
location_info=location_info
)
return location_info
except (ValueError, IndexError):
return {}
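# Usage sketch (hypothetical location string): get_location_info('Berlin') returns the first
# geocoding result as a dict, caching it in GoogleLocationCache so later calls skip the API,
# and returns {} when the response cannot be parsed or contains no results.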
|
[
"json.loads",
"NER.models.GoogleLocationCache.objects.create"
] |
[((705, 723), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (715, 723), False, 'import json\n'), ((806, 893), 'NER.models.GoogleLocationCache.objects.create', 'GoogleLocationCache.objects.create', ([], {'location': 'location', 'location_info': 'location_info'}), '(location=location, location_info=\n location_info)\n', (840, 893), False, 'from NER.models import GoogleLocationCache\n')]
|
# -*- coding: utf-8 -*-
"""
Generate a package with IR implementations and tools.
"""
from __future__ import print_function, division, absolute_import
import os
from textwrap import dedent
from itertools import chain
from . import generator
from . import formatting
from . import astgen
from . import visitorgen
from . import naming
#------------------------------------------------------------------------
# Tools Flags
#------------------------------------------------------------------------
cython = 1
#------------------------------------------------------------------------
# Tools Resolution
#------------------------------------------------------------------------
class Tool(object):
def __init__(self, codegens, flags=0, depends=[]):
self.codegens = codegens
self.flags = flags
self.depends = depends
def __repr__(self):
return "Tool(codegens=[%s])" % ", ".join(map(str, self.codegens))
def resolve_tools(tool_list, mask, tools=None, seen=None):
if tools is None:
tools = []
seen = set()
for tool in tool_list:
if not (tool.flags & mask) and tool not in seen:
seen.add(tool)
resolve_tools(tool.depends, mask, tools, seen)
tools.append(tool)
return tools
def enumerate_tools(feature_names, mask):
tool_set = set(chain(*[features[name] for name in feature_names]))
tools = resolve_tools(tool_set, mask)
return tools
def enumerate_codegens(feature_names, mask):
tools = enumerate_tools(feature_names, mask)
codegens = list(chain(*[tool.codegens for tool in tools]))
return codegens
#------------------------------------------------------------------------
# Tool Definitions
#------------------------------------------------------------------------
def make_codegen_dict(codegens):
return dict((codegen.out_filename, codegen) for codegen in codegens)
all_codegens = astgen.codegens + visitorgen.codegens
gens = make_codegen_dict(all_codegens)
pxd_ast_tool = Tool([gens[naming.nodes + ".pxd"]], flags=cython)
py_ast_tool = Tool([gens[naming.nodes + ".py"]])
pxd_interface_tool = Tool([gens[naming.interface + ".pxd"]], flags=cython,
depends=[pxd_ast_tool])
py_interface_tool = Tool([gens[naming.interface + ".py"]],
depends=[py_ast_tool])
pxd_visitor_tool = Tool([gens[naming.visitor + ".pxd"]], flags=cython,
depends=[pxd_interface_tool])
py_visitor_tool = Tool([gens[naming.visitor + ".py"]],
depends=[py_interface_tool, pxd_visitor_tool])
pxd_transform_tool = Tool([gens[naming.transformer + ".pxd"]], flags=cython,
depends=[pxd_interface_tool])
py_transformr_tool = Tool([gens[naming.transformer + ".py"]],
depends=[py_interface_tool, pxd_transform_tool])
pxd_ast_tool.depends.extend([pxd_interface_tool, py_interface_tool])
#------------------------------------------------------------------------
# Feature Definitions & Entry Points
#------------------------------------------------------------------------
features = {
'all': [py_ast_tool, py_visitor_tool, py_transformr_tool],
'ast': [py_ast_tool],
'visitor': [py_visitor_tool],
'transformer': [py_transformr_tool],
}
def build_package(schema_filename, feature_names, output_dir, mask=0):
"""
Build a package from the given schema and feature names in output_dir.
:param mask: indicates which features to mask, e.g. specifying
'mask=build.cython' disables Cython support.
"""
codegens = enumerate_codegens(feature_names, mask)
disk_allocator = generator.generate_from_file(
schema_filename, codegens, output_dir)
try:
_make_package(disk_allocator, codegens)
finally:
disk_allocator.close()
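# Usage sketch (schema file and output directory are hypothetical):
#   build_package('ir.asdl', ['ast', 'visitor'], 'build/ir')        # include Cython codegens
#   build_package('ir.asdl', ['all'], 'build/ir', mask=cython)      # pure-Python output only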
#------------------------------------------------------------------------
# Package Building Utilities
#------------------------------------------------------------------------
source_name = lambda fn: os.path.splitext(os.path.basename(fn))[0]
def _make_package(disk_allocator, codegens):
_make_init(disk_allocator, codegens)
# Make Cython dependency optional
# disk_allocator.open_sourcefile("cython.py")
fns = [c.out_filename for c in codegens if c.out_filename.endswith('.pxd')]
if fns:
_make_setup(disk_allocator, [source_name(fn) + '.py' for fn in fns])
def _make_init(disk_allocator, codegens):
init = disk_allocator.open_sourcefile("__init__.py")
init.write(dedent("""
    # Horrid hack to work around circular cimports
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
"""))
for c in codegens:
if c.out_filename.endswith('.py'):
modname = source_name(c.out_filename)
init.write("from %s import *\n" % modname)
def _make_setup(disk_allocator, filenames):
setup = disk_allocator.open_sourcefile("setup.py")
ext_modules = ["Extension('%s', ['%s'])" % (source_name(fn), fn)
for fn in filenames]
setup.write(dedent("""
from distutils.core import setup
from Cython.Distutils import build_ext
from Cython.Distutils.extension import Extension
ext_modules = [
%s
]
setup(
# ext_modules=cythonize('*.pyx'),
ext_modules=ext_modules,
cmdclass={'build_ext': build_ext},
)
""") % formatting.py_formatter.format_stats(",\n", 4, ext_modules))
|
[
"textwrap.dedent",
"itertools.chain",
"os.path.basename"
] |
[((1350, 1400), 'itertools.chain', 'chain', (['*[features[name] for name in feature_names]'], {}), '(*[features[name] for name in feature_names])\n', (1355, 1400), False, 'from itertools import chain\n'), ((1576, 1617), 'itertools.chain', 'chain', (['*[tool.codegens for tool in tools]'], {}), '(*[tool.codegens for tool in tools])\n', (1581, 1617), False, 'from itertools import chain\n'), ((4607, 4787), 'textwrap.dedent', 'dedent', (['"""\n # Horrid hack to make work around circular cimports\n import os, sys\n sys.path.append(os.path.dirname(os.path.abspath(__file__)))\n """'], {}), '(\n """\n # Horrid hack to make work around circular cimports\n import os, sys\n sys.path.append(os.path.dirname(os.path.abspath(__file__)))\n """\n )\n', (4613, 4787), False, 'from textwrap import dedent\n'), ((4121, 4141), 'os.path.basename', 'os.path.basename', (['fn'], {}), '(fn)\n', (4137, 4141), False, 'import os\n'), ((5181, 5561), 'textwrap.dedent', 'dedent', (['"""\n from distutils.core import setup\n from Cython.Distutils import build_ext\n from Cython.Distutils.extension import Extension\n\n ext_modules = [\n %s\n ]\n\n setup(\n # ext_modules=cythonize(\'*.pyx\'),\n ext_modules=ext_modules,\n cmdclass={\'build_ext\': build_ext},\n )\n """'], {}), '(\n """\n from distutils.core import setup\n from Cython.Distutils import build_ext\n from Cython.Distutils.extension import Extension\n\n ext_modules = [\n %s\n ]\n\n setup(\n # ext_modules=cythonize(\'*.pyx\'),\n ext_modules=ext_modules,\n cmdclass={\'build_ext\': build_ext},\n )\n """\n )\n', (5187, 5561), False, 'from textwrap import dedent\n')]
|
import serial
import cv2
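# Overview: the PLC sends 'READY' over RS-232; we answer with '1' (PASS) or '2' (NG)
# and then close the port. judgeSignal() below is the generic form of both helpers.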
# Error message class for serial connection failures
class errorMessage(Exception):
def __init__(self, msg='init_error_msg'):
self.msg = msg
def __str__(self):
return self.msg
# Initial serial connection
try:
    ser = serial.Serial(
        port='COM3',
        baudrate=9600,
        parity=serial.PARITY_NONE, \
        stopbits=serial.STOPBITS_ONE, \
        bytesize=serial.EIGHTBITS, \
        timeout=0.5  # the PLC sends once a second, so keep the timeout below that
    )
    print("Successfully connected with PLC")
except:
print("[ERROR] : please check PLC RS232")
# raise errorMessage('[ERROR] : please check PLC RS232')
# On PASS, send '1' and close the RS232 connection
def passSignal():
print("PASS the judgement")
    # Read the PLC signal
    if ser.readable():
        res = ser.readline()
        PLC_ready = res.decode()
        PLC_ready = PLC_ready.lower()  # convert to lowercase
        if PLC_ready[0:5] == 'ready':
            print("Protocol received from PLC:", PLC_ready[0:5])  # received protocol
            passSig = '1'.encode()
            ser.write(passSig)  # send
            ser.close()  # close
# On NG, send '2' and close the RS232 connection
def NGSignal():
print("NG the judgement")
    # Read the PLC signal
    if ser.readable():
        res = ser.readline()
        PLC_ready = res.decode()
        PLC_ready = PLC_ready.lower()  # convert to lowercase
        if PLC_ready[0:5] == 'ready':
            print("Protocol received from PLC:", PLC_ready[0:5])  # received protocol
            NGSig = '2'.encode()
            ser.write(NGSig)  # send
            ser.close()  # close
'''
Read the 'READY' value sent by the PLC, then call this function as:
    pass : judgeSignal(1)
    fail : judgeSignal(2)
'''
def judgeSignal(signal=0):
print("checking PASS or NG signal...")
    # Read the PLC signal
    if ser.readable():
        res = ser.readline()
        PLC_ready = res.decode()
        PLC_ready = PLC_ready.lower()  # convert to lowercase
        if PLC_ready[0:5] == 'ready':
            print("Protocol received from PLC:", PLC_ready[0:5])  # received protocol
            signal = str(signal)
            signal = signal.encode()
            ser.write(signal)  # send
            ser.close()  # close
if __name__ == "__main__":
while 1:
judgeSignal()
# passSignal()
# NGSignal()
|
[
"serial.Serial"
] |
[((272, 414), 'serial.Serial', 'serial.Serial', ([], {'port': '"""COM3"""', 'baudrate': '(9600)', 'parity': 'serial.PARITY_NONE', 'stopbits': 'serial.STOPBITS_ONE', 'bytesize': 'serial.EIGHTBITS', 'timeout': '(0.5)'}), "(port='COM3', baudrate=9600, parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=0.5)\n", (285, 414), False, 'import serial\n')]
|
from django.core.files.storage import Storage
"""
1.您的自定义存储系统必须是以下的子类 :django.core.files.storage.Storage
2.Django必须能够在没有任何参数的情况下实例化您的存储系统。
这意味着任何设置都应该来自:django.conf.settings
3.您的存储类必须实现_open()和_save() 方法,
以适合您的存储类中的任何其他方法一起。请参阅下面的这些方法。
4.您的存储类必须是可解构的, 以便在迁移中的字段上使用时可以对其进行序列化。
只要您的字段具有可自行序列化的参数,
就 可以使用 django.utils.deconstruct.deconstructible类装饰器
(这就是Django在FileSystemStorage上使用的)
"""
from fdfs_client.client import Fdfs_client
from django.utils.deconstruct import deconstructible
from mall import settings
@deconstructible
class MyStorage(Storage):
    # Fall back to the default settings for any configuration not supplied at initialization
def __init__(self, config_path=None,ip=None):
if not config_path:
config_path = settings.FDFS_CLIENT_CONF
self.config_path = config_path
if not ip:
ip = settings.FDFS_URL
self.ip = ip
    # open: open a file (image)
    # FastDFS serves our image resources over HTTP, so there is nothing to open, e.g.
    # http://192.168.229.148:8888/group1/M00/00/02/wKjllFx4r_aAJyv2AAGByoOJNyU855.jpg
def _open(self, name, mode='rb'):
pass
    # save
    # Saving is done through FastDFS, so the save operation is implemented here
    def _save(self, name, content, max_length=None):
        # name: image name
        # content: image content
        # max_length: maximum length
        # 1. Create the upload client
# client = Fdfs_client('utils/fastdfs/client.conf')
# client = Fdfs_client(settings.FDFS_CLIENT_CONF)
client = Fdfs_client(self.config_path)
        # 2. Get the image. We cannot locate it by name, so read the image data from content
        # (this reads the raw binary of the image)
data = content.read()
        # 3. Upload the binary buffer
result = client.upload_by_buffer(data)
        # result is the return value of the upload
        # 4. Get the remote file_id according to the upload status
"""
{'Group name': 'group1',
'Remote file_id': 'group1/M00/00/02/wKjllFx42-6AW-JBAAGByoOJNyU783.jpg',
'Status': 'Upload successed.',
'Local file name': '/home/python/Desktop/images/2.jpg',
'Uploaded size': '96.00KB',
'Storage IP': '192.168.229.148'}
"""
if result.get('Status') == 'Upload successed.':
            # upload succeeded, return the remote file_id
file_id = result.get('Remote file_id')
else:
            raise Exception('upload failed')
        # We need to return the remote file_id
        # because the rest of the system uses it
return file_id
    # exists: does the file already exist?
    # FastDFS already handles duplicate file names for us, so we do not need to check
    # whether an image already exists; just let it upload directly
def exists(self, name):
return False
    # url: by default this would just return name,
    # and in FastDFS the name is actually the remote file_id.
    # But when accessing the image we have to prepend http://ip:port/ ourselves,
    # so we override the url method to add http://ip:port/ + name
def url(self, name):
# return 'http://192.168.229.148:8888/' + name
# return settings.FDFS_URL + name
return self.ip + name
# return name
# pass
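# Usage sketch (the dotted path is hypothetical): point Django at this backend via
#   DEFAULT_FILE_STORAGE = 'mall.utils.fastdfs.storage.MyStorage'
# so ImageField/FileField uploads go to FastDFS; the returned remote file_id is stored
# in the database and url() prepends FDFS_URL when the image is served.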
|
[
"fdfs_client.client.Fdfs_client"
] |
[((1418, 1447), 'fdfs_client.client.Fdfs_client', 'Fdfs_client', (['self.config_path'], {}), '(self.config_path)\n', (1429, 1447), False, 'from fdfs_client.client import Fdfs_client\n')]
|
import numpy as np
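# Fuel-cell style puzzle: build a 300x300 grid of power levels derived from the serial
# number, then report the top-left corner and size (4x4 or 3x3 here) of the square with
# the largest total power.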
n = 300
serial = int(input())
grid = np.array([[int(str(((x+10)*y+serial)*(x+10))[-3])-5 for y in range(1, n+1)] for x in range(1, n+1)])
coord = (0, 0)
mVal, dim = 0, 0
for d in range(4, 2, -1):
squares = sum(grid[x:x-d+1 or None, y:y-d+1 or None] for x in range(d) for y in range(d))
val = int(squares.max())
if mVal < val:
coord = np.where(squares == val)
mVal = val
dim = d
x,y = coord[0][0], coord[1][0]
print(f'({x+1}, {y+1}) X {dim} = {mVal}')
|
[
"numpy.where"
] |
[((374, 398), 'numpy.where', 'np.where', (['(squares == val)'], {}), '(squares == val)\n', (382, 398), True, 'import numpy as np\n')]
|
import multiprocessing as p
import time
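# Overview: submit `threads` jobs to a process pool with apply_async; each completion
# callback bumps the shared counter, and main() sleeps in a loop until every job has
# reported back. Prefixing the opening triple quote in doit() with '#' turns its
# docstring into a real busy loop for checking CPU parallelism.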
def doit():
''' check CPU parallelism
while True:
pass
#'''
time.sleep(1)
    print('concurrent')
count=0
def main():
def listener(x):
global count
count+=1
time.sleep(1)
print(count)
threads=5
pool=p.Pool()
for i in range(threads):
pool.apply_async(doit, callback=listener)
# '''
while threads!=count:
time.sleep(1)
#'''
main()
|
[
"multiprocessing.Pool",
"time.sleep"
] |
[((127, 140), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (137, 140), False, 'import time\n'), ((306, 314), 'multiprocessing.Pool', 'p.Pool', ([], {}), '()\n', (312, 314), True, 'import multiprocessing as p\n'), ((246, 259), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (256, 259), False, 'import time\n'), ((440, 453), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (450, 453), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 29 18:17:51 2014
@author: tvu
"""
def classify(features_train, labels_train):
    ### your code goes here--should return a trained decision tree classifier
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf = clf.fit(features_train, labels_train)
return clf
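# Usage sketch (variable names are placeholders):
#   clf = classify(features_train, labels_train)
#   predictions = clf.predict(features_test)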
|
[
"sklearn.tree.DecisionTreeClassifier"
] |
[((248, 277), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (275, 277), False, 'from sklearn import tree\n')]
|
"""Check the maximum length of stay of donors in MIMIC-III.
If it is <48 it does not affect our study."""
import os
import pandas as pd
df = pd.read_csv('ADMISSIONS.csv')
df = df.loc[(df['DIAGNOSIS'].str.lower() == 'organ donor') | (df['DIAGNOSIS'].str.lower() == 'organ donor account')]
files = os.listdir('root')
ods = list(df['SUBJECT_ID'])
los_list = []
for od in ods:
try:
df_tmp = pd.read_csv(os.path.join('root', str(od), 'stays.csv'))
los_list += list(df_tmp['LOS'].values)
except:
pass
print(max(los_list))
"""
Result: 37.2832
"""
|
[
"pandas.read_csv",
"os.listdir"
] |
[((143, 172), 'pandas.read_csv', 'pd.read_csv', (['"""ADMISSIONS.csv"""'], {}), "('ADMISSIONS.csv')\n", (154, 172), True, 'import pandas as pd\n'), ((299, 317), 'os.listdir', 'os.listdir', (['"""root"""'], {}), "('root')\n", (309, 317), False, 'import os\n')]
|
from django.shortcuts import render, HttpResponse
from datetime import date
from .models import Email
import json
# Create your views here.
def getEmail(request):
if request.method=="POST":
email = request.POST.get('email')
Email.objects.create(
email = email,
register_date = date.today(),
)
res = {'ret':0, 'msg':'ok'}
return HttpResponse(json.dumps(res),content_type='application/json')
return render(request,'index.html')
|
[
"django.shortcuts.render",
"datetime.date.today",
"json.dumps"
] |
[((469, 498), 'django.shortcuts.render', 'render', (['request', '"""index.html"""'], {}), "(request, 'index.html')\n", (475, 498), False, 'from django.shortcuts import render, HttpResponse\n'), ((409, 424), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (419, 424), False, 'import json\n'), ((321, 333), 'datetime.date.today', 'date.today', ([], {}), '()\n', (331, 333), False, 'from datetime import date\n')]
|
# Created by Hansi at 12/22/2021
import re
import demoji
from nltk import TweetTokenizer
from sklearn.model_selection import StratifiedShuffleSplit
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', '#', '*',
'+', '\\', '•', '~', '@', '£',
'·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█',
'½', 'à', '…',
'“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥',
'▓', '—', '‹', '─',
'▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾',
'Ã', '⋅', '‘', '∞',
'∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹',
'≤', '‡', '√', '..', '...', '…']
def remove_links(sentence, substitute=''):
"""
Method to remove links in the given text
parameters
-----------
:param sentence: str
:param substitute: str
which to replace link
:return: str
String without links
"""
sentence = re.sub('https?:\/\/\S+', substitute, sentence, flags=re.MULTILINE)
return sentence.strip()
def remove_repeating_characters(sentence):
"""
    remove non-alphanumeric characters which repeat more than 3 times by its 3 occurrence (e.g. ----- to ---)
:param sentence:
:return:
"""
sentence = re.sub('(\W)\\1{3,}', '\\1', sentence)
return sentence.strip()
def remove_retweet_notations(sentence):
"""
Method to remove retweet notations in the given text
parameters
-----------
:param sentence: str
:return: str
String without retweet notations
"""
updated_sentence = re.sub(r'RT @[a-zA-Z0-9_/-]*:', '', sentence)
return updated_sentence.strip()
def add_emoji_text(x):
"""
    Convert emoji to text
:param x: str
:return: str
String where emojis are replaced by text
"""
emoji_text = demoji.findall(x)
for em in emoji_text.keys():
x = x.replace(em, ' ' + emoji_text[em] + ' ')
x = ' '.join(x.split())
return x
def preprocess_data(text, preserve_case=False, emoji_to_text=False):
"""
A Pipeline to preprocess data
:param text: str
:param preserve_case: boolean, optional
:param emoji_to_text: boolean, optional
:return: str
"""
text = text.replace("\n", " ")
text = remove_links(text, substitute='')
text = remove_retweet_notations(text)
text = remove_repeating_characters(text)
if emoji_to_text:
text = add_emoji_text(text)
# tokenize and lower case
tknzr = TweetTokenizer(preserve_case=preserve_case, reduce_len=True, strip_handles=False)
tokens = tknzr.tokenize(text)
text = " ".join(tokens)
# text.replace(symbol, "#") # remove # in hash tags
# remove white spaces at the beginning and end of the text
text = text.strip()
# remove extra whitespace, newline, tab
text = ' '.join(text.split())
return text
def split_data(df, seed, label_column='label', test_size=0.1):
"""
StratifiedShuffleSplit the given DataFrame
:param df: DataFrame
:param seed: int
:param label_column: str
:param test_size: float
:return: DataFrame, DataFrame
train and test
"""
y = df[label_column]
sss = StratifiedShuffleSplit(n_splits=1, test_size=test_size, random_state=seed)
train_index, test_index = next(sss.split(df, y))
train = df.iloc[train_index]
test = df.iloc[test_index]
return train, test
|
[
"sklearn.model_selection.StratifiedShuffleSplit",
"demoji.findall",
"nltk.TweetTokenizer",
"re.sub"
] |
[((1200, 1269), 're.sub', 're.sub', (['"""https?:\\\\/\\\\/\\\\S+"""', 'substitute', 'sentence'], {'flags': 're.MULTILINE'}), "('https?:\\\\/\\\\/\\\\S+', substitute, sentence, flags=re.MULTILINE)\n", (1206, 1269), False, 'import re\n'), ((1516, 1555), 're.sub', 're.sub', (['"""(\\\\W)\\\\1{3,}"""', '"""\\\\1"""', 'sentence'], {}), "('(\\\\W)\\\\1{3,}', '\\\\1', sentence)\n", (1522, 1555), False, 'import re\n'), ((1835, 1879), 're.sub', 're.sub', (['"""RT @[a-zA-Z0-9_/-]*:"""', '""""""', 'sentence'], {}), "('RT @[a-zA-Z0-9_/-]*:', '', sentence)\n", (1841, 1879), False, 'import re\n'), ((2084, 2101), 'demoji.findall', 'demoji.findall', (['x'], {}), '(x)\n', (2098, 2101), False, 'import demoji\n'), ((2743, 2829), 'nltk.TweetTokenizer', 'TweetTokenizer', ([], {'preserve_case': 'preserve_case', 'reduce_len': '(True)', 'strip_handles': '(False)'}), '(preserve_case=preserve_case, reduce_len=True, strip_handles=\n False)\n', (2757, 2829), False, 'from nltk import TweetTokenizer\n'), ((3448, 3522), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'test_size': 'test_size', 'random_state': 'seed'}), '(n_splits=1, test_size=test_size, random_state=seed)\n', (3470, 3522), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n')]
|
from django.test import TestCase
from services.models import (
Service,
ServiceStatusChoices,
ServiceRelation,
ServiceRealtionChoice,
Link,
LinkTypeChoice,
)
from tenancy.models import Tenant
from platforms.models import Platform
class ServiceModelTest(TestCase):
def setUp(self):
self.tenant_owner = Tenant.objects.create(name="Acme Corp.")
self.tenant_operator = Tenant.objects.create(name="Operator Incl.")
self.platform = Platform.objects.create(
name="Road Runner Cloud", tenant=self.tenant_owner
)
def test_slug_is_generated_on_save(self):
service = Service(
name="Prometheus",
operator=self.tenant_operator,
owner=self.tenant_owner,
platform=self.platform,
)
self.assertEquals("", service.slug)
service.save()
self.assertEquals("prometheus", service.slug)
def test_service_is_active_by_default(self):
service = Service(
name="Prometheus",
operator=self.tenant_operator,
owner=self.tenant_owner,
platform=self.platform,
)
self.assertEquals(ServiceStatusChoices.ACTIVE, service.status)
def test_service_has_related_services(self):
source = Service.objects.create(
name="Source",
operator=self.tenant_operator,
owner=self.tenant_owner,
platform=self.platform,
)
dest = Service.objects.create(
name="Dest",
operator=self.tenant_operator,
owner=self.tenant_owner,
platform=self.platform,
)
ServiceRelation.objects.create(
source=source,
relation=ServiceRealtionChoice.RELATED,
dest=dest,
comment="test",
)
inbound_list = dest.get_inbound_relations()
self.assertEqual(1, len(inbound_list))
self.assertEqual("test", inbound_list.first().comment)
self.assertEqual("Source", inbound_list.first().source.name)
outbound_list = source.get_outbound_relations()
self.assertEqual(1, len(outbound_list))
self.assertEqual("test", outbound_list.first().comment)
self.assertEqual("Dest", outbound_list.first().dest.name)
def test_service_has_link(self):
svc = Service.objects.create(
name="Service",
operator=self.tenant_operator,
owner=self.tenant_owner,
platform=self.platform,
)
link = Link.objects.create(
link_type=LinkTypeChoice.WEBSITE,
url="http://example.com",
description="My fancy Website",
service=svc,
)
self.assertEqual(svc.links.first().url, "http://example.com")
self.assertEqual(svc.links.first().link_type, LinkTypeChoice.WEBSITE)
self.assertEqual(svc.links.first().description, "My fancy Website")
|
[
"tenancy.models.Tenant.objects.create",
"services.models.Service",
"services.models.ServiceRelation.objects.create",
"platforms.models.Platform.objects.create",
"services.models.Link.objects.create",
"services.models.Service.objects.create"
] |
[((340, 380), 'tenancy.models.Tenant.objects.create', 'Tenant.objects.create', ([], {'name': '"""Acme Corp."""'}), "(name='Acme Corp.')\n", (361, 380), False, 'from tenancy.models import Tenant\n'), ((412, 456), 'tenancy.models.Tenant.objects.create', 'Tenant.objects.create', ([], {'name': '"""Operator Incl."""'}), "(name='Operator Incl.')\n", (433, 456), False, 'from tenancy.models import Tenant\n'), ((481, 556), 'platforms.models.Platform.objects.create', 'Platform.objects.create', ([], {'name': '"""Road Runner Cloud"""', 'tenant': 'self.tenant_owner'}), "(name='Road Runner Cloud', tenant=self.tenant_owner)\n", (504, 556), False, 'from platforms.models import Platform\n'), ((644, 755), 'services.models.Service', 'Service', ([], {'name': '"""Prometheus"""', 'operator': 'self.tenant_operator', 'owner': 'self.tenant_owner', 'platform': 'self.platform'}), "(name='Prometheus', operator=self.tenant_operator, owner=self.\n tenant_owner, platform=self.platform)\n", (651, 755), False, 'from services.models import Service, ServiceStatusChoices, ServiceRelation, ServiceRealtionChoice, Link, LinkTypeChoice\n'), ((1000, 1111), 'services.models.Service', 'Service', ([], {'name': '"""Prometheus"""', 'operator': 'self.tenant_operator', 'owner': 'self.tenant_owner', 'platform': 'self.platform'}), "(name='Prometheus', operator=self.tenant_operator, owner=self.\n tenant_owner, platform=self.platform)\n", (1007, 1111), False, 'from services.models import Service, ServiceStatusChoices, ServiceRelation, ServiceRealtionChoice, Link, LinkTypeChoice\n'), ((1304, 1426), 'services.models.Service.objects.create', 'Service.objects.create', ([], {'name': '"""Source"""', 'operator': 'self.tenant_operator', 'owner': 'self.tenant_owner', 'platform': 'self.platform'}), "(name='Source', operator=self.tenant_operator, owner=\n self.tenant_owner, platform=self.platform)\n", (1326, 1426), False, 'from services.models import Service, ServiceStatusChoices, ServiceRelation, ServiceRealtionChoice, Link, LinkTypeChoice\n'), ((1496, 1616), 'services.models.Service.objects.create', 'Service.objects.create', ([], {'name': '"""Dest"""', 'operator': 'self.tenant_operator', 'owner': 'self.tenant_owner', 'platform': 'self.platform'}), "(name='Dest', operator=self.tenant_operator, owner=\n self.tenant_owner, platform=self.platform)\n", (1518, 1616), False, 'from services.models import Service, ServiceStatusChoices, ServiceRelation, ServiceRealtionChoice, Link, LinkTypeChoice\n'), ((1680, 1797), 'services.models.ServiceRelation.objects.create', 'ServiceRelation.objects.create', ([], {'source': 'source', 'relation': 'ServiceRealtionChoice.RELATED', 'dest': 'dest', 'comment': '"""test"""'}), "(source=source, relation=\n ServiceRealtionChoice.RELATED, dest=dest, comment='test')\n", (1710, 1797), False, 'from services.models import Service, ServiceStatusChoices, ServiceRelation, ServiceRealtionChoice, Link, LinkTypeChoice\n'), ((2371, 2494), 'services.models.Service.objects.create', 'Service.objects.create', ([], {'name': '"""Service"""', 'operator': 'self.tenant_operator', 'owner': 'self.tenant_owner', 'platform': 'self.platform'}), "(name='Service', operator=self.tenant_operator, owner\n =self.tenant_owner, platform=self.platform)\n", (2393, 2494), False, 'from services.models import Service, ServiceStatusChoices, ServiceRelation, ServiceRealtionChoice, Link, LinkTypeChoice\n'), ((2565, 2694), 'services.models.Link.objects.create', 'Link.objects.create', ([], {'link_type': 'LinkTypeChoice.WEBSITE', 'url': '"""http://example.com"""', 
'description': '"""My fancy Website"""', 'service': 'svc'}), "(link_type=LinkTypeChoice.WEBSITE, url=\n 'http://example.com', description='My fancy Website', service=svc)\n", (2584, 2694), False, 'from services.models import Service, ServiceStatusChoices, ServiceRelation, ServiceRealtionChoice, Link, LinkTypeChoice\n')]
|
import os.path
import datetime
class ErrorLog():
@staticmethod
def writeToFile(log):
time = datetime.datetime.now()
f = open('error_log.txt', 'a', encoding='utf8')
f.write(time.strftime("%Y-%m-%d %H:%M:%S ") + log + '\n')
f.close()
#test = LogFile.checkExistsFile()
#LogFile.writeToFile('test1')
|
[
"datetime.datetime.now"
] |
[((111, 134), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (132, 134), False, 'import datetime\n')]
|
from __future__ import annotations
from functools import reduce
from os import environ
from pathlib import Path
from typing import Collection, List, Optional, Set
from typing_extensions import TypedDict
Team = TypedDict(
"Team", {"name": str, "members": List[str], "children": List[str]}
)
Organisation = TypedDict(
"Organisation", {"name": str, "owners": List[str], "teams": List[Team]}
)
Config = TypedDict("Config", {"organisation": Organisation})
def get_teams(config: Config) -> Collection[Team]:
return config["organisation"]["teams"]
def get_team(config: Config, name: str) -> Optional[Team]:
elems = [x for x in get_teams(config) if x["name"] == name]
assert len(elems) <= 1 # nosec: testing only
if elems:
return elems[0]
return None
def _get_team_exists(config: Config, name: str) -> Team:
team = get_team(config, name)
if not team:
raise RuntimeError("team {} not found".format(name))
return team
def team_name(team: Team) -> str:
return team["name"]
# direct members of a team, not taking members of descendants teams into
# account
def team_direct_members(team: Team) -> Collection[str]:
if team["members"]:
return team["members"]
return []
# effective members of a team (direct members + members of descendants teams)
def team_effective_members(config: Config, team: Team) -> Set[str]:
return reduce(
lambda acc, child: acc | set(team_direct_members(child)),
[_get_team_exists(config, x) for x in team_descendants(config, team)],
set(team_direct_members(team)),
)
def team_children(team: Team) -> Collection[str]:
if "children" in team:
return team["children"]
return []
def team_descendants(config: Config, team: Team) -> Set[str]:
def reduce_function(acc: Set[str], child_name: str) -> Set[str]:
child_team = _get_team_exists(config, child_name)
return acc | team_descendants(config, child_team) | {child_name}
if "children" in team:
return reduce(reduce_function, set(team_children(team)), set())
return set()
def team_parents(config: Config, team: Team) -> Collection[Team]:
def is_parent(entry: Team, team: Team) -> bool:
return team_name(team) in team_children(entry)
return [x for x in get_teams(config) if is_parent(x, team)]
def team_parent(config: Config, team: Team) -> Optional[Team]:
elems = team_parents(config, team)
assert len(elems) <= 1 # nosec: testing only
if elems:
return next(iter(elems))
return None
def team_ancestors(config: Config, team: Team) -> Set[str]:
ancestors = set()
parents = team_parents(config, team)
for parent in parents:
ancestors.update(team_ancestors(config, parent))
ancestors.update(map(team_name, parents))
return ancestors
def user_teams(config: Config, email: str) -> Collection[Team]:
return [x for x in get_teams(config) if email in team_direct_members(x)]
def is_owner(config: Config, email: str) -> bool:
return email in config["organisation"]["owners"]
def default_dir() -> Path:
def parent_dir() -> Path:
xdg_home = environ.get("XDG_CONFIG_HOME")
if xdg_home:
return Path(xdg_home)
home = environ.get("HOME")
if home:
return Path(home) / ".config"
return Path("/")
return parent_dir() / "ghaudit"
|
[
"os.environ.get",
"pathlib.Path",
"typing_extensions.TypedDict"
] |
[((213, 290), 'typing_extensions.TypedDict', 'TypedDict', (['"""Team"""', "{'name': str, 'members': List[str], 'children': List[str]}"], {}), "('Team', {'name': str, 'members': List[str], 'children': List[str]})\n", (222, 290), False, 'from typing_extensions import TypedDict\n'), ((312, 399), 'typing_extensions.TypedDict', 'TypedDict', (['"""Organisation"""', "{'name': str, 'owners': List[str], 'teams': List[Team]}"], {}), "('Organisation', {'name': str, 'owners': List[str], 'teams': List[\n Team]})\n", (321, 399), False, 'from typing_extensions import TypedDict\n'), ((410, 461), 'typing_extensions.TypedDict', 'TypedDict', (['"""Config"""', "{'organisation': Organisation}"], {}), "('Config', {'organisation': Organisation})\n", (419, 461), False, 'from typing_extensions import TypedDict\n'), ((3167, 3197), 'os.environ.get', 'environ.get', (['"""XDG_CONFIG_HOME"""'], {}), "('XDG_CONFIG_HOME')\n", (3178, 3197), False, 'from os import environ\n'), ((3268, 3287), 'os.environ.get', 'environ.get', (['"""HOME"""'], {}), "('HOME')\n", (3279, 3287), False, 'from os import environ\n'), ((3362, 3371), 'pathlib.Path', 'Path', (['"""/"""'], {}), "('/')\n", (3366, 3371), False, 'from pathlib import Path\n'), ((3238, 3252), 'pathlib.Path', 'Path', (['xdg_home'], {}), '(xdg_home)\n', (3242, 3252), False, 'from pathlib import Path\n'), ((3324, 3334), 'pathlib.Path', 'Path', (['home'], {}), '(home)\n', (3328, 3334), False, 'from pathlib import Path\n')]
|
"""
implement a CNN network as mentioned in VIN paper.
Author: <NAME>
"""
import tensorflow as tf
from tensorflow.contrib.layers import conv2d, fully_connected, max_pool2d, dropout
'''
normal structure for each conv layer:conv -> elu -> bn -> pooling.
conv-1: 3x3 s:1, 50 channels .no padding .
max pooling: 2x2
conv-2: 3x3,s:1, 50 channels. no padding .
conv-3: 3x3,s:1. 100 channels. no padding .
max pooling: 2x2
conv-4: 3x3 s:1, 100 channels. no padding.
conv-5: 3x3 s:1, 100 channels. no padding.
fc-1:200-units. followed by elu.
fc-2: 4-units. output is logits.
output: unscaled logits of each actions:
up
left
right
down
=== state space:
s_image of grid map
s_goal
s_curr_pos of current state
'''
TRAINING_CFG = tf.app.flags.FLAGS # alias
class CNNModel:
def __init__(self, cnn_model_cfg,optimizer,is_training, scope="cnn_model"):
self.cnn_model_cfg = cnn_model_cfg
self.optimizer = optimizer
self.scope = scope
self.is_training = is_training
def create_net(self, state_inputs, labels, global_step_tensor):
"""
:param labels:
:param global_step_tensor:
:param state_inputs:
:return:
"""
prev_layer = state_inputs
conv_layers = []
fc_layers = []
with tf.variable_scope(self.scope):
# conv layers
# TODO add batch_norm to input process.
for (n_maps, kernel_size, stride, padding, activation, initializer, normalizer, norm_param,
regularizer, pooling_kernel_size, pooling_stride, keep_prob) in \
zip(
self.cnn_model_cfg.conv_n_feature_maps, self.cnn_model_cfg.conv_kernel_sizes,
self.cnn_model_cfg.conv_strides, self.cnn_model_cfg.conv_paddings, self.cnn_model_cfg.conv_activations,
self.cnn_model_cfg.conv_initializers, self.cnn_model_cfg.conv_normalizers, self.cnn_model_cfg.conv_norm_params,
self.cnn_model_cfg.conv_regularizers, self.cnn_model_cfg.pooling_kernel_sizes, self.cnn_model_cfg.pooling_strides,
self.cnn_model_cfg.conv_dropout_keep_probs):
prev_layer = conv2d(prev_layer, num_outputs=n_maps, kernel_size=kernel_size,
stride=stride, padding=padding,
activation_fn=activation,
data_format='NHWC',
normalizer_fn=normalizer,
normalizer_params=norm_param,
weights_initializer=initializer,
weights_regularizer=regularizer,
trainable=True)
if pooling_kernel_size:
# max pooling only
prev_layer = max_pool2d(prev_layer, pooling_kernel_size, pooling_stride,
padding='VALID', data_format='NHWC')
if keep_prob < 1:
prev_layer = dropout(prev_layer, keep_prob,is_training=self.is_training)
conv_layers.append(prev_layer)
##fc layers.inc output layer.
# flatten the output of last conv layer to (batch_size, n_fc_in)
prev_layer = tf.reshape(conv_layers[-1], shape=[-1,conv_layers[-1].shape[1] * conv_layers[-1].shape[2] * conv_layers[-1].shape[3]])
for n_unit, activation, initializer, normalizer, norm_param, regularizer,keep_prob \
in zip(
self.cnn_model_cfg.n_fc_units, self.cnn_model_cfg.fc_activations, self.cnn_model_cfg.fc_initializers,
self.cnn_model_cfg.fc_normalizers, self.cnn_model_cfg.fc_norm_params, self.cnn_model_cfg.fc_regularizers,
self.cnn_model_cfg.fc_dropout_keep_probs):
prev_layer = fully_connected(prev_layer, num_outputs=n_unit,
activation_fn=activation,
weights_initializer=initializer,
normalizer_fn=normalizer,
normalizer_params=norm_param,
weights_regularizer=regularizer,
trainable=True)
if keep_prob < 1:
prev_layer = dropout(prev_layer, keep_prob, is_training=self.is_training)
fc_layers.append(prev_layer)
# logits should be [batch_size, num_action]
logits = prev_layer
reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
cross_entropy_loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
predicted_classes = tf.argmax(logits, axis=1)
total_loss = tf.add_n(reg_loss + [cross_entropy_loss], name='total_loss')
train_op = self.optimizer.minimize(total_loss, global_step_tensor)
total_loss_mean = tf.reduce_mean(total_loss)
with tf.name_scope('loss'):
tf.summary.scalar(name='total_loss', tensor=total_loss_mean,
collections=[TRAINING_CFG.summary_keys])
# with tf.name_scope('d_policy_loss_da_grads'):
# d_policy_loss_da_grads=tf.gradients(ys=policy_loss,xs=actor.action_bounded_tensors)
# for i in range(len(dq_da_grads)):
# tf.summary.scalar(name='d_policy_loss_da_grads_'+str(i)+'norm',tensor=tf.norm(d_policy_loss_da_grads[i]),
# collections=[self.cnn_model_cfg.actor_summary_keys])
# == end with variable_scope() ==
return train_op, total_loss, predicted_classes
|
[
"tensorflow.contrib.layers.max_pool2d",
"tensorflow.add_n",
"tensorflow.losses.sparse_softmax_cross_entropy",
"tensorflow.summary.scalar",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.get_collection",
"tensorflow.argmax",
"tensorflow.reshape",
"tensorflow.reduce_mean",
"tensorflow.variable_scope",
"tensorflow.contrib.layers.conv2d",
"tensorflow.contrib.layers.dropout",
"tensorflow.name_scope"
] |
[((1369, 1398), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.scope'], {}), '(self.scope)\n', (1386, 1398), True, 'import tensorflow as tf\n'), ((3389, 3512), 'tensorflow.reshape', 'tf.reshape', (['conv_layers[-1]'], {'shape': '[-1, conv_layers[-1].shape[1] * conv_layers[-1].shape[2] * conv_layers[-1].\n shape[3]]'}), '(conv_layers[-1], shape=[-1, conv_layers[-1].shape[1] *\n conv_layers[-1].shape[2] * conv_layers[-1].shape[3]])\n', (3399, 3512), True, 'import tensorflow as tf\n'), ((4741, 4794), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.REGULARIZATION_LOSSES'], {}), '(tf.GraphKeys.REGULARIZATION_LOSSES)\n', (4758, 4794), True, 'import tensorflow as tf\n'), ((4828, 4882), 'tensorflow.losses.sparse_softmax_cross_entropy', 'tf.losses.sparse_softmax_cross_entropy', (['labels', 'logits'], {}), '(labels, logits)\n', (4866, 4882), True, 'import tensorflow as tf\n'), ((4915, 4940), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(1)'}), '(logits, axis=1)\n', (4924, 4940), True, 'import tensorflow as tf\n'), ((4966, 5026), 'tensorflow.add_n', 'tf.add_n', (['(reg_loss + [cross_entropy_loss])'], {'name': '"""total_loss"""'}), "(reg_loss + [cross_entropy_loss], name='total_loss')\n", (4974, 5026), True, 'import tensorflow as tf\n'), ((5137, 5163), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['total_loss'], {}), '(total_loss)\n', (5151, 5163), True, 'import tensorflow as tf\n'), ((2249, 2545), 'tensorflow.contrib.layers.conv2d', 'conv2d', (['prev_layer'], {'num_outputs': 'n_maps', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'activation_fn': 'activation', 'data_format': '"""NHWC"""', 'normalizer_fn': 'normalizer', 'normalizer_params': 'norm_param', 'weights_initializer': 'initializer', 'weights_regularizer': 'regularizer', 'trainable': '(True)'}), "(prev_layer, num_outputs=n_maps, kernel_size=kernel_size, stride=\n stride, padding=padding, activation_fn=activation, data_format='NHWC',\n normalizer_fn=normalizer, normalizer_params=norm_param,\n weights_initializer=initializer, weights_regularizer=regularizer,\n trainable=True)\n", (2255, 2545), False, 'from tensorflow.contrib.layers import conv2d, fully_connected, max_pool2d, dropout\n'), ((3973, 4196), 'tensorflow.contrib.layers.fully_connected', 'fully_connected', (['prev_layer'], {'num_outputs': 'n_unit', 'activation_fn': 'activation', 'weights_initializer': 'initializer', 'normalizer_fn': 'normalizer', 'normalizer_params': 'norm_param', 'weights_regularizer': 'regularizer', 'trainable': '(True)'}), '(prev_layer, num_outputs=n_unit, activation_fn=activation,\n weights_initializer=initializer, normalizer_fn=normalizer,\n normalizer_params=norm_param, weights_regularizer=regularizer,\n trainable=True)\n', (3988, 4196), False, 'from tensorflow.contrib.layers import conv2d, fully_connected, max_pool2d, dropout\n'), ((5181, 5202), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (5194, 5202), True, 'import tensorflow as tf\n'), ((5220, 5326), 'tensorflow.summary.scalar', 'tf.summary.scalar', ([], {'name': '"""total_loss"""', 'tensor': 'total_loss_mean', 'collections': '[TRAINING_CFG.summary_keys]'}), "(name='total_loss', tensor=total_loss_mean, collections=[\n TRAINING_CFG.summary_keys])\n", (5237, 5326), True, 'import tensorflow as tf\n'), ((2929, 3029), 'tensorflow.contrib.layers.max_pool2d', 'max_pool2d', (['prev_layer', 'pooling_kernel_size', 'pooling_stride'], {'padding': '"""VALID"""', 'data_format': '"""NHWC"""'}), "(prev_layer, 
pooling_kernel_size, pooling_stride, padding='VALID',\n data_format='NHWC')\n", (2939, 3029), False, 'from tensorflow.contrib.layers import conv2d, fully_connected, max_pool2d, dropout\n'), ((3137, 3197), 'tensorflow.contrib.layers.dropout', 'dropout', (['prev_layer', 'keep_prob'], {'is_training': 'self.is_training'}), '(prev_layer, keep_prob, is_training=self.is_training)\n', (3144, 3197), False, 'from tensorflow.contrib.layers import conv2d, fully_connected, max_pool2d, dropout\n'), ((4522, 4582), 'tensorflow.contrib.layers.dropout', 'dropout', (['prev_layer', 'keep_prob'], {'is_training': 'self.is_training'}), '(prev_layer, keep_prob, is_training=self.is_training)\n', (4529, 4582), False, 'from tensorflow.contrib.layers import conv2d, fully_connected, max_pool2d, dropout\n')]
|
# SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# factory/tool specific condorLogs helper
#
import binascii
import gzip
import io
import mmap
import os.path
import re
import time
from glideinwms.factory import glideFactoryLogParser
from glideinwms.lib import condorLogParser
from glideinwms.lib.defaults import BINARY_ENCODING, force_bytes
# get the list of jobs that were active at a certain time
def get_glideins(log_dir_name, date_arr, time_arr):
glidein_list = []
cldata = glideFactoryLogParser.dirSummaryTimingsOutFull(log_dir_name, cache_dir=None)
cldata.load(active_only=False)
glidein_data = cldata.data["Completed"] # I am interested only in the completed ones
ref_ctime = time.mktime(date_arr + time_arr + (0, 0, -1))
for glidein_el in glidein_data:
glidein_id, fistTimeStr, runningStartTimeStr, lastTimeStr = glidein_el
runningStartTime = condorLogParser.rawTime2cTimeLastYear(runningStartTimeStr)
if runningStartTime > ref_ctime:
continue # not one of them, started after
lastTime = condorLogParser.rawTime2cTimeLastYear(lastTimeStr)
if lastTime < ref_ctime:
continue # not one of them, ended before
glidein_list.append(glidein_id)
return glidein_list
# get the list of log files for an entry that were active at a certain time
def get_glidein_logs_entry(factory_dir, entry, date_arr, time_arr, ext="err"):
log_list = []
log_dir_name = os.path.join(factory_dir, "entry_%s/log" % entry)
glidein_list = get_glideins(log_dir_name, date_arr, time_arr)
for glidein_id in glidein_list:
glidein_log_file = "job.%i.%i." % condorLogParser.rawJobId2Nr(glidein_id)
glidein_log_file += ext
glidein_log_filepath = os.path.join(log_dir_name, glidein_log_file)
if os.path.exists(glidein_log_filepath):
log_list.append(glidein_log_filepath)
return log_list
# get the list of log files for an entry that were active at a certain time
def get_glidein_logs(factory_dir, entries, date_arr, time_arr, ext="err"):
log_list = []
for entry in entries:
entry_log_list = get_glidein_logs_entry(factory_dir, entry, date_arr, time_arr, ext)
log_list += entry_log_list
return log_list
# extract the blob from a glidein log file starting from position
def get_Compressed_raw(log_fname, start_str, start_pos=0):
SL_START_RE = re.compile(b"%s\nbegin-base64 644 -\n" % force_bytes(start_str, BINARY_ENCODING), re.M | re.DOTALL)
size = os.path.getsize(log_fname)
if size == 0:
return "" # mmap would fail... and I know I will not find anything anyhow
with open(log_fname) as fd:
buf = mmap.mmap(fd.fileno(), size, access=mmap.ACCESS_READ)
try:
# first find the header that delimits the log in the file
start_re = SL_START_RE.search(buf, 0)
if start_re is None:
return "" # no StartLog section
log_start_idx = start_re.end()
# find where it ends
log_end_idx = buf.find(b"\n====", log_start_idx)
if log_end_idx < 0: # up to the end of the file
return buf[log_start_idx:].decode(BINARY_ENCODING)
else:
return buf[log_start_idx:log_end_idx].decode(BINARY_ENCODING)
finally:
buf.close()
# extract the blob from a glidein log file
def get_Compressed(log_fname, start_str):
raw_data = get_Compressed_raw(log_fname, start_str)
if raw_data != "":
gzip_data = binascii.a2b_base64(raw_data)
del raw_data
data_fd = gzip.GzipFile(fileobj=io.BytesIO(gzip_data))
data = data_fd.read().decode(BINARY_ENCODING)
else:
data = raw_data
return data
# extract the blob from a glidein log file
def get_Simple(log_fname, start_str, end_str):
SL_START_RE = re.compile(force_bytes(start_str, BINARY_ENCODING) + b"\n", re.M | re.DOTALL)
SL_END_RE = re.compile(end_str, re.M | re.DOTALL)
size = os.path.getsize(log_fname)
if size == 0:
return "" # mmap would fail... and I know I will not find anything anyhow
with open(log_fname) as fd:
buf = mmap.mmap(fd.fileno(), size, access=mmap.ACCESS_READ)
try:
# first find the header that delimits the log in the file
start_re = SL_START_RE.search(buf, 0)
if start_re is None:
return "" # no StartLog section
log_start_idx = start_re.end()
# find where it ends
log_end_idx = SL_END_RE.search(buf, log_start_idx)
if log_end_idx is None: # up to the end of the file
return buf[log_start_idx:].decode(BINARY_ENCODING)
else:
return buf[log_start_idx : log_end_idx.start()].decode(BINARY_ENCODING)
finally:
buf.close()
# extract the Condor Log from a glidein log file
# condor_log_id should be something like "StartdLog"
def get_CondorLog(log_fname, condor_log_id):
start_str = "^%s\n======== gzip . uuencode =============" % condor_log_id
return get_Compressed(log_fname, start_str)
# extract the XML Result from a glidein log file
def get_XMLResult(log_fname):
start_str = "^=== Encoded XML description of glidein activity ==="
s = get_Compressed(log_fname, start_str)
if s != "":
return s
# not found, try the uncompressed version
start_str = "^=== XML description of glidein activity ==="
end_str = "^=== End XML description of glidein activity ==="
return get_Simple(log_fname, start_str, end_str)
# extract slot names
def get_StarterSlotNames(log_fname, condor_log_id="(StarterLog.slot[0-9]*[_]*[0-9]*)"):
start_str = "^%s\n======== gzip . uuencode =============" % condor_log_id
SL_START_RE = re.compile(b"%s\nbegin-base64 644 -\n" % force_bytes(start_str, BINARY_ENCODING), re.M | re.DOTALL)
size = os.path.getsize(log_fname)
if size == 0:
return "" # mmap would fail... and I know I will not find anything anyhow
with open(log_fname) as fd:
buf = mmap.mmap(fd.fileno(), size, access=mmap.ACCESS_READ)
try:
strings = [s.decode(BINARY_ENCODING) for s in SL_START_RE.findall(buf, 0)]
return strings
finally:
buf.close()
|
[
"io.BytesIO",
"glideinwms.factory.glideFactoryLogParser.dirSummaryTimingsOutFull",
"re.compile",
"glideinwms.lib.defaults.force_bytes",
"time.mktime",
"glideinwms.lib.condorLogParser.rawJobId2Nr",
"binascii.a2b_base64",
"glideinwms.lib.condorLogParser.rawTime2cTimeLastYear"
] |
[((606, 682), 'glideinwms.factory.glideFactoryLogParser.dirSummaryTimingsOutFull', 'glideFactoryLogParser.dirSummaryTimingsOutFull', (['log_dir_name'], {'cache_dir': 'None'}), '(log_dir_name, cache_dir=None)\n', (652, 682), False, 'from glideinwms.factory import glideFactoryLogParser\n'), ((825, 870), 'time.mktime', 'time.mktime', (['(date_arr + time_arr + (0, 0, -1))'], {}), '(date_arr + time_arr + (0, 0, -1))\n', (836, 870), False, 'import time\n'), ((4104, 4141), 're.compile', 're.compile', (['end_str', '(re.M | re.DOTALL)'], {}), '(end_str, re.M | re.DOTALL)\n', (4114, 4141), False, 'import re\n'), ((1014, 1072), 'glideinwms.lib.condorLogParser.rawTime2cTimeLastYear', 'condorLogParser.rawTime2cTimeLastYear', (['runningStartTimeStr'], {}), '(runningStartTimeStr)\n', (1051, 1072), False, 'from glideinwms.lib import condorLogParser\n'), ((1188, 1238), 'glideinwms.lib.condorLogParser.rawTime2cTimeLastYear', 'condorLogParser.rawTime2cTimeLastYear', (['lastTimeStr'], {}), '(lastTimeStr)\n', (1225, 1238), False, 'from glideinwms.lib import condorLogParser\n'), ((3682, 3711), 'binascii.a2b_base64', 'binascii.a2b_base64', (['raw_data'], {}), '(raw_data)\n', (3701, 3711), False, 'import binascii\n'), ((1780, 1819), 'glideinwms.lib.condorLogParser.rawJobId2Nr', 'condorLogParser.rawJobId2Nr', (['glidein_id'], {}), '(glidein_id)\n', (1807, 1819), False, 'from glideinwms.lib import condorLogParser\n'), ((2580, 2619), 'glideinwms.lib.defaults.force_bytes', 'force_bytes', (['start_str', 'BINARY_ENCODING'], {}), '(start_str, BINARY_ENCODING)\n', (2591, 2619), False, 'from glideinwms.lib.defaults import BINARY_ENCODING, force_bytes\n'), ((4021, 4060), 'glideinwms.lib.defaults.force_bytes', 'force_bytes', (['start_str', 'BINARY_ENCODING'], {}), '(start_str, BINARY_ENCODING)\n', (4032, 4060), False, 'from glideinwms.lib.defaults import BINARY_ENCODING, force_bytes\n'), ((5995, 6034), 'glideinwms.lib.defaults.force_bytes', 'force_bytes', (['start_str', 'BINARY_ENCODING'], {}), '(start_str, BINARY_ENCODING)\n', (6006, 6034), False, 'from glideinwms.lib.defaults import BINARY_ENCODING, force_bytes\n'), ((3773, 3794), 'io.BytesIO', 'io.BytesIO', (['gzip_data'], {}), '(gzip_data)\n', (3783, 3794), False, 'import io\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2012 <NAME> <mkalewski at cs.put.poznan.pl>
#
# This file is a part of the Simple Network Simulator (sim2net) project.
# USE, MODIFICATION, COPYING AND DISTRIBUTION OF THIS SOFTWARE IS SUBJECT TO
# THE TERMS AND CONDITIONS OF THE MIT LICENSE. YOU SHOULD HAVE RECEIVED A COPY
# OF THE MIT LICENSE ALONG WITH THIS SOFTWARE; IF NOT, YOU CAN DOWNLOAD A COPY
# FROM HTTP://WWW.OPENSOURCE.ORG/.
#
# For bug reports, feature and support requests please visit
# <https://github.com/mkalewski/sim2net/issues>.
"""
Provides an implementation of the normal speed distribution. In this case a
speed of a node is assigned at random with the normal, i.e. Gaussian,
probability distribution.
"""
from sim2net.speed._speed import Speed
from sim2net.utility.validation import check_argument_type
__docformat__ = 'reStructuredText'
class Normal(Speed):
"""
This class implements the normal speed distribution that assigns node's
speeds with the Gaussian probability distribution.
"""
def __init__(self, mean=0.0, standard_deviation=0.2):
"""
(Defaults to **standard normal distribution**.)
*Parameters*:
- **mean** (`float`): a value of the expectation (default: `0.0`);
- **standard_deviation** (`float`): a value of the standard
deviation (default: `0.2`).
"""
super(Normal, self).__init__(Normal.__name__)
check_argument_type(Normal.__name__, 'mean', float, mean, self.logger)
self.__mean = float(mean)
check_argument_type(Normal.__name__, 'standard_deviation', float,
standard_deviation, self.logger)
self.__standard_deviation = float(standard_deviation)
self.__current_speed = None
self.get_new()
@property
def mean(self):
"""
(*Property*) A value of the expectation of type `float`.
"""
return self.__mean
@property
def current(self):
"""
(*Property*) A value of the current speed of type `float` (or `None`
if the value has yet not been assigned).
"""
return self.__current_speed
def get_new(self):
"""
Assigns a new speed value.
.. warning::
Depending on distribution parameters, negative values may be
randomly selected.
*Returns*:
(`float`) the absolute value of a new speed.
"""
self.__current_speed = \
self.random_generator.normal(self.__mean,
self.__standard_deviation)
return self.__current_speed
|
[
"sim2net.utility.validation.check_argument_type"
] |
[((1463, 1533), 'sim2net.utility.validation.check_argument_type', 'check_argument_type', (['Normal.__name__', '"""mean"""', 'float', 'mean', 'self.logger'], {}), "(Normal.__name__, 'mean', float, mean, self.logger)\n", (1482, 1533), False, 'from sim2net.utility.validation import check_argument_type\n'), ((1576, 1678), 'sim2net.utility.validation.check_argument_type', 'check_argument_type', (['Normal.__name__', '"""standard_deviation"""', 'float', 'standard_deviation', 'self.logger'], {}), "(Normal.__name__, 'standard_deviation', float,\n standard_deviation, self.logger)\n", (1595, 1678), False, 'from sim2net.utility.validation import check_argument_type\n')]
|
import pkg_resources
pkg_resources.declare_namespace(__name__)
from .authors import AuthorsMixin
from .images import *
from .publication import PublicationMixin
from .video import EmbeddedVideoMixin
|
[
"pkg_resources.declare_namespace"
] |
[((21, 62), 'pkg_resources.declare_namespace', 'pkg_resources.declare_namespace', (['__name__'], {}), '(__name__)\n', (52, 62), False, 'import pkg_resources\n')]
|