repo_name (stringlengths 7-94) | repo_path (stringlengths 4-237) | repo_head_hexsha (stringlengths 40-40) | content (stringlengths 10-680k) | apis (stringlengths 2-680k) |
---|---|---|---|---|
leobouts/Skyline_top_k_queries | k_values_graph.py | 5f5e8ab8f5e521dc20f33a69dd042917ff5d42f0 | from a_top_k import *
from b_top_k import *
import time
def main():
    # test the generator for the top-k input
    # starting time
    values_k = [1, 2, 5, 10, 20, 50, 100]
    times_topk_join_a = []
    times_topk_join_b = []
    number_of_valid_lines_a = []
    number_of_valid_lines_b = []

    for k in values_k:
        number_of_valid_lines = []
        top_k_a_generator = generate_top_join_a(number_of_valid_lines)
        start_time_a = time.time()
        for i in range(k):
            next(top_k_a_generator)
        number_of_valid_lines_a.append(len(number_of_valid_lines))
        top_k_time_a = time.time() - start_time_a
        times_topk_join_a.append(top_k_time_a)

        number_of_valid_lines = []
        top_k_b_generator = generate_top_join_b(number_of_valid_lines)
        start_time_b = time.time()
        for i in range(k):
            next(top_k_b_generator)
        number_of_valid_lines_b.append(len(number_of_valid_lines))
        top_k_time_b = time.time() - start_time_b
        times_topk_join_b.append(top_k_time_b)

    print(times_topk_join_a)
    print(times_topk_join_b)
    print(number_of_valid_lines_a)
    print(number_of_valid_lines_b)


if __name__ == "__main__":
    main()
| [((453, 464), 'time.time', 'time.time', ([], {}), '()\n', (462, 464), False, 'import time\n'), ((824, 835), 'time.time', 'time.time', ([], {}), '()\n', (833, 835), False, 'import time\n'), ((620, 631), 'time.time', 'time.time', ([], {}), '()\n', (629, 631), False, 'import time\n'), ((991, 1002), 'time.time', 'time.time', ([], {}), '()\n', (1000, 1002), False, 'import time\n')] |
CEOALT1/RefindPlusUDK | AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/fixes/fix_methodattrs.py | 116b957ad735f96fbb6d80a0ba582046960ba164 | """Fix bound method attributes (method.im_? -> method.__?__).
"""
# Author: Christian Heimes
# Local imports
from .. import fixer_base
from ..fixer_util import Name
MAP = {
    "im_func" : "__func__",
    "im_self" : "__self__",
    "im_class" : "__self__.__class__"
    }


class FixMethodattrs(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = """
    power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
    """

    def transform(self, node, results):
        attr = results["attr"][0]
        new = unicode(MAP[attr.value])
        attr.replace(Name(new, prefix=attr.prefix))
| [] |
Renovamen/Text-Classification | models/TextCNN/cnn2d.py | 4a4aa4001c402ed4371ebaabe1393b27794e5992 | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List
class TextCNN2D(nn.Module):
"""
Implementation of 2D version of TextCNN proposed in paper [1].
`Here <https://github.com/yoonkim/CNN_sentence>`_ is the official
implementation of TextCNN.
Parameters
----------
n_classes : int
Number of classes
vocab_size : int
Number of words in the vocabulary
embeddings : torch.Tensor
Word embedding weights
emb_size : int
Size of word embeddings
fine_tune : bool
Allow fine-tuning of embedding layer? (only makes sense when using
pre-trained embeddings)
n_kernels : int
Number of kernels
kernel_sizes : List[int]
Size of each kernel
dropout : float
Dropout
n_channels : int
Number of channels (1 / 2)
References
----------
1. "`Convolutional Neural Networks for Sentence Classification. \
<https://www.aclweb.org/anthology/D14-1181.pdf>`_" Yoon Kim. EMNLP 2014.
"""
def __init__(
self,
n_classes: int,
vocab_size: int,
embeddings: torch.Tensor,
emb_size: int,
fine_tune: bool,
n_kernels: int,
kernel_sizes: List[int],
dropout: float,
n_channels = 1
) -> None:
super(TextCNN2D, self).__init__()
# embedding layer
self.embedding1 = nn.Embedding(vocab_size, emb_size)
self.set_embeddings(embeddings, 1, fine_tune)
if n_channels == 2:
# multichannel: a static channel and a non-static channel
# which means embedding2 is frozen
self.embedding2 = nn.Embedding(vocab_size, emb_size)
self.set_embeddings(embeddings, 1, False)
else:
self.embedding2 = None
# 2d conv layer
self.convs = nn.ModuleList([
nn.Conv2d(
in_channels = n_channels,
out_channels = n_kernels,
kernel_size = (size, emb_size)
)
for size in kernel_sizes
])
self.fc = nn.Linear(len(kernel_sizes) * n_kernels, n_classes)
self.dropout = nn.Dropout(dropout)
self.relu = nn.ReLU()
def set_embeddings(
self,
embeddings: torch.Tensor,
layer_id: int = 1,
fine_tune: bool = True
) -> None:
"""
Set weights for embedding layer
Parameters
----------
embeddings : torch.Tensor
Word embeddings
layer_id : int
Embedding layer 1 or 2 (when adopting multichannel architecture)
fine_tune : bool, optional, default=True
Allow fine-tuning of embedding layer? (only makes sense when using
pre-trained embeddings)
"""
if embeddings is None:
# initialize embedding layer with the uniform distribution
if layer_id == 1:
self.embedding1.weight.data.uniform_(-0.1, 0.1)
else:
self.embedding2.weight.data.uniform_(-0.1, 0.1)
else:
# initialize embedding layer with pre-trained embeddings
if layer_id == 1:
self.embedding1.weight = nn.Parameter(embeddings, requires_grad = fine_tune)
else:
self.embedding2.weight = nn.Parameter(embeddings, requires_grad = fine_tune)
def forward(self, text: torch.Tensor, words_per_sentence: torch.Tensor) -> torch.Tensor:
"""
Parameters
----------
text : torch.Tensor (batch_size, word_pad_len)
Input data
words_per_sentence : torch.Tensor (batch_size)
Sentence lengths
Returns
-------
scores : torch.Tensor (batch_size, n_classes)
Class scores
"""
# word embedding
embeddings = self.embedding1(text).unsqueeze(1) # (batch_size, 1, word_pad_len, emb_size)
# multichannel
if self.embedding2:
embeddings2 = self.embedding2(text).unsqueeze(1) # (batch_size, 1, word_pad_len, emb_size)
embeddings = torch.cat((embeddings, embeddings2), dim = 1) # (batch_size, 2, word_pad_len, emb_size)
# conv
conved = [self.relu(conv(embeddings)).squeeze(3) for conv in self.convs] # [(batch size, n_kernels, word_pad_len - kernel_sizes[n] + 1)]
# pooling
pooled = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in conved] # [(batch size, n_kernels)]
# flatten
flattened = self.dropout(torch.cat(pooled, dim = 1)) # (batch size, n_kernels * len(kernel_sizes))
scores = self.fc(flattened) # (batch size, n_classes)
return scores
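# A minimal usage sketch (not from the original repo; the vocabulary size,
# class count and kernel sizes below are made-up values, chosen only to
# illustrate the expected tensor shapes):
#
#   model = TextCNN2D(
#       n_classes=2, vocab_size=10000, embeddings=None, emb_size=128,
#       fine_tune=True, n_kernels=100, kernel_sizes=[3, 4, 5], dropout=0.5,
#   )
#   text = torch.randint(0, 10000, (8, 50))   # (batch_size, word_pad_len)
#   lengths = torch.full((8,), 50)            # (batch_size,)
#   scores = model(text, lengths)             # (batch_size, n_classes) == (8, 2)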
| [((1447, 1481), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'emb_size'], {}), '(vocab_size, emb_size)\n', (1459, 1481), True, 'import torch.nn as nn\n'), ((2223, 2242), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (2233, 2242), True, 'import torch.nn as nn\n'), ((2263, 2272), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2270, 2272), True, 'import torch.nn as nn\n'), ((1712, 1746), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'emb_size'], {}), '(vocab_size, emb_size)\n', (1724, 1746), True, 'import torch.nn as nn\n'), ((4180, 4223), 'torch.cat', 'torch.cat', (['(embeddings, embeddings2)'], {'dim': '(1)'}), '((embeddings, embeddings2), dim=1)\n', (4189, 4223), False, 'import torch\n'), ((4603, 4627), 'torch.cat', 'torch.cat', (['pooled'], {'dim': '(1)'}), '(pooled, dim=1)\n', (4612, 4627), False, 'import torch\n'), ((1924, 2015), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'n_channels', 'out_channels': 'n_kernels', 'kernel_size': '(size, emb_size)'}), '(in_channels=n_channels, out_channels=n_kernels, kernel_size=(size,\n emb_size))\n', (1933, 2015), True, 'import torch.nn as nn\n'), ((3282, 3331), 'torch.nn.Parameter', 'nn.Parameter', (['embeddings'], {'requires_grad': 'fine_tune'}), '(embeddings, requires_grad=fine_tune)\n', (3294, 3331), True, 'import torch.nn as nn\n'), ((3393, 3442), 'torch.nn.Parameter', 'nn.Parameter', (['embeddings'], {'requires_grad': 'fine_tune'}), '(embeddings, requires_grad=fine_tune)\n', (3405, 3442), True, 'import torch.nn as nn\n')] |
seunghwanly/CODING-TEST | LEVEL2/다리를지나는트럭/solution.py | a820da950c163d399594770199aa2e782d1fbbde | def solution(bridge_length, weight, truck_weights):
    answer = 0
    # { weight, time }
    wait = truck_weights[:]
    bridge = []
    passed = 0
    currWeight = 0
    while True:
        if passed == len(truck_weights) and len(wait) == 0: return answer
        answer += 1
        # sth needs to be passed
        if bridge:
            if bridge[0]['t'] + bridge_length == answer:
                front = bridge.pop(0)
                currWeight -= front['w']
                passed += 1
        # add new truck
        if wait:
            if currWeight + wait[0] <= weight:
                bridge.append({ 'w' : wait[0], 't' : answer })
                currWeight += wait[0]
                wait.pop(0)


# print(solution(2, 10, [7, 4, 5, 6]))
print(solution(100, 100, [10]))
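# For reference (traced by hand against the function above): solution(2, 10, [7, 4, 5, 6])
# returns 8 and solution(100, 100, [10]) returns 101; each answer is the number of
# simulated seconds until the last truck leaves the bridge.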
| [] |
maestro-hybrid-cloud/heat | heat/tests/convergence/framework/testutils.py | 91a4bb3170bd81b1c67a896706851e55709c9b5a | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_log import log as logging
from heat.tests.convergence.framework import reality
from heat.tests.convergence.framework import scenario_template
LOG = logging.getLogger(__name__)
def verify(test, reality, tmpl):
    for name in tmpl.resources:
        rsrc_count = len(reality.resources_by_logical_name(name))
        test.assertEqual(1, rsrc_count,
                         'Found %d copies of resource "%s"' % (rsrc_count,
                                                               name))

    all_rsrcs = reality.all_resources()

    for name, defn in tmpl.resources.items():
        phys_rsrc = reality.resources_by_logical_name(name)[0]
        for prop_name, prop_def in defn.properties.items():
            real_value = reality.resource_properties(phys_rsrc, prop_name)
            if isinstance(prop_def, scenario_template.GetAtt):
                targs = reality.resources_by_logical_name(prop_def.target_name)
                att_value = targs[0].properties_data[prop_def.attr]
                test.assertEqual(att_value, real_value)
            elif isinstance(prop_def, scenario_template.GetRes):
                targs = reality.resources_by_logical_name(prop_def.target_name)
                test.assertEqual(targs[0].nova_instance, real_value)
            else:
                test.assertEqual(prop_def, real_value)

        test.assertEqual(len(defn.properties), len(phys_rsrc.properties_data))

    test.assertEqual(len(tmpl.resources), len(all_rsrcs))


def scenario_globals(procs, testcase):
    return {
        'test': testcase,
        'reality': reality.reality,
        'verify': functools.partial(verify,
                                   testcase,
                                   reality.reality),
        'Template': scenario_template.Template,
        'RsrcDef': scenario_template.RsrcDef,
        'GetRes': scenario_template.GetRes,
        'GetAtt': scenario_template.GetAtt,
        'engine': procs.engine,
        'worker': procs.worker,
    }
| [((752, 779), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (769, 779), True, 'from oslo_log import log as logging\n'), ((1115, 1138), 'heat.tests.convergence.framework.reality.all_resources', 'reality.all_resources', ([], {}), '()\n', (1136, 1138), False, 'from heat.tests.convergence.framework import reality\n'), ((2215, 2267), 'functools.partial', 'functools.partial', (['verify', 'testcase', 'reality.reality'], {}), '(verify, testcase, reality.reality)\n', (2232, 2267), False, 'import functools\n'), ((872, 911), 'heat.tests.convergence.framework.reality.resources_by_logical_name', 'reality.resources_by_logical_name', (['name'], {}), '(name)\n', (905, 911), False, 'from heat.tests.convergence.framework import reality\n'), ((1206, 1245), 'heat.tests.convergence.framework.reality.resources_by_logical_name', 'reality.resources_by_logical_name', (['name'], {}), '(name)\n', (1239, 1245), False, 'from heat.tests.convergence.framework import reality\n'), ((1335, 1384), 'heat.tests.convergence.framework.reality.resource_properties', 'reality.resource_properties', (['phys_rsrc', 'prop_name'], {}), '(phys_rsrc, prop_name)\n', (1362, 1384), False, 'from heat.tests.convergence.framework import reality\n'), ((1473, 1528), 'heat.tests.convergence.framework.reality.resources_by_logical_name', 'reality.resources_by_logical_name', (['prop_def.target_name'], {}), '(prop_def.target_name)\n', (1506, 1528), False, 'from heat.tests.convergence.framework import reality\n'), ((1743, 1798), 'heat.tests.convergence.framework.reality.resources_by_logical_name', 'reality.resources_by_logical_name', (['prop_def.target_name'], {}), '(prop_def.target_name)\n', (1776, 1798), False, 'from heat.tests.convergence.framework import reality\n')] |
AstroShen/fpga21-scaled-tech | device_geometry.py | 8a7016913c18d71844f733bc80a3ceaa2d033ac2 | """Holds the device geometry parameters (Table 5), taken from Wu et al.,
>> A Predictive 3-D Source/Drain Resistance Compact Model and the Impact on 7 nm and Scaled FinFETs<<, 2020, with interpolation for 4 nm. The 16 nm values are taken from PTM HP.
"""
node_names = [16, 7, 5, 4, 3]
GP = [64, 56, 48, 44, 41]
FP = [40, 30, 28, 24, 22]
GL = [20, 18, 16, 15, 14]
FH = [26, 35, 45, 50, 55]
FW = [12, 6.5, 6, 5.5, 5.5]
vdd = [0.85, 0.75, 0.7, 0.65, 0.65]
| [] |
vmthunder/nova | nova/tests/servicegroup/test_zk_driver.py | baf05caab705c5778348d9f275dc541747b7c2de | # Copyright (c) AT&T 2012-2013 Yun Mao <[email protected]>
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the ZooKeeper driver for servicegroup.
You need to install ZooKeeper locally and related dependencies
to run the test. It's unclear how to install python-zookeeper lib
in venv so you might have to run the test without it.
To set up in Ubuntu 12.04:
$ sudo apt-get install zookeeper zookeeperd python-zookeeper
$ sudo pip install evzookeeper
$ nosetests nova.tests.servicegroup.test_zk_driver
"""
import eventlet
from nova import servicegroup
from nova import test
class ZKServiceGroupTestCase(test.NoDBTestCase):
    def setUp(self):
        super(ZKServiceGroupTestCase, self).setUp()
        servicegroup.API._driver = None
        from nova.servicegroup.drivers import zk
        self.flags(servicegroup_driver='zk')
        self.flags(address='localhost:2181', group="zookeeper")
        try:
            zk.ZooKeeperDriver()
        except ImportError:
            self.skipTest("Unable to test due to lack of ZooKeeper")

    def test_join_leave(self):
        self.servicegroup_api = servicegroup.API()
        service_id = {'topic': 'unittest', 'host': 'serviceA'}
        self.servicegroup_api.join(service_id['host'], service_id['topic'])
        self.assertTrue(self.servicegroup_api.service_is_up(service_id))
        self.servicegroup_api.leave(service_id['host'], service_id['topic'])
        # make sure zookeeper is updated and watcher is triggered
        eventlet.sleep(1)
        self.assertFalse(self.servicegroup_api.service_is_up(service_id))

    def test_stop(self):
        self.servicegroup_api = servicegroup.API()
        service_id = {'topic': 'unittest', 'host': 'serviceA'}
        pulse = self.servicegroup_api.join(service_id['host'],
                                           service_id['topic'], None)
        self.assertTrue(self.servicegroup_api.service_is_up(service_id))
        pulse.stop()
        eventlet.sleep(1)
        self.assertFalse(self.servicegroup_api.service_is_up(service_id))
| [((1666, 1684), 'nova.servicegroup.API', 'servicegroup.API', ([], {}), '()\n', (1682, 1684), False, 'from nova import servicegroup\n'), ((2048, 2065), 'eventlet.sleep', 'eventlet.sleep', (['(1)'], {}), '(1)\n', (2062, 2065), False, 'import eventlet\n'), ((2198, 2216), 'nova.servicegroup.API', 'servicegroup.API', ([], {}), '()\n', (2214, 2216), False, 'from nova import servicegroup\n'), ((2513, 2530), 'eventlet.sleep', 'eventlet.sleep', (['(1)'], {}), '(1)\n', (2527, 2530), False, 'import eventlet\n'), ((1484, 1504), 'nova.servicegroup.drivers.zk.ZooKeeperDriver', 'zk.ZooKeeperDriver', ([], {}), '()\n', (1502, 1504), False, 'from nova.servicegroup.drivers import zk\n')] |
lordmauve/chopsticks | tests/test_misc.py | 87c6a5d0049a45db1477a21510cba650f470a8ac | """Tests for miscellaneous properties, such as debuggability."""
import time
from chopsticks.tunnel import Docker
from chopsticks.group import Group
def test_tunnel_repr():
    """Tunnels have a usable repr."""
    tun = Docker('py36', image='python:3.6')
    assert repr(tun) == "Docker('py36')"


def test_group_repr():
    """Groups have a usable repr."""
    grp = Group([
        Docker('py35', image='python:3.5'),
        Docker('py36', image='python:3.6')
    ])
    assert repr(grp) == "Group([Docker('py35'), Docker('py36')])"


def test_group_reuse():
    """We can re-use a group."""
    grp = Group([
        Docker('py35', image='python:3.5'),
        Docker('py36', image='python:3.6')
    ])
    with grp:
        grp.call(time.time)
        grp.call(time.time)
| [((223, 257), 'chopsticks.tunnel.Docker', 'Docker', (['"""py36"""'], {'image': '"""python:3.6"""'}), "('py36', image='python:3.6')\n", (229, 257), False, 'from chopsticks.tunnel import Docker\n'), ((387, 421), 'chopsticks.tunnel.Docker', 'Docker', (['"""py35"""'], {'image': '"""python:3.5"""'}), "('py35', image='python:3.5')\n", (393, 421), False, 'from chopsticks.tunnel import Docker\n'), ((431, 465), 'chopsticks.tunnel.Docker', 'Docker', (['"""py36"""'], {'image': '"""python:3.6"""'}), "('py36', image='python:3.6')\n", (437, 465), False, 'from chopsticks.tunnel import Docker\n'), ((624, 658), 'chopsticks.tunnel.Docker', 'Docker', (['"""py35"""'], {'image': '"""python:3.5"""'}), "('py35', image='python:3.5')\n", (630, 658), False, 'from chopsticks.tunnel import Docker\n'), ((668, 702), 'chopsticks.tunnel.Docker', 'Docker', (['"""py36"""'], {'image': '"""python:3.6"""'}), "('py36', image='python:3.6')\n", (674, 702), False, 'from chopsticks.tunnel import Docker\n')] |
AnnonymousRacoon/Quantum-Random-Walks-to-Solve-Diffusion | Evaluation/PostProcesing.py | 366ac5073cea96b662b934c3657446c9f1aa2f65 | import pandas as pd
import re
import glob
def rebuild_counts_from_csv(path, n_dims, shots):
    df = pd.read_csv(path)
    return rebuild_counts_from_dataframe(dataframe=df, n_dims=n_dims, shots=shots)


def rebuild_counts_from_dataframe(dataframe, n_dims, shots):
    dimension_counts = {}
    for dimension in range(n_dims):
        dimension_counts[dimension] = []
    pde = list(dataframe.probability_density)
    for idx, density in enumerate(pde):
        n_counts = int(density * shots)
        for _ in range(n_counts):
            # print(dataframe["dimension_0"][idx])
            for dimension in range(n_dims):
                dimension_key = "dimension_{}".format(dimension)
                dimension_counts[dimension] += [dataframe[dimension_key][idx]]
    # print(dimension_counts)
    rebuilt_dict = {}
    for dimension in range(n_dims):
        rebuilt_dict[f"d{dimension}"] = dimension_counts[dimension]
    return rebuilt_dict


def rebuild_counts_from_dictionary(dictionary: dict, n_dims, shots):
    dataframe = pd.DataFrame(dictionary)
    return rebuild_counts_from_dataframe(dataframe=dataframe, n_dims=n_dims, shots=shots)


def get_stats_from_counts_dict(results_dict: dict):
    dataframe = pd.DataFrame(results_dict)
    return get_stats_from_counts_dataframe(dataframe)


def get_stats_from_counts_dataframe(counts_dataframe: pd.DataFrame) -> dict:
    results_dict = {}
    results_dict["corr"] = counts_dataframe.corr()
    results_dict["cov"] = counts_dataframe.cov()
    results_dict["mean"] = counts_dataframe.mean()
    results_dict['var'] = counts_dataframe.var()
    return results_dict


def get_n_steps_from_filepath(filepath) -> int:
    filename = filepath.split('/')[-1]
    return int(re.findall(r"\d+_steps", filename)[0].split('_')[0])


def get_n_shots_from_path(path) -> int:
    experiment_dir_name = path.split('/')[-1]
    nshots = int(re.findall(r"\d+shots", experiment_dir_name)[0].split('s')[0])
    return nshots


def get_n_dims_from_path(path) -> int:
    experiment_dir_name = path.split('/')[-1]
    ndims = int(re.findall(r"\d+D_", experiment_dir_name)[0].split('D')[0])
    return ndims


def extract_mean_variance_vs_nsteps(directory_path: str, dimension=0):
    nshots = get_n_shots_from_path(directory_path)
    ndims = get_n_dims_from_path(directory_path)
    assert dimension < ndims, "queried dimension exceeds experiment space"
    files = glob.glob(directory_path + '/*/data/**.csv')
    files.sort(key=get_n_steps_from_filepath)
    n_steps = []
    variance = []
    mean = []
    for filepath in files:
        filename = filepath.split('/')[-1]
        nsteps = int(re.findall(r"\d+_steps", filename)[0].split('_')[0])
        rebuilt_dict = rebuild_counts_from_csv(filepath, n_dims=ndims, shots=nshots)
        stats = get_stats_from_counts_dict(rebuilt_dict)
        variance.append(stats['var'][dimension])
        mean.append(stats['mean'][dimension])
        n_steps.append(nsteps)
    return n_steps, variance, mean
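# A hedged illustration of the path conventions the regexes above assume
# (these concrete names are hypothetical, not taken from the original repo):
#   get_n_steps_from_filepath("results/42_steps_counts.csv")  -> 42
#   get_n_shots_from_path("runs/2D_1024shots")                -> 1024
#   get_n_dims_from_path("runs/2D_1024shots")                 -> 2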
| [((102, 119), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (113, 119), True, 'import pandas as pd\n'), ((1068, 1092), 'pandas.DataFrame', 'pd.DataFrame', (['dictionary'], {}), '(dictionary)\n', (1080, 1092), True, 'import pandas as pd\n'), ((1252, 1278), 'pandas.DataFrame', 'pd.DataFrame', (['results_dict'], {}), '(results_dict)\n', (1264, 1278), True, 'import pandas as pd\n'), ((2435, 2479), 'glob.glob', 'glob.glob', (["(directory_path + '/*/data/**.csv')"], {}), "(directory_path + '/*/data/**.csv')\n", (2444, 2479), False, 'import glob\n'), ((1763, 1797), 're.findall', 're.findall', (['"""\\\\d+_steps"""', 'filename'], {}), "('\\\\d+_steps', filename)\n", (1773, 1797), False, 'import re\n'), ((1918, 1962), 're.findall', 're.findall', (['"""\\\\d+shots"""', 'experiment_dir_name'], {}), "('\\\\d+shots', experiment_dir_name)\n", (1928, 1962), False, 'import re\n'), ((2099, 2140), 're.findall', 're.findall', (['"""\\\\d+D_"""', 'experiment_dir_name'], {}), "('\\\\d+D_', experiment_dir_name)\n", (2109, 2140), False, 'import re\n'), ((2668, 2702), 're.findall', 're.findall', (['"""\\\\d+_steps"""', 'filename'], {}), "('\\\\d+_steps', filename)\n", (2678, 2702), False, 'import re\n')] |
michel-rodrigues/viggio_backend | app/wirecard/tasks.py | f419f0b939209722e1eb1e272f33de172cd5c1f1 | from sentry_sdk import capture_exception
from dateutil.parser import parse
from project_configuration.celery import app
from orders.models import Charge
from request_shoutout.domain.models import Charge as DomainCharge
from .models import WirecardTransactionData
CROSS_SYSTEMS_STATUS_MAPPING = {
    'WAITING': DomainCharge.PROCESSING,
    'IN_ANALYSIS': DomainCharge.PROCESSING,
    'PRE_AUTHORIZED': DomainCharge.PRE_AUTHORIZED,
    'AUTHORIZED': DomainCharge.PAID,
    'CANCELLED': DomainCharge.CANCELLED,
    'REFUNDED': DomainCharge.CANCELLED,
    'REVERSED': DomainCharge.CANCELLED,
    'SETTLED': DomainCharge.PAID,
}


def _update_status(wirecard_status, wirecard_payment_hash):
    (
        Charge.objects
        .filter(order__third_party_transaction__wirecard_payment_hash=wirecard_payment_hash)
        .update(status=CROSS_SYSTEMS_STATUS_MAPPING[wirecard_status])
    )


def _update_payment_event_timestamp(wirecard_transaction, payment_event_timestamp):
    wirecard_transaction.payment_event_last_timestamp = payment_event_timestamp
    wirecard_transaction.save()


def _is_a_delayied_notification(payment_event_timestamp, wirecard_transaction):
    if wirecard_transaction.payment_event_last_timestamp:
        return payment_event_timestamp < wirecard_transaction.payment_event_last_timestamp
    return False


@app.task
def update_payment_status(notification):
    payment_event_timestamp = parse(notification['resource']['payment']['updatedAt'])
    payment_status = notification['resource']['payment']['status']
    wirecard_payment_hash = notification['resource']['payment']['id']
    try:
        wirecard_transaction = (
            WirecardTransactionData.objects.get(wirecard_payment_hash=wirecard_payment_hash)
        )
    # This exception has been raised a few times; since we do not know whether it is caused
    # by failures in the Wirecard sandbox, we avoid breaking the application and just send
    # the exception to Sentry.
    except WirecardTransactionData.DoesNotExist:
        capture_exception()
    else:
        if not _is_a_delayied_notification(payment_event_timestamp, wirecard_transaction):
            _update_status(payment_status, wirecard_payment_hash)
            _update_payment_event_timestamp(wirecard_transaction, payment_event_timestamp)
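# A minimal sketch of the notification payload shape this task expects, inferred
# only from the keys accessed above (the field values are made up):
#   {
#       "resource": {
#           "payment": {
#               "id": "PAY-XXXXXXXX",
#               "status": "AUTHORIZED",
#               "updatedAt": "2019-01-01T12:00:00.000-03:00",
#           }
#       }
#   }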
| [((1418, 1473), 'dateutil.parser.parse', 'parse', (["notification['resource']['payment']['updatedAt']"], {}), "(notification['resource']['payment']['updatedAt'])\n", (1423, 1473), False, 'from dateutil.parser import parse\n'), ((705, 808), 'orders.models.Charge.objects.filter', 'Charge.objects.filter', ([], {'order__third_party_transaction__wirecard_payment_hash': 'wirecard_payment_hash'}), '(order__third_party_transaction__wirecard_payment_hash\n =wirecard_payment_hash)\n', (726, 808), False, 'from orders.models import Charge\n'), ((1998, 2017), 'sentry_sdk.capture_exception', 'capture_exception', ([], {}), '()\n', (2015, 2017), False, 'from sentry_sdk import capture_exception\n')] |
coalpha/coalpha.github.io | py/multiple_dispatch_example.py | 8a620314a5c0bcbe2225d29f733379d181534430 | from typing import *
from multiple_dispatch import multiple_dispatch
@overload
@multiple_dispatch
def add(a: Literal[4, 6, 8], b):
    raise TypeError("No adding 2, 4, 6, or 8!")


@overload
@multiple_dispatch
def add(a: int, b: str):
    return f"int + str = {a} + {b}"


@overload
@multiple_dispatch
def add(a: int, b: int):
    return a + b


@multiple_dispatch
def add(a, b):
    return f"Any + Any = {a} + {b}"


print(add(2, "hello"))
| [] |
Sunyingbin/models | dygraph/alexnet/network.py | 30a7f1757bfad79935aa865f4362a7b38e63a415 | """
Build AlexNet with the dynamic graph (dygraph) API.
"""
import paddle.fluid as fluid
import numpy as np
class Conv2D(fluid.dygraph.Layer):
    def __init__(self,
                 name_scope,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 act=None,
                 use_cudnn=False,
                 param_attr=None,
                 bias_attr=None):
        super(Conv2D, self).__init__(name_scope)

        self._conv2d = fluid.dygraph.Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            param_attr=param_attr,
            bias_attr=bias_attr,
            act=act,
            use_cudnn=use_cudnn)

    def forward(self, inputs):
        x = self._conv2d(inputs)
        return x


class Conv2DPool(fluid.dygraph.Layer):
    def __init__(self,
                 name_scope,
                 num_channels,
                 num_filters,
                 filter_size,
                 pool_size,
                 pool_stride,
                 pool_padding=0,
                 pool_type='max',
                 global_pooling=False,
                 conv_stride=1,
                 conv_padding=0,
                 conv_dilation=1,
                 conv_groups=1,
                 act=None,
                 use_cudnn=False,
                 param_attr=None,
                 bias_attr=None):
        super(Conv2DPool, self).__init__(name_scope)

        self._conv2d = fluid.dygraph.Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=conv_stride,
            padding=conv_padding,
            dilation=conv_dilation,
            groups=conv_groups,
            param_attr=param_attr,
            bias_attr=bias_attr,
            act=act,
            use_cudnn=use_cudnn)

        self._pool2d = fluid.dygraph.Pool2D(
            pool_size=pool_size,
            pool_type=pool_type,
            pool_stride=pool_stride,
            pool_padding=pool_padding,
            global_pooling=global_pooling,
            use_cudnn=use_cudnn)

    def forward(self, inputs):
        x = self._conv2d(inputs)
        x = self._pool2d(x)
        return x


class AlexNet(fluid.dygraph.Layer):
    def __init__(self, name_scope, class_dim):
        super(AlexNet, self).__init__(name_scope)

        self.conv_pool_1 = Conv2DPool(self.full_name(), 3, 64, 11, 3, 2, conv_stride=4, conv_padding=2, act='relu')
        self.conv_pool_2 = Conv2DPool(self.full_name(), 64, 192, 5, 3, 2, conv_stride=1, conv_padding=2, act='relu')
        self.conv_3 = Conv2D(self.full_name(), 192, 384, 3, 1, 1, act='relu')
        self.conv_4 = Conv2D(self.full_name(), 384, 256, 3, 1, 1, act='relu')
        self.conv_pool_5 = Conv2DPool(self.full_name(), 256, 256, 3, 3, 2, conv_stride=1, conv_padding=1, act='relu')
        self.fc6 = fluid.dygraph.FC(self.full_name(), 9216, 4096, act='relu')
        self.fc7 = fluid.dygraph.FC(self.full_name(), 4096, 4096, act='relu')
        self.fc8 = fluid.dygraph.FC(self.full_name(), 4096, class_dim, act='softmax')

    def forward(self, inputs, label=None):
        out = self.conv_pool_1(inputs)
        out = self.conv_pool_2(out)
        out = self.conv_3(out)
        out = self.conv_4(out)
        out = self.conv_pool_5(out)
        out = self.fc6(out)
        out = fluid.layers.dropout(out, 0.5)
        out = self.fc7(out)
        out = fluid.layers.dropout(out, 0.5)
        out = self.fc8(out)

        if label is not None:
            acc = fluid.layers.accuracy(input=out, label=label)
            return out, acc
        else:
            return out


if __name__ == '__main__':
    with fluid.dygraph.guard():
        alexnet = AlexNet('alex-net', 3)
        img = np.zeros([2, 3, 224, 224]).astype('float32')
        img = fluid.dygraph.to_variable(img)
        outs = alexnet(img).numpy()
        print(outs)
| [((586, 836), 'paddle.fluid.dygraph.Conv2D', 'fluid.dygraph.Conv2D', ([], {'num_channels': 'num_channels', 'num_filters': 'num_filters', 'filter_size': 'filter_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'groups': 'groups', 'param_attr': 'param_attr', 'bias_attr': 'bias_attr', 'act': 'act', 'use_cudnn': 'use_cudnn'}), '(num_channels=num_channels, num_filters=num_filters,\n filter_size=filter_size, stride=stride, padding=padding, dilation=\n dilation, groups=groups, param_attr=param_attr, bias_attr=bias_attr,\n act=act, use_cudnn=use_cudnn)\n', (606, 836), True, 'import paddle.fluid as fluid\n'), ((1746, 2015), 'paddle.fluid.dygraph.Conv2D', 'fluid.dygraph.Conv2D', ([], {'num_channels': 'num_channels', 'num_filters': 'num_filters', 'filter_size': 'filter_size', 'stride': 'conv_stride', 'padding': 'conv_padding', 'dilation': 'conv_dilation', 'groups': 'conv_groups', 'param_attr': 'param_attr', 'bias_attr': 'bias_attr', 'act': 'act', 'use_cudnn': 'use_cudnn'}), '(num_channels=num_channels, num_filters=num_filters,\n filter_size=filter_size, stride=conv_stride, padding=conv_padding,\n dilation=conv_dilation, groups=conv_groups, param_attr=param_attr,\n bias_attr=bias_attr, act=act, use_cudnn=use_cudnn)\n', (1766, 2015), True, 'import paddle.fluid as fluid\n'), ((2174, 2349), 'paddle.fluid.dygraph.Pool2D', 'fluid.dygraph.Pool2D', ([], {'pool_size': 'pool_size', 'pool_type': 'pool_type', 'pool_stride': 'pool_stride', 'pool_padding': 'pool_padding', 'global_pooling': 'global_pooling', 'use_cudnn': 'use_cudnn'}), '(pool_size=pool_size, pool_type=pool_type, pool_stride=\n pool_stride, pool_padding=pool_padding, global_pooling=global_pooling,\n use_cudnn=use_cudnn)\n', (2194, 2349), True, 'import paddle.fluid as fluid\n'), ((3700, 3730), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', (['out', '(0.5)'], {}), '(out, 0.5)\n', (3720, 3730), True, 'import paddle.fluid as fluid\n'), ((3775, 3805), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', (['out', '(0.5)'], {}), '(out, 0.5)\n', (3795, 3805), True, 'import paddle.fluid as fluid\n'), ((4041, 4062), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (4060, 4062), True, 'import paddle.fluid as fluid\n'), ((4181, 4211), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['img'], {}), '(img)\n', (4206, 4211), True, 'import paddle.fluid as fluid\n'), ((3887, 3932), 'paddle.fluid.layers.accuracy', 'fluid.layers.accuracy', ([], {'input': 'out', 'label': 'label'}), '(input=out, label=label)\n', (3908, 3932), True, 'import paddle.fluid as fluid\n'), ((4121, 4147), 'numpy.zeros', 'np.zeros', (['[2, 3, 224, 224]'], {}), '([2, 3, 224, 224])\n', (4129, 4147), True, 'import numpy as np\n')] |
Ayon134/code_for_Kids | turtlegameproject/turtlegame.py | d90698bb38efe5e26c31f02bd129bfdadea158e2 | import turtle
import random
p1=turtle.Turtle()
p1.color("green")
p1.shape("turtle")
p1.penup()
p1.goto(-200,100)
p2=p1.clone()
p2.color("blue")
p2.penup()
p2.goto(-200,-100)
p1.goto(300,60)
p1.pendown()
p1.circle(40)
p1.penup()
p1.goto(-200,100)
p2.goto(300,-140)
p2.pendown()
p2.circle(40)
p2.penup()
p2.goto(-200,-100)
die=[1,2,3,4,5,6]
i=1
while i <= 20:
    if p1.pos() >= (300, 100):
        print("p1 wins")
        break
    elif p2.pos() >= (300, -100):
        print("p2 wins")
        break
    else:
        p1_turn = input("press enter to start")
        die_out = random.choice(die)
        print("you get", die_out)
        print("the number of steps:", 20 * die_out)
        p1.forward(20 * die_out)

        p2_turn = input("press enter to challenge")
        d = random.choice(die)
        print("you get", d)
        print("the number of steps:", 20 * d)
p2.forward(20*d) | [((32, 47), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (45, 47), False, 'import turtle\n'), ((589, 607), 'random.choice', 'random.choice', (['die'], {}), '(die)\n', (602, 607), False, 'import random\n'), ((792, 810), 'random.choice', 'random.choice', (['die'], {}), '(die)\n', (805, 810), False, 'import random\n')] |
neherlab/hivwholeseq | hivwholeseq/sequencing/check_pipeline.py | 978ce4060362e4973f92b122ed5340a5314d7844 | #!/usr/bin/env python
# vim: fdm=marker
'''
author: Fabio Zanini
date: 15/06/14
content: Check the status of the pipeline for one or more sequencing samples.
'''
# Modules
import os
import sys
from itertools import izip
import argparse
from Bio import SeqIO
from hivwholeseq.utils.generic import getchar
from hivwholeseq.sequencing.samples import SampleSeq, load_sequencing_run
from hivwholeseq.patients.patients import load_samples_sequenced as lssp
from hivwholeseq.patients.patients import SamplePat
from hivwholeseq.sequencing.samples import load_samples_sequenced as lss
from hivwholeseq.utils.mapping import get_number_reads
from hivwholeseq.cluster.fork_cluster import fork_check_pipeline as fork_self
# Globals
len_fr = 8
len_msg = 6
spacing_fragments = 4
# Functions
def check_status(sample, step, detail=1):
'''Check for a sample a certain step of the pipeline at a certain detail'''
if detail == 1:
if step == 'premapped':
return [os.path.isfile(sample.get_premapped_filename())]
elif step == 'divided':
return [(fr, os.path.isfile(sample.get_divided_filename(fr)))
for fr in sample.regions_complete]
elif step == 'consensus':
return [(fr, os.path.isfile(sample.get_consensus_filename(fr)))
for fr in sample.regions_generic]
elif step == 'mapped':
return [(fr, os.path.isfile(sample.get_mapped_filename(fr, filtered=False)))
for fr in sample.regions_generic]
elif step == 'filtered':
return [(fr, os.path.isfile(sample.get_mapped_filename(fr, filtered=True)))
for fr in sample.regions_generic]
elif step == 'mapped_initial':
return [(fr, os.path.isfile(sample.get_mapped_to_initial_filename(fr)))
for fr in sample.regions_generic]
elif step == 'mapped_filtered':
# Check whether the mapped filtered is older than the mapped_initial
from hivwholeseq.utils.generic import modification_date
out = []
for fr in sample.regions_generic:
fn_mi = sample.get_mapped_to_initial_filename(fr)
fn_mf = sample.get_mapped_filtered_filename(fr)
if not os.path.isfile(fn_mf):
out.append((fr, False))
continue
if not os.path.isfile(fn_mi):
out.append((fr, True))
continue
md_mi = modification_date(fn_mi)
md_mf = modification_date(fn_mf)
if md_mf < md_mi:
out.append((fr, 'OLD'))
else:
out.append((fr, True))
return out
elif detail == 2:
if step in ('filtered', 'consensus'):
return check_status(sample, step, detail=3)
else:
return check_status(sample, step, detail=1)
elif detail == 3:
if step == 'premapped':
if os.path.isfile(sample.get_premapped_filename()):
return [get_number_reads(sample.get_premapped_filename())]
else:
return [False]
elif step == 'divided':
stati = []
for fr in sample.regions_complete:
fn = sample.get_divided_filename(fr)
if os.path.isfile(fn):
status = (fr, get_number_reads(fn))
else:
status = (fr, False)
stati.append(status)
return stati
elif step == 'consensus':
stati = []
for fr in sample.regions_generic:
fn = sample.get_consensus_filename(fr)
if os.path.isfile(fn):
status = (fr, len(SeqIO.read(fn, 'fasta')))
else:
status = (fr, False)
stati.append(status)
return stati
elif step == 'mapped':
stati = []
for fr in sample.regions_generic:
fn = sample.get_mapped_filename(fr, filtered=False)
if os.path.isfile(fn):
status = (fr, get_number_reads(fn))
else:
status = (fr, False)
stati.append(status)
return stati
elif step == 'filtered':
stati = []
for fr in sample.regions_generic:
fn = sample.get_mapped_filename(fr, filtered=True)
if os.path.isfile(fn):
status = (fr, get_number_reads(fn))
else:
status = (fr, False)
stati.append(status)
return stati
# TODO: add mapped_to_initial and downstream
elif step in ('mapped_initial', 'mapped_filtered'):
return check_status(sample, step, detail=1)
def print_info(name, status, detail=1):
'''Print info on these files'''
print '{:<20s}'.format(name+':'),
if name.lower() in ['premapped']:
status = status[0]
if status == True:
print 'OK'
elif status == False:
print 'MISS'
else:
print str(status)
else:
stati = list(status)
msg = []
for (fr, status) in stati:
ms = ('{:<'+str(len_fr)+'s}').format(fr+':')
if status == True:
msg.append(ms+('{:>'+str(len_msg)+'}').format('OK'))
elif status == False:
msg.append(ms+('{:>'+str(len_msg)+'}').format('MISS'))
else:
msg.append(ms+('{:>'+str(len_msg)+'}').format(str(status)))
print (' ' * spacing_fragments).join(msg)
# Script
if __name__ == '__main__':
# Parse input args
parser = argparse.ArgumentParser(description='Check sequencing run for missing parts of the analysis',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--runs', required=True, nargs='+',
help='Seq runs to analyze (e.g. Tue28, test_tiny)')
parser.add_argument('--adaIDs', nargs='+',
help='Adapter IDs to analyze (e.g. TS2)')
parser.add_argument('--nopatients', action='store_false', dest='use_pats',
help='Include non-patient samples (e.g. reference strains)')
parser.add_argument('--interactive', action='store_true',
help='Interactive mode')
parser.add_argument('--detail', type=int, default=1,
help='Include details on number of reads, length of consensus')
parser.add_argument('--submit', action='store_true',
help='Execute the script in parallel on the cluster')
args = parser.parse_args()
seq_runs = args.runs
adaIDs = args.adaIDs
use_pats = args.use_pats
use_interactive = args.interactive
detail = args.detail
submit = args.submit
if submit:
fork_self(seq_runs, adaIDs=adaIDs,
pats=use_pats,
detail=detail)
sys.exit()
samples_pat = lssp(include_wrong=True)
samples = lss()
samples = samples.loc[samples['seq run'].isin(seq_runs)]
if adaIDs is not None:
samples = samples.loc[samples.adapter.isin(adaIDs)]
if len(seq_runs) >= 2:
samples.sort(columns=['patient sample', 'seq run'], inplace=True)
for isa, (samplename, sample) in enumerate(samples.iterrows()):
sample = SampleSeq(sample)
print sample.name, 'seq:', sample['seq run'], sample.adapter,
if sample['patient sample'] == 'nan':
print 'not a patient sample',
if use_pats:
print '(skip)'
continue
else:
print ''
else:
sample_pat = samples_pat.loc[sample['patient sample']]
print 'patient: '+sample_pat.patient
steps = ['premapped', 'divided', 'consensus', 'mapped', 'filtered',
'mapped_initial', 'mapped_filtered']
for step in steps:
status = check_status(sample, step, detail=detail)
print_info(step.capitalize(), status, detail=detail)
if (isa != len(samples) - 1):
print ''
if use_interactive and (isa != len(samples) - 1):
print 'Press q to exit',
sys.stdout.flush()
ch = getchar()
if ch.lower() in ['q']:
print 'stopped'
break
else:
sys.stdout.write("\x1b[1A")
print ''
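# A hypothetical invocation, based only on the argparse flags defined above:
#   python check_pipeline.py --runs Tue28 --adaIDs TS2 --detail 2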
| [] |
thliang01/nba-s | app.py | 660d0e830989916b7b9f3123eb809d143b714186 | import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# --------------------------------------------------------------
# Import and clean data
game_details = pd.read_csv('games_details.csv')
# print(game_details.head(5))
game_details.drop(['GAME_ID', 'TEAM_ID', 'PLAYER_ID', 'START_POSITION',
'COMMENT', 'TEAM_ABBREVIATION'], axis=1, inplace=True)
game_details['FTL'] = game_details['FTA'] - game_details['FTM']
game_details = game_details.dropna()
# game_details.shape
# game_details.info()
game_details['MIN'] = game_details['MIN'].str.strip(':').str[0:2]
df = game_details.copy()
if st.checkbox('Show dataframe'):
    st.write("Players Game Details")
    st.dataframe(df.head(10))
# --------------------------------------------------------------
st.write("Top 20 Players in the NBA")
top_activities = df.groupby(by='PLAYER_NAME')['PTS'].sum().sort_values(ascending=False).head(20).reset_index()
plt.figure(figsize=(15, 10))
plt.xlabel('POINTS', fontsize=15)
plt.ylabel('PLAYER_NAME', fontsize=15)
plt.title('Top 20 Players in the NBA League', fontsize=20)
ax = sns.barplot(x=top_activities['PTS'], y=top_activities['PLAYER_NAME'])
for i, (value, name) in enumerate(zip(top_activities['PTS'], top_activities['PLAYER_NAME'])):
    ax.text(value, i - .05, f'{value:,.0f}', size=10, ha='left', va='center')
ax.set(xlabel='POINTS', ylabel='PLAYER_NAME')
st.pyplot(plt)
player = st.multiselect(
"Choose Player", df['PLAYER_NAME']
)
st.write("""
# My first app
Hello *world!*
""")
x = st.slider("Select a number")
st.write("You selected:", x)
| [((221, 253), 'pandas.read_csv', 'pd.read_csv', (['"""games_details.csv"""'], {}), "('games_details.csv')\n", (232, 253), True, 'import pandas as pd\n'), ((671, 700), 'streamlit.checkbox', 'st.checkbox', (['"""Show dataframe"""'], {}), "('Show dataframe')\n", (682, 700), True, 'import streamlit as st\n'), ((836, 873), 'streamlit.write', 'st.write', (['"""Top 20 Players in the NBA"""'], {}), "('Top 20 Players in the NBA')\n", (844, 873), True, 'import streamlit as st\n'), ((985, 1013), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (995, 1013), True, 'import matplotlib.pyplot as plt\n'), ((1014, 1047), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""POINTS"""'], {'fontsize': '(15)'}), "('POINTS', fontsize=15)\n", (1024, 1047), True, 'import matplotlib.pyplot as plt\n'), ((1048, 1086), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""PLAYER_NAME"""'], {'fontsize': '(15)'}), "('PLAYER_NAME', fontsize=15)\n", (1058, 1086), True, 'import matplotlib.pyplot as plt\n'), ((1087, 1145), 'matplotlib.pyplot.title', 'plt.title', (['"""Top 20 Players in the NBA League"""'], {'fontsize': '(20)'}), "('Top 20 Players in the NBA League', fontsize=20)\n", (1096, 1145), True, 'import matplotlib.pyplot as plt\n'), ((1151, 1220), 'seaborn.barplot', 'sns.barplot', ([], {'x': "top_activities['PTS']", 'y': "top_activities['PLAYER_NAME']"}), "(x=top_activities['PTS'], y=top_activities['PLAYER_NAME'])\n", (1162, 1220), True, 'import seaborn as sns\n'), ((1439, 1453), 'streamlit.pyplot', 'st.pyplot', (['plt'], {}), '(plt)\n', (1448, 1453), True, 'import streamlit as st\n'), ((1464, 1514), 'streamlit.multiselect', 'st.multiselect', (['"""Choose Player"""', "df['PLAYER_NAME']"], {}), "('Choose Player', df['PLAYER_NAME'])\n", (1478, 1514), True, 'import streamlit as st\n'), ((1522, 1569), 'streamlit.write', 'st.write', (['"""\n# My first app\nHello *world!*\n"""'], {}), '("""\n# My first app\nHello *world!*\n""")\n', (1530, 1569), True, 'import streamlit as st\n'), ((1575, 1603), 'streamlit.slider', 'st.slider', (['"""Select a number"""'], {}), "('Select a number')\n", (1584, 1603), True, 'import streamlit as st\n'), ((1604, 1632), 'streamlit.write', 'st.write', (['"""You selected:"""', 'x'], {}), "('You selected:', x)\n", (1612, 1632), True, 'import streamlit as st\n'), ((706, 738), 'streamlit.write', 'st.write', (['"""Players Game Details"""'], {}), "('Players Game Details')\n", (714, 738), True, 'import streamlit as st\n')] |
kepolol/craftassist | python/craftassist/voxel_models/geoscorer/geoscorer_util.py | f60a7edd0b4ea72b774cca45ba468d2e275445c2 | """
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import random
from datetime import datetime
import sys
import argparse
import torch
import os
from inspect import currentframe, getframeinfo
GEOSCORER_DIR = os.path.dirname(os.path.realpath(__file__))
CRAFTASSIST_DIR = os.path.join(GEOSCORER_DIR, "../")
sys.path.append(CRAFTASSIST_DIR)
from shapes import get_bounds
def pretty_log(log_string):
cf = currentframe().f_back
filename = getframeinfo(cf).filename.split("/")[-1]
print(
"{} {}:{} {}".format(
datetime.now().strftime("%m/%d/%Y %H:%M:%S"), filename, cf.f_lineno, log_string
)
)
sys.stdout.flush()
## Train Fxns ##
def get_base_train_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", type=int, default=1, help="0 for cpu")
parser.add_argument("--batchsize", type=int, default=64, help="batchsize")
parser.add_argument("--dataset", default="shapes", help="shapes/segments/both")
parser.add_argument(
"--epochsize", type=int, default=1000, help="number of examples in an epoch"
)
parser.add_argument("--nepoch", type=int, default=1000, help="number of epochs")
parser.add_argument("--context_sidelength", type=int, default=32, help="size of cube")
parser.add_argument("--hidden_dim", type=int, default=64, help="size of hidden dim")
parser.add_argument("--num_layers", type=int, default=3, help="num layers")
parser.add_argument(
"--blockid_embedding_dim", type=int, default=8, help="size of blockid embedding"
)
parser.add_argument(
"--num_words", type=int, default=256, help="number of words for the blockid embeds"
)
parser.add_argument("--lr", type=float, default=0.1, help="step size for net")
parser.add_argument(
"--optim", type=str, default="adagrad", help="optim type to use (adagrad|sgd|adam)"
)
parser.add_argument("--momentum", type=float, default=0.0, help="momentum")
parser.add_argument("--checkpoint", default="", help="where to save model")
parser.add_argument("--num_workers", type=int, default=4, help="number of dataloader workers")
return parser
def add_dataset_flags(parser):
parser.add_argument(
"--dataset_ratios", type=str, default="shape:1.0", help="comma separated name:prob"
)
parser.add_argument("--useid", type=bool, default=False, help="use blockid")
parser.add_argument("--fixed_cube_size", type=int, default=None, help="fixed_cube_size")
parser.add_argument("--fixed_center", type=bool, default=False, help="fixed_center")
parser.add_argument(
"--min_seg_size", type=int, default=6, help="min seg size for seg data type"
)
parser.add_argument(
"--use_saved_data",
type=bool,
default=False,
help="use preparsed data for this min_seg_size",
)
def add_directional_flags(parser):
parser.add_argument("--spatial_embedding_dim", type=int, default=8, help="size of spatial emb")
parser.add_argument("--output_embedding_dim", type=int, default=8, help="size of output emb")
parser.add_argument(
"--seg_direction_net", type=bool, default=False, help="use segdirnet module"
)
parser.add_argument(
"--seg_use_viewer_pos", type=bool, default=False, help="use viewer pos in seg"
)
parser.add_argument(
"--seg_use_viewer_look", type=bool, default=False, help="use viewer look in seg"
)
parser.add_argument(
"--seg_use_direction", type=bool, default=False, help="use direction in seg"
)
parser.add_argument("--num_seg_dir_layers", type=int, default=3, help="num segdir net layers")
parser.add_argument(
"--cont_use_direction", type=bool, default=False, help="use direction in context"
)
parser.add_argument(
"--cont_use_xyz_from_viewer_look",
type=bool,
default=False,
help="use xyz position relative to viewer look in context emb",
)
def get_dataloader(dataset, opts, collate_fxn):
def init_fn(wid):
np.random.seed(torch.initial_seed() % (2 ** 32))
return torch.utils.data.DataLoader(
dataset,
batch_size=opts["batchsize"],
shuffle=True,
pin_memory=True,
drop_last=True,
num_workers=opts["num_workers"],
worker_init_fn=init_fn,
collate_fn=collate_fxn,
)
def to_cuda(list_modules):
for m in list_modules:
m.cuda()
def multitensor_collate_fxn(x):
"""
Takes a list of BATCHSIZE lists of tensors of length D.
Returns a list of length D of batched tensors.
"""
num_tensors_to_batch = len(x[0])
regroup_tensors = [[] for i in range(num_tensors_to_batch)]
for t_list in x:
for i, t in enumerate(t_list):
regroup_tensors[i].append(t.unsqueeze(0))
batched_tensors = [torch.cat(tl) for tl in regroup_tensors]
return batched_tensors
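# Illustration (not in the original source): with x = [[a0, b0], [a1, b1]], where
# each a_i has shape (3,) and each b_i has shape (5,), the function returns
# [A, B] with A.shape == (2, 3) and B.shape == (2, 5).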
## 3D Utils ##
def get_side_lengths(bounds):
"""
Bounds should be a list of [min_x, max_x, min_y, max_y, min_z, max_z].
Returns a list of the side lengths.
"""
return [x + 1 for x in (bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4])]
def coord_to_index(coord, sl):
"""
Takes a 3D coordinate in a cube and the cube side length.
Returns index in flattened 3D array.
"""
return coord[0] * sl * sl + coord[1] * sl + coord[2]
def index_to_coord(index, sl):
"""
Takes an index into a flattened 3D array and its side length.
Returns the coordinate in the cube.
"""
coord = []
two_d_slice_size = sl * sl
coord.append(index // two_d_slice_size)
remaining = index % two_d_slice_size
coord.append(remaining // sl)
coord.append(remaining % sl)
return coord
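# Worked example (added for clarity, checked against the two helpers above): with
# side length sl = 4, coord_to_index([1, 2, 3], 4) == 1*16 + 2*4 + 3 == 27, and
# index_to_coord(27, 4) == [1, 2, 3].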
def shift_subsegment_corner(S):
"""
Takes a segment, described as a list of tuples of the form:
((x, y, z), (block_id, ?))
Returns the segment in the same form, shifted to the origin, and the shift vec
"""
bounds = get_bounds(S)
shift_zero_vec = [-bounds[0], -bounds[2], -bounds[4]]
new_S = []
for s in S:
new_S.append((tuple([sum(x) for x in zip(s[0], shift_zero_vec)]), s[1]))
return new_S, shift_zero_vec
def subset_and_scale_3d(init_array, mins, maxs, scale=1):
return scale * init_array[mins[0] : maxs[0], mins[1] : maxs[1], mins[2] : maxs[2]]
def combine_seg_context(seg, context, seg_shift, seg_mult=1):
completed_context = context.clone()
# Calculate the region to copy over, sometimes the segment
# falls outside the range of the context bounding box
c_mins = [int(i) for i in seg_shift]
c_maxs = [int(min(ss + 8, 32)) for ss in seg_shift]
s_mins = [0 for i in range(3)]
# If the edge of the segment goes past the edge of the context (ss + 8 > 32),
# remove the extra from the segment.
s_maxs = [int(8 - max(0, (ss + 8) - 32)) for ss in seg_shift]
seg_to_add = subset_and_scale_3d(seg, s_mins, s_maxs, seg_mult)
context_subset = subset_and_scale_3d(completed_context, c_mins, c_maxs, 1)
completed_context[c_mins[0] : c_maxs[0], c_mins[1] : c_maxs[1], c_mins[2] : c_maxs[2]] = (
seg_to_add + context_subset
)
return completed_context
def get_vector(start, end):
return end - start
def get_random_viewer_info(sl):
viewer_pos = torch.tensor(random_int_triple(0, sl - 1))
viewer_look = torch.tensor(random_int_triple(0, sl - 1))
if viewer_pos.eq(viewer_look).sum() == viewer_pos.size(0):
if viewer_look[0] < sl + 1:
viewer_look[0] += 1
else:
viewer_look[0] -= 1
return viewer_pos, viewer_look
def b_greater_than_a(a, b):
if a == b:
return 0
return 1 if b > a else -1
def shift_block(b, s):
return tuple((tuple((b[0][0] + s[0], b[0][1] + s[1], b[0][2] + s[2])), b[1]))
def rotate_block(b, c, r):
""" rotates the block b around the point c by 90*r degrees
in the xz plane. r should be 1 or -1."""
# TODO add a reflection
c = np.array(c)
p = np.add(b[0], -c)
x = p[0]
z = p[2]
if r == -1:
p[0] = z
p[2] = -x
else:
p[0] = -z
p[2] = x
return (tuple(p + c), b[1])
def random_int_triple(minval, maxval):
t = [
random.randint(minval, maxval),
random.randint(minval, maxval),
random.randint(minval, maxval),
]
return t
def check_inrange(x, minval, maxval):
"""inclusive check"""
return all([v >= minval for v in x]) and all([v <= maxval for v in x])
def normalize(batched_vector):
vec = batched_vector.double()
norm = torch.norm(vec, dim=1)
# Set norm to 1 if it's 0
norm = norm + norm.eq(0).double()
expanded_norm = norm.unsqueeze(1).expand(-1, vec.size()[1])
return torch.div(vec, expanded_norm)
def get_rotation_matrix(viewer_pos, viewer_look):
# VP, VL: N x 3, VP_to_VL: N x 3
vp_to_vl = get_vector(viewer_pos, viewer_look)[:, :2]
nlook_vec = normalize(vp_to_vl)
nly = nlook_vec[:, 1]
# Nlx necessary to correct for the range of acrcos
nlx = nlook_vec[:, 0]
nlx = nlx.gt(0).double() - nlx.lt(0).double() - nlx.eq(0).double()
# Take care of nans created by raising 0 to a power
# and then masking the sin theta to 0 as intended
base = 1 - nly * nly
nan_mask = torch.isnan(torch.pow(base, 0.5)).double()
base = base + nan_mask
sin_theta = nlx * nan_mask.eq(0).double() * torch.pow(base, 0.5)
nly = nly.unsqueeze(1)
sin_theta = sin_theta.unsqueeze(1)
rm_pt1 = torch.cat([nly, sin_theta], 1).unsqueeze(1)
rm_pt2 = torch.cat([-sin_theta, nly], 1).unsqueeze(1)
rm = torch.cat([rm_pt1, rm_pt2], 1)
return rm
def rotate_x_y(coord, rotation_matrix):
return torch.mm(coord.unsqueeze(0), rotation_matrix).squeeze(0)
def float_equals(a, b, epsilon):
return True if abs(a - b) < epsilon else False
def get_argmax_list(vals, epsilon, minlist=False, maxlen=None):
mult = -1 if minlist else 1
max_ind = []
for i, v in enumerate(vals):
if not max_ind or float_equals(max_ind[0][1], v, epsilon):
if maxlen and len(max_ind) == maxlen:
continue
max_ind.append((i, v))
elif mult * (v - max_ind[0][1]) > 0:
max_ind = [(i, v)]
return max_ind
def get_firstmax(vals, epsilon, minlist=False):
return get_argmax_list(vals, epsilon, minlist, 1)[0]
# N -> batch size in training
# D -> num target coord per element
# Viewer pos, viewer_look are N x 3 tensors
# Batched target coords is a N x D x 3 tensor
# Output is a N x D x 3 tensor
def get_xyz_viewer_look_coords_batched(viewer_pos, viewer_look, batched_target_coords):
# First verify the sizing and unsqueeze if necessary
btc_sizes = batched_target_coords.size()
vp_sizes = viewer_pos.size()
vl_sizes = viewer_look.size()
if len(btc_sizes) > 3 or len(vp_sizes) > 2 or len(vl_sizes) > 2:
raise Exception("One input has too many dimensions")
if btc_sizes[-1] != 3 or vp_sizes[-1] != 3 or vl_sizes[-1] != 3:
raise Exception("The last dimension of all inputs should be size 3")
if len(btc_sizes) < 3:
for i in range(3 - len(btc_sizes)):
batched_target_coords = batched_target_coords.unsqueeze(0)
if len(vp_sizes) == 1:
viewer_pos = viewer_pos.unsqueeze(0)
if len(vl_sizes) == 1:
viewer_look = viewer_look.unsqueeze(0)
n = batched_target_coords.size()[0]
d = batched_target_coords.size()[1]
# Handle xy and z separately
# XY = N X D x 2
xy = batched_target_coords[:, :, 0:2].double()
# Z = N x D x 1
z = batched_target_coords[:, :, 2].unsqueeze(2).double()
## XY
# Shift such that viewer pos is the origin
# VPXY, VLXY: N x 2
vpxy = viewer_pos.double()[:, 0:2]
vlxy = viewer_look.double()[:, 0:2]
vpxy_to_vlxy = vlxy - vpxy
# VPXY to XY: N x D x 2
vpxy_to_xy = xy - vpxy.unsqueeze(1).expand(n, d, -1)
# Rotate them around the viewer position such that a normalized
# viewer look vector would be (0, 1)
# Rotation_matrix: N x 2 x 2
rotation_matrix = get_rotation_matrix(viewer_pos, viewer_look)
# N x 1 x 2 mm N x 2 x 2 ==> N x 1 x 2 ==> N x 2
r_vpxy_to_vlxy = torch.bmm(vpxy_to_vlxy.unsqueeze(1), rotation_matrix).unsqueeze(1)
# RM: N x 2 x 2 ==> N x D x 2 x 2
expanded_rm = rotation_matrix.unsqueeze(1).expand(n, d, 2, 2).contiguous().view(-1, 2, 2)
# N x D x 2 ==> N*D x 1 x 2 mm N*D x 2 x 2 ==> N*D x 1 x 2 ==> N x D x 2
reshape_vpxy_to_xy = vpxy_to_xy.contiguous().view(-1, 1, 2)
r_vpxy_to_xy = torch.bmm(reshape_vpxy_to_xy, expanded_rm).contiguous().view(n, d, 2)
# N x D x 2
# Get the xy position in this rotated coord system with rvl as the origin
rvl_to_rxy = r_vpxy_to_xy - r_vpxy_to_vlxy.squeeze(1).expand(n, d, 2)
## Z
# VLZ = N x 1
vlz = viewer_look.double()[:, 2]
# Z = N x D x 1
diffz = z - vlz.view(-1, 1, 1).expand(n, d, -1)
## Combine
# rvl_to_rxy: N x D x 2, diffz: N x D x 1
new_xyz = torch.cat([rvl_to_rxy, diffz], 2)
return new_xyz
def get_dir_dist(viewer_pos, viewer_look, batched_target_coords):
if len(batched_target_coords.size()) == 1:
batched_target_coords = batched_target_coords.unsqueeze(0)
xyz = get_xyz_viewer_look_coords_batched(viewer_pos, viewer_look, batched_target_coords)
dist = xyz.abs()
direction = xyz.gt(0).double() - xyz.lt(0).double()
return direction, dist
def get_sampled_direction_vec(viewer_pos, viewer_look, target_coord):
directions, dists = get_dir_dist(viewer_pos, viewer_look, target_coord)
dists = dists.squeeze()
directions = directions.squeeze()
ndists = dists / sum(dists)
dim = np.random.choice(3, p=ndists)
direction = directions[dim].item()
dim_l = [(0 if i == dim else 1) for i in range(3)]
dir_l = [0, 1] if direction == -1 else [1, 0]
return torch.tensor(dim_l + dir_l, dtype=torch.long)
def get_max_direction_vec(viewer_pos, viewer_look, target_coord):
directions, dists = get_dir_dist(viewer_pos, viewer_look, target_coord)
dists = dists.squeeze()
directions = directions.squeeze()
ndists = dists / sum(dists)
dim = np.argmax(ndists)
direction = directions[dim].item()
dim_l = [(0 if i == dim else 1) for i in range(3)]
dir_l = [0, 1] if direction == -1 else [1, 0]
return torch.tensor(dim_l + dir_l, dtype=torch.long)
# outputs a dense voxel rep (np array) from a sparse one.
# size should be a tuple of (H, W, D) for the desired voxel representation
# useid=True puts the block id into the voxel representation,
# otherwise put a 1
def densify(blocks, size, center=(0, 0, 0), useid=False):
V = np.zeros((size[0], size[1], size[2]), dtype="int32")
offsets = (size[0] // 2 - center[0], size[1] // 2 - center[1], size[2] // 2 - center[2])
for b in blocks:
x = b[0][0] + offsets[0]
y = b[0][1] + offsets[1]
z = b[0][2] + offsets[2]
if x >= 0 and y >= 0 and z >= 0 and x < size[0] and y < size[1] and z < size[2]:
if type(b[1]) is int:
V[x, y, z] = b[1]
else:
V[x, y, z] = b[1][0]
if not useid:
V[V > 0] = 1
return V, offsets
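# Small worked example (not in the original source): with the default center,
# densify([((0, 0, 0), (1, 0))], (3, 3, 3)) returns a 3x3x3 array whose only
# non-zero entry is V[1, 1, 1] == 1, together with offsets (1, 1, 1).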
def center_of_mass(S, seg=None):
seg = seg or [True for i in S]
if len(S[0]) == 2:
m = list(np.round(np.mean([S[i][0] for i in range(len(S)) if seg[i]], axis=0)))
else:
m = list(np.round(np.mean([S[i] for i in range(len(S)) if seg[i]], axis=0)))
return [int(i) for i in m]
def check_l1_dist(a, b, d):
return abs(b[0] - a[0]) <= d[0] and abs(b[1] - a[1]) <= d[1] and abs(b[2] - a[2]) <= d[2]
def sparsify_segment(seg, context):
seg_sparse = []
for i, use in enumerate(seg):
if use:
seg_sparse.append(context[i])
return seg_sparse
def get_dense_array_from_sl(sparse_shape, sl, useid):
center = [sl // 2, sl // 2, sl // 2]
    # densify already returns (ndarray, offsets); wrapping the tuple in np.asarray is unnecessary and fragile
    shape_dense, _ = densify(sparse_shape, [sl, sl, sl], center=center, useid=useid)
return shape_dense
def convert_sparse_context_seg_to_example(
context_sparse, seg_sparse, c_sl, s_sl, useid, vis=False
):
context_dense = get_dense_array_from_sl(context_sparse, c_sl, useid)
seg_dense_uncentered = get_dense_array_from_sl(seg_sparse, c_sl, useid)
# For visualization
if vis:
context_dense = context_dense + seg_dense_uncentered
else:
context_dense = context_dense - seg_dense_uncentered
shifted_seg_sparse, shift_vec = shift_subsegment_corner(seg_sparse)
seg_dense_centered = get_dense_array_from_sl(shifted_seg_sparse, s_sl, useid)
target_coord = [-x for x in shift_vec]
target_index = coord_to_index(target_coord, c_sl)
return [
torch.from_numpy(context_dense),
torch.from_numpy(seg_dense_centered),
torch.tensor([target_index]),
]
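# Illustrative usage sketch (added for clarity, not part of the original file):
# builds one training example from a toy context and segment. c_sl / s_sl are the
# context and segment side lengths; the returned list is
# [context voxels, centered segment voxels, target index tensor].
def _convert_example_sketch():
    context = [((0, 0, 0), (1, 0)), ((1, 0, 0), (5, 0)), ((2, 0, 0), (5, 0))]
    seg = [((1, 0, 0), (5, 0)), ((2, 0, 0), (5, 0))]
    return convert_sparse_context_seg_to_example(context, seg, c_sl=8, s_sl=4, useid=True)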
############################################################################
# For these "S" is a list of blocks in ((x,y,z),(id, meta)) format
# the segment is a list of the same length as S with either True or False
# at each entry marking whether that block is in the segment
# each outputs a list of blocks in ((x,y,z),(id, meta)) format
def shift_negative_vec(S, segment, vec, args):
N = []
for s in range(len(segment)):
if not segment[s]:
new_coords = tuple(np.add(S[s][0], vec))
N.append([new_coords, S[s][1]])
else:
if "seg_id" in args:
N.append([S[s][0], (args["seg_id"], S[s][1][1])])
else:
N.append(S[s])
return N
def shift_negative(S, segment, args):
    """Takes the blocks not in the segment and shifts them randomly."""
    shift_max = args["shift_max"]
    shift_vec = random_int_triple(-shift_max, shift_max)
    return shift_negative_vec(S, segment, shift_vec, args)
def rotate_negative(S, segment, args):
c = center_of_mass(S, seg=segment)
r = random.choice([1, -1])
return [rotate_block(S[i], c, r) if segment[i] else S[i] for i in range(len(S))]
def replace_negative(S, segment, args):
data = args["data"]
oseg, oS = data.get_positive()
c_pos = center_of_mass(S, seg=segment)
c_neg = center_of_mass(oS, seg=oseg)
offset = np.add(c_pos, -np.array(c_neg))
N = [S[i] for i in range(len(S)) if not segment[i]]
return N + [shift_block(oS[i], offset) for i in range(len(oS)) if oseg[i]]
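# Illustrative usage sketch (added for clarity, not part of the original file) of
# the block/segment format described above: S is a list of ((x, y, z), (id, meta))
# blocks and `segment` is a parallel list of booleans marking the segment members.
def _negative_sample_sketch():
    S = [((0, 0, 0), (1, 0)), ((1, 0, 0), (5, 0)), ((2, 0, 0), (5, 0))]
    segment = [False, True, True]
    # shift every block outside the segment by (1, 0, 0), leaving the segment untouched
    return shift_negative_vec(S, segment, (1, 0, 0), {})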
class NegativeSampler:
def __init__(self, dataloader, shift_max=10, ntype_probs=[0.6, 0.2, 0.2]):
# self.data_prob = [x['prob'] for x in dataloaders.values()]
# self.dataloaders = [x['data'] for x in dataloaders.values()]
self.dataloader = dataloader
self.shift_max = shift_max
self.ntype_probs = ntype_probs
self.negative_samplers = [shift_negative, rotate_negative, replace_negative]
def build_negative(self, S, segment):
negative_fn = np.random.choice(self.negative_samplers, p=self.ntype_probs)
return negative_fn(S, segment, {"shift_max": self.shift_max, "data": self.dataloader})
| [((297, 331), 'os.path.join', 'os.path.join', (['GEOSCORER_DIR', '"""../"""'], {}), "(GEOSCORER_DIR, '../')\n", (309, 331), False, 'import os\n'), ((332, 364), 'sys.path.append', 'sys.path.append', (['CRAFTASSIST_DIR'], {}), '(CRAFTASSIST_DIR)\n', (347, 364), False, 'import sys\n'), ((251, 277), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (267, 277), False, 'import os\n'), ((666, 684), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (682, 684), False, 'import sys\n'), ((748, 773), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (771, 773), False, 'import argparse\n'), ((4144, 4347), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': "opts['batchsize']", 'shuffle': '(True)', 'pin_memory': '(True)', 'drop_last': '(True)', 'num_workers': "opts['num_workers']", 'worker_init_fn': 'init_fn', 'collate_fn': 'collate_fxn'}), "(dataset, batch_size=opts['batchsize'], shuffle=\n True, pin_memory=True, drop_last=True, num_workers=opts['num_workers'],\n worker_init_fn=init_fn, collate_fn=collate_fxn)\n", (4171, 4347), False, 'import torch\n'), ((6053, 6066), 'shapes.get_bounds', 'get_bounds', (['S'], {}), '(S)\n', (6063, 6066), False, 'from shapes import get_bounds\n'), ((8076, 8087), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (8084, 8087), True, 'import numpy as np\n'), ((8096, 8112), 'numpy.add', 'np.add', (['b[0]', '(-c)'], {}), '(b[0], -c)\n', (8102, 8112), True, 'import numpy as np\n'), ((8676, 8698), 'torch.norm', 'torch.norm', (['vec'], {'dim': '(1)'}), '(vec, dim=1)\n', (8686, 8698), False, 'import torch\n'), ((8842, 8871), 'torch.div', 'torch.div', (['vec', 'expanded_norm'], {}), '(vec, expanded_norm)\n', (8851, 8871), False, 'import torch\n'), ((9714, 9744), 'torch.cat', 'torch.cat', (['[rm_pt1, rm_pt2]', '(1)'], {}), '([rm_pt1, rm_pt2], 1)\n', (9723, 9744), False, 'import torch\n'), ((13135, 13168), 'torch.cat', 'torch.cat', (['[rvl_to_rxy, diffz]', '(2)'], {}), '([rvl_to_rxy, diffz], 2)\n', (13144, 13168), False, 'import torch\n'), ((13823, 13852), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'p': 'ndists'}), '(3, p=ndists)\n', (13839, 13852), True, 'import numpy as np\n'), ((14008, 14053), 'torch.tensor', 'torch.tensor', (['(dim_l + dir_l)'], {'dtype': 'torch.long'}), '(dim_l + dir_l, dtype=torch.long)\n', (14020, 14053), False, 'import torch\n'), ((14306, 14323), 'numpy.argmax', 'np.argmax', (['ndists'], {}), '(ndists)\n', (14315, 14323), True, 'import numpy as np\n'), ((14479, 14524), 'torch.tensor', 'torch.tensor', (['(dim_l + dir_l)'], {'dtype': 'torch.long'}), '(dim_l + dir_l, dtype=torch.long)\n', (14491, 14524), False, 'import torch\n'), ((14811, 14863), 'numpy.zeros', 'np.zeros', (['(size[0], size[1], size[2])'], {'dtype': '"""int32"""'}), "((size[0], size[1], size[2]), dtype='int32')\n", (14819, 14863), True, 'import numpy as np\n'), ((18080, 18102), 'random.choice', 'random.choice', (['[1, -1]'], {}), '([1, -1])\n', (18093, 18102), False, 'import random\n'), ((435, 449), 'inspect.currentframe', 'currentframe', ([], {}), '()\n', (447, 449), False, 'from inspect import currentframe, getframeinfo\n'), ((4883, 4896), 'torch.cat', 'torch.cat', (['tl'], {}), '(tl)\n', (4892, 4896), False, 'import torch\n'), ((8326, 8356), 'random.randint', 'random.randint', (['minval', 'maxval'], {}), '(minval, maxval)\n', (8340, 8356), False, 'import random\n'), ((8366, 8396), 'random.randint', 'random.randint', (['minval', 'maxval'], {}), '(minval, maxval)\n', (8380, 8396), False, 
'import random\n'), ((8406, 8436), 'random.randint', 'random.randint', (['minval', 'maxval'], {}), '(minval, maxval)\n', (8420, 8436), False, 'import random\n'), ((9502, 9522), 'torch.pow', 'torch.pow', (['base', '(0.5)'], {}), '(base, 0.5)\n', (9511, 9522), False, 'import torch\n'), ((16871, 16902), 'torch.from_numpy', 'torch.from_numpy', (['context_dense'], {}), '(context_dense)\n', (16887, 16902), False, 'import torch\n'), ((16912, 16948), 'torch.from_numpy', 'torch.from_numpy', (['seg_dense_centered'], {}), '(seg_dense_centered)\n', (16928, 16948), False, 'import torch\n'), ((16958, 16986), 'torch.tensor', 'torch.tensor', (['[target_index]'], {}), '([target_index])\n', (16970, 16986), False, 'import torch\n'), ((19072, 19132), 'numpy.random.choice', 'np.random.choice', (['self.negative_samplers'], {'p': 'self.ntype_probs'}), '(self.negative_samplers, p=self.ntype_probs)\n', (19088, 19132), True, 'import numpy as np\n'), ((9603, 9633), 'torch.cat', 'torch.cat', (['[nly, sin_theta]', '(1)'], {}), '([nly, sin_theta], 1)\n', (9612, 9633), False, 'import torch\n'), ((9660, 9691), 'torch.cat', 'torch.cat', (['[-sin_theta, nly]', '(1)'], {}), '([-sin_theta, nly], 1)\n', (9669, 9691), False, 'import torch\n'), ((18401, 18416), 'numpy.array', 'np.array', (['c_neg'], {}), '(c_neg)\n', (18409, 18416), True, 'import numpy as np\n'), ((4098, 4118), 'torch.initial_seed', 'torch.initial_seed', ([], {}), '()\n', (4116, 4118), False, 'import torch\n'), ((9396, 9416), 'torch.pow', 'torch.pow', (['base', '(0.5)'], {}), '(base, 0.5)\n', (9405, 9416), False, 'import torch\n'), ((17490, 17510), 'numpy.add', 'np.add', (['S[s][0]', 'vec'], {}), '(S[s][0], vec)\n', (17496, 17510), True, 'import numpy as np\n'), ((472, 488), 'inspect.getframeinfo', 'getframeinfo', (['cf'], {}), '(cf)\n', (484, 488), False, 'from inspect import currentframe, getframeinfo\n'), ((566, 580), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (578, 580), False, 'from datetime import datetime\n'), ((12683, 12725), 'torch.bmm', 'torch.bmm', (['reshape_vpxy_to_xy', 'expanded_rm'], {}), '(reshape_vpxy_to_xy, expanded_rm)\n', (12692, 12725), False, 'import torch\n')] |
akbarszcz/CryptoAttacks | CryptoAttacks/tests/Block/test_gcm.py | ae675d016b314414a3dc9b23c7d8a32da4c62457 | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
import subprocess
from builtins import bytes, range
from os.path import abspath, dirname
from os.path import join as join_path
from random import randint
from CryptoAttacks.Block.gcm import *
from CryptoAttacks.Utils import log
def test_polynomials():
print("Test polynomials")
Pmod = GF_2k_generator(128, [128,7,2,1,0])
P = Pmod(0b10011010101100110100100110011101100110010111111000111011101000000110110100010101000101100100111100011001010100100110100111011000)
Q = Pmod(0b01111010101010110111000011011100010011101111000001010000011000010000111010001111100001111010110001001000011101000011111110010101)
print(P.to_bits(), bin(P.to_int()), P)
print(Q.to_bits(), bin(Q.to_int()), Q)
w = P*Q
print(w.to_bits(), bin(w.to_int()), w)
assert Q.coefficients == Pmod(Q.coefficients).coefficients
assert Q.coefficients == Pmod(Q.to_int()).coefficients
assert Q.coefficients == Pmod(Q.to_bytes()).coefficients
print('')
Pmod = GF_2k_generator(10, [11,7,2,1,0])
c1 = Pmod(1)
c2 = Pmod(0)
c3 = Pmod(0)
c4 = Pmod(0)
polynomial1 = Polynomial_128([c1,c2,c3,c4])
c1 = Pmod(1236)
c2 = Pmod(0)
c3 = Pmod(0)
c4 = Pmod(0)
polynomial2 = Polynomial_128([c1,c2,c3,c4])
print(polynomial1)
print(polynomial2)
print("+", polynomial1 + polynomial2)
print("*", polynomial1 * polynomial2)
q = polynomial1 / polynomial2
r = polynomial1 % polynomial2
print("/", q)
print("%", r)
print('')
print(polynomial1)
print(polynomial2*q + r)
print('')
def test_gcm():
print("Test GCM")
plaintext = bytes(b'hn9YA(F BW&B (W&&W(RT&WEF f7*WB FTgsdc')
additional = bytes(b'j gej8g0SRYH8s 8s9yf sgd78taDS* GASyd ')
key = bytes(b'xgrtjdh&LA28XNwh')
nonce = bytes(b'a drO*1@((js')
ciphertext, tag = gcm_encrypt(plaintext, additional, key, nonce)
assert gcm_verify(tag, ciphertext, additional, key, nonce)
blocks = aes_bytes_to_poly_blocks(ciphertext, additional)
ciphertext2, additional2 = poly_blocks_to_aes_bytes(blocks)
assert ciphertext == ciphertext2
assert additional == additional2
def polynomial_factors_product(factorization):
"""factorization: [(poly1, power), (poly2, power)]"""
result = factorization[0][0].one_element()
for f, f_degree in factorization:
result *= f**f_degree
return result
def test_factor():
print("Test factor")
Pmod = GF_2k_generator(9, [9,7,2,1,0])
c1 = Pmod(31)
c2 = Pmod(0)
c3 = Pmod(0)
c4 = Pmod(3)
polynomial1 = Polynomial_128([c1,c2,c3,c4])
c1 = Pmod(237)
c2 = Pmod(1)
c3 = Pmod(0)
c4 = Pmod(10)
polynomial2 = Polynomial_128([c1,c2,c3,c4])
polynomial = polynomial1 * polynomial2
print(polynomial1)
print(polynomial2)
print(polynomial)
print(polynomial.monic())
print('')
factorization = factor_polynomial(polynomial)
print(factorization)
result = polynomial.one_element()
for f, f_degree in factorization:
result *= f**f_degree
print(result)
print('')
assert polynomial_factors_product(factorization) == polynomial.monic()
def test_repeated_nonce():
print("Test Key-Recovery Attack on GCM with Repeated Nonces")
for _ in range(3):
nonce = random_bytes(12)
key = random_bytes(16)
h = bytes(AES.new(key, AES.MODE_ECB).encrypt(bytes(b'\x00'*16)))
h = aes_polynomial(h)
ciphertexts_additionals_tags = []
for _ in range(4):
plaintext = random_bytes(randint(0, 50))
additional = random_bytes(randint(0, 50))
ciphertext, tag = gcm_encrypt(plaintext, additional, key, nonce)
ciphertexts_additionals_tags.append((ciphertext, additional, tag))
valid_ciphertext, valid_additional, valid_tag = ciphertexts_additionals_tags[0]
auth_key_candidates = recover_key_repated_nonce(ciphertexts_additionals_tags)
assert h.to_bytes() in auth_key_candidates
# try found auth key candidates
correct_auth_key_found = False
for auth_key in auth_key_candidates:
forged_ciphertext = random_bytes(randint(0, 10))
forged_additional = random_bytes(randint(0, 10))
forged_tag = gcm_forge_tag(ciphertext=forged_ciphertext, additional=forged_additional, auth_key=auth_key,
valid_ciphertext=valid_ciphertext, valid_additional=valid_additional, valid_tag=valid_tag)
if gcm_verify(forged_tag, forged_ciphertext, forged_additional, key, nonce):
correct_auth_key_found = True
break
assert correct_auth_key_found
def run():
log.level = 'debug'
test_polynomials()
test_gcm()
test_factor()
test_repeated_nonce()
if __name__ == "__main__":
run()
| [((1698, 1746), 'builtins.bytes', 'bytes', (["b'hn9YA(F BW&B (W&&W(RT&WEF f7*WB FTgsdc'"], {}), "(b'hn9YA(F BW&B (W&&W(RT&WEF f7*WB FTgsdc')\n", (1703, 1746), False, 'from builtins import bytes, range\n'), ((1764, 1813), 'builtins.bytes', 'bytes', (["b'j gej8g0SRYH8s 8s9yf sgd78taDS* GASyd '"], {}), "(b'j gej8g0SRYH8s 8s9yf sgd78taDS* GASyd ')\n", (1769, 1813), False, 'from builtins import bytes, range\n'), ((1824, 1850), 'builtins.bytes', 'bytes', (["b'xgrtjdh&LA28XNwh'"], {}), "(b'xgrtjdh&LA28XNwh')\n", (1829, 1850), False, 'from builtins import bytes, range\n'), ((1863, 1885), 'builtins.bytes', 'bytes', (["b'a drO*1@((js'"], {}), "(b'a drO*1@((js')\n", (1868, 1885), False, 'from builtins import bytes, range\n'), ((3334, 3342), 'builtins.range', 'range', (['(3)'], {}), '(3)\n', (3339, 3342), False, 'from builtins import bytes, range\n'), ((3580, 3588), 'builtins.range', 'range', (['(4)'], {}), '(4)\n', (3585, 3588), False, 'from builtins import bytes, range\n'), ((3462, 3481), 'builtins.bytes', 'bytes', (["(b'\\x00' * 16)"], {}), "(b'\\x00' * 16)\n", (3467, 3481), False, 'from builtins import bytes, range\n'), ((3627, 3641), 'random.randint', 'randint', (['(0)', '(50)'], {}), '(0, 50)\n', (3634, 3641), False, 'from random import randint\n'), ((3681, 3695), 'random.randint', 'randint', (['(0)', '(50)'], {}), '(0, 50)\n', (3688, 3695), False, 'from random import randint\n'), ((4249, 4263), 'random.randint', 'randint', (['(0)', '(10)'], {}), '(0, 10)\n', (4256, 4263), False, 'from random import randint\n'), ((4310, 4324), 'random.randint', 'randint', (['(0)', '(10)'], {}), '(0, 10)\n', (4317, 4324), False, 'from random import randint\n')] |
jfsolarte/python_clean_architecture | python_clean_architecture/use_cases/orderdata_use_case.py | 56b0c0eff50bc98774a0caee12e3030789476687 | from python_clean_architecture.shared import use_case as uc
from python_clean_architecture.shared import response_object as res
class OrderDataGetUseCase(uc.UseCase):
def __init__(self, repo):
self.repo = repo
def execute(self, request_object):
#if not request_object:
#return res.ResponseFailure.build_from_invalid_request_object(request_object)
storage_rooms = self.repo.order(items=request_object.items)
return res.ResponseSuccess(storage_rooms)
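# Illustrative usage sketch (added for clarity, not part of the original module).
# The repo below is a hypothetical stand-in: the use case only needs an object
# exposing .order(items=...) and a request object carrying an .items attribute.
def _order_data_usage_example(request_object):
    class _FakeRepo:
        def order(self, items):
            return sorted(items)
    use_case = OrderDataGetUseCase(repo=_FakeRepo())
    return use_case.execute(request_object)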
| [((471, 505), 'python_clean_architecture.shared.response_object.ResponseSuccess', 'res.ResponseSuccess', (['storage_rooms'], {}), '(storage_rooms)\n', (490, 505), True, 'from python_clean_architecture.shared import response_object as res\n')] |
b-cube/OwsCapable | owscapable/swe/common.py | a01815418fe982434503d6542cb18e1ac8989684 | from __future__ import (absolute_import, division, print_function)
from owscapable.util import nspath_eval
from owscapable.namespaces import Namespaces
from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime
from dateutil import parser
from datetime import timedelta
from owscapable.etree import etree
def get_namespaces():
ns = Namespaces()
return ns.get_namespaces(["swe20", "xlink"])
namespaces = get_namespaces()
def nspv(path):
return nspath_eval(path, namespaces)
def make_pair(string, cast=None):
if string is None:
return None
string = string.split(" ")
    if cast is not None:
        try:
            # use a list (not a lazy map) so cast failures are raised and caught here
            string = [cast(x) for x in string]
        except:
            print("Could not cast pair to correct type. Setting to an empty tuple!")
            string = ""
    return tuple(string)
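# Illustrative sketch (added for clarity, not part of the original module):
# make_pair splits a space-separated string and optionally casts both halves,
# e.g. an AllowedValues interval such as "0.0 100.0" becomes a float tuple.
def _make_pair_example():
    return make_pair("0.0 100.0", cast=float)   # -> (0.0, 100.0)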
def get_uom(element):
uom = testXMLAttribute(element, "code")
if uom is None:
uom = testXMLAttribute(element, nspv("xlink:href"))
return uom
def get_boolean(value):
if value is None:
return None
if value is True or value.lower() in ["yes","true"]:
return True
elif value is False or value.lower() in ["no","false"]:
return False
else:
return None
def get_int(value):
try:
return int(value)
except:
return None
def get_float(value):
try:
return float(value)
except:
return None
AnyScalar = map(lambda x: nspv(x), ["swe20:Boolean", "swe20:Count", "swe20:Quantity", "swe20:Time", "swe20:Category", "swe20:Text"])
AnyNumerical = map(lambda x: nspv(x), ["swe20:Count", "swe20:Quantity", "swe20:Time"])
AnyRange = map(lambda x: nspv(x), ["swe20:QuantityRange", "swe20:TimeRange", "swe20:CountRange", "swe20:CategoryRange"])
class NamedObject(object):
def __init__(self, element):
# No call to super(), the type object will process that.
self.name = testXMLAttribute(element, "name")
try:
self.content = eval(element[-1].tag.split("}")[-1])(element[-1])
except IndexError:
self.content = None
except BaseException:
raise
    # Fall back to the wrapped content if the attribute does not exist
def __getattr__(self, name):
return getattr(self.content, name)
class AbstractSWE(object):
def __init__(self, element):
# Attributes
self.id = testXMLAttribute(element,"id") # string, optional
# Elements
self.extention = [] # anyType, min=0, max=X
class AbstractSWEIdentifiable(AbstractSWE):
def __init__(self, element):
super(AbstractSWEIdentifiable, self).__init__(element)
# Elements
self.identifier = testXMLValue(element.find(nspv("swe20:identifier"))) # anyURI, min=0
self.label = testXMLValue(element.find(nspv("swe20:label"))) # string, min=0
self.description = testXMLValue(element.find(nspv("swe20:description"))) # string, min=0
class AbstractDataComponent(AbstractSWEIdentifiable):
def __init__(self, element):
super(AbstractDataComponent, self).__init__(element)
# Attributes
self.definition = testXMLAttribute(element,"definition") # anyURI, required
self.updatable = get_boolean(testXMLAttribute(element,"updatable")) # boolean, optional
self.optional = get_boolean(testXMLAttribute(element,"optional")) or False # boolean, default=False
class AbstractSimpleComponent(AbstractDataComponent):
def __init__(self, element):
super(AbstractSimpleComponent, self).__init__(element)
# Attributes
self.referenceFrame = testXMLAttribute(element,"referenceFrame") # anyURI, optional
self.axisID = testXMLAttribute(element,"axisID") # string, optional
# Elements
self.quality = filter(None, [Quality(q) for q in [e.find('*') for e in element.findall(nspv("swe20:quality"))] if q is not None])
try:
self.nilValues = NilValues(element.find(nspv("swe20:nilValues")))
except:
self.nilValues = None
class Quality(object):
def __new__(cls, element):
t = element.tag.split("}")[-1]
if t == "Quantity":
return Quantity(element)
elif t == "QuantityRange":
return QuantityRange(element)
elif t == "Category":
return Category(element)
elif t == "Text":
return Text(element)
else:
return None
class NilValues(AbstractSWE):
def __init__(self, element):
super(NilValues, self).__init__(element)
self.nilValue = filter(None, [nilValue(x) for x in element.findall(nspv("swe20:nilValue"))]) # string, min=0, max=X
class nilValue(object):
def __init__(self, element):
self.reason = testXMLAttribute(element, "reason")
self.value = testXMLValue(element)
class AllowedTokens(AbstractSWE):
def __init__(self, element):
super(AllowedTokens, self).__init__(element)
self.value = filter(None, [testXMLValue(x) for x in element.findall(nspv("swe20:value"))]) # string, min=0, max=X
self.pattern = testXMLValue(element.find(nspv("swe20:pattern"))) # string (Unicode Technical Standard #18, Version 13), min=0
class AllowedValues(AbstractSWE):
def __init__(self, element):
super(AllowedValues, self).__init__(element)
self.value = filter(None, map(lambda x: get_float(x), [testXMLValue(x) for x in element.findall(nspv("swe20:value"))]))
self.interval = filter(None, [make_pair(testXMLValue(x)) for x in element.findall(nspv("swe20:interval"))])
self.significantFigures = get_int(testXMLValue(element.find(nspv("swe20:significantFigures")))) # integer, min=0
class AllowedTimes(AbstractSWE):
def __init__(self, element):
super(AllowedTimes, self).__init__(element)
self.value = filter(None, [testXMLValue(x) for x in element.findall(nspv("swe20:value"))])
self.interval = filter(None, [make_pair(testXMLValue(x)) for x in element.findall(nspv("swe20:interval"))])
self.significantFigures = get_int(testXMLValue(element.find(nspv("swe20:significantFigures")))) # integer, min=0
class Boolean(AbstractSimpleComponent):
def __init__(self, element):
super(Boolean, self).__init__(element)
# Elements
"""
6.2.1 Boolean
        A Boolean representation of a property can take only two values that should be "true/false" or "yes/no".
        """
        self.value = get_boolean(testXMLValue(element.find(nspv("swe20:value"))))      # boolean, min=0, max=1
class Text(AbstractSimpleComponent):
def __init__(self, element):
super(Text, self).__init__(element)
# Elements
"""
Req 6. A textual representation shall at least consist of a character string.
"""
self.value = testXMLValue(element.find(nspv("swe20:value"))) # string, min=0, max=1
try:
self.constraint = AllowedTokens(element.find(nspv("swe20:constraint/swe20:AllowedTokens"))) # AllowedTokens, min=0, max=1
except:
self.constraint = None
class Category(AbstractSimpleComponent):
def __init__(self, element):
super(Category, self).__init__(element)
# Elements
self.codeSpace = testXMLAttribute(element.find(nspv("swe20:codeSpace")), nspv("xlink:href")) # Reference, min=0, max=1
self.value = testXMLValue(element.find(nspv("swe20:value"))) # string, min=0, max=1
try:
self.constraint = AllowedTokens(element.find(nspv("swe20:constraint/swe20:AllowedTokens"))) # AllowedTokens, min=0, max=1
except:
self.constraint = None
class CategoryRange(Category):
def __init__(self, element):
super(CategoryRange, self).__init__(element)
# Elements
value = testXMLValue(element.find(nspv("swe20:value")))
self.values = make_pair(value) if value is not None else None
class Count(AbstractSimpleComponent):
def __init__(self, element):
super(Count, self).__init__(element)
# Elements
self.value = get_int(testXMLValue(element.find(nspv("swe20:value")))) # integer, min=0, max=1
try:
self.constraint = AllowedValues(element.find(nspv("swe20:constraint/swe20:AllowedValues"))) # AllowedValues, min=0, max=1
except:
self.constraint = None
class CountRange(Count):
def __init__(self, element):
super(CountRange, self).__init__(element)
# Elements
value = testXMLValue(element.find(nspv("swe20:value")))
self.value = make_pair(value,int) if value is not None else None
class Quantity(AbstractSimpleComponent):
def __init__(self, element):
super(Quantity, self).__init__(element)
# Elements
self.uom = get_uom(element.find(nspv("swe20:uom")))
self.value = get_float(testXMLValue(element.find(nspv("swe20:value")))) # double, min=0, max=1
try:
self.constraint = AllowedValues(element.find(nspv("swe20:constraint/swe20:AllowedValues"))) # AllowedValues, min=0, max=1
except:
self.constraint = None
class QuantityRange(Quantity):
def __init__(self, element):
super(QuantityRange, self).__init__(element)
# Elements
value = testXMLValue(element.find(nspv("swe20:value")))
self.value = make_pair(value,float) if value is not None else None
def get_time(value, referenceTime, uom):
try:
value = parser.parse(value)
except (AttributeError, ValueError): # Most likely an integer/float using a referenceTime
try:
if uom.lower() == "s":
value = referenceTime + timedelta(seconds=float(value))
elif uom.lower() == "min":
value = referenceTime + timedelta(minutes=float(value))
elif uom.lower() == "h":
value = referenceTime + timedelta(hours=float(value))
elif uom.lower() == "d":
value = referenceTime + timedelta(days=float(value))
except (AttributeError, ValueError):
pass
except OverflowError: # Too many numbers (> 10) or INF/-INF
if value.lower() == "inf":
value = InfiniteDateTime()
elif value.lower() == "-inf":
value = NegativeInfiniteDateTime()
return value
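# Illustrative sketch (added for clarity, not part of the original module):
# get_time first tries dateutil on the raw value; only when that parse fails is
# the value treated as a numeric offset from referenceTime in the given uom
# (s/min/h/d), and "INF"/"-INF" map to the open-ended datetime sentinels.
def _get_time_example():
    from datetime import datetime
    ref = datetime(2020, 1, 1)
    # an absolute ISO timestamp is parsed directly; referenceTime and uom are not used
    return get_time("2020-01-02T00:00:00", ref, "s")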
class Time(AbstractSimpleComponent):
def __init__(self, element):
super(Time, self).__init__(element)
# Elements
self.uom = get_uom(element.find(nspv("swe20:uom")))
try:
self.constraint = AllowedTimes(element.find(nspv("swe20:constraint/swe20:AllowedTimes"))) # AllowedTimes, min=0, max=1
except:
self.constraint = None
# Attributes
self.localFrame = testXMLAttribute(element,"localFrame") # anyURI, optional
try:
self.referenceTime = parser.parse(testXMLAttribute(element,"referenceTime")) # dateTime, optional
except (AttributeError, ValueError):
self.referenceTime = None
value = testXMLValue(element.find(nspv("swe20:value"))) # TimePosition, min=0, max=1
self.value = get_time(value, self.referenceTime, self.uom)
class TimeRange(AbstractSimpleComponent):
def __init__(self, element):
super(TimeRange, self).__init__(element)
# Elements
self.uom = get_uom(element.find(nspv("swe20:uom")))
try:
self.constraint = AllowedTimes(element.find(nspv("swe20:constraint/swe20:AllowedTimes"))) # AllowedTimes, min=0, max=1
except:
self.constraint = None
# Attributes
self.localFrame = testXMLAttribute(element,"localFrame") # anyURI, optional
try:
self.referenceTime = parser.parse(testXMLAttribute(element,"referenceTime")) # dateTime, optional
except (AttributeError, ValueError):
self.referenceTime = None
values = make_pair(testXMLValue(element.find(nspv("swe20:value")))) # TimePosition, min=0, max=1
self.value = [get_time(t, self.referenceTime, self.uom) for t in values]
class DataRecord(AbstractDataComponent):
def __init__(self, element):
super(DataRecord, self).__init__(element)
# Elements
self.field = [Field(x) for x in element.findall(nspv("swe20:field"))]
def get_by_name(self, name):
return next((x for x in self.field if x.name == name), None)
class Field(NamedObject):
def __init__(self, element):
super(Field, self).__init__(element)
class Vector(AbstractDataComponent):
def __init__(self, element):
super(Vector, self).__init__(element)
# Elements
self.coordinate = [Coordinate(x) for x in element.findall(nspv("swe20:coordinate"))]
# Attributes
self.referenceFrame = testXMLAttribute(element,"referenceFrame") # anyURI, required
self.localFrame = testXMLAttribute(element,"localFrame") # anyURI, optional
def get_by_name(self, name):
return next((x for x in self.coordinate if x.name == name), None)
class Coordinate(NamedObject):
def __init__(self, element):
super(Coordinate, self).__init__(element)
#if element[-1].tag not in AnyNumerical:
# print "Coordinate does not appear to be an AnyNumerical member"
class DataChoice(AbstractDataComponent):
def __init__(self, element):
super(DataChoice, self).__init__(element)
self.item = [Item(x) for x in element.findall(nspv("swe20:item"))]
def get_by_name(self, name):
return next((x for x in self.item if x.name == name), None)
class Item(NamedObject):
def __init__(self, element):
super(Item, self).__init__(element)
class DataArray(AbstractDataComponent):
def __init__(self, element):
super(DataArray, self).__init__(element)
self.elementCount = element.find(nspv("swe20:elementCount/swe20:Count")) # required
self.elementType = ElementType(element.find(nspv("swe20:elementType"))) # required
self.values = testXMLValue(element.find(nspv("swe20:values")))
try:
self.encoding = AbstractEncoding(element.find(nspv("swe20:encoding")))
except:
self.encoding = None
class Matrix(AbstractDataComponent):
def __init__(self, element):
super(Matrix, self).__init__(element)
self.elementCount = element.find(nspv("swe20:elementCount/swe20:Count")) # required
self.elementType = ElementType(element.find(nspv("swe20:elementType"))) # required
self.encoding = AbstractEncoding(element.find(nspv("swe20:encoding")))
self.values = testXMLValue(element.find(nspv("swe20:values")))
self.referenceFrame = testXMLAttribute(element, "referenceFrame") # anyURI, required
self.localFrame = testXMLAttribute(element, "localFrame") # anyURI, optional
class DataStream(AbstractSWEIdentifiable):
def __init__(self, element):
super(DataStream, self).__init__(element)
self.elementCount = element.find(nspv("swe20:elementCount/swe20:Count")) # optional
self.elementType = ElementType(element.find(nspv("swe20:elementType"))) # optional
self.encoding = AbstractEncoding(element.find(nspv("swe20:encoding")))
self.values = testXMLValue(element.find(nspv("swe20:values")))
class ElementType(NamedObject):
def __init__(self, element):
super(ElementType, self).__init__(element)
class AbstractEncoding(object):
def __new__(cls, element):
t = element[-1].tag.split("}")[-1]
if t == "TextEncoding":
return super(AbstractEncoding, cls).__new__(TextEncoding, element)
elif t == "XMLEncoding":
return super(AbstractEncoding, cls).__new__(XMLEncoding, element)
elif t == "BinaryEncoding":
return super(AbstractEncoding, cls).__new__(BinaryEncoding, element)
class TextEncoding(AbstractEncoding):
def __init__(self, element):
self.tokenSeparator = testXMLAttribute(element[-1], "tokenSeparator") # string, required
self.blockSeparator = testXMLAttribute(element[-1], "blockSeparator") # string, required
self.decimalSeparator = testXMLAttribute(element[-1], "decimalSeparator") or "." # string, optional, default="."
        # "or True" would override an explicit false value; only fall back to the default when the attribute is absent
        collapse = get_boolean(testXMLAttribute(element[-1], "collapseWhiteSpaces"))
        self.collapseWhiteSpaces = collapse if collapse is not None else True     # boolean, optional, default=True
class XMLEncoding(AbstractEncoding):
def __init__(self, element):
raise NotImplementedError
class BinaryEncoding(AbstractEncoding):
def __init__(self, element):
raise NotImplementedError
| [((384, 396), 'owscapable.namespaces.Namespaces', 'Namespaces', ([], {}), '()\n', (394, 396), False, 'from owscapable.namespaces import Namespaces\n'), ((504, 533), 'owscapable.util.nspath_eval', 'nspath_eval', (['path', 'namespaces'], {}), '(path, namespaces)\n', (515, 533), False, 'from owscapable.util import nspath_eval\n'), ((920, 953), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""code"""'], {}), "(element, 'code')\n", (936, 953), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((1989, 2022), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""name"""'], {}), "(element, 'name')\n", (2005, 2022), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((2472, 2503), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""id"""'], {}), "(element, 'id')\n", (2488, 2503), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((3294, 3333), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""definition"""'], {}), "(element, 'definition')\n", (3310, 3333), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((3806, 3849), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""referenceFrame"""'], {}), "(element, 'referenceFrame')\n", (3822, 3849), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((3901, 3936), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""axisID"""'], {}), "(element, 'axisID')\n", (3917, 3936), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((5013, 5048), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""reason"""'], {}), "(element, 'reason')\n", (5029, 5048), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((5083, 5104), 'owscapable.util.testXMLValue', 'testXMLValue', (['element'], {}), '(element)\n', (5095, 5104), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((10161, 10180), 'dateutil.parser.parse', 'parser.parse', (['value'], {}), '(value)\n', (10173, 10180), False, 'from dateutil import parser\n'), ((11500, 11539), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""localFrame"""'], {}), "(element, 'localFrame')\n", (11516, 11539), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((12516, 12555), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""localFrame"""'], {}), "(element, 'localFrame')\n", (12532, 12555), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((13778, 13821), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""referenceFrame"""'], {}), "(element, 'referenceFrame')\n", (13794, 13821), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((13877, 13916), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""localFrame"""'], {}), "(element, 'localFrame')\n", (13893, 13916), False, 'from owscapable.util import 
testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((15766, 15809), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""referenceFrame"""'], {}), "(element, 'referenceFrame')\n", (15782, 15809), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((15873, 15912), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""localFrame"""'], {}), "(element, 'localFrame')\n", (15889, 15912), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((17112, 17159), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element[-1]', '"""tokenSeparator"""'], {}), "(element[-1], 'tokenSeparator')\n", (17128, 17159), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((17244, 17291), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element[-1]', '"""blockSeparator"""'], {}), "(element[-1], 'blockSeparator')\n", (17260, 17291), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((3417, 3455), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""updatable"""'], {}), "(element, 'updatable')\n", (3433, 3455), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((17376, 17425), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element[-1]', '"""decimalSeparator"""'], {}), "(element[-1], 'decimalSeparator')\n", (17392, 17425), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((3529, 3566), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""optional"""'], {}), "(element, 'optional')\n", (3545, 3566), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((5274, 5289), 'owscapable.util.testXMLValue', 'testXMLValue', (['x'], {}), '(x)\n', (5286, 5289), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((6250, 6265), 'owscapable.util.testXMLValue', 'testXMLValue', (['x'], {}), '(x)\n', (6262, 6265), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((10908, 10926), 'owscapable.util.InfiniteDateTime', 'InfiniteDateTime', ([], {}), '()\n', (10924, 10926), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((11653, 11695), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""referenceTime"""'], {}), "(element, 'referenceTime')\n", (11669, 11695), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((12665, 12707), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element', '"""referenceTime"""'], {}), "(element, 'referenceTime')\n", (12681, 12707), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((17533, 17585), 'owscapable.util.testXMLAttribute', 'testXMLAttribute', (['element[-1]', '"""collapseWhiteSpaces"""'], {}), "(element[-1], 'collapseWhiteSpaces')\n", (17549, 17585), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((5731, 
5746), 'owscapable.util.testXMLValue', 'testXMLValue', (['x'], {}), '(x)\n', (5743, 5746), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((5854, 5869), 'owscapable.util.testXMLValue', 'testXMLValue', (['x'], {}), '(x)\n', (5866, 5869), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((6372, 6387), 'owscapable.util.testXMLValue', 'testXMLValue', (['x'], {}), '(x)\n', (6384, 6387), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n'), ((10986, 11012), 'owscapable.util.NegativeInfiniteDateTime', 'NegativeInfiniteDateTime', ([], {}), '()\n', (11010, 11012), False, 'from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime\n')] |
gao969/scaffold-dgc-clustering | main_fed.py | 9f259dfdf0897dcb1dece2e1197268f585f54a69 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
import os
import torch.distributed as dist
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Update import LocalUpdateF
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
from torch.multiprocessing import Process
from deep_gradient_compression import DGC
import json
# __name__ is a built-in variable; when this file (main_fed.py) is run directly, it defaults to __main__.
# But if another .py file imports this file (main_fed.py), then __name__ inside main_fed.py is set to the module name (main_fed) instead.
if __name__ == '__main__':
# parse args
args = args_parser()
args.device = torch.device('cuda:{}'.format(args.gpu))
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
rank = 0
device_id = rank
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend='gloo', rank=rank, world_size=args.world_size)
# if torch.cuda.is_available() and args.gpu != -1 else 'cpu'
# load dataset and split users
if args.dataset == 'mnist':
        # ToTensor(): scales the data to (0, 1); Normalize(): (data - 0.1307) / 0.3081, spreading it to roughly (-1, 1)
trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
if trans_mnist is not None:
print(1)
print(trans_mnist)
        # training set (60,000 images) and test set (10,000 images)
dataset_train = datasets.MNIST('../data/mnist/', train=True, download=True, transform=trans_mnist)
dataset_test = datasets.MNIST('../data/mnist/', train=False, download=True, transform=trans_mnist)
# sample users
        # non-IID data
if args.iid:
dict_users = mnist_iid(dataset_train, args.num_users)
else:
dict_users = mnist_noniid(dataset_train, args.num_users)
elif args.dataset == 'cifar':
trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset_train = datasets.CIFAR10('../data/cifar', train=True, download=True, transform=trans_cifar)
dataset_test = datasets.CIFAR10('../data/cifar', train=False, download=True, transform=trans_cifar)
if args.iid:
dict_users = cifar_iid(dataset_train, args.num_users)
else:
exit('Error: only consider IID setting in CIFAR10')
else:
exit('Error: unrecognized dataset')
img_size = dataset_train[0][0].shape
# print('df ',img_size) [1,28,28]
# build model
# print(args.model)
if args.model == 'cnn' and args.dataset == 'cifar':
net_glob = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_glob = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
            # print('x value', x)
len_in *= x
net_glob = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
# add
control_global = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
    # set the model to training mode
net_glob.train()
print(net_glob)
    control_weights = control_global.state_dict()
    # copy weights
    # initialize the global weights
w_glob = net_glob.state_dict()
c_glob = copy.deepcopy(net_glob.state_dict())
# print(w_glob)
# training
loss_train = []
accuracy = []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
count = 0, 0
test_acc_list = []
if args.all_clients:
print("Aggregation over all clients")
w_locals = [w_glob for i in range(args.num_users)]
# add
else:
        # initialize the local (per-client) weights
c_local = [MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device) for i in
range(args.num_users)]
for net in c_local:
net.load_state_dict(control_weights)
delta_c = copy.deepcopy(net_glob.state_dict())
# delta_x = copy.deepcopy(net_glob.state_dict())
# with open("test.txt", "w") as f:
# for i in range(0, len(c_local)):
# for k,v in c_local[i].state_dict().items():
# f.write(f"{k},{v}\n".format(k,v))
# with open("test.txt", "a") as f:
# for i in range(0, len(c_local)):
# for k, v in w_locals[i].items():
# f.write(f"{k},{v}\n".format(k, v))
    # add: initialize the deltas
# print("why?")
for iter in range(args.epochs):
        # initialize the control variables
for i in delta_c:
delta_c[i] = 0.0
# for i in delta_x:
# delta_x[i] = 0.0
loss_locals = []
if not args.all_clients:
w_locals = []
m = max(int(args.frac * args.num_users), 1)
        # randomly pick ten lucky clients each round
idxs_users = np.random.choice(range(args.num_users), m, replace=False)
for idx in idxs_users:
            # SGD with momentum
local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
            w, loss, local_delta_c, local_delta, control_local_w = local.train(
                net=copy.deepcopy(net_glob).to(args.device), control_local=c_local[idx],
                control_global=control_global, rank=rank, device_id=device_id, size=args.world_size)
# add
if iter != 0:
c_local[idx].load_state_dict(control_local_w)
if args.all_clients:
w_locals[idx] = copy.deepcopy(w)
else:
w_locals.append(copy.deepcopy(w))
# add
loss_locals.append(copy.deepcopy(loss))
# add
for i in delta_c:
if iter != 0:
delta_c[i] += w[i]
else:
delta_c[i] += local_delta_c[i]
# delta_x[i] += local_delta[i]
# add
# update the delta C
for i in delta_c:
delta_c[i] /= m
# delta_x[i] /= m
# update global weights
w_glob = FedAvg(w_locals)
        # add: update the global c and w
# w_glob = net_glob.state_dict()
control_global_w = control_global.state_dict()
for i in control_global_w:
if iter !=0:
# w_glob[i] = delta_x[i]
# else:
# w_glob[i] += delta_x[i]
control_global_w[i] += (m / args.num_users) * delta_c[i]
# copy weight to net_glob
net_glob.load_state_dict(w_glob)
# add
control_global.load_state_dict(control_global_w)
# print loss
loss_avg = sum(loss_locals) / len(loss_locals)
print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
loss_train.append(loss_avg)
# acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
accuracy.append(acc_test)
# add
for c in range(args.num_users):
            # `idx` here was left over from the previous loop; use the loop variable c
            local_model = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[c])
torch.cuda.empty_cache()
# net_glob.eval()
# print("Training accuracy: {:.2f}".format(acc_train))
# print("Testing accuracy: {:.2f}".format(acc_test))
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
# Fedavg
# build model
if args.model == 'cnn' and args.dataset == 'cifar':
net_globF = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_globF = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
len_in *= x
net_globF = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
print(net_globF)
net_globF.train()
# copy weights
w_globF = net_globF.state_dict()
# training
loss_trainF = []
accuracyF = []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
if args.all_clients:
print("Aggregation over all clients")
w_localsF = [w_globF for i in range(args.num_users)]
for iter in range(args.epochs):
loss_locals = []
if not args.all_clients:
w_localsF = []
m = max(int(args.frac * args.num_users), 1)
idxs_users = np.random.choice(range(args.num_users), m, replace=False)
for idx in idxs_users:
localF = LocalUpdateF(args=args, dataset=dataset_train, idxs=dict_users[idx])
w, loss = localF.train(net=copy.deepcopy(net_globF).to(args.device))
if args.all_clients:
w_localsF[idx] = copy.deepcopy(w)
else:
w_localsF.append(copy.deepcopy(w))
loss_locals.append(copy.deepcopy(loss))
# update global weights
w_globF = FedAvg(w_localsF)
# copy weight to net_globF
net_globF.load_state_dict(w_globF)
# print loss
loss_avgF = sum(loss_locals) / len(loss_locals)
print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avgF))
loss_trainF.append(loss_avgF)
acc_test, loss_test = test_img(net_globF, dataset_test, args)
accuracyF.append(acc_test)
# plot loss curve
plt.figure()
print(loss_train, loss_trainF)
plt.plot(range(len(loss_train)), loss_train, label='Scaffold', zorder=2)
plt.plot(range(len(loss_trainF)), loss_trainF, 'r', label='FedAvg',zorder=1)
plt.ylabel('train_loss')
plt.xlabel('epochs')
plt.legend(loc='best')
plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'train_loss', args.iid))
# testing
net_glob.eval()
acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
print("Training accuracy: {:.2f}".format(acc_train))
print("Testing accuracy: {:.2f}".format(acc_test))
# plot loss curve
plt.figure()
# plt.plot((np.arange(1, len(accuracy)), 1), accuracy, 'r')
plt.plot(range(len(accuracy)), accuracy, label='Scaffold', zorder=2)
plt.plot(range(len(accuracyF)), accuracyF, 'r', label='FedAvg', zorder=1)
plt.ylabel('test_acc')
plt.xlabel('epochs')
plt.legend(loc='best')
plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'acc_test', args.iid))
| [((87, 108), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (101, 108), False, 'import matplotlib\n'), ((876, 889), 'utils.options.args_parser', 'args_parser', ([], {}), '()\n', (887, 889), False, 'from utils.options import args_parser\n'), ((953, 973), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (970, 973), False, 'import torch\n'), ((1186, 1264), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': '"""gloo"""', 'rank': 'rank', 'world_size': 'args.world_size'}), "(backend='gloo', rank=rank, world_size=args.world_size)\n", (1209, 1264), True, 'import torch.distributed as dist\n'), ((10241, 10253), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10251, 10253), True, 'import matplotlib.pyplot as plt\n'), ((10451, 10475), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""train_loss"""'], {}), "('train_loss')\n", (10461, 10475), True, 'import matplotlib.pyplot as plt\n'), ((10480, 10500), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (10490, 10500), True, 'import matplotlib.pyplot as plt\n'), ((10505, 10527), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (10515, 10527), True, 'import matplotlib.pyplot as plt\n'), ((10714, 10753), 'models.test.test_img', 'test_img', (['net_glob', 'dataset_train', 'args'], {}), '(net_glob, dataset_train, args)\n', (10722, 10753), False, 'from models.test import test_img\n'), ((10780, 10818), 'models.test.test_img', 'test_img', (['net_glob', 'dataset_test', 'args'], {}), '(net_glob, dataset_test, args)\n', (10788, 10818), False, 'from models.test import test_img\n'), ((10958, 10970), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10968, 10970), True, 'import matplotlib.pyplot as plt\n'), ((11190, 11212), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""test_acc"""'], {}), "('test_acc')\n", (11200, 11212), True, 'import matplotlib.pyplot as plt\n'), ((11217, 11237), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (11227, 11237), True, 'import matplotlib.pyplot as plt\n'), ((11242, 11264), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (11252, 11264), True, 'import matplotlib.pyplot as plt\n'), ((1734, 1821), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""../data/mnist/"""'], {'train': '(True)', 'download': '(True)', 'transform': 'trans_mnist'}), "('../data/mnist/', train=True, download=True, transform=\n trans_mnist)\n", (1748, 1821), False, 'from torchvision import datasets, transforms\n'), ((1840, 1928), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""../data/mnist/"""'], {'train': '(False)', 'download': '(True)', 'transform': 'trans_mnist'}), "('../data/mnist/', train=False, download=True, transform=\n trans_mnist)\n", (1854, 1928), False, 'from torchvision import datasets, transforms\n'), ((6473, 6489), 'models.Fed.FedAvg', 'FedAvg', (['w_locals'], {}), '(w_locals)\n', (6479, 6489), False, 'from models.Fed import FedAvg\n'), ((7284, 7322), 'models.test.test_img', 'test_img', (['net_glob', 'dataset_test', 'args'], {}), '(net_glob, dataset_test, args)\n', (7292, 7322), False, 'from models.test import test_img\n'), ((9819, 9836), 'models.Fed.FedAvg', 'FedAvg', (['w_localsF'], {}), '(w_localsF)\n', (9825, 9836), False, 'from models.Fed import FedAvg\n'), ((10137, 10176), 'models.test.test_img', 'test_img', (['net_globF', 'dataset_test', 'args'], {}), '(net_globF, dataset_test, args)\n', (10145, 
10176), False, 'from models.test import test_img\n'), ((2012, 2052), 'utils.sampling.mnist_iid', 'mnist_iid', (['dataset_train', 'args.num_users'], {}), '(dataset_train, args.num_users)\n', (2021, 2052), False, 'from utils.sampling import mnist_iid, mnist_noniid, cifar_iid\n'), ((2092, 2135), 'utils.sampling.mnist_noniid', 'mnist_noniid', (['dataset_train', 'args.num_users'], {}), '(dataset_train, args.num_users)\n', (2104, 2135), False, 'from utils.sampling import mnist_iid, mnist_noniid, cifar_iid\n'), ((2316, 2404), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""../data/cifar"""'], {'train': '(True)', 'download': '(True)', 'transform': 'trans_cifar'}), "('../data/cifar', train=True, download=True, transform=\n trans_cifar)\n", (2332, 2404), False, 'from torchvision import datasets, transforms\n'), ((2423, 2512), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""../data/cifar"""'], {'train': '(False)', 'download': '(True)', 'transform': 'trans_cifar'}), "('../data/cifar', train=False, download=True, transform=\n trans_cifar)\n", (2439, 2512), False, 'from torchvision import datasets, transforms\n'), ((5401, 5468), 'models.Update.LocalUpdate', 'LocalUpdate', ([], {'args': 'args', 'dataset': 'dataset_train', 'idxs': 'dict_users[idx]'}), '(args=args, dataset=dataset_train, idxs=dict_users[idx])\n', (5412, 5468), False, 'from models.Update import LocalUpdate\n'), ((7439, 7506), 'models.Update.LocalUpdate', 'LocalUpdate', ([], {'args': 'args', 'dataset': 'dataset_train', 'idxs': 'dict_users[idx]'}), '(args=args, dataset=dataset_train, idxs=dict_users[idx])\n', (7450, 7506), False, 'from models.Update import LocalUpdate\n'), ((7519, 7543), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (7541, 7543), False, 'import torch\n'), ((9415, 9483), 'models.Update.LocalUpdateF', 'LocalUpdateF', ([], {'args': 'args', 'dataset': 'dataset_train', 'idxs': 'dict_users[idx]'}), '(args=args, dataset=dataset_train, idxs=dict_users[idx])\n', (9427, 9483), False, 'from models.Update import LocalUpdateF\n'), ((1523, 1544), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1542, 1544), False, 'from torchvision import datasets, transforms\n'), ((1546, 1588), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (1566, 1588), False, 'from torchvision import datasets, transforms\n'), ((2554, 2594), 'utils.sampling.cifar_iid', 'cifar_iid', (['dataset_train', 'args.num_users'], {}), '(dataset_train, args.num_users)\n', (2563, 2594), False, 'from utils.sampling import mnist_iid, mnist_noniid, cifar_iid\n'), ((2924, 2943), 'models.Nets.CNNCifar', 'CNNCifar', ([], {'args': 'args'}), '(args=args)\n', (2932, 2943), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((5897, 5913), 'copy.deepcopy', 'copy.deepcopy', (['w'], {}), '(w)\n', (5910, 5913), False, 'import copy\n'), ((6032, 6051), 'copy.deepcopy', 'copy.deepcopy', (['loss'], {}), '(loss)\n', (6045, 6051), False, 'import copy\n'), ((8284, 8303), 'models.Nets.CNNCifar', 'CNNCifar', ([], {'args': 'args'}), '(args=args)\n', (8292, 8303), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((9631, 9647), 'copy.deepcopy', 'copy.deepcopy', (['w'], {}), '(w)\n', (9644, 9647), False, 'import copy\n'), ((9748, 9767), 'copy.deepcopy', 'copy.deepcopy', (['loss'], {}), '(loss)\n', (9761, 9767), False, 'import copy\n'), ((2212, 2233), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2231, 2233), 
False, 'from torchvision import datasets, transforms\n'), ((2235, 2289), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (2255, 2289), False, 'from torchvision import datasets, transforms\n'), ((3037, 3056), 'models.Nets.CNNMnist', 'CNNMnist', ([], {'args': 'args'}), '(args=args)\n', (3045, 3056), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((4140, 4200), 'models.Nets.MLP', 'MLP', ([], {'dim_in': 'len_in', 'dim_hidden': '(200)', 'dim_out': 'args.num_classes'}), '(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes)\n', (4143, 4200), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((5964, 5980), 'copy.deepcopy', 'copy.deepcopy', (['w'], {}), '(w)\n', (5977, 5980), False, 'import copy\n'), ((8398, 8417), 'models.Nets.CNNMnist', 'CNNMnist', ([], {'args': 'args'}), '(args=args)\n', (8406, 8417), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((9699, 9715), 'copy.deepcopy', 'copy.deepcopy', (['w'], {}), '(w)\n', (9712, 9715), False, 'import copy\n'), ((3222, 3282), 'models.Nets.MLP', 'MLP', ([], {'dim_in': 'len_in', 'dim_hidden': '(200)', 'dim_out': 'args.num_classes'}), '(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes)\n', (3225, 3282), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((3338, 3398), 'models.Nets.MLP', 'MLP', ([], {'dim_in': 'len_in', 'dim_hidden': '(200)', 'dim_out': 'args.num_classes'}), '(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes)\n', (3341, 3398), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((8554, 8614), 'models.Nets.MLP', 'MLP', ([], {'dim_in': 'len_in', 'dim_hidden': '(200)', 'dim_out': 'args.num_classes'}), '(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes)\n', (8557, 8614), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((5551, 5574), 'copy.deepcopy', 'copy.deepcopy', (['net_glob'], {}), '(net_glob)\n', (5564, 5574), False, 'import copy\n'), ((9523, 9547), 'copy.deepcopy', 'copy.deepcopy', (['net_globF'], {}), '(net_globF)\n', (9536, 9547), False, 'import copy\n')] |
gkazla/B.LambdaLayerCommon | b_lambda_layer_common_test/integration/infrastructure/function_with_unit_tests.py | 1a4f9cd3d8b7e447c8467bd7dde50cb9e9a6e980 | from aws_cdk.aws_lambda import Function, Code, Runtime
from aws_cdk.core import Stack, Duration
from b_aws_testing_framework.tools.cdk_testing.testing_stack import TestingStack
from b_cfn_lambda_layer.package_version import PackageVersion
from b_lambda_layer_common.layer import Layer
from b_lambda_layer_common_test.unit import root
class FunctionWithUnitTests(Function):
"""
    Function that lets us run unit tests inside a Lambda function. We want to run unit
tests both locally and remotely.
"""
def __init__(self, scope: Stack):
super().__init__(
scope=scope,
id=f'{TestingStack.global_prefix()}FunctionWithUnitTests',
code=Code.from_asset(root),
handler='handler.handler',
runtime=Runtime.PYTHON_3_8,
timeout=Duration.minutes(5),
memory_size=512,
layers=[
Layer(
scope=scope,
name=f'{TestingStack.global_prefix()}TestingLayerWithUnitTests',
dependencies={
# These dependencies are required for running unit tests inside lambda functions.
# Pytest is used for running actual unit tests.
'pytest': PackageVersion.from_string_version('6.2.5'),
# Pook is used for HTTP mocking, therefore it is also needed here.
'pook': PackageVersion.from_string_version('1.0.1'),
                        # Not sure about this dependency. Lambda runtime throws errors if it's missing.
'aws-cdk.core': PackageVersion.from_string_version('1.99.0'),
# This dependency should be installed with 'pook' since it depends on 'jsonschema' which depends on this.
# For some reason it doesn't.
# Tests would fail with import error otherwise.
'importlib-resources': PackageVersion.from_string_version('5.4.0')
}
)
]
)
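# --- Editor's sketch (an assumption, not part of the original file) ---
# How this construct would typically be exercised: instantiate it inside a CDK app
# and synthesize. The app and stack identifiers below are hypothetical.
if __name__ == '__main__':
    from aws_cdk.core import App
    app = App()
    stack = Stack(app, 'FunctionWithUnitTestsStack')
    FunctionWithUnitTests(scope=stack)
    app.synth()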
| [((691, 712), 'aws_cdk.aws_lambda.Code.from_asset', 'Code.from_asset', (['root'], {}), '(root)\n', (706, 712), False, 'from aws_cdk.aws_lambda import Function, Code, Runtime\n'), ((813, 832), 'aws_cdk.core.Duration.minutes', 'Duration.minutes', (['(5)'], {}), '(5)\n', (829, 832), False, 'from aws_cdk.core import Stack, Duration\n'), ((621, 649), 'b_aws_testing_framework.tools.cdk_testing.testing_stack.TestingStack.global_prefix', 'TestingStack.global_prefix', ([], {}), '()\n', (647, 649), False, 'from b_aws_testing_framework.tools.cdk_testing.testing_stack import TestingStack\n'), ((1272, 1315), 'b_cfn_lambda_layer.package_version.PackageVersion.from_string_version', 'PackageVersion.from_string_version', (['"""6.2.5"""'], {}), "('6.2.5')\n", (1306, 1315), False, 'from b_cfn_lambda_layer.package_version import PackageVersion\n'), ((1440, 1483), 'b_cfn_lambda_layer.package_version.PackageVersion.from_string_version', 'PackageVersion.from_string_version', (['"""1.0.1"""'], {}), "('1.0.1')\n", (1474, 1483), False, 'from b_cfn_lambda_layer.package_version import PackageVersion\n'), ((1628, 1672), 'b_cfn_lambda_layer.package_version.PackageVersion.from_string_version', 'PackageVersion.from_string_version', (['"""1.99.0"""'], {}), "('1.99.0')\n", (1662, 1672), False, 'from b_cfn_lambda_layer.package_version import PackageVersion\n'), ((1977, 2020), 'b_cfn_lambda_layer.package_version.PackageVersion.from_string_version', 'PackageVersion.from_string_version', (['"""5.4.0"""'], {}), "('5.4.0')\n", (2011, 2020), False, 'from b_cfn_lambda_layer.package_version import PackageVersion\n'), ((968, 996), 'b_aws_testing_framework.tools.cdk_testing.testing_stack.TestingStack.global_prefix', 'TestingStack.global_prefix', ([], {}), '()\n', (994, 996), False, 'from b_aws_testing_framework.tools.cdk_testing.testing_stack import TestingStack\n')] |
neurips2020submission11699/metarl | tests/metarl/tf/baselines/test_baselines.py | ae4825d21478fa1fd0aa6b116941ea40caa152a5 | """
This script creates a test that fails when
metarl.tf.baselines failed to initialize.
"""
import tensorflow as tf
from metarl.envs import MetaRLEnv
from metarl.tf.baselines import ContinuousMLPBaseline
from metarl.tf.baselines import GaussianMLPBaseline
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
class TestTfBaselines(TfGraphTestCase):
def test_baseline(self):
"""Test the baseline initialization."""
box_env = MetaRLEnv(DummyBoxEnv())
deterministic_mlp_baseline = ContinuousMLPBaseline(env_spec=box_env)
gaussian_mlp_baseline = GaussianMLPBaseline(env_spec=box_env)
self.sess.run(tf.compat.v1.global_variables_initializer())
deterministic_mlp_baseline.get_param_values()
gaussian_mlp_baseline.get_param_values()
box_env.close()
| [((551, 590), 'metarl.tf.baselines.ContinuousMLPBaseline', 'ContinuousMLPBaseline', ([], {'env_spec': 'box_env'}), '(env_spec=box_env)\n', (572, 590), False, 'from metarl.tf.baselines import ContinuousMLPBaseline\n'), ((623, 660), 'metarl.tf.baselines.GaussianMLPBaseline', 'GaussianMLPBaseline', ([], {'env_spec': 'box_env'}), '(env_spec=box_env)\n', (642, 660), False, 'from metarl.tf.baselines import GaussianMLPBaseline\n'), ((499, 512), 'tests.fixtures.envs.dummy.DummyBoxEnv', 'DummyBoxEnv', ([], {}), '()\n', (510, 512), False, 'from tests.fixtures.envs.dummy import DummyBoxEnv\n'), ((684, 727), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (725, 727), True, 'import tensorflow as tf\n')] |
trackit/trackit-legacy | api/files/api/app/monthly_report.py | 76cfab7941eddb9d390dd6c7b9a408a9ad4fc8da | import jinja2
import json
from send_email import send_email
from app.models import User, MyResourcesAWS, db
from app.es.awsdetailedlineitem import AWSDetailedLineitem
from sqlalchemy import desc
import subprocess
import datetime
from flask import render_template
def monthly_html_template():
template_dir = '/usr/trackit/templates'
loader = jinja2.FileSystemLoader(template_dir)
env = jinja2.Environment(loader=loader)
template = env.get_template('emailPDFreport.html')
now = datetime.datetime.now()
try:
users = User.query.all()
for user in users:
if user.report_last_emailed_at == None:
user.report_last_emailed_at = datetime.datetime.utcnow()
db.session.add(user)
db.session.commit()
last_emailed_days = (now - user.report_last_emailed_at).days
if last_emailed_days >= 30:
for key in user.aws_keys:
date = "{} {}".format(now.strftime("%B"), now.year)
pretty_key = user.get_aws_key(key.key).pretty + ' ' + key.key
monthly_cost = AWSDetailedLineitem.get_monthly_cost_by_product(key.get_aws_user_id())
estimation_hour, estimation_month = get_estimation(user, key)
total = sum(float(i.get("cost")) for i in monthly_cost['products'])
email_template = template.render(email=user.email, date=date, key=pretty_key, products=monthly_cost['products'], total=total, hourly_cost=estimation_hour, monthly_cost=estimation_month)
if user.email.endswith("msolution.io"):
send_email(user.email, 'Trackit monthly report', email_template.encode('utf-8').strip(), True)
user.report_last_emailed_at = datetime.datetime.utcnow()
db.session.add(user)
db.session.commit()
    except Exception as e:
print("ERROR " + str(e))
def get_estimation(user, key):
estimation = MyResourcesAWS.query.filter(MyResourcesAWS.key == key.key).order_by(desc(MyResourcesAWS.date)).first()
estimation = [] if not estimation else estimation.json()
cost = sum(estimation_cost(e) for e in estimation)
return cost, cost*720
def estimation_cost(estimation):
return sum(item['cost'] for item in estimation['prices'] if item['name'] == 'aws')
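# --- Editor's note (illustrative, with an assumed JSON shape) ---
# estimation_cost() only sums items named 'aws' inside an entry's 'prices' list, so
# a hypothetical entry such as
#     {'prices': [{'name': 'aws', 'cost': 0.12}, {'name': 'other', 'cost': 0.05}]}
# contributes 0.12 per hour, and get_estimation() scales the summed hourly cost by
# 720 (~30 days * 24 hours) to produce the monthly figure used in the email report.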
| [] |
rdturnermtl/mlpaper | slow_tests/boot_test.py | 5da5cb7b3a56d3cfdc7162d01fac2679c9050e76 | # Ryan Turner ([email protected])
from __future__ import division, print_function
from builtins import range
import numpy as np
import scipy.stats as ss
import mlpaper.constants as cc
import mlpaper.mlpaper as bt
import mlpaper.perf_curves as pc
from mlpaper.classification import DEFAULT_NGRID, curve_boot
from mlpaper.test_constants import FPR
from mlpaper.util import area, interp1d
_FPR = FPR / 3.0 # Divide by number of test funcs
def fail_check_stat(fail, runs, expect_p_fail, fpr):
pvals_2side = [ss.binom_test(ff, runs, expect_p_fail) for ff in fail]
pvals_1side = [ss.binom_test(ff, runs, expect_p_fail, alternative="greater") for ff in fail]
    # Note that we are not doing multiple comparison correction between the
# two sided and one sided tests.
print(fail)
print(pvals_2side)
assert np.min(pvals_2side) >= fpr / len(pvals_2side)
print(pvals_1side)
assert np.min(pvals_1side) >= fpr / len(pvals_1side)
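# Editor's note on the helper above: each observed failure count is checked with a
# binomial test against the expected failure probability, and the suite only passes
# when every p-value clears fpr / n_tests -- a Bonferroni-style gate on flaky runs.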
def test_boot(runs=100):
N = 201
confidence = 0.95
# Drawing more seeds than we need to be safe
seeds = np.nditer(np.random.randint(low=0, high=int(1e6), size=runs * 5))
def run_trial(y_true, y_score, y_score_ref, true_curve, curve_f, seed, x_grid=None):
epsilon = 1e-6
curve, _ = curve_f(y_true, y_score[:, 1])
auc, = area(*curve)
curve, _ = curve_f(y_true, y_score_ref[:, 1])
auc_ref, = area(*curve)
true_value, = area(*true_curve)
np.random.seed(seed)
(auc_, EB, pval), curve = curve_boot(
y_true, y_score, ref=true_value, curve_f=curve_f, confidence=confidence, x_grid=x_grid
)
true_curve_grid, = interp1d(curve[cc.XGRID].values, *true_curve)
assert auc_ == auc
fail_EB = np.abs(auc - true_value) > EB
        # Could also test distn with a 1-sided KS test but this is easier for now
fail_P = pval < 1.0 - confidence
fail_curve = (true_curve_grid < curve[cc.LB].values - epsilon) | (
curve[cc.UB].values + epsilon < true_curve_grid
)
assert (x_grid is None) or np.all(curve[cc.XGRID].values == x_grid)
np.random.seed(seed)
(auc_, EB_, pval), curve_ = curve_boot(
y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=confidence, pairwise_CI=False, x_grid=x_grid
)
assert auc_ == auc
assert EB_ == EB
        # Could also test distn with a 1-sided KS test but this is easier for now
fail_P2 = pval < 1.0 - confidence
assert np.all(curve_.values == curve.values)
np.random.seed(seed)
(auc_, EB, pval_), curve_ = curve_boot(
y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=confidence, pairwise_CI=True, x_grid=x_grid
)
assert auc_ == auc
fail_EB2 = np.abs(auc - auc_ref) > EB
        # Could also test distn with a 1-sided KS test but this is easier for now
assert pval_ == pval
assert np.all(curve_.values == curve.values)
return fail_EB, fail_P, fail_EB2, fail_P2, fail_curve
fail = [0] * 12
fail_curve_roc = np.zeros(DEFAULT_NGRID, dtype=int)
fail_curve_ap = np.zeros(DEFAULT_NGRID, dtype=int)
fail_curve_prg = np.zeros(DEFAULT_NGRID, dtype=int)
for ii in range(runs):
mu = np.random.randn(2)
S = np.random.randn(2, 2)
S = np.dot(S, S.T)
# Coverage, esp at edges, is worse for imbalanced data. See issue #20.
p = 0.5
x_grid = np.linspace(0.0, 0.99, DEFAULT_NGRID)
true_curve = (np.array([[0.0, 1.0]]), np.array([[0.0, 1.0]]), pc.LINEAR)
y_true = np.random.rand(N) <= p
y_score = np.random.multivariate_normal(mu, S, size=N)
if np.random.randn() <= 0.5: # resample to test dupes
idx = np.random.choice(N, size=N, replace=True)
y_score = y_score[idx, :]
y_score, y_score_ref = y_score.T
y_score = np.stack((np.zeros(N), y_score), axis=1)
y_score_ref = np.stack((np.zeros(N), y_score_ref), axis=1)
# Coverage doesn't hold at edges, hence [0.05, 0.95]. See issue #20.
x_grid = np.linspace(0.05, 0.95, DEFAULT_NGRID)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.roc_curve, next(seeds), x_grid
)
fail[0] += fail_EB
fail[1] += fail_P
fail[2] += fail_EB2
fail[3] += fail_P2
fail_curve_roc += fail_curve
true_curve = (np.array([[0.0, 1.0]]), np.array([[p, p]]), pc.PREV)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.recall_precision_curve, next(seeds), x_grid
)
fail[4] += fail_EB
fail[5] += fail_P
fail[6] += fail_EB2
fail[7] += fail_P2
fail_curve_ap += fail_curve
x_grid = np.linspace(0.0, 0.99, DEFAULT_NGRID)
true_curve = (np.array([[0.0, 1.0]]), np.array([[0.0, 0.0]]), pc.PREV)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.prg_curve, next(seeds), x_grid
)
fail[8] += fail_EB
fail[9] += fail_P
fail[10] += fail_EB2
fail[11] += fail_P2
fail_curve_prg += fail_curve
sub_FPR = _FPR / 4.0
expect_p_fail = 1.0 - confidence
fail_check_stat(fail, runs, expect_p_fail, sub_FPR)
print("ROC curve")
fail_check_stat(fail_curve_roc, runs, expect_p_fail, sub_FPR)
print("RP curve")
fail_check_stat(fail_curve_ap, runs, expect_p_fail, sub_FPR)
print("PRG curve")
fail_check_stat(fail_curve_prg, runs, expect_p_fail, sub_FPR)
def test_boot_mean(runs=100):
N = 201
confidence = 0.95
fail = 0
for ii in range(runs):
mu = np.random.randn()
S = np.abs(np.random.randn())
x = mu + S * np.random.randn(N)
mu_est = np.mean(x)
EB = bt.boot_EB(x, confidence=0.95)
fail += np.abs(mu - mu_est) > EB
expect_p_fail = 1.0 - confidence
print("boot mean")
fail_check_stat([fail], runs, expect_p_fail, _FPR)
def test_boot_EB_and_test(runs=100):
"""Arguably this should do out to its own file since it tests bt core."""
mu = np.random.randn()
stdev = np.abs(np.random.randn())
N = 201
confidence = 0.95
def run_trial(x, true_value):
_, _, CI = bt._boot_EB_and_test(x, confidence=confidence, return_CI=True)
LB, UB = CI
fail_CI = (true_value < LB) or (UB < true_value)
_, pval, CI = bt._boot_EB_and_test(x - true_value, confidence=confidence, return_CI=True)
LB, UB = CI
fail_CI2 = (0 < LB) or (UB < 0)
fail_P = pval < 1.0 - confidence
return fail_CI, fail_CI2, fail_P
fail = [0] * 3
for ii in range(runs):
x = mu + stdev * np.random.randn(N)
fail_CI, fail_CI2, fail_P = run_trial(x, mu)
fail[0] += fail_CI
fail[1] += fail_CI2
fail[2] += fail_P
expect_p_fail = 1.0 - confidence
print("boot mean and test")
fail_check_stat(fail, runs, expect_p_fail, _FPR)
if __name__ == "__main__":
np.random.seed(56467)
test_boot()
test_boot_mean()
test_boot_EB_and_test()
print("passed")
| [((3118, 3152), 'numpy.zeros', 'np.zeros', (['DEFAULT_NGRID'], {'dtype': 'int'}), '(DEFAULT_NGRID, dtype=int)\n', (3126, 3152), True, 'import numpy as np\n'), ((3173, 3207), 'numpy.zeros', 'np.zeros', (['DEFAULT_NGRID'], {'dtype': 'int'}), '(DEFAULT_NGRID, dtype=int)\n', (3181, 3207), True, 'import numpy as np\n'), ((3229, 3263), 'numpy.zeros', 'np.zeros', (['DEFAULT_NGRID'], {'dtype': 'int'}), '(DEFAULT_NGRID, dtype=int)\n', (3237, 3263), True, 'import numpy as np\n'), ((3278, 3289), 'builtins.range', 'range', (['runs'], {}), '(runs)\n', (3283, 3289), False, 'from builtins import range\n'), ((5816, 5827), 'builtins.range', 'range', (['runs'], {}), '(runs)\n', (5821, 5827), False, 'from builtins import range\n'), ((6294, 6311), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (6309, 6311), True, 'import numpy as np\n'), ((6854, 6865), 'builtins.range', 'range', (['runs'], {}), '(runs)\n', (6859, 6865), False, 'from builtins import range\n'), ((7201, 7222), 'numpy.random.seed', 'np.random.seed', (['(56467)'], {}), '(56467)\n', (7215, 7222), True, 'import numpy as np\n'), ((523, 561), 'scipy.stats.binom_test', 'ss.binom_test', (['ff', 'runs', 'expect_p_fail'], {}), '(ff, runs, expect_p_fail)\n', (536, 561), True, 'import scipy.stats as ss\n'), ((597, 658), 'scipy.stats.binom_test', 'ss.binom_test', (['ff', 'runs', 'expect_p_fail'], {'alternative': '"""greater"""'}), "(ff, runs, expect_p_fail, alternative='greater')\n", (610, 658), True, 'import scipy.stats as ss\n'), ((838, 857), 'numpy.min', 'np.min', (['pvals_2side'], {}), '(pvals_2side)\n', (844, 857), True, 'import numpy as np\n'), ((918, 937), 'numpy.min', 'np.min', (['pvals_1side'], {}), '(pvals_1side)\n', (924, 937), True, 'import numpy as np\n'), ((1332, 1344), 'mlpaper.util.area', 'area', (['*curve'], {}), '(*curve)\n', (1336, 1344), False, 'from mlpaper.util import area, interp1d\n'), ((1418, 1430), 'mlpaper.util.area', 'area', (['*curve'], {}), '(*curve)\n', (1422, 1430), False, 'from mlpaper.util import area, interp1d\n'), ((1454, 1471), 'mlpaper.util.area', 'area', (['*true_curve'], {}), '(*true_curve)\n', (1458, 1471), False, 'from mlpaper.util import area, interp1d\n'), ((1481, 1501), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1495, 1501), True, 'import numpy as np\n'), ((1536, 1639), 'mlpaper.classification.curve_boot', 'curve_boot', (['y_true', 'y_score'], {'ref': 'true_value', 'curve_f': 'curve_f', 'confidence': 'confidence', 'x_grid': 'x_grid'}), '(y_true, y_score, ref=true_value, curve_f=curve_f, confidence=\n confidence, x_grid=x_grid)\n', (1546, 1639), False, 'from mlpaper.classification import DEFAULT_NGRID, curve_boot\n'), ((1684, 1729), 'mlpaper.util.interp1d', 'interp1d', (['curve[cc.XGRID].values', '*true_curve'], {}), '(curve[cc.XGRID].values, *true_curve)\n', (1692, 1729), False, 'from mlpaper.util import area, interp1d\n'), ((2153, 2173), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2167, 2173), True, 'import numpy as np\n'), ((2210, 2333), 'mlpaper.classification.curve_boot', 'curve_boot', (['y_true', 'y_score'], {'ref': 'y_score_ref', 'curve_f': 'curve_f', 'confidence': 'confidence', 'pairwise_CI': '(False)', 'x_grid': 'x_grid'}), '(y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=\n confidence, pairwise_CI=False, x_grid=x_grid)\n', (2220, 2333), False, 'from mlpaper.classification import DEFAULT_NGRID, curve_boot\n'), ((2537, 2574), 'numpy.all', 'np.all', (['(curve_.values == curve.values)'], {}), '(curve_.values == curve.values)\n', 
(2543, 2574), True, 'import numpy as np\n'), ((2584, 2604), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2598, 2604), True, 'import numpy as np\n'), ((2641, 2763), 'mlpaper.classification.curve_boot', 'curve_boot', (['y_true', 'y_score'], {'ref': 'y_score_ref', 'curve_f': 'curve_f', 'confidence': 'confidence', 'pairwise_CI': '(True)', 'x_grid': 'x_grid'}), '(y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=\n confidence, pairwise_CI=True, x_grid=x_grid)\n', (2651, 2763), False, 'from mlpaper.classification import DEFAULT_NGRID, curve_boot\n'), ((2975, 3012), 'numpy.all', 'np.all', (['(curve_.values == curve.values)'], {}), '(curve_.values == curve.values)\n', (2981, 3012), True, 'import numpy as np\n'), ((3304, 3322), 'numpy.random.randn', 'np.random.randn', (['(2)'], {}), '(2)\n', (3319, 3322), True, 'import numpy as np\n'), ((3335, 3356), 'numpy.random.randn', 'np.random.randn', (['(2)', '(2)'], {}), '(2, 2)\n', (3350, 3356), True, 'import numpy as np\n'), ((3369, 3383), 'numpy.dot', 'np.dot', (['S', 'S.T'], {}), '(S, S.T)\n', (3375, 3383), True, 'import numpy as np\n'), ((3497, 3534), 'numpy.linspace', 'np.linspace', (['(0.0)', '(0.99)', 'DEFAULT_NGRID'], {}), '(0.0, 0.99, DEFAULT_NGRID)\n', (3508, 3534), True, 'import numpy as np\n'), ((3674, 3718), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'S'], {'size': 'N'}), '(mu, S, size=N)\n', (3703, 3718), True, 'import numpy as np\n'), ((4142, 4180), 'numpy.linspace', 'np.linspace', (['(0.05)', '(0.95)', 'DEFAULT_NGRID'], {}), '(0.05, 0.95, DEFAULT_NGRID)\n', (4153, 4180), True, 'import numpy as np\n'), ((4909, 4946), 'numpy.linspace', 'np.linspace', (['(0.0)', '(0.99)', 'DEFAULT_NGRID'], {}), '(0.0, 0.99, DEFAULT_NGRID)\n', (4920, 4946), True, 'import numpy as np\n'), ((5842, 5859), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (5857, 5859), True, 'import numpy as np\n'), ((5956, 5966), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (5963, 5966), True, 'import numpy as np\n'), ((5980, 6010), 'mlpaper.mlpaper.boot_EB', 'bt.boot_EB', (['x'], {'confidence': '(0.95)'}), '(x, confidence=0.95)\n', (5990, 6010), True, 'import mlpaper.mlpaper as bt\n'), ((6331, 6348), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (6346, 6348), True, 'import numpy as np\n'), ((6439, 6501), 'mlpaper.mlpaper._boot_EB_and_test', 'bt._boot_EB_and_test', (['x'], {'confidence': 'confidence', 'return_CI': '(True)'}), '(x, confidence=confidence, return_CI=True)\n', (6459, 6501), True, 'import mlpaper.mlpaper as bt\n'), ((6602, 6677), 'mlpaper.mlpaper._boot_EB_and_test', 'bt._boot_EB_and_test', (['(x - true_value)'], {'confidence': 'confidence', 'return_CI': '(True)'}), '(x - true_value, confidence=confidence, return_CI=True)\n', (6622, 6677), True, 'import mlpaper.mlpaper as bt\n'), ((1775, 1799), 'numpy.abs', 'np.abs', (['(auc - true_value)'], {}), '(auc - true_value)\n', (1781, 1799), True, 'import numpy as np\n'), ((2103, 2143), 'numpy.all', 'np.all', (['(curve[cc.XGRID].values == x_grid)'], {}), '(curve[cc.XGRID].values == x_grid)\n', (2109, 2143), True, 'import numpy as np\n'), ((2827, 2848), 'numpy.abs', 'np.abs', (['(auc - auc_ref)'], {}), '(auc - auc_ref)\n', (2833, 2848), True, 'import numpy as np\n'), ((3557, 3579), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (3565, 3579), True, 'import numpy as np\n'), ((3581, 3603), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (3589, 3603), True, 'import numpy as np\n'), ((3633, 
3650), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (3647, 3650), True, 'import numpy as np\n'), ((3730, 3747), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (3745, 3747), True, 'import numpy as np\n'), ((3800, 3841), 'numpy.random.choice', 'np.random.choice', (['N'], {'size': 'N', 'replace': '(True)'}), '(N, size=N, replace=True)\n', (3816, 3841), True, 'import numpy as np\n'), ((4515, 4537), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (4523, 4537), True, 'import numpy as np\n'), ((4539, 4557), 'numpy.array', 'np.array', (['[[p, p]]'], {}), '([[p, p]])\n', (4547, 4557), True, 'import numpy as np\n'), ((4969, 4991), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (4977, 4991), True, 'import numpy as np\n'), ((4993, 5015), 'numpy.array', 'np.array', (['[[0.0, 0.0]]'], {}), '([[0.0, 0.0]])\n', (5001, 5015), True, 'import numpy as np\n'), ((5879, 5896), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (5894, 5896), True, 'import numpy as np\n'), ((6028, 6047), 'numpy.abs', 'np.abs', (['(mu - mu_est)'], {}), '(mu - mu_est)\n', (6034, 6047), True, 'import numpy as np\n'), ((3949, 3960), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (3957, 3960), True, 'import numpy as np\n'), ((4012, 4023), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (4020, 4023), True, 'import numpy as np\n'), ((5919, 5937), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (5934, 5937), True, 'import numpy as np\n'), ((6892, 6910), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (6907, 6910), True, 'import numpy as np\n')] |
yuqil725/benchmark_lib | TTBenchmark/check_benchmark.py | f404ff829d7b3a8bb0f6b00689038cf533bba83e | def check_difference():
pass
def update_benchmark():
pass
| [] |
ajmal017/amp | core/test/test_timeseries_study.py | 8de7e3b88be87605ec3bad03c139ac64eb460e5c | from typing import Any, Dict
import numpy as np
import pandas as pd
import core.artificial_signal_generators as sig_gen
import core.statistics as stats
import core.timeseries_study as tss
import helpers.unit_test as hut
class TestTimeSeriesDailyStudy(hut.TestCase):
def test_usual_case(self) -> None:
idx = pd.date_range("2018-12-31", "2019-01-31")
vals = np.random.randn(len(idx))
ts = pd.Series(vals, index=idx)
tsds = tss.TimeSeriesDailyStudy(ts)
tsds.execute()
class TestTimeSeriesMinutelyStudy(hut.TestCase):
def test_usual_case(self) -> None:
idx = pd.date_range("2018-12-31", "2019-01-31", freq="5T")
vals = np.random.randn(len(idx))
ts = pd.Series(vals, index=idx)
tsms = tss.TimeSeriesMinutelyStudy(ts, freq_name="5 minutes")
tsms.execute()
class TestMapDictToDataframeTest1(hut.TestCase):
def test1(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict, functions=stat_funcs
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict,
functions=stat_funcs,
add_prefix=False,
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict,
functions=stat_funcs,
progress_bar=False,
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = sig_gen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
def _get_dict_of_series(self, seed: int) -> Dict[Any, pd.Series]:
n_items = 15
test_keys = ["test_key_" + str(x) for x in range(n_items)]
result_dict = {key: self._get_series(seed) for key in test_keys}
return result_dict
| [((323, 364), 'pandas.date_range', 'pd.date_range', (['"""2018-12-31"""', '"""2019-01-31"""'], {}), "('2018-12-31', '2019-01-31')\n", (336, 364), True, 'import pandas as pd\n'), ((419, 445), 'pandas.Series', 'pd.Series', (['vals'], {'index': 'idx'}), '(vals, index=idx)\n', (428, 445), True, 'import pandas as pd\n'), ((461, 489), 'core.timeseries_study.TimeSeriesDailyStudy', 'tss.TimeSeriesDailyStudy', (['ts'], {}), '(ts)\n', (485, 489), True, 'import core.timeseries_study as tss\n'), ((617, 669), 'pandas.date_range', 'pd.date_range', (['"""2018-12-31"""', '"""2019-01-31"""'], {'freq': '"""5T"""'}), "('2018-12-31', '2019-01-31', freq='5T')\n", (630, 669), True, 'import pandas as pd\n'), ((724, 750), 'pandas.Series', 'pd.Series', (['vals'], {'index': 'idx'}), '(vals, index=idx)\n', (733, 750), True, 'import pandas as pd\n'), ((766, 820), 'core.timeseries_study.TimeSeriesMinutelyStudy', 'tss.TimeSeriesMinutelyStudy', (['ts'], {'freq_name': '"""5 minutes"""'}), "(ts, freq_name='5 minutes')\n", (793, 820), True, 'import core.timeseries_study as tss\n'), ((1159, 1225), 'core.timeseries_study.map_dict_to_dataframe', 'tss.map_dict_to_dataframe', ([], {'dict_': 'result_dict', 'functions': 'stat_funcs'}), '(dict_=result_dict, functions=stat_funcs)\n', (1184, 1225), True, 'import core.timeseries_study as tss\n'), ((1272, 1316), 'helpers.unit_test.convert_df_to_string', 'hut.convert_df_to_string', (['actual'], {'index': '(True)'}), '(actual, index=True)\n', (1296, 1316), True, 'import helpers.unit_test as hut\n'), ((1623, 1711), 'core.timeseries_study.map_dict_to_dataframe', 'tss.map_dict_to_dataframe', ([], {'dict_': 'result_dict', 'functions': 'stat_funcs', 'add_prefix': '(False)'}), '(dict_=result_dict, functions=stat_funcs,\n add_prefix=False)\n', (1648, 1711), True, 'import core.timeseries_study as tss\n'), ((1779, 1823), 'helpers.unit_test.convert_df_to_string', 'hut.convert_df_to_string', (['actual'], {'index': '(True)'}), '(actual, index=True)\n', (1803, 1823), True, 'import helpers.unit_test as hut\n'), ((2130, 2220), 'core.timeseries_study.map_dict_to_dataframe', 'tss.map_dict_to_dataframe', ([], {'dict_': 'result_dict', 'functions': 'stat_funcs', 'progress_bar': '(False)'}), '(dict_=result_dict, functions=stat_funcs,\n progress_bar=False)\n', (2155, 2220), True, 'import core.timeseries_study as tss\n'), ((2288, 2332), 'helpers.unit_test.convert_df_to_string', 'hut.convert_df_to_string', (['actual'], {'index': '(True)'}), '(actual, index=True)\n', (2312, 2332), True, 'import helpers.unit_test as hut\n'), ((2457, 2480), 'numpy.array', 'np.array', (['[0.75, -0.25]'], {}), '([0.75, -0.25])\n', (2465, 2480), True, 'import numpy as np\n'), ((2500, 2522), 'numpy.array', 'np.array', (['[0.65, 0.35]'], {}), '([0.65, 0.35])\n', (2508, 2522), True, 'import numpy as np\n'), ((2546, 2585), 'core.artificial_signal_generators.ArmaProcess', 'sig_gen.ArmaProcess', (['arparams', 'maparams'], {}), '(arparams, maparams)\n', (2565, 2585), True, 'import core.artificial_signal_generators as sig_gen\n')] |
takat0m0/infoGAN | util.py | bc3ba0d4e407851e97f49322add98ea2e7e429de | #! -*- coding:utf-8 -*-
import os
import sys
import cv2
import numpy as np
def _resizing(img):
#return cv2.resize(img, (256, 256))
return cv2.resize(img, (32, 32))
def _reg(img):
return img/127.5 - 1.0
def _re_reg(img):
return (img + 1.0) * 127.5
def get_figs(target_dir):
ret = []
for file_name in os.listdir(target_dir):
target_file = os.path.join(target_dir, file_name)
img = cv2.imread(target_file, 0)
ret.append(_reg(_resizing(img)))
return np.asarray(ret, dtype = np.float32)
def dump_figs(figs, dump_dir):
for i, fig in enumerate(figs):
target_file = os.path.join(dump_dir, '{}.jpg'.format(i))
cv2.imwrite(target_file, _re_reg(fig))
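# Minimal round-trip sketch (editor's addition; the directory names are hypothetical).
if __name__ == '__main__':
    figs = get_figs('./input_figs')
    print(figs.shape)  # (N, 32, 32); pixel values scaled into [-1.0, 1.0] by _reg()
    dump_figs(figs, './dumped_figs')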
| [((148, 173), 'cv2.resize', 'cv2.resize', (['img', '(32, 32)'], {}), '(img, (32, 32))\n', (158, 173), False, 'import cv2\n'), ((328, 350), 'os.listdir', 'os.listdir', (['target_dir'], {}), '(target_dir)\n', (338, 350), False, 'import os\n'), ((503, 536), 'numpy.asarray', 'np.asarray', (['ret'], {'dtype': 'np.float32'}), '(ret, dtype=np.float32)\n', (513, 536), True, 'import numpy as np\n'), ((374, 409), 'os.path.join', 'os.path.join', (['target_dir', 'file_name'], {}), '(target_dir, file_name)\n', (386, 409), False, 'import os\n'), ((424, 450), 'cv2.imread', 'cv2.imread', (['target_file', '(0)'], {}), '(target_file, 0)\n', (434, 450), False, 'import cv2\n')] |
MutuaFranklin/MyHood | myhoodApp/migrations/0002_healthfacilities_hospital_image.py | 6ddd21c4a67936c8926d6f5a8665a06edf81f39e | # Generated by Django 3.2.7 on 2021-09-23 20:01
import cloudinary.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myhoodApp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='healthfacilities',
name='hospital_image',
field=cloudinary.models.CloudinaryField(blank=True, max_length=255, verbose_name='Hospital Image'),
),
]
| [] |
ans682/SafePredict_and_Forecasting | forecasting_algorithms/Multiple_Timeseries/VAR/var.py | 30ac5a0b665fce090567476bc07b54489b2f3d0f | # VAR example
from statsmodels.tsa.vector_ar.var_model import VAR
from random import random
# contrived dataset with dependency
data = list()
for i in range(100):
v1 = i + random()
v2 = v1 + random()
row = [v1, v2]
data.append(row)
# fit model
model = VAR(data)
model_fit = model.fit()
# make prediction
yhat = model_fit.forecast(model_fit.y, steps=1)
print(yhat)
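# Editor's extension of the example above (same fitted model, nothing new assumed):
# the identical call can produce a multi-step forecast.
yhat5 = model_fit.forecast(model_fit.y, steps=5)
print(yhat5)
# The lag order could also be selected by information criterion, e.g.
# model.fit(maxlags=5, ic='aic'), if desired.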
| [((268, 277), 'statsmodels.tsa.vector_ar.var_model.VAR', 'VAR', (['data'], {}), '(data)\n', (271, 277), False, 'from statsmodels.tsa.vector_ar.var_model import VAR\n'), ((176, 184), 'random.random', 'random', ([], {}), '()\n', (182, 184), False, 'from random import random\n'), ((199, 207), 'random.random', 'random', ([], {}), '()\n', (205, 207), False, 'from random import random\n')] |
jonykarki/hamroscraper | candidate-scrape.py | a7e34a9cdca89be10422d045f1ed34e9956bd75f | import json
import urllib.request
import MySQLdb
db = MySQLdb.connect(host="localhost", # your host, usually localhost
user="root", # your username
passwd="", # your password
db="election")
cur = db.cursor()
# user_agent for sending headers with the request
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
# header
headers={'User-Agent':user_agent,}
district = input("Enter the Name of the district: ")
url = "http://election.ujyaaloonline.com/api/candidates?district=" + district
request = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(request)
source = response.read()
# print(source)
data = json.loads(source)
#print(data['candidates']['2']['400'][0]['cName'])
election_area = data['election_areas']
# get all the possible election-areas from the district
# data needed for the database
'''
resultno :> autoincrement
constituencyname :>
stateno :> Remove the column?
districtno :>
candidate :>
gender :> Remove the column???
votes :> set to zero for now
'''
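# Editor's note: a hypothetical schema matching the INSERT statement below (an
# assumption only; the real `test` table lives in the MySQL `election` database):
#   CREATE TABLE test (
#       id INT AUTO_INCREMENT PRIMARY KEY,
#       candidatename VARCHAR(255),
#       constituencyname VARCHAR(255)
#   );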
i = 0
j = 0
for key, value in election_area.items():
area_key = key
district_name = data['district_slug']
try:
for item in data["candidates"]['1'][area_key]:
print(item['aName'])
print(item["cName"])
i = i + 1
except:
for item in data["candidates"]['2'][area_key]:
constituencyname = item['aName'].encode('utf-8')
candidatename = item["cName"].encode('utf-8')
sql = "INSERT INTO `test` (`id`, `candidatename`, `constituencyname`) VALUES (NULL, %s, %s)"
cur.execute(sql, (candidatename, constituencyname))
db.commit()
print('INSERTED ' + item["cName"] + " into the database")
j = j + 1
print(data['district_slug'] + " has " + str(i) + " candidates in provincial election")
print(data['district_slug'] + " has " + str(j) + " candidates in federal election")
print("Total: " + str(i + j) + " candidates added to the database")
| [((54, 126), 'MySQLdb.connect', 'MySQLdb.connect', ([], {'host': '"""localhost"""', 'user': '"""root"""', 'passwd': '""""""', 'db': '"""election"""'}), "(host='localhost', user='root', passwd='', db='election')\n", (69, 126), False, 'import MySQLdb\n'), ((770, 788), 'json.loads', 'json.loads', (['source'], {}), '(source)\n', (780, 788), False, 'import json\n')] |
ashiscs/sangita | sangita/hindi/lemmatizer.py | b90c49859339147137db1c2bdb60a1039a00c706 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 9 23:28:21 2017
@author: samriddhi
"""
import re
import sangita.hindi.tokenizer as tok
import sangita.hindi.corpora.lemmata as lt
def numericLemmatizer(instr):
lst = type([1,2,3])
tup = type(("Hello", "Hi"))
string = type("Hello")
num_match = re.compile(r'([०१२३४५६७८९]+[\.\,]*)+[०१२३४५६७८९]+|([-+]*\d+[\.\,]*)+\d+|([०१२३४५६७८९]+|\d+)')
if(type(instr) == lst):
for index,item in enumerate(instr):
if(type(item) == tup):
if num_match.search(str(item[0])):
instr[index] = (instr[index][1], instr[index][1])
else:
if num_match.search(str(item)):
instr[index] = (instr[index], instr[index][1])
else:
if(type(instr) == string):
instr = tok.tokenize(instr)
numericLemmatizer(instr)
else:
print("not supported")
return(instr)
def defaultLemmatizer(instr):
lst = type([1,2,3])
tup = type(("Hello", "Hi"))
string = type("Hello")
if(type(instr) == lst):
for index,item in enumerate(instr):
if(type(item) != tup):
instr[index] = (instr[index], instr[index])
else:
if(type(instr) == string):
instr = tok.tokenize(instr)
defaultLemmatizer(instr)
else:
print("not supported")
return(instr)
def lookupLemmatizer(instr):
lst = type([1,2,3])
tup = type(("Hello", "Hi"))
string = type("Hello")
lemmatalist = lt.drawlist()
words = []
lemma = []
for item in lemmatalist:
words.append(item.split("\t")[0])
lemma.append(item.split("\t")[1])
tokens = set(words)
if(type(instr) == lst):
for index,item in enumerate(instr):
if(type(item) == tup):
if item in tokens:
tag = lemma[words.index(item)]
instr[index] = (instr[index][1],tag)
else:
if(type(item) != tup):
if item in tokens:
tag = lemma[words.index(item)]
instr[index] = (instr[index], tag)
else:
if(type(instr) == string):
instr = tok.tokenize(instr)
lookupLemmatizer(instr)
else:
print("not supported")
return(instr)
def Lemmatizer(instr):
instr = lookupLemmatizer(instr)
instr = numericLemmatizer(instr)
instr = defaultLemmatizer(instr)
return(instr)
if __name__ == '__main__':
input_str = 'पुंछ में हुई मुठभेड़ के बारे में एक सरकारी अधिकारी ने बताया कि १३वीं सिख लाईट इनफेंट्री द्वारा लश्कर-ए - ताइबा गुट के आतंकियों को नियंत्रण-रेखा पर चुनौती देने पर मुठभेड़ रात ११.४५ बजे शुरू हुई।'
print(lookupLemmatizer(input_str))
print(numericLemmatizer(input_str))
print(defaultLemmatizer(input_str))
print(Lemmatizer(input_str))
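    # Editor's note (inferred from the functions above, not asserting exact output):
    # each lemmatizer returns the tokenized sentence as a list of 2-tuples pairing a
    # token with its lemma -- the corpus lemma when lookupLemmatizer knows the word,
    # otherwise the token itself via defaultLemmatizer.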
| [((337, 446), 're.compile', 're.compile', (['"""([०१२३४५६७८९]+[\\\\.\\\\,]*)+[०१२३४५६७८९]+|([-+]*\\\\d+[\\\\.\\\\,]*)+\\\\d+|([०१२३४५६७८९]+|\\\\d+)"""'], {}), "(\n '([०१२३४५६७८९]+[\\\\.\\\\,]*)+[०१२३४५६७८९]+|([-+]*\\\\d+[\\\\.\\\\,]*)+\\\\d+|([०१२३४५६७८९]+|\\\\d+)'\n )\n", (347, 446), False, 'import re\n'), ((1676, 1689), 'sangita.hindi.corpora.lemmata.drawlist', 'lt.drawlist', ([], {}), '()\n', (1687, 1689), True, 'import sangita.hindi.corpora.lemmata as lt\n'), ((901, 920), 'sangita.hindi.tokenizer.tokenize', 'tok.tokenize', (['instr'], {}), '(instr)\n', (913, 920), True, 'import sangita.hindi.tokenizer as tok\n'), ((1397, 1416), 'sangita.hindi.tokenizer.tokenize', 'tok.tokenize', (['instr'], {}), '(instr)\n', (1409, 1416), True, 'import sangita.hindi.tokenizer as tok\n'), ((2414, 2433), 'sangita.hindi.tokenizer.tokenize', 'tok.tokenize', (['instr'], {}), '(instr)\n', (2426, 2433), True, 'import sangita.hindi.tokenizer as tok\n')] |
ttobisawa/gsutil | gslib/tests/test_stet_util.py | ef665b590aa8e6cecfe251295bce8bf99ea69467 | # -*- coding: utf-8 -*-
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for stet_util.py."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
from gslib import storage_url
from gslib.tests import testcase
from gslib.tests import util
from gslib.tests.util import unittest
from gslib.utils import execution_util
from gslib.utils import stet_util
import mock
class TestStetUtil(testcase.GsUtilUnitTestCase):
"""Test STET utils."""
@mock.patch.object(execution_util, 'ExecuteExternalCommand')
def test_stet_upload_uses_binary_and_config_from_boto(
self, mock_execute_external_command):
fake_config_path = self.CreateTempFile()
mock_execute_external_command.return_value = ('stdout', 'stderr')
mock_logger = mock.Mock()
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', 'fake_binary_path'),
('GSUtil', 'stet_config_path', fake_config_path),
]):
out_file_url = stet_util.encrypt_upload(source_url, destination_url,
mock_logger)
self.assertEqual(out_file_url,
storage_url.StorageUrlFromString('in_.stet_tmp'))
mock_execute_external_command.assert_called_once_with([
'fake_binary_path',
'encrypt',
'--config-file={}'.format(fake_config_path),
'--blob-id=gs://bucket/obj',
'in',
'in_.stet_tmp',
])
mock_logger.debug.assert_called_once_with('stderr')
@mock.patch.object(execution_util, 'ExecuteExternalCommand')
def test_stet_upload_runs_with_binary_from_path_with_correct_settings(
self, mock_execute_external_command):
fake_config_path = self.CreateTempFile()
temporary_path_directory = self.CreateTempDir()
fake_stet_binary_path = self.CreateTempFile(tmpdir=temporary_path_directory,
file_name='stet')
previous_path = os.getenv('PATH')
os.environ['PATH'] += os.path.pathsep + temporary_path_directory
mock_execute_external_command.return_value = ('stdout', 'stderr')
mock_logger = mock.Mock()
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', None),
('GSUtil', 'stet_config_path', fake_config_path),
]):
out_file_url = stet_util.encrypt_upload(source_url, destination_url,
mock_logger)
self.assertEqual(out_file_url,
storage_url.StorageUrlFromString('in_.stet_tmp'))
mock_execute_external_command.assert_called_once_with([
fake_stet_binary_path,
'encrypt',
'--config-file={}'.format(fake_config_path),
'--blob-id=gs://bucket/obj',
'in',
'in_.stet_tmp',
])
mock_logger.debug.assert_called_once_with('stderr')
os.environ['PATH'] = previous_path
@mock.patch.object(execution_util, 'ExecuteExternalCommand')
def test_stet_upload_uses_config_from_default_path_with_correct_settings(
self, mock_execute_external_command):
mock_execute_external_command.return_value = ('stdout', 'stderr')
mock_logger = mock.Mock()
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', 'fake_binary_path'),
('GSUtil', 'stet_config_path', None),
]):
with mock.patch.object(os.path,
'exists',
new=mock.Mock(return_value=True)):
out_file_url = stet_util.encrypt_upload(source_url, destination_url,
mock_logger)
self.assertEqual(out_file_url,
storage_url.StorageUrlFromString('in_.stet_tmp'))
mock_execute_external_command.assert_called_once_with([
'fake_binary_path',
'encrypt',
'--config-file={}'.format(
os.path.expanduser(stet_util.DEFAULT_STET_CONFIG_PATH)),
'--blob-id=gs://bucket/obj',
'in',
'in_.stet_tmp',
])
mock_logger.debug.assert_called_once_with('stderr')
@mock.patch.object(shutil, 'move')
@mock.patch.object(execution_util, 'ExecuteExternalCommand')
def test_stet_download_runs_binary_and_replaces_temp_file(
self, mock_execute_external_command, mock_move):
fake_config_path = self.CreateTempFile()
mock_execute_external_command.return_value = ('stdout', 'stderr')
mock_logger = mock.Mock()
source_url = storage_url.StorageUrlFromString('gs://bucket/obj')
destination_url = storage_url.StorageUrlFromString('out')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', 'fake_binary_path'),
('GSUtil', 'stet_config_path', fake_config_path),
]):
stet_util.decrypt_download(source_url, destination_url, mock_logger)
mock_execute_external_command.assert_called_once_with([
'fake_binary_path', 'decrypt',
'--config-file={}'.format(fake_config_path),
'--blob-id=gs://bucket/obj', 'out', 'out_.stet_tmp'
])
mock_logger.debug.assert_called_once_with('stderr')
mock_move.assert_called_once_with('out_.stet_tmp', 'out')
@mock.patch.object(stet_util,
'_get_stet_binary_from_path',
new=mock.Mock(return_value=None))
def test_stet_util_errors_if_no_binary(self):
fake_config_path = self.CreateTempFile()
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', None),
('GSUtil', 'stet_config_path', fake_config_path),
]):
with self.assertRaises(KeyError):
stet_util.encrypt_upload(source_url, destination_url, None)
def test_stet_util_errors_if_no_config(self):
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', 'fake_binary_path'),
('GSUtil', 'stet_config_path', None),
]):
with mock.patch.object(os.path,
'exists',
new=mock.Mock(return_value=False)):
with self.assertRaises(KeyError):
stet_util.encrypt_upload(source_url, destination_url, None)
@mock.patch.object(os.path, 'expanduser', autospec=True)
@mock.patch.object(execution_util,
'ExecuteExternalCommand',
new=mock.Mock(return_value=('stdout', 'stderr')))
def test_stet_util_expands_home_directory_symbol(self, mock_expanduser):
fake_config_path = self.CreateTempFile()
source_url = storage_url.StorageUrlFromString('in')
destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')
with util.SetBotoConfigForTest([
('GSUtil', 'stet_binary_path', 'fake_binary_path'),
('GSUtil', 'stet_config_path', fake_config_path),
]):
stet_util.encrypt_upload(source_url, destination_url, mock.Mock())
mock_expanduser.assert_has_calls(
[mock.call('fake_binary_path'),
mock.call(fake_config_path)])
| [((1122, 1181), 'mock.patch.object', 'mock.patch.object', (['execution_util', '"""ExecuteExternalCommand"""'], {}), "(execution_util, 'ExecuteExternalCommand')\n", (1139, 1181), False, 'import mock\n'), ((2265, 2324), 'mock.patch.object', 'mock.patch.object', (['execution_util', '"""ExecuteExternalCommand"""'], {}), "(execution_util, 'ExecuteExternalCommand')\n", (2282, 2324), False, 'import mock\n'), ((3760, 3819), 'mock.patch.object', 'mock.patch.object', (['execution_util', '"""ExecuteExternalCommand"""'], {}), "(execution_util, 'ExecuteExternalCommand')\n", (3777, 3819), False, 'import mock\n'), ((5061, 5094), 'mock.patch.object', 'mock.patch.object', (['shutil', '"""move"""'], {}), "(shutil, 'move')\n", (5078, 5094), False, 'import mock\n'), ((5098, 5157), 'mock.patch.object', 'mock.patch.object', (['execution_util', '"""ExecuteExternalCommand"""'], {}), "(execution_util, 'ExecuteExternalCommand')\n", (5115, 5157), False, 'import mock\n'), ((7334, 7389), 'mock.patch.object', 'mock.patch.object', (['os.path', '"""expanduser"""'], {'autospec': '(True)'}), "(os.path, 'expanduser', autospec=True)\n", (7351, 7389), False, 'import mock\n'), ((1416, 1427), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1425, 1427), False, 'import mock\n'), ((1446, 1484), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""in"""'], {}), "('in')\n", (1478, 1484), False, 'from gslib import storage_url\n'), ((1507, 1558), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""gs://bucket/obj"""'], {}), "('gs://bucket/obj')\n", (1539, 1558), False, 'from gslib import storage_url\n'), ((2706, 2723), 'os.getenv', 'os.getenv', (['"""PATH"""'], {}), "('PATH')\n", (2715, 2723), False, 'import os\n'), ((2882, 2893), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2891, 2893), False, 'import mock\n'), ((2912, 2950), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""in"""'], {}), "('in')\n", (2944, 2950), False, 'from gslib import storage_url\n'), ((2973, 3024), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""gs://bucket/obj"""'], {}), "('gs://bucket/obj')\n", (3005, 3024), False, 'from gslib import storage_url\n'), ((4028, 4039), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (4037, 4039), False, 'import mock\n'), ((4058, 4096), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""in"""'], {}), "('in')\n", (4090, 4096), False, 'from gslib import storage_url\n'), ((4119, 4170), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""gs://bucket/obj"""'], {}), "('gs://bucket/obj')\n", (4151, 4170), False, 'from gslib import storage_url\n'), ((5407, 5418), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (5416, 5418), False, 'import mock\n'), ((5437, 5488), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""gs://bucket/obj"""'], {}), "('gs://bucket/obj')\n", (5469, 5488), False, 'from gslib import storage_url\n'), ((5511, 5550), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""out"""'], {}), "('out')\n", (5543, 5550), False, 'from gslib import storage_url\n'), ((6376, 6414), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""in"""'], {}), "('in')\n", (6408, 6414), False, 'from gslib import storage_url\n'), ((6437, 6488), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""gs://bucket/obj"""'], {}), 
"('gs://bucket/obj')\n", (6469, 6488), False, 'from gslib import storage_url\n'), ((6812, 6850), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""in"""'], {}), "('in')\n", (6844, 6850), False, 'from gslib import storage_url\n'), ((6873, 6924), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""gs://bucket/obj"""'], {}), "('gs://bucket/obj')\n", (6905, 6924), False, 'from gslib import storage_url\n'), ((7682, 7720), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""in"""'], {}), "('in')\n", (7714, 7720), False, 'from gslib import storage_url\n'), ((7743, 7794), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""gs://bucket/obj"""'], {}), "('gs://bucket/obj')\n", (7775, 7794), False, 'from gslib import storage_url\n'), ((1568, 1701), 'gslib.tests.util.SetBotoConfigForTest', 'util.SetBotoConfigForTest', (["[('GSUtil', 'stet_binary_path', 'fake_binary_path'), ('GSUtil',\n 'stet_config_path', fake_config_path)]"], {}), "([('GSUtil', 'stet_binary_path',\n 'fake_binary_path'), ('GSUtil', 'stet_config_path', fake_config_path)])\n", (1593, 1701), False, 'from gslib.tests import util\n'), ((1743, 1809), 'gslib.utils.stet_util.encrypt_upload', 'stet_util.encrypt_upload', (['source_url', 'destination_url', 'mock_logger'], {}), '(source_url, destination_url, mock_logger)\n', (1767, 1809), False, 'from gslib.utils import stet_util\n'), ((1913, 1961), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""in_.stet_tmp"""'], {}), "('in_.stet_tmp')\n", (1945, 1961), False, 'from gslib import storage_url\n'), ((3034, 3153), 'gslib.tests.util.SetBotoConfigForTest', 'util.SetBotoConfigForTest', (["[('GSUtil', 'stet_binary_path', None), ('GSUtil', 'stet_config_path',\n fake_config_path)]"], {}), "([('GSUtil', 'stet_binary_path', None), ('GSUtil',\n 'stet_config_path', fake_config_path)])\n", (3059, 3153), False, 'from gslib.tests import util\n'), ((3195, 3261), 'gslib.utils.stet_util.encrypt_upload', 'stet_util.encrypt_upload', (['source_url', 'destination_url', 'mock_logger'], {}), '(source_url, destination_url, mock_logger)\n', (3219, 3261), False, 'from gslib.utils import stet_util\n'), ((3365, 3413), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""in_.stet_tmp"""'], {}), "('in_.stet_tmp')\n", (3397, 3413), False, 'from gslib import storage_url\n'), ((4180, 4301), 'gslib.tests.util.SetBotoConfigForTest', 'util.SetBotoConfigForTest', (["[('GSUtil', 'stet_binary_path', 'fake_binary_path'), ('GSUtil',\n 'stet_config_path', None)]"], {}), "([('GSUtil', 'stet_binary_path',\n 'fake_binary_path'), ('GSUtil', 'stet_config_path', None)])\n", (4205, 4301), False, 'from gslib.tests import util\n'), ((4658, 4706), 'gslib.storage_url.StorageUrlFromString', 'storage_url.StorageUrlFromString', (['"""in_.stet_tmp"""'], {}), "('in_.stet_tmp')\n", (4690, 4706), False, 'from gslib import storage_url\n'), ((5560, 5693), 'gslib.tests.util.SetBotoConfigForTest', 'util.SetBotoConfigForTest', (["[('GSUtil', 'stet_binary_path', 'fake_binary_path'), ('GSUtil',\n 'stet_config_path', fake_config_path)]"], {}), "([('GSUtil', 'stet_binary_path',\n 'fake_binary_path'), ('GSUtil', 'stet_config_path', fake_config_path)])\n", (5585, 5693), False, 'from gslib.tests import util\n'), ((5720, 5788), 'gslib.utils.stet_util.decrypt_download', 'stet_util.decrypt_download', (['source_url', 'destination_url', 'mock_logger'], {}), '(source_url, 
destination_url, mock_logger)\n', (5746, 5788), False, 'from gslib.utils import stet_util\n'), ((6498, 6617), 'gslib.tests.util.SetBotoConfigForTest', 'util.SetBotoConfigForTest', (["[('GSUtil', 'stet_binary_path', None), ('GSUtil', 'stet_config_path',\n fake_config_path)]"], {}), "([('GSUtil', 'stet_binary_path', None), ('GSUtil',\n 'stet_config_path', fake_config_path)])\n", (6523, 6617), False, 'from gslib.tests import util\n'), ((6236, 6264), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (6245, 6264), False, 'import mock\n'), ((6934, 7055), 'gslib.tests.util.SetBotoConfigForTest', 'util.SetBotoConfigForTest', (["[('GSUtil', 'stet_binary_path', 'fake_binary_path'), ('GSUtil',\n 'stet_config_path', None)]"], {}), "([('GSUtil', 'stet_binary_path',\n 'fake_binary_path'), ('GSUtil', 'stet_config_path', None)])\n", (6959, 7055), False, 'from gslib.tests import util\n'), ((7804, 7937), 'gslib.tests.util.SetBotoConfigForTest', 'util.SetBotoConfigForTest', (["[('GSUtil', 'stet_binary_path', 'fake_binary_path'), ('GSUtil',\n 'stet_config_path', fake_config_path)]"], {}), "([('GSUtil', 'stet_binary_path',\n 'fake_binary_path'), ('GSUtil', 'stet_config_path', fake_config_path)])\n", (7829, 7937), False, 'from gslib.tests import util\n'), ((7499, 7543), 'mock.Mock', 'mock.Mock', ([], {'return_value': "('stdout', 'stderr')"}), "(return_value=('stdout', 'stderr'))\n", (7508, 7543), False, 'import mock\n'), ((4486, 4552), 'gslib.utils.stet_util.encrypt_upload', 'stet_util.encrypt_upload', (['source_url', 'destination_url', 'mock_logger'], {}), '(source_url, destination_url, mock_logger)\n', (4510, 4552), False, 'from gslib.utils import stet_util\n'), ((6686, 6745), 'gslib.utils.stet_util.encrypt_upload', 'stet_util.encrypt_upload', (['source_url', 'destination_url', 'None'], {}), '(source_url, destination_url, None)\n', (6710, 6745), False, 'from gslib.utils import stet_util\n'), ((8018, 8029), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (8027, 8029), False, 'import mock\n'), ((8078, 8107), 'mock.call', 'mock.call', (['"""fake_binary_path"""'], {}), "('fake_binary_path')\n", (8087, 8107), False, 'import mock\n'), ((8118, 8145), 'mock.call', 'mock.call', (['fake_config_path'], {}), '(fake_config_path)\n', (8127, 8145), False, 'import mock\n'), ((4862, 4916), 'os.path.expanduser', 'os.path.expanduser', (['stet_util.DEFAULT_STET_CONFIG_PATH'], {}), '(stet_util.DEFAULT_STET_CONFIG_PATH)\n', (4880, 4916), False, 'import os\n'), ((7270, 7329), 'gslib.utils.stet_util.encrypt_upload', 'stet_util.encrypt_upload', (['source_url', 'destination_url', 'None'], {}), '(source_url, destination_url, None)\n', (7294, 7329), False, 'from gslib.utils import stet_util\n'), ((4432, 4460), 'mock.Mock', 'mock.Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (4441, 4460), False, 'import mock\n'), ((7186, 7215), 'mock.Mock', 'mock.Mock', ([], {'return_value': '(False)'}), '(return_value=False)\n', (7195, 7215), False, 'import mock\n')] |
makyo/markdown-editing | markdown_editing/tests/test_extension.py | ecbc8970f4d416038f9d2c46fae22d4dbb79c647 | from markdown import markdown
from unittest import TestCase
from markdown_editing.extension import EditingExtension
class TestExtension(TestCase):
def test_substitution(self):
source = '~{out with the old}{in with the new}'
expected = '<p><span class="substitution"><del>out with the old</del><ins>in with the new</ins></span></p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
# Only need to test this once.
html = markdown(source, extensions=['markdown_editing'])
self.assertEqual(html, expected)
def test_addition(self):
source = 'foo +{bar} baz +{qux}(yap)'
expected = '<p>foo <ins class="addition">bar</ins> baz <ins class="addition">qux<q class="comment">yap</q></ins></p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_deletion(self):
source = 'foo -{bar} baz -{qux}(yap)'
expected = '<p>foo <del class="deletion">bar</del> baz <del class="deletion">qux<q class="comment">yap</q></del></p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_selected(self):
source = 'foo ?{bar}(qux) baz'
expected = '<p>foo <mark class="selected">bar<q class="comment">qux</q></mark> baz</p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_comments(self):
self.maxDiff = None
source = """
* Substitution: ~{out with the old}{in with the new}
* With comment: ~{out with the old}{in with the new}(is what I always say)
* With attribution: ~{out with the old}{in with the new}(is what I always say (Makyo))
* With date: ~{out with the old}{in with the new}(is what I always say (Makyo 2020-04-21))
* Comment thread: +{Foxes}(More foxes are always good)!{SGTM}
* Comment with attribution: !{SGTM}(Makyo 2020-04-22)
""".strip()
expected = """
<ul>
<li>Substitution: <span class="substitution"><del>out with the old</del><ins>in with the new</ins></span></li>
<li>With comment: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say</q></span></li>
<li>With attribution: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say<span class="attribution">Makyo</span></q></span></li>
<li>With date: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say<span class="attribution">Makyo</span><span class="date">2020-04-21</span></q></span></li>
<li>Comment thread: <ins class="addition">Foxes<q class="comment">More foxes are always good</q></ins><q class="comment">SGTM</q></li>
<li>Comment with attribution: <q class="comment">SGTM<span class="attribution">Makyo</span><span class="date">2020-04-22</span></q></li>
</ul>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_level(self):
source = """
```
?{Some text}(bad wolf)
```
?{Some text}(bad wolf)
> ?{Some text}(good doggy)
""".strip()
expected = """
<p><code>?{Some text}(bad wolf)</code></p>
<pre><code>?{Some text}(bad wolf)
</code></pre>
<blockquote>
<p><mark class="selected">Some text<q class="comment">good doggy</q></mark></p>
</blockquote>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_nesting(self):
source = """
?{The only currently working form of nesting}(But what if...!{NO})
""".strip()
expected = """
<p><mark class="selected">The only currently working form of nesting<q class="comment">But what if...<q class="comment">NO</q></q></mark></p>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_mixed(self):
source = """
+{some *fancy* new stuff}(With a **fancy** comment)
""".strip()
expected = """
<p><ins class="addition">some <em>fancy</em> new stuff<q class="comment">With a <strong>fancy</strong> comment</q></ins></p>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
| [((518, 567), 'markdown.markdown', 'markdown', (['source'], {'extensions': "['markdown_editing']"}), "(source, extensions=['markdown_editing'])\n", (526, 567), False, 'from markdown import markdown\n'), ((401, 419), 'markdown_editing.extension.EditingExtension', 'EditingExtension', ([], {}), '()\n', (417, 419), False, 'from markdown_editing.extension import EditingExtension\n'), ((856, 874), 'markdown_editing.extension.EditingExtension', 'EditingExtension', ([], {}), '()\n', (872, 874), False, 'from markdown_editing.extension import EditingExtension\n'), ((1165, 1183), 'markdown_editing.extension.EditingExtension', 'EditingExtension', ([], {}), '()\n', (1181, 1183), False, 'from markdown_editing.extension import EditingExtension\n'), ((1437, 1455), 'markdown_editing.extension.EditingExtension', 'EditingExtension', ([], {}), '()\n', (1453, 1455), False, 'from markdown_editing.extension import EditingExtension\n'), ((3078, 3096), 'markdown_editing.extension.EditingExtension', 'EditingExtension', ([], {}), '()\n', (3094, 3096), False, 'from markdown_editing.extension import EditingExtension\n'), ((3582, 3600), 'markdown_editing.extension.EditingExtension', 'EditingExtension', ([], {}), '()\n', (3598, 3600), False, 'from markdown_editing.extension import EditingExtension\n'), ((4012, 4030), 'markdown_editing.extension.EditingExtension', 'EditingExtension', ([], {}), '()\n', (4028, 4030), False, 'from markdown_editing.extension import EditingExtension\n'), ((4408, 4426), 'markdown_editing.extension.EditingExtension', 'EditingExtension', ([], {}), '()\n', (4424, 4426), False, 'from markdown_editing.extension import EditingExtension\n')] |
thomasyi17/diana2 | apps/siren/test_handlers.py | 2167053dfe15b782d96cb1e695047433f302d4dd | """
SIREN/DIANA basic functionality testing framework
Requires env vars:
- GMAIL_USER
- GMAIL_APP_PASSWORD
- GMAIL_BASE_NAME -- i.e., abc -> [email protected]
These env vars have defaults set:
- ORTHANC_PASSWORD
- SPLUNK_PASSWORD
- SPLUNK_HEC_TOKEN
TODO: Move stuff to archive after collected
TODO: Write data into daily folder or something from mi-share ingress
TODO: Suppress dicom-simplify missing (series) creation time
"""
import os
import time
import logging
import shutil
import io
import tempfile
from pathlib import Path
from pprint import pformat
from contextlib import redirect_stdout
from multiprocessing import Process
from datetime import datetime, timedelta
from interruptingcow import timeout
from crud.manager import EndpointManager
from crud.abc import Watcher, Trigger
from crud.endpoints import Splunk
from wuphf.endpoints import SmtpMessenger
from diana.apis import Orthanc, ObservableOrthanc, DcmDir, ObservableDcmDir
from diana.dixel import Dixel, ShamDixel
from diana.utils.dicom import DicomLevel as DLv, DicomEventType as DEv
from wuphf.cli.string_descs import *
from diana.utils import unpack_data
from crud.utils import deserialize_dict
from diana.utils.gateways import suppress_urllib_debug
from diana.utils.endpoint.watcher import suppress_watcher_debug
from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, \
handle_file_arrived, start_watcher, tagged_studies
from trial_dispatcher import TrialDispatcher as Dispatcher
LOCAL_SERVICES = False # Set False to use UMich services
USE_GMAIL = True # Set False to use UMich smtp
DO_DIR_UPLOAD = False
CHECK_SPLUNK = False # Set False to skip long wait for dixel to index
CHECK_WATCH_STUDIES = False  # Set False to skip long wait for orthanc watcher
EMAIL_DRYRUN = False # Set False to send live emails
# CONFIG
_services = "@services.yaml"
_subscriptions = "@subscriptions.yaml"
os.environ["SPLUNK_INDEX"] = "testing"
SMTP_MESSENGER_NAME = "smtp_server"
if LOCAL_SERVICES:
    # Set everything back to defaults
os.environ["UMICH_HOST"] = "localhost" # For testing
del os.environ["ORTHANC_USER"]
del os.environ["ORTHANC_PASSWORD"]
del os.environ["SPLUNK_USER"]
del os.environ["SPLUNK_PASSWORD"]
if USE_GMAIL:
SMTP_MESSENGER_NAME = "gmail:"
test_email_addr1 = "[email protected]"
#test_email_addr1 = "[email protected]"
#test_email_addr1 = os.environ.get("TEST_EMAIL_ADDR1")
# os.environ["TEST_GMAIL_BASE"] = test_email_addr1.split("@")[0]
anon_salt = "Test+Test+Test"
fkey = b'o-KzB3u1a_Vlb8Ji1CdyfTFpZ2FvdsPK4yQCRzFCcss='
msg_t = """to: {{ recipient.email }}\nfrom: {{ from_addr }}\nsubject: Test Message\n\nThis is the message text: "{{ item.msg_text }}"\n"""
notify_msg_t = "@./notify.txt.j2"
# TESTING CONFIG
test_sample_zip = os.path.abspath("../../tests/resources/dcm_zip/test.zip")
test_sample_file = os.path.abspath("../../tests/resources/dcm/IM2263")
test_sample_dir = os.path.expanduser("~/data/test") # Need to dl separately
# TESTS
def test_upload_one(orth: Orthanc, dixel: Dixel):
print("Testing can upload")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
orth.put(dixel)
assert (len(orth.studies()) > 0)
assert (orth.exists(dixel))
print("Passed!")
return True
def test_anonymize_one(orth: Orthanc, dixel: Dixel):
print("Testing can anonymize, tag, and untag")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
orth.put(dixel)
anon = ShamDixel.from_dixel(dixel, salt=anon_salt)
afile = orth.anonymize(anon, replacement_map=anon.orthanc_sham_map())
anon.file = afile
orth.put(anon)
orth.putm(anon.sham_parent_oid(DLv.STUDIES),
level=DLv.STUDIES,
key="signature",
value=anon.pack_fields(fkey))
assert (len(orth.studies()) == 2)
orth.delete(dixel)
assert (len(orth.studies()) == 1)
oid = orth.studies()[0]
test = orth.get(oid)
assert( test.tags["PatientName"] == anon.meta["ShamName"] )
enc = orth.getm(test, key="signature")
tags = unpack_data(enc, fkey)
assert( tags["PatientName"] in dixel.tags["PatientName"] )
print("Passed!")
return True
def test_index_one( splunk: Splunk, dixel: Dixel, check_exists=CHECK_SPLUNK ):
print("Testing can index")
splunk.put(dixel, index=os.environ.get("SPLUNK_INDEX"))
if check_exists:
print("Waiting for 1 min to index")
time.sleep(60)
time_range = [
datetime.now()-timedelta(minutes=2),
datetime.now()
]
r = splunk.find("search index=testing", time_range=time_range)
logging.debug(r)
assert( len(r) > 0 )
print("Passed")
return True
def test_email_messenger( messenger: SmtpMessenger, dryrun=EMAIL_DRYRUN ):
print("Testing can email from template")
outgoing = "The quick brown fox jumped over the lazy dog"
data = {"item": {"msg_text": outgoing},
"recipient": {"email": test_email_addr1}}
msg = messenger.get(data, target=test_email_addr1)
assert( test_email_addr1 in msg )
assert( outgoing in msg )
if not dryrun:
messenger.send(data, target=test_email_addr1)
print("Passed!")
return True
def test_distribute( subscriptions, messenger: SmtpMessenger ):
print("Testing can dispatch")
ch, subs = deserialize_dict(subscriptions)
dispatch = Dispatcher(channel_tags=ch)
dispatch.add_subscribers(subs)
messenger.set_msg_t(notify_msg_t)
dispatch.email_messenger = messenger
logging.debug(pformat(dispatch.subscribers))
data = {"tags": {"AccessionNumber": "ABC123",
"PatientName": "DOE^JOHN^S"},
"meta": {"signature":
{"trial": "hobit",
"site": "duke"}
}
}
sent = dispatch.put(data, dryrun=EMAIL_DRYRUN)
data["meta"]["signature"]["site"] = "detroit"
sent += dispatch.put(data, dryrun=EMAIL_DRYRUN)
print(sent)
msgs = [x['msg'] for x in sent]
msgs = "\n".join(msgs)
# logging.debug(pformat(msgs))
assert( "SIREN/HOBIT" in msgs )
assert( "[email protected]" in msgs )
assert( 'subject jacket for "DOE^JOHN^S"' in msgs )
print("Passed!")
return True
def test_upload_dir_handler(dcm_dir: DcmDir, orth: Orthanc):
print("Testing can upload dir w handler")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
handle_upload_dir(dcm_dir, orth, fkey, anon_salt=anon_salt)
assert (len(orth.instances()) > 20)
print("Passed!")
return True
def test_upload_zip_handler(zip_file, orth: Orthanc):
print("Testing can upload zip w handler")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
handle_upload_zip(DcmDir(), zip_file, orth, fkey, anon_salt=anon_salt)
assert (len(orth.instances()) > 1)
print("Passed!")
return True
def test_file_arrived_handler(dcm_file, zip_file, orth: Orthanc):
print("Testing can handle file arrived")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
watch_path = tempfile.mkdtemp()
site_path = os.path.join(watch_path, "my_trial", "my_site")
os.makedirs(site_path)
shutil.copy(zip_file, site_path)
data = {"fn": os.path.join( site_path, Path(zip_file).name )}
handle_file_arrived(data, DcmDir(path=watch_path), orth,
fkey=fkey, anon_salt=anon_salt, signature_meta_key="signature")
assert (len(orth.instances()) > 1)
oid = orth.studies()[0]
data = orth.getm(oid, key="signature")
clear = unpack_data(data, fkey)
print(pformat(clear))
assert(clear["trial"] == "my_trial")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
shutil.copy(dcm_file, site_path)
data = {"fn": os.path.join(site_path, Path(dcm_file).name)}
handle_file_arrived(data, DcmDir(path=watch_path), orth,
fkey=fkey, anon_salt=anon_salt, signature_meta_key="signature")
assert (len(orth.instances()) == 1)
time.sleep(1.0)
oid = orth.studies()[0]
data = orth.getm(oid, key="signature")
clear = unpack_data(data, fkey)
print(pformat(clear))
assert(clear["trial"] == "my_trial")
orth.clear()
assert (len(orth.studies()) == 0)
shutil.rmtree(watch_path, ignore_errors=True)
print("Passed!")
return True
def test_notify_handler(dixel, orth: Orthanc,
subscriptions, messenger: SmtpMessenger,
indexer: Splunk, dryrun=EMAIL_DRYRUN):
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
orth.put(dixel)
dixel.meta["trial"] = "hobit"
dixel.meta["site"] = "testing"
orth.putm(dixel.parent_oid(DLv.STUDIES),
level=DLv.STUDIES,
key="signature",
value=dixel.pack_fields(fkey, fields=["trial", "site"]))
ch, subs = deserialize_dict(subscriptions)
dispatch = Dispatcher(
channel_tags=ch
)
dispatch.add_subscribers(subs)
messenger.set_msg_t(notify_msg_t)
dispatch.email_messenger = messenger
data = {"oid": dixel.parent_oid(DLv.STUDIES)}
handle_notify_study(data, source=orth,
dispatcher=dispatch, dryrun=dryrun,
                        indexer=indexer, index_name=os.environ.get("SPLUNK_INDEX"),
fkey=fkey)
print("Passed!")
return True
def test_watch_orthanc(test_dixel, orth: ObservableOrthanc):
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
watcher = Watcher()
trigger0 = Trigger(
evtype=DEv.INSTANCE_ADDED,
source=orth,
action=orth.say)
watcher.add_trigger(trigger0)
trigger1 = Trigger(
evtype=DEv.STUDY_ADDED,
source=orth,
action=orth.say)
watcher.add_trigger(trigger1)
def runner():
"""Pause to start watcher and then copy sample file to incoming"""
time.sleep(1.0)
orth.put(test_dixel)
p = Process(target=runner)
p.start()
f = io.StringIO()
print("Starting watcher")
with redirect_stdout(f):
print("In capture")
try:
with timeout(5): # Give it a little time to say the instance
watcher.run()
except RuntimeError:
print("Stopping watcher")
finally:
watcher.stop()
out = f.getvalue()
print("Watcher output:")
print(out)
    if test_dixel.oid() in out:
print("Passed!")
return True
def test_watch_dir(test_file):
watch_path = tempfile.mkdtemp()
site_path = os.path.join(watch_path, "my_trial", "my_site")
os.makedirs(site_path)
dcm_dir = ObservableDcmDir(path=watch_path)
watcher = Watcher()
trigger = Trigger(
evtype=DEv.FILE_ADDED,
source=dcm_dir,
action=dcm_dir.say)
watcher.add_trigger(trigger)
def runner():
"""Pause to start watcher and then copy sample file to incoming"""
time.sleep(1.0)
shutil.copy(test_file, site_path)
p = Process(target=runner)
p.start()
f = io.StringIO()
print("Starting watcher")
with redirect_stdout(f):
print("In capture")
try:
with timeout(5): # Give it a little time to say the filename
watcher.run()
except RuntimeError:
print("Stopping watcher")
finally:
watcher.stop()
out = f.getvalue()
print("Watcher output:")
print(out)
shutil.rmtree(watch_path, ignore_errors=True)
from pathlib import Path
if Path(test_file).name in out:
print("Passed!")
return True
def test_siren_receiver(test_file, orth: Orthanc,
subscriptions, messenger: SmtpMessenger,
indexer: Splunk, dryrun=EMAIL_DRYRUN):
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
ch, subs = deserialize_dict(subscriptions)
dispatch = Dispatcher(
channel_tags=ch
)
dispatch.add_subscribers(subs)
messenger.set_msg_t(notify_msg_t)
dispatch.email_messenger = messenger
watch_path = tempfile.mkdtemp()
site_path = os.path.join(watch_path, "hobit", "testing")
os.makedirs(site_path)
incoming = ObservableDcmDir(path=watch_path)
def runner():
"""Pause to start watcher and then copy sample file to incoming/trial/site"""
time.sleep(1.0)
shutil.copy(test_file, site_path)
p = Process(target=runner)
p.start()
f = io.StringIO()
print("Starting SIREN Receiver")
with redirect_stdout(f):
print("In capture")
try:
with timeout(90): # Give it a little time for the study to settle
watcher = start_watcher(
incoming,
orth,
fkey=fkey,
anon_salt=anon_salt,
dispatcher=dispatch,
dryrun=dryrun,
indexer=indexer,
index_name=os.environ.get("SPLUNK_INDEX")
)
except RuntimeError:
print("Stopping watcher subprocess")
out = f.getvalue()
print("SIREN Reciever output:")
print(out)
shutil.rmtree(watch_path, ignore_errors=True)
return True
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
suppress_urllib_debug()
suppress_watcher_debug()
# Create service endpoints
services = EndpointManager(serialized_ep_descs=_services)
print(pformat(services.ep_descs))
orth: ObservableOrthanc = services.get("hobit")
orth.polling_interval = 2.0
messenger: SmtpMessenger = services.get(SMTP_MESSENGER_NAME)
messenger.msg_t = msg_t
splunk: Splunk = services.get("splunk")
dcm_dir = DcmDir(path=test_sample_dir)
# Load a dixel
dixel = dcm_dir.get("HOBIT1172/IM0", file=True)
# assert( dixel )
# assert( dixel.file )
#
# # Verify that all endpoints are online
# assert( orth.check() )
# assert( messenger.check() )
# assert( splunk.check() )
#
# # Verify basic capabilities:
# # - upload
# # - anonymize
# # - index
# # - message
# # - distribute
#
# assert( test_upload_one(orth, dixel) )
# assert( test_anonymize_one(orth, dixel) )
# assert( test_index_one(splunk, dixel) )
assert( test_email_messenger(messenger) )
# assert( test_distribute(_subscriptions, messenger) )
exit()
# Verify observer daemons:
# - watch dir
# - watch orth
assert( test_watch_dir(test_sample_file) )
assert( test_watch_orthanc(dixel, orth) )
# Verify handlers:
# - directory
# - zip
# - file
# - notify
if DO_DIR_UPLOAD:
assert( test_upload_dir_handler(dcm_dir, orth) )
assert( test_upload_zip_handler(test_sample_zip, orth) )
assert( test_file_arrived_handler(test_sample_file, test_sample_zip, orth) )
assert( test_notify_handler(dixel, orth, _subscriptions, messenger, splunk) )
# Verify watcher pipeline
# - run watcher
assert( test_siren_receiver(test_sample_file, orth, _subscriptions, messenger, splunk) )
| [((3192, 3214), 'handlers.tagged_studies.clear', 'tagged_studies.clear', ([], {}), '()\n', (3212, 3214), False, 'from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, handle_file_arrived, start_watcher, tagged_studies\n'), ((3510, 3532), 'handlers.tagged_studies.clear', 'tagged_studies.clear', ([], {}), '()\n', (3530, 3532), False, 'from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, handle_file_arrived, start_watcher, tagged_studies\n'), ((3604, 3647), 'diana.dixel.ShamDixel.from_dixel', 'ShamDixel.from_dixel', (['dixel'], {'salt': 'anon_salt'}), '(dixel, salt=anon_salt)\n', (3624, 3647), False, 'from diana.dixel import Dixel, ShamDixel\n'), ((4187, 4209), 'diana.utils.unpack_data', 'unpack_data', (['enc', 'fkey'], {}), '(enc, fkey)\n', (4198, 4209), False, 'from diana.utils import unpack_data\n'), ((5481, 5512), 'crud.utils.deserialize_dict', 'deserialize_dict', (['subscriptions'], {}), '(subscriptions)\n', (5497, 5512), False, 'from crud.utils import deserialize_dict\n'), ((5528, 5555), 'trial_dispatcher.TrialDispatcher', 'Dispatcher', ([], {'channel_tags': 'ch'}), '(channel_tags=ch)\n', (5538, 5555), True, 'from trial_dispatcher import TrialDispatcher as Dispatcher\n'), ((6564, 6586), 'handlers.tagged_studies.clear', 'tagged_studies.clear', ([], {}), '()\n', (6584, 6586), False, 'from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, handle_file_arrived, start_watcher, tagged_studies\n'), ((6630, 6689), 'handlers.handle_upload_dir', 'handle_upload_dir', (['dcm_dir', 'orth', 'fkey'], {'anon_salt': 'anon_salt'}), '(dcm_dir, orth, fkey, anon_salt=anon_salt)\n', (6647, 6689), False, 'from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, handle_file_arrived, start_watcher, tagged_studies\n'), ((6892, 6914), 'handlers.tagged_studies.clear', 'tagged_studies.clear', ([], {}), '()\n', (6912, 6914), False, 'from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, handle_file_arrived, start_watcher, tagged_studies\n'), ((7241, 7263), 'handlers.tagged_studies.clear', 'tagged_studies.clear', ([], {}), '()\n', (7261, 7263), False, 'from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, handle_file_arrived, start_watcher, tagged_studies\n'), ((7320, 7338), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (7336, 7338), False, 'import tempfile\n'), ((7435, 7467), 'shutil.copy', 'shutil.copy', (['zip_file', 'site_path'], {}), '(zip_file, site_path)\n', (7446, 7467), False, 'import shutil\n'), ((7806, 7829), 'diana.utils.unpack_data', 'unpack_data', (['data', 'fkey'], {}), '(data, fkey)\n', (7817, 7829), False, 'from diana.utils import unpack_data\n'), ((7919, 7941), 'handlers.tagged_studies.clear', 'tagged_studies.clear', ([], {}), '()\n', (7939, 7941), False, 'from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, handle_file_arrived, start_watcher, tagged_studies\n'), ((7985, 8017), 'shutil.copy', 'shutil.copy', (['dcm_file', 'site_path'], {}), '(dcm_file, site_path)\n', (7996, 8017), False, 'import shutil\n'), ((8277, 8292), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (8287, 8292), False, 'import time\n'), ((8376, 8399), 'diana.utils.unpack_data', 'unpack_data', (['data', 'fkey'], {}), '(data, fkey)\n', (8387, 8399), False, 'from diana.utils import unpack_data\n'), ((8528, 8573), 'shutil.rmtree', 'shutil.rmtree', (['watch_path'], {'ignore_errors': '(True)'}), '(watch_path, ignore_errors=True)\n', (8541, 
8573), False, 'import shutil\n'), ((8810, 8832), 'handlers.tagged_studies.clear', 'tagged_studies.clear', ([], {}), '()\n', (8830, 8832), False, 'from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, handle_file_arrived, start_watcher, tagged_studies\n'), ((9158, 9189), 'crud.utils.deserialize_dict', 'deserialize_dict', (['subscriptions'], {}), '(subscriptions)\n', (9174, 9189), False, 'from crud.utils import deserialize_dict\n'), ((9205, 9232), 'trial_dispatcher.TrialDispatcher', 'Dispatcher', ([], {'channel_tags': 'ch'}), '(channel_tags=ch)\n', (9215, 9232), True, 'from trial_dispatcher import TrialDispatcher as Dispatcher\n'), ((9416, 9547), 'handlers.handle_notify_study', 'handle_notify_study', (['data'], {'source': 'orth', 'dispatcher': 'dispatch', 'dryrun': 'dryrun', 'indexer': 'indexer', 'index_name': 'SPLUNK_INDEX', 'fkey': 'fkey'}), '(data, source=orth, dispatcher=dispatch, dryrun=dryrun,\n indexer=indexer, index_name=SPLUNK_INDEX, fkey=fkey)\n', (9435, 9547), False, 'from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, handle_file_arrived, start_watcher, tagged_studies\n'), ((9739, 9761), 'handlers.tagged_studies.clear', 'tagged_studies.clear', ([], {}), '()\n', (9759, 9761), False, 'from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, handle_file_arrived, start_watcher, tagged_studies\n'), ((9815, 9824), 'crud.abc.Watcher', 'Watcher', ([], {}), '()\n', (9822, 9824), False, 'from crud.abc import Watcher, Trigger\n'), ((9841, 9905), 'crud.abc.Trigger', 'Trigger', ([], {'evtype': 'DEv.INSTANCE_ADDED', 'source': 'orth', 'action': 'orth.say'}), '(evtype=DEv.INSTANCE_ADDED, source=orth, action=orth.say)\n', (9848, 9905), False, 'from crud.abc import Watcher, Trigger\n'), ((9981, 10042), 'crud.abc.Trigger', 'Trigger', ([], {'evtype': 'DEv.STUDY_ADDED', 'source': 'orth', 'action': 'orth.say'}), '(evtype=DEv.STUDY_ADDED, source=orth, action=orth.say)\n', (9988, 10042), False, 'from crud.abc import Watcher, Trigger\n'), ((10258, 10280), 'multiprocessing.Process', 'Process', ([], {'target': 'runner'}), '(target=runner)\n', (10265, 10280), False, 'from multiprocessing import Process\n'), ((10304, 10317), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (10315, 10317), False, 'import io\n'), ((10826, 10844), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (10842, 10844), False, 'import tempfile\n'), ((10951, 10984), 'diana.apis.ObservableDcmDir', 'ObservableDcmDir', ([], {'path': 'watch_path'}), '(path=watch_path)\n', (10967, 10984), False, 'from diana.apis import Orthanc, ObservableOrthanc, DcmDir, ObservableDcmDir\n'), ((11000, 11009), 'crud.abc.Watcher', 'Watcher', ([], {}), '()\n', (11007, 11009), False, 'from crud.abc import Watcher, Trigger\n'), ((11025, 11091), 'crud.abc.Trigger', 'Trigger', ([], {'evtype': 'DEv.FILE_ADDED', 'source': 'dcm_dir', 'action': 'dcm_dir.say'}), '(evtype=DEv.FILE_ADDED, source=dcm_dir, action=dcm_dir.say)\n', (11032, 11091), False, 'from crud.abc import Watcher, Trigger\n'), ((11319, 11341), 'multiprocessing.Process', 'Process', ([], {'target': 'runner'}), '(target=runner)\n', (11326, 11341), False, 'from multiprocessing import Process\n'), ((11365, 11378), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (11376, 11378), False, 'import io\n'), ((11768, 11813), 'shutil.rmtree', 'shutil.rmtree', (['watch_path'], {'ignore_errors': '(True)'}), '(watch_path, ignore_errors=True)\n', (11781, 11813), False, 'import shutil\n'), ((12127, 12149), 'handlers.tagged_studies.clear', 
'tagged_studies.clear', ([], {}), '()\n', (12147, 12149), False, 'from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, handle_file_arrived, start_watcher, tagged_studies\n'), ((12204, 12235), 'crud.utils.deserialize_dict', 'deserialize_dict', (['subscriptions'], {}), '(subscriptions)\n', (12220, 12235), False, 'from crud.utils import deserialize_dict\n'), ((12251, 12278), 'trial_dispatcher.TrialDispatcher', 'Dispatcher', ([], {'channel_tags': 'ch'}), '(channel_tags=ch)\n', (12261, 12278), True, 'from trial_dispatcher import TrialDispatcher as Dispatcher\n'), ((12425, 12443), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (12441, 12443), False, 'import tempfile\n'), ((12548, 12581), 'diana.apis.ObservableDcmDir', 'ObservableDcmDir', ([], {'path': 'watch_path'}), '(path=watch_path)\n', (12564, 12581), False, 'from diana.apis import Orthanc, ObservableOrthanc, DcmDir, ObservableDcmDir\n'), ((12762, 12784), 'multiprocessing.Process', 'Process', ([], {'target': 'runner'}), '(target=runner)\n', (12769, 12784), False, 'from multiprocessing import Process\n'), ((12808, 12821), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (12819, 12821), False, 'import io\n'), ((13529, 13574), 'shutil.rmtree', 'shutil.rmtree', (['watch_path'], {'ignore_errors': '(True)'}), '(watch_path, ignore_errors=True)\n', (13542, 13574), False, 'import shutil\n'), ((13626, 13666), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (13645, 13666), False, 'import logging\n'), ((13671, 13694), 'diana.utils.gateways.suppress_urllib_debug', 'suppress_urllib_debug', ([], {}), '()\n', (13692, 13694), False, 'from diana.utils.gateways import suppress_urllib_debug\n'), ((13699, 13723), 'diana.utils.endpoint.watcher.suppress_watcher_debug', 'suppress_watcher_debug', ([], {}), '()\n', (13721, 13723), False, 'from diana.utils.endpoint.watcher import suppress_watcher_debug\n'), ((13771, 13817), 'crud.manager.EndpointManager', 'EndpointManager', ([], {'serialized_ep_descs': '_services'}), '(serialized_ep_descs=_services)\n', (13786, 13817), False, 'from crud.manager import EndpointManager\n'), ((14093, 14121), 'diana.apis.DcmDir', 'DcmDir', ([], {'path': 'test_sample_dir'}), '(path=test_sample_dir)\n', (14099, 14121), False, 'from diana.apis import Orthanc, ObservableOrthanc, DcmDir, ObservableDcmDir\n'), ((4559, 4573), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (4569, 4573), False, 'import time\n'), ((4762, 4778), 'logging.debug', 'logging.debug', (['r'], {}), '(r)\n', (4775, 4778), False, 'import logging\n'), ((5690, 5719), 'pprint.pformat', 'pformat', (['dispatch.subscribers'], {}), '(dispatch.subscribers)\n', (5697, 5719), False, 'from pprint import pformat\n'), ((6976, 6984), 'diana.apis.DcmDir', 'DcmDir', ([], {}), '()\n', (6982, 6984), False, 'from diana.apis import Orthanc, ObservableOrthanc, DcmDir, ObservableDcmDir\n'), ((7564, 7587), 'diana.apis.DcmDir', 'DcmDir', ([], {'path': 'watch_path'}), '(path=watch_path)\n', (7570, 7587), False, 'from diana.apis import Orthanc, ObservableOrthanc, DcmDir, ObservableDcmDir\n'), ((7840, 7854), 'pprint.pformat', 'pformat', (['clear'], {}), '(clear)\n', (7847, 7854), False, 'from pprint import pformat\n'), ((8113, 8136), 'diana.apis.DcmDir', 'DcmDir', ([], {'path': 'watch_path'}), '(path=watch_path)\n', (8119, 8136), False, 'from diana.apis import Orthanc, ObservableOrthanc, DcmDir, ObservableDcmDir\n'), ((8410, 8424), 'pprint.pformat', 'pformat', (['clear'], {}), '(clear)\n', (8417, 
8424), False, 'from pprint import pformat\n'), ((10204, 10219), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (10214, 10219), False, 'import time\n'), ((10357, 10375), 'contextlib.redirect_stdout', 'redirect_stdout', (['f'], {}), '(f)\n', (10372, 10375), False, 'from contextlib import redirect_stdout\n'), ((11252, 11267), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (11262, 11267), False, 'import time\n'), ((11276, 11309), 'shutil.copy', 'shutil.copy', (['test_file', 'site_path'], {}), '(test_file, site_path)\n', (11287, 11309), False, 'import shutil\n'), ((11418, 11436), 'contextlib.redirect_stdout', 'redirect_stdout', (['f'], {}), '(f)\n', (11433, 11436), False, 'from contextlib import redirect_stdout\n'), ((12695, 12710), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (12705, 12710), False, 'import time\n'), ((12719, 12752), 'shutil.copy', 'shutil.copy', (['test_file', 'site_path'], {}), '(test_file, site_path)\n', (12730, 12752), False, 'import shutil\n'), ((12868, 12886), 'contextlib.redirect_stdout', 'redirect_stdout', (['f'], {}), '(f)\n', (12883, 12886), False, 'from contextlib import redirect_stdout\n'), ((13829, 13855), 'pprint.pformat', 'pformat', (['services.ep_descs'], {}), '(services.ep_descs)\n', (13836, 13855), False, 'from pprint import pformat\n'), ((4658, 4672), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4670, 4672), False, 'from datetime import datetime, timedelta\n'), ((11851, 11866), 'pathlib.Path', 'Path', (['test_file'], {}), '(test_file)\n', (11855, 11866), False, 'from pathlib import Path\n'), ((4609, 4623), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4621, 4623), False, 'from datetime import datetime, timedelta\n'), ((4624, 4644), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(2)'}), '(minutes=2)\n', (4633, 4644), False, 'from datetime import datetime, timedelta\n'), ((7511, 7525), 'pathlib.Path', 'Path', (['zip_file'], {}), '(zip_file)\n', (7515, 7525), False, 'from pathlib import Path\n'), ((8061, 8075), 'pathlib.Path', 'Path', (['dcm_file'], {}), '(dcm_file)\n', (8065, 8075), False, 'from pathlib import Path\n'), ((10436, 10446), 'interruptingcow.timeout', 'timeout', (['(5)'], {}), '(5)\n', (10443, 10446), False, 'from interruptingcow import timeout\n'), ((11497, 11507), 'interruptingcow.timeout', 'timeout', (['(5)'], {}), '(5)\n', (11504, 11507), False, 'from interruptingcow import timeout\n'), ((12947, 12958), 'interruptingcow.timeout', 'timeout', (['(90)'], {}), '(90)\n', (12954, 12958), False, 'from interruptingcow import timeout\n')] |
jeking3/boost-deptree | deptree.py | 27eda54df2d022af17347df4ba4892c39392e474 | #
# Copyright (c) 2019 James E. King III
#
# Use, modification, and distribution are subject to the
# Boost Software License, Version 1.0. (See accompanying file
# LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)
#
import json
import networkx
import re
from pathlib import Path
class BoostDependencyTree(object):
"""
Generates a PlantUML dependency tree to visualize the dependencies.
One of the benefits of generating a visual graph is that cycles become
immediately evident.
"""
EDGES = {
2: "-->",
1: "..>"
}
STRENGTHS = {
"include": 2,
"src": 2,
"test": 1,
"tests": 1
}
def __init__(self, root: Path, out: Path):
"""
Arguments:
root: path to BOOST_ROOT
out: path to output file
"""
self.exp = re.compile(r"^\s*#\s*include\s*[<\"](?P<header>[^>\"]+)[>\"]\s*$")
self.graph = networkx.DiGraph()
self.headers = {} # key: header include path; value: repo key
self.repos = {} # key: repo key; value: repo path
self.out = out
self.root = root
self.libs = self.root / "libs"
with (self.libs / "config" / "include" / "boost" / "version.hpp").open() as fp:
vlines = fp.readlines()
for vline in vlines:
if "BOOST_LIB_VERSION" in vline:
#define BOOST_LIB_VERSION "1_71"
tokens = vline.split(" ")
self.boost_version = tokens[2].strip()[1:-1].replace("_", ".")
def load(self):
self.collect()
self.analyze()
def collect(self):
"""
Locate every .hpp and .h file and associate it with a repository.
"""
metas = self.libs.glob("**/libraries.json")
for meta in metas:
with meta.open() as fp:
metadata = json.loads(fp.read())
repodir = meta.parent.parent
metadata = metadata[0] if isinstance(metadata, list) else metadata # for boost/core
repokey = metadata["key"]
repoinc = repodir / "include"
if repoinc.is_dir(): # libs/geometry/index has no include but looks like a repo?
self.graph.add_node(repokey)
self.repos[repokey] = repodir
headers = repoinc.glob("**/*.h??")
for header in headers:
# print(str(header))
incpath = header.relative_to(repoinc)
                assert str(incpath) not in self.headers,\
                    f"{incpath} in {repokey} already in header map from "\
                    f"{self.headers[str(incpath)]} - duplicate header paths!"
self.headers[str(incpath)] = repokey
def analyze(self):
"""
Find every include statement and create a graph of dependencies.
"""
for repokey, repodir in self.repos.items():
for ext in ["c", "cpp", "h", "hpp", "ipp"]:
files = repodir.glob("**/*." + ext)
for code in files:
inside = code.relative_to(repodir).parts[0]
if inside not in self.STRENGTHS.keys():
continue
weight = self.STRENGTHS[inside]
with code.open() as fp:
try:
#print(str(code))
source = fp.readlines()
except UnicodeDecodeError:
continue
for line in source:
match = self.exp.search(line)
if match:
include = match.group("header")
if include in self.headers:
deprepo = self.headers[include]
if repokey != deprepo: # avoid self-references
data = self.graph.get_edge_data(repokey, deprepo, {"weight": 0})
if data["weight"] > 0 and data["weight"] < weight:
self.graph.remove_edge(repokey, deprepo)
data["weight"] = 0
if data["weight"] == 0:
self.graph.add_edge(repokey, deprepo, weight=weight)
def report_cycles(self):
with self.out.open("w") as fp:
fp.write("@startuml\n")
fp.write("\n")
fp.write(f"title Boost {self.boost_version} Direct Dependency Cycles\n")
fp.write("footer Generated by boost-deptree (C) 2019 James E. King III\n")
fp.write("\n")
for edge in self.graph.edges:
fwdweight = self.graph.get_edge_data(edge[0], edge[1])["weight"]
if fwdweight > 1:
if self.graph.get_edge_data(edge[1], edge[0], {"weight": 0})["weight"] > 1:
fp.write(f"['{edge[0]}'] --> ['{edge[1]}']\n")
fp.write("\n")
fp.write("@enduml\n")
def report_dependencies_from(self, repokey):
with self.out.open("w") as fp:
fp.write("@startuml\n")
fp.write("\n")
fp.write(f"title Boost {self.boost_version} dependencies of {repokey}\n")
fp.write("footer Generated by boost-deptree (C) 2019 James E. King III\n")
fp.write("\n")
for edge in self.graph.edges:
if edge[0] == repokey:
fwdweight = self.graph.get_edge_data(edge[0], edge[1])["weight"]
fp.write(f"['{edge[0]}'] {self.EDGES[fwdweight]} ['{edge[1]}']\n")
fp.write("\n")
fp.write("@enduml\n")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Generate PlantUML dependency tree.')
parser.add_argument('root', type=str, help='Boost root directory.')
parser.add_argument('out', type=str, help='Output filename.')
require_one = parser.add_mutually_exclusive_group(required=True)
require_one.add_argument('--cycles', action='store_true', help='Show direct repository dependency cycles.')
require_one.add_argument('--from', help='Show dependencies from a given repository.')
args = parser.parse_args()
root = Path(args.root)
assert root.is_dir(), "root is not a directory"
out = Path(args.out)
tree = BoostDependencyTree(root, out)
tree.load()
if args.cycles:
tree.report_cycles()
else:
tree.report_dependencies_from(args.__dict__["from"])
| [((5939, 6012), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate PlantUML dependency tree."""'}), "(description='Generate PlantUML dependency tree.')\n", (5962, 6012), False, 'import argparse\n'), ((6464, 6479), 'pathlib.Path', 'Path', (['args.root'], {}), '(args.root)\n', (6468, 6479), False, 'from pathlib import Path\n'), ((6542, 6556), 'pathlib.Path', 'Path', (['args.out'], {}), '(args.out)\n', (6546, 6556), False, 'from pathlib import Path\n'), ((860, 932), 're.compile', 're.compile', (['"""^\\\\s*#\\\\s*include\\\\s*[<\\\\"](?P<header>[^>\\\\"]+)[>\\\\"]\\\\s*$"""'], {}), '(\'^\\\\s*#\\\\s*include\\\\s*[<\\\\"](?P<header>[^>\\\\"]+)[>\\\\"]\\\\s*$\')\n', (870, 932), False, 'import re\n'), ((949, 967), 'networkx.DiGraph', 'networkx.DiGraph', ([], {}), '()\n', (965, 967), False, 'import networkx\n')] |
adiHusky/uber_backend | uberbackend.py | adc78882c081f7636b809d6e1889ba3297309e20 | from flask import Flask, flash, request, jsonify, render_template, redirect, url_for, g, session, send_from_directory, abort
from flask_cors import CORS
# from flask import status
from datetime import date, datetime, timedelta
from calendar import monthrange
from dateutil.parser import parse
import pytz
import os
import sys
import time
import uuid
import json
import random
import string
import pathlib
import io
from uuid import UUID
from bson.objectid import ObjectId
# straight mongo access
from pymongo import MongoClient
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn="https://[email protected]/5685529",
integrations=[FlaskIntegration()],
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for performance monitoring.
# We recommend adjusting this value in production.
traces_sample_rate=1.0,
# By default the SDK will try to use the SENTRY_RELEASE
# environment variable, or infer a git commit
# SHA as release, however you may want to set
# something more human-readable.
# release="[email protected]",
)
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
# mongo
# mongo_client = MongoClient('mongodb://localhost:27017/')
mongo_client = MongoClient(
"mongodb+srv://Mahitha-Maddi:Mahitha%[email protected]/test")
app = Flask(__name__)
# CORS(app)
CORS(app, resources={r"/*": {"origins": "*"}})
basedir = os.path.abspath(os.path.dirname(__file__))
# Here are my datasets
bookings = dict()
################
# Apply to mongo
################
def atlas_connect():
# Node
# const MongoClient = require('mongodb').MongoClient;
# const uri = "mongodb+srv://admin:<password>@tweets.8ugzv.mongodb.net/myFirstDatabase?retryWrites=true&w=majority";
# const client = new MongoClient(uri, { useNewUrlParser: true, useUnifiedTopology: true });
# client.connect(err => {
# const collection = client.db("test").collection("devices");
# // perform actions on the collection object
# client.close();
# });
# Python
    client = MongoClient(
"mongodb+srv://Mahitha-Maddi:Mahitha%[email protected]/test")
db = client.test
# database access layer
def insert_one(r):
start_time = datetime.now()
with mongo_client:
# start_time_db = datetime.now()
db = mongo_client['Uber']
# microseconds_caching_db = (datetime.now() - start_time_db).microseconds
# print("*** It took " + str(microseconds_caching_db) + " microseconds to cache mongo handle.")
print("...insert_one() to mongo: ", r)
try:
mongo_collection = db['bookings']
result = mongo_collection.insert_one(r)
print("inserted _ids: ", result.inserted_id)
except Exception as e:
print(e)
microseconds_doing_mongo_work = (datetime.now() - start_time).microseconds
print("*** It took " + str(microseconds_doing_mongo_work) +
" microseconds to insert_one.")
def tryexcept(requesto, key, default):
lhs = None
try:
lhs = requesto.json[key]
# except Exception as e:
except:
lhs = default
return lhs
def ssm():
now = datetime.now()
midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
return str((now - midnight).seconds)
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
# endpoint to check Availability
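# Example request body (values are illustrative; the three fields are read from
# request.json and matched against the 'available' collection with $regex):
#   {"source": "Boston", "destination": "New York", "date": "2021-06-01"}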
@app.route("/checkAvailability", methods=["POST"])
def check_availability():
source = request.json['source']
destination = request.json['destination']
date = request.json['date']
with mongo_client:
#raise InvalidUsage('This view is gone', status_code=410)
db = mongo_client['Uber']
mongo_collection = db['available']
print(source)
myquery = {"source": {"$regex": str(source)}, "destination": {
"$regex": str(destination)}, "date": {"$regex": str(date)}}
cursor = dict()
cursor = mongo_collection.find(myquery, {"_id": 0})
records = list(cursor)
howmany = len(records)
print('found ' + str(howmany) + ' bookings!')
sorted_records = sorted(records, key=lambda t: t['source'])
print(type(sorted_records))
return jsonify(sorted_records)
# endpoint to create new Booking
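# Example request body (values are illustrative; all seven fields are read from request.json):
#   {"source": "Boston", "destination": "New York", "date": "2021-06-01",
#    "startTime": "09:00", "endTime": "13:00", "user": "alice", "busnumber": "42"}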
@app.route("/book", methods=["POST"])
def book_bus():
source = request.json['source']
destination = request.json['destination']
date = request.json['date']
startTime = request.json['startTime']
endTime = request.json['endTime']
user = request.json['user']
busnumber = request.json['busnumber']
booking = dict(user=user, source=source, destination=destination, busnumber=busnumber,
date=date, startTime=startTime, endTime=endTime, bookeddate=datetime.now(
).strftime("%Y-%m-%d %H:%M:%S"),
_id=str(ObjectId()))
insert_one(booking)
return jsonify(booking)
@app.route("/bookings-results", methods=["GET"])
def get_tweets_results():
global bookings
with mongo_client:
db = mongo_client['Uber']
mongo_collection = db['bookings']
cursor = mongo_collection.find({})
records = list(cursor)
howmany = len(records)
print('found ' + str(howmany) + ' bookings!')
sorted_records = sorted(records, key=lambda t: t['source'])
return jsonify(sorted_records)
##################
# Apply from mongo
##################
def applyRecordLevelUpdates():
return None
def applyCollectionLevelUpdates():
global bookings
with mongo_client:
db = mongo_client['Uber']
mongo_collection = db['available']
cursor = mongo_collection.find({})
records = list(cursor)
# bookings[0] = records[0]
howmany = len(records)
print('found ' + str(howmany) + ' bookings!')
sorted_records = sorted(records, key=lambda t: t['source'])
# return json.dumps({"results": sorted_records })
for booking in sorted_records:
bookings[booking['_id']] = booking
@app.route("/")
def home():
return """Welcome to Uber backend!<br/>"""
##################
# ADMINISTRATION #
##################
# This runs once before the first single request
# Used to bootstrap our collections
@app.before_first_request
def before_first_request_func():
applyCollectionLevelUpdates()
# This runs once before any request
@app.before_request
def before_request_func():
applyRecordLevelUpdates()
############################
# INFO on containerization #
############################
# To containerize a flask app:
# https://pythonise.com/series/learning-flask/building-a-flask-app-with-docker-compose
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
| [((1658, 1750), 'pymongo.MongoClient', 'MongoClient', (['"""mongodb+srv://Mahitha-Maddi:Mahitha%[email protected]/test"""'], {}), "(\n 'mongodb+srv://Mahitha-Maddi:Mahitha%[email protected]/test')\n", (1669, 1750), False, 'from pymongo import MongoClient\n'), ((1758, 1773), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1763, 1773), False, 'from flask import Flask, flash, request, jsonify, render_template, redirect, url_for, g, session, send_from_directory, abort\n'), ((1786, 1831), 'flask_cors.CORS', 'CORS', (['app'], {'resources': "{'/*': {'origins': '*'}}"}), "(app, resources={'/*': {'origins': '*'}})\n", (1790, 1831), False, 'from flask_cors import CORS\n'), ((1859, 1884), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1874, 1884), False, 'import os\n'), ((2682, 2696), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2694, 2696), False, 'from datetime import date, datetime, timedelta\n'), ((3638, 3652), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3650, 3652), False, 'from datetime import date, datetime, timedelta\n'), ((4809, 4832), 'flask.jsonify', 'jsonify', (['sorted_records'], {}), '(sorted_records)\n', (4816, 4832), False, 'from flask import Flask, flash, request, jsonify, render_template, redirect, url_for, g, session, send_from_directory, abort\n'), ((5503, 5519), 'flask.jsonify', 'jsonify', (['booking'], {}), '(booking)\n', (5510, 5519), False, 'from flask import Flask, flash, request, jsonify, render_template, redirect, url_for, g, session, send_from_directory, abort\n'), ((5955, 5978), 'flask.jsonify', 'jsonify', (['sorted_records'], {}), '(sorted_records)\n', (5962, 5978), False, 'from flask import Flask, flash, request, jsonify, render_template, redirect, url_for, g, session, send_from_directory, abort\n'), ((727, 745), 'sentry_sdk.integrations.flask.FlaskIntegration', 'FlaskIntegration', ([], {}), '()\n', (743, 745), False, 'from sentry_sdk.integrations.flask import FlaskIntegration\n'), ((3287, 3301), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3299, 3301), False, 'from datetime import date, datetime, timedelta\n'), ((5454, 5464), 'bson.objectid.ObjectId', 'ObjectId', ([], {}), '()\n', (5462, 5464), False, 'from bson.objectid import ObjectId\n'), ((5361, 5375), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5373, 5375), False, 'from datetime import date, datetime, timedelta\n')] |
mirfan899/MTTS | sppas/sppas/src/models/acm/htkscripts.py | 3167b65f576abcc27a8767d24c274a04712bd948 | """
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
src.models.acm.htkscripts.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import os
import os.path
import logging
# ---------------------------------------------------------------------------
class sppasHtkScripts(object):
"""HTK-ASCII scripts reader/writer.
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 Brigitte Bigi
:author: Brigitte Bigi
:contact: [email protected]
This class is able to write all scripts of the VoxForge tutorial.
They are used to train acoustic models thanks to the HTK toolbox.
For details, refer to: http://www.voxforge.org/
"""
def __init__(self):
"""Create a sppasHtkScripts instance."""
self.configfile = ""
self.globalfile = ""
self.mkphones0file = ""
self.mkphones1file = ""
self.mktrifile = ""
self.maketriphonesfile = ""
self.silfile = ""
# -----------------------------------------------------------------------
def write_all(self, dirname):
"""Write all scripts at once.
        Write scripts with their default names in the given directory.
:param dirname: (str) a directory name (existing or to be created).
"""
if os.path.exists(dirname) is False:
os.mkdir(dirname)
self.write_global_ded(os.path.join(dirname, "global.ded"))
self.write_mkphones0_led(os.path.join(dirname, "mkphones0.led"))
self.write_mkphones1_led(os.path.join(dirname, "mkphones1.led"))
self.write_mktri_led(os.path.join(dirname, "mktri.led"))
self.write_maketriphones_ded(os.path.join(dirname, "maketriphones.ded"))
self.write_sil_hed(os.path.join(dirname, "sil.hed"))
# -----------------------------------------------------------------------
def write_global_ded(self, filename):
"""Write the htk script `global.ded`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("AS sp\n")
fp.write("RS cmu\n")
fp.write("MP sil sil sp\n")
fp.write("\n")
fp.close()
self.globalfile = filename
# -----------------------------------------------------------------------
def write_mkphones0_led(self, filename):
"""Write the htk script `mkphones0.led`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("EX\n")
fp.write("IS sil sil\n")
fp.write("DE sp\n")
fp.write("\n")
fp.close()
self.mkphones0file = filename
# -----------------------------------------------------------------------
def write_mkphones1_led(self, filename):
"""Write the htk script `mkphones1.led`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("EX\n")
fp.write("IS sil sil\n")
fp.write("\n")
fp.close()
self.mkphones1file = filename
# -----------------------------------------------------------------------
def write_mktri_led(self, filename):
"""Write the htk script `mktri.led`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("WB sp\n")
fp.write("WB sil\n")
fp.write("TC\n")
fp.write("\n")
fp.close()
self.mktrifile = filename
# -----------------------------------------------------------------------
def write_maketriphones_ded(self, filename):
"""Write the htk script `maketriphones.ded`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("AS sp\n")
fp.write("MP sil sil sp\n")
fp.write("TC\n")
fp.write("\n")
fp.close()
self.maketriphonesfile = filename
# -----------------------------------------------------------------------
def write_sil_hed(self, filename):
"""Write the htk script `sil.hed`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("AT 2 4 0.2 {sil.transP}\n")
fp.write("AT 4 2 0.2 {sil.transP}\n")
fp.write("AT 1 3 0.3 {sp.transP}\n")
fp.write("TI silst {sil.state[3],sp.state[2]}\n")
fp.write("\n")
fp.close()
self.silfile = filename
| [((2586, 2609), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (2600, 2609), False, 'import os\n'), ((2632, 2649), 'os.mkdir', 'os.mkdir', (['dirname'], {}), '(dirname)\n', (2640, 2649), False, 'import os\n'), ((2681, 2716), 'os.path.join', 'os.path.join', (['dirname', '"""global.ded"""'], {}), "(dirname, 'global.ded')\n", (2693, 2716), False, 'import os\n'), ((2751, 2789), 'os.path.join', 'os.path.join', (['dirname', '"""mkphones0.led"""'], {}), "(dirname, 'mkphones0.led')\n", (2763, 2789), False, 'import os\n'), ((2824, 2862), 'os.path.join', 'os.path.join', (['dirname', '"""mkphones1.led"""'], {}), "(dirname, 'mkphones1.led')\n", (2836, 2862), False, 'import os\n'), ((2893, 2927), 'os.path.join', 'os.path.join', (['dirname', '"""mktri.led"""'], {}), "(dirname, 'mktri.led')\n", (2905, 2927), False, 'import os\n'), ((2966, 3008), 'os.path.join', 'os.path.join', (['dirname', '"""maketriphones.ded"""'], {}), "(dirname, 'maketriphones.ded')\n", (2978, 3008), False, 'import os\n'), ((3037, 3069), 'os.path.join', 'os.path.join', (['dirname', '"""sil.hed"""'], {}), "(dirname, 'sil.hed')\n", (3049, 3069), False, 'import os\n')] |
ic-labs/django-icekit | icekit/plugins/map/tests.py | c507ea5b1864303732c53ad7c5800571fca5fa94 | from mock import patch
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django.core import exceptions
from django_dynamic_fixture import G
from django_webtest import WebTest
from icekit.models import Layout
from icekit.page_types.layout_page.models import LayoutPage
from icekit.utils import fluent_contents
from . import models
User = get_user_model()
class MapItemTestCase(WebTest):
def setUp(self):
self.embed_code = '''
<iframe
src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3312.0476344648832!2d151.19845715159963!3d-33.88842702741586!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x6b12b1d842ee9aa9%3A0xb0a19ac433ef0be8!2sThe+Interaction+Consortium!5e0!3m2!1sen!2sau!4v1496201264670"
width="600"
height="450"
frameborder="0"
style="border:0"
allowfullscreen
></iframe>
'''
self.cleaned_embed_code = '<iframe allowfullscreen="" frameborder="0" src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3312.0476344648832!2d151.19845715159963!3d-33.88842702741586!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x6b12b1d842ee9aa9%3A0xb0a19ac433ef0be8!2sThe+Interaction+Consortium!5e0!3m2!1sen!2sau!4v1496201264670" style="border: 0;"></iframe>'
self.layout_1 = G(
Layout,
template_name='icekit/layouts/default.html',
)
self.layout_1.content_types.add(
ContentType.objects.get_for_model(LayoutPage))
self.layout_1.save()
self.staff_1 = User.objects.create(
email='[email protected]',
is_staff=True,
is_active=True,
is_superuser=True,
)
self.page_1 = LayoutPage()
self.page_1.title = 'Test Page'
self.page_1.slug = 'test-page'
self.page_1.parent_site = Site.objects.first()
self.page_1.layout = self.layout_1
self.page_1.author = self.staff_1
self.page_1.status = LayoutPage.PUBLISHED
self.page_1.save()
self.map_1 = fluent_contents.create_content_instance(
models.MapItem,
self.page_1,
_embed_code=self.embed_code,
)
self.map_item = models.MapItem(
parent_type=ContentType.objects.get_for_model(type(self.page_1)),
parent_id=self.page_1.id,
placeholder=self.page_1.get_placeholder_by_slot('main')[0],
_embed_code=self.embed_code,
)
self.page_1.publish()
def test_map_renders(self):
response = self.app.get(self.page_1.get_published().get_absolute_url())
response.mustcontain(self.cleaned_embed_code)
def test_cleaned_embed_code(self):
self.assertEqual(self.map_1._cleaned_embed_code.strip(), self.cleaned_embed_code)
| [((447, 463), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (461, 463), False, 'from django.contrib.auth import get_user_model\n'), ((1461, 1515), 'django_dynamic_fixture.G', 'G', (['Layout'], {'template_name': '"""icekit/layouts/default.html"""'}), "(Layout, template_name='icekit/layouts/default.html')\n", (1462, 1515), False, 'from django_dynamic_fixture import G\n'), ((1877, 1889), 'icekit.page_types.layout_page.models.LayoutPage', 'LayoutPage', ([], {}), '()\n', (1887, 1889), False, 'from icekit.page_types.layout_page.models import LayoutPage\n'), ((2003, 2023), 'django.contrib.sites.models.Site.objects.first', 'Site.objects.first', ([], {}), '()\n', (2021, 2023), False, 'from django.contrib.sites.models import Site\n'), ((2208, 2309), 'icekit.utils.fluent_contents.create_content_instance', 'fluent_contents.create_content_instance', (['models.MapItem', 'self.page_1'], {'_embed_code': 'self.embed_code'}), '(models.MapItem, self.page_1,\n _embed_code=self.embed_code)\n', (2247, 2309), False, 'from icekit.utils import fluent_contents\n'), ((1604, 1649), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['LayoutPage'], {}), '(LayoutPage)\n', (1637, 1649), False, 'from django.contrib.contenttypes.models import ContentType\n')] |
saravanabalagi/imshowtools | example/example.py | ea81af888c69223ff8b42b5c4b8c034483eebe21 | from imshowtools import imshow
import cv2
if __name__ == '__main__':
image_lenna = cv2.imread("lenna.png")
imshow(image_lenna, mode='BGR', window_title="LennaWindow", title="Lenna")
image_lenna_bgr = cv2.imread("lenna_bgr.png")
imshow(image_lenna, image_lenna_bgr, mode=['BGR', 'RGB'], title=['lenna_rgb', 'lenna_bgr'])
imshow(*[image_lenna for _ in range(12)], title=["Lenna" for _ in range(12)], window_title="LennaWindow")
imshow(*[image_lenna for _ in range(30)], title="Lenna", padding=(1, 1, 0, (0, 0, 0.8, 0.8)))
| [((90, 113), 'cv2.imread', 'cv2.imread', (['"""lenna.png"""'], {}), "('lenna.png')\n", (100, 113), False, 'import cv2\n'), ((118, 192), 'imshowtools.imshow', 'imshow', (['image_lenna'], {'mode': '"""BGR"""', 'window_title': '"""LennaWindow"""', 'title': '"""Lenna"""'}), "(image_lenna, mode='BGR', window_title='LennaWindow', title='Lenna')\n", (124, 192), False, 'from imshowtools import imshow\n'), ((216, 243), 'cv2.imread', 'cv2.imread', (['"""lenna_bgr.png"""'], {}), "('lenna_bgr.png')\n", (226, 243), False, 'import cv2\n'), ((248, 344), 'imshowtools.imshow', 'imshow', (['image_lenna', 'image_lenna_bgr'], {'mode': "['BGR', 'RGB']", 'title': "['lenna_rgb', 'lenna_bgr']"}), "(image_lenna, image_lenna_bgr, mode=['BGR', 'RGB'], title=[\n 'lenna_rgb', 'lenna_bgr'])\n", (254, 344), False, 'from imshowtools import imshow\n')] |
amehta1/t1-python | terminalone/models/concept.py | 4f7eb0bec7671b29baf3105b8cafafb373107e7b | # -*- coding: utf-8 -*-
"""Provides concept object."""
from __future__ import absolute_import
from .. import t1types
from ..entity import Entity
class Concept(Entity):
"""Concept entity."""
collection = 'concepts'
resource = 'concept'
_relations = {
'advertiser',
}
_pull = {
'advertiser_id': int,
'created_on': t1types.strpt,
'id': int,
'name': None,
'status': t1types.int_to_bool,
'updated_on': t1types.strpt,
'version': int,
}
_push = _pull.copy()
_push.update({
'status': int,
})
def __init__(self, session, properties=None, **kwargs):
super(Concept, self).__init__(session, properties, **kwargs)
| [] |
dmeklund/asyncdemo | videofeed.py | 956f193c0fa38744965362966ac7f8ef224409b4 | """
Mock up a video feed pipeline
"""
import asyncio
import logging
import sys
import cv2
logging.basicConfig(format="[%(thread)-5d]%(asctime)s: %(message)s")
logger = logging.getLogger('async')
logger.setLevel(logging.INFO)
async def process_video(filename):
cap = cv2.VideoCapture(filename)
tasks = list()
frame_ind = 0
while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # stop scheduling work once the capture runs out of frames
            break
        tasks.append(asyncio.ensure_future(process_frame(frame, frame_ind)))
frame_ind += 1
await asyncio.sleep(0)
await asyncio.gather(tasks)
async def process_frame(frame, frame_ind):
logger.info("Processing frame {}".format(frame_ind))
await asyncio.sleep(20.0)
logger.info("Finished processing frame {}".format(frame_ind))
def main():
loop = asyncio.get_event_loop()
loop.run_until_complete(process_video(sys.argv[1]))
logger.info("Completed")
if __name__ == '__main__':
main()
| [((92, 160), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(thread)-5d]%(asctime)s: %(message)s"""'}), "(format='[%(thread)-5d]%(asctime)s: %(message)s')\n", (111, 160), False, 'import logging\n'), ((170, 196), 'logging.getLogger', 'logging.getLogger', (['"""async"""'], {}), "('async')\n", (187, 196), False, 'import logging\n'), ((274, 300), 'cv2.VideoCapture', 'cv2.VideoCapture', (['filename'], {}), '(filename)\n', (290, 300), False, 'import cv2\n'), ((782, 806), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (804, 806), False, 'import asyncio\n'), ((537, 558), 'asyncio.gather', 'asyncio.gather', (['tasks'], {}), '(tasks)\n', (551, 558), False, 'import asyncio\n'), ((671, 690), 'asyncio.sleep', 'asyncio.sleep', (['(20.0)'], {}), '(20.0)\n', (684, 690), False, 'import asyncio\n'), ((510, 526), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (523, 526), False, 'import asyncio\n')] |
mikeus9908/peracotta | parsers/read_lspci_and_glxinfo.py | c54c351acae8afec250185f4bc714a2f86c47c90 | #!/usr/bin/python3
"""
Read "lspci -v" and "glxinfo" outputs
"""
import re
from dataclasses import dataclass
from InputFileNotFoundError import InputFileNotFoundError
@dataclass
class VideoCard:
type = "graphics-card"
manufacturer_brand = ""
reseller_brand = ""
internal_name = ""
model = ""
capacity = -1 # bytes
warning = ""
def parse_lspci_output(gpu: VideoCard, lspci_path: str, interactive: bool = False):
try:
with open(lspci_path, "r") as f:
lspci_output = f.read()
except FileNotFoundError:
raise InputFileNotFoundError(lspci_path)
lspci_sections = lspci_output.split("\n\n")
for section in lspci_sections:
if "VGA compatible controller" in section:
first_line = section.splitlines()[0].split(": ", 1)[
1
] # removes "VGA compatible controller:"
second_line = section.splitlines()[1]
part_between_square_brackets = None
try:
# take the first string between [] from the first line
part_between_square_brackets = first_line.split("[")[1].split("]")[0]
except IndexError:
# there may not be an argument in between []
pass
if "Subsystem:" in second_line:
# The model or model family is often repeated here, but removing it automatically is complicated
gpu.reseller_brand = (
second_line.split("Subsystem: ")[1].split("[", 1)[0].strip()
)
gpu.reseller_brand = gpu.reseller_brand.replace(
"Integrated Graphics Controller", ""
)
# -----------------------------------------------------------------
# AMD/ATI
# -----------------------------------------------------------------
if part_between_square_brackets is not None and (
"AMD" in part_between_square_brackets
or "ATI" in part_between_square_brackets
):
gpu.manufacturer_brand = part_between_square_brackets
# take second string between []
gpu.model = first_line.split("[")[2].split("]")[0]
if "controller" in gpu.model:
gpu.model = section.splitlines()[1].split(" ")[-1]
# -----------------------------------------------------------------
# Nvidia
# -----------------------------------------------------------------
elif "NVIDIA" in first_line.upper():
gpu.manufacturer_brand = "Nvidia"
gpu.model = part_between_square_brackets
if gpu.reseller_brand != "":
pieces = gpu.reseller_brand.rsplit(" ", 1)
gpu.reseller_brand = pieces[0]
gpu.internal_name = pieces[1]
# -----------------------------------------------------------------
# Intel
# -----------------------------------------------------------------
elif "INTEL" in first_line.upper():
gpu.manufacturer_brand = "Intel"
if "Integrated Graphics" in first_line:
tmp_model = first_line.split("Intel Corporation ")[1].split(
" Integrated Graphics"
)[0]
# if there are no numbers, e.g. "Core Processor", tmp_model is not a model number
if not re.search("\\d+", tmp_model):
tmp_model = ""
elif "HD Graphics" in first_line:
tmp_model = (
first_line.split("Intel Corporation ")[1]
.split("(", 1)[0]
.strip()
)
elif "[" in first_line and "]" in first_line:
tmp_model = first_line.split("[")[1].split("]")[0]
else:
tmp_model = ""
if tmp_model != "":
gpu.model = tmp_model
else:
gpu.model = ""
# -----------------------------------------------------------------
# VIA
# -----------------------------------------------------------------
elif first_line.startswith("VIA"):
gpu.manufacturer_brand = "VIA"
gpu.model = part_between_square_brackets
tmp_model = first_line.split("[")[0]
i = 0
for i, char in enumerate("VIA Technologies, Inc. "):
if tmp_model[i] != char:
break
gpu.internal_name = tmp_model[i:].strip()
# -----------------------------------------------------------------
# SiS
# -----------------------------------------------------------------
elif part_between_square_brackets == "SiS":
# May be written somewhere else on other models, but we have so few SiS cards that it's difficult to
# find more examples. Also, they haven't made any video card in the last 15 years or so.
gpu.manufacturer_brand = part_between_square_brackets
if gpu.reseller_brand.lower() == "silicon integrated systems":
gpu.reseller_brand = "SiS"
gpu.model = first_line.split("]", 1)[1]
# These may be useful for non-integrated cards, however the example ones are all integrated
if " PCIE" in gpu.model:
gpu.model = gpu.model.split(" PCIE", 1)[0].strip()
elif " PCI/AGP" in gpu.model:
gpu.model = gpu.model.split(" PCI/AGP", 1)[0].strip()
if gpu.model in gpu.reseller_brand:
gpu.reseller_brand = gpu.reseller_brand.split(gpu.model, 1)[
0
].strip()
else:
gpu.manufacturer_brand = None
error = (
"I couldn't find the Video Card brand. The model was set to 'None' and is to be edited "
"logging into the TARALLO afterwards. The information you're looking for should be in the "
f"following 2 lines:\n{first_line}\n{second_line}\n"
)
if interactive:
print(error)
gpu.warning += error
if gpu.model is None:
error = (
"I couldn't find the Integrated Graphics model. The model was set to 'None' and is to be "
"edited logging into the TARALLO afterwards. The information you're looking for should be in "
f"the following 2 lines:\n{first_line}\n{second_line}\n"
)
if interactive:
print(error)
gpu.warning += error
else:
# Try to remove duplicate information
gpu.reseller_brand = gpu.reseller_brand.replace(gpu.model, "").strip()
if gpu.internal_name is not None:
# Same
gpu.reseller_brand = gpu.reseller_brand.replace(
gpu.internal_name, ""
).strip()
break
def parse_glxinfo_output(gpu: VideoCard, glxinfo_path: str):
try:
with open(glxinfo_path, "r") as f:
glxinfo_output = f.read()
except FileNotFoundError:
raise InputFileNotFoundError(glxinfo_path)
for i, line in enumerate(glxinfo_output.splitlines()):
# this line comes before the "Dedicated video memory" line
# this basically saves a default value if the dedicated memory line cannot be found
if "Video memory" in line:
try:
tmp_vid_mem = int(line.split(" ")[6].split(" ")[0][:-2])
tmp_vid_mem_multiplier = line[-2:]
except ValueError:
exit(-1)
return # To stop complaints from PyCharm
gpu.capacity = convert_video_memory_size(
tmp_vid_mem, tmp_vid_mem_multiplier
)
if "Dedicated video memory" in line:
try:
tmp_vram = int(line.split(" ")[7].split(" ")[0])
tmp_vram_multiplier = line[-2:]
except ValueError:
exit(-1)
return
capacity = convert_video_memory_size(tmp_vram, tmp_vram_multiplier)
if capacity < 0:
gpu.warning = "Could not find dedicated video memory"
if gpu.capacity < 0:
gpu.warning += ". The value cannot be trusted."
else:
gpu.capacity = capacity
break
if gpu.capacity > 0:
# Round to the next power of 2
# this may be different from human readable capacity...
rounded = 2 ** (gpu.capacity - 1).bit_length()
one_and_half = int(rounded / 2 * 1.5)
# Accounts for 3 GB VRAM cards and similar
# Yes they do exist, try to remove this part and watch tests fail (and the card was manually verified to be 3 GB)
if one_and_half >= gpu.capacity:
gpu.capacity = one_and_half
else:
gpu.capacity = rounded
def convert_video_memory_size(capacity, units_of_measure):
if units_of_measure == "GB":
capacity *= 1024 * 1024 * 1024
elif units_of_measure == "MB":
capacity *= 1024 * 1024
elif units_of_measure.upper() == "KB":
capacity *= 1024
else:
capacity = -1
return capacity
def read_lspci_and_glxinfo(
has_dedicated: bool, lspci_path: str, glxinfo_path: str, interactive: bool = False
):
gpu = VideoCard()
if has_dedicated:
parse_lspci_output(gpu, lspci_path, interactive)
parse_glxinfo_output(gpu, glxinfo_path)
else: # integrated_in_mobo or integrated_in_cpu
parse_lspci_output(gpu, lspci_path, interactive)
# don't parse glxinfo because the VRAM is part of the RAM and varies
gpu.capacity = None
# print("The VRAM capacity could not be detected. "
# "Please try looking for it on the Video Card or on the Internet. "
# "The capacity value defaulted to 'None'. "
# "For an integrated GPU, the VRAM may also be shared with the system RAM, so an empty value is acceptable.")
result = {
"type": "graphics-card",
"brand": gpu.reseller_brand.strip(),
"model": gpu.model.strip(),
"internal-name": gpu.internal_name.strip(),
"capacity-byte": gpu.capacity,
"working": "yes", # Indeed it is working
}
if gpu.manufacturer_brand is not None and gpu.reseller_brand is not None:
if gpu.manufacturer_brand.lower() != gpu.reseller_brand.lower():
result["brand-manufacturer"] = gpu.manufacturer_brand
return result
if __name__ == "__main__":
import argparse
import json
parser = argparse.ArgumentParser(description="Parse lspci/glxinfo output")
parser.add_argument("lspci", type=str, nargs=1, help="path to lspci output")
parser.add_argument("glxinfo", type=str, nargs=1, help="path to glxinfo output")
parser.add_argument(
"-d",
"--dedicated",
action="store_true",
default=False,
help="computer has dedicated GPU",
)
args = parser.parse_args()
try:
print(
json.dumps(
read_lspci_and_glxinfo(args.dedicated, args.lspci[0], args.glxinfo[0]),
indent=2,
)
)
except InputFileNotFoundError as e:
print(str(e))
exit(1)
| [((11116, 11181), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse lspci/glxinfo output"""'}), "(description='Parse lspci/glxinfo output')\n", (11139, 11181), False, 'import argparse\n'), ((577, 611), 'InputFileNotFoundError.InputFileNotFoundError', 'InputFileNotFoundError', (['lspci_path'], {}), '(lspci_path)\n', (599, 611), False, 'from InputFileNotFoundError import InputFileNotFoundError\n'), ((7596, 7632), 'InputFileNotFoundError.InputFileNotFoundError', 'InputFileNotFoundError', (['glxinfo_path'], {}), '(glxinfo_path)\n', (7618, 7632), False, 'from InputFileNotFoundError import InputFileNotFoundError\n'), ((3542, 3570), 're.search', 're.search', (['"""\\\\d+"""', 'tmp_model'], {}), "('\\\\d+', tmp_model)\n", (3551, 3570), False, 'import re\n')] |
snymainn/tools- | upload.py | af57a1a4d0f1aecff33ab28c6f27acc893f37fbc | #!/usr/bin/python
import sys
from loglib import SNYLogger
import ftplib
import argparse
import re
import os
import calendar
import time
def read_skipfile(infile, log):
skiplines = list()
skipfile = open(infile, 'r')
for line in skipfile:
newline = line.rstrip('\r\n')
linelength = len(newline)
if linelength>0:
log.debug("Adding "+newline+" to skiplines")
tmpobjects = re.compile(newline)
skiplines.append(tmpobjects)
skipfile.close()
return skiplines
#GET LOCAL FILELIST
def get_local_files(localpath,log):
locallist = list()
os.chdir(localpath)
log.debug("*** GETTING LOCAL FILELIST ***")
for name in os.listdir("."):
if (not name.startswith('.')):
statinfo = os.stat(name)
if (statinfo.st_mode>=32768):
entrytype = "file"
else:
entrytype = "dir"
size = statinfo.st_size
date = statinfo.st_mtime
log.debug("Date:"+str(int(date))+" type:"+entrytype+", name:"+name+" size:"+str(size))
locallist.append({'name':name,'type':entrytype,'modify':int(date),'size':size})
return locallist
#
# login to ftp server
#
def ftp_login(args, log):
ftp = ftplib.FTP()
port = 21
ftp.connect(args.host, port)
try:
log.debug("Logging in...")
ftp.login(args.user, args.password)
log.debug(ftp.getwelcome())
except ftplib.error_perm, resp:
log.logprint(str(resp))
except:
log.logprint("Login section failed..")
return ftp
#
# get remote files
#
def get_remote_files(ftp, remotepath, args, log):
# LIST CONTENTS
contents = list()
dirlist = list()
log.debug("*** GET REMOTE FILELIST ***")
try:
ftp.cwd(remotepath)
# Entry point
ftp.retrlines('MLSD', contents.append)
for line in contents:
# log.debug(line)
entry = line.split(";")
size = "0" #Set this because directories does not report size
for item in entry:
cell = item.split("=")
if (cell[0]=="modify"):
date = cell[1]
modify=calendar.timegm(time.strptime(str(date), "%Y%m%d%H%M%S"))
#for loops/if checks are not blocks in python, i.e. no need to predefine modify
if (cell[0]=="type"):
entrytype=cell[1]
if (cell[0]=="size"):
size = cell[1]
if (len(cell[0])>0) and cell[0].startswith(' '):
#If string does not contain =, cell[1] will not be defined
#and first entry in cell[0] string will be whitespace
name = cell[0].lstrip()
log.debug("Date:"+str(modify)+" type:"+entrytype+" Name:"+name+" size:"+size)
if (entrytype=='file' or entrytype=='dir'): #Do not include current and parent dir entries
dirlist.append({'name':name,'type':entrytype,'modify':int(modify),'size':size})
except ftplib.error_perm, resp:
log.logprint(str(resp))
exit(1)
return dirlist
def touch(fname):
try:
os.utime(fname, None)
except:
log.logprint("Updating mtime failed, "+fname+" does not exist")
def sync_files(ftp, args, skiplines, localpath, remotepath, log):
locallist = get_local_files(localpath,log)
remotelist = get_remote_files(ftp, remotepath, args, log)
#Create dictionaries for easy lookup
localdict = {}
index = 0
for lfile in locallist:
localdict[lfile['name']]=index
index+=1
remotedict = {}
index = 0
for rfile in remotelist:
remotedict[rfile['name']]=index
index+=1
# Traverse local filelist and
# check if local file is present on remote
for lfile in locallist:
#Check if file is present in skipfile
#If present in skipfile, skip to next file in locallist
skiptonext = False
for p in skiplines:
m=p.match(lfile['name'])
if (m):
#log.logprint(lfile['name']+" match "+m.group()+", thus present in skipfile "+args.skipfile)
log.logprint("Skipping: "+lfile['name'])
skiptonext = True
break
if skiptonext: continue
#
#Check if remote has the local file
#if present remote, type file and modify time is older than local file, set upload flag
#
upload = False #Set to True here instead of False since this will handle the case where
#remote does not exist, i.e. always upload except when remote is present
#and up to date
if lfile['name'] in remotedict:
rfile = remotelist[remotedict[lfile['name']]] #Get fileinfo from remotelist using index
if lfile['type']=="file":
log.debug(lfile['name']+" is present remote : "+rfile['name'])
if (lfile['modify']>rfile['modify']):
log.debug("Local file is newer by "+str(lfile['modify']-rfile['modify'])+" seconds, try to upload...")
upload = True
elif lfile['type']=="dir":
log.debug(lfile['name']+" is present remote and is directory: "+rfile['name'])
sync_files(ftp, args, skiplines, lfile['name'], rfile['name'], log)
elif lfile['type']=="dir":
log.debug(lfile['name']+" is NOT present remote and is directory: ")
try:
ftp.mkd(lfile['name'])
log.logprint("CREATED DIR : "+lfile['name'])
sync_files(ftp, args, skiplines, lfile['name'], lfile['name'], log)
except ftplib.all_errors, resp:
log.logprint("ERROR: Failed to create directory "+lfile['name']+" - "+str(resp))
elif lfile['type']=="file":
log.debug(lfile['name']+" is NOT present remote and is file")
upload = True
#Handle upload flag
if (upload and lfile['type']=="file"):
try:
touch(lfile['name']) #Touch local file to set modify time to approx the same as the remote will get
ftp.storbinary('STOR '+lfile['name'], open(lfile['name'], 'rb'))
log.logprint("UPLOADED : "+lfile['name'])
except ftplib.all_errors, resp:
log.logprint("ERROR: Failed to upload "+lfile['name']+" - "+str(resp))
#Make sure locally deleted items are deleted remotely
for rfile in remotelist:
if rfile['name'] not in localdict:
if rfile['type']=="file":
#Remote file is not present locally=>Delete it
try:
ftp.delete(rfile['name'])
log.logprint("DELETED: "+rfile['name'])
except ftplib.all_errors, resp:
log.logprint("ERROR: Failed to delete "+rfile['name']+" - "+str(resp))
elif rfile['type']=="dir":
log.debug("Remote dir "+rfile['name']+" not present locally, delete it recursively")
#Remote dir is not present locally, decend and recursively delete everything
#TODO: recursive_delete(ftp, rfile['name'])
delete_recursive(ftp, args, rfile['name'], log)
ftp.cwd("..")
os.chdir("..")
def delete_recursive(ftp, args, remotepath, log):
remotelist = get_remote_files(ftp, remotepath, args, log)
#Make sure locally deleted items are deleted remotely
for rfile in remotelist:
if rfile['type']=="file":
try:
ftp.delete(rfile['name'])
log.logprint("DELETED: "+rfile['name'])
except ftplib.all_errors, resp:
log.logprint("ERROR: Failed to delete "+rfile['name']+" - "+str(resp))
elif rfile['type']=="dir":
log.debug("Remote dir "+rfile['name']+" not present locally, delete it recursively")
delete_recursive(ftp, args, rfile['name'], log)
ftp.cwd("..")
try:
ftp.rmd(remotepath)
log.logprint("DELETED DIR: "+remotepath)
except ftplib.all_errors, resp:
log.logprint("ERROR: Failed to delete directory "+remotepath+" - "+str(resp))
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--host", help="ftp hostname", required=True)
parser.add_argument("-u", "--user", help="username on ftp server", required=True)
parser.add_argument("-p", "--password", help="password", required=True)
parser.add_argument("-d", "--debug",
help="print debug to terminal, default 0, use multiple times to increase verbosity, i.e. -d -d",
action="count")
parser.add_argument("-b", "--basedir", help="Toplevel directory on ftp server, default www")
parser.add_argument("-t", "--path", help="Local toplevel directory, default ., i.e. current dir")
parser.add_argument("-s", "--skipfile", help="Do not upload files in <skipfile>, default name upload.skip")
parser.set_defaults(debug=0)
parser.set_defaults(skipfile="upload.skip")
parser.set_defaults(basedir="www")
parser.set_defaults(path=".")
args = parser.parse_args()
log = SNYLogger(basename="upload", size_limit=10, no_logfiles=2, stdout=args.debug)
skiplines = read_skipfile(args.skipfile, log)
ftp = ftp_login(args, log)
sync_files(ftp, args, skiplines, args.path, args.basedir, log)
ftp.quit()
| [] |
chris-zen/phd-thesis | chapter2/intogen-arrays/src/mrna/mrna_comb_gene_classif.py | 1eefdff8e7ca1910304e27ae42551dc64496b101 | #!/usr/bin/env python
"""
Classify oncodrive gene results and prepare for combination
* Configuration parameters:
- The ones required by intogen.data.entity.EntityManagerFactory
* Input:
- oncodrive_ids: The mrna.oncodrive_genes to process
* Output:
- combinations: The mrna.combination prepared to be calculated
* Entities:
- mrna.oncodrive_genes
- mrna.combination
"""
import uuid
import json
from wok.task import Task
from wok.element import DataElement
from intogen.data.entity.server import EntityServer
from intogen.data.entity import types
def run(task):
# Initialization
task.check_conf(["entities"])
conf = task.conf
log = task.logger()
task.check_in_ports(["oncodrive_ids"])
task.check_out_ports(["combinations"])
oncodrive_port = task.ports["oncodrive_ids"]
combination_port = task.ports["combinations"]
es = EntityServer(conf["entities"])
em = es.manager()
log.info("Indexing available combination results ...")
comb_results_index = em.group_ids(
["icdo_topography", "icdo_morphology", "id_type"],
types.MRNA_COMBINATION, unique = True)
ENSEMBL_GENE = "ensembl:gene"
classif = {}
log.info("Classifying oncodrive results ...")
for oid in oncodrive_port:
o = em.find(oid, types.MRNA_ONCODRIVE_GENES)
if o is None:
log.error("{0} not found: {1}".format(types.MRNA_ONCODRIVE_GENES, oid))
continue
okey = (o["study_id"], o["platform_id"], o["icdo_topography"], o["icdo_morphology"])
key = (o["icdo_topography"], o["icdo_morphology"], ENSEMBL_GENE)
log.debug("Oncodrive results ({0}) [{1}] classified into ({2}) ...".format(", ".join(okey), oid, ", ".join(key)))
if key in classif:
classif[key] += [o]
else:
classif[key] = [o]
log.info("Preparing combinations ...")
for key in sorted(classif):
if key in comb_results_index:
cid = comb_results_index[key][0]
c = em.find(cid, types.MRNA_COMBINATION)
if c is None:
log.error("{0} not found: {1}".format(types.MRNA_COMBINATION, cid))
return
else:
c = DataElement(key_sep = "/")
c["id"] = cid = str(uuid.uuid4())
c["icdo_topography"] = key[0]
c["icdo_morphology"] = key[1]
c["id_type"] = ENSEMBL_GENE
olist = classif[key]
log.info("({0}) [{1}] --> {2} results".format(", ".join(key), cid, len(olist)))
ids = c.create_list()
flist = c.create_list()
for o in olist:
ids += [o["id"]]
flist += [o["results_file"]]
c["source"] = src = c.create_element()
src["type"] = types.MRNA_ONCODRIVE_GENES
src["ids"] = ids
c["files"] = flist
combination_port.write(json.dumps(c.to_native()))
em.close()
if __name__ == "__main__":
Task(run).start()
| [((849, 879), 'intogen.data.entity.server.EntityServer', 'EntityServer', (["conf['entities']"], {}), "(conf['entities'])\n", (861, 879), False, 'from intogen.data.entity.server import EntityServer\n'), ((2006, 2030), 'wok.element.DataElement', 'DataElement', ([], {'key_sep': '"""/"""'}), "(key_sep='/')\n", (2017, 2030), False, 'from wok.element import DataElement\n'), ((2619, 2628), 'wok.task.Task', 'Task', (['run'], {}), '(run)\n', (2623, 2628), False, 'from wok.task import Task\n'), ((2056, 2068), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2066, 2068), False, 'import uuid\n')] |
CDCgov/prime-public-health-data-infrastructure | src/FunctionApps/DevOps/tests/test_get_ip.py | 7e4849c3a486a84e94765bf0023b80261c510c57 | def test_get_ip_placeholder():
"""placeholder so pytest does not fail"""
pass
| [] |
Laurenhut/Machine_Learning_Final | data/models/svm_benchmark.py | 4fca33754ef42acde504cc64e6bbe4e463caadf8 | #!/usr/bin/env python
from sklearn import svm
import csv_io
def main():
training, target = csv_io.read_data("../Data/train.csv")
training = [x[1:] for x in training]
target = [float(x) for x in target]
test, throwaway = csv_io.read_data("../Data/test.csv")
test = [x[1:] for x in test]
svc = svm.SVC(kernel='poly', degree=2)
scores = cross_val_score(rf, training, target, cv=10)
print np.mean(scores)
# svc.fit(training, target)
# predicted_probs = svc.predict_proba(test)
# predicted_probs = [[min(max(x,0.001),0.999) for x in y]
# for y in predicted_probs]
# predicted_probs = [["%f" % x for x in y] for y in predicted_probs]
# csv_io.write_delimited_file("../Submissions/svm_benchmark.csv",
# predicted_probs)
if __name__=="__main__":
main()
| [] |
user-wu/SOD_eval_metrics | configs/utils/config_generator.py | d5b8804580cb52a4237c8e613818d10591dc6597 | # -*- coding: utf-8 -*-
from matplotlib import colors
# max = 148
_COLOR_Genarator = iter(
sorted(
[
color
for name, color in colors.cnames.items()
if name not in ["red", "white"] or not name.startswith("light") or "gray" in name
]
)
)
def curve_info_generator():
line_style_flag = True
def _template_generator(
method_info: dict, method_name: str, line_color: str = None, line_width: int = 3
) -> dict:
nonlocal line_style_flag
template_info = dict(
path_dict=method_info,
curve_setting=dict(
line_style="-" if line_style_flag else "--",
line_label=method_name,
line_width=line_width,
),
)
print(method_name)
if method_name == "Ours":
template_info["curve_setting"]["line_color"] = 'red'
template_info["curve_setting"]["line_style"] = '-'
# line_style_flag = not line_style_flag
else:
if line_color is not None:
template_info["curve_setting"]["line_color"] = line_color
else:
template_info["curve_setting"]["line_color"] = next(_COLOR_Genarator)
line_style_flag = not line_style_flag
return template_info
return _template_generator
def simple_info_generator():
def _template_generator(method_info: dict, method_name: str) -> dict:
template_info = dict(path_dict=method_info, label=method_name)
return template_info
return _template_generator
| [((164, 185), 'matplotlib.colors.cnames.items', 'colors.cnames.items', ([], {}), '()\n', (183, 185), False, 'from matplotlib import colors\n')] |
kartik1000/jcc-registration-portal | core/sms_service.py | 053eade1122fa760ae112a8599a396d68dfb16b8 | from urllib.parse import urlencode
from decouple import config
import hashlib
import requests
BASE = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
auth_key = config('AUTH_KEY')
url = 'http://sms.globehost.com/api/sendhttp.php?'
def encode_base(num, array=BASE):
if(num == 0):
return array[0]
retarr = []
base = len(array)
while num:
num, res = divmod(num, base)
retarr.append(array[res])
retarr.reverse()
return ''.join(retarr)[:6]
def generate(alphanum):
short = (hashlib.md5(alphanum.encode())).hexdigest()
short = int(short, 16)
short = encode_base(short)
return short
def send_message(team_name, team_id, contact):
message = 'Your unique team ID for Junior Code Cracker 2k18 is ' + \
team_id + '.Kindly take note and submit this at the event.'
data = {
'authkey': auth_key,
'mobiles': contact,
'message': message,
'sender': 'GNULUG',
'route': '4',
}
data_encoded = urlencode(data)
r = requests.get(url + data_encoded)
print('Message Sent Successfully !!')
return r.status_code
| [((178, 196), 'decouple.config', 'config', (['"""AUTH_KEY"""'], {}), "('AUTH_KEY')\n", (184, 196), False, 'from decouple import config\n'), ((1026, 1041), 'urllib.parse.urlencode', 'urlencode', (['data'], {}), '(data)\n', (1035, 1041), False, 'from urllib.parse import urlencode\n'), ((1050, 1082), 'requests.get', 'requests.get', (['(url + data_encoded)'], {}), '(url + data_encoded)\n', (1062, 1082), False, 'import requests\n')] |
Protagonistss/sanic-for-v3 | scripts/fetch_images.py | ba7e94273b77914b8d85d67cf513041ada00780d | import sys
import os
sys.path.append(os.pardir)
import random
import time
import requests
from contextlib import closing
from help import utils
from threading import Thread
def get_train_set_path(path: str):
create_path = utils.join_root_path(path)
return create_path
def create_train_set_dir(path='auth-set'):
create_path = get_train_set_path(path)
is_existed = os.path.exists(create_path)
if not is_existed:
os.mkdir(create_path)
def gen_image_name(char_pool):
prefix = ''
for i in range(4):
prefix += random.choice(char_pool)
suffix = str(time.time()).replace('.', '')
return "{}_{}".format(prefix, suffix)
def gen_image_all_url(path):
rule = '0123456789'
return '{}/{}.png'.format(path, gen_image_name(rule))
def get_image(url, count=20000, path='auth-set'):
create_train_set_dir(path)
for loop in range(count):
response = requests.get(url, verify=False, stream=True)
with closing(response) as response:
with open(gen_image_all_url(get_train_set_path(path)), 'wb') as f:
for i in response.iter_content(chunk_size=512):
f.write(i)
print('第{}张图片保存成功'.format(loop + 1))
def main():
get_image('https://gray.930pm.cn/home.php/Login/verify_c', path='auth-set')
if __name__ == '__main__':
t1 = Thread(target=main)
t2 = Thread(target=main)
t3 = Thread(target=main)
t4 = Thread(target=main)
t1.start()
t2.start()
t3.start()
t4.start()
| [((22, 48), 'sys.path.append', 'sys.path.append', (['os.pardir'], {}), '(os.pardir)\n', (37, 48), False, 'import sys\n'), ((230, 256), 'help.utils.join_root_path', 'utils.join_root_path', (['path'], {}), '(path)\n', (250, 256), False, 'from help import utils\n'), ((385, 412), 'os.path.exists', 'os.path.exists', (['create_path'], {}), '(create_path)\n', (399, 412), False, 'import os\n'), ((1355, 1374), 'threading.Thread', 'Thread', ([], {'target': 'main'}), '(target=main)\n', (1361, 1374), False, 'from threading import Thread\n'), ((1384, 1403), 'threading.Thread', 'Thread', ([], {'target': 'main'}), '(target=main)\n', (1390, 1403), False, 'from threading import Thread\n'), ((1413, 1432), 'threading.Thread', 'Thread', ([], {'target': 'main'}), '(target=main)\n', (1419, 1432), False, 'from threading import Thread\n'), ((1442, 1461), 'threading.Thread', 'Thread', ([], {'target': 'main'}), '(target=main)\n', (1448, 1461), False, 'from threading import Thread\n'), ((444, 465), 'os.mkdir', 'os.mkdir', (['create_path'], {}), '(create_path)\n', (452, 465), False, 'import os\n'), ((556, 580), 'random.choice', 'random.choice', (['char_pool'], {}), '(char_pool)\n', (569, 580), False, 'import random\n'), ((915, 959), 'requests.get', 'requests.get', (['url'], {'verify': '(False)', 'stream': '(True)'}), '(url, verify=False, stream=True)\n', (927, 959), False, 'import requests\n'), ((973, 990), 'contextlib.closing', 'closing', (['response'], {}), '(response)\n', (980, 990), False, 'from contextlib import closing\n'), ((598, 609), 'time.time', 'time.time', ([], {}), '()\n', (607, 609), False, 'import time\n')] |
haoruilee/statslibrary | GeneralStats/example.py | 01494043bc7fb82d4aa6d7d550a4e7dc2ac0503a | import GeneralStats as gs
import numpy as np
from scipy.stats import skew
from scipy.stats import kurtosistest
import pandas as pd
if __name__ == "__main__":
gen=gs.GeneralStats()
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
print("data = ", data)
print("data1 = ", data1)
res=gen.average(data,rowvar=True)
res1=gen.average(data1,rowvar=True)
print("data平均值 = ",res)
print("data1平均值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.median(data,rowvar=True)
res1=gen.median(data1,rowvar=True)
print("data中位值 = ",res)
print("data1中位值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.mode(data,rowvar=True)
res1=gen.mode(data1,rowvar=True)
print("data众数值 = ",res)
print("data1众数值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.quantile(data,0.5,rowvar=True,interpolation='lower') #若元素个数为偶数,则模式为'midpoint'的0.5分位数值等价于中位数
res1=gen.quantile(data1,0.5,rowvar=True,interpolation='lower') #若元素个数为奇数,则模式为'lower'的0.5分位数值等价于中位数
print("data 0.5分位数值 = ",res)
print("data1 0.5分位数值 = ",res1)
res=gen.quantile(data,0.25,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,0.25,rowvar=True,interpolation='lower')
print("data 0.25分位数值s = ",res)
print("data1 0.25分位数值 = ",res1)
res=gen.quantile(data,0.75,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,0.75,rowvar=True,interpolation='lower')
print("data 0.75分位数值 = ",res)
print("data1 0.75分位数值 = ",res1)
res=gen.quantile(data,1.0,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,1.0,rowvar=True,interpolation='lower')
print("data 1.0分位数值 = ",res)
print("data1 1.0分位数值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.range(data,rowvar=True)
res1=gen.range(data1,rowvar=True)
print("data极差 = ",res)
print("data1极差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.variance(data,rowvar=True)
res1=gen.variance(data1,rowvar=True)
print("data方差 = ",res)
print("data1方差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.standard_dev(data,rowvar=True)
res1=gen.standard_dev(data1,rowvar=True)
print("data标准差 = ",res)
print("data1标准差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.skewness(data,rowvar=True)
res1=gen.skewness(data1,rowvar=True)
print("data偏度 = ",res)
print("data1偏度 = ",res1)
res=np.array([skew(data[0]),skew(data[1]),skew(data[2]),skew(data[3])])
print("使用scipy skew方法验证的data偏度 = ",res)
res1=np.array(skew(data1))
print("使用scipy skew方法验证的data1偏度 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([53, 61, 49, 66, 78, 47])
res=gen.kurtosis(data,rowvar=True)
res1=gen.kurtosis(data1,rowvar=True)
print("data峰度 = ",res)
print("data1峰度 = ",res1)
data_0=pd.Series(data[0])
data_1=pd.Series(data[1])
data_2=pd.Series(data[2])
data_3=pd.Series(data[3])
print("使用pandas kurt方法验证的data峰度 = ",[data_0.kurt(),data_1.kurt(),data_2.kurt(),data_3.kurt()])
data1=pd.Series(data1)
print("使用pandas kurt方法验证的data1峰度 = ",data1.kurt())
| [((178, 195), 'GeneralStats.GeneralStats', 'gs.GeneralStats', ([], {}), '()\n', (193, 195), True, 'import GeneralStats as gs\n'), ((208, 286), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (216, 286), True, 'import numpy as np\n'), ((295, 320), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (303, 320), True, 'import numpy as np\n'), ((531, 609), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (539, 609), True, 'import numpy as np\n'), ((618, 643), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (626, 643), True, 'import numpy as np\n'), ((790, 868), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (798, 868), True, 'import numpy as np\n'), ((877, 902), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (885, 902), True, 'import numpy as np\n'), ((1045, 1123), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (1053, 1123), True, 'import numpy as np\n'), ((1132, 1157), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (1140, 1157), True, 'import numpy as np\n'), ((2074, 2152), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (2082, 2152), True, 'import numpy as np\n'), ((2161, 2186), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2169, 2186), True, 'import numpy as np\n'), ((2329, 2407), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (2337, 2407), True, 'import numpy as np\n'), ((2416, 2441), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2424, 2441), True, 'import numpy as np\n'), ((2590, 2668), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (2598, 2668), True, 'import numpy as np\n'), ((2677, 2702), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2685, 2702), True, 'import numpy as np\n'), ((2861, 2939), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (2869, 2939), True, 'import numpy as np\n'), ((2948, 2973), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2956, 2973), True, 'import numpy as np\n'), ((3323, 3401), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (3331, 3401), True, 'import numpy as np\n'), ((3410, 3444), 'numpy.array', 'np.array', (['[53, 61, 49, 66, 78, 47]'], {}), '([53, 61, 49, 66, 78, 47])\n', (3418, 3444), True, 'import numpy as np\n'), ((3597, 
3615), 'pandas.Series', 'pd.Series', (['data[0]'], {}), '(data[0])\n', (3606, 3615), True, 'import pandas as pd\n'), ((3628, 3646), 'pandas.Series', 'pd.Series', (['data[1]'], {}), '(data[1])\n', (3637, 3646), True, 'import pandas as pd\n'), ((3659, 3677), 'pandas.Series', 'pd.Series', (['data[2]'], {}), '(data[2])\n', (3668, 3677), True, 'import pandas as pd\n'), ((3690, 3708), 'pandas.Series', 'pd.Series', (['data[3]'], {}), '(data[3])\n', (3699, 3708), True, 'import pandas as pd\n'), ((3820, 3836), 'pandas.Series', 'pd.Series', (['data1'], {}), '(data1)\n', (3829, 3836), True, 'import pandas as pd\n'), ((3251, 3262), 'scipy.stats.skew', 'skew', (['data1'], {}), '(data1)\n', (3255, 3262), False, 'from scipy.stats import skew\n'), ((3129, 3142), 'scipy.stats.skew', 'skew', (['data[0]'], {}), '(data[0])\n', (3133, 3142), False, 'from scipy.stats import skew\n'), ((3143, 3156), 'scipy.stats.skew', 'skew', (['data[1]'], {}), '(data[1])\n', (3147, 3156), False, 'from scipy.stats import skew\n'), ((3157, 3170), 'scipy.stats.skew', 'skew', (['data[2]'], {}), '(data[2])\n', (3161, 3170), False, 'from scipy.stats import skew\n'), ((3171, 3184), 'scipy.stats.skew', 'skew', (['data[3]'], {}), '(data[3])\n', (3175, 3184), False, 'from scipy.stats import skew\n')] |
tqchen/yarn-ec2 | bootstrap.py | 303f3980ad41770011b72532ed9f7c6bbe876508 | #!/usr/bin/env python
# encoding: utf-8
"""
script to install all the necessary things
for working on a linux machine with nothing
Installing minimum dependencies
"""
import sys
import os
import logging
import subprocess
import xml.etree.ElementTree as ElementTree
import xml.dom.minidom as minidom
import socket
import time
import pwd
###---------------------------------------------------##
# Configuration Section, will be modified by script #
###---------------------------------------------------##
node_apt_packages = [
'emacs',
'git',
'g++',
'make',
'python-numpy',
'libprotobuf-dev',
'libcurl4-openssl-dev']
# master only packages
master_apt_packages = [
'protobuf-compiler']
# List of r packages to be installed in master
master_r_packages = [
'r-base-dev',
'r-base',
'r-cran-statmod',
'r-cran-RCurl',
'r-cran-rjson'
]
# download link of hadoop.
hadoop_url = 'http://apache.claz.org/hadoop/common/hadoop-2.8.0/hadoop-2.8.0.tar.gz'
hadoop_dir = 'hadoop-2.8.0'
# customized installation script.
# See optional installation scripts for options.
def custom_master_install():
#install_spark()
#install_r()
pass
# customized installation script for all nodes.
def custom_all_nodes_install():
install_gcc()
pass
###---------------------------------------------------##
# Automatically set by script #
###---------------------------------------------------##
USER_NAME = 'ubuntu'
# setup variables
MASTER = os.getenv('MY_MASTER_DNS', '')
# node type the type of current node
NODE_TYPE = os.getenv('MY_NODE_TYPE', 'm3.xlarge')
NODE_VMEM = int(os.getenv('MY_NODE_VMEM', str(1024*15)))
NODE_VCPU = int(os.getenv('MY_NODE_VCPU', '4'))
AWS_ID = os.getenv('AWS_ACCESS_KEY_ID', 'undefined')
AWS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', 'undefined')
JAVA_HOME = os.getenv('JAVA_HOME')
HADOOP_HOME = os.getenv('HADOOP_HOME')
DISK_LIST = [('xvd' + chr(ord('b') + i)) for i in range(10)]
ENVIRON = os.environ.copy()
###--------------------------------##
# Optional installation scripts. #
###--------------------------------##
def install_r():
if master_r_packages:
sudo("apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E084DAB9")
sudo("echo deb https://cran.r-project.org/bin/linux/ubuntu trusty/ >>/etc/apt/sources.list")
sudo('apt-get -y update')
sudo('apt-get -y install %s' % (' '.join(master_r_packages)))
def install_spark():
run('wget https://www.apache.org/dist/spark/spark-2.1.1/spark-2.1.1-bin-hadoop2.7.tgz')
run('tar xf spark-2.1.1-bin-hadoop2.7.tgz')
run('rm -rf spark-2.1.1-bin-hadoop2.7.tgz')
with open('.bashrc', 'a') as fo:
fo.write('\nexport PATH=${PATH}:spark-2.1.1-bin-hadoop2.7\n')
def install_xgboost():
run('git clone --recursive https://github.com/dmlc/xgboost')
run('cd xgboost; cp make/config.mk .; echo USE_S3=1 >> config.mk; make -j4')
### Script section ###
def run(cmd):
try:
print cmd
logging.info(cmd)
proc = subprocess.Popen(cmd, shell=True, env = ENVIRON,
stdout=subprocess.PIPE, stderr = subprocess.PIPE)
out, err = proc.communicate()
retcode = proc.poll()
if retcode != 0:
logging.error('Command %s returns %d' % (cmd,retcode))
logging.error(out)
logging.error(err)
else:
print out
except Exception as e:
print(str(e))
logging.error('Exception running: %s' % cmd)
logging.error(str(e))
pass
def sudo(cmd):
run('sudo %s' % cmd)
### Installation helpers ###
def install_packages(pkgs):
sudo('apt-get -y update')
sudo('apt-get -y install %s' % (' '.join(pkgs)))
# install g++4.9, needed for regex match.
def install_gcc():
sudo('add-apt-repository -y ppa:ubuntu-toolchain-r/test')
sudo('apt-get -y update')
sudo('apt-get -y install g++-4.9')
def install_java():
"""
install java and setup environment variables
Returns environment variables that needs to be exported
"""
if not os.path.exists('jdk1.8.0_131'):
run('wget --no-check-certificate --no-cookies'\
' --header \"Cookie: oraclelicense=accept-securebackup-cookie\"'\
' http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz')
run('tar xf jdk-8u131-linux-x64.tar.gz')
run('rm -f jdk-8u131-linux-x64.tar.gz')
global JAVA_HOME
if JAVA_HOME is None:
JAVA_HOME = os.path.abspath('jdk1.8.0_131')
return [('JAVA_HOME', JAVA_HOME)]
def install_hadoop(is_master):
def update_site(fname, rmap):
"""
update the site script
"""
try:
tree = ElementTree.parse(fname)
root = tree.getroot()
except Exception:
cfg = ElementTree.Element("configuration")
tree = ElementTree.ElementTree(cfg)
root = tree.getroot()
rset = set()
for prop in root.getiterator('property'):
prop = dict((p.tag, p) for p in prop)
name = prop['name'].text.strip()
if name in rmap:
prop['value'].text = str(rmap[name])
rset.add(name)
for name, text in rmap.iteritems():
if name in rset:
continue
prop = ElementTree.SubElement(root, 'property')
ElementTree.SubElement(prop, 'name').text = name
ElementTree.SubElement(prop, 'value').text = str(text)
rough_string = ElementTree.tostring(root, 'utf-8')
reparsed = minidom.parseString(rough_string)
pretty = reparsed.toprettyxml(indent='\t')
fo = open(fname, 'w')
fo.write(pretty)
fo.close()
def setup_hadoop_site(master, hadoop_dir, hdfs_dir, vcpu, vmem):
"""
setup hadoop side given the parameters
Parameters
----------
master: the dns to master uri
hadoop_dir: the directory to store temp files
hdfs_dir: the directories for hdfs
vcpu: the number of cpus current machine have
vmem: the memory(MB) current machine have
"""
if vmem < 4 * 1024:
reserved_ram = 256
elif vmem < 8 * 1024:
reserved_ram = 1 * 1024
elif vmem < 24 * 1024 :
reserved_ram = 2 * 1024
elif vmem < 48 * 1024:
reserved_ram = 2 * 1024
elif vmem < 64 * 1024:
reserved_ram = 6 * 1024
else:
reserved_ram = 8 * 1024
ram_per_container = (vmem - reserved_ram) / vcpu
if is_master:
vcpu = vcpu - 2
tmp_dir = hadoop_dir[0]
core_site = {
'fs.defaultFS': 'hdfs://%s:9000/' % master,
'fs.s3n.impl': 'org.apache.hadoop.fs.s3native.NativeS3FileSystem',
'hadoop.tmp.dir': tmp_dir
}
if AWS_ID != 'undefined':
core_site['fs.s3n.awsAccessKeyId'] = AWS_ID
core_site['fs.s3n.awsSecretAccessKey'] = AWS_KEY
update_site('%s/etc/hadoop/core-site.xml' % HADOOP_HOME, core_site)
hdfs_site = {
'dfs.data.dir': ','.join(['%s/data' % d for d in hdfs_dir]),
'dfs.permissions': 'false',
'dfs.replication': '1'
}
update_site('%s/etc/hadoop/hdfs-site.xml' % HADOOP_HOME, hdfs_site)
yarn_site = {
'yarn.resourcemanager.resource-tracker.address': '%s:8025' % master,
'yarn.resourcemanager.scheduler.address': '%s:8030' % master,
'yarn.resourcemanager.address': '%s:8032' % master,
'yarn.scheduler.minimum-allocation-mb': 512,
'yarn.scheduler.maximum-allocation-mb': 640000,
'yarn.scheduler.minimum-allocation-vcores': 1,
'yarn.scheduler.maximum-allocation-vcores': 32,
'yarn.nodemanager.resource.memory-mb': vcpu * ram_per_container,
'yarn.nodemanager.resource.cpu-vcores': vcpu,
'yarn.log-aggregation-enable': 'true',
'yarn.nodemanager.vmem-check-enabled': 'false',
'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
'yarn.nodemanager.aux-services.mapreduce.shuffle.class': 'org.apache.hadoop.mapred.ShuffleHandler',
'yarn.nodemanager.remote-app-log-dir': os.path.join(tmp_dir, 'logs'),
'yarn.nodemanager.log-dirs': os.path.join(tmp_dir, 'userlogs'),
'yarn.nodemanager.local-dirs': ','.join(['%s/yarn/nm-local-dir' % d for d in hadoop_dir])
}
update_site('%s/etc/hadoop/yarn-site.xml' % HADOOP_HOME, yarn_site)
mapred_site = {
'mapreduce.application.classpath' : ':'.join(['$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*',
'$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*',
'$HADOOP_MAPRED_HOME/share/hadoop/tools/lib/*']),
'yarn.app.mapreduce.am.resource.mb': 2 * ram_per_container,
'yarn.app.mapreduce.am.command-opts': '-Xmx%dm' % int(0.8 * 2 * ram_per_container),
'mapreduce.framework.name': 'yarn',
'mapreduce.map.cpu.vcores': 1,
'mapreduce.map.memory.mb': ram_per_container,
'mapreduce.map.java.opts': '-Xmx%dm' % int(0.8 * ram_per_container),
'mapreduce.reduce.cpu.vcores': 1,
'mapreduce.reduce.memory.mb': 2 * ram_per_container,
'mapreduce.reduce.java.opts': '-Xmx%dm' % int(0.8 * ram_per_container)
}
update_site('%s/etc/hadoop/mapred-site.xml' % HADOOP_HOME, mapred_site)
capacity_site = {
'yarn.scheduler.capacity.resource-calculator': 'org.apache.hadoop.yarn.util.resource.DominantResourceCalculator'
}
update_site('%s/etc/hadoop/capacity-scheduler.xml' % HADOOP_HOME, capacity_site)
fo = open('%s/etc/hadoop/hadoop-env.sh' % HADOOP_HOME, 'w')
fo.write('export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$HADOOP_PREFIX/share/hadoop/tools/lib/*\n')
fo.write('export HADOOP_LOG_DIR=%s/log\n' % tmp_dir)
fo.write('export YARN_LOG_DIR=%s/log\n' % tmp_dir)
fo.write('export JAVA_HOME=\"%s\"\n' % JAVA_HOME)
fo.close()
fo = open('%s/etc/hadoop/slaves' % HADOOP_HOME, 'w')
fo.write(master + '\n')
fo.close()
def run_install():
if not os.path.exists('hadoop-2.8.0'):
run('wget %s' % hadoop_url)
run('tar xf hadoop-2.8.0.tar.gz')
run('rm -f hadoop-2.8.0.tar.gz')
global HADOOP_HOME
if HADOOP_HOME is None:
HADOOP_HOME = os.path.abspath('hadoop-2.8.0')
env = [('HADOOP_HOME', HADOOP_HOME)]
env += [('HADOOP_PREFIX', HADOOP_HOME)]
env += [('HADOOP_MAPRED_HOME', HADOOP_HOME)]
env += [('HADOOP_COMMON_HOME', HADOOP_HOME)]
env += [('HADOOP_HDFS_HOME', HADOOP_HOME)]
env += [('YARN_HOME', HADOOP_HOME)]
env += [('YARN_CONF_DIR', '%s/etc/hadoop' % HADOOP_HOME)]
env += [('HADOOP_CONF_DIR', '%s/etc/hadoop' % HADOOP_HOME)]
disks = ['/disk/%s' % d for d in DISK_LIST if os.path.exists('/dev/%s' % d)]
setup_hadoop_site(MASTER,
['%s/hadoop' % d for d in disks],
['%s/hadoop/dfs' % d for d in disks],
NODE_VCPU, NODE_VMEM)
return env
return run_install()
def regsshkey(fname):
for dns in (open(fname).readlines() + ['localhost', '0.0.0.0']):
try:
run('ssh-keygen -R %s' % dns.strip())
except:
pass
run('ssh-keyscan %s >> ~/.ssh/known_hosts' % dns.strip())
# main script to install all dependencies
def install_main(is_master):
if is_master:
install_packages(master_apt_packages + node_apt_packages)
else:
install_packages(node_apt_packages)
env = []
env += install_java()
env += install_hadoop(is_master)
path = ['$HADOOP_HOME/bin', '$HADOOP_HOME/sbin', '$JAVA_HOME/bin']
env += [('LD_LIBRARY_PATH', '$HADOOP_HOME/native/lib')]
env += [('LD_LIBRARY_PATH', '${LD_LIBRARY_PATH}:$HADOOP_HDFS_HOME/lib/native:$JAVA_HOME/jre/lib/amd64/server')]
env += [('LD_LIBRARY_PATH', '${LD_LIBRARY_PATH}:/usr/local/lib')]
env += [('LIBHDFS_OPTS', '--Xmx128m')]
env += [('MY_MASTER_DNS', MASTER)]
env += [('MY_NODE_TYPE', NODE_TYPE)]
env += [('MY_NODE_VMEM', str(NODE_VMEM))]
env += [('MY_NODE_VCPU', str(NODE_VCPU))]
if AWS_ID != 'undefined':
env += [('AWS_ACCESS_KEY_ID', AWS_ID)]
if AWS_KEY != 'undefined':
env += [('AWS_SECRET_ACCESS_KEY', AWS_KEY)]
# setup environments
fo = open('.hadoop_env', 'w')
for k, v in env:
fo.write('export %s=%s\n' % (k,v))
ENVIRON[k] = v
fo.write('export PATH=$PATH:%s\n' % (':'.join(path)))
fo.write('export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib\n')
fo.close()
for l in open('.bashrc'):
if l.find('.hadoop_env') != -1:
return
run('echo source ~/.hadoop_env >> ~/.bashrc')
# allow ssh, if they already share the key.
key_setup = """
[ -f ~/.ssh/id_rsa ] ||
(ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
"""
run(key_setup)
regsshkey('%s/etc/hadoop/slaves' % HADOOP_HOME)
# end of instalation.
# Make startup script for bulding
def make_startup_script(is_master):
assert JAVA_HOME is not None
assert HADOOP_HOME is not None
assert NODE_VCPU is not None
assert NODE_VMEM is not None
disks = []
cmds = []
if is_master:
cmds.append('$HADOOP_HOME/sbin/stop-all.sh')
for d in DISK_LIST:
if os.path.exists('/dev/%s' % d):
cmds.append('sudo umount /dev/%s' % d)
cmds.append('sudo mkfs -t ext4 /dev/%s' % d)
cmds.append('sudo mkdir -p /disk/%s' % d)
cmds.append('sudo mount /dev/%s /disk/%s' % (d, d))
disks.append('/disk/%s' % d)
for d in disks:
cmds.append('sudo mkdir -p %s/hadoop' %d)
cmds.append('sudo chown ubuntu:ubuntu %s/hadoop' % d)
cmds.append('sudo mkdir -p %s/tmp' %d)
cmds.append('sudo chown ubuntu:ubuntu %s/tmp' % d)
cmds.append('rm -rf %s/hadoop/dfs' % d)
cmds.append('mkdir %s/hadoop/dfs' % d)
cmds.append('mkdir %s/hadoop/dfs/name' % d)
cmds.append('mkdir %s/hadoop/dfs/data' % d)
# run command
if is_master:
cmds.append('$HADOOP_HOME/bin/hadoop namenode -format')
cmds.append('$HADOOP_HOME/sbin/start-all.sh')
else:
cmds.append('export HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec &&'\
' $HADOOP_HOME/sbin/yarn-daemon.sh --config $HADOOP_HOME/etc/hadoop start nodemanager')
with open('startup.sh', 'w') as fo:
fo.write('#!/bin/bash\n')
fo.write('set -v\n')
fo.write('\n'.join(cmds))
run('chmod +x startup.sh')
run('./startup.sh')
def main():
global MASTER
logging.basicConfig(filename = 'bootstrap.log', level = logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
if MASTER == '':
is_master = True
MASTER = socket.getfqdn()
logging.info('assuming master is myself as %s' % MASTER)
else:
is_master = socket.getfqdn() == MASTER
tstart = time.time()
install_main(is_master)
tmid = time.time()
logging.info('installation finishes in %g secs' % (tmid - tstart))
make_startup_script(is_master)
ENVIRON['HADOOP_HOME'] = HADOOP_HOME
ENVIRON['JAVA_HOME'] = JAVA_HOME
tend = time.time()
if is_master:
custom_master_install()
custom_all_nodes_install()
logging.info('boostrap finishes in %g secs' % (tend - tmid))
logging.info('all finishes in %g secs' % (tend - tstart))
if __name__ == '__main__':
pw_record = pwd.getpwnam(USER_NAME)
user_name = pw_record.pw_name
user_home_dir = pw_record.pw_dir
user_uid = pw_record.pw_uid
user_gid = pw_record.pw_gid
env = os.environ.copy()
cwd = user_home_dir
ENVIRON['HOME'] = user_home_dir
os.setgid(user_gid)
os.setuid(user_uid)
os.chdir(user_home_dir)
main()
| [] |
zmoon/scipy-lecture-notes | intro/matplotlib/examples/plot_good.py | 75a89ddedeb48930dbdb6fe25a76e9ef0587ae21 | """
A simple, good-looking plot
===========================
Demoing some simple features of matplotlib
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(5, 4), dpi=72)
axes = fig.add_axes([0.01, 0.01, .98, 0.98])
X = np.linspace(0, 2, 200)
Y = np.sin(2*np.pi*X)
plt.plot(X, Y, lw=2)
plt.ylim(-1.1, 1.1)
plt.grid()
plt.show()
| [((146, 167), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (160, 167), False, 'import matplotlib\n'), ((207, 241), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)', 'dpi': '(72)'}), '(figsize=(5, 4), dpi=72)\n', (217, 241), True, 'import matplotlib.pyplot as plt\n'), ((291, 313), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(200)'], {}), '(0, 2, 200)\n', (302, 313), True, 'import numpy as np\n'), ((318, 339), 'numpy.sin', 'np.sin', (['(2 * np.pi * X)'], {}), '(2 * np.pi * X)\n', (324, 339), True, 'import numpy as np\n'), ((336, 356), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {'lw': '(2)'}), '(X, Y, lw=2)\n', (344, 356), True, 'import matplotlib.pyplot as plt\n'), ((357, 376), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (365, 376), True, 'import matplotlib.pyplot as plt\n'), ((377, 387), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (385, 387), True, 'import matplotlib.pyplot as plt\n'), ((389, 399), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (397, 399), True, 'import matplotlib.pyplot as plt\n')] |
HiroakiMikami/pfio | pfio/_context.py | 1ac997dcba7babd5d91dd8c4f2793d27a6bab69b | import os
import re
from typing import Tuple
from pfio._typing import Union
from pfio.container import Container
from pfio.io import IO, create_fs_handler
class FileSystemDriverList(object):
def __init__(self):
# TODO(tianqi): dynamically create this list
# as well as the patterns upon loading the pfio module.
self.scheme_list = ["hdfs", "posix"]
self.posix_pattern = re.compile(r"file:\/\/(?P<path>.+)")
self.hdfs_pattern = re.compile(r"(?P<path>hdfs:\/\/.+)")
self.pattern_list = {"hdfs": self.hdfs_pattern,
"posix": self.posix_pattern, }
def _determine_fs_type(self, path: str) -> Tuple[str, str, bool]:
if None is not path:
for fs_type, pattern in self.pattern_list.items():
ret = pattern.match(path)
if ret:
return (fs_type, ret.groupdict()["path"], True)
return ("posix", path, False)
def format_path(self, fs: IO, path: str) -> Tuple[str, bool]:
fs_type = fs.type
if fs_type in self.pattern_list.keys():
pattern = self.pattern_list[fs_type]
ret = pattern.match(path)
if ret:
return (ret.groupdict()["path"], True)
else:
return (path, False)
else:
return (path, False)
def get_handler_from_path(self, path: str) -> Tuple[IO, str, bool]:
(fs_type, actual_path, is_URI) = self._determine_fs_type(path)
handler = create_fs_handler(fs_type)
return (handler, actual_path, is_URI)
def get_handler_for_root(self,
uri_or_handler_name: str) -> Tuple[IO, str, bool]:
if uri_or_handler_name in self.pattern_list.keys():
return (create_fs_handler(uri_or_handler_name), "", False)
else:
(new_handler, actual_path, is_URI) = self.get_handler_from_path(
uri_or_handler_name)
new_handler.root = actual_path
return (new_handler, actual_path, is_URI)
def is_supported_scheme(self, scheme: str) -> bool:
return scheme in self.scheme_list
class DefaultContext(object):
def __init__(self):
self._fs_handler_list = FileSystemDriverList()
self._root = ""
self._default_context = \
self._fs_handler_list.get_handler_for_root("posix")[0]
def set_root(self, uri_or_handler: Union[str, IO]) -> None:
# TODO(check) if root is directory
if isinstance(uri_or_handler, IO):
handler = uri_or_handler
self._root = ""
else:
(handler, self._root, is_URI) = \
self.get_handler_by_name(uri_or_handler)
assert handler is not None
if self._root:
if not handler.isdir(self._root):
raise RuntimeError("the URI does not point to a directory")
self._default_context = handler
def get_handler(self, path: str = "") -> Tuple[IO, str]:
(handler, formatted_path,
is_URI) = self._fs_handler_list.get_handler_from_path(path)
if not is_URI:
actual_path = os.path.join(self._root, formatted_path)
return (self._default_context, actual_path)
else:
return (handler, formatted_path)
def open_as_container(self, path: str) -> Container:
(handler, formatted_path,
is_URI) = self._fs_handler_list.get_handler_from_path(path)
if not is_URI:
actual_path = os.path.join(self._root, formatted_path)
handler = self._default_context
else:
actual_path = formatted_path
self._root = ""
return handler.open_as_container(actual_path)
def get_handler_by_name(self, path: str) -> Tuple[IO, str, bool]:
return self._fs_handler_list.get_handler_for_root(path)
def get_root_dir(self) -> str:
return self._root
def is_supported_scheme(self, scheme: str) -> bool:
return self._fs_handler_list.is_supported_scheme(scheme)
| [((409, 446), 're.compile', 're.compile', (['"""file:\\\\/\\\\/(?P<path>.+)"""'], {}), "('file:\\\\/\\\\/(?P<path>.+)')\n", (419, 446), False, 'import re\n'), ((474, 511), 're.compile', 're.compile', (['"""(?P<path>hdfs:\\\\/\\\\/.+)"""'], {}), "('(?P<path>hdfs:\\\\/\\\\/.+)')\n", (484, 511), False, 'import re\n'), ((1531, 1557), 'pfio.io.create_fs_handler', 'create_fs_handler', (['fs_type'], {}), '(fs_type)\n', (1548, 1557), False, 'from pfio.io import IO, create_fs_handler\n'), ((3182, 3222), 'os.path.join', 'os.path.join', (['self._root', 'formatted_path'], {}), '(self._root, formatted_path)\n', (3194, 3222), False, 'import os\n'), ((3549, 3589), 'os.path.join', 'os.path.join', (['self._root', 'formatted_path'], {}), '(self._root, formatted_path)\n', (3561, 3589), False, 'import os\n'), ((1800, 1838), 'pfio.io.create_fs_handler', 'create_fs_handler', (['uri_or_handler_name'], {}), '(uri_or_handler_name)\n', (1817, 1838), False, 'from pfio.io import IO, create_fs_handler\n')] |
Josue-Zea/tytus | parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Ifpl.py | f9e4be9a8c03eb698fade7a748972e4f52d46685 | import Analisis_Ascendente.Instrucciones.PLPGSQL.EjecutarFuncion as EjecutarFuncion
from Analisis_Ascendente.Instrucciones.PLPGSQL.plasignacion import Plasignacion
from Analisis_Ascendente.Instrucciones.instruccion import Instruccion
from Analisis_Ascendente.Instrucciones.Create.createTable import CreateTable
from Analisis_Ascendente.Instrucciones.Create.createDatabase import CreateReplace
from Analisis_Ascendente.Instrucciones.Select.select import Select
from Analisis_Ascendente.Instrucciones.Use_Data_Base.useDB import Use
from Analisis_Ascendente.Instrucciones.Select.select1 import selectTime
import Analisis_Ascendente.Instrucciones.Insert.insert as insert_import
from Analisis_Ascendente.Instrucciones.Select.Select2 import Selectp3
from Analisis_Ascendente.Instrucciones.Select import selectInst
from Analisis_Ascendente.Instrucciones.Expresiones.Expresion import Expresion
from Analisis_Ascendente.Instrucciones.Drop.drop import Drop
from Analisis_Ascendente.Instrucciones.Alter.alterDatabase import AlterDatabase
from Analisis_Ascendente.Instrucciones.Alter.alterTable import AlterTable
from Analisis_Ascendente.Instrucciones.Update.Update import Update
from Analisis_Ascendente.Instrucciones.Delete.delete import Delete
from Analisis_Ascendente.Instrucciones.Select import SelectDist
from Analisis_Ascendente.Instrucciones.Type.type import CreateType
#----------------------------------Imports FASE2--------------------------
from Analisis_Ascendente.Instrucciones.Index.Index import Index
from Analisis_Ascendente.Instrucciones.PLPGSQL.createFunction import CreateFunction
from Analisis_Ascendente.Instrucciones.Index.DropIndex import DropIndex
from Analisis_Ascendente.Instrucciones.Index.AlterIndex import AlterIndex
from Analisis_Ascendente.Instrucciones.PLPGSQL.DropProcedure import DropProcedure
from Analisis_Ascendente.Instrucciones.PLPGSQL.CreateProcedure import CreateProcedure
from Analisis_Ascendente.Instrucciones.PLPGSQL.CasePL import CasePL
from Analisis_Ascendente.Instrucciones.PLPGSQL.plCall import plCall
from Analisis_Ascendente.Instrucciones.PLPGSQL.dropFunction import DropFunction
import C3D.GeneradorEtiquetas as GeneradorEtiquetas
import C3D.GeneradorTemporales as GeneradorTemporales
import Analisis_Ascendente.reportes.Reportes as Reportes
class Ifpl(Instruccion):
''' #1 If
#2 If elif else
#3 If else '''
def __init__(self, caso,e_if,s_if,elif_s,s_else, fila, columna):
self.caso = caso
self.e_if = e_if
self.s_if = s_if
self.elif_s = elif_s
self.s_else = s_else
self.fila = fila
self.columna = columna
def ejecutar(self,tsglobal,ts, consola, exceptions):
try:
if self.caso == 1:
resultado = Expresion.Resolver(self.e_if, ts, consola, exceptions)
if resultado == True:
for x in range(0, len(self.s_if)):
self.procesar_instrucciones(self.s_if[x],ts,consola,exceptions,tsglobal)
else:
pass
elif self.caso == 2:
print('hola')
else:
resultado = Expresion.Resolver(self.e_if, ts, consola, exceptions)
if resultado == True:
for x in range(0, len(self.s_if)):
self.procesar_instrucciones(self.s_if[x], ts, consola, exceptions,tsglobal)
else:
for x in range(0, len(self.s_else)):
self.procesar_instrucciones(self.s_else[x],ts,consola,exceptions,tsglobal)
except:
consola.append("XX000 : internal_error")
def procesar_instrucciones(self,instr,ts,consola,exceptions,tsglobal):
if isinstance(instr, CreateReplace):
CreateReplace.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, Select):
if instr.caso == 1:
consola.append('caso 1')
selectTime.ejecutar(instr, ts, consola, exceptions, True)
elif instr.caso == 2:
consola.append('caso 2')
variable = SelectDist.Select_Dist()
SelectDist.Select_Dist.ejecutar(variable, instr, ts, consola, exceptions)
elif instr.caso == 3:
consola.append('caso 3')
variable = selectInst.Select_inst()
selectInst.Select_inst.ejecutar(variable, instr, ts, consola, exceptions)
elif instr.caso == 4:
consola.append('caso 4')
Selectp3.ejecutar(instr, ts, consola, exceptions, True)
elif instr.caso == 6:
consola.append('caso 6')
elif isinstance(instr, CreateTable):
CreateTable.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, Use):
Use.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, insert_import.InsertInto):
insert_import.InsertInto.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute un insert")
elif isinstance(instr, Drop):
Drop.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute drop")
elif isinstance(instr, AlterDatabase):
AlterDatabase.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute alter database")
elif isinstance(instr, AlterTable):
AlterTable.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute alter table")
elif isinstance(instr, Delete):
Delete.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute delete")
elif isinstance(instr, Update):
Update.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, CreateType):
CreateType.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, Index):
Index.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute Index")
elif isinstance(instr, CreateFunction):
CreateFunction.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, DropFunction):
DropFunction.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, DropIndex):
DropIndex.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, AlterIndex):
AlterIndex.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, DropProcedure):
DropProcedure.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, CreateProcedure):
CreateProcedure.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, CasePL):
CasePL.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, plCall):
plCall.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, Plasignacion):
EjecutarFuncion.ejecutarPlasignacionIf(instr,ts,consola,exceptions,tsglobal)
elif isinstance(instr, Ifpl):
instr.ejecutar(tsglobal,ts,consola,exceptions)
else:
return
def getC3D(self, lista_optimizaciones_C3D):
etiqueta_if = GeneradorEtiquetas.nueva_etiqueta()
etiqueta_else = GeneradorEtiquetas.nueva_etiqueta()
etiqueta_salida = GeneradorEtiquetas.nueva_etiqueta()
e_if = self.e_if.getC3D(lista_optimizaciones_C3D)
noOptimizado = '''if %s: goto .%s <br>
goto .%s<br>
label .%s<br>
<instrucciones><br>
label .%s''' % (e_if['tmp'], etiqueta_if, etiqueta_else, etiqueta_if, etiqueta_else)
optimizado = '''if not %s: goto .%s <br>
<instrucciones><br>
label .%s''' % (e_if['tmp'], etiqueta_else, etiqueta_else)
optimizacion1 = Reportes.ListaOptimizacion(noOptimizado, optimizado, Reportes.TipoOptimizacion.REGLA3)
lista_optimizaciones_C3D.append(optimizacion1)
sentencias_if = ''
for sentencias in self.s_if:
sentencias_if += sentencias.getC3D(lista_optimizaciones_C3D)
c3d = '''
%s
if not %s: goto .%s
%s
goto .%s
''' % (e_if['code'], e_if['tmp'], etiqueta_else, sentencias_if, etiqueta_salida)
if self.s_else is not None:
sentencias_else = ''
for sentencias in self.s_else:
sentencias_else += sentencias.getC3D(lista_optimizaciones_C3D)
c3d += ''' label .%s
%s
label .%s''' % (etiqueta_else, sentencias_else, etiqueta_salida)
else:
c3d += ''' label .%s
label .%s
''' % (etiqueta_else, etiqueta_salida)
return c3d
def get_quemado(self):
sententias_if = ''
for sentencia in self.s_if:
sententias_if += sentencia.get_quemado() + ';\n'
quemado = ''' if %s then
%s
''' % (self.e_if.get_quemado(), sententias_if)
if self.s_else is not None:
sentencias_else = ''
for sentencia in self.s_else:
sentencias_else += sentencia.get_quemado() + ';\n'
quemado += '''ELSE
%s
''' % sentencias_else
quemado += ' end if'
return quemado
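# --- Illustrative sketch (not part of the original file) ---
# Mirrors the three-address-code shape that Ifpl.getC3D emits for an if/else
# statement. The temporary and label names below are invented for illustration;
# the real ones come from GeneradorTemporales and GeneradorEtiquetas.nueva_etiqueta().
def _demo_c3d_shape():
    tmp, lbl_else, lbl_exit = "t1", "L1", "L2"
    return (
        "%s = a > b\n"
        "if not %s: goto .%s\n"
        "<if statements>\n"
        "goto .%s\n"
        "label .%s\n"
        "<else statements>\n"
        "label .%s\n"
        % (tmp, tmp, lbl_else, lbl_exit, lbl_else, lbl_exit)
    )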
| [((7347, 7382), 'C3D.GeneradorEtiquetas.nueva_etiqueta', 'GeneradorEtiquetas.nueva_etiqueta', ([], {}), '()\n', (7380, 7382), True, 'import C3D.GeneradorEtiquetas as GeneradorEtiquetas\n'), ((7408, 7443), 'C3D.GeneradorEtiquetas.nueva_etiqueta', 'GeneradorEtiquetas.nueva_etiqueta', ([], {}), '()\n', (7441, 7443), True, 'import C3D.GeneradorEtiquetas as GeneradorEtiquetas\n'), ((7471, 7506), 'C3D.GeneradorEtiquetas.nueva_etiqueta', 'GeneradorEtiquetas.nueva_etiqueta', ([], {}), '()\n', (7504, 7506), True, 'import C3D.GeneradorEtiquetas as GeneradorEtiquetas\n'), ((7918, 8009), 'Analisis_Ascendente.reportes.Reportes.ListaOptimizacion', 'Reportes.ListaOptimizacion', (['noOptimizado', 'optimizado', 'Reportes.TipoOptimizacion.REGLA3'], {}), '(noOptimizado, optimizado, Reportes.\n TipoOptimizacion.REGLA3)\n', (7944, 8009), True, 'import Analisis_Ascendente.reportes.Reportes as Reportes\n'), ((3866, 3920), 'Analisis_Ascendente.Instrucciones.Create.createDatabase.CreateReplace.ejecutar', 'CreateReplace.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (3888, 3920), False, 'from Analisis_Ascendente.Instrucciones.Create.createDatabase import CreateReplace\n'), ((2812, 2866), 'Analisis_Ascendente.Instrucciones.Expresiones.Expresion.Expresion.Resolver', 'Expresion.Resolver', (['self.e_if', 'ts', 'consola', 'exceptions'], {}), '(self.e_if, ts, consola, exceptions)\n', (2830, 2866), False, 'from Analisis_Ascendente.Instrucciones.Expresiones.Expresion import Expresion\n'), ((3222, 3276), 'Analisis_Ascendente.Instrucciones.Expresiones.Expresion.Expresion.Resolver', 'Expresion.Resolver', (['self.e_if', 'ts', 'consola', 'exceptions'], {}), '(self.e_if, ts, consola, exceptions)\n', (3240, 3276), False, 'from Analisis_Ascendente.Instrucciones.Expresiones.Expresion import Expresion\n'), ((4054, 4111), 'Analisis_Ascendente.Instrucciones.Select.select1.selectTime.ejecutar', 'selectTime.ejecutar', (['instr', 'ts', 'consola', 'exceptions', '(True)'], {}), '(instr, ts, consola, exceptions, True)\n', (4073, 4111), False, 'from Analisis_Ascendente.Instrucciones.Select.select1 import selectTime\n'), ((4840, 4892), 'Analisis_Ascendente.Instrucciones.Create.createTable.CreateTable.ejecutar', 'CreateTable.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (4860, 4892), False, 'from Analisis_Ascendente.Instrucciones.Create.createTable import CreateTable\n'), ((4217, 4241), 'Analisis_Ascendente.Instrucciones.Select.SelectDist.Select_Dist', 'SelectDist.Select_Dist', ([], {}), '()\n', (4239, 4241), False, 'from Analisis_Ascendente.Instrucciones.Select import SelectDist\n'), ((4259, 4332), 'Analisis_Ascendente.Instrucciones.Select.SelectDist.Select_Dist.ejecutar', 'SelectDist.Select_Dist.ejecutar', (['variable', 'instr', 'ts', 'consola', 'exceptions'], {}), '(variable, instr, ts, consola, exceptions)\n', (4290, 4332), False, 'from Analisis_Ascendente.Instrucciones.Select import SelectDist\n'), ((4944, 4988), 'Analisis_Ascendente.Instrucciones.Use_Data_Base.useDB.Use.ejecutar', 'Use.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (4956, 4988), False, 'from Analisis_Ascendente.Instrucciones.Use_Data_Base.useDB import Use\n'), ((4438, 4462), 'Analisis_Ascendente.Instrucciones.Select.selectInst.Select_inst', 'selectInst.Select_inst', ([], {}), '()\n', (4460, 4462), False, 'from Analisis_Ascendente.Instrucciones.Select import selectInst\n'), ((4480, 4553), 
'Analisis_Ascendente.Instrucciones.Select.selectInst.Select_inst.ejecutar', 'selectInst.Select_inst.ejecutar', (['variable', 'instr', 'ts', 'consola', 'exceptions'], {}), '(variable, instr, ts, consola, exceptions)\n', (4511, 4553), False, 'from Analisis_Ascendente.Instrucciones.Select import selectInst\n'), ((5061, 5126), 'Analisis_Ascendente.Instrucciones.Insert.insert.InsertInto.ejecutar', 'insert_import.InsertInto.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (5094, 5126), True, 'import Analisis_Ascendente.Instrucciones.Insert.insert as insert_import\n'), ((4648, 4703), 'Analisis_Ascendente.Instrucciones.Select.Select2.Selectp3.ejecutar', 'Selectp3.ejecutar', (['instr', 'ts', 'consola', 'exceptions', '(True)'], {}), '(instr, ts, consola, exceptions, True)\n', (4665, 4703), False, 'from Analisis_Ascendente.Instrucciones.Select.Select2 import Selectp3\n'), ((5221, 5266), 'Analisis_Ascendente.Instrucciones.Drop.drop.Drop.ejecutar', 'Drop.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (5234, 5266), False, 'from Analisis_Ascendente.Instrucciones.Drop.drop import Drop\n'), ((5365, 5419), 'Analisis_Ascendente.Instrucciones.Alter.alterDatabase.AlterDatabase.ejecutar', 'AlterDatabase.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (5387, 5419), False, 'from Analisis_Ascendente.Instrucciones.Alter.alterDatabase import AlterDatabase\n'), ((5525, 5576), 'Analisis_Ascendente.Instrucciones.Alter.alterTable.AlterTable.ejecutar', 'AlterTable.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (5544, 5576), False, 'from Analisis_Ascendente.Instrucciones.Alter.alterTable import AlterTable\n'), ((5675, 5722), 'Analisis_Ascendente.Instrucciones.Delete.delete.Delete.ejecutar', 'Delete.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (5690, 5722), False, 'from Analisis_Ascendente.Instrucciones.Delete.delete import Delete\n'), ((5816, 5863), 'Analisis_Ascendente.Instrucciones.Update.Update.Update.ejecutar', 'Update.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (5831, 5863), False, 'from Analisis_Ascendente.Instrucciones.Update.Update import Update\n'), ((5922, 5973), 'Analisis_Ascendente.Instrucciones.Type.type.CreateType.ejecutar', 'CreateType.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (5941, 5973), False, 'from Analisis_Ascendente.Instrucciones.Type.type import CreateType\n'), ((6027, 6073), 'Analisis_Ascendente.Instrucciones.Index.Index.Index.ejecutar', 'Index.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (6041, 6073), False, 'from Analisis_Ascendente.Instrucciones.Index.Index import Index\n'), ((6174, 6229), 'Analisis_Ascendente.Instrucciones.PLPGSQL.createFunction.CreateFunction.ejecutar', 'CreateFunction.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (6197, 6229), False, 'from Analisis_Ascendente.Instrucciones.PLPGSQL.createFunction import CreateFunction\n'), ((6290, 6343), 'Analisis_Ascendente.Instrucciones.PLPGSQL.dropFunction.DropFunction.ejecutar', 'DropFunction.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (6311, 6343), False, 'from Analisis_Ascendente.Instrucciones.PLPGSQL.dropFunction import 
DropFunction\n'), ((6401, 6451), 'Analisis_Ascendente.Instrucciones.Index.DropIndex.DropIndex.ejecutar', 'DropIndex.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (6419, 6451), False, 'from Analisis_Ascendente.Instrucciones.Index.DropIndex import DropIndex\n'), ((6510, 6561), 'Analisis_Ascendente.Instrucciones.Index.AlterIndex.AlterIndex.ejecutar', 'AlterIndex.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (6529, 6561), False, 'from Analisis_Ascendente.Instrucciones.Index.AlterIndex import AlterIndex\n'), ((6623, 6677), 'Analisis_Ascendente.Instrucciones.PLPGSQL.DropProcedure.DropProcedure.ejecutar', 'DropProcedure.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (6645, 6677), False, 'from Analisis_Ascendente.Instrucciones.PLPGSQL.DropProcedure import DropProcedure\n'), ((6741, 6797), 'Analisis_Ascendente.Instrucciones.PLPGSQL.CreateProcedure.CreateProcedure.ejecutar', 'CreateProcedure.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (6765, 6797), False, 'from Analisis_Ascendente.Instrucciones.PLPGSQL.CreateProcedure import CreateProcedure\n'), ((6852, 6899), 'Analisis_Ascendente.Instrucciones.PLPGSQL.CasePL.CasePL.ejecutar', 'CasePL.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (6867, 6899), False, 'from Analisis_Ascendente.Instrucciones.PLPGSQL.CasePL import CasePL\n'), ((6954, 7001), 'Analisis_Ascendente.Instrucciones.PLPGSQL.plCall.plCall.ejecutar', 'plCall.ejecutar', (['instr', 'ts', 'consola', 'exceptions'], {}), '(instr, ts, consola, exceptions)\n', (6969, 7001), False, 'from Analisis_Ascendente.Instrucciones.PLPGSQL.plCall import plCall\n'), ((7062, 7147), 'Analisis_Ascendente.Instrucciones.PLPGSQL.EjecutarFuncion.ejecutarPlasignacionIf', 'EjecutarFuncion.ejecutarPlasignacionIf', (['instr', 'ts', 'consola', 'exceptions', 'tsglobal'], {}), '(instr, ts, consola, exceptions, tsglobal\n )\n', (7100, 7147), True, 'import Analisis_Ascendente.Instrucciones.PLPGSQL.EjecutarFuncion as EjecutarFuncion\n')] |
vilkasgroup/epages_client | epages_client/dataobjects/enum_fetch_operator.py | 10e63d957ee45dc5d4df741064806f724fb1be1f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
class FetchOperator(object):
'''Defines values for fetch operators'''
ADD = 1
REMOVE = 2
REPLACE = 3
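# --- Illustrative sketch (not part of the original file) ---
# The class acts as a simple enum; callers pick one of the integer constants,
# for example when deciding how a fetched value set should be merged.
def _demo_pick_operator(replace_existing):
    return FetchOperator.REPLACE if replace_existing else FetchOperator.ADD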
| [] |
orca-eaa5a/dokkaebi_scanner | pyhwpscan/hwp_scan.py | 756314376e2cbbce6c03fd908ebd0b8cc27aa7fc | from threading import current_thread
from jsbeautifier.javascript.beautifier import remove_redundant_indentation
from pyparser.oleparser import OleParser
from pyparser.hwp_parser import HwpParser
from scan.init_scan import init_hwp5_scan
from scan.bindata_scanner import BinData_Scanner
from scan.jscript_scanner import JS_Scanner
from scan.paratext_scanner import ParaText_Scanner
import zipfile
import os
import sys
import platform
from common.errors import *
from utils.dumphex import print_hexdump
js_scanner = None
bindata_scanner = None
paratext_scanner = None
_platform = None
binary_info = {
"type": "",
"p": None
}
def cmd_handler(cmdline):
global binary_info
global js_scanner
global bindata_scanner
global paratext_scanner
global _platform
ty = binary_info["type"]
parser = binary_info["p"]
s_cmd = cmdline.split(" ")
cmd = s_cmd[0]
arg = s_cmd[1:]
if "windows" in _platform:
os.system('cls')
else:
os.system('clear')
print(">> "+cmdline)
if cmd == "help":
print("> tree")
print(" Print the structure of target Binary")
print("> dump [binary_name] [directory]")
print(" Dump OLE or Zipped Binary at specific direcotry (default is current direcotry)")
print("> show-hex [binary_name]")
print(" Print hexcidecimal view of specific OLE or Zipped Binary")
print("> scan")
print(" re-scanning the target file")
print("> exit")
print(" quit command liner")
return 1
elif cmd == "clear":
if "windows" in _platform:
os.system('cls')
else:
os.system('clear')
return 0
elif cmd == "tree":
if ty == "hwp":
parser.ole_container.print_dir_entry_all()
else:
for file in parser.filelist:
print(file.filename)
return 0
elif cmd == "dump":
if len(arg) > 1:
binary_name, target_dir = arg[0], arg[1]
else:
binary_name, target_dir = arg[0], None
if not target_dir:
target_dir = os.getcwd()
if ty == "hwp":
stream = parser.ole_container.get_dir_entry_by_name(binary_name).get_decompressed_stream()
else:
targ = ""
for file in parser.filelist:
fname = file.filename.split("/")[-1]
if fname == binary_name:
targ = file.filename
break
if not targ:
print("no file exist")
return 0
stream = parser.read(targ)
with open(target_dir+"/"+binary_name, "wb") as f:
f.write(stream)
print("dump succeed..")
return 1
elif cmd == "show-hex":
binary_name = arg[0]
if ty == "hwp":
stream = parser.ole_container.get_dir_entry_by_name(binary_name).get_decompressed_stream()
else:
stream = parser.read(binary_name)
print_hexdump(stream)
return 1
elif cmd == "scan":
if ty == "hwp":
bindata_scanner.scan()
js_scanner.scan()
else:
paratext_scanner.scan()
return 1
elif cmd == "exit":
return -1
else:
print("unknown command..")
return 0
print()
class HWPScanner:
def __init__(self) -> None:
self.__platform__ = platform.platform()
self.hwpx_flag = False
self.ole_parser = OleParser()
self.hwp_parser = None
pass
def parse_hwpdoc(self, file_name):
self.file_name = file_name
self.ole_parser.read_ole_binary(file_name)
try:
self.ole_parser.parse()
self.hwp_parser = HwpParser(self.ole_parser)
self.hwp_parser.parse()
if not init_hwp5_scan(self.hwp_parser.hwp_header):
exit(-1)
except:
self.hwpx_docs = zipfile.ZipFile(self.file_name, "r")
self.hwpx_flag = True
pass
'''
def parse_hwpdoc(self):
try:
self.hwp_parser = HwpParser(self.ole_parser)
self.hwp_parser.parse()
if not init_hwp5_scan(self.hwp_parser.hwp_header):
exit(-1)
except:
self.hwpx_docs = zipfile.ZipFile(self.file_name, "r")
self.hwpx_flag = True
pass
'''
def setup_scanner(self):
if not self.hwpx_flag:
self.js_scanner = JS_Scanner(self.hwp_parser)
self.bindata_scanner = BinData_Scanner(self.hwp_parser)
else:
self.paratext_scanner = ParaText_Scanner(self.hwpx_docs)
def get_file_structure(self):
strt = {}
if not self.hwpx_flag:
self.ole_parser.get_dir_entry_all(strt, entry_id=0, depth=0)
else:
for _file in self.hwpx_docs.filelist:
_path = os.path.split( _file.filename)
if _path[0] not in strt:
# root
if _path[0]:
strt[_path[0]] = {}
else:
strt[_path[1]] = _file.file_size
continue
cur_strt = strt[_path[0]]
for path in _path:
if path not in strt:
if path == _path[-1]:
cur_strt[path] = _file.file_size
else:
cur_strt[path] = {}
cur_strt = cur_strt[path]
else:
cur_strt = strt[path]
return strt
def scan(self):
scan_result = ""
if not self.hwpx_flag:
scan_result += self.js_scanner.scan()
scan_result += self.bindata_scanner.scan()
else:
scan_result += self.paratext_scanner.scan()
return scan_result | [((957, 973), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (966, 973), False, 'import os\n'), ((992, 1010), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (1001, 1010), False, 'import os\n'), ((3501, 3520), 'platform.platform', 'platform.platform', ([], {}), '()\n', (3518, 3520), False, 'import platform\n'), ((3578, 3589), 'pyparser.oleparser.OleParser', 'OleParser', ([], {}), '()\n', (3587, 3589), False, 'from pyparser.oleparser import OleParser\n'), ((3839, 3865), 'pyparser.hwp_parser.HwpParser', 'HwpParser', (['self.ole_parser'], {}), '(self.ole_parser)\n', (3848, 3865), False, 'from pyparser.hwp_parser import HwpParser\n'), ((4602, 4629), 'scan.jscript_scanner.JS_Scanner', 'JS_Scanner', (['self.hwp_parser'], {}), '(self.hwp_parser)\n', (4612, 4629), False, 'from scan.jscript_scanner import JS_Scanner\n'), ((4665, 4697), 'scan.bindata_scanner.BinData_Scanner', 'BinData_Scanner', (['self.hwp_parser'], {}), '(self.hwp_parser)\n', (4680, 4697), False, 'from scan.bindata_scanner import BinData_Scanner\n'), ((4748, 4780), 'scan.paratext_scanner.ParaText_Scanner', 'ParaText_Scanner', (['self.hwpx_docs'], {}), '(self.hwpx_docs)\n', (4764, 4780), False, 'from scan.paratext_scanner import ParaText_Scanner\n'), ((1657, 1673), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (1666, 1673), False, 'import os\n'), ((1700, 1718), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (1709, 1718), False, 'import os\n'), ((3921, 3963), 'scan.init_scan.init_hwp5_scan', 'init_hwp5_scan', (['self.hwp_parser.hwp_header'], {}), '(self.hwp_parser.hwp_header)\n', (3935, 3963), False, 'from scan.init_scan import init_hwp5_scan\n'), ((4035, 4071), 'zipfile.ZipFile', 'zipfile.ZipFile', (['self.file_name', '"""r"""'], {}), "(self.file_name, 'r')\n", (4050, 4071), False, 'import zipfile\n'), ((5027, 5056), 'os.path.split', 'os.path.split', (['_file.filename'], {}), '(_file.filename)\n', (5040, 5056), False, 'import os\n'), ((2178, 2189), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2187, 2189), False, 'import os\n'), ((3081, 3102), 'utils.dumphex.print_hexdump', 'print_hexdump', (['stream'], {}), '(stream)\n', (3094, 3102), False, 'from utils.dumphex import print_hexdump\n')] |
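# --- Hypothetical usage sketch for the hwp_scan module above (not part of the original file) ---
# The document name is made up. HWPScanner.parse_hwpdoc decides between the
# OLE-based HWP format and the zipped HWPX format, then setup_scanner wires up
# the matching JS/BinData or ParaText scanners.
if __name__ == "__main__":
    hwp_scanner = HWPScanner()
    hwp_scanner.parse_hwpdoc("sample.hwp")      # hypothetical input document
    hwp_scanner.setup_scanner()
    print(hwp_scanner.get_file_structure())     # stream / zip entry tree
    print(hwp_scanner.scan())                   # combined scan report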
franalgaba/nile | tests/core/test_plugins.py | f771467f27f03c8d20b8032bac64b3ab60436d3c | """
Tests for plugins in core module.
Only unit tests for now.
"""
from unittest.mock import patch
import click
from nile.core.plugins import get_installed_plugins, load_plugins, skip_click_exit
def test_skip_click_exit():
def dummy_method(a, b):
return a + b
dummy_result = dummy_method(1, 2)
decorated = skip_click_exit(dummy_method)
decorated_result = decorated(1, 2)
assert callable(decorated)
assert dummy_result == decorated_result
def test_get_installed_plugins():
class Dummy:
value = "nile.core.plugins.get_installed_plugins"
name = "get_installed_plugins"
with patch("nile.core.plugins.entry_points", return_value=[Dummy()]):
installed_plugins = get_installed_plugins()
assert "get_installed_plugins" in installed_plugins
def test_load_plugins():
@click.group()
def cli():
"""Nile CLI group."""
pass
def dummy():
print("dummy_result")
with patch(
"nile.core.plugins.get_installed_plugins", return_value={"dummy": dummy}
):
app = load_plugins(cli)
assert callable(app)
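# --- Hypothetical wiring sketch (not part of the original tests) ---
# Shows the flow these tests exercise: plugins discovered through
# get_installed_plugins() are attached to a click group by load_plugins().
def _demo_plugin_wiring():
    @click.group()
    def nile_cli():
        """Nile CLI group."""

    return load_plugins(nile_cli), get_installed_plugins()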
| [((333, 362), 'nile.core.plugins.skip_click_exit', 'skip_click_exit', (['dummy_method'], {}), '(dummy_method)\n', (348, 362), False, 'from nile.core.plugins import get_installed_plugins, load_plugins, skip_click_exit\n'), ((846, 859), 'click.group', 'click.group', ([], {}), '()\n', (857, 859), False, 'import click\n'), ((730, 753), 'nile.core.plugins.get_installed_plugins', 'get_installed_plugins', ([], {}), '()\n', (751, 753), False, 'from nile.core.plugins import get_installed_plugins, load_plugins, skip_click_exit\n'), ((976, 1055), 'unittest.mock.patch', 'patch', (['"""nile.core.plugins.get_installed_plugins"""'], {'return_value': "{'dummy': dummy}"}), "('nile.core.plugins.get_installed_plugins', return_value={'dummy': dummy})\n", (981, 1055), False, 'from unittest.mock import patch\n'), ((1085, 1102), 'nile.core.plugins.load_plugins', 'load_plugins', (['cli'], {}), '(cli)\n', (1097, 1102), False, 'from nile.core.plugins import get_installed_plugins, load_plugins, skip_click_exit\n')] |
Open-Source-eUdeC/UdeCursos-bot | commands/source.py | f900073044e1c74532af532618672501c0a43a13 | async def source(update, context):
source_code = "https://github.com/Open-Source-eUdeC/UdeCursos-bot"
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=(
"*UdeCursos bot v2.0*\n\n"
f"Código fuente: [GitHub]({source_code})"
),
parse_mode="Markdown"
)
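# --- Hypothetical registration sketch (not part of the original file) ---
# Shows how an async handler like `source` is typically registered with
# python-telegram-bot (v20+ style, matching the signature above). The token
# string is a placeholder.
def _demo_register_source_command():
    from telegram.ext import ApplicationBuilder, CommandHandler
    app = ApplicationBuilder().token("YOUR_BOT_TOKEN").build()
    app.add_handler(CommandHandler("source", source))
    return app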
| [] |
MPIB/Lagerregal | history/tests.py | 3c950dffcf4fa164008c5a304c4839bc282a3388 | from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.client import Client
from model_mommy import mommy
from devices.models import Device
from users.models import Lageruser
class HistoryTests(TestCase):
def setUp(self):
self.client = Client()
self.admin = Lageruser.objects.create_superuser('test', '[email protected]', "test")
self.client.login(username="test", password="test")
def test_global_view(self):
response = self.client.get('/history/global/')
self.assertEqual(response.status_code, 200)
def test_list_view(self):
content_type = ContentType.objects.get(model='device')
device = mommy.make(Device)
response = self.client.get('/history/%i/%i/' % (content_type.pk, device.pk))
self.assertEqual(response.status_code, 200)
def test_detail_view(self):
device = mommy.make(Device)
response = self.client.post('/devices/%i/edit/' % device.pk, data={
'name': 'test',
'creator': self.admin.pk,
})
self.assertEqual(response.status_code, 302)
response = self.client.get('/history/version/1/')
self.assertEqual(response.status_code, 200)
| [((306, 314), 'django.test.client.Client', 'Client', ([], {}), '()\n', (312, 314), False, 'from django.test.client import Client\n'), ((336, 403), 'users.models.Lageruser.objects.create_superuser', 'Lageruser.objects.create_superuser', (['"""test"""', '"""[email protected]"""', '"""test"""'], {}), "('test', '[email protected]', 'test')\n", (370, 403), False, 'from users.models import Lageruser\n'), ((658, 697), 'django.contrib.contenttypes.models.ContentType.objects.get', 'ContentType.objects.get', ([], {'model': '"""device"""'}), "(model='device')\n", (681, 697), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((715, 733), 'model_mommy.mommy.make', 'mommy.make', (['Device'], {}), '(Device)\n', (725, 733), False, 'from model_mommy import mommy\n'), ((921, 939), 'model_mommy.mommy.make', 'mommy.make', (['Device'], {}), '(Device)\n', (931, 939), False, 'from model_mommy import mommy\n')] |
spapas/django-git | django_git_info/management/commands/get_git_info.py | a62215d315263bce5d5d0afcfa14152601f76901 | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from django_git_info import get_git_info
class Command(BaseCommand):
help = 'Gets git info'
#@transaction.commit_manually
def handle(self, *args, **options):
info = get_git_info()
for key in info.keys():
            print('{0}={1}'.format(key, info[key])) | []
robert-haas/mevis | mevis/_internal/conversion.py | 1bbf8dfb56aa8fc52b8f38c570ee7b2d2a9d3327 | from collections.abc import Callable as _Callable
import networkx as _nx
from opencog.type_constructors import AtomSpace as _AtomSpace
from .args import check_arg as _check_arg
def convert(data, graph_annotated=True, graph_directed=True,
node_label=None, node_color=None, node_opacity=None, node_size=None, node_shape=None,
node_border_color=None, node_border_size=None,
node_label_color=None, node_label_size=None, node_hover=None, node_click=None,
node_image=None, node_properties=None,
edge_label=None, edge_color=None, edge_opacity=None, edge_size=None,
edge_label_color=None, edge_label_size=None, edge_hover=None, edge_click=None):
"""Convert an Atomspace or list of Atoms to a NetworkX graph with annotations.
Several arguments accept a Callable.
- In case of node annotations, the Callable gets an Atom as input,
which the node represents in the graph.
The Callable needs to return one of the other types accepted by the argument,
e.g. ``str`` or ``int``/``float``.
- In case of edge annotations, the Callable gets two Atoms as input,
which the edge connects in the graph.
The Callable needs to return one of the other types accepted by the argument,
e.g. ``str`` or ``int``/``float``.
Several arguments accept a color, which can be in following formats:
- Name: ``"black"``, ``"red"``, ``"green"``, ...
- Color code
- 6 digit hex RGB code: ``"#05ac05"``
- 3 digit hex RGB code: ``"#0a0"`` (equivalent to ``"#00aa00"``)
Parameters
----------
data : Atomspace, list of Atoms
Input that gets converted to a graph.
graph_annotated : bool
If ``False``, no annotations are added to the graph. This could be used for
converting large AtomSpaces quickly to graphs that use less RAM and can
be exported to smaller files (e.g. also compressed as gml.gz) for inspection
with other tools.
graph_directed : bool
If ``True``, a NetworkX DiGraph is created. If ``False``, a NetworkX Graph is created.
node_label : str, Callable
Set a label for each node, which is shown as text below it.
node_color : str, Callable
Set a color for each node, which becomes the fill color of its shape.
node_opacity : float between 0.0 and 1.0
Set an opacity for each node, which becomes the opacity of its shape.
Caution: This is only supported by d3.
node_size : int, float, Callable
Set a size for each node, which becomes the height and width of its shape.
node_shape : str, Callable
Set a shape for each node, which is some geometrical form that has the
node coordinates in its center.
Possible values: ``"circle"``, ``"rectangle"``, ``"hexagon"``
node_border_color : str, Callable
Set a border color for each node, which influences the border drawn around its shape.
node_border_size : int, float, Callable
Set a border size for each node, which influences the border drawn around its shape.
node_label_color : str, Callable
Set a label color for each node, which determines the font color
of the text below the node.
node_label_size : int, float, Callable
Set a label size for each node, which determines the font size
of the text below the node.
node_hover : str, Callable
Set a hover text for each node, which shows up besides the mouse cursor
when hovering over a node.
node_click : str, Callable
Set a click text for each node, which shows up in a div element below the plot
when clicking on a node and can easily be copied and pasted.
node_image : str, Callable
Set an image for each node, which appears within its shape.
Possible values:
- URL pointing to an image
- Data URL encoding the image
node_properties : str, dict, Callable
Set additional properties for each node, which may not immediately be translated
into a visual element, but can be chosen in the data selection menu in the
interactive HTML visualizations to map them on some plot element.
These properties also appear when exporting a graph to a file in a format
such as GML and may be recognized by external visualization tools.
Note that a Callable needs to return a dict in this case, and each key becomes
a property, which is equivalent to the other properties such as node_size and
node_color.
Special cases:
- ``node_properties="tv"`` is a shortcut for using a function that returns
``{"mean": atom.tv.mean, "confidence": atom.tv.confidence}``
- Keys ``"x"``, ``"y"`` and ``"z"`` properties are translated into node coordinates.
Examples:
- ``dict(x=0.0)``: This fixes the x coordinate of each node to 0.0, so that the
JavaScript layout algorithm does not influence it, but the nodes remain
free to move in the y and z directions.
- ``lambda atom: dict(x=2.0) if atom.is_node() else None``:
This fixes the x coordinate of each Atom of type Node to 2.0
but allows each Atom of type Link to move freely.
- ``lambda atom: dict(y=-len(atom.out)*100) if atom.is_link() else dict(y=0)``
This fixes the y coordinates of Atoms at different heights. Atoms of type Node
are put at the bottom and Atoms of type Link are ordered by the number of their
            outgoing edges. The result is a hierarchical visualization that has some
similarity with the "dot" layout.
- ``lambda atom: dict(x=-100) if atom.is_node() else dict(x=100)``:
This fixes the x coordinate of Node Atoms at -100 and of Link Atoms at 100.
            The result is a visualization with two lines of nodes that has some
similarity with the "bipartite" layout.
edge_label : str, Callable
Set a label for each edge, which becomes the text plotted in the middle of the edge.
edge_color : str, Callable
Set a color for each edge, which becomes the color of the line representing the edge.
edge_opacity : int, float, Callable
        Set an opacity for each edge, which allows it to be made transparent to some degree.
edge_size : int, float, Callable
Set a size for each edge, which becomes the width of the line representing the edge.
edge_label_color : str, Callable
Set a color for each edge label, which becomes the color of the text in the midpoint
of the edge.
edge_label_size : int, float, Callable
Set a size for each edge label, which becomes the size of the text in the midpoint
of the edge.
edge_hover : str, Callable
edge_click : str, Callable
Returns
-------
graph : NetworkX Graph or DiGraph
        Whether an undirected or directed graph is created depends on the argument "graph_directed".
"""
# Argument processing
_check_arg(data, 'data', (list, _AtomSpace))
_check_arg(graph_annotated, 'graph_annotated', bool)
_check_arg(graph_directed, 'graph_directed', bool)
_check_arg(node_label, 'node_label', (str, _Callable), allow_none=True)
_check_arg(node_color, 'node_color', (str, _Callable), allow_none=True)
_check_arg(node_opacity, 'node_opacity', (int, float, _Callable), allow_none=True)
_check_arg(node_size, 'node_size', (int, float, _Callable), allow_none=True)
_check_arg(node_shape, 'node_shape', (str, _Callable), allow_none=True)
_check_arg(node_border_color, 'node_border_color', (str, _Callable), allow_none=True)
_check_arg(node_border_size, 'node_border_size', (int, float, _Callable), allow_none=True)
_check_arg(node_label_color, 'node_label_color', (str, _Callable), allow_none=True)
_check_arg(node_label_size, 'node_label_size', (int, float, _Callable), allow_none=True)
_check_arg(node_hover, 'node_hover', (str, _Callable), allow_none=True)
_check_arg(node_click, 'node_click', (str, _Callable), allow_none=True)
_check_arg(node_image, 'node_image', (str, _Callable), allow_none=True)
_check_arg(node_properties, 'node_properties', (str, dict, _Callable), allow_none=True)
_check_arg(edge_label, 'edge_label', (str, _Callable), allow_none=True)
_check_arg(edge_color, 'edge_color', (str, _Callable), allow_none=True)
_check_arg(edge_opacity, 'edge_opacity', (int, float, _Callable), allow_none=True)
_check_arg(edge_size, 'edge_size', (int, float, _Callable), allow_none=True)
_check_arg(edge_label_color, 'edge_label_color', (str, _Callable), allow_none=True)
_check_arg(edge_label_size, 'edge_label_size', (int, float, _Callable), allow_none=True)
_check_arg(edge_hover, 'edge_hover', (str, _Callable), allow_none=True)
_check_arg(edge_click, 'edge_click', (str, _Callable), allow_none=True)
    # Prepare annotation functions
if graph_annotated:
node_ann = prepare_node_func(
node_label, node_color, node_opacity, node_size, node_shape, node_border_color,
node_border_size, node_label_color, node_label_size, node_hover, node_click,
node_image, node_properties)
edge_ann = prepare_edge_func(
edge_label, edge_color, edge_opacity, edge_size,
edge_label_color, edge_label_size, edge_hover, edge_click)
else:
empty = dict()
def node_ann(atom):
return empty
def edge_ann(atom1, atom2):
return empty
# Create the NetworkX graph
graph = _nx.DiGraph() if graph_directed else _nx.Graph()
# 0) Set graph annotations
graph.graph['node_click'] = '$hover' # node_click will by default show content of node_hover
# 1) Add vertices and their annotations
for atom in data:
graph.add_node(to_uid(atom), **node_ann(atom))
# 2) Add edges and their annotations (separate step to exclude edges to filtered vertices)
for atom in data:
uid = to_uid(atom)
if atom.is_link():
# for all that is incoming to the Atom
for atom2 in atom.incoming:
uid2 = to_uid(atom2)
if uid2 in graph.nodes:
graph.add_edge(uid2, uid, **edge_ann(atom2, atom))
# for all that is outgoing of the Atom
for atom2 in atom.out:
uid2 = to_uid(atom2)
if uid2 in graph.nodes:
graph.add_edge(uid, uid2, **edge_ann(atom, atom2))
return graph
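# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes an already populated AtomSpace (or list of Atoms) is passed in;
# only keyword arguments documented in the docstring above are used.
def _demo_convert(atomspace):
    graph = convert(
        atomspace,
        graph_directed=True,
        node_color=lambda atom: "red" if atom.is_node() else "blue",
        node_size=lambda atom: 10.0 + 2.0 * len(atom.out) if atom.is_link() else 10.0,
        node_properties="tv",
    )
    print(graph.number_of_nodes(), "nodes,", graph.number_of_edges(), "edges")
    return graph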
def prepare_node_func(node_label, node_color, node_opacity, node_size, node_shape,
node_border_color, node_border_size, node_label_color, node_label_size,
node_hover, node_click, node_image, node_properties):
"""Prepare a function that calculates all annoations for a node representing an Atom."""
# individual node annotation functions
node_label = use_node_def_or_str(node_label, node_label_default)
node_color = use_node_def_or_str(node_color, node_color_default)
node_opacity = use_node_def_or_num(node_opacity, node_opacity_default)
node_size = use_node_def_or_num(node_size, node_size_default)
node_shape = use_node_def_or_str(node_shape, node_shape_default)
node_border_color = use_node_def_or_str(node_border_color, node_border_color_default)
node_border_size = use_node_def_or_num(node_border_size, node_border_size_default)
node_label_color = use_node_def_or_str(node_label_color, node_label_color_default)
node_label_size = use_node_def_or_num(node_label_size, node_label_size_default)
node_hover = use_node_def_or_str(node_hover, node_hover_default)
node_click = use_node_def_or_str(node_click, node_click_default)
node_image = use_node_def_or_str(node_image, node_image_default)
# special case: additional user-defined node properties by a function that returns a dict
if node_properties is None:
node_properties = node_properties_default
elif isinstance(node_properties, dict):
val = node_properties
def node_properties(atom):
return val
elif node_properties == 'tv':
node_properties = node_properties_tv
# combined node annotation function: calls each of the individual ones
name_func = (
('label', node_label),
('color', node_color),
('opacity', node_opacity),
('size', node_size),
('shape', node_shape),
('border_color', node_border_color),
('border_size', node_border_size),
('label_color', node_label_color),
('label_size', node_label_size),
('hover', node_hover),
('click', node_click),
('image', node_image),
)
def func(atom):
data = {}
for n, f in name_func:
val = f(atom)
if val is not None:
data[n] = val
try:
data.update(node_properties(atom))
except Exception:
pass
return data
return func
def prepare_edge_func(edge_label, edge_color, edge_opacity, edge_size, edge_label_color,
edge_label_size, edge_hover, edge_click):
"""Prepare a function that calculates all annoations for an edge between Atoms."""
# individual edge annotation functions
edge_label = use_edge_def_or_str(edge_label, edge_label_default)
edge_color = use_edge_def_or_str(edge_color, edge_color_default)
edge_opacity = use_edge_def_or_num(edge_opacity, edge_opacity_default)
edge_size = use_edge_def_or_num(edge_size, edge_size_default)
edge_label_color = use_edge_def_or_str(edge_label_color, edge_label_color_default)
edge_label_size = use_edge_def_or_num(edge_label_size, edge_label_size_default)
edge_hover = use_edge_def_or_str(edge_hover, edge_hover_default)
edge_click = use_edge_def_or_str(edge_click, edge_click_default)
# combined edge annotation function: calls each of the individual ones
name_func = (
('label', edge_label),
('color', edge_color),
('opacity', edge_opacity),
('size', edge_size),
('label_color', edge_label_color),
('label_size', edge_label_size),
('hover', edge_hover),
('click', edge_click),
)
def func(atom1, atom2):
data = {}
for n, f in name_func:
val = f(atom1, atom2)
if val is not None:
data[n] = val
return data
return func
def use_node_def_or_str(given_value, default_func):
"""Transform a value of type (None, str, Callable) to a node annotation function."""
# Default: use pre-defined function from this module
if given_value is None:
func = default_func
# Transform: value to function that returns the value
elif isinstance(given_value, str):
given_value = str(given_value)
def func(atom):
return given_value
# Passthrough: value itself is a function
else:
func = given_value
return func
def use_node_def_or_num(given_value, default_func):
"""Transform a value of type (None, int, float, Callable) to a node annotation function."""
# Default: use pre-defined function from this module
if given_value is None:
func = default_func
# Transform: value to function that returns the value
elif isinstance(given_value, (int, float)):
given_value = float(given_value)
def func(atom):
return given_value
# Passthrough: value itself is a function
else:
func = given_value
return func
def use_edge_def_or_str(given_value, default_func):
"""Transform a value of type (None, str, Callable) to an edge annotation function."""
# Default: use pre-defined function from this module
if given_value is None:
func = default_func
# Transform: value to function that returns the value
elif isinstance(given_value, str):
given_value = str(given_value)
def func(atom1, atom2):
return given_value
# Passthrough: value itself is a function
else:
func = given_value
return func
def use_edge_def_or_num(given_value, default_func):
"""Transform a value of type (None, int, float, Callable) to an edge annotation function."""
# Default: use pre-defined function from this module
if given_value is None:
func = default_func
# Transform: value to function that returns the value
elif isinstance(given_value, (int, float)):
given_value = float(given_value)
def func(atom1, atom2):
return given_value
# Passthrough: value itself is a function
else:
func = given_value
return func
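# --- Illustrative sketch (not part of the original module) ---
# Demonstrates the normalization done by the helpers above: plain values are
# wrapped into constant functions, callables pass through unchanged, and None
# falls back to the module-level default functions defined below.
def _demo_annotation_normalization():
    constant = use_node_def_or_num(12, node_size_default)        # constant(atom) -> 12.0
    passthrough = use_node_def_or_num(lambda atom: 5.0, node_size_default)
    fallback = use_node_def_or_num(None, node_size_default)      # -> node_size_default itself
    return constant, passthrough, fallback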
def to_uid(atom):
"""Return a unique identifier for an Atom."""
return atom.id_string()
# Default functions for node annotations
# - "return None" means that the attribute and value won't be included
# to the output data, so that defaults of the JS library are used and files get smaller
# - A return of a value in some cases and None in other cases means that the
# default value of the JS library is used in None cases and again files get smaller
def node_label_default(atom):
# None => no node labels
return '{} "{}"'.format(atom.type_name, atom.name) if atom.is_node() else atom.type_name
def node_color_default(atom):
# None => black
return 'red' if atom.is_node() else None
def node_opacity_default(atom):
# None => 1.0
return None
def node_size_default(atom):
# None => 10
return None
def node_shape_default(atom):
# None => circle
return 'rectangle' if atom.is_node() else None
def node_border_color_default(atom):
# None => black
return None
def node_border_size_default(atom):
# None => 0.0
return None
def node_label_color_default(atom):
# None => black
return None
def node_label_size_default(atom):
# None => 12.0
return None
def node_hover_default(atom):
# None => no hover text
return atom.short_string()
def node_click_default(atom):
# None => no click text (in addition to always shown "Node: <id>" in header)
return None
def node_image_default(atom):
# None => no image inside node
return None
def node_properties_default(atom):
# None => no extra node annotations
return None
def node_properties_tv(atom):
return dict(mean=atom.tv.mean, confidence=atom.tv.confidence)
# Default functions for edge annotations
def edge_label_default(atom1, atom2):
# None => no edge label
return None
def edge_color_default(atom1, atom2):
# None => black
return None if atom1.is_link() and atom2.is_link() else 'red'
def edge_opacity_default(atom1, atom2):
# None => 1.0
return None
def edge_size_default(atom1, atom2):
# None => 1.0
return None
def edge_label_color_default(atom1, atom2):
# None => black
return None
def edge_label_size_default(atom1, atom2):
# None => 8.0
return None
def edge_hover_default(atom1, atom2):
# None => no hover text
return None
def edge_click_default(atom1, atom2):
# None => no click text (in addition to always shown "Edge: <id>" in header)
return None
| [((9626, 9639), 'networkx.DiGraph', '_nx.DiGraph', ([], {}), '()\n', (9637, 9639), True, 'import networkx as _nx\n'), ((9663, 9674), 'networkx.Graph', '_nx.Graph', ([], {}), '()\n', (9672, 9674), True, 'import networkx as _nx\n')] |
seandstewart/typical-pycharm-plugin | testData/completion/classMethodCls.py | 4f6ec99766239421201faae9d75c32fa0ee3565a | from builtins import *
from pydantic import BaseModel
class A(BaseModel):
abc: str
@classmethod
def test(cls):
return cls.<caret>
| [] |
b-com/watcher-metering | watcher_metering/tests/agent/test_agent.py | 7c09b243347146e5a421700d5b07d1d0a5c4d604 | # -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import OrderedDict
import os
import types
from mock import MagicMock
from mock import Mock
from mock import patch
from mock import PropertyMock
import msgpack
import operator
from oslo_config import cfg
from oslotest.base import BaseTestCase
from stevedore.driver import DriverManager
from stevedore.extension import Extension
from watcher_metering.agent.agent import Agent
from watcher_metering.agent.measurement import Measurement
from watcher_metering.tests.agent.agent_fixtures import ConfFixture
from watcher_metering.tests.agent.agent_fixtures import DummyMetricPuller
from watcher_metering.tests.agent.agent_fixtures import FakeMetricPuller
class TestAgent(BaseTestCase):
# patches to be applied for each test in this test suite
patches = []
def setUp(self):
super(TestAgent, self).setUp()
self.conf = cfg.ConfigOpts()
# To load the drivers without using the config file
self.useFixture(ConfFixture(self.conf))
def _fake_parse(self, args=[]):
return cfg.ConfigOpts._parse_cli_opts(self, [])
_fake_parse_method = types.MethodType(_fake_parse, self.conf)
self.conf._parse_cli_opts = _fake_parse_method
# First dependency to be returned
self.dummy_driver_manager = DriverManager.make_test_instance(
extension=Extension(
name=DummyMetricPuller.get_name(),
entry_point='fake.entry.point',
plugin=DummyMetricPuller,
obj=None,
),
namespace='TESTING',
)
# 2nd dependency to be returned
self.fake_driver_manager = DriverManager.make_test_instance(
extension=Extension(
name=FakeMetricPuller.get_name(),
entry_point='fake.entry.point',
plugin=FakeMetricPuller,
obj=None,
),
namespace='TESTING',
)
self.defaults_drivers = {
DummyMetricPuller.get_name(): self.dummy_driver_manager,
FakeMetricPuller.get_name(): self.fake_driver_manager,
}
def _fake_loader(name, **kw):
return self.defaults_drivers[name]
# Patches the agent socket
self.m_agent_socket = MagicMock(autospec=True)
self.patches.extend([
# Deactivates the nanomsg socket
patch(
"watcher_metering.agent.agent.nanomsg.Socket",
new=self.m_agent_socket,
),
# Sets the test namespace to 'TESTING'
patch.object(
Agent,
"namespace",
PropertyMock(return_value='TESTING'),
),
            # Patches the driver manager to return our test drivers
# instead of the real ones
patch(
"watcher_metering.load.loader.DriverManager",
MagicMock(side_effect=_fake_loader),
),
])
# Applies all of our patches before each test
for _patch in self.patches:
_patch.start()
self.agent = Agent(
conf=self.conf,
driver_names=self.conf.agent.driver_names,
use_nanoconfig_service=False,
publisher_endpoint="fake",
nanoconfig_service_endpoint="",
nanoconfig_update_endpoint="",
nanoconfig_profile="nanoconfig://test_profile"
)
# Default ticking is set to 0 to reduce test execution time
self.agent.TICK_INTERVAL = 0
def tearDown(self):
super(TestAgent, self).tearDown()
# The drivers are stored at the class level so we need to clear
# it after each test
self.agent.drivers.clear()
for _patch in self.patches:
_patch.stop()
def test_register_driver(self):
expected_driver1_key = "metrics_driver.dummy_data.puller.dummy"
expected_driver2_key = "metrics_driver.fake_data.puller.fake"
self.agent.register_drivers()
self.assertEqual(
sorted(self.agent.drivers.keys()),
[expected_driver1_key, expected_driver2_key]
)
sorted_drivers = OrderedDict(
sorted(self.agent.drivers.items(), key=operator.itemgetter(0))
)
self.assertEqual(len(sorted_drivers), 2)
driver1 = self.agent.drivers[expected_driver1_key]
driver2 = self.agent.drivers[expected_driver2_key]
self.assertEqual(driver1.title, "metrics_driver.dummy")
self.assertEqual(driver1.probe_id, "data.puller.dummy")
self.assertEqual(driver1.interval, 0.01)
self.assertEqual(driver2.title, "metrics_driver.fake")
self.assertEqual(driver2.probe_id, "data.puller.fake")
self.assertEqual(driver2.interval, 0.01)
self.assertIn(self.agent, driver1._observers)
self.assertIn(self.agent, driver2._observers)
def test_unregister_driver(self):
driver_key = "metrics_driver.dummy_data.puller.dummy"
self.agent.register_drivers()
self.agent.unregister_driver(driver_key)
# Initial is 2 drivers => 2 - 1 == 1
self.assertEqual(len(self.agent.drivers), 1)
@patch.object(Measurement, "as_dict")
def test_send_measurements(self, m_as_dict):
self.agent.register_drivers()
measurement_dict = OrderedDict(
name="dummy.data.puller",
unit="",
type_="",
value=13.37,
resource_id="test_hostname",
host="test_hostname",
timestamp="2015-08-04T15:15:45.703542",
)
m_as_dict.return_value = measurement_dict
measurement = Measurement(**measurement_dict)
for driver in self.agent.drivers.values():
driver.send_measurements([measurement])
break # only the first one
expected_encoded_msg = msgpack.dumps(measurement_dict)
self.m_agent_socket.return_value.send.assert_called_once_with(
expected_encoded_msg
)
@patch.object(DummyMetricPuller, "is_alive")
@patch.object(DummyMetricPuller, "start")
@patch("watcher_metering.agent.manager.MetricManager.lock")
def test_check_drivers_alive(self, m_lock, m_start, m_is_alive):
m_lock.acquire = Mock(return_value=True) # Emulates a thread behavior
m_lock.release = Mock(return_value=True) # Emulates a thread behavior
m_is_alive.return_value = True # Emulates a thread that is running
m_start.return_value = None
self.agent.register_drivers()
self.agent.check_drivers_alive()
self.assertTrue(m_is_alive.called)
self.assertFalse(m_start.called)
@patch.object(DummyMetricPuller, "is_alive")
@patch.object(DummyMetricPuller, "start")
@patch("watcher_metering.agent.manager.MetricManager.lock")
def test_check_drivers_alive_with_driver_stopped(self, m_lock, m_start,
m_is_alive):
m_lock.acquire = Mock(return_value=True) # Emulates a thread behavior
m_lock.release = Mock(return_value=True) # Emulates a thread behavior
m_is_alive.side_effect = [False, True]
m_start.side_effect = [RuntimeError, True, True] # Fails once
self.agent.register_drivers()
# should re-run the driver
self.agent.check_drivers_alive()
self.assertEqual(m_is_alive.call_count, 1)
self.assertEqual(m_start.call_count, 2)
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_valid_using_default(self, m_env_getter,
m_env_setter):
# Override default where it is set to False
m_env_getter.side_effect = ["FAKE_NN_CONFIG_SERVICE",
"FAKE_NN_CONFIG_UPDATES"]
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = ""
self.agent.nanoconfig_update_endpoint = ""
self.agent.set_nanoconfig_endpoints()
self.assertEqual(m_env_getter.call_count, 2)
m_env_getter.assert_any_call("NN_CONFIG_SERVICE") # First call
m_env_getter.assert_called_with("NN_CONFIG_UPDATES") # Last call
self.assertEqual(m_env_setter.call_count, 0)
self.assertEqual(self.agent.nanoconfig_service_endpoint,
"FAKE_NN_CONFIG_SERVICE")
self.assertEqual(self.agent.nanoconfig_update_endpoint,
"FAKE_NN_CONFIG_UPDATES")
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_valid_custom_values(self, m_env_getter,
m_env_setter):
# Override default where it is set to False
m_env_getter.side_effect = ["FAKE_NN_CONFIG_SERVICE",
"FAKE_NN_CONFIG_UPDATES"]
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = "CUSTOM_NN_CONFIG_SERVICE"
self.agent.nanoconfig_update_endpoint = "CUSTOM_NN_CONFIG_UPDATES"
self.agent.set_nanoconfig_endpoints()
self.assertEqual(m_env_getter.call_count, 2)
m_env_getter.assert_any_call("NN_CONFIG_SERVICE")
m_env_getter.assert_called_with("NN_CONFIG_UPDATES")
m_env_setter.assert_any_call("NN_CONFIG_SERVICE",
"CUSTOM_NN_CONFIG_SERVICE")
m_env_setter.assert_called_with("NN_CONFIG_UPDATES",
"CUSTOM_NN_CONFIG_UPDATES")
self.assertEqual(self.agent.nanoconfig_service_endpoint,
"CUSTOM_NN_CONFIG_SERVICE")
self.assertEqual(self.agent.nanoconfig_update_endpoint,
"CUSTOM_NN_CONFIG_UPDATES")
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_invalid_service(self, m_env_getter,
m_env_setter):
# Override default where it is set to False
m_env_getter.return_value = "" # Emulates empty ENV vars
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = ""
self.agent.nanoconfig_update_endpoint = "CUSTOM_NN_CONFIG_UPDATES"
self.assertRaises(ValueError, self.agent.set_nanoconfig_endpoints)
m_env_getter.assert_called_once_with("NN_CONFIG_SERVICE")
self.assertEqual(m_env_setter.call_count, 0)
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_invalid_update(self, m_env_getter, m_env_setter):
# Override default where it is set to False
m_env_getter.return_value = "" # Emulates empty ENV vars
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = "CUSTOM_NN_CONFIG_SERVICE"
self.agent.nanoconfig_update_endpoint = ""
self.assertRaises(ValueError, self.agent.set_nanoconfig_endpoints)
m_env_getter.assert_any_call("NN_CONFIG_SERVICE")
m_env_getter.assert_called_with("NN_CONFIG_UPDATES")
m_env_setter.assert_called_once_with("NN_CONFIG_SERVICE",
"CUSTOM_NN_CONFIG_SERVICE")
@patch.object(Agent, 'check_drivers_alive', MagicMock())
@patch("watcher_metering.agent.manager."
"MetricManager.terminated",
new_callable=PropertyMock)
def test_run_agent(self, m_terminated):
# Patches the guard/exit condition of the thread periodic event loop
# -> 1st time = False (carry on) and 2nd = True (Should terminate)
m_terminated.side_effect = [False, True]
self.agent.run()
self.assertEqual(m_terminated.call_count, 2)
@patch.object(DummyMetricPuller, 'send_measurements', MagicMock())
def test_stop_agent(self):
self.agent.register_drivers()
self.agent.start()
self.agent.join(timeout=.01)
self.agent.stop()
self.assertEqual(len(self.agent.drivers.values()), 2)
self.assertTrue(
all([driver.terminated for driver in self.agent.drivers.values()])
)
self.assertTrue(self.agent.terminated)
self.assertFalse(self.agent.is_alive())
| [((5908, 5944), 'mock.patch.object', 'patch.object', (['Measurement', '"""as_dict"""'], {}), "(Measurement, 'as_dict')\n", (5920, 5944), False, 'from mock import patch\n'), ((6748, 6791), 'mock.patch.object', 'patch.object', (['DummyMetricPuller', '"""is_alive"""'], {}), "(DummyMetricPuller, 'is_alive')\n", (6760, 6791), False, 'from mock import patch\n'), ((6797, 6837), 'mock.patch.object', 'patch.object', (['DummyMetricPuller', '"""start"""'], {}), "(DummyMetricPuller, 'start')\n", (6809, 6837), False, 'from mock import patch\n'), ((6843, 6901), 'mock.patch', 'patch', (['"""watcher_metering.agent.manager.MetricManager.lock"""'], {}), "('watcher_metering.agent.manager.MetricManager.lock')\n", (6848, 6901), False, 'from mock import patch\n'), ((7412, 7455), 'mock.patch.object', 'patch.object', (['DummyMetricPuller', '"""is_alive"""'], {}), "(DummyMetricPuller, 'is_alive')\n", (7424, 7455), False, 'from mock import patch\n'), ((7461, 7501), 'mock.patch.object', 'patch.object', (['DummyMetricPuller', '"""start"""'], {}), "(DummyMetricPuller, 'start')\n", (7473, 7501), False, 'from mock import patch\n'), ((7507, 7565), 'mock.patch', 'patch', (['"""watcher_metering.agent.manager.MetricManager.lock"""'], {}), "('watcher_metering.agent.manager.MetricManager.lock')\n", (7512, 7565), False, 'from mock import patch\n'), ((8205, 8245), 'mock.patch.object', 'patch.object', (['os._Environ', '"""__setitem__"""'], {}), "(os._Environ, '__setitem__')\n", (8217, 8245), False, 'from mock import patch\n'), ((8251, 8303), 'mock.patch', 'patch', (['"""watcher_metering.agent.agent.os.environ.get"""'], {}), "('watcher_metering.agent.agent.os.environ.get')\n", (8256, 8303), False, 'from mock import patch\n'), ((9303, 9343), 'mock.patch.object', 'patch.object', (['os._Environ', '"""__setitem__"""'], {}), "(os._Environ, '__setitem__')\n", (9315, 9343), False, 'from mock import patch\n'), ((9349, 9401), 'mock.patch', 'patch', (['"""watcher_metering.agent.agent.os.environ.get"""'], {}), "('watcher_metering.agent.agent.os.environ.get')\n", (9354, 9401), False, 'from mock import patch\n'), ((10625, 10665), 'mock.patch.object', 'patch.object', (['os._Environ', '"""__setitem__"""'], {}), "(os._Environ, '__setitem__')\n", (10637, 10665), False, 'from mock import patch\n'), ((10671, 10723), 'mock.patch', 'patch', (['"""watcher_metering.agent.agent.os.environ.get"""'], {}), "('watcher_metering.agent.agent.os.environ.get')\n", (10676, 10723), False, 'from mock import patch\n'), ((11347, 11387), 'mock.patch.object', 'patch.object', (['os._Environ', '"""__setitem__"""'], {}), "(os._Environ, '__setitem__')\n", (11359, 11387), False, 'from mock import patch\n'), ((11393, 11445), 'mock.patch', 'patch', (['"""watcher_metering.agent.agent.os.environ.get"""'], {}), "('watcher_metering.agent.agent.os.environ.get')\n", (11398, 11445), False, 'from mock import patch\n'), ((12222, 12317), 'mock.patch', 'patch', (['"""watcher_metering.agent.manager.MetricManager.terminated"""'], {'new_callable': 'PropertyMock'}), "('watcher_metering.agent.manager.MetricManager.terminated',\n new_callable=PropertyMock)\n", (12227, 12317), False, 'from mock import patch\n'), ((1541, 1557), 'oslo_config.cfg.ConfigOpts', 'cfg.ConfigOpts', ([], {}), '()\n', (1555, 1557), False, 'from oslo_config import cfg\n'), ((1797, 1837), 'types.MethodType', 'types.MethodType', (['_fake_parse', 'self.conf'], {}), '(_fake_parse, self.conf)\n', (1813, 1837), False, 'import types\n'), ((2961, 2985), 'mock.MagicMock', 'MagicMock', ([], {'autospec': '(True)'}), 
'(autospec=True)\n', (2970, 2985), False, 'from mock import MagicMock\n'), ((3806, 4050), 'watcher_metering.agent.agent.Agent', 'Agent', ([], {'conf': 'self.conf', 'driver_names': 'self.conf.agent.driver_names', 'use_nanoconfig_service': '(False)', 'publisher_endpoint': '"""fake"""', 'nanoconfig_service_endpoint': '""""""', 'nanoconfig_update_endpoint': '""""""', 'nanoconfig_profile': '"""nanoconfig://test_profile"""'}), "(conf=self.conf, driver_names=self.conf.agent.driver_names,\n use_nanoconfig_service=False, publisher_endpoint='fake',\n nanoconfig_service_endpoint='', nanoconfig_update_endpoint='',\n nanoconfig_profile='nanoconfig://test_profile')\n", (3811, 4050), False, 'from watcher_metering.agent.agent import Agent\n'), ((6060, 6229), 'collections.OrderedDict', 'OrderedDict', ([], {'name': '"""dummy.data.puller"""', 'unit': '""""""', 'type_': '""""""', 'value': '(13.37)', 'resource_id': '"""test_hostname"""', 'host': '"""test_hostname"""', 'timestamp': '"""2015-08-04T15:15:45.703542"""'}), "(name='dummy.data.puller', unit='', type_='', value=13.37,\n resource_id='test_hostname', host='test_hostname', timestamp=\n '2015-08-04T15:15:45.703542')\n", (6071, 6229), False, 'from collections import OrderedDict\n'), ((6389, 6420), 'watcher_metering.agent.measurement.Measurement', 'Measurement', ([], {}), '(**measurement_dict)\n', (6400, 6420), False, 'from watcher_metering.agent.measurement import Measurement\n'), ((6596, 6627), 'msgpack.dumps', 'msgpack.dumps', (['measurement_dict'], {}), '(measurement_dict)\n', (6609, 6627), False, 'import msgpack\n'), ((6996, 7019), 'mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (7000, 7019), False, 'from mock import Mock\n'), ((7075, 7098), 'mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (7079, 7098), False, 'from mock import Mock\n'), ((7733, 7756), 'mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (7737, 7756), False, 'from mock import Mock\n'), ((7812, 7835), 'mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (7816, 7835), False, 'from mock import Mock\n'), ((12204, 12215), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (12213, 12215), False, 'from mock import MagicMock\n'), ((12723, 12734), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (12732, 12734), False, 'from mock import MagicMock\n'), ((1642, 1664), 'watcher_metering.tests.agent.agent_fixtures.ConfFixture', 'ConfFixture', (['self.conf'], {}), '(self.conf)\n', (1653, 1664), False, 'from watcher_metering.tests.agent.agent_fixtures import ConfFixture\n'), ((1726, 1766), 'oslo_config.cfg.ConfigOpts._parse_cli_opts', 'cfg.ConfigOpts._parse_cli_opts', (['self', '[]'], {}), '(self, [])\n', (1756, 1766), False, 'from oslo_config import cfg\n'), ((2675, 2703), 'watcher_metering.tests.agent.agent_fixtures.DummyMetricPuller.get_name', 'DummyMetricPuller.get_name', ([], {}), '()\n', (2701, 2703), False, 'from watcher_metering.tests.agent.agent_fixtures import DummyMetricPuller\n'), ((2744, 2771), 'watcher_metering.tests.agent.agent_fixtures.FakeMetricPuller.get_name', 'FakeMetricPuller.get_name', ([], {}), '()\n', (2769, 2771), False, 'from watcher_metering.tests.agent.agent_fixtures import FakeMetricPuller\n'), ((3074, 3151), 'mock.patch', 'patch', (['"""watcher_metering.agent.agent.nanomsg.Socket"""'], {'new': 'self.m_agent_socket'}), "('watcher_metering.agent.agent.nanomsg.Socket', new=self.m_agent_socket)\n", (3079, 3151), False, 'from mock import patch\n'), ((3345, 3381), 
'mock.PropertyMock', 'PropertyMock', ([], {'return_value': '"""TESTING"""'}), "(return_value='TESTING')\n", (3357, 3381), False, 'from mock import PropertyMock\n'), ((3603, 3638), 'mock.MagicMock', 'MagicMock', ([], {'side_effect': '_fake_loader'}), '(side_effect=_fake_loader)\n', (3612, 3638), False, 'from mock import MagicMock\n'), ((4951, 4973), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (4970, 4973), False, 'import operator\n'), ((2060, 2088), 'watcher_metering.tests.agent.agent_fixtures.DummyMetricPuller.get_name', 'DummyMetricPuller.get_name', ([], {}), '()\n', (2086, 2088), False, 'from watcher_metering.tests.agent.agent_fixtures import DummyMetricPuller\n'), ((2427, 2454), 'watcher_metering.tests.agent.agent_fixtures.FakeMetricPuller.get_name', 'FakeMetricPuller.get_name', ([], {}), '()\n', (2452, 2454), False, 'from watcher_metering.tests.agent.agent_fixtures import FakeMetricPuller\n')] |
ndevenish/cctbx_project | mmtbx/bulk_solvent/mosaic.py | 1f1a2627ae20d01d403f367948e7269cef0f0217 | from __future__ import absolute_import, division, print_function
from cctbx.array_family import flex
from scitbx import matrix
import math
from libtbx import adopt_init_args
import scitbx.lbfgs
from mmtbx.bulk_solvent import kbu_refinery
from cctbx import maptbx
import mmtbx.masks
import boost_adaptbx.boost.python as bp
asu_map_ext = bp.import_ext("cctbx_asymmetric_map_ext")
from libtbx import group_args
from mmtbx import bulk_solvent
from mmtbx.ncs import tncs
from collections import OrderedDict
import mmtbx.f_model
import sys
from libtbx.test_utils import approx_equal
from mmtbx import masks
from cctbx.masks import vdw_radii_from_xray_structure
ext = bp.import_ext("mmtbx_masks_ext")
mosaic_ext = bp.import_ext("mmtbx_mosaic_ext")
APPLY_SCALE_K1_TO_FOBS = False
def moving_average(x, n):
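  # Smooth x with a running mean: for each position i the window covers up to
  # n points on either side of i and is truncated at the ends of the sequence.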
r = []
for i, xi in enumerate(x):
s = 0
cntr = 0
for j in range(max(0,i-n), min(i+n+1, len(x))):
s+=x[j]
cntr+=1
s = s/cntr
r.append(s)
return r
# Utilities used by algorithm 2 ------------------------------------------------
class minimizer(object):
def __init__(self, max_iterations, calculator):
adopt_init_args(self, locals())
self.x = self.calculator.x
self.cntr=0
exception_handling_params = scitbx.lbfgs.exception_handling_parameters(
ignore_line_search_failed_step_at_lower_bound=True,
)
self.minimizer = scitbx.lbfgs.run(
target_evaluator=self,
exception_handling_params=exception_handling_params,
termination_params=scitbx.lbfgs.termination_parameters(
max_iterations=max_iterations))
def compute_functional_and_gradients(self):
self.cntr+=1
self.calculator.update_target_and_grads(x=self.x)
t = self.calculator.target()
g = self.calculator.gradients()
#print "step: %4d"%self.cntr, "target:", t, "params:", \
# " ".join(["%10.6f"%i for i in self.x]), math.log(t)
return t,g
class minimizer2(object):
def __init__(self, calculator, min_iterations=0, max_iterations=2000):
adopt_init_args(self, locals())
self.x = self.calculator.x
self.n = self.x.size()
self.cntr=0
def run(self, use_curvatures=0):
self.minimizer = kbu_refinery.lbfgs_run(
target_evaluator=self,
min_iterations=self.min_iterations,
max_iterations=self.max_iterations,
use_curvatures=use_curvatures)
self(requests_f_and_g=True, requests_diag=False)
return self
def __call__(self, requests_f_and_g, requests_diag):
self.cntr+=1
self.calculator.update_target_and_grads(x=self.x)
if (not requests_f_and_g and not requests_diag):
requests_f_and_g = True
requests_diag = True
if (requests_f_and_g):
self.f = self.calculator.target()
self.g = self.calculator.gradients()
self.d = None
if (requests_diag):
self.d = self.calculator.curvatures()
#assert self.d.all_ne(0)
if(self.d.all_eq(0)): self.d=None
else:
self.d = 1 / self.d
#print "step: %4d"%self.cntr, "target:", self.f, "params:", \
# " ".join(["%10.6f"%i for i in self.x]) #, math.log(self.f)
return self.x, self.f, self.g, self.d
class tg(object):
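  # Target/gradient calculator used by algorithm_2: least-squares fit of
  # i_model = |sum_n x_n * F_n|^2 to i_obs, with gradients (and optional
  # curvatures) with respect to the scales x_n. The heavy lifting is done by
  # the C++ helper mosaic_ext.alg2_tg; the commented-out Python code in
  # update_target_and_grads below is the reference implementation.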
def __init__(self, x, i_obs, F, use_curvatures):
self.x = x
self.i_obs = i_obs
self.F = F
self.t = None
self.g = None
self.d = None
    # Sort so that sums run from small to large values, to prevent loss of precision
s = flex.sort_permutation(self.i_obs.data())
self.i_obs = self.i_obs.select(s)
self.F = [f.select(s) for f in self.F]
#
self.sum_i_obs = flex.sum(self.i_obs.data()) # needed for Python version
self.use_curvatures=use_curvatures
self.tgo = mosaic_ext.alg2_tg(
F = [f.data() for f in self.F],
i_obs = self.i_obs.data())
self.update_target_and_grads(x=x)
def update(self, x):
self.update_target_and_grads(x = x)
def update_target_and_grads(self, x):
self.x = x
self.tgo.update(self.x)
self.t = self.tgo.target()
self.g = self.tgo.gradient()
#
# Reference implementation in Python
# s = 1 #180/math.pi
# i_model = flex.double(self.i_obs.data().size(),0)
# for n, kn in enumerate(self.x):
# for m, km in enumerate(self.x):
# tmp = self.F[n].data()*flex.conj(self.F[m].data())
# i_model += kn*km*flex.real(tmp)
# #pn = self.F[n].phases().data()*s
# #pm = self.F[m].phases().data()*s
# #Fn = flex.abs(self.F[n].data())
# #Fm = flex.abs(self.F[m].data())
# #i_model += kn*km*Fn*Fm*flex.cos(pn-pm)
# diff = i_model - self.i_obs.data()
# #print (flex.min(diff), flex.max(diff))
# t = flex.sum(diff*diff)/4
# #
# g = flex.double()
# for j in range(len(self.F)):
# tmp = flex.double(self.i_obs.data().size(),0)
# for m, km in enumerate(self.x):
# tmp += km * flex.real( self.F[j].data()*flex.conj(self.F[m].data()) )
# #pj = self.F[j].phases().data()*s
# #pm = self.F[m].phases().data()*s
# #Fj = flex.abs(self.F[j].data())
# #Fm = flex.abs(self.F[m].data())
# #tmp += km * Fj*Fm*flex.cos(pj-pm)
# g.append(flex.sum(diff*tmp))
# self.t = t/self.sum_i_obs
# self.g = g/self.sum_i_obs
# #print (self.t,t1)
# #print (list(self.g))
# #print (list(g1))
# #print ()
# #assert approx_equal(self.t, t1, 5)
# #assert approx_equal(self.g, g1, 1.e-6)
#
if self.use_curvatures:
d = flex.double()
for j in range(len(self.F)):
tmp1 = flex.double(self.i_obs.data().size(),0)
tmp2 = flex.double(self.i_obs.data().size(),0)
for m, km in enumerate(self.x):
zz = flex.real( self.F[j].data()*flex.conj(self.F[m].data()) )
tmp1 += km * zz
tmp2 += zz
#pj = self.F[j].phases().data()*s
#pm = self.F[m].phases().data()*s
#Fj = flex.abs(self.F[j].data())
#Fm = flex.abs(self.F[m].data())
#tmp += km * Fj*Fm*flex.cos(pj-pm)
d.append(flex.sum(tmp1*tmp1 + tmp2))
self.d=d
def target(self): return self.t
def gradients(self): return self.g
def gradient(self): return self.gradients()
def curvatures(self): return self.d/self.sum_i_obs
#-------------------------------------------------------------------------------
def write_map_file(crystal_symmetry, map_data, file_name):
from iotbx import mrcfile
mrcfile.write_ccp4_map(
file_name = file_name,
unit_cell = crystal_symmetry.unit_cell(),
space_group = crystal_symmetry.space_group(),
map_data = map_data,
labels = flex.std_string([""]))
class refinery(object):
def __init__(self, fmodel, fv, alg, anomaly=True, log = sys.stdout):
assert alg in ["alg0", "alg2", "alg4", None]
self.log = log
self.f_obs = fmodel.f_obs()
self.r_free_flags = fmodel.r_free_flags()
k_mask_overall = fmodel.k_masks()[0]
self.bin_selections = fmodel.bin_selections
#
k_total = fmodel.k_total()
self.f_calc = fmodel.f_model()
    self.F = [self.f_calc.deep_copy()] + list(fv.keys()) # list() so this also works on Python 3
#
n_zones_start = len(self.F)
r4_start = fmodel.r_work4()
for it in range(5):
#
if(it>0):
r4 = self.fmodel.r_work4()
print(r4_start, r4, abs(round(r4-r4_start,4)))
if(abs(round(r4-r4_start,4))<1.e-4):
break
r4_start = r4
#if(it>0 and n_zones_start == len(self.F)): break
#
#if it>0:
# self.F = [self.fmodel.f_model().deep_copy()] + self.F[1:]
self._print("cycle: %2d"%it)
self._print(" volumes: "+" ".join([str(fv[f]) for f in self.F[1:]]))
f_obs = self.f_obs.deep_copy()
if it==0: k_total = fmodel.k_total()
else: k_total = self.fmodel.k_total()
i_obs = f_obs.customized_copy(data = f_obs.data()*f_obs.data())
K_MASKS = OrderedDict()
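      # Per resolution bin: K_MASKS[sel] = [k_masks, k_masks_plus], where
      # k_masks_plus adds the bin mean of the overall k_mask to every scale
      # except the first.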
self.bin_selections = self.f_obs.log_binning(
n_reflections_in_lowest_resolution_bin = 100*len(self.F))
for i_bin, sel in enumerate(self.bin_selections):
d_max, d_min = f_obs.select(sel).d_max_min()
if d_max<3: continue
bin = " bin %2d: %5.2f-%-5.2f: "%(i_bin, d_max, d_min)
F = [f.select(sel) for f in self.F]
k_total_sel = k_total.select(sel)
F_scaled = [F[0].deep_copy()]+[f.customized_copy(data=f.data()*k_total_sel) for f in F[1:]]
#
# XXX WHY NOT THIS INSTEAD (INVESTIGATE LATER)?
#F_scaled = [f.customized_copy(data=f.data()*k_total_sel) for f in F]
#r00=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, F[0].data()*k_total_sel)
# algorithm_0
if(alg=="alg0"):
k_masks = algorithm_0(
f_obs = f_obs.select(sel),
F = F_scaled,
kt=k_total_sel)
#fd = flex.complex_double(F[0].data().size())
#for i,f in enumerate(F):
# fd = fd + f.data()*k_masks[i]
#r0=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, fd*k_total_sel)
# algorithm_4
if(alg=="alg4"):
if it==0: phase_source = fmodel.f_model().select(sel)
else: phase_source = self.fmodel.f_model().select(sel)
k_masks = algorithm_4(
f_obs = self.f_obs.select(sel),
F = F_scaled,
auto_converge_eps = 0.0001,
phase_source = phase_source)
#fd = flex.complex_double(F[0].data().size())
#for i,f in enumerate(F):
# fd = fd + f.data()*k_masks[i]
#r4=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, fd*k_total_sel)
# algorithm_2
if(alg=="alg2"):
k_masks = algorithm_2(
i_obs = i_obs.select(sel),
F = F_scaled,
x = self._get_x_init(i_bin),
use_curvatures = False)
#fd = flex.complex_double(F[0].data().size())
#for i,f in enumerate(F):
# fd = fd + f.data()*k_masks[i]
#r2=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, fd*k_total_sel)
#self._print(bin+" ".join(["%6.2f"%k for k in k_masks])+" %6.4f %6.4f %6.4f %6.4f"%(r00,r0,r4, r2))
k_mean = flex.mean(k_mask_overall.select(sel))
k_masks_plus = [k_masks[0]]+[k_mean + k for k in k_masks[1:]]
self._print(bin+" ".join(["%6.2f"%k for k in k_masks_plus]) )
K_MASKS[sel] = [k_masks, k_masks_plus]
#
      if(len(self.F)==2): break # stop and fall back on using the largest mask only
#
#
#print()
#self.update_k_masks(K_MASKS)
#for k_masks in K_MASKS.values():
# self._print(bin+" ".join(["%6.2f"%k for k in k_masks]))
#
f_calc_data = self.f_calc.data().deep_copy()
f_bulk_data = flex.complex_double(fmodel.f_calc().data().size(), 0)
for sel, k_masks in zip(K_MASKS.keys(), K_MASKS.values()):
k_masks = k_masks[0] # 1 is shifted!
f_bulk_data_ = flex.complex_double(sel.count(True), 0)
for i_mask, k_mask in enumerate(k_masks):
if i_mask==0:
f_calc_data = f_calc_data.set_selected(sel,
f_calc_data.select(sel)*k_mask)
continue
f_bulk_data_ += self.F[i_mask].data().select(sel)*k_mask
f_bulk_data = f_bulk_data.set_selected(sel,f_bulk_data_)
#
self.update_F(K_MASKS)
f_bulk = fmodel.f_calc().customized_copy(data = f_bulk_data)
if(len(self.F)==2):
self.fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
r_free_flags = self.r_free_flags,
f_calc = fmodel.f_calc(),
f_mask = self.F[1],
k_mask = flex.double(f_obs.data().size(),1)
)
self.fmodel.update_all_scales(remove_outliers=False,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
else:
self.fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
r_free_flags = self.r_free_flags,
#f_calc = self.f_obs.customized_copy(data = f_calc_data),
f_calc = self.f_calc,
bin_selections = self.bin_selections,
f_mask = f_bulk,
k_mask = flex.double(f_obs.data().size(),1)
)
self.fmodel.update_all_scales(remove_outliers=False,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
#
self.fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
r_free_flags = self.r_free_flags,
#f_calc = self.f_obs.customized_copy(data = f_calc_data),
f_calc = self.fmodel.f_calc(),
f_mask = self.fmodel.f_bulk(),
k_mask = flex.double(f_obs.data().size(),1)
)
self.fmodel.update_all_scales(remove_outliers=False,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
self._print(self.fmodel.r_factors(prefix=" "))
#self._print(self.fmodel.r_factors(prefix=" "))
self.mc = self.fmodel.electron_density_map().map_coefficients(
map_type = "mFobs-DFmodel",
isotropize = True,
exclude_free_r_reflections = False)
#def update_k_masks(self, K_MASKS):
# tmp = []
# for i_mask, F in enumerate(self.F):
# k_masks = [k_masks_bin[i_mask] for k_masks_bin in K_MASKS.values()]
# found = False
# for i_bin, k_masks_bin in enumerate(K_MASKS.values()):
# if(not found and k_masks_bin[i_mask]<=0.009):
# found = True
# K_MASKS.values()[i_bin][i_mask]=0
# elif found:
# K_MASKS.values()[i_bin][i_mask]=0
def _print(self, m):
if(self.log is not None):
print(m, file=self.log)
def update_F(self, K_MASKS):
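    # Always keep F[0]; keep any other mask whose smoothed (window=2) shifted
    # k_mask curve starts at or above 0.03 across the bins, and drop the rest.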
tmp = []
for i_mask, F in enumerate(self.F):
k_masks = [k_masks_bin[1][i_mask] for k_masks_bin in K_MASKS.values()]
if(i_mask == 0): tmp.append(self.F[0])
elif moving_average(k_masks,2)[0]>=0.03: tmp.append(F)
self.F = tmp[:]
def _get_x_init(self, i_bin):
return flex.double([1] + [1]*len(self.F[1:]))
#k_maks1_init = 0.35 - i_bin*0.35/len(self.bin_selections)
#x = flex.double([1,k_maks1_init])
#x.extend( flex.double(len(self.F)-2, 0.1))
#return x
def get_f_mask(xrs, ma, step, option = 2, r_shrink = None, r_sol = None):
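  # Compute bulk-solvent mask structure factors for ma; `option` selects one of
  # several alternative back-ends that should give (nearly) equivalent results,
  # all parameterized by the same solvent and shrink-truncation radii.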
crystal_gridding = maptbx.crystal_gridding(
unit_cell = xrs.unit_cell(),
space_group_info = xrs.space_group_info(),
symmetry_flags = maptbx.use_space_group_symmetry,
step = step)
n_real = crystal_gridding.n_real()
atom_radii = vdw_radii_from_xray_structure(xray_structure = xrs)
mask_params = masks.mask_master_params.extract()
grid_step_factor = ma.d_min()/step
if(r_shrink is not None): mask_params.shrink_truncation_radius = r_shrink
if(r_sol is not None): mask_params.solvent_radius = r_sol
mask_params.grid_step_factor = grid_step_factor
# 1
if(option==1):
asu_mask = ext.atom_mask(
unit_cell = xrs.unit_cell(),
group = xrs.space_group(),
resolution = ma.d_min(),
grid_step_factor = grid_step_factor,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius)
asu_mask.compute(xrs.sites_frac(), atom_radii)
fm_asu = asu_mask.structure_factors(ma.indices())
f_mask = ma.set().array(data = fm_asu)
# 2
elif(option==2):
asu_mask = ext.atom_mask(
unit_cell = xrs.unit_cell(),
space_group = xrs.space_group(),
gridding_n_real = n_real,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius)
asu_mask.compute(xrs.sites_frac(), atom_radii)
fm_asu = asu_mask.structure_factors(ma.indices())
f_mask = ma.set().array(data = fm_asu)
# 3
elif(option==3):
mask_p1 = mmtbx.masks.mask_from_xray_structure(
xray_structure = xrs,
p1 = True,
for_structure_factors = True,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius,
n_real = n_real,
in_asu = False).mask_data
maptbx.unpad_in_place(map=mask_p1)
mask = asu_map_ext.asymmetric_map(
xrs.crystal_symmetry().space_group().type(), mask_p1).data()
f_mask = ma.structure_factors_from_asu_map(
asu_map_data = mask, n_real = n_real)
# 4
elif(option==4):
f_mask = masks.bulk_solvent(
xray_structure = xrs,
ignore_zero_occupancy_atoms = False,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius,
ignore_hydrogen_atoms = False,
grid_step = step,
atom_radii = atom_radii).structure_factors(
miller_set = ma)
elif(option==5):
o = mmtbx.masks.bulk_solvent(
xray_structure = xrs,
ignore_zero_occupancy_atoms = False,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius,
ignore_hydrogen_atoms = False,
gridding_n_real = n_real,
atom_radii = atom_radii)
assert approx_equal(n_real, o.data.accessor().all())
f_mask = o.structure_factors(ma)
elif(option==6):
# XXX No control over n_real, so results with others don't match
mask_manager = masks.manager(
miller_array = ma,
miller_array_twin = None,
mask_params = mask_params)
f_mask = mask_manager.shell_f_masks(xray_structure=xrs, force_update=True)[0]
else: assert 0
#
return f_mask
def filter_mask(mask_p1, volume_cutoff, crystal_symmetry,
for_structure_factors = False):
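  # Drop connected solvent regions smaller than volume_cutoff (in A^3) from a
  # 0/1 mask; the result is a binary mask, optionally divided by the space
  # group order for use in structure-factor calculations.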
co = maptbx.connectivity(
map_data = mask_p1,
threshold = 0.01,
preprocess_against_shallow = True,
wrapping = True)
mi, ma = flex.min(mask_p1), flex.max(mask_p1)
print (mask_p1.size(), (mask_p1<0).count(True))
assert mi == 0, mi
assert ma == 1, ma
a,b,c = crystal_symmetry.unit_cell().parameters()[:3]
na,nb,nc = mask_p1.accessor().all()
step = flex.mean(flex.double([a/na, b/nb, c/nc]))
if(crystal_symmetry.space_group_number() != 1):
co.merge_symmetry_related_regions(space_group=crystal_symmetry.space_group())
conn = co.result().as_double()
z = zip(co.regions(),range(0,co.regions().size()))
sorted_by_volume = sorted(z, key=lambda x: x[0], reverse=True)
for i_seq, p in enumerate(sorted_by_volume):
v, i = p
if(i==0): continue # skip macromolecule
# skip small volume
volume = v*step**3
if volume < volume_cutoff:
conn = conn.set_selected(conn==i, 0)
conn = conn.set_selected(conn>0, 1)
if for_structure_factors:
conn = conn / crystal_symmetry.space_group().order_z()
return conn
class mosaic_f_mask(object):
def __init__(self,
xray_structure,
step,
volume_cutoff=None,
mean_diff_map_threshold=None,
compute_whole=False,
preprocess_against_shallow=True,
largest_only=False,
wrapping=True,
f_obs=None,
r_sol=1.1,
r_shrink=0.9,
f_calc=None,
log = None,
write_masks=False):
adopt_init_args(self, locals())
#
self.dsel = f_obs.d_spacings().data()>=0 # XXX WHY????????????
self.miller_array = f_obs.select(self.dsel)
#
# To avoid "Miller index not in structure factor map" crash
step = min(step, self.miller_array.d_min()/3)
#
self.crystal_symmetry = self.xray_structure.crystal_symmetry()
# compute mask in p1 (via ASU)
self.crystal_gridding = maptbx.crystal_gridding(
unit_cell = xray_structure.unit_cell(),
space_group_info = xray_structure.space_group_info(),
symmetry_flags = maptbx.use_space_group_symmetry,
step = step)
self.n_real = self.crystal_gridding.n_real()
# XXX Where do we want to deal with H and occ==0?
mask_p1 = mmtbx.masks.mask_from_xray_structure(
xray_structure = xray_structure,
p1 = True,
for_structure_factors = True,
solvent_radius = r_sol,
shrink_truncation_radius = r_shrink,
n_real = self.n_real,
in_asu = False).mask_data
maptbx.unpad_in_place(map=mask_p1)
self.f_mask_whole = None
if(compute_whole):
mask = asu_map_ext.asymmetric_map(
xray_structure.crystal_symmetry().space_group().type(), mask_p1).data()
self.f_mask_whole = self.miller_array.structure_factors_from_asu_map(
asu_map_data = mask, n_real = self.n_real)
self.solvent_content = 100.*mask_p1.count(1)/mask_p1.size()
if(write_masks):
write_map_file(crystal_symmetry=xray_structure.crystal_symmetry(),
map_data=mask_p1, file_name="mask_whole.mrc")
# conn analysis
co = maptbx.connectivity(
map_data = mask_p1,
threshold = 0.01,
preprocess_against_shallow = preprocess_against_shallow,
wrapping = wrapping)
co.merge_symmetry_related_regions(space_group=xray_structure.space_group())
del mask_p1
self.conn = co.result().as_double()
z = zip(co.regions(),range(0,co.regions().size()))
sorted_by_volume = sorted(z, key=lambda x: x[0], reverse=True)
#
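    # f_mask_data_0 accumulates only the large regions (>= 1% of the unit
    # cell); f_mask_data accumulates every region kept after the volume and
    # mean difference-map filters.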
f_mask_data_0 = flex.complex_double(f_obs.data().size(), 0)
f_mask_data = flex.complex_double(f_obs.data().size(), 0)
self.FV = OrderedDict()
self.mc = None
diff_map = None
mean_diff_map = None
self.regions = OrderedDict()
self.f_mask_0 = None
self.f_mask = None
#
if(log is not None):
print(" # volume_p1 uc(%) mFo-DFc: min,max,mean,sd", file=log)
#
for i_seq, p in enumerate(sorted_by_volume):
v, i = p
# skip macromolecule
if(i==0): continue
# skip small volume
volume = v*step**3
uc_fraction = v*100./self.conn.size()
if(volume_cutoff is not None):
if volume < volume_cutoff: continue
selection = self.conn==i
mask_i_asu = self.compute_i_mask_asu(selection = selection, volume = volume)
volume_asu = (mask_i_asu>0).count(True)*step**3
if(uc_fraction >= 1):
f_mask_i = self.compute_f_mask_i(mask_i_asu)
f_mask_data_0 += f_mask_i.data()
elif(largest_only): break
if(uc_fraction < 1 and diff_map is None):
diff_map = self.compute_diff_map(f_mask_data = f_mask_data_0)
mi,ma,me,sd = None,None,None,None
if(diff_map is not None):
blob = diff_map.select(selection.iselection())
mean_diff_map = flex.mean(diff_map.select(selection.iselection()))
mi,ma,me = flex.min(blob), flex.max(blob), flex.mean(blob)
sd = blob.sample_standard_deviation()
if(log is not None):
print("%3d"%i_seq,"%12.3f"%volume, "%8.4f"%round(uc_fraction,4),
"%7s"%str(None) if diff_map is None else "%7.3f %7.3f %7.3f %7.3f"%(
mi,ma,me,sd), file=log)
if(mean_diff_map_threshold is not None and
mean_diff_map is not None and mean_diff_map<=mean_diff_map_threshold):
continue
self.regions[i_seq] = group_args(
id = i,
i_seq = i_seq,
volume = volume,
uc_fraction = uc_fraction,
diff_map = group_args(mi=mi, ma=ma, me=me, sd=sd))
f_mask_i = self.compute_f_mask_i(mask_i_asu)
f_mask_data += f_mask_i.data()
self.FV[f_mask_i] = [round(volume, 3), round(uc_fraction,1)]
#
self.f_mask_0 = f_obs.customized_copy(data = f_mask_data_0)
self.f_mask = f_obs.customized_copy(data = f_mask_data)
self.do_mosaic = False
self.n_regions = len(self.FV.keys())
if(self.n_regions>1):
self.do_mosaic = True
def compute_f_mask_i(self, mask_i_asu):
f_mask_i = self.miller_array.structure_factors_from_asu_map(
asu_map_data = mask_i_asu, n_real = self.n_real)
data = flex.complex_double(self.dsel.size(), 0)
data = data.set_selected(self.dsel, f_mask_i.data())
return self.f_obs.set().array(data = data)
def compute_diff_map(self, f_mask_data):
if(self.f_calc is None): return None
f_mask = self.f_obs.customized_copy(data = f_mask_data)
fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
f_calc = self.f_calc,
f_mask = f_mask)
fmodel = fmodel.select(self.dsel)
fmodel.update_all_scales(remove_outliers=True,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
self.mc = fmodel.electron_density_map().map_coefficients(
map_type = "mFobs-DFmodel",
isotropize = True,
exclude_free_r_reflections = False)
fft_map = self.mc.fft_map(crystal_gridding = self.crystal_gridding)
fft_map.apply_sigma_scaling()
return fft_map.real_map_unpadded()
def compute_i_mask_asu(self, selection, volume):
mask_i = flex.double(flex.grid(self.n_real), 0)
mask_i = mask_i.set_selected(selection, 1)
if(self.write_masks):
write_map_file(
crystal_symmetry = self.crystal_symmetry,
map_data = mask_i,
file_name = "mask_%s.mrc"%str(round(volume,3)))
tmp = asu_map_ext.asymmetric_map(
self.crystal_symmetry.space_group().type(), mask_i).data()
return tmp
def algorithm_0(f_obs, F, kt):
"""
Grid search
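  One mask at a time: scan the trial k_mask over [-1, 1) in 1e-4 steps
  (k_mask_and_k_overall_grid_search) and fold the scaled mask and overall
  scale into the running model before moving on to the next mask.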
"""
fc, f_masks = F[0], F[1:]
k_mask_trial_range=[]
s = -1
while s<1:
k_mask_trial_range.append(s)
s+=0.0001
r = []
fc_data = fc.data()
for i, f_mask in enumerate(f_masks):
#print("mask ",i)
assert f_obs.data().size() == fc.data().size()
assert f_mask.data().size() == fc.data().size()
#print (bulk_solvent.r_factor(f_obs.data(),fc_data))
kmask_, k_ = \
bulk_solvent.k_mask_and_k_overall_grid_search(
f_obs.data()*kt,
fc_data*kt,
f_mask.data()*kt,
flex.double(k_mask_trial_range),
flex.bool(fc.data().size(),True))
r.append(kmask_)
fc_data += fc_data*k_ + kmask_*f_mask.data()
#print (bulk_solvent.r_factor(f_obs.data(),fc_data + kmask_*f_mask.data(),k_))
r = [1,]+r
return r
def algorithm_2(i_obs, F, x, use_curvatures=True, macro_cycles=10):
"""
Unphased one-step search
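  Refine all scales x_n at once by minimizing the least-squares difference
  between i_obs and |sum_n x_n * F_n|^2 (the tg calculator), using LBFGS when
  curvatures are requested and a bounded minimizer otherwise.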
"""
calculator = tg(i_obs = i_obs, F=F, x = x, use_curvatures=use_curvatures)
for it in range(macro_cycles):
if(use_curvatures):
m = minimizer(max_iterations=100, calculator=calculator)
else:
#upper = flex.double([1.1] + [1]*(x.size()-1))
#lower = flex.double([0.9] + [-1]*(x.size()-1))
upper = flex.double([1.1] + [5]*(x.size()-1))
lower = flex.double([0.9] + [-5]*(x.size()-1))
#upper = flex.double([10] + [5]*(x.size()-1))
#lower = flex.double([0.1] + [-5]*(x.size()-1))
#upper = flex.double([10] + [0.65]*(x.size()-1))
#lower = flex.double([0.1] + [0]*(x.size()-1))
#upper = flex.double([1] + [0.65]*(x.size()-1))
#lower = flex.double([1] + [0]*(x.size()-1))
#upper = flex.double([1] + [5.65]*(x.size()-1))
#lower = flex.double([1] + [-5]*(x.size()-1))
m = tncs.minimizer(
potential = calculator,
use_bounds = 2,
lower_bound = lower,
upper_bound = upper,
initial_values = x).run()
calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
if(use_curvatures):
for it in range(10):
m = minimizer(max_iterations=100, calculator=calculator)
calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
m = minimizer2(max_iterations=100, calculator=calculator).run(use_curvatures=True)
calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
return m.x
def algorithm_3(i_obs, fc, f_masks):
"""
Unphased two-step search
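  First solve a linear least-squares system for the pairwise products
  X_nm ~ k_n*k_m, then recover the individual scales k_j from those products
  via logarithms (formula (19)).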
"""
F = [fc]+f_masks
Gnm = []
cs = {}
cntr=0
nm=[]
# Compute and store Gnm
for n, Fn in enumerate(F):
for m, Fm in enumerate(F):
if m < n:
continue
Gnm.append( flex.real( Fn.data()*flex.conj(Fm.data()) ) )
cs[(n,m)] = cntr
cntr+=1
nm.append((n,m))
# Keep track of indices for "upper triangular matrix vs full"
for k,v in zip(list(cs.keys()), list(cs.values())):
i,j=k
if i==j: continue
else: cs[(j,i)]=v
# Generate and solve system Ax=b, x = A_1*b
A = []
b = []
for u, Gnm_u in enumerate(Gnm):
for v, Gnm_v in enumerate(Gnm):
scale = 2
n,m=nm[v]
if n==m: scale=1
A.append( flex.sum(Gnm_u*Gnm_v)*scale )
b.append( flex.sum(Gnm_u * i_obs.data()) )
A = matrix.sqr(A)
A_1 = A.inverse()
b = matrix.col(b)
x = A_1 * b
# Expand Xmn from solution x
Xmn = []
for n, Fn in enumerate(F):
rows = []
for m, Fm in enumerate(F):
x_ = x[cs[(n,m)]]
rows.append(x_)
Xmn.append(rows)
# Do formula (19)
lnK = []
for j, Fj in enumerate(F):
t1 = flex.sum( flex.log( flex.double(Xmn[j]) ) )
t2 = 0
for n, Fn in enumerate(F):
for m, Fm in enumerate(F):
t2 += math.log(Xmn[n][m])
t2 = t2 / (2*len(F))
lnK.append( 1/len(F)*(t1-t2) )
return [math.exp(x) for x in lnK]
def algorithm_4(f_obs, F, phase_source, max_cycles=100, auto_converge_eps=1.e-7,
use_cpp=True):
"""
Phased simultaneous search (alg4)
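  Iterate: transfer the phases of phase_source onto f_obs, solve the normal
  equations for all scales x_n at once, rebuild the phase source from the
  scaled model, and stop once the scales change by less than
  auto_converge_eps (or max_cycles is reached).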
"""
fc, f_masks = F[0], F[1:]
fc = fc.deep_copy()
F = [fc]+F[1:]
# C++ version
if(use_cpp):
return mosaic_ext.alg4(
[f.data() for f in F],
f_obs.data(),
phase_source.data(),
max_cycles,
auto_converge_eps)
# Python version (1.2-3 times slower, but much more readable!)
cntr = 0
x_prev = None
while True:
f_obs_cmpl = f_obs.phase_transfer(phase_source = phase_source)
A = []
b = []
for j, Fj in enumerate(F):
A_rows = []
for n, Fn in enumerate(F):
Gjn = flex.real( Fj.data()*flex.conj(Fn.data()) )
A_rows.append( flex.sum(Gjn) )
Hj = flex.real( Fj.data()*flex.conj(f_obs_cmpl.data()) )
b.append(flex.sum(Hj))
A.extend(A_rows)
A = matrix.sqr(A)
A_1 = A.inverse()
b = matrix.col(b)
x = A_1 * b
#
fc_d = flex.complex_double(phase_source.indices().size(), 0)
for i, f in enumerate(F):
fc_d += f.data()*x[i]
phase_source = phase_source.customized_copy(data = fc_d)
x_ = x[:]
#
cntr+=1
if(cntr>max_cycles): break
if(x_prev is None): x_prev = x_[:]
else:
max_diff = flex.max(flex.abs(flex.double(x_prev)-flex.double(x_)))
if(max_diff<=auto_converge_eps): break
x_prev = x_[:]
return x_
| [((336, 377), 'boost_adaptbx.boost.python.import_ext', 'bp.import_ext', (['"""cctbx_asymmetric_map_ext"""'], {}), "('cctbx_asymmetric_map_ext')\n", (349, 377), True, 'import boost_adaptbx.boost.python as bp\n'), ((662, 694), 'boost_adaptbx.boost.python.import_ext', 'bp.import_ext', (['"""mmtbx_masks_ext"""'], {}), "('mmtbx_masks_ext')\n", (675, 694), True, 'import boost_adaptbx.boost.python as bp\n'), ((708, 741), 'boost_adaptbx.boost.python.import_ext', 'bp.import_ext', (['"""mmtbx_mosaic_ext"""'], {}), "('mmtbx_mosaic_ext')\n", (721, 741), True, 'import boost_adaptbx.boost.python as bp\n'), ((14610, 14659), 'cctbx.masks.vdw_radii_from_xray_structure', 'vdw_radii_from_xray_structure', ([], {'xray_structure': 'xrs'}), '(xray_structure=xrs)\n', (14639, 14659), False, 'from cctbx.masks import vdw_radii_from_xray_structure\n'), ((14678, 14712), 'mmtbx.masks.mask_master_params.extract', 'masks.mask_master_params.extract', ([], {}), '()\n', (14710, 14712), False, 'from mmtbx import masks\n'), ((18028, 18133), 'cctbx.maptbx.connectivity', 'maptbx.connectivity', ([], {'map_data': 'mask_p1', 'threshold': '(0.01)', 'preprocess_against_shallow': '(True)', 'wrapping': '(True)'}), '(map_data=mask_p1, threshold=0.01,\n preprocess_against_shallow=True, wrapping=True)\n', (18047, 18133), False, 'from cctbx import maptbx\n'), ((29045, 29058), 'scitbx.matrix.sqr', 'matrix.sqr', (['A'], {}), '(A)\n', (29055, 29058), False, 'from scitbx import matrix\n'), ((29085, 29098), 'scitbx.matrix.col', 'matrix.col', (['b'], {}), '(b)\n', (29095, 29098), False, 'from scitbx import matrix\n'), ((2186, 2344), 'mmtbx.bulk_solvent.kbu_refinery.lbfgs_run', 'kbu_refinery.lbfgs_run', ([], {'target_evaluator': 'self', 'min_iterations': 'self.min_iterations', 'max_iterations': 'self.max_iterations', 'use_curvatures': 'use_curvatures'}), '(target_evaluator=self, min_iterations=self.\n min_iterations, max_iterations=self.max_iterations, use_curvatures=\n use_curvatures)\n', (2208, 2344), False, 'from mmtbx.bulk_solvent import kbu_refinery\n'), ((18219, 18236), 'cctbx.array_family.flex.min', 'flex.min', (['mask_p1'], {}), '(mask_p1)\n', (18227, 18236), False, 'from cctbx.array_family import flex\n'), ((18238, 18255), 'cctbx.array_family.flex.max', 'flex.max', (['mask_p1'], {}), '(mask_p1)\n', (18246, 18255), False, 'from cctbx.array_family import flex\n'), ((18464, 18501), 'cctbx.array_family.flex.double', 'flex.double', (['[a / na, b / nb, c / nc]'], {}), '([a / na, b / nb, c / nc])\n', (18475, 18501), False, 'from cctbx.array_family import flex\n'), ((20753, 20787), 'cctbx.maptbx.unpad_in_place', 'maptbx.unpad_in_place', ([], {'map': 'mask_p1'}), '(map=mask_p1)\n', (20774, 20787), False, 'from cctbx import maptbx\n'), ((21329, 21460), 'cctbx.maptbx.connectivity', 'maptbx.connectivity', ([], {'map_data': 'mask_p1', 'threshold': '(0.01)', 'preprocess_against_shallow': 'preprocess_against_shallow', 'wrapping': 'wrapping'}), '(map_data=mask_p1, threshold=0.01,\n preprocess_against_shallow=preprocess_against_shallow, wrapping=wrapping)\n', (21348, 21460), False, 'from cctbx import maptbx\n'), ((21949, 21962), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (21960, 21962), False, 'from collections import OrderedDict\n'), ((22046, 22059), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (22057, 22059), False, 'from collections import OrderedDict\n'), ((29588, 29599), 'math.exp', 'math.exp', (['x'], {}), '(x)\n', (29596, 29599), False, 'import math\n'), ((30517, 30530), 'scitbx.matrix.sqr', 'matrix.sqr', (['A'], 
{}), '(A)\n', (30527, 30530), False, 'from scitbx import matrix\n'), ((30561, 30574), 'scitbx.matrix.col', 'matrix.col', (['b'], {}), '(b)\n', (30571, 30574), False, 'from scitbx import matrix\n'), ((5390, 5403), 'cctbx.array_family.flex.double', 'flex.double', ([], {}), '()\n', (5401, 5403), False, 'from cctbx.array_family import flex\n'), ((6530, 6551), 'cctbx.array_family.flex.std_string', 'flex.std_string', (["['']"], {}), "([''])\n", (6545, 6551), False, 'from cctbx.array_family import flex\n'), ((7815, 7828), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7826, 7828), False, 'from collections import OrderedDict\n'), ((25382, 25404), 'cctbx.array_family.flex.grid', 'flex.grid', (['self.n_real'], {}), '(self.n_real)\n', (25391, 25404), False, 'from cctbx.array_family import flex\n'), ((26352, 26383), 'cctbx.array_family.flex.double', 'flex.double', (['k_mask_trial_range'], {}), '(k_mask_trial_range)\n', (26363, 26383), False, 'from cctbx.array_family import flex\n'), ((16378, 16412), 'cctbx.maptbx.unpad_in_place', 'maptbx.unpad_in_place', ([], {'map': 'mask_p1'}), '(map=mask_p1)\n', (16399, 16412), False, 'from cctbx import maptbx\n'), ((29385, 29404), 'cctbx.array_family.flex.double', 'flex.double', (['Xmn[j]'], {}), '(Xmn[j])\n', (29396, 29404), False, 'from cctbx.array_family import flex\n'), ((29498, 29517), 'math.log', 'math.log', (['Xmn[n][m]'], {}), '(Xmn[n][m])\n', (29506, 29517), False, 'import math\n'), ((30472, 30484), 'cctbx.array_family.flex.sum', 'flex.sum', (['Hj'], {}), '(Hj)\n', (30480, 30484), False, 'from cctbx.array_family import flex\n'), ((5945, 5973), 'cctbx.array_family.flex.sum', 'flex.sum', (['(tmp1 * tmp1 + tmp2)'], {}), '(tmp1 * tmp1 + tmp2)\n', (5953, 5973), False, 'from cctbx.array_family import flex\n'), ((23178, 23192), 'cctbx.array_family.flex.min', 'flex.min', (['blob'], {}), '(blob)\n', (23186, 23192), False, 'from cctbx.array_family import flex\n'), ((23194, 23208), 'cctbx.array_family.flex.max', 'flex.max', (['blob'], {}), '(blob)\n', (23202, 23208), False, 'from cctbx.array_family import flex\n'), ((23210, 23225), 'cctbx.array_family.flex.mean', 'flex.mean', (['blob'], {}), '(blob)\n', (23219, 23225), False, 'from cctbx.array_family import flex\n'), ((23821, 23859), 'libtbx.group_args', 'group_args', ([], {'mi': 'mi', 'ma': 'ma', 'me': 'me', 'sd': 'sd'}), '(mi=mi, ma=ma, me=me, sd=sd)\n', (23831, 23859), False, 'from libtbx import group_args\n'), ((27568, 27678), 'mmtbx.ncs.tncs.minimizer', 'tncs.minimizer', ([], {'potential': 'calculator', 'use_bounds': '(2)', 'lower_bound': 'lower', 'upper_bound': 'upper', 'initial_values': 'x'}), '(potential=calculator, use_bounds=2, lower_bound=lower,\n upper_bound=upper, initial_values=x)\n', (27582, 27678), False, 'from mmtbx.ncs import tncs\n'), ((28962, 28985), 'cctbx.array_family.flex.sum', 'flex.sum', (['(Gnm_u * Gnm_v)'], {}), '(Gnm_u * Gnm_v)\n', (28970, 28985), False, 'from cctbx.array_family import flex\n'), ((30378, 30391), 'cctbx.array_family.flex.sum', 'flex.sum', (['Gjn'], {}), '(Gjn)\n', (30386, 30391), False, 'from cctbx.array_family import flex\n'), ((30928, 30947), 'cctbx.array_family.flex.double', 'flex.double', (['x_prev'], {}), '(x_prev)\n', (30939, 30947), False, 'from cctbx.array_family import flex\n'), ((30948, 30963), 'cctbx.array_family.flex.double', 'flex.double', (['x_'], {}), '(x_)\n', (30959, 30963), False, 'from cctbx.array_family import flex\n'), ((16649, 16909), 'mmtbx.masks.bulk_solvent', 'masks.bulk_solvent', ([], {'xray_structure': 'xrs', 
'ignore_zero_occupancy_atoms': '(False)', 'solvent_radius': 'mask_params.solvent_radius', 'shrink_truncation_radius': 'mask_params.shrink_truncation_radius', 'ignore_hydrogen_atoms': '(False)', 'grid_step': 'step', 'atom_radii': 'atom_radii'}), '(xray_structure=xrs, ignore_zero_occupancy_atoms=False,\n solvent_radius=mask_params.solvent_radius, shrink_truncation_radius=\n mask_params.shrink_truncation_radius, ignore_hydrogen_atoms=False,\n grid_step=step, atom_radii=atom_radii)\n', (16667, 16909), False, 'from mmtbx import masks\n'), ((17679, 17758), 'mmtbx.masks.manager', 'masks.manager', ([], {'miller_array': 'ma', 'miller_array_twin': 'None', 'mask_params': 'mask_params'}), '(miller_array=ma, miller_array_twin=None, mask_params=mask_params)\n', (17692, 17758), False, 'from mmtbx import masks\n')] |
lmatz/mars | mars/tensor/execution/tests/test_base_execute.py | 45f9166b54eb91b21e66cef8b590a41aa8ac9569 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import scipy.sparse as sps
from mars.tensor.execution.core import Executor
from mars import tensor as mt
from mars.tensor.expressions.datasource import tensor, ones, zeros, arange
from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, \
expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, \
hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, \
flip, flipud, fliplr, repeat, tile, isin
from mars.tensor.expressions.merge import stack
from mars.tensor.expressions.reduction import all as tall
class Test(unittest.TestCase):
def setUp(self):
self.executor = Executor('numpy')
def testRechunkExecution(self):
raw = np.random.random((11, 8))
arr = tensor(raw, chunks=3)
arr2 = arr.rechunk(4)
res = self.executor.execute_tensor(arr2)
self.assertTrue(np.array_equal(res[0], raw[:4, :4]))
self.assertTrue(np.array_equal(res[1], raw[:4, 4:]))
self.assertTrue(np.array_equal(res[2], raw[4:8, :4]))
self.assertTrue(np.array_equal(res[3], raw[4:8, 4:]))
self.assertTrue(np.array_equal(res[4], raw[8:, :4]))
self.assertTrue(np.array_equal(res[5], raw[8:, 4:]))
def testCopytoExecution(self):
a = ones((2, 3), chunks=1)
b = tensor([3, -1, 3], chunks=2)
copyto(a, b, where=b > 1)
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.array([[3, 1, 3], [3, 1, 3]])
np.testing.assert_equal(res, expected)
def testAstypeExecution(self):
raw = np.random.random((10, 5))
arr = tensor(raw, chunks=3)
arr2 = arr.astype('i8')
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], raw.astype('i8')))
raw = sps.random(10, 5, density=.2)
arr = tensor(raw, chunks=3)
arr2 = arr.astype('i8')
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.astype('i8').toarray()))
def testTransposeExecution(self):
raw = np.random.random((11, 8, 5))
arr = tensor(raw, chunks=3)
arr2 = transpose(arr)
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], raw.T))
arr3 = transpose(arr, axes=(-2, -1, -3))
res = self.executor.execute_tensor(arr3, concat=True)
self.assertTrue(np.array_equal(res[0], raw.transpose(1, 2, 0)))
raw = sps.random(11, 8)
arr = tensor(raw, chunks=3)
arr2 = transpose(arr)
self.assertTrue(arr2.issparse())
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.T.toarray()))
def testSwapaxesExecution(self):
raw = np.random.random((11, 8, 5))
arr = tensor(raw, chunks=3)
arr2 = arr.swapaxes(2, 0)
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], raw.swapaxes(2, 0)))
raw = sps.random(11, 8, density=.2)
arr = tensor(raw, chunks=3)
arr2 = arr.swapaxes(1, 0)
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.toarray().swapaxes(1, 0)))
def testMoveaxisExecution(self):
x = zeros((3, 4, 5), chunks=2)
t = moveaxis(x, 0, -1)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (4, 5, 3))
t = moveaxis(x, -1, 0)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 3, 4))
t = moveaxis(x, [0, 1], [-1, -2])
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 4, 3))
t = moveaxis(x, [0, 1, 2], [-1, -2, -3])
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 4, 3))
def testBroadcastToExecution(self):
raw = np.random.random((10, 5, 1))
arr = tensor(raw, chunks=2)
arr2 = broadcast_to(arr, (5, 10, 5, 6))
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], np.broadcast_to(raw, (5, 10, 5, 6))))
def testBroadcastArraysExecutions(self):
x_data = [[1, 2, 3]]
x = tensor(x_data, chunks=1)
y_data = [[1], [2], [3]]
y = tensor(y_data, chunks=2)
a = broadcast_arrays(x, y)
res = [self.executor.execute_tensor(arr, concat=True)[0] for arr in a]
expected = np.broadcast_arrays(x_data, y_data)
for r, e in zip(res, expected):
np.testing.assert_equal(r, e)
def testWhereExecution(self):
raw_cond = np.random.randint(0, 2, size=(4, 4), dtype='?')
raw_x = np.random.rand(4, 1)
raw_y = np.random.rand(4, 4)
cond, x, y = tensor(raw_cond, chunks=2), tensor(raw_x, chunks=2), tensor(raw_y, chunks=2)
arr = where(cond, x, y)
res = self.executor.execute_tensor(arr, concat=True)
self.assertTrue(np.array_equal(res[0], np.where(raw_cond, raw_x, raw_y)))
raw_cond = sps.csr_matrix(np.random.randint(0, 2, size=(4, 4), dtype='?'))
raw_x = sps.random(4, 1, density=.1)
raw_y = sps.random(4, 4, density=.1)
cond, x, y = tensor(raw_cond, chunks=2), tensor(raw_x, chunks=2), tensor(raw_y, chunks=2)
arr = where(cond, x, y)
res = self.executor.execute_tensor(arr, concat=True)[0]
self.assertTrue(np.array_equal(res.toarray(),
np.where(raw_cond.toarray(), raw_x.toarray(), raw_y.toarray())))
def testReshapeExecution(self):
raw_data = np.random.rand(10, 20, 30)
x = tensor(raw_data, chunks=6)
y = x.reshape(-1, 30)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(-1, 30)))
y2 = x.reshape(10, -1)
res = self.executor.execute_tensor(y2, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(10, -1)))
y3 = x.reshape(-1)
res = self.executor.execute_tensor(y3, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(-1)))
y4 = x.ravel()
res = self.executor.execute_tensor(y4, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.ravel()))
raw_data = np.random.rand(30, 100, 20)
x = tensor(raw_data, chunks=6)
y = x.reshape(-1, 20, 5, 5, 4)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(-1, 20, 5, 5, 4)))
y2 = x.reshape(3000, 10, 2)
res = self.executor.execute_tensor(y2, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(3000, 10, 2)))
y3 = x.reshape(60, 25, 40)
res = self.executor.execute_tensor(y3, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(60, 25, 40)))
def testExpandDimsExecution(self):
raw_data = np.random.rand(10, 20, 30)
x = tensor(raw_data, chunks=6)
y = expand_dims(x, 1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 1)))
y = expand_dims(x, 0)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 0)))
y = expand_dims(x, 3)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 3)))
y = expand_dims(x, -1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, -1)))
y = expand_dims(x, -4)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, -4)))
with self.assertRaises(np.AxisError):
expand_dims(x, -5)
with self.assertRaises(np.AxisError):
expand_dims(x, 4)
def testRollAxisExecution(self):
x = ones((3, 4, 5, 6), chunks=1)
y = rollaxis(x, 3, 1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.rollaxis(np.ones((3, 4, 5, 6)), 3, 1)))
def testAtleast1dExecution(self):
x = 1
y = ones(3, chunks=2)
z = ones((3, 4), chunks=2)
t = atleast_1d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.array([1])))
self.assertTrue(np.array_equal(res[1], np.ones(3)))
self.assertTrue(np.array_equal(res[2], np.ones((3, 4))))
def testAtleast2dExecution(self):
x = 1
y = ones(3, chunks=2)
z = ones((3, 4), chunks=2)
t = atleast_2d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.array([[1]])))
self.assertTrue(np.array_equal(res[1], np.atleast_2d(np.ones(3))))
self.assertTrue(np.array_equal(res[2], np.ones((3, 4))))
def testAtleast3dExecution(self):
x = 1
y = ones(3, chunks=2)
z = ones((3, 4), chunks=2)
t = atleast_3d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.atleast_3d(x)))
self.assertTrue(np.array_equal(res[1], np.atleast_3d(np.ones(3))))
self.assertTrue(np.array_equal(res[2], np.atleast_3d(np.ones((3, 4)))))
def testArgwhereExecution(self):
x = arange(6, chunks=2).reshape(2, 3)
t = argwhere(x > 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.argwhere(np.arange(6).reshape(2, 3) > 1)
self.assertTrue(np.array_equal(res, expected))
def testArraySplitExecution(self):
x = arange(48, chunks=3).reshape(2, 3, 8)
ss = array_split(x, 3, axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.array_split(np.arange(48).reshape(2, 3, 8), 3, axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
ss = array_split(x, [3, 5, 6, 10], axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.array_split(np.arange(48).reshape(2, 3, 8), [3, 5, 6, 10], axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
def testSplitExecution(self):
x = arange(48, chunks=3).reshape(2, 3, 8)
ss = split(x, 4, axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(np.arange(48).reshape(2, 3, 8), 4, axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
ss = split(x, [3, 5, 6, 10], axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(np.arange(48).reshape(2, 3, 8), [3, 5, 6, 10], axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# hsplit
x = arange(120, chunks=3).reshape(2, 12, 5)
ss = hsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.hsplit(np.arange(120).reshape(2, 12, 5), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# vsplit
x = arange(48, chunks=3).reshape(8, 3, 2)
ss = vsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.vsplit(np.arange(48).reshape(8, 3, 2), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# dsplit
x = arange(48, chunks=3).reshape(2, 3, 8)
ss = dsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.dsplit(np.arange(48).reshape(2, 3, 8), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
x_data = sps.random(12, 8, density=.1)
x = tensor(x_data, chunks=3)
ss = split(x, 4, axis=0)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(x_data.toarray(), 4, axis=0)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r.toarray(), e) for r, e in zip(res, expected)]
def testRollExecution(self):
x = arange(10, chunks=2)
t = roll(x, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10), 2)
np.testing.assert_equal(res, expected)
x2 = x.reshape(2, 5)
t = roll(x2, 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1)
np.testing.assert_equal(res, expected)
t = roll(x2, 1, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1, axis=0)
np.testing.assert_equal(res, expected)
t = roll(x2, 1, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1, axis=1)
np.testing.assert_equal(res, expected)
def testSqueezeExecution(self):
data = np.array([[[0], [1], [2]]])
x = tensor(data, chunks=1)
t = squeeze(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.squeeze(data)
np.testing.assert_equal(res, expected)
t = squeeze(x, axis=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.squeeze(data, axis=2)
np.testing.assert_equal(res, expected)
def testPtpExecution(self):
x = arange(4, chunks=1).reshape(2, 2)
t = ptp(x, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ptp(np.arange(4).reshape(2, 2), axis=0)
np.testing.assert_equal(res, expected)
t = ptp(x, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ptp(np.arange(4).reshape(2, 2), axis=1)
np.testing.assert_equal(res, expected)
t = ptp(x)
res = self.executor.execute_tensor(t)[0]
expected = np.ptp(np.arange(4).reshape(2, 2))
np.testing.assert_equal(res, expected)
def testDiffExecution(self):
data = np.array([1, 2, 4, 7, 0])
x = tensor(data, chunks=2)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data)
np.testing.assert_equal(res, expected)
t = diff(x, n=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data, n=2)
np.testing.assert_equal(res, expected)
data = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
x = tensor(data, chunks=2)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data)
np.testing.assert_equal(res, expected)
t = diff(x, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data, axis=0)
np.testing.assert_equal(res, expected)
x = mt.arange('1066-10-13', '1066-10-16', dtype=mt.datetime64)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64))
np.testing.assert_equal(res, expected)
def testEdiff1d(self):
data = np.array([1, 2, 4, 7, 0])
x = tensor(data, chunks=2)
t = ediff1d(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data)
np.testing.assert_equal(res, expected)
to_begin = tensor(-99, chunks=2)
to_end = tensor([88, 99], chunks=2)
t = ediff1d(x, to_begin=to_begin, to_end=to_end)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data, to_begin=-99, to_end=np.array([88, 99]))
np.testing.assert_equal(res, expected)
data = [[1, 2, 4], [1, 6, 24]]
t = ediff1d(tensor(data, chunks=2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data)
np.testing.assert_equal(res, expected)
def testDigitizeExecution(self):
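        # digitize should match np.digitize for array and tensor bins, with right=True and right=False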
data = np.array([0.2, 6.4, 3.0, 1.6])
x = tensor(data, chunks=2)
bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
inds = digitize(x, bins)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins)
np.testing.assert_equal(res, expected)
b = tensor(bins, chunks=2)
inds = digitize(x, b)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins)
np.testing.assert_equal(res, expected)
data = np.array([1.2, 10.0, 12.4, 15.5, 20.])
x = tensor(data, chunks=2)
bins = np.array([0, 5, 10, 15, 20])
inds = digitize(x, bins, right=True)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins, right=True)
np.testing.assert_equal(res, expected)
inds = digitize(x, bins, right=False)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins, right=False)
np.testing.assert_equal(res, expected)
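        # digitize should also accept sparse input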
data = sps.random(10, 1, density=.1) * 12
x = tensor(data, chunks=2)
bins = np.array([1.0, 2.0, 2.5, 4.0, 10.0])
inds = digitize(x, bins)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data.toarray(), bins, right=False)
np.testing.assert_equal(res.toarray(), expected)
def testAverageExecution(self):
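        # average should match np.average, with and without weights and along a chosen axis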
data = arange(1, 5, chunks=1)
t = average(data)
res = self.executor.execute_tensor(t)[0]
expected = np.average(np.arange(1, 5))
self.assertEqual(res, expected)
t = average(arange(1, 11, chunks=2), weights=arange(10, 0, -1, chunks=2))
res = self.executor.execute_tensor(t)[0]
expected = np.average(range(1, 11), weights=range(10, 0, -1))
self.assertEqual(res, expected)
data = arange(6, chunks=2).reshape((3, 2))
t = average(data, axis=1, weights=tensor([1./4, 3./4], chunks=2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.average(np.arange(6).reshape(3, 2), axis=1, weights=(1./4, 3./4))
np.testing.assert_equal(res, expected)
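        # omitting axis while weights and data have different shapes should raise TypeError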
with self.assertRaises(TypeError):
average(data, weights=tensor([1./4, 3./4], chunks=2))
def testCovExecution(self):
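        # cov should match np.cov; cov(x, y) should agree with cov of the stacked tensor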
data = np.array([[0, 2], [1, 1], [2, 0]]).T
x = tensor(data, chunks=1)
t = cov(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.cov(data)
np.testing.assert_equal(res, expected)
data_x = [-2.1, -1, 4.3]
data_y = [3, 1.1, 0.12]
x = tensor(data_x, chunks=1)
y = tensor(data_y, chunks=1)
X = stack((x, y), axis=0)
t = cov(x, y)
r = tall(t == cov(X))
self.assertTrue(self.executor.execute_tensor(r)[0])
def testCorrcoefExecution(self):
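        # corrcoef should match np.corrcoef for two 1-D tensors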
data_x = [-2.1, -1, 4.3]
data_y = [3, 1.1, 0.12]
x = tensor(data_x, chunks=1)
y = tensor(data_y, chunks=1)
t = corrcoef(x, y)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.corrcoef(data_x, data_y)
np.testing.assert_equal(res, expected)
def testFlipExecution(self):
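        # flip, flipud and fliplr should match their NumPy counterparts on a 3-D tensor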
a = arange(8, chunks=2).reshape((2, 2, 2))
t = flip(a, 0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flip(np.arange(8).reshape(2, 2, 2), 0)
np.testing.assert_equal(res, expected)
t = flip(a, 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flip(np.arange(8).reshape(2, 2, 2), 1)
np.testing.assert_equal(res, expected)
t = flipud(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flipud(np.arange(8).reshape(2, 2, 2))
np.testing.assert_equal(res, expected)
t = fliplr(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.fliplr(np.arange(8).reshape(2, 2, 2))
np.testing.assert_equal(res, expected)
def testRepeatExecution(self):
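        # repeat should match np.repeat for scalar counts, an explicit axis and per-row repeat counts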
a = repeat(3, 4)
res = self.executor.execute_tensor(a)[0]
expected = np.repeat(3, 4)
np.testing.assert_equal(res, expected)
x_data = np.random.randn(20, 30)
x = tensor(x_data, chunks=(3, 4))
t = repeat(x, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, 2)
np.testing.assert_equal(res, expected)
t = repeat(x, 3, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, 3, axis=1)
np.testing.assert_equal(res, expected)
t = repeat(x, np.arange(20), axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, np.arange(20), axis=0)
np.testing.assert_equal(res, expected)
t = repeat(x, arange(20, chunks=5), axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, np.arange(20), axis=0)
np.testing.assert_equal(res, expected)
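        # repeat should also work on sparse tensors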
x_data = sps.random(20, 30, density=.1)
x = tensor(x_data, chunks=(3, 4))
t = repeat(x, 2, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data.toarray(), 2, axis=1)
np.testing.assert_equal(res.toarray(), expected)
def testTileExecution(self):
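        # tile should match np.tile for 1-D and 2-D tensors with various reps shapes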
a_data = np.array([0, 1, 2])
a = tensor(a_data, chunks=2)
t = tile(a, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, 2)
np.testing.assert_equal(res, expected)
t = tile(a, (2, 2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, (2, 2))
np.testing.assert_equal(res, expected)
t = tile(a, (2, 1, 2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, (2, 1, 2))
np.testing.assert_equal(res, expected)
b_data = np.array([[1, 2], [3, 4]])
b = tensor(b_data, chunks=1)
t = tile(b, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(b_data, 2)
np.testing.assert_equal(res, expected)
t = tile(b, (2, 1))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(b_data, (2, 1))
np.testing.assert_equal(res, expected)
c_data = np.array([1, 2, 3, 4])
c = tensor(c_data, chunks=3)
t = tile(c, (4, 1))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(c_data, (4, 1))
np.testing.assert_equal(res, expected)
def testIsInExecution(self):
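        # isin should match np.isin for list and set test elements, with and without invert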
element = 2 * arange(4, chunks=1).reshape((2, 2))
test_elements = [1, 2, 4, 8]
mask = isin(element, test_elements)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_elements)
np.testing.assert_equal(res, expected)
res = self.executor.execute_tensor(element[mask], concat=True)[0]
expected = np.array([2, 4])
np.testing.assert_equal(res, expected)
mask = isin(element, test_elements, invert=True)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_elements, invert=True)
np.testing.assert_equal(res, expected)
res = self.executor.execute_tensor(element[mask], concat=True)[0]
expected = np.array([0, 6])
np.testing.assert_equal(res, expected)
test_set = {1, 2, 4, 8}
mask = isin(element, test_set)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_set)
np.testing.assert_equal(res, expected)
numpy as np\n'), ((25733, 25756), 'mars.tensor.expressions.base.isin', 'isin', (['element', 'test_set'], {}), '(element, test_set)\n', (25737, 25756), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((25902, 25940), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (25925, 25940), True, 'import numpy as np\n'), ((1630, 1665), 'numpy.array_equal', 'np.array_equal', (['res[0]', 'raw[:4, :4]'], {}), '(res[0], raw[:4, :4])\n', (1644, 1665), True, 'import numpy as np\n'), ((1691, 1726), 'numpy.array_equal', 'np.array_equal', (['res[1]', 'raw[:4, 4:]'], {}), '(res[1], raw[:4, 4:])\n', (1705, 1726), True, 'import numpy as np\n'), ((1752, 1788), 'numpy.array_equal', 'np.array_equal', (['res[2]', 'raw[4:8, :4]'], {}), '(res[2], raw[4:8, :4])\n', (1766, 1788), True, 'import numpy as np\n'), ((1814, 1850), 'numpy.array_equal', 'np.array_equal', (['res[3]', 'raw[4:8, 4:]'], {}), '(res[3], raw[4:8, 4:])\n', (1828, 1850), True, 'import numpy as np\n'), ((1876, 1911), 'numpy.array_equal', 'np.array_equal', (['res[4]', 'raw[8:, :4]'], {}), '(res[4], raw[8:, :4])\n', (1890, 1911), True, 'import numpy as np\n'), ((1937, 1972), 'numpy.array_equal', 'np.array_equal', (['res[5]', 'raw[8:, 4:]'], {}), '(res[5], raw[8:, 4:])\n', (1951, 1972), True, 'import numpy as np\n'), ((3055, 3084), 'numpy.array_equal', 'np.array_equal', (['res[0]', 'raw.T'], {}), '(res[0], raw.T)\n', (3069, 3084), True, 'import numpy as np\n'), ((5499, 5528), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (5522, 5528), True, 'import numpy as np\n'), ((5727, 5753), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_cond'], {'chunks': '(2)'}), '(raw_cond, chunks=2)\n', (5733, 5753), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((5755, 5778), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_x'], {'chunks': '(2)'}), '(raw_x, chunks=2)\n', (5761, 5778), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((5780, 5803), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_y'], {'chunks': '(2)'}), '(raw_y, chunks=2)\n', (5786, 5803), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((6015, 6062), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': '(4, 4)', 'dtype': '"""?"""'}), "(0, 2, size=(4, 4), dtype='?')\n", (6032, 6062), True, 'import numpy as np\n'), ((6176, 6202), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_cond'], {'chunks': '(2)'}), '(raw_cond, chunks=2)\n', (6182, 6202), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((6204, 6227), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_x'], {'chunks': '(2)'}), '(raw_x, chunks=2)\n', (6210, 6227), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((6229, 6252), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_y'], {'chunks': '(2)'}), '(raw_y, chunks=2)\n', (6235, 6252), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((8922, 8940), 'mars.tensor.expressions.base.expand_dims', 
'expand_dims', (['x', '(-5)'], {}), '(x, -5)\n', (8933, 8940), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((9000, 9017), 'mars.tensor.expressions.base.expand_dims', 'expand_dims', (['x', '(4)'], {}), '(x, 4)\n', (9011, 9017), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((10838, 10867), 'numpy.array_equal', 'np.array_equal', (['res', 'expected'], {}), '(res, expected)\n', (10852, 10867), True, 'import numpy as np\n'), ((11211, 11240), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (11234, 11240), True, 'import numpy as np\n'), ((11550, 11579), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (11573, 11579), True, 'import numpy as np\n'), ((11937, 11966), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (11960, 11966), True, 'import numpy as np\n'), ((12264, 12293), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (12287, 12293), True, 'import numpy as np\n'), ((12624, 12653), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (12647, 12653), True, 'import numpy as np\n'), ((12980, 13009), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (13003, 13009), True, 'import numpy as np\n'), ((13336, 13365), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (13359, 13365), True, 'import numpy as np\n'), ((13962, 13975), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (13971, 13975), True, 'import numpy as np\n'), ((16857, 16915), 'numpy.arange', 'np.arange', (['"""1066-10-13"""', '"""1066-10-16"""'], {'dtype': 'np.datetime64'}), "('1066-10-13', '1066-10-16', dtype=np.datetime64)\n", (16866, 16915), True, 'import numpy as np\n'), ((17629, 17651), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['data'], {'chunks': '(2)'}), '(data, chunks=2)\n', (17635, 17651), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((18960, 18990), 'scipy.sparse.random', 'sps.random', (['(10)', '(1)'], {'density': '(0.1)'}), '(10, 1, density=0.1)\n', (18970, 18990), True, 'import scipy.sparse as sps\n'), ((19485, 19500), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (19494, 19500), True, 'import numpy as np\n'), ((19563, 19586), 'mars.tensor.expressions.datasource.arange', 'arange', (['(1)', '(11)'], {'chunks': '(2)'}), '(1, 11, chunks=2)\n', (19569, 19586), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((20267, 20301), 'numpy.array', 'np.array', (['[[0, 2], [1, 1], [2, 0]]'], {}), '([[0, 2], [1, 1], [2, 0]])\n', (20275, 20301), True, 'import numpy as np\n'), ((22681, 22694), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (22690, 22694), True, 'import numpy as np\n'), ((22804, 22817), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (22813, 22817), 
True, 'import numpy as np\n'), ((22897, 22917), 'mars.tensor.expressions.datasource.arange', 'arange', (['(20)'], {'chunks': '(5)'}), '(20, chunks=5)\n', (22903, 22917), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((23027, 23040), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (23036, 23040), True, 'import numpy as np\n'), ((5055, 5090), 'numpy.broadcast_to', 'np.broadcast_to', (['raw', '(5, 10, 5, 6)'], {}), '(raw, (5, 10, 5, 6))\n', (5070, 5090), True, 'import numpy as np\n'), ((5945, 5977), 'numpy.where', 'np.where', (['raw_cond', 'raw_x', 'raw_y'], {}), '(raw_cond, raw_x, raw_y)\n', (5953, 5977), True, 'import numpy as np\n'), ((8157, 8184), 'numpy.expand_dims', 'np.expand_dims', (['raw_data', '(1)'], {}), '(raw_data, 1)\n', (8171, 8184), True, 'import numpy as np\n'), ((8325, 8352), 'numpy.expand_dims', 'np.expand_dims', (['raw_data', '(0)'], {}), '(raw_data, 0)\n', (8339, 8352), True, 'import numpy as np\n'), ((8493, 8520), 'numpy.expand_dims', 'np.expand_dims', (['raw_data', '(3)'], {}), '(raw_data, 3)\n', (8507, 8520), True, 'import numpy as np\n'), ((8662, 8690), 'numpy.expand_dims', 'np.expand_dims', (['raw_data', '(-1)'], {}), '(raw_data, -1)\n', (8676, 8690), True, 'import numpy as np\n'), ((8832, 8860), 'numpy.expand_dims', 'np.expand_dims', (['raw_data', '(-4)'], {}), '(raw_data, -4)\n', (8846, 8860), True, 'import numpy as np\n'), ((9552, 9565), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (9560, 9565), True, 'import numpy as np\n'), ((9615, 9625), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (9622, 9625), True, 'import numpy as np\n'), ((9675, 9690), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (9682, 9690), True, 'import numpy as np\n'), ((9968, 9983), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (9976, 9983), True, 'import numpy as np\n'), ((10108, 10123), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (10115, 10123), True, 'import numpy as np\n'), ((10401, 10417), 'numpy.atleast_3d', 'np.atleast_3d', (['x'], {}), '(x)\n', (10414, 10417), True, 'import numpy as np\n'), ((10625, 10644), 'mars.tensor.expressions.datasource.arange', 'arange', (['(6)'], {'chunks': '(2)'}), '(6, chunks=2)\n', (10631, 10644), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((10921, 10941), 'mars.tensor.expressions.datasource.arange', 'arange', (['(48)'], {'chunks': '(3)'}), '(48, chunks=3)\n', (10927, 10941), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((11659, 11679), 'mars.tensor.expressions.datasource.arange', 'arange', (['(48)'], {'chunks': '(3)'}), '(48, chunks=3)\n', (11665, 11679), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((12356, 12377), 'mars.tensor.expressions.datasource.arange', 'arange', (['(120)'], {'chunks': '(3)'}), '(120, chunks=3)\n', (12362, 12377), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((12716, 12736), 'mars.tensor.expressions.datasource.arange', 'arange', (['(48)'], {'chunks': '(3)'}), '(48, chunks=3)\n', (12722, 12736), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((13072, 13092), 'mars.tensor.expressions.datasource.arange', 'arange', (['(48)'], {'chunks': '(3)'}), '(48, chunks=3)\n', (13078, 13092), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((15187, 15206), 'mars.tensor.expressions.datasource.arange', 'arange', 
(['(4)'], {'chunks': '(1)'}), '(4, chunks=1)\n', (15193, 15206), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((17501, 17519), 'numpy.array', 'np.array', (['[88, 99]'], {}), '([88, 99])\n', (17509, 17519), True, 'import numpy as np\n'), ((19596, 19623), 'mars.tensor.expressions.datasource.arange', 'arange', (['(10)', '(0)', '(-1)'], {'chunks': '(2)'}), '(10, 0, -1, chunks=2)\n', (19602, 19623), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((19801, 19820), 'mars.tensor.expressions.datasource.arange', 'arange', (['(6)'], {'chunks': '(2)'}), '(6, chunks=2)\n', (19807, 19820), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((19879, 19915), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['[1.0 / 4, 3.0 / 4]'], {'chunks': '(2)'}), '([1.0 / 4, 3.0 / 4], chunks=2)\n', (19885, 19915), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((20722, 20728), 'mars.tensor.expressions.base.cov', 'cov', (['X'], {}), '(X)\n', (20725, 20728), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((21198, 21217), 'mars.tensor.expressions.datasource.arange', 'arange', (['(8)'], {'chunks': '(2)'}), '(8, chunks=2)\n', (21204, 21217), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((9246, 9267), 'numpy.ones', 'np.ones', (['(3, 4, 5, 6)'], {}), '((3, 4, 5, 6))\n', (9253, 9267), True, 'import numpy as np\n'), ((10047, 10057), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (10054, 10057), True, 'import numpy as np\n'), ((10481, 10491), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (10488, 10491), True, 'import numpy as np\n'), ((10556, 10571), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (10563, 10571), True, 'import numpy as np\n'), ((11109, 11122), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (11118, 11122), True, 'import numpy as np\n'), ((11436, 11449), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (11445, 11449), True, 'import numpy as np\n'), ((11835, 11848), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (11844, 11848), True, 'import numpy as np\n'), ((12150, 12163), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (12159, 12163), True, 'import numpy as np\n'), ((12528, 12542), 'numpy.arange', 'np.arange', (['(120)'], {}), '(120)\n', (12537, 12542), True, 'import numpy as np\n'), ((12886, 12899), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (12895, 12899), True, 'import numpy as np\n'), ((13242, 13255), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (13251, 13255), True, 'import numpy as np\n'), ((14172, 14185), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (14181, 14185), True, 'import numpy as np\n'), ((14374, 14387), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (14383, 14387), True, 'import numpy as np\n'), ((14584, 14597), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (14593, 14597), True, 'import numpy as np\n'), ((15338, 15350), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (15347, 15350), True, 'import numpy as np\n'), ((15538, 15550), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (15547, 15550), 
True, 'import numpy as np\n'), ((15717, 15729), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (15726, 15729), True, 'import numpy as np\n'), ((20004, 20016), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (20013, 20016), True, 'import numpy as np\n'), ((20187, 20223), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['[1.0 / 4, 3.0 / 4]'], {'chunks': '(2)'}), '([1.0 / 4, 3.0 / 4], chunks=2)\n', (20193, 20223), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((21351, 21363), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (21360, 21363), True, 'import numpy as np\n'), ((21546, 21558), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (21555, 21558), True, 'import numpy as np\n'), ((21742, 21754), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (21751, 21754), True, 'import numpy as np\n'), ((21935, 21947), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (21944, 21947), True, 'import numpy as np\n'), ((24802, 24821), 'mars.tensor.expressions.datasource.arange', 'arange', (['(4)'], {'chunks': '(1)'}), '(4, chunks=1)\n', (24808, 24821), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((10781, 10793), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (10790, 10793), True, 'import numpy as np\n'), ((25017, 25029), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (25026, 25029), True, 'import numpy as np\n'), ((25422, 25434), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (25431, 25434), True, 'import numpy as np\n'), ((25854, 25866), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (25863, 25866), True, 'import numpy as np\n')] |
drumpt/Co-Mixup | comix-imagenet/init_paths.py | 4c43f0ec873ce6c1e8ab446c7cb9e25089b9b91a | import sys
import matplotlib
matplotlib.use('Agg')
sys.path.insert(0, 'lib')
| [((29, 50), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (43, 50), False, 'import matplotlib\n'), ((51, 76), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""lib"""'], {}), "(0, 'lib')\n", (66, 76), False, 'import sys\n')] |
kcotar/Gaia_clusters_potential | members_abundances_in_out_uncertainties.py | aee2658c40446891d31528f8dec3cec899b63c68 | import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from astropy.table import Table, join
from os import chdir, system
from scipy.stats import norm as gauss_norm
from sys import argv
from getopt import getopt
# needed by the 'spline' branch of fit_abund_trend / eval_abund_trend below
from scipy.interpolate import splrep, splev
# turn off polyfit ranking warnings
import warnings
warnings.filterwarnings('ignore')
def _prepare_pdf_data(means, stds, range, norm=True):
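    # Build a summed probability density: one Gaussian per star (mean d_m, sigma d_s)
    # evaluated on a 250-point grid over `range`; if `norm`, scale the sum to peak at 1.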
x_vals = np.linspace(range[0], range[1], 250)
y_vals = np.zeros_like(x_vals)
# create and sum all PDF of stellar abundances
for d_m, d_s in zip(means, stds):
if np.isfinite([d_m, d_s]).all():
y_vals += gauss_norm.pdf(x_vals, loc=d_m, scale=d_s)
# return normalized summed pdf of all stars
if norm and np.nansum(y_vals) > 0.:
y_vals = 1. * y_vals/np.nanmax(y_vals)
return x_vals, y_vals
def _prepare_hist_data(d, bins, range, norm=True):
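    # Histogram `d` into `bins` over `range`; returns (left bin edges, heights, bin width),
    # with heights optionally normalized to a maximum of 1.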
heights, edges = np.histogram(d, bins=bins, range=range)
width = np.abs(edges[0] - edges[1])
if norm:
heights = 1.*heights / np.nanmax(heights)
return edges[:-1], heights, width
def _evaluate_abund_trend_fit(orig, fit, idx, sigma_low, sigma_high):
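    # Sigma-clipping step for the trend fit: compute residuals of the data to the fit,
    # flag points beyond the lower/upper sigma limits and drop them from the mask `idx`.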
# diffence to the original data
diff = orig - fit
std_diff = np.nanstd(diff[idx])
# select data that will be fitted
idx_outlier = np.logical_or(diff < (-1. * std_diff * sigma_low),
diff > (std_diff * sigma_high))
return np.logical_and(idx, ~idx_outlier)
def fit_abund_trend(p_data, a_data,
steps=3, sigma_low=2.5, sigma_high=2.5,
order=5, window=10, n_min_perc=10.,func='poly'):
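    # Iteratively fit the abundance `a_data` as a function of the parameter `p_data`
    # (Chebyshev, Legendre, plain polynomial or smoothing spline), sigma-clipping
    # outliers between steps; stops early if fewer than n_min_perc percent of the
    # points remain or the clipped set stops changing.
    # Returns ([fit coefficients, parameter offset], std of the residuals).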
idx_fit = np.logical_and(np.isfinite(p_data), np.isfinite(a_data))
data_len = np.sum(idx_fit)
n_fit_points_prev = np.sum(idx_fit)
if data_len <= order + 1:
return None, None
p_offset = np.nanmedian(p_data)
for i_f in range(steps): # number of sigma clipping steps
if func == 'cheb':
coef = np.polynomial.chebyshev.chebfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef)
if func == 'legen':
coef = np.polynomial.legendre.legfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.polynomial.legendre.legval(p_data - p_offset, coef)
if func == 'poly':
coef = np.polyfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.poly1d(coef)(p_data - p_offset)
if func == 'spline':
coef = splrep(p_data[idx_fit] - p_offset, a_data[idx_fit], k=order, s=window)
f_data = splev(p_data - p_offset, coef)
idx_fit = _evaluate_abund_trend_fit(a_data, f_data, idx_fit, sigma_low, sigma_high)
n_fit_points = np.sum(idx_fit)
if 100.*n_fit_points/data_len < n_min_perc:
break
if n_fit_points == n_fit_points_prev:
break
else:
n_fit_points_prev = n_fit_points
a_std = np.nanstd(a_data - f_data)
return [coef, p_offset], a_std
def eval_abund_trend(p_data, m_data, func='poly'):
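    # Evaluate a trend model produced by fit_abund_trend (coefficients + offset)
    # at the supplied parameter values, using the same basis function `func`.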
coef, p_offset = m_data
if func == 'cheb':
f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef)
if func == 'legen':
f_data = np.polynomial.legendre.legval(p_data - p_offset, coef)
if func == 'poly':
f_data = np.poly1d(coef)(p_data - p_offset)
if func == 'spline':
f_data = splev(p_data - p_offset, coef)
return f_data
simulation_dir = '/shared/data-camelot/cotar/'
data_dir_clusters = simulation_dir+'GaiaDR2_open_clusters_2001_GALAH/'
data_dir = '/shared/ebla/cotar/'
USE_DR3 = True
Q_FLAGS = True
P_INDIVIDUAL = False
suffix = ''
if len(argv) > 1:
# parse input options
opts, args = getopt(argv[1:], '', ['dr3=', 'suffix=', 'flags=', 'individual='])
# set parameters, depending on user inputs
print(opts)
for o, a in opts:
if o == '--dr3':
USE_DR3 = int(a) > 0
if o == '--suffix':
suffix += str(a)
if o == '--flags':
Q_FLAGS = int(a) > 0
if o == '--individual':
P_INDIVIDUAL = int(a) > 0
CG_data = Table.read(data_dir+'clusters/Cantat-Gaudin_2018/members.fits')
tails_data = Table.read(data_dir+'clusters/cluster_tails/members_open_gaia_tails.fits')
# remove cluster members from tails data
print('Cluster members all:', len(CG_data), len(tails_data))
idx_not_in_cluster = np.in1d(tails_data['source_id'], CG_data['source_id'], invert=True)
tails_data = tails_data[idx_not_in_cluster]
print('Cluster members after removing tail overlap:', len(CG_data), len(tails_data))
if USE_DR3:
# cannon_data = Table.read(data_dir+'GALAH_iDR3_main_alpha_190529.fits')
cannon_data = Table.read(data_dir+'GALAH_iDR3_main_191213.fits')
fe_col = 'fe_h'
teff_col = 'teff'
q_flag = 'flag_sp'
suffix += '_DR3'
else:
pass
if Q_FLAGS:
suffix += '_flag0'
# determine all possible simulation subdirs
chdir(data_dir_clusters)
for cluster_dir in glob('Cluster_orbits_GaiaDR2_*'):
chdir(cluster_dir)
print('Working on clusters in ' + cluster_dir)
for sub_dir in glob('*'):
current_cluster = '_'.join(sub_dir.split('_')[0:2])
source_id_cg = CG_data[CG_data['cluster'] == current_cluster]['source_id']
source_id_tail = tails_data[tails_data['cluster'] == current_cluster]['source_id']
idx_cg_memb = np.in1d(cannon_data['source_id'], np.array(source_id_cg))
idx_tail = np.in1d(cannon_data['source_id'], np.array(source_id_tail))
if '.png' in sub_dir or 'individual-abund' in sub_dir:
continue
print(' ')
print(sub_dir)
chdir(sub_dir)
try:
g_init = Table.read('members_init_galah.csv', format='ascii', delimiter='\t')
idx_init = np.in1d(cannon_data['source_id'], g_init['source_id'])
except:
idx_init = np.full(len(cannon_data), False)
try:
g_in_all = Table.read('possible_ejected-step1.csv', format='ascii', delimiter='\t')
g_in = Table.read('possible_ejected-step1_galah.csv', format='ascii', delimiter='\t')
# further refinement of results to be plotted here
g_in_all = g_in_all[np.logical_and(g_in_all['time_in_cluster'] >= 1., # [Myr] longest time (of all incarnations) inside cluster
g_in_all['in_cluster_prob'] >= 68.)] # percentage of reincarnations inside cluster
g_in = g_in[np.logical_and(g_in['time_in_cluster'] >= 1.,
g_in['in_cluster_prob'] >= 68.)]
idx_in = np.in1d(cannon_data['source_id'], g_in['source_id'])
idx_in_no_CG = np.logical_and(idx_in,
np.logical_not(np.in1d(cannon_data['source_id'], CG_data['source_id'])))
except:
idx_in = np.full(len(cannon_data), False)
idx_in_no_CG = np.full(len(cannon_data), False)
try:
g_out = Table.read('possible_outside-step1_galah.csv', format='ascii', delimiter='\t')
# further refinement of results to be plotted here
g_out = g_out[np.logical_and(g_out['time_in_cluster'] <= 0,
g_out['in_cluster_prob'] <= 0)]
idx_out = np.in1d(cannon_data['source_id'], g_out['source_id'])
except:
idx_out = np.full(len(cannon_data), False)
chdir('..')
if np.sum(idx_init) == 0 or np.sum(idx_in) == 0 or np.sum(idx_out) == 0:
print(' Some Galah lists are missing')
if USE_DR3:
abund_cols = [c for c in cannon_data.colnames if '_fe' in c and 'nr_' not in c and 'diff_' not in c and 'e_' not in c and 'Li' not in c and 'alpha' not in c] # and ('I' in c or 'II' in c or 'III' in c)]
else:
abund_cols = [c for c in cannon_data.colnames if '_abund' in c and len(c.split('_')) == 3]
# abund_cols = ['e_' + cc for cc in abund_cols]
# rg = (0., 0.35)
# yt = [0., 0.1, 0.2, 0.3]
# medfix = '-snr-sigma_'
abund_cols = ['diff_' + cc for cc in abund_cols]
rg = (-0.45, 0.45)
yt = [-0.3, -0.15, 0.0, 0.15, 0.3]
medfix = '-detrended-snr_'
# ------------------------------------------------------------------------------
# NEW: plot with parameter dependency trends
# ------------------------------------------------------------------------------
bs = 40
x_cols_fig = 7
y_cols_fig = 5
param_lims = {'snr_c2_iraf': [5, 175], 'age': [0., 14.], 'teff': [3000, 7000], 'logg': [0.0, 5.5], 'fe_h': [-1.2, 0.5]}
for param in ['snr_c2_iraf']: #list(param_lims.keys()):
cannon_data['abund_det'] = 0
cannon_data['abund_det_elems'] = 0
print('Estimating membership using parameter', param)
fig, ax = plt.subplots(y_cols_fig, x_cols_fig, figsize=(15, 10))
for i_c, col in enumerate(abund_cols):
# print(col)
x_p = i_c % x_cols_fig
y_p = int(1. * i_c / x_cols_fig)
fit_x_param = 'teff'
cur_abund_col = '_'.join(col.split('_')[1:])
cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col]
idx_val = np.isfinite(cannon_data[col])
if Q_FLAGS:
idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
idx_u1 = np.logical_and(idx_out, idx_val)
idx_u2 = np.logical_and(idx_init, idx_val)
idx_u3 = np.logical_and(idx_in, idx_val)
idx_u4 = np.logical_and(idx_cg_memb, idx_val)
idx_u5 = np.logical_and(idx_tail, idx_val)
fit_model, col_std = fit_abund_trend(cannon_data[fit_x_param][idx_u2],
cannon_data[cur_abund_col][idx_u2],
order=3, steps=2, func='poly',
sigma_low=2.5, sigma_high=2.5, n_min_perc=10.)
if fit_model is not None:
cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col] - eval_abund_trend(cannon_data[fit_x_param], fit_model, func='poly')
else:
cannon_data['diff_' + cur_abund_col] = np.nan
ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[col][idx_u1],
lw=0, s=3, color='C2', label='Field')
ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[col][idx_u2],
lw=0, s=3, color='C0', label='Initial')
ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[col][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
if np.sum(idx_u5) > 0:
print('Ejected in tail:', np.sum(np.logical_and(idx_u3, idx_u5)))
ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[col][idx_u5],
lw=0, s=3, color='C4', label='Tail')
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(xlim=param_lims[param], title=' '.join(col.split('_')[:2]) + label_add,
ylim=rg,
yticks=yt,)
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
rg = (-0.6, 0.6)
idx_val = np.isfinite(cannon_data[teff_col])
if Q_FLAGS:
idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
x_p = -1
y_p = -1
idx_u1 = np.logical_and(idx_out, idx_val)
idx_u2 = np.logical_and(idx_init, idx_val)
idx_u3 = np.logical_and(idx_in, idx_val)
idx_u5 = np.logical_and(idx_tail, idx_val)
sl1 = ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[fe_col][idx_u1],
lw=0, s=3, color='C2', label='Field')
sl2 = ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2],
lw=0, s=3, color='C0', label='Initial')
sl3 = ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[fe_col][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
fit_model, col_std = fit_abund_trend(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2],
order=3, steps=2, sigma_low=2.5, sigma_high=2.5, n_min_perc=10.,
func='poly')
if np.sum(idx_u5) > 0:
sl5 = ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[fe_col][idx_u5],
lw=0, s=3, color='C4', label='Tail')
ax[-1, -3].legend(handles=[sl1, sl1, sl3, sl5])
else:
ax[-1, -3].legend(handles=[sl1, sl1, sl3])
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(ylim=rg, title='Fe/H' + label_add, xlim=param_lims[param])
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
x_p = -2
y_p = -1
ax[y_p, x_p].scatter(cannon_data['age'][idx_u1], cannon_data[param][idx_u1],
lw=0, s=3, color='C2', label='Field')
ax[y_p, x_p].scatter(cannon_data['age'][idx_u2], cannon_data[param][idx_u2],
lw=0, s=3, color='C0', label='Initial')
ax[y_p, x_p].scatter(cannon_data['age'][idx_u3], cannon_data[param][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
if np.sum(idx_u5) > 0:
ax[y_p, x_p].scatter(cannon_data['age'][idx_u5], cannon_data[param][idx_u5],
lw=0, s=3, color='C4', label='Tail')
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(ylim=param_lims[param], title='age' + label_add, xlim=[0., 14.])
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
plt.subplots_adjust(top=0.97, bottom=0.02, left=0.04, right=0.98, hspace=0.3, wspace=0.3)
# plt.show()
plt.savefig('p_' + param + '_abundances' + medfix + sub_dir + '' + suffix + '.png', dpi=250)
plt.close(fig)
chdir('..')
| [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((323, 356), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (346, 356), False, 'import warnings\n'), ((4237, 4302), 'astropy.table.Table.read', 'Table.read', (["(data_dir + 'clusters/Cantat-Gaudin_2018/members.fits')"], {}), "(data_dir + 'clusters/Cantat-Gaudin_2018/members.fits')\n", (4247, 4302), False, 'from astropy.table import Table, join\n'), ((4314, 4390), 'astropy.table.Table.read', 'Table.read', (["(data_dir + 'clusters/cluster_tails/members_open_gaia_tails.fits')"], {}), "(data_dir + 'clusters/cluster_tails/members_open_gaia_tails.fits')\n", (4324, 4390), False, 'from astropy.table import Table, join\n'), ((4513, 4580), 'numpy.in1d', 'np.in1d', (["tails_data['source_id']", "CG_data['source_id']"], {'invert': '(True)'}), "(tails_data['source_id'], CG_data['source_id'], invert=True)\n", (4520, 4580), True, 'import numpy as np\n'), ((5027, 5051), 'os.chdir', 'chdir', (['data_dir_clusters'], {}), '(data_dir_clusters)\n', (5032, 5051), False, 'from os import chdir, system\n'), ((5071, 5103), 'glob.glob', 'glob', (['"""Cluster_orbits_GaiaDR2_*"""'], {}), "('Cluster_orbits_GaiaDR2_*')\n", (5075, 5103), False, 'from glob import glob\n'), ((426, 462), 'numpy.linspace', 'np.linspace', (['range[0]', 'range[1]', '(250)'], {}), '(range[0], range[1], 250)\n', (437, 462), True, 'import numpy as np\n'), ((476, 497), 'numpy.zeros_like', 'np.zeros_like', (['x_vals'], {}), '(x_vals)\n', (489, 497), True, 'import numpy as np\n'), ((929, 968), 'numpy.histogram', 'np.histogram', (['d'], {'bins': 'bins', 'range': 'range'}), '(d, bins=bins, range=range)\n', (941, 968), True, 'import numpy as np\n'), ((981, 1008), 'numpy.abs', 'np.abs', (['(edges[0] - edges[1])'], {}), '(edges[0] - edges[1])\n', (987, 1008), True, 'import numpy as np\n'), ((1255, 1275), 'numpy.nanstd', 'np.nanstd', (['diff[idx]'], {}), '(diff[idx])\n', (1264, 1275), True, 'import numpy as np\n'), ((1332, 1411), 'numpy.logical_or', 'np.logical_or', (['(diff < -1.0 * std_diff * sigma_low)', '(diff > std_diff * sigma_high)'], {}), '(diff < -1.0 * std_diff * sigma_low, diff > std_diff * sigma_high)\n', (1345, 1411), True, 'import numpy as np\n'), ((1458, 1491), 'numpy.logical_and', 'np.logical_and', (['idx', '(~idx_outlier)'], {}), '(idx, ~idx_outlier)\n', (1472, 1491), True, 'import numpy as np\n'), ((1746, 1761), 'numpy.sum', 'np.sum', (['idx_fit'], {}), '(idx_fit)\n', (1752, 1761), True, 'import numpy as np\n'), ((1787, 1802), 'numpy.sum', 'np.sum', (['idx_fit'], {}), '(idx_fit)\n', (1793, 1802), True, 'import numpy as np\n'), ((1874, 1894), 'numpy.nanmedian', 'np.nanmedian', (['p_data'], {}), '(p_data)\n', (1886, 1894), True, 'import numpy as np\n'), ((3046, 3072), 'numpy.nanstd', 'np.nanstd', (['(a_data - f_data)'], {}), '(a_data - f_data)\n', (3055, 3072), True, 'import numpy as np\n'), ((3829, 3895), 'getopt.getopt', 'getopt', (['argv[1:]', '""""""', "['dr3=', 'suffix=', 'flags=', 'individual=']"], {}), "(argv[1:], '', ['dr3=', 'suffix=', 'flags=', 'individual='])\n", (3835, 3895), False, 'from getopt import getopt\n'), ((4794, 4846), 'astropy.table.Table.read', 'Table.read', (["(data_dir + 'GALAH_iDR3_main_191213.fits')"], {}), "(data_dir + 'GALAH_iDR3_main_191213.fits')\n", (4804, 4846), False, 'from astropy.table import Table, join\n'), ((5109, 5127), 'os.chdir', 'chdir', (['cluster_dir'], {}), '(cluster_dir)\n', (5114, 5127), False, 'from os import chdir, 
system\n'), ((5199, 5208), 'glob.glob', 'glob', (['"""*"""'], {}), "('*')\n", (5203, 5208), False, 'from glob import glob\n'), ((14816, 14827), 'os.chdir', 'chdir', (['""".."""'], {}), "('..')\n", (14821, 14827), False, 'from os import chdir, system\n'), ((1689, 1708), 'numpy.isfinite', 'np.isfinite', (['p_data'], {}), '(p_data)\n', (1700, 1708), True, 'import numpy as np\n'), ((1710, 1729), 'numpy.isfinite', 'np.isfinite', (['a_data'], {}), '(a_data)\n', (1721, 1729), True, 'import numpy as np\n'), ((2824, 2839), 'numpy.sum', 'np.sum', (['idx_fit'], {}), '(idx_fit)\n', (2830, 2839), True, 'import numpy as np\n'), ((3230, 3286), 'numpy.polynomial.chebyshev.chebval', 'np.polynomial.chebyshev.chebval', (['(p_data - p_offset)', 'coef'], {}), '(p_data - p_offset, coef)\n', (3261, 3286), True, 'import numpy as np\n'), ((3328, 3382), 'numpy.polynomial.legendre.legval', 'np.polynomial.legendre.legval', (['(p_data - p_offset)', 'coef'], {}), '(p_data - p_offset, coef)\n', (3357, 3382), True, 'import numpy as np\n'), ((5740, 5754), 'os.chdir', 'chdir', (['sub_dir'], {}), '(sub_dir)\n', (5745, 5754), False, 'from os import chdir, system\n'), ((7540, 7551), 'os.chdir', 'chdir', (['""".."""'], {}), "('..')\n", (7545, 7551), False, 'from os import chdir, system\n'), ((651, 693), 'scipy.stats.norm.pdf', 'gauss_norm.pdf', (['x_vals'], {'loc': 'd_m', 'scale': 'd_s'}), '(x_vals, loc=d_m, scale=d_s)\n', (665, 693), True, 'from scipy.stats import norm as gauss_norm\n'), ((758, 775), 'numpy.nansum', 'np.nansum', (['y_vals'], {}), '(y_vals)\n', (767, 775), True, 'import numpy as np\n'), ((811, 828), 'numpy.nanmax', 'np.nanmax', (['y_vals'], {}), '(y_vals)\n', (820, 828), True, 'import numpy as np\n'), ((1053, 1071), 'numpy.nanmax', 'np.nanmax', (['heights'], {}), '(heights)\n', (1062, 1071), True, 'import numpy as np\n'), ((2005, 2092), 'numpy.polynomial.chebyshev.chebfit', 'np.polynomial.chebyshev.chebfit', (['(p_data[idx_fit] - p_offset)', 'a_data[idx_fit]', 'order'], {}), '(p_data[idx_fit] - p_offset, a_data[idx_fit],\n order)\n', (2036, 2092), True, 'import numpy as np\n'), ((2110, 2166), 'numpy.polynomial.chebyshev.chebval', 'np.polynomial.chebyshev.chebval', (['(p_data - p_offset)', 'coef'], {}), '(p_data - p_offset, coef)\n', (2141, 2166), True, 'import numpy as np\n'), ((2214, 2299), 'numpy.polynomial.legendre.legfit', 'np.polynomial.legendre.legfit', (['(p_data[idx_fit] - p_offset)', 'a_data[idx_fit]', 'order'], {}), '(p_data[idx_fit] - p_offset, a_data[idx_fit],\n order)\n', (2243, 2299), True, 'import numpy as np\n'), ((2317, 2371), 'numpy.polynomial.legendre.legval', 'np.polynomial.legendre.legval', (['(p_data - p_offset)', 'coef'], {}), '(p_data - p_offset, coef)\n', (2346, 2371), True, 'import numpy as np\n'), ((2418, 2480), 'numpy.polyfit', 'np.polyfit', (['(p_data[idx_fit] - p_offset)', 'a_data[idx_fit]', 'order'], {}), '(p_data[idx_fit] - p_offset, a_data[idx_fit], order)\n', (2428, 2480), True, 'import numpy as np\n'), ((3423, 3438), 'numpy.poly1d', 'np.poly1d', (['coef'], {}), '(coef)\n', (3432, 3438), True, 'import numpy as np\n'), ((5501, 5523), 'numpy.array', 'np.array', (['source_id_cg'], {}), '(source_id_cg)\n', (5509, 5523), True, 'import numpy as np\n'), ((5578, 5602), 'numpy.array', 'np.array', (['source_id_tail'], {}), '(source_id_tail)\n', (5586, 5602), True, 'import numpy as np\n'), ((5790, 5858), 'astropy.table.Table.read', 'Table.read', (['"""members_init_galah.csv"""'], {'format': '"""ascii"""', 'delimiter': '"""\t"""'}), "('members_init_galah.csv', format='ascii', 
delimiter='\\t')\n", (5800, 5858), False, 'from astropy.table import Table, join\n'), ((5882, 5936), 'numpy.in1d', 'np.in1d', (["cannon_data['source_id']", "g_init['source_id']"], {}), "(cannon_data['source_id'], g_init['source_id'])\n", (5889, 5936), True, 'import numpy as np\n'), ((6046, 6118), 'astropy.table.Table.read', 'Table.read', (['"""possible_ejected-step1.csv"""'], {'format': '"""ascii"""', 'delimiter': '"""\t"""'}), "('possible_ejected-step1.csv', format='ascii', delimiter='\\t')\n", (6056, 6118), False, 'from astropy.table import Table, join\n'), ((6138, 6216), 'astropy.table.Table.read', 'Table.read', (['"""possible_ejected-step1_galah.csv"""'], {'format': '"""ascii"""', 'delimiter': '"""\t"""'}), "('possible_ejected-step1_galah.csv', format='ascii', delimiter='\\t')\n", (6148, 6216), False, 'from astropy.table import Table, join\n'), ((6715, 6767), 'numpy.in1d', 'np.in1d', (["cannon_data['source_id']", "g_in['source_id']"], {}), "(cannon_data['source_id'], g_in['source_id'])\n", (6722, 6767), True, 'import numpy as np\n'), ((7097, 7175), 'astropy.table.Table.read', 'Table.read', (['"""possible_outside-step1_galah.csv"""'], {'format': '"""ascii"""', 'delimiter': '"""\t"""'}), "('possible_outside-step1_galah.csv', format='ascii', delimiter='\\t')\n", (7107, 7175), False, 'from astropy.table import Table, join\n'), ((7406, 7459), 'numpy.in1d', 'np.in1d', (["cannon_data['source_id']", "g_out['source_id']"], {}), "(cannon_data['source_id'], g_out['source_id'])\n", (7413, 7459), True, 'import numpy as np\n'), ((9017, 9071), 'matplotlib.pyplot.subplots', 'plt.subplots', (['y_cols_fig', 'x_cols_fig'], {'figsize': '(15, 10)'}), '(y_cols_fig, x_cols_fig, figsize=(15, 10))\n', (9029, 9071), True, 'import matplotlib.pyplot as plt\n'), ((11756, 11790), 'numpy.isfinite', 'np.isfinite', (['cannon_data[teff_col]'], {}), '(cannon_data[teff_col])\n', (11767, 11790), True, 'import numpy as np\n'), ((11956, 11988), 'numpy.logical_and', 'np.logical_and', (['idx_out', 'idx_val'], {}), '(idx_out, idx_val)\n', (11970, 11988), True, 'import numpy as np\n'), ((12010, 12043), 'numpy.logical_and', 'np.logical_and', (['idx_init', 'idx_val'], {}), '(idx_init, idx_val)\n', (12024, 12043), True, 'import numpy as np\n'), ((12065, 12096), 'numpy.logical_and', 'np.logical_and', (['idx_in', 'idx_val'], {}), '(idx_in, idx_val)\n', (12079, 12096), True, 'import numpy as np\n'), ((12118, 12151), 'numpy.logical_and', 'np.logical_and', (['idx_tail', 'idx_val'], {}), '(idx_tail, idx_val)\n', (12132, 12151), True, 'import numpy as np\n'), ((14564, 14658), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.97)', 'bottom': '(0.02)', 'left': '(0.04)', 'right': '(0.98)', 'hspace': '(0.3)', 'wspace': '(0.3)'}), '(top=0.97, bottom=0.02, left=0.04, right=0.98, hspace=\n 0.3, wspace=0.3)\n', (14583, 14658), True, 'import matplotlib.pyplot as plt\n'), ((14691, 14787), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('p_' + param + '_abundances' + medfix + sub_dir + '' + suffix + '.png')"], {'dpi': '(250)'}), "('p_' + param + '_abundances' + medfix + sub_dir + '' + suffix +\n '.png', dpi=250)\n", (14702, 14787), True, 'import matplotlib.pyplot as plt\n'), ((14796, 14810), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (14805, 14810), True, 'import matplotlib.pyplot as plt\n'), ((598, 621), 'numpy.isfinite', 'np.isfinite', (['[d_m, d_s]'], {}), '([d_m, d_s])\n', (609, 621), True, 'import numpy as np\n'), ((2502, 2517), 'numpy.poly1d', 'np.poly1d', (['coef'], {}), '(coef)\n', (2511, 
2517), True, 'import numpy as np\n'), ((7564, 7580), 'numpy.sum', 'np.sum', (['idx_init'], {}), '(idx_init)\n', (7570, 7580), True, 'import numpy as np\n'), ((7589, 7603), 'numpy.sum', 'np.sum', (['idx_in'], {}), '(idx_in)\n', (7595, 7603), True, 'import numpy as np\n'), ((7612, 7627), 'numpy.sum', 'np.sum', (['idx_out'], {}), '(idx_out)\n', (7618, 7627), True, 'import numpy as np\n'), ((9449, 9478), 'numpy.isfinite', 'np.isfinite', (['cannon_data[col]'], {}), '(cannon_data[col])\n', (9460, 9478), True, 'import numpy as np\n'), ((9613, 9645), 'numpy.logical_and', 'np.logical_and', (['idx_out', 'idx_val'], {}), '(idx_out, idx_val)\n', (9627, 9645), True, 'import numpy as np\n'), ((9671, 9704), 'numpy.logical_and', 'np.logical_and', (['idx_init', 'idx_val'], {}), '(idx_init, idx_val)\n', (9685, 9704), True, 'import numpy as np\n'), ((9730, 9761), 'numpy.logical_and', 'np.logical_and', (['idx_in', 'idx_val'], {}), '(idx_in, idx_val)\n', (9744, 9761), True, 'import numpy as np\n'), ((9787, 9823), 'numpy.logical_and', 'np.logical_and', (['idx_cg_memb', 'idx_val'], {}), '(idx_cg_memb, idx_val)\n', (9801, 9823), True, 'import numpy as np\n'), ((9849, 9882), 'numpy.logical_and', 'np.logical_and', (['idx_tail', 'idx_val'], {}), '(idx_tail, idx_val)\n', (9863, 9882), True, 'import numpy as np\n'), ((11841, 11890), 'numpy.logical_and', 'np.logical_and', (['idx_val', '(cannon_data[q_flag] == 0)'], {}), '(idx_val, cannon_data[q_flag] == 0)\n', (11855, 11890), True, 'import numpy as np\n'), ((12957, 12971), 'numpy.sum', 'np.sum', (['idx_u5'], {}), '(idx_u5)\n', (12963, 12971), True, 'import numpy as np\n'), ((13352, 13366), 'numpy.sum', 'np.sum', (['idx_u1'], {}), '(idx_u1)\n', (13358, 13366), True, 'import numpy as np\n'), ((13368, 13382), 'numpy.sum', 'np.sum', (['idx_u2'], {}), '(idx_u2)\n', (13374, 13382), True, 'import numpy as np\n'), ((13384, 13398), 'numpy.sum', 'np.sum', (['idx_u3'], {}), '(idx_u3)\n', (13390, 13398), True, 'import numpy as np\n'), ((14097, 14111), 'numpy.sum', 'np.sum', (['idx_u5'], {}), '(idx_u5)\n', (14103, 14111), True, 'import numpy as np\n'), ((14344, 14358), 'numpy.sum', 'np.sum', (['idx_u1'], {}), '(idx_u1)\n', (14350, 14358), True, 'import numpy as np\n'), ((14360, 14374), 'numpy.sum', 'np.sum', (['idx_u2'], {}), '(idx_u2)\n', (14366, 14374), True, 'import numpy as np\n'), ((14376, 14390), 'numpy.sum', 'np.sum', (['idx_u3'], {}), '(idx_u3)\n', (14382, 14390), True, 'import numpy as np\n'), ((6312, 6404), 'numpy.logical_and', 'np.logical_and', (["(g_in_all['time_in_cluster'] >= 1.0)", "(g_in_all['in_cluster_prob'] >= 68.0)"], {}), "(g_in_all['time_in_cluster'] >= 1.0, g_in_all[\n 'in_cluster_prob'] >= 68.0)\n", (6326, 6404), True, 'import numpy as np\n'), ((6576, 6655), 'numpy.logical_and', 'np.logical_and', (["(g_in['time_in_cluster'] >= 1.0)", "(g_in['in_cluster_prob'] >= 68.0)"], {}), "(g_in['time_in_cluster'] >= 1.0, g_in['in_cluster_prob'] >= 68.0)\n", (6590, 6655), True, 'import numpy as np\n'), ((6875, 6930), 'numpy.in1d', 'np.in1d', (["cannon_data['source_id']", "CG_data['source_id']"], {}), "(cannon_data['source_id'], CG_data['source_id'])\n", (6882, 6930), True, 'import numpy as np\n'), ((7265, 7341), 'numpy.logical_and', 'np.logical_and', (["(g_out['time_in_cluster'] <= 0)", "(g_out['in_cluster_prob'] <= 0)"], {}), "(g_out['time_in_cluster'] <= 0, g_out['in_cluster_prob'] <= 0)\n", (7279, 7341), True, 'import numpy as np\n'), ((9537, 9586), 'numpy.logical_and', 'np.logical_and', (['idx_val', '(cannon_data[q_flag] == 0)'], {}), '(idx_val, cannon_data[q_flag] 
== 0)\n', (9551, 9586), True, 'import numpy as np\n'), ((11052, 11066), 'numpy.sum', 'np.sum', (['idx_u5'], {}), '(idx_u5)\n', (11058, 11066), True, 'import numpy as np\n'), ((11395, 11409), 'numpy.sum', 'np.sum', (['idx_u1'], {}), '(idx_u1)\n', (11401, 11409), True, 'import numpy as np\n'), ((11411, 11425), 'numpy.sum', 'np.sum', (['idx_u2'], {}), '(idx_u2)\n', (11417, 11425), True, 'import numpy as np\n'), ((11427, 11441), 'numpy.sum', 'np.sum', (['idx_u3'], {}), '(idx_u3)\n', (11433, 11441), True, 'import numpy as np\n'), ((11125, 11155), 'numpy.logical_and', 'np.logical_and', (['idx_u3', 'idx_u5'], {}), '(idx_u3, idx_u5)\n', (11139, 11155), True, 'import numpy as np\n')] |
donno2048/python-minifier | src/python_minifier/transforms/remove_pass.py | 9a9ff4dd5d2bb8dc666cae5939c125d420c2ffd5 | import ast
from python_minifier.transforms.suite_transformer import SuiteTransformer
class RemovePass(SuiteTransformer):
"""
Remove Pass keywords from source
If a statement is syntactically necessary, use an empty expression instead
"""
def __call__(self, node):
return self.visit(node)
def suite(self, node_list, parent):
without_pass = [self.visit(a) for a in filter(lambda n: not self.is_node(n, ast.Pass), node_list)]
if len(without_pass) == 0:
if isinstance(parent, ast.Module):
return []
else:
return [self.add_child(ast.Expr(value=ast.Num(0)), parent=parent)]
return without_pass
| [((649, 659), 'ast.Num', 'ast.Num', (['(0)'], {}), '(0)\n', (656, 659), False, 'import ast\n')] |
gzu300/Linear_Algebra | test/tests.py | 437a285b0230f4da8b0573b04da32ee965b09233 | import unittest
from pkg import Linear_Algebra
import numpy as np
class TestLU(unittest.TestCase):
def setUp(self):
self.U_answer = np.around(np.array([[2,1,0],[0,3/2,1],[0,0,4/3]], dtype=float), decimals=2).tolist()
self.L_answer = np.around(np.array([[1,0,0],[1/2,1,0],[0,2/3,1]], dtype=float), decimals=2).tolist()
def test_perm(self):
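        # make_perm_mx(3, 0, 1) is expected to build the 3x3 permutation matrix
        # that swaps rows 0 and 1 of the identity (see `answer` below).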
answer = np.array([[0,1,0], [1,0,0], [0,0,1]], dtype=float).tolist()
result = Linear_Algebra.make_perm_mx(3, 0, 1).tolist()
self.assertEqual(result, answer)
def test_LU(self):
L_result, U_result = np.around(Linear_Algebra.LU(np.array([[2,1,0],[1,2,1],[0,1,2]], dtype=float)), decimals=2).tolist()
self.assertEqual(U_result, self.U_answer)
self.assertEqual(L_result, self.L_answer)
class TestDet(unittest.TestCase):
def setUp(self):
self.input_mx = np.array([[2,-1,0,0],[-1,2,-1,0],[0,-1,2,-1],[0,0,-1,2]], dtype=float)
def test_find_det(self):
result = np.around(Linear_Algebra.find_det(A = self.input_mx), decimals=2).tolist()
answer = np.around(5, decimals=2).tolist()
self.assertEqual(result, answer)
if __name__ == '__main__':
unittest.main() | [((1199, 1214), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1212, 1214), False, 'import unittest\n'), ((883, 972), 'numpy.array', 'np.array', (['[[2, -1, 0, 0], [-1, 2, -1, 0], [0, -1, 2, -1], [0, 0, -1, 2]]'], {'dtype': 'float'}), '([[2, -1, 0, 0], [-1, 2, -1, 0], [0, -1, 2, -1], [0, 0, -1, 2]],\n dtype=float)\n', (891, 972), True, 'import numpy as np\n'), ((382, 438), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 1]]'], {'dtype': 'float'}), '([[0, 1, 0], [1, 0, 0], [0, 0, 1]], dtype=float)\n', (390, 438), True, 'import numpy as np\n'), ((459, 495), 'pkg.Linear_Algebra.make_perm_mx', 'Linear_Algebra.make_perm_mx', (['(3)', '(0)', '(1)'], {}), '(3, 0, 1)\n', (486, 495), False, 'from pkg import Linear_Algebra\n'), ((1092, 1116), 'numpy.around', 'np.around', (['(5)'], {'decimals': '(2)'}), '(5, decimals=2)\n', (1101, 1116), True, 'import numpy as np\n'), ((155, 219), 'numpy.array', 'np.array', (['[[2, 1, 0], [0, 3 / 2, 1], [0, 0, 4 / 3]]'], {'dtype': 'float'}), '([[2, 1, 0], [0, 3 / 2, 1], [0, 0, 4 / 3]], dtype=float)\n', (163, 219), True, 'import numpy as np\n'), ((264, 328), 'numpy.array', 'np.array', (['[[1, 0, 0], [1 / 2, 1, 0], [0, 2 / 3, 1]]'], {'dtype': 'float'}), '([[1, 0, 0], [1 / 2, 1, 0], [0, 2 / 3, 1]], dtype=float)\n', (272, 328), True, 'import numpy as np\n'), ((1010, 1050), 'pkg.Linear_Algebra.find_det', 'Linear_Algebra.find_det', ([], {'A': 'self.input_mx'}), '(A=self.input_mx)\n', (1033, 1050), False, 'from pkg import Linear_Algebra\n'), ((627, 683), 'numpy.array', 'np.array', (['[[2, 1, 0], [1, 2, 1], [0, 1, 2]]'], {'dtype': 'float'}), '([[2, 1, 0], [1, 2, 1], [0, 1, 2]], dtype=float)\n', (635, 683), True, 'import numpy as np\n')] |
ofekashery/the-blue-alliance | src/backend/common/models/favorite.py | df0e47d054161fe742ac6198a6684247d0713279 | from backend.common.models.mytba import MyTBAModel
class Favorite(MyTBAModel):
"""
In order to make strongly consistent DB requests, instances of this class
should be created with a parent that is the associated Account key.
"""
def __init__(self, *args, **kwargs):
super(Favorite, self).__init__(*args, **kwargs)
| [] |
MontyThibault/centre-of-mass-awareness | Cartwheel/lib/Python26/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/filebrowsebutton.py | 58778f148e65749e1dfc443043e9fc054ca3ff4d | #----------------------------------------------------------------------
# Name: wxPython.lib.filebrowsebutton
# Purpose: Composite controls that provide a Browse button next to
# either a wxTextCtrl or a wxComboBox. The Browse button
# launches a wxFileDialog and loads the result into the
# other control.
#
# Author: Mike Fletcher
#
# RCS-ID: $Id: filebrowsebutton.py 59674 2009-03-20 21:00:16Z RD $
# Copyright: (c) 2000 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------
# 12/02/2003 - Jeff Grimmett ([email protected])
#
# o 2.5 Compatibility changes
#
import os
import types
import wx
#----------------------------------------------------------------------
class FileBrowseButton(wx.Panel):
"""
A control to allow the user to type in a filename or browse with
    the standard file dialog to select a file
"""
def __init__ (self, parent, id= -1,
pos = wx.DefaultPosition,
size = wx.DefaultSize,
style = wx.TAB_TRAVERSAL,
labelText= "File Entry:",
buttonText= "Browse",
toolTip= "Type filename or click browse to choose file",
# following are the values for a file dialog box
dialogTitle = "Choose a file",
startDirectory = ".",
initialValue = "",
fileMask = "*.*",
fileMode = wx.OPEN,
# callback for when value changes (optional)
changeCallback= lambda x:x,
labelWidth = 0,
name = 'fileBrowseButton',
):
"""
:param labelText: Text for label to left of text field
:param buttonText: Text for button which launches the file dialog
:param toolTip: Help text
:param dialogTitle: Title used in file dialog
:param startDirectory: Default directory for file dialog startup
:param fileMask: File mask (glob pattern, such as *.*) to use in file dialog
:param fileMode: wx.OPEN or wx.SAVE, indicates type of file dialog to use
:param changeCallback: Optional callback called for all changes in value of the control
:param labelWidth: Width of the label
"""
# store variables
self.labelText = labelText
self.buttonText = buttonText
self.toolTip = toolTip
self.dialogTitle = dialogTitle
self.startDirectory = startDirectory
self.initialValue = initialValue
self.fileMask = fileMask
self.fileMode = fileMode
self.changeCallback = changeCallback
self.callCallback = True
self.labelWidth = labelWidth
# create the dialog
self.createDialog(parent, id, pos, size, style, name )
# Setting a value causes the changeCallback to be called.
# In this case that would be before the return of the
# constructor. Not good. So a default value on
# SetValue is used to disable the callback
self.SetValue( initialValue, 0)
def createDialog( self, parent, id, pos, size, style, name ):
"""Setup the graphic representation of the dialog"""
wx.Panel.__init__ (self, parent, id, pos, size, style, name)
self.SetMinSize(size) # play nice with sizers
box = wx.BoxSizer(wx.HORIZONTAL)
self.label = self.createLabel( )
box.Add( self.label, 0, wx.CENTER )
self.textControl = self.createTextControl()
box.Add( self.textControl, 1, wx.LEFT|wx.CENTER, 5)
self.browseButton = self.createBrowseButton()
box.Add( self.browseButton, 0, wx.LEFT|wx.CENTER, 5)
# add a border around the whole thing and resize the panel to fit
outsidebox = wx.BoxSizer(wx.VERTICAL)
outsidebox.Add(box, 1, wx.EXPAND|wx.ALL, 3)
outsidebox.Fit(self)
self.SetAutoLayout(True)
self.SetSizer( outsidebox )
self.Layout()
if type( size ) == types.TupleType:
size = apply( wx.Size, size)
self.SetDimensions(-1, -1, size.width, size.height, wx.SIZE_USE_EXISTING)
# if size.width != -1 or size.height != -1:
# self.SetSize(size)
def SetBackgroundColour(self,color):
wx.Panel.SetBackgroundColour(self,color)
self.label.SetBackgroundColour(color)
def createLabel( self ):
"""Create the label/caption"""
label = wx.StaticText(self, -1, self.labelText, style =wx.ALIGN_RIGHT )
font = label.GetFont()
w, h, d, e = self.GetFullTextExtent(self.labelText, font)
if self.labelWidth > 0:
label.SetSize((self.labelWidth+5, h))
else:
label.SetSize((w+5, h))
return label
def createTextControl( self):
"""Create the text control"""
textControl = wx.TextCtrl(self, -1)
textControl.SetToolTipString( self.toolTip )
if self.changeCallback:
textControl.Bind(wx.EVT_TEXT, self.OnChanged)
textControl.Bind(wx.EVT_COMBOBOX, self.OnChanged)
return textControl
def OnChanged(self, evt):
if self.callCallback and self.changeCallback:
self.changeCallback(evt)
def createBrowseButton( self):
"""Create the browse-button control"""
button =wx.Button(self, -1, self.buttonText)
button.SetToolTipString( self.toolTip )
button.Bind(wx.EVT_BUTTON, self.OnBrowse)
return button
def OnBrowse (self, event = None):
""" Going to browse for file... """
current = self.GetValue()
directory = os.path.split(current)
if os.path.isdir( current):
directory = current
current = ''
elif directory and os.path.isdir( directory[0] ):
current = directory[1]
directory = directory [0]
else:
directory = self.startDirectory
current = ''
dlg = wx.FileDialog(self, self.dialogTitle, directory, current,
self.fileMask, self.fileMode)
if dlg.ShowModal() == wx.ID_OK:
self.SetValue(dlg.GetPath())
dlg.Destroy()
def GetValue (self):
"""
retrieve current value of text control
"""
return self.textControl.GetValue()
def SetValue (self, value, callBack=1):
"""set current value of text control"""
save = self.callCallback
self.callCallback = callBack
self.textControl.SetValue(value)
self.callCallback = save
def Enable (self, value=True):
""" Convenient enabling/disabling of entire control """
self.label.Enable (value)
self.textControl.Enable (value)
return self.browseButton.Enable (value)
def Disable (self,):
""" Convenient disabling of entire control """
self.Enable(False)
def GetLabel( self ):
""" Retrieve the label's current text """
return self.label.GetLabel()
def SetLabel( self, value ):
""" Set the label's current text """
rvalue = self.label.SetLabel( value )
self.Refresh( True )
return rvalue
class FileBrowseButtonWithHistory( FileBrowseButton ):
"""
with following additions:
__init__(..., history=None)
history -- optional list of paths for initial history drop-down
(must be passed by name, not a positional argument)
            If history is callable it must return a list used
for the history drop-down
changeCallback -- as for FileBrowseButton, but with a work-around
for win32 systems which don't appear to create wx.EVT_COMBOBOX
events properly. There is a (slight) chance that this work-around
will cause some systems to create two events for each Combobox
selection. If you discover this condition, please report it!
As for a FileBrowseButton.__init__ otherwise.
GetHistoryControl()
Return reference to the control which implements interfaces
required for manipulating the history list. See GetHistoryControl
documentation for description of what that interface is.
GetHistory()
Return current history list
SetHistory( value=(), selectionIndex = None )
Set current history list, if selectionIndex is not None, select that index
"""
def __init__( self, *arguments, **namedarguments):
self.history = namedarguments.get( "history" )
if self.history:
del namedarguments["history"]
self.historyCallBack=None
if callable(self.history):
self.historyCallBack=self.history
self.history=None
name = namedarguments.get('name', 'fileBrowseButtonWithHistory')
namedarguments['name'] = name
FileBrowseButton.__init__(self, *arguments, **namedarguments)
def createTextControl( self):
"""Create the text control"""
textControl = wx.ComboBox(self, -1, style = wx.CB_DROPDOWN )
textControl.SetToolTipString( self.toolTip )
textControl.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
if self.changeCallback:
textControl.Bind(wx.EVT_TEXT, self.OnChanged)
textControl.Bind(wx.EVT_COMBOBOX, self.OnChanged)
if self.history:
history=self.history
self.history=None
self.SetHistory( history, control=textControl)
return textControl
def GetHistoryControl( self ):
"""
Return a pointer to the control which provides (at least)
the following methods for manipulating the history list:
Append( item ) -- add item
Clear() -- clear all items
Delete( index ) -- 0-based index to delete from list
SetSelection( index ) -- 0-based index to select in list
Semantics of the methods follow those for the wxComboBox control
"""
return self.textControl
def SetHistory( self, value=(), selectionIndex = None, control=None ):
"""Set the current history list"""
if control is None:
control = self.GetHistoryControl()
if self.history == value:
return
self.history = value
# Clear history values not the selected one.
tempValue=control.GetValue()
# clear previous values
control.Clear()
control.SetValue(tempValue)
# walk through, appending new values
for path in value:
control.Append( path )
if selectionIndex is not None:
control.SetSelection( selectionIndex )
def GetHistory( self ):
"""Return the current history list"""
if self.historyCallBack != None:
return self.historyCallBack()
elif self.history:
return list( self.history )
else:
return []
def OnSetFocus(self, event):
"""When the history scroll is selected, update the history"""
if self.historyCallBack != None:
self.SetHistory( self.historyCallBack(), control=self.textControl)
event.Skip()
if wx.Platform == "__WXMSW__":
def SetValue (self, value, callBack=1):
""" Convenient setting of text control value, works
around limitation of wx.ComboBox """
save = self.callCallback
self.callCallback = callBack
self.textControl.SetValue(value)
self.callCallback = save
# Hack to call an event handler
class LocalEvent:
def __init__(self, string):
self._string=string
def GetString(self):
return self._string
if callBack==1:
# The callback wasn't being called when SetValue was used ??
# So added this explicit call to it
self.changeCallback(LocalEvent(value))
class DirBrowseButton(FileBrowseButton):
def __init__(self, parent, id = -1,
pos = wx.DefaultPosition, size = wx.DefaultSize,
style = wx.TAB_TRAVERSAL,
labelText = 'Select a directory:',
buttonText = 'Browse',
toolTip = 'Type directory name or browse to select',
dialogTitle = '',
startDirectory = '.',
changeCallback = None,
dialogClass = wx.DirDialog,
newDirectory = False,
name = 'dirBrowseButton'):
FileBrowseButton.__init__(self, parent, id, pos, size, style,
labelText, buttonText, toolTip,
dialogTitle, startDirectory,
changeCallback = changeCallback,
name = name)
self.dialogClass = dialogClass
self.newDirectory = newDirectory
#
def OnBrowse(self, ev = None):
style=0
if not self.newDirectory:
style |= wx.DD_DIR_MUST_EXIST
dialog = self.dialogClass(self,
message = self.dialogTitle,
defaultPath = self.startDirectory,
style = style)
if dialog.ShowModal() == wx.ID_OK:
self.SetValue(dialog.GetPath())
dialog.Destroy()
#
#----------------------------------------------------------------------
if __name__ == "__main__":
#from skeletonbuilder import rulesfile
class SimpleCallback:
def __init__( self, tag ):
self.tag = tag
def __call__( self, event ):
print self.tag, event.GetString()
class DemoFrame( wx.Frame ):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "File entry with browse", size=(500,260))
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
panel = wx.Panel (self,-1)
innerbox = wx.BoxSizer(wx.VERTICAL)
control = FileBrowseButton(
panel,
initialValue = "z:\\temp",
)
innerbox.Add( control, 0, wx.EXPAND )
middlecontrol = FileBrowseButtonWithHistory(
panel,
labelText = "With History",
initialValue = "d:\\temp",
history = ["c:\\temp", "c:\\tmp", "r:\\temp","z:\\temp"],
changeCallback= SimpleCallback( "With History" ),
)
innerbox.Add( middlecontrol, 0, wx.EXPAND )
middlecontrol = FileBrowseButtonWithHistory(
panel,
labelText = "History callback",
initialValue = "d:\\temp",
history = self.historyCallBack,
changeCallback= SimpleCallback( "History callback" ),
)
innerbox.Add( middlecontrol, 0, wx.EXPAND )
self.bottomcontrol = control = FileBrowseButton(
panel,
labelText = "With Callback",
style = wx.SUNKEN_BORDER|wx.CLIP_CHILDREN ,
changeCallback= SimpleCallback( "With Callback" ),
)
innerbox.Add( control, 0, wx.EXPAND)
self.bottommostcontrol = control = DirBrowseButton(
panel,
labelText = "Simple dir browse button",
style = wx.SUNKEN_BORDER|wx.CLIP_CHILDREN)
innerbox.Add( control, 0, wx.EXPAND)
ID = wx.NewId()
innerbox.Add( wx.Button( panel, ID,"Change Label", ), 1, wx.EXPAND)
self.Bind(wx.EVT_BUTTON, self.OnChangeLabel , id=ID)
ID = wx.NewId()
innerbox.Add( wx.Button( panel, ID,"Change Value", ), 1, wx.EXPAND)
self.Bind(wx.EVT_BUTTON, self.OnChangeValue, id=ID )
panel.SetAutoLayout(True)
panel.SetSizer( innerbox )
self.history={"c:\\temp":1, "c:\\tmp":1, "r:\\temp":1,"z:\\temp":1}
def historyCallBack(self):
keys=self.history.keys()
keys.sort()
return keys
def OnFileNameChangedHistory (self, event):
self.history[event.GetString ()]=1
def OnCloseMe(self, event):
self.Close(True)
def OnChangeLabel( self, event ):
self.bottomcontrol.SetLabel( "Label Updated" )
def OnChangeValue( self, event ):
self.bottomcontrol.SetValue( "r:\\somewhere\\over\\the\\rainbow.htm" )
def OnCloseWindow(self, event):
self.Destroy()
class DemoApp(wx.App):
def OnInit(self):
wx.InitAllImageHandlers()
frame = DemoFrame(None)
frame.Show(True)
self.SetTopWindow(frame)
return True
def test( ):
app = DemoApp(0)
app.MainLoop()
print 'Creating dialog'
test( )
| [] |
whanderley/eden | modules/pygsm/devicewrapper.py | 08ced3be3d52352c54cbd412ed86128fbb68b1d2 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
# arch: pacman -S python-pyserial
# debian/ubuntu: apt-get install python-serial
import serial
import re
import errors
class DeviceWrapper(object):
def __init__(self, logger, *args, **kwargs):
self.device = serial.Serial(*args, **kwargs)
self.logger = logger
def isOpen(self):
return self.device.isOpen()
def close(self):
self.device.close()
def write(self, str):
self.device.write(str)
def _read(self, read_term=None, read_timeout=None):
"""Read from the modem (blocking) until _terminator_ is hit,
(defaults to \r\n, which reads a single "line"), and return."""
buffer = []
# if a different timeout was requested just
# for _this_ read, store and override the
# current device setting (not thread safe!)
if read_timeout is not None:
old_timeout = self.device.timeout
self.device.timeout = read_timeout
def __reset_timeout():
"""restore the device's previous timeout
setting, if we overrode it earlier."""
if read_timeout is not None:
self.device.timeout =\
old_timeout
# the default terminator reads
# until a newline is hit
if read_term is None:
read_term = "\r\n"
while(True):
buf = self.device.read()
buffer.append(buf)
# if a timeout was hit, raise an exception including the raw data that
# we've already read (in case the calling func was _expecting_ a timeout
# (wouldn't it be nice if serial.Serial.read returned None for this?)
if buf == '':
__reset_timeout()
raise(errors.GsmReadTimeoutError(buffer))
# if last n characters of the buffer match the read
# terminator, return what we've received so far
if ''.join(buffer[-len(read_term):]) == read_term:
buf_str = ''.join(buffer)
__reset_timeout()
self._log(repr(buf_str), 'read')
return buf_str
def read_lines(self, read_term=None, read_timeout=None):
"""Read from the modem (blocking) one line at a time until a response
terminator ("OK", "ERROR", or "CMx ERROR...") is hit, then return
a list containing the lines."""
buffer = []
# keep on looping until a command terminator
# is encountered. these are NOT the same as the
# "read_term" argument - only OK or ERROR is valid
while(True):
buf = self._read(
read_term=read_term,
read_timeout=read_timeout)
buf = buf.strip()
buffer.append(buf)
# most commands return OK for success, but there
# are some exceptions. we're not checking those
# here (unlike RubyGSM), because they should be
# handled when they're _expected_
if buf == "OK":
return buffer
# some errors contain useful error codes, so raise a
# proper error with a description from pygsm/errors.py
m = re.match(r"^\+(CM[ES]) ERROR: (\d+)$", buf)
if m is not None:
type, code = m.groups()
raise(errors.GsmModemError(type, int(code)))
# ...some errors are not so useful
# (at+cmee=1 should enable error codes)
if buf == "ERROR":
raise(errors.GsmModemError)
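    # Usage sketch (assumption - not part of the original module; the port name,
    # baud rate and AT command below are purely illustrative):
    #
    #   dw = DeviceWrapper(logger, '/dev/ttyUSB0', 115200, timeout=10)
    #   dw.write('AT+CSQ\r')
    #   lines = dw.read_lines()   # e.g. ['+CSQ: 21,99', 'OK']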
def _log(self, str, type="debug"):
if hasattr(self, "logger"):
self.logger(self, str, type) | [((292, 322), 'serial.Serial', 'serial.Serial', (['*args'], {}), '(*args, **kwargs)\n', (305, 322), False, 'import serial\n'), ((3328, 3372), 're.match', 're.match', (['"""^\\\\+(CM[ES]) ERROR: (\\\\d+)$"""', 'buf'], {}), "('^\\\\+(CM[ES]) ERROR: (\\\\d+)$', buf)\n", (3336, 3372), False, 'import re\n'), ((1869, 1903), 'errors.GsmReadTimeoutError', 'errors.GsmReadTimeoutError', (['buffer'], {}), '(buffer)\n', (1895, 1903), False, 'import errors\n')] |
alqmy/The-Garage-Summer-Of-Code | day1/loops.py | af310d5e5194a62962db2fc1e601099468251efa | # while True:
#     # execute this
# print("Hola")
real = 7
print("Entre un numero entre el 1 y el 10")
guess = int(input())
# != means 'not equal to'
while guess != real:
print("Ese no es el numero")
print("Entre un numero entre el 1 y el 10")
guess = int(input())
# the rest
print("Yay! Lo sacastes!")
| [] |
paulveillard/cybersecurity-penetration-testing | pentest-scripts/learning-python-for-forensics/Chapter 6/rot13.py | a5afff13ec25afd0cf16ef966d35bddb91518af4 | def rotCode(data):
"""
The rotCode function encodes/decodes data using string indexing
:param data: A string
:return: The rot-13 encoded/decoded string
"""
rot_chars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
substitutions = []
# Walk through each individual character
for c in data:
        # Uppercase characters are lower-cased for the lookup, then restored to uppercase
if c.isupper():
try:
# Find the position of the character in rot_chars list
index = rot_chars.index(c.lower())
except ValueError:
substitutions.append(c)
continue
# Calculate the relative index that is 13 characters away from the index
substitutions.append((rot_chars[(index-13)]).upper())
else:
try:
# Find the position of the character in rot_chars list
index = rot_chars.index(c)
except ValueError:
substitutions.append(c)
continue
substitutions.append(rot_chars[((index-13))])
return ''.join(substitutions)
if __name__ == '__main__':
print rotCode('Jul, EBG-13?')
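    # Expected output (rot-13 of the sample string): Why, ROT-13?
    # Applying rotCode twice returns the original text, since rot-13 is its own inverse.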
| [] |
sunyilgdx/CwVW-SIF | examples/sim_tfidf.py | 85ef56d80512e2f6bff1266e030552075566b240 | import pickle, sys
sys.path.append('../src')
import data_io, sim_algo, eval, params
## run
# wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from John Wieting's github (https://github.com/jwieting/iclr2016)
# '../data/glove.840B.300d.txt' # need to download it first
# ]
wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from John Wieting's github (https://github.com/jwieting/iclr2016)
'../data/glove.6B.50d.txt' # need to download it first
]
rmpcs = [0,1]
comment4para = [ # need to align with the following loop
['word vector files', wordfiles], # comments and values,
['remove principal component or not', rmpcs]
]
params = params.params()
parr4para = {}
sarr4para = {}
for wordfile in wordfiles:
(words, We) = data_io.getWordmap(wordfile)
weight4ind = data_io.getIDFWeight(wordfile)
for rmpc in rmpcs:
print('word vectors loaded from %s' % wordfile)
print('word weights computed from idf')
params.rmpc = rmpc
print('remove the first %d principal components' % rmpc)
# eval just one example dataset
parr, sarr = eval.sim_evaluate_one(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
## eval all datasets; need to obtained datasets from John Wieting (https://github.com/jwieting/iclr2016)
# parr, sarr = eval.sim_evaluate_all(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
paras = (wordfile, rmpc)
parr4para[paras] = parr
sarr4para[paras] = sarr
## save result
save_result = False # True
result_file = 'result/sim_tfidf.result'
if save_result:
with open(result_file, 'w') as f:
pickle.dump([parr4para, sarr4para, comment4para] , f)
| [((19, 44), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (34, 44), False, 'import pickle, sys\n'), ((704, 719), 'params.params', 'params.params', ([], {}), '()\n', (717, 719), False, 'import data_io, sim_algo, eval, params\n'), ((795, 823), 'data_io.getWordmap', 'data_io.getWordmap', (['wordfile'], {}), '(wordfile)\n', (813, 823), False, 'import data_io, sim_algo, eval, params\n'), ((841, 871), 'data_io.getIDFWeight', 'data_io.getIDFWeight', (['wordfile'], {}), '(wordfile)\n', (861, 871), False, 'import data_io, sim_algo, eval, params\n'), ((1152, 1245), 'eval.sim_evaluate_one', 'eval.sim_evaluate_one', (['We', 'words', 'weight4ind', 'sim_algo.weighted_average_sim_rmpc', 'params'], {}), '(We, words, weight4ind, sim_algo.\n weighted_average_sim_rmpc, params)\n', (1173, 1245), False, 'import data_io, sim_algo, eval, params\n'), ((1708, 1760), 'pickle.dump', 'pickle.dump', (['[parr4para, sarr4para, comment4para]', 'f'], {}), '([parr4para, sarr4para, comment4para], f)\n', (1719, 1760), False, 'import pickle, sys\n')] |
div72/py2many | tests/cases/cls.py | 60277bc13597bd32d078b88a7390715568115fc6 | class Foo:
def bar(self):
return "a"
if __name__ == "__main__":
f = Foo()
b = f.bar()
print(b) | [] |
jhja/RFNN | theano-rfnn/mnist_loader.py | a63641d6e584df743a5e0a9efaf41911f057a977 | import numpy as np
import os
from random import shuffle
datasets_dir = './../data/'
def one_hot(x,n):
if type(x) == list:
x = np.array(x)
x = x.flatten()
o_h = np.zeros((len(x),n))
o_h[np.arange(len(x)),x] = 1
return o_h
def mnist(ntrain=60000,ntest=10000,onehot=True):
ntrain=np.array(ntrain).astype(int).squeeze()
data_dir = os.path.join(datasets_dir,'mnist/')
fd = open(os.path.join(data_dir,'train-images-idx3-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
trX = loaded[16:].reshape((60000,28*28)).astype(float)
fd = open(os.path.join(data_dir,'train-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
trY = loaded[8:].reshape((60000))
fd = open(os.path.join(data_dir,'t10k-images-idx3-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
teX = loaded[16:].reshape((10000,28*28)).astype(float)
fd = open(os.path.join(data_dir,'t10k-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
teY = loaded[8:].reshape((10000))
trY_shuffle = []
trX_shuffle = []
index_shuf = range(len(trY))
shuffle(index_shuf)
for i in index_shuf:
trY_shuffle.append(trY[i])
trX_shuffle.append(trX[i])
trX = np.asarray(trX_shuffle)
trY = np.asarray(trY_shuffle)
trX = trX/255.
teX = teX/255.
trX = trX[:ntrain]
trY = trY[:ntrain]
teX = teX[:ntest]
teY = teY[:ntest]
if onehot:
trY = one_hot(trY, 10)
teY = one_hot(teY, 10)
else:
trY = np.asarray(trY)
teY = np.asarray(teY)
return trX,teX,trY,teY
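# Minimal usage sketch (not part of the original module). It assumes the four
# MNIST idx files have already been downloaded into ./../data/mnist/.
if __name__ == '__main__':
    trX, teX, trY, teY = mnist(ntrain=1000, ntest=1000, onehot=True)
    print(trX.shape)  # (1000, 784)
    print(trY.shape)  # (1000, 10) -- one-hot encoded labels
    print(teX.shape)  # (1000, 784)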
| [((344, 380), 'os.path.join', 'os.path.join', (['datasets_dir', '"""mnist/"""'], {}), "(datasets_dir, 'mnist/')\n", (356, 380), False, 'import os\n'), ((457, 493), 'numpy.fromfile', 'np.fromfile', ([], {'file': 'fd', 'dtype': 'np.uint8'}), '(file=fd, dtype=np.uint8)\n', (468, 493), True, 'import numpy as np\n'), ((630, 666), 'numpy.fromfile', 'np.fromfile', ([], {'file': 'fd', 'dtype': 'np.uint8'}), '(file=fd, dtype=np.uint8)\n', (641, 666), True, 'import numpy as np\n'), ((781, 817), 'numpy.fromfile', 'np.fromfile', ([], {'file': 'fd', 'dtype': 'np.uint8'}), '(file=fd, dtype=np.uint8)\n', (792, 817), True, 'import numpy as np\n'), ((953, 989), 'numpy.fromfile', 'np.fromfile', ([], {'file': 'fd', 'dtype': 'np.uint8'}), '(file=fd, dtype=np.uint8)\n', (964, 989), True, 'import numpy as np\n'), ((1107, 1126), 'random.shuffle', 'shuffle', (['index_shuf'], {}), '(index_shuf)\n', (1114, 1126), False, 'from random import shuffle\n'), ((1233, 1256), 'numpy.asarray', 'np.asarray', (['trX_shuffle'], {}), '(trX_shuffle)\n', (1243, 1256), True, 'import numpy as np\n'), ((1267, 1290), 'numpy.asarray', 'np.asarray', (['trY_shuffle'], {}), '(trY_shuffle)\n', (1277, 1290), True, 'import numpy as np\n'), ((130, 141), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (138, 141), True, 'import numpy as np\n'), ((394, 443), 'os.path.join', 'os.path.join', (['data_dir', '"""train-images-idx3-ubyte"""'], {}), "(data_dir, 'train-images-idx3-ubyte')\n", (406, 443), False, 'import os\n'), ((567, 616), 'os.path.join', 'os.path.join', (['data_dir', '"""train-labels-idx1-ubyte"""'], {}), "(data_dir, 'train-labels-idx1-ubyte')\n", (579, 616), False, 'import os\n'), ((719, 767), 'os.path.join', 'os.path.join', (['data_dir', '"""t10k-images-idx3-ubyte"""'], {}), "(data_dir, 't10k-images-idx3-ubyte')\n", (731, 767), False, 'import os\n'), ((891, 939), 'os.path.join', 'os.path.join', (['data_dir', '"""t10k-labels-idx1-ubyte"""'], {}), "(data_dir, 't10k-labels-idx1-ubyte')\n", (903, 939), False, 'import os\n'), ((1528, 1543), 'numpy.asarray', 'np.asarray', (['trY'], {}), '(trY)\n', (1538, 1543), True, 'import numpy as np\n'), ((1558, 1573), 'numpy.asarray', 'np.asarray', (['teY'], {}), '(teY)\n', (1568, 1573), True, 'import numpy as np\n'), ((290, 306), 'numpy.array', 'np.array', (['ntrain'], {}), '(ntrain)\n', (298, 306), True, 'import numpy as np\n')] |
crltsnch/Ejercicios-grupales | Ejercicio 2.py | 72e01d6489816ea1b9308af1abd62792e5464c93 | import math
import os
import random
import re
import sys
def compareTriplets(a, b):
puntosA=0
puntosB=0
for i in range (0,3):
if a[i]<b[i]:
puntosB+=1
elif a[i]>b[i]:
puntosA+=1
puntosTotales=[puntosA, puntosB]
return puntosTotales
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'] + 'solucion2.txt', 'w')
print("Escribe las notas de a")
a = list(map(int, input().rstrip().split()))
print("Escribe las notas de b")
b = list(map(int, input().rstrip().split()))
result = compareTriplets(a, b)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close() | [] |
nityagautam/ReportDashboard-backend | app/routes/router.py | d23fe008cb0df6a703fcd665181897a75b71d5b2 | #===============================================================
# @author: [email protected]
# @written: 08 December 2021
# @desc: Routes for the Backend server
#===============================================================
# Import section with referecne of entry file or main file;
from __main__ import application
from flask import jsonify, render_template, url_for, request, redirect
# Local sample data import
from app.config.uiconfig import app_ui_config
from app import sample_data
# ==============================================================
# App Routes/Gateways
# ==============================================================
@application.route('/test', methods=['GET'])
def test():
return '<h4>HELLO WORLD!</h4><hr/> it works!'
@application.route('/', methods=['GET'])
@application.route('/home', methods=['GET'])
@application.route('/dashboard', methods=['GET'])
def root():
return render_template("dashboard.html", app_data=app_ui_config, data=sample_data.latest_data)
@application.route('/history', methods=['GET'])
def history():
return render_template("history.html", app_data=app_ui_config, data=sample_data.history_data)
@application.route('/about', methods=['GET'])
def about():
return render_template("about.html", app_data=app_ui_config, data=sample_data.latest_data)
@application.route('/get-notes', methods=['POST'])
def get_todo():
print("KEY :: VALUE (from the received form data)")
print([(key, val) for key, val in zip(request.form.keys(), request.form.values())])
return redirect("/notes", code=302)
@application.route('/notes')
def info():
return render_template("notes.html", app_data=app_ui_config)
@application.route('/sample-data')
def get_sample_data():
return jsonify(app_ui_config)
# ==============================================================
# Error Handlers Starts
# ==============================================================
# 404 Handler; We can also pass the specific request errors codes to the decorator;
@application.errorhandler(404)
def not_found(err):
return render_template("error.html", app_data=app_ui_config, error_data=err), 400
# Exception/Error handler; We can also pass the specific errors to the decorator;
@application.errorhandler(TypeError)
def server_error(err):
application.logger.exception(err)
return render_template("error.html", app_data=app_ui_config, error_data=err), 500
# Exception/Error handler; We can also pass the specific errors to the decorator;
@application.errorhandler(Exception)
def server_error(err):
application.logger.exception(err)
return render_template("error.html", app_data=app_ui_config, error_data=err), 500
# ==============================================================
# Error Handlers Ends
# ==============================================================
# Route For Sample data
@application.route('/data')
def get_data():
data = {
"reports": [
{
"build": "build_no",
"created": "Imported 05052021T11:30:00:00IST",
"platform": "Imported Win/Unix/Mac",
"project_name": "project_name_1",
"report_location_path": "path/to/report/location/index.html",
"report_summary": {"pass": "50", "fail": "0", "ignored": "0", "skipped": "0"},
"total_time": "35 min."
},
{
"build": "build_no",
"created": "Imported 05052021T11:30:00:00IST",
"platform": "Imported Win/Unix/Mac",
"project_name": "project_name_2",
"report_location_path": "path/to/report/location/index.html",
"report_summary": {"pass": "10", "fail": "2", "ignored": "0", "skipped": "0"},
"total_time": "0.2345 secs."
},
{
"build": "build_no",
"created": "Imported 05052021T11:30:00:00IST",
"platform": "Imported Win/Unix/Mac",
"project_name": "project_name_3",
"report_location_path": "path/to/report/location/index.html",
"report_summary": {"pass": "100", "fail": "5", "ignored": "0", "skipped": "0"},
"total_time": "5 days"
}
]
}
return jsonify(data)
# ==============================================================
# Extra routes starts
# ==============================================================
@application.route('/sample1')
def sample1():
return render_template("web-analytics-overview.html")
@application.route('/sample2')
def sample2():
return render_template("web-analytics-real-time.html")
@application.route('/logo')
def get_logo():
"""
    Renders a simple HTML page that embeds the application logo from the
    static assets folder.
:return: N/A
"""
# set template directory of the Flask App to the path set by the user as command line arg.
return f'<html><head><title>Root</title><head><body><hr/> Welcome to the main page <hr/> ' \
f'Building image from static public location: <br/> ' \
f'<img src=\'{url_for("static", filename="images/logo.svg")}\' /> </body></html>'
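# ==============================================================
# Usage sketch (assumption - not part of the original module)
# ==============================================================
# The JSON endpoints above can be exercised without running a server by using
# Flask's built-in test client, e.g.:
#
#   client = application.test_client()
#   response = client.get('/data')
#   assert response.status_code == 200
#   print(response.get_json()['reports'][0]['project_name'])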
| [((675, 718), '__main__.application.route', 'application.route', (['"""/test"""'], {'methods': "['GET']"}), "('/test', methods=['GET'])\n", (692, 718), False, 'from __main__ import application\n'), ((787, 826), '__main__.application.route', 'application.route', (['"""/"""'], {'methods': "['GET']"}), "('/', methods=['GET'])\n", (804, 826), False, 'from __main__ import application\n'), ((829, 872), '__main__.application.route', 'application.route', (['"""/home"""'], {'methods': "['GET']"}), "('/home', methods=['GET'])\n", (846, 872), False, 'from __main__ import application\n'), ((875, 923), '__main__.application.route', 'application.route', (['"""/dashboard"""'], {'methods': "['GET']"}), "('/dashboard', methods=['GET'])\n", (892, 923), False, 'from __main__ import application\n'), ((1043, 1089), '__main__.application.route', 'application.route', (['"""/history"""'], {'methods': "['GET']"}), "('/history', methods=['GET'])\n", (1060, 1089), False, 'from __main__ import application\n'), ((1211, 1255), '__main__.application.route', 'application.route', (['"""/about"""'], {'methods': "['GET']"}), "('/about', methods=['GET'])\n", (1228, 1255), False, 'from __main__ import application\n'), ((1372, 1421), '__main__.application.route', 'application.route', (['"""/get-notes"""'], {'methods': "['POST']"}), "('/get-notes', methods=['POST'])\n", (1389, 1421), False, 'from __main__ import application\n'), ((1632, 1659), '__main__.application.route', 'application.route', (['"""/notes"""'], {}), "('/notes')\n", (1649, 1659), False, 'from __main__ import application\n'), ((1746, 1779), '__main__.application.route', 'application.route', (['"""/sample-data"""'], {}), "('/sample-data')\n", (1763, 1779), False, 'from __main__ import application\n'), ((2091, 2120), '__main__.application.errorhandler', 'application.errorhandler', (['(404)'], {}), '(404)\n', (2115, 2120), False, 'from __main__ import application\n'), ((2318, 2353), '__main__.application.errorhandler', 'application.errorhandler', (['TypeError'], {}), '(TypeError)\n', (2342, 2353), False, 'from __main__ import application\n'), ((2593, 2628), '__main__.application.errorhandler', 'application.errorhandler', (['Exception'], {}), '(Exception)\n', (2617, 2628), False, 'from __main__ import application\n'), ((2963, 2989), '__main__.application.route', 'application.route', (['"""/data"""'], {}), "('/data')\n", (2980, 2989), False, 'from __main__ import application\n'), ((4612, 4641), '__main__.application.route', 'application.route', (['"""/sample1"""'], {}), "('/sample1')\n", (4629, 4641), False, 'from __main__ import application\n'), ((4723, 4752), '__main__.application.route', 'application.route', (['"""/sample2"""'], {}), "('/sample2')\n", (4740, 4752), False, 'from __main__ import application\n'), ((4835, 4861), '__main__.application.route', 'application.route', (['"""/logo"""'], {}), "('/logo')\n", (4852, 4861), False, 'from __main__ import application\n'), ((949, 1041), 'flask.render_template', 'render_template', (['"""dashboard.html"""'], {'app_data': 'app_ui_config', 'data': 'sample_data.latest_data'}), "('dashboard.html', app_data=app_ui_config, data=sample_data.\n latest_data)\n", (964, 1041), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((1118, 1209), 'flask.render_template', 'render_template', (['"""history.html"""'], {'app_data': 'app_ui_config', 'data': 'sample_data.history_data'}), "('history.html', app_data=app_ui_config, data=sample_data.\n history_data)\n", (1133, 1209), False, 'from flask import 
jsonify, render_template, url_for, request, redirect\n'), ((1282, 1370), 'flask.render_template', 'render_template', (['"""about.html"""'], {'app_data': 'app_ui_config', 'data': 'sample_data.latest_data'}), "('about.html', app_data=app_ui_config, data=sample_data.\n latest_data)\n", (1297, 1370), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((1597, 1625), 'flask.redirect', 'redirect', (['"""/notes"""'], {'code': '(302)'}), "('/notes', code=302)\n", (1605, 1625), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((1685, 1738), 'flask.render_template', 'render_template', (['"""notes.html"""'], {'app_data': 'app_ui_config'}), "('notes.html', app_data=app_ui_config)\n", (1700, 1738), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((1816, 1838), 'flask.jsonify', 'jsonify', (['app_ui_config'], {}), '(app_ui_config)\n', (1823, 1838), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((2383, 2416), '__main__.application.logger.exception', 'application.logger.exception', (['err'], {}), '(err)\n', (2411, 2416), False, 'from __main__ import application\n'), ((2658, 2691), '__main__.application.logger.exception', 'application.logger.exception', (['err'], {}), '(err)\n', (2686, 2691), False, 'from __main__ import application\n'), ((4439, 4452), 'flask.jsonify', 'jsonify', (['data'], {}), '(data)\n', (4446, 4452), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((4670, 4716), 'flask.render_template', 'render_template', (['"""web-analytics-overview.html"""'], {}), "('web-analytics-overview.html')\n", (4685, 4716), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((4781, 4828), 'flask.render_template', 'render_template', (['"""web-analytics-real-time.html"""'], {}), "('web-analytics-real-time.html')\n", (4796, 4828), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((2154, 2223), 'flask.render_template', 'render_template', (['"""error.html"""'], {'app_data': 'app_ui_config', 'error_data': 'err'}), "('error.html', app_data=app_ui_config, error_data=err)\n", (2169, 2223), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((2429, 2498), 'flask.render_template', 'render_template', (['"""error.html"""'], {'app_data': 'app_ui_config', 'error_data': 'err'}), "('error.html', app_data=app_ui_config, error_data=err)\n", (2444, 2498), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((2704, 2773), 'flask.render_template', 'render_template', (['"""error.html"""'], {'app_data': 'app_ui_config', 'error_data': 'err'}), "('error.html', app_data=app_ui_config, error_data=err)\n", (2719, 2773), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((5151, 5196), 'flask.url_for', 'url_for', (['"""static"""'], {'filename': '"""images/logo.svg"""'}), "('static', filename='images/logo.svg')\n", (5158, 5196), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((1539, 1558), 'flask.request.form.keys', 'request.form.keys', ([], {}), '()\n', (1556, 1558), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((1560, 1581), 'flask.request.form.values', 'request.form.values', ([], {}), '()\n', (1579, 1581), False, 'from flask import jsonify, render_template, url_for, request, redirect\n')] |
carldlaird/idaes-pse | idaes/generic_models/properties/core/examples/ASU_PR.py | cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Air separation phase equilibrium package using Peng-Robinson EoS.
Example property package using the Generic Property Package Framework.
This example shows how to set up a property package to do air separation
phase equilibrium in the generic framework using Peng-Robinson equation
along with methods drawn from the pre-built IDAES property libraries.
The example includes two dictionaries.
1. The dictionary named configuration contains parameters obtained from
The Properties of Gases and Liquids (1987) 4th edition and NIST.
2. The dictionary named configuration_Dowling_2015 contains parameters used in
A framework for efficient large scale equation-oriented flowsheet optimization
(2015) Dowling. The parameters are extracted from Properties of Gases and
Liquids (1977) 3rd edition for Antoine's vapor pressure equation and acentric
factors; the heat capacity (Cp) values from the same edition were converted to J.
"""
# Import Python libraries
import logging
# Import Pyomo units
from pyomo.environ import units as pyunits
# Import IDAES cores
from idaes.core import LiquidPhase, VaporPhase, Component
from idaes.generic_models.properties.core.state_definitions import FTPx
from idaes.generic_models.properties.core.eos.ceos import Cubic, CubicType
from idaes.generic_models.properties.core.phase_equil import SmoothVLE
from idaes.generic_models.properties.core.phase_equil.bubble_dew import \
LogBubbleDew
from idaes.generic_models.properties.core.phase_equil.forms import log_fugacity
from idaes.generic_models.properties.core.pure import RPP4
from idaes.generic_models.properties.core.pure import NIST
from idaes.generic_models.properties.core.pure import RPP3
# Set up logger
_log = logging.getLogger(__name__)
# ---------------------------------------------------------------------
# Configuration dictionary for a Peng-Robinson Oxygen-Argon-Nitrogen system
# Data Sources:
# [1] The Properties of Gases and Liquids (1987)
# 4th edition, Chemical Engineering Series - Robert C. Reid
# [2] NIST, https://webbook.nist.gov/
# Retrieved 16th August, 2020
# [3] The Properties of Gases and Liquids (1977)
#     3rd edition, Chemical Engineering Series - Robert C. Reid
#     Cp parameters were converted to J in Dowling 2015
# [4] A framework for efficient large scale equation-oriented flowsheet optimization (2015)
# Computers and Chemical Engineering - Alexander W. Dowling
configuration = {
# Specifying components
"components": {
"nitrogen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": NIST,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (28.0135E-3, pyunits.kg/pyunits.mol), # [1]
"pressure_crit": (34e5, pyunits.Pa), # [1]
"temperature_crit": (126.2, pyunits.K), # [1]
"omega": 0.037, # [1]
"cp_mol_ig_comp_coeff": {
"A": (3.115E1,
pyunits.J/pyunits.mol/pyunits.K), # [1]
"B": (-1.357E-2,
pyunits.J/pyunits.mol/pyunits.K**2),
"C": (2.680E-5,
pyunits.J/pyunits.mol/pyunits.K**3),
"D": (-1.168E-8,
pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
191.61, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
"A": (3.7362, None), # [2]
"B": (264.651, pyunits.K),
"C": (-6.788, pyunits.K)}}},
"argon": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": NIST,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (39.948E-3, pyunits.kg/pyunits.mol), # [1]
"pressure_crit": (48.98e5, pyunits.Pa), # [1]
"temperature_crit": (150.86, pyunits.K), # [1]
"omega": 0.001, # [1]
"cp_mol_ig_comp_coeff": {
"A": (2.050E1,
pyunits.J/pyunits.mol/pyunits.K), # [1]
"B": (0.0, pyunits.J/pyunits.mol/pyunits.K**2),
"C": (0.0, pyunits.J/pyunits.mol/pyunits.K**3),
"D": (0.0, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
154.8, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {"A": (3.29555, None), # [2]
"B": (215.24, pyunits.K),
"C": (-22.233, pyunits.K)}}},
"oxygen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": NIST,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (31.999E-3, pyunits.kg/pyunits.mol), # [1]
"pressure_crit": (50.43e5, pyunits.Pa), # [1]
"temperature_crit": (154.58, pyunits.K), # [1]
"omega": 0.025, # [1]
"cp_mol_ig_comp_coeff": {
"A": (2.811E1, pyunits.J/pyunits.mol/pyunits.K),
"B": (-3.680E-6,
pyunits.J/pyunits.mol/pyunits.K**2),
"C": (1.746E-5, pyunits.J/pyunits.mol/pyunits.K**3),
"D": (-1.065E-8,
pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
205.152, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
"A": (3.85845, None), # [2]
"B": (325.675, pyunits.K),
"C": (-5.667, pyunits.K)}}}},
# Specifying phases
"phases": {"Liq": {"type": LiquidPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}},
"Vap": {"type": VaporPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}}},
# Set base units of measurement
"base_units": {"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K},
# Specifying state definition
"state_definition": FTPx,
"state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s),
"temperature": (10, 300, 350, pyunits.K),
"pressure": (5e4, 1e5, 1e7, pyunits.Pa)},
"pressure_ref": (101325, pyunits.Pa),
"temperature_ref": (298.15, pyunits.K),
# Defining phase equilibria
"phases_in_equilibrium": [("Vap", "Liq")],
"phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE},
"bubble_dew_method": LogBubbleDew,
"parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000,
("nitrogen", "argon"): -0.26e-2,
("nitrogen", "oxygen"): -0.119e-1,
("argon", "nitrogen"): -0.26e-2,
("argon", "argon"): 0.000,
("argon", "oxygen"): 0.104e-1,
("oxygen", "nitrogen"): -0.119e-1,
("oxygen", "argon"): 0.104e-1,
("oxygen", "oxygen"): 0.000}}}
configuration_Dowling_2015 = {
# Specifying components
"components": {
"nitrogen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP3,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (28.0135E-3, pyunits.kg/pyunits.mol), # [3]
"pressure_crit": (33.943875e5, pyunits.Pa), # [4]
"temperature_crit": (126.2, pyunits.K), # [4]
"omega": 0.04, # [3]
"cp_mol_ig_comp_coeff": {
'A': (3.112896E1, pyunits.J/pyunits.mol/pyunits.K), # [3]
'B': (-1.356E-2, pyunits.J/pyunits.mol/pyunits.K**2),
'C': (2.6878E-5, pyunits.J/pyunits.mol/pyunits.K**3),
'D': (-1.167E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
191.61, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
'A': (14.9342, None), # [3]
'B': (588.72, pyunits.K),
'C': (-6.60, pyunits.K)}}},
"argon": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP3,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (39.948E-3, pyunits.kg/pyunits.mol), # [3]
"pressure_crit": (48.737325e5, pyunits.Pa), # [4]
"temperature_crit": (150.86, pyunits.K), # [4]
"omega": -0.004, # [1]
"cp_mol_ig_comp_coeff": {
'A': (2.0790296E1, pyunits.J/pyunits.mol/pyunits.K), # [3]
'B': (-3.209E-05, pyunits.J/pyunits.mol/pyunits.K**2),
'C': (5.163E-08, pyunits.J/pyunits.mol/pyunits.K**3),
'D': (0.0, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [3]
"entr_mol_form_vap_comp_ref": (
154.8, pyunits.J/pyunits.mol/pyunits.K), # [3]
"pressure_sat_comp_coeff": {
'A': (15.2330, None), # [3]
'B': (700.51, pyunits.K),
'C': (-5.84, pyunits.K)}}},
"oxygen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP3,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (31.999E-3, pyunits.kg/pyunits.mol), # [3]
"pressure_crit": (50.45985e5, pyunits.Pa), # [4]
"temperature_crit": (154.58, pyunits.K), # [4]
"omega": 0.021, # [1]
"cp_mol_ig_comp_coeff": {
'A': (2.8087192E1, pyunits.J/pyunits.mol/pyunits.K), # [3]
'B': (-3.678E-6, pyunits.J/pyunits.mol/pyunits.K**2),
'C': (1.745E-5, pyunits.J/pyunits.mol/pyunits.K**3),
'D': (-1.064E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
205.152, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
'A': (15.4075, None), # [3]
'B': (734.55, pyunits.K),
'C': (-6.45, pyunits.K)}}}},
# Specifying phases
"phases": {"Liq": {"type": LiquidPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}},
"Vap": {"type": VaporPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}}},
# Set base units of measurement
"base_units": {"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K},
# Specifying state definition
"state_definition": FTPx,
"state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s),
"temperature": (10, 300, 350, pyunits.K),
"pressure": (5e4, 1e5, 1e7, pyunits.Pa)},
"pressure_ref": (101325, pyunits.Pa),
"temperature_ref": (298.15, pyunits.K),
# Defining phase equilibria
"phases_in_equilibrium": [("Vap", "Liq")],
"phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE},
"bubble_dew_method": LogBubbleDew,
"parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000,
("nitrogen", "argon"): -0.26e-2,
("nitrogen", "oxygen"): -0.119e-1,
("argon", "nitrogen"): -0.26e-2,
("argon", "argon"): 0.000,
("argon", "oxygen"): 0.104e-1,
("oxygen", "nitrogen"): -0.119e-1,
("oxygen", "argon"): 0.104e-1,
("oxygen", "oxygen"): 0.000}}}
| [((2492, 2519), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2509, 2519), False, 'import logging\n')] |
bellanov/calculator | tests/functional/test_calculator.py | a66e68a368a5212247aeff3291c9cb8b508e91be | """TODO: Move the Threads Here"""
| [] |
Sette/autokeras | autokeras/hypermodel/graph.py | c5a83607a899ad545916b3794561d6908d9cdbac | import functools
import pickle
import kerastuner
import tensorflow as tf
from tensorflow.python.util import nest
from autokeras.hypermodel import base
from autokeras.hypermodel import compiler
class Graph(kerastuner.engine.stateful.Stateful):
"""A graph consists of connected Blocks, HyperBlocks, Preprocessors or Heads.
# Arguments
inputs: A list of input node(s) for the Graph.
outputs: A list of output node(s) for the Graph.
override_hps: A list of HyperParameters. The predefined HyperParameters that
will override the space of the Hyperparameters defined in the Hypermodels
with the same names.
"""
def __init__(self, inputs, outputs, override_hps=None):
super().__init__()
self.inputs = nest.flatten(inputs)
self.outputs = nest.flatten(outputs)
self._node_to_id = {}
self._nodes = []
self.blocks = []
self._block_to_id = {}
self._build_network()
self.override_hps = override_hps or []
def compile(self, func):
"""Share the information between blocks by calling functions in compiler.
# Arguments
func: A dictionary. The keys are the block classes. The values are
corresponding compile functions.
"""
for block in self.blocks:
if block.__class__ in func:
func[block.__class__](block)
def _register_hps(self, hp):
"""Register the override HyperParameters for current HyperParameters."""
for single_hp in self.override_hps:
name = single_hp.name
if name not in hp.values:
hp.register(single_hp.name,
single_hp.__class__.__name__,
single_hp.get_config())
hp.values[name] = single_hp.default
def _build_network(self):
self._node_to_id = {}
# Recursively find all the interested nodes.
for input_node in self.inputs:
self._search_network(input_node, self.outputs, set(), set())
self._nodes = sorted(list(self._node_to_id.keys()),
key=lambda x: self._node_to_id[x])
for node in (self.inputs + self.outputs):
if node not in self._node_to_id:
raise ValueError('Inputs and outputs not connected.')
# Find the blocks.
blocks = []
for input_node in self._nodes:
for block in input_node.out_blocks:
if any([output_node in self._node_to_id
for output_node in block.outputs]) and block not in blocks:
blocks.append(block)
# Check if all the inputs of the blocks are set as inputs.
for block in blocks:
for input_node in block.inputs:
if input_node not in self._node_to_id:
raise ValueError('A required input is missing for HyperModel '
'{name}.'.format(name=block.name))
# Calculate the in degree of all the nodes
in_degree = [0] * len(self._nodes)
for node_id, node in enumerate(self._nodes):
in_degree[node_id] = len([
block for block in node.in_blocks if block in blocks])
# Add the blocks in topological order.
self.blocks = []
self._block_to_id = {}
while len(blocks) != 0:
new_added = []
# Collect blocks with in degree 0.
for block in blocks:
if any([in_degree[self._node_to_id[node]]
for node in block.inputs]):
continue
new_added.append(block)
# Remove the collected blocks from blocks.
for block in new_added:
blocks.remove(block)
for block in new_added:
# Add the collected blocks to the AutoModel.
self._add_block(block)
# Decrease the in degree of the output nodes.
for output_node in block.outputs:
if output_node not in self._node_to_id:
continue
output_node_id = self._node_to_id[output_node]
in_degree[output_node_id] -= 1
def _search_network(self, input_node, outputs, in_stack_nodes,
visited_nodes):
visited_nodes.add(input_node)
in_stack_nodes.add(input_node)
outputs_reached = False
if input_node in outputs:
outputs_reached = True
for block in input_node.out_blocks:
for output_node in block.outputs:
if output_node in in_stack_nodes:
raise ValueError('The network has a cycle.')
if output_node not in visited_nodes:
self._search_network(output_node, outputs, in_stack_nodes,
visited_nodes)
if output_node in self._node_to_id.keys():
outputs_reached = True
if outputs_reached:
self._add_node(input_node)
in_stack_nodes.remove(input_node)
def _add_block(self, block):
if block not in self.blocks:
block_id = len(self.blocks)
self._block_to_id[block] = block_id
self.blocks.append(block)
def _add_node(self, input_node):
if input_node not in self._node_to_id:
self._node_to_id[input_node] = len(self._node_to_id)
def _get_block(self, name):
for block in self.blocks:
if block.name == name:
return block
raise ValueError('Cannot find block named {name}.'.format(name=name))
def get_state(self):
# TODO: Include everything including the graph structure.
block_state = {str(block_id): block.get_state()
for block_id, block in enumerate(self.blocks)}
node_state = {str(node_id): node.get_state()
for node_id, node in enumerate(self._nodes)}
return {'blocks': block_state, 'nodes': node_state}
def set_state(self, state):
# TODO: Include everything including the graph structure.
block_state = state['blocks']
node_state = state['nodes']
for block_id, block in enumerate(self.blocks):
block.set_state(block_state[str(block_id)])
for node_id, node in enumerate(self._nodes):
node.set_state(node_state[str(node_id)])
def save(self, fname):
state = self.get_state()
with tf.io.gfile.GFile(fname, 'wb') as f:
pickle.dump(state, f)
return str(fname)
def reload(self, fname):
with tf.io.gfile.GFile(fname, 'rb') as f:
state = pickle.load(f)
self.set_state(state)
def build(self, hp):
self._register_hps(hp)
class PlainGraph(Graph):
"""A graph built from a HyperGraph to produce KerasGraph and PreprocessGraph.
A PlainGraph does not contain HyperBlock. HyperGraph's hyper_build function
returns an instance of PlainGraph, which can be directly built into a KerasGraph
and a PreprocessGraph.
# Arguments
inputs: A list of input node(s) for the PlainGraph.
outputs: A list of output node(s) for the PlainGraph.
"""
def __init__(self, inputs, outputs, **kwargs):
self._keras_model_inputs = []
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
def _build_network(self):
super()._build_network()
# Find the model input nodes
for node in self._nodes:
if self._is_keras_model_inputs(node):
self._keras_model_inputs.append(node)
self._keras_model_inputs = sorted(self._keras_model_inputs,
key=lambda x: self._node_to_id[x])
@staticmethod
def _is_keras_model_inputs(node):
for block in node.in_blocks:
if not isinstance(block, base.Preprocessor):
return False
for block in node.out_blocks:
if not isinstance(block, base.Preprocessor):
return True
return False
def build_keras_graph(self):
return KerasGraph(self._keras_model_inputs,
self.outputs,
override_hps=self.override_hps)
def build_preprocess_graph(self):
return PreprocessGraph(self.inputs,
self._keras_model_inputs,
override_hps=self.override_hps)
class KerasGraph(Graph, kerastuner.HyperModel):
"""A graph and HyperModel to be built into a Keras model."""
def build(self, hp):
"""Build the HyperModel into a Keras Model."""
super().build(hp)
self.compile(compiler.AFTER)
real_nodes = {}
for input_node in self.inputs:
node_id = self._node_to_id[input_node]
real_nodes[node_id] = input_node.build()
for block in self.blocks:
if isinstance(block, base.Preprocessor):
continue
temp_inputs = [real_nodes[self._node_to_id[input_node]]
for input_node in block.inputs]
outputs = block.build(hp, inputs=temp_inputs)
outputs = nest.flatten(outputs)
for output_node, real_output_node in zip(block.outputs, outputs):
real_nodes[self._node_to_id[output_node]] = real_output_node
model = tf.keras.Model(
[real_nodes[self._node_to_id[input_node]] for input_node in
self.inputs],
[real_nodes[self._node_to_id[output_node]] for output_node in
self.outputs])
return self._compile_keras_model(hp, model)
def _get_metrics(self):
metrics = {}
for output_node in self.outputs:
block = output_node.in_blocks[0]
if isinstance(block, base.Head):
metrics[block.name] = block.metrics
return metrics
def _get_loss(self):
loss = {}
for output_node in self.outputs:
block = output_node.in_blocks[0]
if isinstance(block, base.Head):
loss[block.name] = block.loss
return loss
def _compile_keras_model(self, hp, model):
# Specify hyperparameters from compile(...)
optimizer = hp.Choice('optimizer',
['adam', 'adadelta', 'sgd'],
default='adam')
model.compile(optimizer=optimizer,
metrics=self._get_metrics(),
loss=self._get_loss())
return model
class PreprocessGraph(Graph):
"""A graph consists of only Preprocessors.
It is both a search space with Hyperparameters and a model to be fitted. It
preprocess the dataset with the Preprocessors. The output is the input to the
Keras model. It does not extend Hypermodel class because it cannot be built into
a Keras model.
"""
def preprocess(self, dataset, validation_data=None, fit=False):
"""Preprocess the data to be ready for the Keras Model.
# Arguments
dataset: tf.data.Dataset. Training data.
validation_data: tf.data.Dataset. Validation data.
fit: Boolean. Whether to fit the preprocessing layers with x and y.
# Returns
            A tuple of two preprocessed tf.data.Dataset, (train, validation).
            The validation element is None when no validation data is provided.
"""
dataset = self._preprocess(dataset, fit=fit)
if validation_data:
validation_data = self._preprocess(validation_data)
return dataset, validation_data
def _preprocess(self, dataset, fit=False):
# A list of input node ids in the same order as the x in the dataset.
input_node_ids = [self._node_to_id[input_node] for input_node in self.inputs]
# Iterate until all the model inputs have their data.
while set(map(lambda node: self._node_to_id[node], self.outputs)
) - set(input_node_ids):
# Gather the blocks for the next iteration over the dataset.
blocks = []
for node_id in input_node_ids:
for block in self._nodes[node_id].out_blocks:
if block in self.blocks:
blocks.append(block)
if fit:
# Iterate the dataset to fit the preprocessors in current depth.
self._fit(dataset, input_node_ids, blocks)
# Transform the dataset.
output_node_ids = []
dataset = dataset.map(functools.partial(
self._transform,
input_node_ids=input_node_ids,
output_node_ids=output_node_ids,
blocks=blocks,
fit=fit))
# Build input_node_ids for next depth.
input_node_ids = output_node_ids
return dataset
def _fit(self, dataset, input_node_ids, blocks):
# Iterate the dataset to fit the preprocessors in current depth.
for x, y in dataset:
x = nest.flatten(x)
id_to_data = {
node_id: temp_x for temp_x, node_id in zip(x, input_node_ids)
}
for block in blocks:
data = [id_to_data[self._node_to_id[input_node]]
for input_node in block.inputs]
block.update(data, y=y)
# Finalize and set the shapes of the output nodes.
for block in blocks:
block.finalize()
nest.flatten(block.outputs)[0].shape = block.output_shape
def _transform(self,
x,
y,
input_node_ids,
output_node_ids,
blocks,
fit=False):
x = nest.flatten(x)
id_to_data = {
node_id: temp_x
for temp_x, node_id in zip(x, input_node_ids)
}
output_data = {}
# Transform each x by the corresponding block.
for hm in blocks:
data = [id_to_data[self._node_to_id[input_node]]
for input_node in hm.inputs]
data = tf.py_function(functools.partial(hm.transform, fit=fit),
inp=nest.flatten(data),
Tout=hm.output_types())
data = nest.flatten(data)[0]
data.set_shape(hm.output_shape)
output_data[self._node_to_id[hm.outputs[0]]] = data
        # Keep the Keras Model inputs even if they are not inputs to the blocks.
for node_id, data in id_to_data.items():
if self._nodes[node_id] in self.outputs:
output_data[node_id] = data
for node_id in sorted(output_data.keys()):
output_node_ids.append(node_id)
return tuple(map(
lambda node_id: output_data[node_id], output_node_ids)), y
def build(self, hp):
"""Obtain the values of all the HyperParameters.
        Unlike the build function of a Hypermodel, this build function does not
        produce a Keras model. It only obtains the hyperparameter values from
        HyperParameters.
# Arguments
hp: HyperParameters.
"""
super().build(hp)
self.compile(compiler.BEFORE)
for block in self.blocks:
block.build(hp)
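# copy() clones a Node/Block by instantiating the same class and restoring the
# state captured via get_state()/set_state().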
def copy(old_instance):
instance = old_instance.__class__()
instance.set_state(old_instance.get_state())
return instance
class HyperGraph(Graph):
"""A HyperModel based on connected Blocks and HyperBlocks.
# Arguments
inputs: A list of input node(s) for the HyperGraph.
outputs: A list of output node(s) for the HyperGraph.
"""
def __init__(self, inputs, outputs, **kwargs):
super().__init__(inputs, outputs, **kwargs)
self.compile(compiler.HYPER)
def build_graphs(self, hp):
plain_graph = self.hyper_build(hp)
preprocess_graph = plain_graph.build_preprocess_graph()
preprocess_graph.build(hp)
return (preprocess_graph,
plain_graph.build_keras_graph())
def hyper_build(self, hp):
"""Build a GraphHyperModel with no HyperBlock but only Block."""
# Make sure get_uid would count from start.
tf.keras.backend.clear_session()
inputs = []
old_node_to_new = {}
for old_input_node in self.inputs:
input_node = copy(old_input_node)
inputs.append(input_node)
old_node_to_new[old_input_node] = input_node
for old_block in self.blocks:
inputs = [old_node_to_new[input_node]
for input_node in old_block.inputs]
if isinstance(old_block, base.HyperBlock):
outputs = old_block.build(hp, inputs=inputs)
else:
outputs = copy(old_block)(inputs)
for output_node, old_output_node in zip(outputs, old_block.outputs):
old_node_to_new[old_output_node] = output_node
inputs = []
for input_node in self.inputs:
inputs.append(old_node_to_new[input_node])
outputs = []
for output_node in self.outputs:
outputs.append(old_node_to_new[output_node])
return PlainGraph(inputs, outputs, override_hps=self.override_hps)
| [((780, 800), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['inputs'], {}), '(inputs)\n', (792, 800), False, 'from tensorflow.python.util import nest\n'), ((824, 845), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['outputs'], {}), '(outputs)\n', (836, 845), False, 'from tensorflow.python.util import nest\n'), ((9568, 9741), 'tensorflow.keras.Model', 'tf.keras.Model', (['[real_nodes[self._node_to_id[input_node]] for input_node in self.inputs]', '[real_nodes[self._node_to_id[output_node]] for output_node in self.outputs]'], {}), '([real_nodes[self._node_to_id[input_node]] for input_node in\n self.inputs], [real_nodes[self._node_to_id[output_node]] for\n output_node in self.outputs])\n', (9582, 9741), True, 'import tensorflow as tf\n'), ((14012, 14027), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['x'], {}), '(x)\n', (14024, 14027), False, 'from tensorflow.python.util import nest\n'), ((16521, 16553), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (16551, 16553), True, 'import tensorflow as tf\n'), ((6630, 6660), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['fname', '"""wb"""'], {}), "(fname, 'wb')\n", (6647, 6660), True, 'import tensorflow as tf\n'), ((6679, 6700), 'pickle.dump', 'pickle.dump', (['state', 'f'], {}), '(state, f)\n', (6690, 6700), False, 'import pickle\n'), ((6770, 6800), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['fname', '"""rb"""'], {}), "(fname, 'rb')\n", (6787, 6800), True, 'import tensorflow as tf\n'), ((6827, 6841), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6838, 6841), False, 'import pickle\n'), ((9375, 9396), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['outputs'], {}), '(outputs)\n', (9387, 9396), False, 'from tensorflow.python.util import nest\n'), ((13284, 13299), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['x'], {}), '(x)\n', (13296, 13299), False, 'from tensorflow.python.util import nest\n'), ((12787, 12913), 'functools.partial', 'functools.partial', (['self._transform'], {'input_node_ids': 'input_node_ids', 'output_node_ids': 'output_node_ids', 'blocks': 'blocks', 'fit': 'fit'}), '(self._transform, input_node_ids=input_node_ids,\n output_node_ids=output_node_ids, blocks=blocks, fit=fit)\n', (12804, 12913), False, 'import functools\n'), ((14397, 14437), 'functools.partial', 'functools.partial', (['hm.transform'], {'fit': 'fit'}), '(hm.transform, fit=fit)\n', (14414, 14437), False, 'import functools\n'), ((14574, 14592), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['data'], {}), '(data)\n', (14586, 14592), False, 'from tensorflow.python.util import nest\n'), ((13743, 13770), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['block.outputs'], {}), '(block.outputs)\n', (13755, 13770), False, 'from tensorflow.python.util import nest\n'), ((14477, 14495), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['data'], {}), '(data)\n', (14489, 14495), False, 'from tensorflow.python.util import nest\n')] |
shreyventure/LeetCode-Solutions | Python/longest-valid-parentheses.py | 74423d65702b78974e390f17c9d6365d17e6eed5 | '''
Speed: 95.97%
Memory: 24.96%
Time complexity: O(n)
Space complexity: O(n)
'''
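# Approach: keep a stack of indices of unmatched '(' plus a sentinel base index (-1).
# On ')' pop once; if the stack becomes empty the current index is the new base,
# otherwise the longest valid span ending at i is i - stack[-1].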
class Solution(object):
def longestValidParentheses(self, s):
ans=0
stack=[-1]
for i in range(len(s)):
if(s[i]=='('):
stack.append(i)
else:
stack.pop()
if(len(stack)==0):
stack.append(i)
else:
ans=max(ans,i-stack[-1])
return ans | [] |
i25ffz/openaes | setup.py | a0dbde40d4ce0e4186ea14c4dc9519fe152c018c | from distutils.core import setup, Extension
import os.path
kw = {
'name':"PyOpenAES",
'version':"0.10.0",
'description':"OpenAES cryptographic library for Python.",
'ext_modules':[
Extension(
'openaes',
include_dirs = ['inc', 'src/isaac'],
# define_macros=[('ENABLE_PYTHON', '1')],
sources = [
os.path.join('src/oaes_lib.c'),
os.path.join('src/oaes_py.c'),
os.path.join('src/isaac/rand.c')
]
)
]
}
setup(**kw) | [((436, 447), 'distutils.core.setup', 'setup', ([], {}), '(**kw)\n', (441, 447), False, 'from distutils.core import setup, Extension\n')] |
jayvdb/scitools | examples/isosurface_demo2.py | 8df53a3a3bc95377f9fa85c04f3a329a0ec33e67 | #!/usr/bin/env python
# Example taken from:
# http://www.mathworks.com/access/helpdesk/help/techdoc/visualize/f5-3371.html
from scitools.easyviz import *
from time import sleep
from scipy import io
setp(interactive=False)
# Displaying an Isosurface:
mri = io.loadmat('mri_matlab_v6.mat')
D = mri['D']
#Ds = smooth3(D);
isosurface(D,5,indexing='xy')
#hiso = isosurface(Ds,5),
# 'FaceColor',[1,.75,.65],...
# 'EdgeColor','none');
shading('interp')
# Adding an Isocap to Show a Cutaway Surface:
#hcap = patch(isocaps(D,5),...
# 'FaceColor','interp',...
# 'EdgeColor','none');
#colormap(map)
# Define the View:
view(45,30)
axis('tight')
daspect([1,1,.4])
# Add Lighting:
#lightangle(45,30);
#set(gcf,'Renderer','zbuffer'); lighting phong
#isonormals(Ds,hiso)
#set(hcap,'AmbientStrength',.6)
#set(hiso,'SpecularColorReflectance',0,'SpecularExponent',50)
show()
raw_input('Press Return key to quit: ')
#savefig('tmp_isosurf2a.eps')
#savefig('tmp_isosurf2a.png')
| [((260, 291), 'scipy.io.loadmat', 'io.loadmat', (['"""mri_matlab_v6.mat"""'], {}), "('mri_matlab_v6.mat')\n", (270, 291), False, 'from scipy import io\n')] |
zibneuro/udvary-et-al-2022 | structural_model/util_morphology.py | 8b456c41e72958677cb6035028d9c23013cb7c7e | import os
import numpy as np
import json
import util_amira
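# Helpers for working with Amira morphology exports: map numeric edge labels to
# compartment names, read exported edge points, split them into axon/apical/basal
# compartments, and load spatial graphs (optionally caching the per-neuron
# transformation under /tmp).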
def getEdgeLabelName(label):
if(label == 6):
return "axon"
elif(label == 4):
return "apical"
elif(label == 5):
return "basal"
elif(label == 7):
return "soma"
else:
return "other"
def getSomaPosition(points):
somaPos = []
for p in points:
if(p["edge_label"] == "soma"):
somaPos.append(p["position"])
return np.mean(np.vstack(tuple(somaPos)), axis=0)
def loadAmiraExport(filename):
with open(filename) as f:
lines = f.readlines()
labels = lines[0].rstrip().split(",")
points = []
for i in range(1, len(lines)):
line = lines[i].rstrip().split(",")
point = {}
point["edge_id"] = int(line[labels.index("edge_id")])
point["source_node_id"] = int(line[labels.index("source_node")])
point["target_node_id"] = int(line[labels.index("target_node")])
point["edge_label"] = getEdgeLabelName(
int(line[labels.index("edge_label")]))
point["edge_point_id"] = int(line[labels.index("edge_point")])
point["position"] = np.array([float(line[labels.index("x")]), float(
line[labels.index("y")]), float(line[labels.index("z")])])
point["radius"] = float(line[labels.index("radius")])
point["inside_vS1"] = int(line[labels.index("inside_vS1")])
if(point["edge_label"] != "other"):
points.append(point)
return points
def separateCompartments(edgePoints):
apical = []
basal = []
axon = []
for edgePoint in edgePoints:
if(edgePoint["edge_label"] == "apical"):
apical.append(edgePoint)
elif(edgePoint["edge_label"] == "basal"):
basal.append(edgePoint)
elif(edgePoint["edge_label"] == "axon"):
axon.append(edgePoint)
compartments = {}
compartments["apical"] = apical
compartments["basal"] = basal
compartments["axon"] = axon
return compartments
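# Minimal usage sketch (the export file name is hypothetical):
#   points = loadAmiraExport("neuron_edge_points.csv")
#   soma = getSomaPosition(points)
#   compartments = separateCompartments(points)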
def loadGraphset(networkDir):
if(os.path.exists(os.path.join(networkDir, "morphologies", "Morphologies.am"))):
graphset = util_amira.readSpatialGraphSet(os.path.join(networkDir, "morphologies", "Morphologies.am"), legacy=False)
else:
graphset = util_amira.readSpatialGraphSet(os.path.join(networkDir, "morphologies", "MorphologiesWithNeuronIDs.am"), legacy=True)
return graphset
def writeToCache(filename, transformation, neuronId):
transformationFile = "/tmp/transformation_{}".format(neuronId)
np.savetxt(transformationFile, transformation)
meta = {
"morphologyFile" : filename,
"transformationFile" : transformationFile
}
metaFile = "/tmp/meta_{}.json".format(neuronId)
with open(metaFile, "w") as f:
print("meta", meta)
json.dump(meta, f)
def readFromCache(neuronId):
metaFile = "/tmp/meta_{}.json".format(neuronId)
with open(metaFile) as f:
meta = json.load(f)
transformationFile = meta["transformationFile"]
T = np.loadtxt(transformationFile)
morphologyFile = meta["morphologyFile"]
return morphologyFile, T
def loadAxon(graphset, neuronId, saveToCache = False, loadFromCache = False):
if(loadFromCache):
filename, T = readFromCache(neuronId)
else:
idx = len(graphset[neuronId]) - 1
filename = graphset[neuronId][idx]["file"]
T = graphset[neuronId][idx]["transformation"]
if(saveToCache):
writeToCache(filename, T, neuronId)
return util_amira.readSpatialGraph(filename, T)
def loadDendrite(graphset, neuronId, saveToCache = False, loadFromCache = False):
if(loadFromCache):
filename, T = readFromCache(neuronId)
else:
filename = graphset[neuronId][0]["file"]
T = graphset[neuronId][0]["transformation"]
if(saveToCache):
writeToCache(filename, T, neuronId)
return util_amira.readSpatialGraph(filename, T) | [((2638, 2684), 'numpy.savetxt', 'np.savetxt', (['transformationFile', 'transformation'], {}), '(transformationFile, transformation)\n', (2648, 2684), True, 'import numpy as np\n'), ((3140, 3170), 'numpy.loadtxt', 'np.loadtxt', (['transformationFile'], {}), '(transformationFile)\n', (3150, 3170), True, 'import numpy as np\n'), ((3664, 3704), 'util_amira.readSpatialGraph', 'util_amira.readSpatialGraph', (['filename', 'T'], {}), '(filename, T)\n', (3691, 3704), False, 'import util_amira\n'), ((4059, 4099), 'util_amira.readSpatialGraph', 'util_amira.readSpatialGraph', (['filename', 'T'], {}), '(filename, T)\n', (4086, 4099), False, 'import util_amira\n'), ((2150, 2209), 'os.path.join', 'os.path.join', (['networkDir', '"""morphologies"""', '"""Morphologies.am"""'], {}), "(networkDir, 'morphologies', 'Morphologies.am')\n", (2162, 2209), False, 'import os\n'), ((2915, 2933), 'json.dump', 'json.dump', (['meta', 'f'], {}), '(meta, f)\n', (2924, 2933), False, 'import json\n'), ((3066, 3078), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3075, 3078), False, 'import json\n'), ((2263, 2322), 'os.path.join', 'os.path.join', (['networkDir', '"""morphologies"""', '"""Morphologies.am"""'], {}), "(networkDir, 'morphologies', 'Morphologies.am')\n", (2275, 2322), False, 'import os\n'), ((2402, 2474), 'os.path.join', 'os.path.join', (['networkDir', '"""morphologies"""', '"""MorphologiesWithNeuronIDs.am"""'], {}), "(networkDir, 'morphologies', 'MorphologiesWithNeuronIDs.am')\n", (2414, 2474), False, 'import os\n')] |
FAIR-Data-Austria/invenio-madmp | invenio_madmp/views.py | 74372ee794f81666f5e9cf08ef448c21b2e428be | """Blueprint definitions for maDMP integration."""
from flask import Blueprint, jsonify, request
from invenio_db import db
from .convert import convert_dmp
from .models import DataManagementPlan
def _summarize_dmp(dmp: DataManagementPlan) -> dict:
"""Create a summary dictionary for the given DMP."""
res = {"dmp_id": dmp.dmp_id, "datasets": []}
for ds in dmp.datasets:
dataset = {"dataset_id": ds.dataset_id, "record": None}
if ds.record:
dataset["record"] = ds.record.model.json
res["datasets"].append(dataset)
return res
def create_rest_blueprint(app) -> Blueprint:
"""Create the blueprint for the REST endpoints using the current app extensions."""
# note: using flask.current_app isn't directly possible, because Invenio-MaDMP is
# registered as an extension in the API app, not the "normal" app
# (which is the one usually returned by current_app)
rest_blueprint = Blueprint("invenio_madmp", __name__)
auth = app.extensions["invenio-madmp"].auth
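    # Routes registered below: GET /dmps (list summaries), POST /dmps (create from
    # maDMP JSON), PATCH /dmps/<dmp_id> and PATCH /dmps (update an existing DMP).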
@rest_blueprint.route("/dmps", methods=["GET"])
@auth.login_required
def list_dmps():
"""Give a summary of all stored DMPs."""
dmps = DataManagementPlan.query.all()
res = [_summarize_dmp(dmp) for dmp in dmps]
return jsonify(res)
@rest_blueprint.route("/dmps", methods=["POST"])
@auth.login_required
def create_dmp():
"""Create a new DMP from the maDMP JSON in the request body."""
if request.json is None:
return jsonify({"error": "no json body supplied"}), 400
elif request.json.get("dmp") is None:
return jsonify({"error": "dmp not found in the body"}), 400
dmp_json = request.json.get("dmp", {})
dmp_json_id = dmp_json.get("dmp_id", {}).get("identifier")
if DataManagementPlan.get_by_dmp_id(dmp_json_id) is not None:
return jsonify({"error": "dmp with the same id already exists"}), 409
dmp = convert_dmp(dmp_json)
db.session.add(dmp)
db.session.commit()
# TODO change the returned value
return jsonify(_summarize_dmp(dmp)), 201
@rest_blueprint.route("/dmps/<dmp_id>", methods=["PATCH"])
@auth.login_required
def update_dmp(dmp_id: str = None):
"""Update the specified DMP using the maDMP JSON in the request body."""
hard_sync = request.args.get("sync", "soft") == "hard"
if request.json is None:
return jsonify({"error": "no json body supplied"}), 400
elif request.json.get("dmp") is None:
return jsonify({"error": "dmp not found in the body"}), 400
dmp_json = request.json.get("dmp", {})
dmp_json_id = dmp_json.get("dmp_id", {}).get("identifier")
if dmp_id and dmp_json_id and dmp_id != dmp_json_id:
return jsonify({"error": "mismatch between dmp id from url and body"}), 400
dmp_id = dmp_id or dmp_json_id
if DataManagementPlan.get_by_dmp_id(dmp_id) is None:
return jsonify({"error": "dmp not found"}), 404
dmp = convert_dmp(dmp_json, hard_sync)
db.session.commit()
# TODO change the returned value
return jsonify(_summarize_dmp(dmp))
@rest_blueprint.route("/dmps", methods=["PATCH"])
@auth.login_required
def update_dmp_without_id():
"""Update the specified DMP using the maDMP JSON in the request body."""
return update_dmp(None)
return rest_blueprint
| [((965, 1001), 'flask.Blueprint', 'Blueprint', (['"""invenio_madmp"""', '__name__'], {}), "('invenio_madmp', __name__)\n", (974, 1001), False, 'from flask import Blueprint, jsonify, request\n'), ((1312, 1324), 'flask.jsonify', 'jsonify', (['res'], {}), '(res)\n', (1319, 1324), False, 'from flask import Blueprint, jsonify, request\n'), ((1737, 1764), 'flask.request.json.get', 'request.json.get', (['"""dmp"""', '{}'], {}), "('dmp', {})\n", (1753, 1764), False, 'from flask import Blueprint, jsonify, request\n'), ((2030, 2049), 'invenio_db.db.session.add', 'db.session.add', (['dmp'], {}), '(dmp)\n', (2044, 2049), False, 'from invenio_db import db\n'), ((2058, 2077), 'invenio_db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2075, 2077), False, 'from invenio_db import db\n'), ((2682, 2709), 'flask.request.json.get', 'request.json.get', (['"""dmp"""', '{}'], {}), "('dmp', {})\n", (2698, 2709), False, 'from flask import Blueprint, jsonify, request\n'), ((3144, 3163), 'invenio_db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3161, 3163), False, 'from invenio_db import db\n'), ((2399, 2431), 'flask.request.args.get', 'request.args.get', (['"""sync"""', '"""soft"""'], {}), "('sync', 'soft')\n", (2415, 2431), False, 'from flask import Blueprint, jsonify, request\n'), ((1550, 1593), 'flask.jsonify', 'jsonify', (["{'error': 'no json body supplied'}"], {}), "({'error': 'no json body supplied'})\n", (1557, 1593), False, 'from flask import Blueprint, jsonify, request\n'), ((1612, 1635), 'flask.request.json.get', 'request.json.get', (['"""dmp"""'], {}), "('dmp')\n", (1628, 1635), False, 'from flask import Blueprint, jsonify, request\n'), ((1922, 1979), 'flask.jsonify', 'jsonify', (["{'error': 'dmp with the same id already exists'}"], {}), "({'error': 'dmp with the same id already exists'})\n", (1929, 1979), False, 'from flask import Blueprint, jsonify, request\n'), ((2495, 2538), 'flask.jsonify', 'jsonify', (["{'error': 'no json body supplied'}"], {}), "({'error': 'no json body supplied'})\n", (2502, 2538), False, 'from flask import Blueprint, jsonify, request\n'), ((2557, 2580), 'flask.request.json.get', 'request.json.get', (['"""dmp"""'], {}), "('dmp')\n", (2573, 2580), False, 'from flask import Blueprint, jsonify, request\n'), ((2858, 2921), 'flask.jsonify', 'jsonify', (["{'error': 'mismatch between dmp id from url and body'}"], {}), "({'error': 'mismatch between dmp id from url and body'})\n", (2865, 2921), False, 'from flask import Blueprint, jsonify, request\n'), ((3047, 3082), 'flask.jsonify', 'jsonify', (["{'error': 'dmp not found'}"], {}), "({'error': 'dmp not found'})\n", (3054, 3082), False, 'from flask import Blueprint, jsonify, request\n'), ((1664, 1711), 'flask.jsonify', 'jsonify', (["{'error': 'dmp not found in the body'}"], {}), "({'error': 'dmp not found in the body'})\n", (1671, 1711), False, 'from flask import Blueprint, jsonify, request\n'), ((2609, 2656), 'flask.jsonify', 'jsonify', (["{'error': 'dmp not found in the body'}"], {}), "({'error': 'dmp not found in the body'})\n", (2616, 2656), False, 'from flask import Blueprint, jsonify, request\n')] |
aipassio/visual_retrieval | retrieval/urls.py | ce8dae2ad517a9edb5e278163dd6d0f7ffc1b5f4 | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('retrieval_insert', views.retrieval_insert, name='retrieval_insert'),
path('retrieval_get', views.retrieval_get, name='retrieval_get')
] | [((71, 106), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (75, 106), False, 'from django.urls import path\n'), ((112, 185), 'django.urls.path', 'path', (['"""retrieval_insert"""', 'views.retrieval_insert'], {'name': '"""retrieval_insert"""'}), "('retrieval_insert', views.retrieval_insert, name='retrieval_insert')\n", (116, 185), False, 'from django.urls import path\n'), ((191, 255), 'django.urls.path', 'path', (['"""retrieval_get"""', 'views.retrieval_get'], {'name': '"""retrieval_get"""'}), "('retrieval_get', views.retrieval_get, name='retrieval_get')\n", (195, 255), False, 'from django.urls import path\n')] |
noshluk2/Wifi-Signal-Robot-localization | scripts/Interfacing/encoder_class.py | 538e6c4e7a63486f22ab708908c476cd808f720c | import RPi.GPIO as GPIO
import threading
class Encoder(object):
def __init__(self, r_en_a,r_en_b,l_en_a,l_en_b):
GPIO.setmode(GPIO.BCM)
GPIO.setup(r_en_a, GPIO.IN)
GPIO.setup(r_en_b, GPIO.IN)
GPIO.setup(l_en_a, GPIO.IN)
GPIO.setup(l_en_b, GPIO.IN)
self.l_en_a=l_en_a;self.l_en_b=l_en_b;
self.r_en_a=r_en_a;self.r_en_b=r_en_b;
GPIO.add_event_detect(r_en_a, GPIO.BOTH, callback=self.Update_encR)
GPIO.add_event_detect(l_en_a, GPIO.BOTH, callback=self.Update_encL)
self.count_R =0
self.count_L=0
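    # Quadrature decoding: on every edge of channel A, equal levels on A and B are
    # counted as rotation in one direction and unequal levels as the other, so the
    # callbacks below increment or decrement the corresponding wheel count.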
def Update_encR(self,channel):
if GPIO.input(self.r_en_a) == GPIO.input(self.r_en_b):
self.count_R=self.count_R + 1
else :
self.count_R = self.count_R - 1
def Update_encL(self,channel):
if GPIO.input(self.l_en_a) == GPIO.input(self.l_en_b):
self.count_L=self.count_L + 1
else :
self.count_L = self.count_L - 1
return (self.count_L)
def get_r_enc(self):
return self.count_R
def get_l_enc(self):
return self.count_L
def clear_encoders(self):
self.count_R=0
self.count_L=0
# r_en_a = 27
# r_en_b = 10
# l_en_a = 5
# l_en_b = 6
# enc_obj = Encoder(27,10,5,6)
# def update_encoders():
# threading.Timer(1,update_encoders).start()
# print(" looping ")
# update_encoders() | [((126, 148), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (138, 148), True, 'import RPi.GPIO as GPIO\n'), ((157, 184), 'RPi.GPIO.setup', 'GPIO.setup', (['r_en_a', 'GPIO.IN'], {}), '(r_en_a, GPIO.IN)\n', (167, 184), True, 'import RPi.GPIO as GPIO\n'), ((193, 220), 'RPi.GPIO.setup', 'GPIO.setup', (['r_en_b', 'GPIO.IN'], {}), '(r_en_b, GPIO.IN)\n', (203, 220), True, 'import RPi.GPIO as GPIO\n'), ((229, 256), 'RPi.GPIO.setup', 'GPIO.setup', (['l_en_a', 'GPIO.IN'], {}), '(l_en_a, GPIO.IN)\n', (239, 256), True, 'import RPi.GPIO as GPIO\n'), ((265, 292), 'RPi.GPIO.setup', 'GPIO.setup', (['l_en_b', 'GPIO.IN'], {}), '(l_en_b, GPIO.IN)\n', (275, 292), True, 'import RPi.GPIO as GPIO\n'), ((396, 463), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['r_en_a', 'GPIO.BOTH'], {'callback': 'self.Update_encR'}), '(r_en_a, GPIO.BOTH, callback=self.Update_encR)\n', (417, 463), True, 'import RPi.GPIO as GPIO\n'), ((472, 539), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['l_en_a', 'GPIO.BOTH'], {'callback': 'self.Update_encL'}), '(l_en_a, GPIO.BOTH, callback=self.Update_encL)\n', (493, 539), True, 'import RPi.GPIO as GPIO\n'), ((634, 657), 'RPi.GPIO.input', 'GPIO.input', (['self.r_en_a'], {}), '(self.r_en_a)\n', (644, 657), True, 'import RPi.GPIO as GPIO\n'), ((661, 684), 'RPi.GPIO.input', 'GPIO.input', (['self.r_en_b'], {}), '(self.r_en_b)\n', (671, 684), True, 'import RPi.GPIO as GPIO\n'), ((846, 869), 'RPi.GPIO.input', 'GPIO.input', (['self.l_en_a'], {}), '(self.l_en_a)\n', (856, 869), True, 'import RPi.GPIO as GPIO\n'), ((873, 896), 'RPi.GPIO.input', 'GPIO.input', (['self.l_en_b'], {}), '(self.l_en_b)\n', (883, 896), True, 'import RPi.GPIO as GPIO\n')] |
systori/systori | systori/apps/equipment/urls.py | e309c63e735079ff6032fdaf1db354ec872b28b1 | from django.conf.urls import url
from django.urls import path, include
from systori.apps.user.authorization import office_auth
from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate
urlpatterns = [
# two url rules to make the active_filter keyword optional
url(
r"^equipment/$", office_auth(EquipmentListView.as_view()), name="equipment.list"
),
url(
r"^equipment/(?P<active_filter>[\w-]+)$",
office_auth(EquipmentListView.as_view()),
name="equipment.list",
),
url(
r"^equipment-(?P<pk>\d+)$",
office_auth(EquipmentView.as_view()),
name="equipment.view",
),
url(
r"^create-equipment$",
office_auth(EquipmentCreate.as_view()),
name="equipment.create",
),
url(
r"^equipment-(?P<pk>\d+)/edit$",
office_auth(EquipmentUpdate.as_view()),
name="equipment.edit",
),
url(
r"^equipment-(?P<pk>\d+)/delete$",
office_auth(EquipmentDelete.as_view()),
name="equipment.delete",
),
url(
r"^equipment-(?P<pk>\d+)/create-refueling-stop$",
office_auth(RefuelingStopCreate.as_view()),
name="refueling_stop.create",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/refueling-stop-(?P<pk>\d+)/update$",
office_auth(RefuelingStopUpdate.as_view()),
name="refueling_stop.update",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/refueling-stop-(?P<pk>\d+)/delete",
office_auth(RefuelingStopDelete.as_view()),
name="refueling_stop.delete",
),
url(
r"^equipment-(?P<pk>\d+)/create-maintenance",
office_auth(MaintenanceCreate.as_view()),
name="maintenance.create",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/maintenance-(?P<pk>\d+)/update$",
office_auth(MaintenanceUpdate.as_view()),
name="maintenance.update",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/maintenance-(?P<pk>\d+)/delete",
office_auth(MaintenanceDelete.as_view()),
name="maintenance.delete",
),
]
| [((500, 527), 'systori.apps.equipment.views.EquipmentListView.as_view', 'EquipmentListView.as_view', ([], {}), '()\n', (525, 527), False, 'from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate\n'), ((638, 665), 'systori.apps.equipment.views.EquipmentListView.as_view', 'EquipmentListView.as_view', ([], {}), '()\n', (663, 665), False, 'from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate\n'), ((771, 794), 'systori.apps.equipment.views.EquipmentView.as_view', 'EquipmentView.as_view', ([], {}), '()\n', (792, 794), False, 'from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate\n'), ((895, 920), 'systori.apps.equipment.views.EquipmentCreate.as_view', 'EquipmentCreate.as_view', ([], {}), '()\n', (918, 920), False, 'from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate\n'), ((1033, 1058), 'systori.apps.equipment.views.EquipmentUpdate.as_view', 'EquipmentUpdate.as_view', ([], {}), '()\n', (1056, 1058), False, 'from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate\n'), ((1171, 1196), 'systori.apps.equipment.views.EquipmentDelete.as_view', 'EquipmentDelete.as_view', ([], {}), '()\n', (1194, 1196), False, 'from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate\n'), ((1326, 1355), 'systori.apps.equipment.views.RefuelingStopCreate.as_view', 'RefuelingStopCreate.as_view', ([], {}), '()\n', (1353, 1355), False, 'from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate\n'), ((1512, 1541), 'systori.apps.equipment.views.RefuelingStopUpdate.as_view', 'RefuelingStopUpdate.as_view', ([], {}), '()\n', (1539, 1541), False, 'from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate\n'), ((1697, 1726), 'systori.apps.equipment.views.RefuelingStopDelete.as_view', 'RefuelingStopDelete.as_view', ([], {}), '()\n', (1724, 1726), False, 'from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate\n'), ((1857, 1884), 
'systori.apps.equipment.views.MaintenanceCreate.as_view', 'MaintenanceCreate.as_view', ([], {}), '()\n', (1882, 1884), False, 'from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate\n'), ((2035, 2062), 'systori.apps.equipment.views.MaintenanceUpdate.as_view', 'MaintenanceUpdate.as_view', ([], {}), '()\n', (2060, 2062), False, 'from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate\n'), ((2212, 2239), 'systori.apps.equipment.views.MaintenanceDelete.as_view', 'MaintenanceDelete.as_view', ([], {}), '()\n', (2237, 2239), False, 'from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate\n')] |
MRXLT/PaddleHub | paddlehub/module/check_info_pb2.py | a9cd941bef2ac5a2d81b2f20422a4fbd9a87eb90 | #coding:utf-8
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: check_info.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='check_info.proto',
package='paddlehub.module.checkinfo',
syntax='proto3',
serialized_pb=_b(
'\n\x10\x63heck_info.proto\x12\x1apaddlehub.module.checkinfo\"\x85\x01\n\x08\x46ileInfo\x12\x11\n\tfile_name\x18\x01 \x01(\t\x12\x33\n\x04type\x18\x02 \x01(\x0e\x32%.paddlehub.module.checkinfo.FILE_TYPE\x12\x0f\n\x07is_need\x18\x03 \x01(\x08\x12\x0b\n\x03md5\x18\x04 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\"\x84\x01\n\x08Requires\x12>\n\x0crequire_type\x18\x01 \x01(\x0e\x32(.paddlehub.module.checkinfo.REQUIRE_TYPE\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x12\n\ngreat_than\x18\x03 \x01(\x08\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\"\xc8\x01\n\tCheckInfo\x12\x16\n\x0epaddle_version\x18\x01 \x01(\t\x12\x13\n\x0bhub_version\x18\x02 \x01(\t\x12\x1c\n\x14module_proto_version\x18\x03 \x01(\t\x12\x38\n\nfile_infos\x18\x04 \x03(\x0b\x32$.paddlehub.module.checkinfo.FileInfo\x12\x36\n\x08requires\x18\x05 \x03(\x0b\x32$.paddlehub.module.checkinfo.Requires*\x1e\n\tFILE_TYPE\x12\x08\n\x04\x46ILE\x10\x00\x12\x07\n\x03\x44IR\x10\x01*[\n\x0cREQUIRE_TYPE\x12\x12\n\x0ePYTHON_PACKAGE\x10\x00\x12\x0e\n\nHUB_MODULE\x10\x01\x12\n\n\x06SYSTEM\x10\x02\x12\x0b\n\x07\x43OMMAND\x10\x03\x12\x0e\n\nPY_VERSION\x10\x04\x42\x02H\x03\x62\x06proto3'
))
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_FILE_TYPE = _descriptor.EnumDescriptor(
name='FILE_TYPE',
full_name='paddlehub.module.checkinfo.FILE_TYPE',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FILE', index=0, number=0, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='DIR', index=1, number=1, options=None, type=None),
],
containing_type=None,
options=None,
serialized_start=522,
serialized_end=552,
)
_sym_db.RegisterEnumDescriptor(_FILE_TYPE)
FILE_TYPE = enum_type_wrapper.EnumTypeWrapper(_FILE_TYPE)
_REQUIRE_TYPE = _descriptor.EnumDescriptor(
name='REQUIRE_TYPE',
full_name='paddlehub.module.checkinfo.REQUIRE_TYPE',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PYTHON_PACKAGE', index=0, number=0, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='HUB_MODULE', index=1, number=1, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='SYSTEM', index=2, number=2, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='COMMAND', index=3, number=3, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='PY_VERSION', index=4, number=4, options=None, type=None),
],
containing_type=None,
options=None,
serialized_start=554,
serialized_end=645,
)
_sym_db.RegisterEnumDescriptor(_REQUIRE_TYPE)
REQUIRE_TYPE = enum_type_wrapper.EnumTypeWrapper(_REQUIRE_TYPE)
FILE = 0
DIR = 1
PYTHON_PACKAGE = 0
HUB_MODULE = 1
SYSTEM = 2
COMMAND = 3
PY_VERSION = 4
_FILEINFO = _descriptor.Descriptor(
name='FileInfo',
full_name='paddlehub.module.checkinfo.FileInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_name',
full_name='paddlehub.module.checkinfo.FileInfo.file_name',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type',
full_name='paddlehub.module.checkinfo.FileInfo.type',
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_need',
full_name='paddlehub.module.checkinfo.FileInfo.is_need',
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='md5',
full_name='paddlehub.module.checkinfo.FileInfo.md5',
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description',
full_name='paddlehub.module.checkinfo.FileInfo.description',
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=49,
serialized_end=182,
)
_REQUIRES = _descriptor.Descriptor(
name='Requires',
full_name='paddlehub.module.checkinfo.Requires',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='require_type',
full_name='paddlehub.module.checkinfo.Requires.require_type',
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version',
full_name='paddlehub.module.checkinfo.Requires.version',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='great_than',
full_name='paddlehub.module.checkinfo.Requires.great_than',
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description',
full_name='paddlehub.module.checkinfo.Requires.description',
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=185,
serialized_end=317,
)
_CHECKINFO = _descriptor.Descriptor(
name='CheckInfo',
full_name='paddlehub.module.checkinfo.CheckInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='paddle_version',
full_name='paddlehub.module.checkinfo.CheckInfo.paddle_version',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hub_version',
full_name='paddlehub.module.checkinfo.CheckInfo.hub_version',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='module_proto_version',
full_name=
'paddlehub.module.checkinfo.CheckInfo.module_proto_version',
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='file_infos',
full_name='paddlehub.module.checkinfo.CheckInfo.file_infos',
index=3,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='requires',
full_name='paddlehub.module.checkinfo.CheckInfo.requires',
index=4,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=320,
serialized_end=520,
)
_FILEINFO.fields_by_name['type'].enum_type = _FILE_TYPE
_REQUIRES.fields_by_name['require_type'].enum_type = _REQUIRE_TYPE
_CHECKINFO.fields_by_name['file_infos'].message_type = _FILEINFO
_CHECKINFO.fields_by_name['requires'].message_type = _REQUIRES
DESCRIPTOR.message_types_by_name['FileInfo'] = _FILEINFO
DESCRIPTOR.message_types_by_name['Requires'] = _REQUIRES
DESCRIPTOR.message_types_by_name['CheckInfo'] = _CHECKINFO
DESCRIPTOR.enum_types_by_name['FILE_TYPE'] = _FILE_TYPE
DESCRIPTOR.enum_types_by_name['REQUIRE_TYPE'] = _REQUIRE_TYPE
FileInfo = _reflection.GeneratedProtocolMessageType(
'FileInfo',
(_message.Message, ),
dict(
DESCRIPTOR=_FILEINFO,
__module__='check_info_pb2'
# @@protoc_insertion_point(class_scope:paddlehub.module.checkinfo.FileInfo)
))
_sym_db.RegisterMessage(FileInfo)
Requires = _reflection.GeneratedProtocolMessageType(
'Requires',
(_message.Message, ),
dict(
DESCRIPTOR=_REQUIRES,
__module__='check_info_pb2'
# @@protoc_insertion_point(class_scope:paddlehub.module.checkinfo.Requires)
))
_sym_db.RegisterMessage(Requires)
CheckInfo = _reflection.GeneratedProtocolMessageType(
'CheckInfo',
(_message.Message, ),
dict(
DESCRIPTOR=_CHECKINFO,
__module__='check_info_pb2'
# @@protoc_insertion_point(class_scope:paddlehub.module.checkinfo.CheckInfo)
))
_sym_db.RegisterMessage(CheckInfo)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(),
_b('H\003'))
# @@protoc_insertion_point(module_scope)
| [((558, 584), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (582, 584), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((2503, 2548), 'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper', 'enum_type_wrapper.EnumTypeWrapper', (['_FILE_TYPE'], {}), '(_FILE_TYPE)\n', (2536, 2548), False, 'from google.protobuf.internal import enum_type_wrapper\n'), ((3475, 3523), 'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper', 'enum_type_wrapper.EnumTypeWrapper', (['_REQUIRE_TYPE'], {}), '(_REQUIRE_TYPE)\n', (3508, 3523), False, 'from google.protobuf.internal import enum_type_wrapper\n'), ((13565, 13593), 'google.protobuf.descriptor_pb2.FileOptions', 'descriptor_pb2.FileOptions', ([], {}), '()\n', (13591, 13593), False, 'from google.protobuf import descriptor_pb2\n'), ((2131, 2224), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', ([], {'name': '"""FILE"""', 'index': '(0)', 'number': '(0)', 'options': 'None', 'type': 'None'}), "(name='FILE', index=0, number=0, options=\n None, type=None)\n", (2162, 2224), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2242, 2333), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', ([], {'name': '"""DIR"""', 'index': '(1)', 'number': '(1)', 'options': 'None', 'type': 'None'}), "(name='DIR', index=1, number=1, options=None,\n type=None)\n", (2273, 2333), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2736, 2838), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', ([], {'name': '"""PYTHON_PACKAGE"""', 'index': '(0)', 'number': '(0)', 'options': 'None', 'type': 'None'}), "(name='PYTHON_PACKAGE', index=0, number=0,\n options=None, type=None)\n", (2767, 2838), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2857, 2955), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', ([], {'name': '"""HUB_MODULE"""', 'index': '(1)', 'number': '(1)', 'options': 'None', 'type': 'None'}), "(name='HUB_MODULE', index=1, number=1,\n options=None, type=None)\n", (2888, 2955), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2974, 3069), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', ([], {'name': '"""SYSTEM"""', 'index': '(2)', 'number': '(2)', 'options': 'None', 'type': 'None'}), "(name='SYSTEM', index=2, number=2, options=\n None, type=None)\n", (3005, 3069), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3087, 3183), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', ([], {'name': '"""COMMAND"""', 'index': '(3)', 'number': '(3)', 'options': 'None', 'type': 'None'}), "(name='COMMAND', index=3, number=3, options=\n None, type=None)\n", (3118, 3183), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3201, 3299), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', ([], {'name': '"""PY_VERSION"""', 'index': '(4)', 'number': '(4)', 'options': 'None', 'type': 'None'}), "(name='PY_VERSION', index=4, number=4,\n options=None, type=None)\n", (3232, 3299), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4330, 4646), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""type"""', 'full_name': '"""paddlehub.module.checkinfo.FileInfo.type"""', 'index': '(1)', 'number': '(2)', 'type': '(14)', 'cpp_type': 
'(8)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='type', full_name=\n 'paddlehub.module.checkinfo.FileInfo.type', index=1, number=2, type=14,\n cpp_type=8, label=1, has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, options=None)\n", (4357, 4646), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4819, 5145), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""is_need"""', 'full_name': '"""paddlehub.module.checkinfo.FileInfo.is_need"""', 'index': '(2)', 'number': '(3)', 'type': '(8)', 'cpp_type': '(7)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(False)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='is_need', full_name=\n 'paddlehub.module.checkinfo.FileInfo.is_need', index=2, number=3, type=\n 8, cpp_type=7, label=1, has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, options=None)\n", (4846, 5145), True, 'from google.protobuf import descriptor as _descriptor\n'), ((6758, 7090), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""require_type"""', 'full_name': '"""paddlehub.module.checkinfo.Requires.require_type"""', 'index': '(0)', 'number': '(1)', 'type': '(14)', 'cpp_type': '(8)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='require_type', full_name=\n 'paddlehub.module.checkinfo.Requires.require_type', index=0, number=1,\n type=14, cpp_type=8, label=1, has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, options=None)\n", (6785, 7090), True, 'from google.protobuf import descriptor as _descriptor\n'), ((7778, 8109), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""great_than"""', 'full_name': '"""paddlehub.module.checkinfo.Requires.great_than"""', 'index': '(2)', 'number': '(3)', 'type': '(8)', 'cpp_type': '(7)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(False)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='great_than', full_name=\n 'paddlehub.module.checkinfo.Requires.great_than', index=2, number=3,\n type=8, cpp_type=7, label=1, has_default_value=False, default_value=\n False, message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, options=None)\n", (7805, 8109), True, 'from google.protobuf import descriptor as _descriptor\n'), ((10829, 11160), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""file_infos"""', 'full_name': '"""paddlehub.module.checkinfo.CheckInfo.file_infos"""', 'index': '(3)', 'number': '(4)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 
'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='file_infos', full_name=\n 'paddlehub.module.checkinfo.CheckInfo.file_infos', index=3, number=4,\n type=11, cpp_type=10, label=3, has_default_value=False, default_value=[\n ], message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, options=None)\n", (10856, 11160), True, 'from google.protobuf import descriptor as _descriptor\n'), ((11333, 11660), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""requires"""', 'full_name': '"""paddlehub.module.checkinfo.CheckInfo.requires"""', 'index': '(4)', 'number': '(5)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='requires', full_name=\n 'paddlehub.module.checkinfo.CheckInfo.requires', index=4, number=5,\n type=11, cpp_type=10, label=3, has_default_value=False, default_value=[\n ], message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, options=None)\n", (11360, 11660), True, 'from google.protobuf import descriptor as _descriptor\n')] |
rursvd/pynumerical2 | 40_3.py | 4b2d33125b64a39099ac8eddef885e0ea11b237d | from numpy import zeros
# Define ab2 function
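# Two-step Adams-Bashforth: y[i+1] = y[i] + h*(3/2*f(t[i],y[i]) - 1/2*f(t[i-1],y[i-1])),
# bootstrapped with a single forward-Euler step for y[1].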
def ab2(f,t0,tf,y0,n):
h = (tf - t0)/n
t = zeros(n+1)
y = zeros(n+1)
t[0] = t0
y[0] = y0
y[1] = y[0] + h * f(t[0],y[0])
t[1] = t[0] + h
for i in range(1,n):
y[i+1] = y[i] + (3.0/2.0) * h * f(t[i],y[i])-1.0/2.0 * h * f(t[i-1],y[i-1])
t[i+1] = t[i] + h
return t,y
# Define functions
def f(t,y):
return t - y
# Set initial conditions
t0 = 0.0
tf = 1.0
y0 = 1.0
n = 5
# Execute AB2
t, yab2 = ab2(f,t0,tf,y0,n)
# Print results
print("%5s %8s" % ('t','y'))
for i in range(n+1):
print("%8.4f %8.4f" % (t[i],yab2[i]))
| [((99, 111), 'numpy.zeros', 'zeros', (['(n + 1)'], {}), '(n + 1)\n', (104, 111), False, 'from numpy import zeros\n'), ((118, 130), 'numpy.zeros', 'zeros', (['(n + 1)'], {}), '(n + 1)\n', (123, 130), False, 'from numpy import zeros\n')] |
NeonDaniel/lingua-franca | test/test_parse_cs.py | eee95702016b4013b0d81dc74da98cd2d2f53358 | #
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from datetime import datetime, timedelta
from lingua_franca import get_default_lang, set_default_lang, \
load_language, unload_language
from lingua_franca.parse import extract_datetime
from lingua_franca.parse import extract_duration
from lingua_franca.parse import extract_number, extract_numbers
from lingua_franca.parse import fuzzy_match
from lingua_franca.parse import get_gender
from lingua_franca.parse import match_one
from lingua_franca.parse import normalize
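# Tests for the Czech (cs-cz) parsers: fuzzy matching, number extraction,
# duration extraction and datetime extraction.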
def setUpModule():
load_language("cs-cz")
set_default_lang("cs")
def tearDownModule():
unload_language("cs")
class TestFuzzyMatch(unittest.TestCase):
def test_matches(self):
self.assertTrue(fuzzy_match("ty a já", "ty a já") >= 1.0)
self.assertTrue(fuzzy_match("ty a já", "ty") < 0.5)
self.assertTrue(fuzzy_match("Ty", "ty") >= 0.5)
self.assertTrue(fuzzy_match("ty a já", "ty") ==
fuzzy_match("ty", "ty a já"))
self.assertTrue(fuzzy_match("ty a já", "on nebo oni") < 0.23)
def test_match_one(self):
# test list of choices
choices = ['frank', 'kate', 'harry', 'henry']
self.assertEqual(match_one('frank', choices)[0], 'frank')
self.assertEqual(match_one('fran', choices)[0], 'frank')
self.assertEqual(match_one('enry', choices)[0], 'henry')
self.assertEqual(match_one('katt', choices)[0], 'kate')
# test dictionary of choices
choices = {'frank': 1, 'kate': 2, 'harry': 3, 'henry': 4}
self.assertEqual(match_one('frank', choices)[0], 1)
self.assertEqual(match_one('enry', choices)[0], 4)
class TestNormalize(unittest.TestCase):
def test_extract_number(self):
self.assertEqual(extract_number("tohle je první test",
ordinals=True), 1)
self.assertEqual(extract_number("tohle je 2 test"), 2)
self.assertEqual(extract_number("tohle je druhý test",
ordinals=True), 2)
#self.assertEqual(extract_number("tohle je třetí test"), 1.0 / 3.0)
self.assertEqual(extract_number("tohle je třetí test",
ordinals=True), 3.0)
self.assertEqual(extract_number("ten čtvrtý", ordinals=True), 4.0)
self.assertEqual(extract_number(
"ten třicátý šestý", ordinals=True), 36.0)
self.assertEqual(extract_number("tohle je test číslo 4"), 4)
self.assertEqual(extract_number("jedna třetina šálku"), 1.0 / 3.0)
self.assertEqual(extract_number("tři šálky"), 3)
self.assertEqual(extract_number("1/3 šálku"), 1.0 / 3.0)
self.assertEqual(extract_number("čtvrtina šálku"), 0.25)
self.assertEqual(extract_number("1/4 cup"), 0.25)
self.assertEqual(extract_number("jedna čtvrtina šálku"), 0.25)
self.assertEqual(extract_number("2/3 šálků"), 2.0 / 3.0)
self.assertEqual(extract_number("3/4 šálků"), 3.0 / 4.0)
self.assertEqual(extract_number("1 a 3/4 šálků"), 1.75)
self.assertEqual(extract_number("1 šálek a půl"), 1.5)
self.assertEqual(extract_number("jeden šálek a polovina"), 1.5)
self.assertEqual(extract_number("jedna a půl šálků"), 1.5)
self.assertEqual(extract_number("jedna a jedna polovina šálků"), 1.5)
self.assertEqual(extract_number("tři čtvrtina šálků"), 3.0 / 4.0)
self.assertEqual(extract_number("tři čtvrtiny šálků"), 3.0 / 4.0)
self.assertEqual(extract_number("dvacet dva"), 22)
self.assertEqual(extract_number(
"Dvacet dva s velkým písmenam na začátku"), 22)
self.assertEqual(extract_number(
"dvacet Dva s dva krát velkým písmem"), 22)
self.assertEqual(extract_number(
"dvacet Dva s různou velikostí písmen"), 22)
self.assertEqual(extract_number("Dvacet dva a Tři Pětiny"), 22.6)
self.assertEqual(extract_number("dvě sto"), 200)
self.assertEqual(extract_number("devět tisíc"), 9000)
self.assertEqual(extract_number("šest sto šedesát šest"), 666)
self.assertEqual(extract_number("dva million"), 2000000)
self.assertEqual(extract_number("dva million pět sto tisíc "
"tun žhavého kovu"), 2500000)
self.assertEqual(extract_number("šest trillion"), 6000000000000.0)
self.assertEqual(extract_number("šest trilion", short_scale=False),
6e+18)
self.assertEqual(extract_number("jedna tečka pět"), 1.5)
self.assertEqual(extract_number("tři tečka čtrnáct"), 3.14)
self.assertEqual(extract_number("nula tečka dva"), 0.2)
self.assertEqual(extract_number("billion roků "),
1000000000.0)
self.assertEqual(extract_number("bilion roků",
short_scale=False),
1000000000000.0)
self.assertEqual(extract_number("jedno sto tisíc"), 100000)
self.assertEqual(extract_number("mínus 2"), -2)
self.assertEqual(extract_number("záporné sedmdesát"), -70)
self.assertEqual(extract_number("tisíc million"), 1000000000)
self.assertEqual(extract_number("miliarda", short_scale=False),
1000000000)
self.assertEqual(extract_number("šestina třetina"),
1 / 6 / 3)
self.assertEqual(extract_number("šestina třetí", ordinals=True),
3)
self.assertEqual(extract_number("třicet sekund"), 30)
self.assertEqual(extract_number("třicátý druhý", ordinals=True), 32)
self.assertEqual(extract_number("tohle je billiontý test",
ordinals=True), 1e09)
print("tohle udělat později")
#self.assertEqual(extract_number("tohle je billiontý test"), 1e-9)
self.assertEqual(extract_number("tohle je biliontý test",
ordinals=True,
short_scale=False), 1e12)
print("tohle udělat později")
# self.assertEqual(extract_number("tohle je biliontý test",
# short_scale=False), 1e-12)
# Verify non-power multiples of ten no longer discard
# adjacent multipliers
self.assertEqual(extract_number("dvacet tisíc"), 20000)
self.assertEqual(extract_number("padesát million"), 50000000)
# Verify smaller powers of ten no longer cause miscalculation of larger
# powers of ten (see MycroftAI#86)
self.assertEqual(extract_number("dvacet billion tři sto million \
devět sto padesát tisíc šest sto \
sedmdesát pět tečka osm"),
20300950675.8)
self.assertEqual(extract_number("devět sto devadesát devět million devět \
sto devadesát devět tisíc devět \
sto devadesát devět tečka devět"),
999999999.9)
# TODO why does "trillion" result in xxxx.0?
self.assertEqual(extract_number("osm sto trillion dva sto \
padesát sedm"), 800000000000257.0)
# TODO handle this case
# self.assertEqual(
# extract_number("6 dot six six six"),
# 6.666)
self.assertTrue(extract_number("Tenisový hráč je rychlý") is False)
self.assertTrue(extract_number("křehký") is False)
self.assertTrue(extract_number("křehká nula") is not False)
self.assertEqual(extract_number("křehká nula"), 0)
#self.assertTrue(extract_number("grobo 0") is not False)
#self.assertEqual(extract_number("grobo 0"), 0)
self.assertEqual(extract_number("dvojice piv"), 2)
self.assertEqual(extract_number("dvojice sto piv"), 200)
self.assertEqual(extract_number("dvojice tisíc piv"), 2000)
self.assertEqual(extract_number(
"tohle je 7 test", ordinals=True), 7)
self.assertEqual(extract_number(
"tohle je 7 test", ordinals=False), 7)
self.assertTrue(extract_number("tohle je n. test") is False)
self.assertEqual(extract_number("tohle je 1. test"), 1)
self.assertEqual(extract_number("tohle je 2. test"), 2)
self.assertEqual(extract_number("tohle je 3. test"), 3)
self.assertEqual(extract_number("tohle je 31. test"), 31)
self.assertEqual(extract_number("tohle je 32. test"), 32)
self.assertEqual(extract_number("tohle je 33. test"), 33)
self.assertEqual(extract_number("tohle je 34. test"), 34)
self.assertEqual(extract_number("celkem 100%"), 100)
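    # extract_duration should return a (timedelta, remaining_text) tuple for Czech phrases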
def test_extract_duration_cs(self):
self.assertEqual(extract_duration("10 sekund"),
(timedelta(seconds=10.0), ""))
self.assertEqual(extract_duration("5 minut"),
(timedelta(minutes=5), ""))
self.assertEqual(extract_duration("2 hodiny"),
(timedelta(hours=2), ""))
self.assertEqual(extract_duration("3 dny"),
(timedelta(days=3), ""))
self.assertEqual(extract_duration("25 týdnů"),
(timedelta(weeks=25), ""))
self.assertEqual(extract_duration("sedm hodin"),
(timedelta(hours=7), ""))
self.assertEqual(extract_duration("7.5 sekund"),
(timedelta(seconds=7.5), ""))
self.assertEqual(extract_duration("osm a polovina dne třicet"
" devět sekund"),
(timedelta(days=8.5, seconds=39), ""))
self.assertEqual(extract_duration("Nastav časovač na 30 minut"),
(timedelta(minutes=30), "nastav časovač na"))
self.assertEqual(extract_duration("Čtyři a půl minuty do"
" západu"),
(timedelta(minutes=4.5), "do západu"))
self.assertEqual(extract_duration("devatenáct minut po hodině"),
(timedelta(minutes=19), "po hodině"))
self.assertEqual(extract_duration("vzbuď mě za tři týdny, čtyři"
" sto devadesát sedm dní, a"
" tři sto 91.6 sekund"),
(timedelta(weeks=3, days=497, seconds=391.6),
"vzbuď mě za , , a"))
self.assertEqual(extract_duration("film je jedna hodina, padesát sedm"
" a půl minuty dlouhý"),
(timedelta(hours=1, minutes=57.5),
"film je , dlouhý"))
self.assertEqual(extract_duration("10-sekund"),
(timedelta(seconds=10.0), ""))
self.assertEqual(extract_duration("5-minut"),
(timedelta(minutes=5), ""))
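    # extract_datetime returns the resolved datetime plus the leftover text; all cases below use a fixed anchor date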
def test_extractdatetime_cs(self):
def extractWithFormat(text):
date = datetime(2017, 6, 27, 13, 4) # Tue June 27, 2017 @ 1:04pm
[extractedDate, leftover] = extract_datetime(text, date)
extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S")
return [extractedDate, leftover]
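        # Helper: normalize the utterance, extract, and compare both the formatted date and the leftover text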
def testExtract(text, expected_date, expected_leftover):
res = extractWithFormat(normalize(text))
self.assertEqual(res[0], expected_date, "for=" + text)
self.assertEqual(res[1], expected_leftover, "for=" + text)
testExtract("nyní je čas",
"2017-06-27 13:04:00", "je čas")
testExtract("za sekundu",
"2017-06-27 13:04:01", "")
testExtract("za minutu",
"2017-06-27 13:05:00", "")
# testExtract("ve dvou minutách",
# "2017-06-27 13:06:00", "")
# testExtract("in a couple of minutes",
# "2017-06-27 13:06:00", "")
# testExtract("ve dvou hodinách",
# "2017-06-27 15:04:00", "")
# testExtract("in a couple of hours",
# "2017-06-27 15:04:00", "")
# testExtract("v dvoje týden",
# "2017-07-11 00:00:00", "")
# testExtract("in a couple of weeks",
# "2017-07-11 00:00:00", "")
# testExtract("v dvoje měsíc",
# "2017-08-27 00:00:00", "")
# testExtract("v dvoje rok",
# "2019-06-27 00:00:00", "")
# testExtract("in a couple of months",
# "2017-08-27 00:00:00", "")
# testExtract("in a couple of years",
# "2019-06-27 00:00:00", "")
testExtract("v desetiletí",
"2027-06-27 00:00:00", "")
# testExtract("in a couple of decades",
# "2037-06-27 00:00:00", "")
testExtract("další desetiletí",
"2027-06-27 00:00:00", "")
testExtract("v století",
"2117-06-27 00:00:00", "")
testExtract("v tisíciletí",
"3017-06-27 00:00:00", "")
testExtract("v dvoje desetiletí",
"2037-06-27 00:00:00", "")
testExtract("v 5 desetiletí",
"2067-06-27 00:00:00", "")
testExtract("v dvoje století",
"2217-06-27 00:00:00", "")
# testExtract("in a couple of centuries",
# "2217-06-27 00:00:00", "")
testExtract("v 2 století",
"2217-06-27 00:00:00", "")
testExtract("v dvoje tisíciletí",
"4017-06-27 00:00:00", "")
# testExtract("in a couple of millenniums",
# "4017-06-27 00:00:00", "")
testExtract("v hodina",
"2017-06-27 14:04:00", "")
testExtract("chci to během hodiny",
"2017-06-27 14:04:00", "chci to")
testExtract("za 1 sekundu",
"2017-06-27 13:04:01", "")
testExtract("za 2 sekundy",
"2017-06-27 13:04:02", "")
testExtract("Nastav časovač na 1 minutu",
"2017-06-27 13:05:00", "nastav časovač")
testExtract("Nastav časovač na půl hodina",
"2017-06-27 13:34:00", "nastav časovač")
testExtract("Nastav časovač na 5 den od dnes",
"2017-07-02 00:00:00", "nastav časovač")
testExtract("den po zítřku",
"2017-06-29 00:00:00", "")
testExtract("Jaké je počasí den po zítřku?",
"2017-06-29 00:00:00", "jaké je počasí")
testExtract("Připomeň mi v 10:45 pm",
"2017-06-27 22:45:00", "připomeň mi")
testExtract("jaké je počasí v pátek ráno",
"2017-06-30 08:00:00", "jaké je počasí")
testExtract("jaké je zítřejší počasí",
"2017-06-28 00:00:00", "jaké je počasí")
testExtract("jaké je počasí toto odpoledne",
"2017-06-27 15:00:00", "jaké je počasí")
testExtract("jaké je počasí tento večer",
"2017-06-27 19:00:00", "jaké je počasí")
testExtract("jaké bylo počasí toto ráno",
"2017-06-27 08:00:00", "jaké bylo počasí")
testExtract("připomeň mi abych zavolal mámě v 8 týden a 2 dny",
"2017-08-24 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v srpen 3",
"2017-08-03 00:00:00", "připomeň mi abych zavolal mámě") # přidat i třetího slovně
testExtract("připomeň mi zítra abych zavolal mámě v 7am",
"2017-06-28 07:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi zítra abych zavolal mámě v 10pm",
"2017-06-28 22:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7am",
"2017-06-28 07:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v hodina",
"2017-06-27 14:04:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 1730",
"2017-06-27 17:30:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 0630",
"2017-06-28 06:30:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 06 30 hodina",
"2017-06-28 06:30:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 06 30",
"2017-06-28 06:30:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 06 30 hodina",
"2017-06-28 06:30:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7 hodin",
"2017-06-27 19:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě večer v 7 hodin",
"2017-06-27 19:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7 hodin večer",
"2017-06-27 19:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7 hodin ráno",
"2017-06-28 07:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v Čtvrtek večer v 7 hodin",
"2017-06-29 19:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v Čtvrtek ráno v 7 hodin",
"2017-06-29 07:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7 hodin Čtvrtek ráno",
"2017-06-29 07:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7:00 Čtvrtek ráno",
"2017-06-29 07:00:00", "připomeň mi abych zavolal mámě")
# TODO: This test is imperfect due to "at 7:00" still in the
# remainder. But let it pass for now since time is correct
testExtract("připomeň mi abych zavolal mámě v 7:00 Čtvrtek večer",
"2017-06-29 19:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 8 Středa večer",
"2017-06-28 20:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 8 Středa v večer",
"2017-06-28 20:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě Středa večer v 8",
"2017-06-28 20:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za dvě hodiny",
"2017-06-27 15:04:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za 2 hodiny",
"2017-06-27 15:04:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za 15 minut",
"2017-06-27 13:19:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za patnáct minut",
"2017-06-27 13:19:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za půl hodina",
"2017-06-27 13:34:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za půl hodina",
"2017-06-27 13:34:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za čtvrt hodina",
"2017-06-27 13:19:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za čtvrt hodina",
"2017-06-27 13:19:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 10am 2 den po této sobota",
"2017-07-03 10:00:00", "připomeň mi abych zavolal mámě")
testExtract("Přehraj Rick Astley hudbu 2 dny od Pátek",
"2017-07-02 00:00:00", "přehraj rick astley hudbu")
testExtract("Začni invazi v 3:45 pm v Čtvrtek",
"2017-06-29 15:45:00", "začni invazi")
testExtract("V Pondělí, objednej koláč z pekárny",
"2017-07-03 00:00:00", "objednej koláč z pekárny")
testExtract("Přehraj Happy Birthday hudbu 5 roků od dnes",
"2022-06-27 00:00:00", "přehraj happy birthday hudbu")
testExtract("Skype Mámě v 12:45 pm další Čtvrtek",
"2017-07-06 12:45:00", "skype mámě")
testExtract("Jaké je počasí příští Pátek?",
"2017-06-30 00:00:00", "jaké je počasí")
testExtract("Jaké je počasí příští Středa?",
"2017-07-05 00:00:00", "jaké je počasí")
testExtract("Jaké je počasí příští Čtvrtek?",
"2017-07-06 00:00:00", "jaké je počasí")
testExtract("Jaké je počasí příští pátek ráno",
"2017-06-30 08:00:00", "jaké je počasí")
testExtract("jaké je počasí příští pátek večer",
"2017-06-30 19:00:00", "jaké je počasí")
testExtract("jaké je počasí příští pátek odpoledne",
"2017-06-30 15:00:00", "jaké je počasí")
testExtract("připomeň mi abych zavolal mámě v srpen třetího",
"2017-08-03 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("Kup ohňostroj v 4 Červenec",
"2017-07-04 00:00:00", "kup ohňostroj")
testExtract("jaké je počasí 2 týdny od další pátek",
"2017-07-14 00:00:00", "jaké je počasí")
testExtract("jaké je počasí Středa v 0700 hodina",
"2017-06-28 07:00:00", "jaké je počasí")
testExtract("Nastav budík Středa v 7 hodin",
"2017-06-28 07:00:00", "nastav budík")
testExtract("Nastav schůzku v 12:45 pm další Čtvrtek",
"2017-07-06 12:45:00", "nastav schůzku")
testExtract("Jaké je počasí tento Čtvrtek?",
"2017-06-29 00:00:00", "jaké je počasí")
testExtract("nastav návštěvu na 2 týdny a 6 dní od Sobota",
"2017-07-21 00:00:00", "nastav návštěvu")
testExtract("Zahaj invazi v 03 45 v Čtvrtek",
"2017-06-29 03:45:00", "zahaj invazi")
testExtract("Zahaj invazi v 800 hodin v Čtvrtek",
"2017-06-29 08:00:00", "zahaj invazi")
testExtract("Zahaj párty v 8 hodin v večer v Čtvrtek",
"2017-06-29 20:00:00", "zahaj párty")
testExtract("Zahaj invazi v 8 v večer v Čtvrtek",
"2017-06-29 20:00:00", "zahaj invazi")
testExtract("Zahaj invazi v Čtvrtek v poledne",
"2017-06-29 12:00:00", "zahaj invazi")
testExtract("Zahaj invazi v Čtvrtek v půlnoc",
"2017-06-29 00:00:00", "zahaj invazi")
testExtract("Zahaj invazi v Čtvrtek v 0500",
"2017-06-29 05:00:00", "zahaj invazi")
testExtract("připomeň mi abych vstal v 4 roky",
"2021-06-27 00:00:00", "připomeň mi abych vstal")
testExtract("připomeň mi abych vstal v 4 roky a 4 dny",
"2021-07-01 00:00:00", "připomeň mi abych vstal")
testExtract("jaké je počasí 3 dny po zítra?",
"2017-07-01 00:00:00", "jaké je počasí")
testExtract("prosinec 3",
"2017-12-03 00:00:00", "")
testExtract("sejdeme se v 8:00 dnes večer",
"2017-06-27 20:00:00", "sejdeme se")
testExtract("sejdeme se v 5pm",
"2017-06-27 17:00:00", "sejdeme se")
testExtract("sejdeme se v 8 am",
"2017-06-28 08:00:00", "sejdeme se")
testExtract("připomeň mi abych vstal v 8 am",
"2017-06-28 08:00:00", "připomeň mi abych vstal")
testExtract("jaké je počasí v úterý",
"2017-06-27 00:00:00", "jaké je počasí")
testExtract("jaké je počasí v pondělí",
"2017-07-03 00:00:00", "jaké je počasí")
testExtract("jaké je počasí toto Středa",
"2017-06-28 00:00:00", "jaké je počasí")
testExtract("v Čtvrtek jaké je počasí",
"2017-06-29 00:00:00", "jaké je počasí")
testExtract("tento Čtvrtek jaké je počasí",
"2017-06-29 00:00:00", "jaké je počasí")
testExtract("poslední pondělí jaké bylo počasí",
"2017-06-26 00:00:00", "jaké bylo počasí")
testExtract("nastav budík na Středa večer v 8",
"2017-06-28 20:00:00", "nastav budík")
testExtract("nastav budík na Středa v 3 hodiny v odpoledne",
"2017-06-28 15:00:00", "nastav budík")
testExtract("nastav budík na Středa v 3 hodiny v ráno",
"2017-06-28 03:00:00", "nastav budík")
testExtract("nastav budík na Středa ráno v 7 hodin",
"2017-06-28 07:00:00", "nastav budík")
testExtract("nastav budík na dnes v 7 hodin",
"2017-06-27 19:00:00", "nastav budík")
testExtract("nastav budík na tento večer v 7 hodin",
"2017-06-27 19:00:00", "nastav budík")
# TODO: This test is imperfect due to the "at 7:00" still in the
# remainder. But let it pass for now since time is correct
testExtract("nastav budík na tento večer v 7:00",
"2017-06-27 19:00:00", "nastav budík v 7:00")
testExtract("večer v červen 5 2017 připomeň mi" +
" abych zavolal mámě",
"2017-06-05 19:00:00", "připomeň mi abych zavolal mámě")
# TODO: This test is imperfect due to the missing "for" in the
# remainder. But let it pass for now since time is correct
testExtract("aktualizuj můj kalendář na ranní schůzku s julius" +
" v březnu 4",
"2018-03-04 08:00:00",
"aktualizuj můj kalendář schůzku s julius")
testExtract("připomeň mi abych zavolal mámě další úterý",
"2017-07-04 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě 3 týdny",
"2017-07-18 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 8 týdny",
"2017-08-22 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 8 týdny a 2 dny",
"2017-08-24 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 4 dny",
"2017-07-01 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 3 měsíce",
"2017-09-27 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 2 roky a 2 dny",
"2019-06-29 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě další týden",
"2017-07-04 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 10am v Sobota",
"2017-07-01 10:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 10am tato Sobota",
"2017-07-01 10:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 10 další Sobota",
"2017-07-01 10:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 10am další Sobota",
"2017-07-01 10:00:00", "připomeň mi abych zavolal mámě")
# test yesterday
testExtract("jaký den byl včera",
"2017-06-26 00:00:00", "jaký den byl")
testExtract("jaký den byl den před včera",
"2017-06-25 00:00:00", "jaký den byl")
testExtract("měl jsem večeři včera v 6",
"2017-06-26 06:00:00", "měl jsem večeři")
testExtract("měl jsem večeři včera v 6 am",
"2017-06-26 06:00:00", "měl jsem večeři")
testExtract("měl jsem večeři včera v 6 pm",
"2017-06-26 18:00:00", "měl jsem večeři")
        # The two tests below ensure that a time is picked
        # even if no am/pm is specified,
        # in the case of weekdays/tonight
testExtract("nastav budík na 9 o víkendech",
"2017-06-27 21:00:00", "nastav budík víkendech")
testExtract("na 8 dnes večer",
"2017-06-27 20:00:00", "")
testExtract("na 8:30pm dnes večer",
"2017-06-27 20:30:00", "")
# Tests a time with ':' & without am/pm
testExtract("nastav budík na dnes večer 9:30",
"2017-06-27 21:30:00", "nastav budík")
testExtract("nastav budík na 9:00 na dnes večer",
"2017-06-27 21:00:00", "nastav budík")
# Check if it picks intent irrespective of correctness
testExtract("nastav budík na 9 hodin dnes večer",
"2017-06-27 21:00:00", "nastav budík")
testExtract("připomeň mi hru dnes v noci v 11:30",
"2017-06-27 23:30:00", "připomeň mi hru")
testExtract("nastav budík v 7:30 o výkendech",
"2017-06-27 19:30:00", "nastav budík o výkendech")
# "# days <from X/after X>"
testExtract("mé narozeniny jsou 2 dny od dnes",
"2017-06-29 00:00:00", "mé narozeniny jsou")
testExtract("mé narozeniny jsou 2 dny po dnes",
"2017-06-29 00:00:00", "mé narozeniny jsou")
testExtract("mé narozeniny jsou 2 dny od zítra",
"2017-06-30 00:00:00", "mé narozeniny jsou")
testExtract("mé narozeniny jsou 2 dny od zítra",
"2017-06-30 00:00:00", "mé narozeniny jsou")
testExtract("připomeň mi abych zavolal mámě v 10am 2 dny po další Sobota",
"2017-07-10 10:00:00", "připomeň mi abych zavolal mámě")
testExtract("mé narozeniny jsou 2 dny od včera",
"2017-06-28 00:00:00", "mé narozeniny jsou")
testExtract("mé narozeniny jsou 2 dny po včera",
"2017-06-28 00:00:00", "mé narozeniny jsou")
# "# days ago>"
testExtract("mé narozeniny byly před 1 den",
"2017-06-26 00:00:00", "mé narozeniny byly")
testExtract("mé narozeniny byly před 2 dny",
"2017-06-25 00:00:00", "mé narozeniny byly")
testExtract("mé narozeniny byly před 3 dny",
"2017-06-24 00:00:00", "mé narozeniny byly")
testExtract("mé narozeniny byly před 4 dny",
"2017-06-23 00:00:00", "mé narozeniny byly")
        # TODO this test is imperfect due to "tonight" in the reminder, but let it pass since the date is correct
testExtract("sejdeme se dnes v noci",
"2017-06-27 22:00:00", "sejdeme se noci")
        # TODO this test is imperfect due to "at night" in the reminder, but let it pass since the date is correct
testExtract("sejdeme se později v noci",
"2017-06-27 22:00:00", "sejdeme se později v noci")
        # TODO this test is imperfect due to "night" in the reminder, but let it pass since the date is correct
testExtract("Jaké bude počasí zítra v noci",
"2017-06-28 22:00:00", "jaké bude počasí v noci")
        # TODO this test is imperfect due to "night" in the reminder, but let it pass since the date is correct
testExtract("jaké bude počasí příští úterý v noci",
"2017-07-04 22:00:00", "jaké bude počasí v noci")
def test_extract_ambiguous_time_cs(self):
morning = datetime(2017, 6, 27, 8, 1, 2)
večer = datetime(2017, 6, 27, 20, 1, 2)
noonish = datetime(2017, 6, 27, 12, 1, 2)
self.assertEqual(
extract_datetime('krmení ryb'), None)
self.assertEqual(
extract_datetime('den'), None)
self.assertEqual(
extract_datetime('týden'), None)
self.assertEqual(
extract_datetime('měsíc'), None)
self.assertEqual(
extract_datetime('rok'), None)
self.assertEqual(
extract_datetime(' '), None)
self.assertEqual(
extract_datetime('nakrmit ryby v 10 hodin', morning)[0],
datetime(2017, 6, 27, 10, 0, 0))
self.assertEqual(
extract_datetime('nakrmit ryby v 10 hodin', noonish)[0],
datetime(2017, 6, 27, 22, 0, 0))
self.assertEqual(
extract_datetime('nakrmit ryby v 10 hodin', večer)[0],
datetime(2017, 6, 27, 22, 0, 0))
"""
    In Czech, the month "May" and the verb "may" have different forms, so this test does not apply
def test_extract_date_with_may_I_cs(self):
now = datetime(2019, 7, 4, 8, 1, 2)
may_date = datetime(2019, 5, 2, 10, 11, 20)
self.assertEqual(
extract_datetime('Můžu vědět jaký je to čas zítra', now)[0],
datetime(2019, 7, 5, 0, 0, 0))
self.assertEqual(
extract_datetime('Můžu vědět kdy je 10 hodin', now)[0],
datetime(2019, 7, 4, 10, 0, 0))
self.assertEqual(
extract_datetime('24. můžu chtít připomenutí', may_date)[0],
datetime(2019, 5, 24, 0, 0, 0))
"""
def test_extract_relativedatetime_cs(self):
def extractWithFormat(text):
date = datetime(2017, 6, 27, 10, 1, 2)
[extractedDate, leftover] = extract_datetime(text, date)
extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S")
return [extractedDate, leftover]
def testExtract(text, expected_date, expected_leftover):
res = extractWithFormat(normalize(text))
self.assertEqual(res[0], expected_date, "for=" + text)
self.assertEqual(res[1], expected_leftover, "for=" + text)
testExtract("sejdeme se za 5 minut",
"2017-06-27 10:06:02", "sejdeme se")
testExtract("sejdeme se za 5minut",
"2017-06-27 10:06:02", "sejdeme se")
testExtract("sejdeme se za 5 sekund",
"2017-06-27 10:01:07", "sejdeme se")
testExtract("sejdeme se za 1 hodinu",
"2017-06-27 11:01:02", "sejdeme se")
testExtract("sejdeme se za 2 hodiny",
"2017-06-27 12:01:02", "sejdeme se")
print("TODO") # Need better normaliting procedure for czech inflexion
# testExtract("sejdeme se za 2hodiny",
# "2017-06-27 12:01:02", "sejdeme se")
testExtract("sejdeme se za 1 minutu",
"2017-06-27 10:02:02", "sejdeme se")
testExtract("sejdeme se za 1 sekundu",
"2017-06-27 10:01:03", "sejdeme se")
testExtract("sejdeme se za 5sekund",
"2017-06-27 10:01:07", "sejdeme se")
def test_spaces(self):
self.assertEqual(normalize(" tohle je test"),
"tohle je test")
self.assertEqual(normalize(" tohle je test "),
"tohle je test")
self.assertEqual(normalize(" tohle je jedna test"),
"tohle je 1 test")
def test_numbers(self):
self.assertEqual(normalize("tohle je jedna dva tři test"),
"tohle je 1 2 3 test")
self.assertEqual(normalize(" to je čtyři pět šest test"),
"to je 4 5 6 test")
self.assertEqual(normalize("to je sedum osum devět test"),
"to je 7 8 9 test")
self.assertEqual(normalize("to je sedm osm devět test"),
"to je 7 8 9 test")
self.assertEqual(normalize("tohle je deset jedenáct dvanáct test"),
"tohle je 10 11 12 test")
self.assertEqual(normalize("tohle je třináct čtrnáct test"),
"tohle je 13 14 test")
self.assertEqual(normalize("tohle je patnáct šestnáct sedmnáct"),
"tohle je 15 16 17")
self.assertEqual(normalize("tohle je osmnáct devatenáct dvacet"),
"tohle je 18 19 20")
self.assertEqual(normalize("tohle je jedna devatenáct dvacet dva"),
"tohle je 1 19 20 2")
self.assertEqual(normalize("tohle je jedna sto"),
"tohle je 1 sto")
self.assertEqual(normalize("tohle je jedna dva dvacet dva"),
"tohle je 1 2 20 2")
self.assertEqual(normalize("tohle je jedna a půl"),
"tohle je 1 a půl")
self.assertEqual(normalize("tohle je jedna a půl a pět šest"),
"tohle je 1 a půl a 5 6")
def test_multiple_numbers(self):
self.assertEqual(extract_numbers("tohle je jedna dva tři test"),
[1.0, 2.0, 3.0])
self.assertEqual(extract_numbers("to je čtyři pět šest test"),
[4.0, 5.0, 6.0])
self.assertEqual(extract_numbers("tohle je deset jedenáct dvanáct test"),
[10.0, 11.0, 12.0])
self.assertEqual(extract_numbers("tohle je jedna dvacet jedna test"),
[1.0, 21.0])
self.assertEqual(extract_numbers("1 pes, sedm prasat, macdonald měl "
"farmu, 3 krát 5 makaréna"),
[1, 7, 3, 5])
self.assertEqual(extract_numbers("dva piva pro dva medvědy"),
[2.0, 2.0])
self.assertEqual(extract_numbers("dvacet 20 dvacet"),
[20, 20, 20])
self.assertEqual(extract_numbers("dvacet 20 22"),
[20.0, 20.0, 22.0])
self.assertEqual(extract_numbers("dvacet dvacet dva dvacet"),
[20, 22, 20])
self.assertEqual(extract_numbers("dvacet 2"),
[22.0])
self.assertEqual(extract_numbers("dvacet 20 dvacet 2"),
[20, 20, 22])
self.assertEqual(extract_numbers("třetina jedna"),
[1 / 3, 1])
self.assertEqual(extract_numbers("třetí", ordinals=True), [3])
self.assertEqual(extract_numbers("šest trillion", short_scale=True),
[6e12])
self.assertEqual(extract_numbers("šest trilion", short_scale=False),
[6e18])
self.assertEqual(extract_numbers("dvě prasátka a šest trillion bakterií",
short_scale=True), [2, 6e12])
self.assertEqual(extract_numbers("dvě prasátka a šest trilion bakterií",
short_scale=False), [2, 6e18])
self.assertEqual(extract_numbers("třicátý druhý nebo první",
ordinals=True), [32, 1])
self.assertEqual(extract_numbers("tohle je sedm osm devět a"
" půl test"),
[7.0, 8.0, 9.5])
if __name__ == "__main__":
unittest.main()
| [((1097, 1119), 'lingua_franca.load_language', 'load_language', (['"""cs-cz"""'], {}), "('cs-cz')\n", (1110, 1119), False, 'from lingua_franca import get_default_lang, set_default_lang, load_language, unload_language\n'), ((1124, 1146), 'lingua_franca.set_default_lang', 'set_default_lang', (['"""cs"""'], {}), "('cs')\n", (1140, 1146), False, 'from lingua_franca import get_default_lang, set_default_lang, load_language, unload_language\n'), ((1175, 1196), 'lingua_franca.unload_language', 'unload_language', (['"""cs"""'], {}), "('cs')\n", (1190, 1196), False, 'from lingua_franca import get_default_lang, set_default_lang, load_language, unload_language\n'), ((40315, 40330), 'unittest.main', 'unittest.main', ([], {}), '()\n', (40328, 40330), False, 'import unittest\n'), ((32917, 32947), 'datetime.datetime', 'datetime', (['(2017)', '(6)', '(27)', '(8)', '(1)', '(2)'], {}), '(2017, 6, 27, 8, 1, 2)\n', (32925, 32947), False, 'from datetime import datetime, timedelta\n'), ((32965, 32996), 'datetime.datetime', 'datetime', (['(2017)', '(6)', '(27)', '(20)', '(1)', '(2)'], {}), '(2017, 6, 27, 20, 1, 2)\n', (32973, 32996), False, 'from datetime import datetime, timedelta\n'), ((33014, 33045), 'datetime.datetime', 'datetime', (['(2017)', '(6)', '(27)', '(12)', '(1)', '(2)'], {}), '(2017, 6, 27, 12, 1, 2)\n', (33022, 33045), False, 'from datetime import datetime, timedelta\n'), ((2331, 2383), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je první test"""'], {'ordinals': '(True)'}), "('tohle je první test', ordinals=True)\n", (2345, 2383), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((2453, 2486), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je 2 test"""'], {}), "('tohle je 2 test')\n", (2467, 2486), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((2516, 2568), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je druhý test"""'], {'ordinals': '(True)'}), "('tohle je druhý test', ordinals=True)\n", (2530, 2568), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((2714, 2766), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je třetí test"""'], {'ordinals': '(True)'}), "('tohle je třetí test', ordinals=True)\n", (2728, 2766), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((2838, 2881), 'lingua_franca.parse.extract_number', 'extract_number', (['"""ten čtvrtý"""'], {'ordinals': '(True)'}), "('ten čtvrtý', ordinals=True)\n", (2852, 2881), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((2913, 2963), 'lingua_franca.parse.extract_number', 'extract_number', (['"""ten třicátý šestý"""'], {'ordinals': '(True)'}), "('ten třicátý šestý', ordinals=True)\n", (2927, 2963), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((3009, 3048), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je test číslo 4"""'], {}), "('tohle je test číslo 4')\n", (3023, 3048), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((3078, 3115), 'lingua_franca.parse.extract_number', 'extract_number', (['"""jedna třetina šálku"""'], {}), "('jedna třetina šálku')\n", (3092, 3115), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((3153, 3180), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tři šálky"""'], {}), "('tři šálky')\n", (3167, 3180), False, 'from lingua_franca.parse import extract_number, 
extract_numbers\n'), ((3210, 3237), 'lingua_franca.parse.extract_number', 'extract_number', (['"""1/3 šálku"""'], {}), "('1/3 šálku')\n", (3224, 3237), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((3275, 3307), 'lingua_franca.parse.extract_number', 'extract_number', (['"""čtvrtina šálku"""'], {}), "('čtvrtina šálku')\n", (3289, 3307), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((3340, 3365), 'lingua_franca.parse.extract_number', 'extract_number', (['"""1/4 cup"""'], {}), "('1/4 cup')\n", (3354, 3365), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((3398, 3436), 'lingua_franca.parse.extract_number', 'extract_number', (['"""jedna čtvrtina šálku"""'], {}), "('jedna čtvrtina šálku')\n", (3412, 3436), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((3469, 3496), 'lingua_franca.parse.extract_number', 'extract_number', (['"""2/3 šálků"""'], {}), "('2/3 šálků')\n", (3483, 3496), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((3534, 3561), 'lingua_franca.parse.extract_number', 'extract_number', (['"""3/4 šálků"""'], {}), "('3/4 šálků')\n", (3548, 3561), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((3599, 3630), 'lingua_franca.parse.extract_number', 'extract_number', (['"""1 a 3/4 šálků"""'], {}), "('1 a 3/4 šálků')\n", (3613, 3630), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((3663, 3694), 'lingua_franca.parse.extract_number', 'extract_number', (['"""1 šálek a půl"""'], {}), "('1 šálek a půl')\n", (3677, 3694), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((3726, 3766), 'lingua_franca.parse.extract_number', 'extract_number', (['"""jeden šálek a polovina"""'], {}), "('jeden šálek a polovina')\n", (3740, 3766), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((3798, 3833), 'lingua_franca.parse.extract_number', 'extract_number', (['"""jedna a půl šálků"""'], {}), "('jedna a půl šálků')\n", (3812, 3833), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((3865, 3911), 'lingua_franca.parse.extract_number', 'extract_number', (['"""jedna a jedna polovina šálků"""'], {}), "('jedna a jedna polovina šálků')\n", (3879, 3911), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((3943, 3979), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tři čtvrtina šálků"""'], {}), "('tři čtvrtina šálků')\n", (3957, 3979), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((4017, 4053), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tři čtvrtiny šálků"""'], {}), "('tři čtvrtiny šálků')\n", (4031, 4053), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((4091, 4119), 'lingua_franca.parse.extract_number', 'extract_number', (['"""dvacet dva"""'], {}), "('dvacet dva')\n", (4105, 4119), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((4150, 4207), 'lingua_franca.parse.extract_number', 'extract_number', (['"""Dvacet dva s velkým písmenam na začátku"""'], {}), "('Dvacet dva s velkým písmenam na začátku')\n", (4164, 4207), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((4251, 4304), 'lingua_franca.parse.extract_number', 'extract_number', (['"""dvacet Dva s dva krát velkým písmem"""'], {}), "('dvacet Dva s dva krát velkým písmem')\n", (4265, 4304), False, 
'from lingua_franca.parse import extract_number, extract_numbers\n'), ((4348, 4402), 'lingua_franca.parse.extract_number', 'extract_number', (['"""dvacet Dva s různou velikostí písmen"""'], {}), "('dvacet Dva s různou velikostí písmen')\n", (4362, 4402), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((4446, 4487), 'lingua_franca.parse.extract_number', 'extract_number', (['"""Dvacet dva a Tři Pětiny"""'], {}), "('Dvacet dva a Tři Pětiny')\n", (4460, 4487), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((4520, 4545), 'lingua_franca.parse.extract_number', 'extract_number', (['"""dvě sto"""'], {}), "('dvě sto')\n", (4534, 4545), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((4577, 4606), 'lingua_franca.parse.extract_number', 'extract_number', (['"""devět tisíc"""'], {}), "('devět tisíc')\n", (4591, 4606), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((4639, 4678), 'lingua_franca.parse.extract_number', 'extract_number', (['"""šest sto šedesát šest"""'], {}), "('šest sto šedesát šest')\n", (4653, 4678), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((4710, 4739), 'lingua_franca.parse.extract_number', 'extract_number', (['"""dva million"""'], {}), "('dva million')\n", (4724, 4739), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((4775, 4835), 'lingua_franca.parse.extract_number', 'extract_number', (['"""dva million pět sto tisíc tun žhavého kovu"""'], {}), "('dva million pět sto tisíc tun žhavého kovu')\n", (4789, 4835), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((4914, 4945), 'lingua_franca.parse.extract_number', 'extract_number', (['"""šest trillion"""'], {}), "('šest trillion')\n", (4928, 4945), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((4989, 5038), 'lingua_franca.parse.extract_number', 'extract_number', (['"""šest trilion"""'], {'short_scale': '(False)'}), "('šest trilion', short_scale=False)\n", (5003, 5038), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((5097, 5130), 'lingua_franca.parse.extract_number', 'extract_number', (['"""jedna tečka pět"""'], {}), "('jedna tečka pět')\n", (5111, 5130), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((5162, 5197), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tři tečka čtrnáct"""'], {}), "('tři tečka čtrnáct')\n", (5176, 5197), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((5230, 5262), 'lingua_franca.parse.extract_number', 'extract_number', (['"""nula tečka dva"""'], {}), "('nula tečka dva')\n", (5244, 5262), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((5294, 5325), 'lingua_franca.parse.extract_number', 'extract_number', (['"""billion roků """'], {}), "('billion roků ')\n", (5308, 5325), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((5391, 5439), 'lingua_franca.parse.extract_number', 'extract_number', (['"""bilion roků"""'], {'short_scale': '(False)'}), "('bilion roků', short_scale=False)\n", (5405, 5439), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((5548, 5581), 'lingua_franca.parse.extract_number', 'extract_number', (['"""jedno sto tisíc"""'], {}), "('jedno sto tisíc')\n", (5562, 5581), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((5616, 5641), 
'lingua_franca.parse.extract_number', 'extract_number', (['"""mínus 2"""'], {}), "('mínus 2')\n", (5630, 5641), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((5672, 5707), 'lingua_franca.parse.extract_number', 'extract_number', (['"""záporné sedmdesát"""'], {}), "('záporné sedmdesát')\n", (5686, 5707), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((5739, 5770), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tisíc million"""'], {}), "('tisíc million')\n", (5753, 5770), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((5809, 5854), 'lingua_franca.parse.extract_number', 'extract_number', (['"""miliarda"""'], {'short_scale': '(False)'}), "('miliarda', short_scale=False)\n", (5823, 5854), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((5918, 5951), 'lingua_franca.parse.extract_number', 'extract_number', (['"""šestina třetina"""'], {}), "('šestina třetina')\n", (5932, 5951), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((6014, 6060), 'lingua_franca.parse.extract_number', 'extract_number', (['"""šestina třetí"""'], {'ordinals': '(True)'}), "('šestina třetí', ordinals=True)\n", (6028, 6060), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((6115, 6146), 'lingua_franca.parse.extract_number', 'extract_number', (['"""třicet sekund"""'], {}), "('třicet sekund')\n", (6129, 6146), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((6177, 6223), 'lingua_franca.parse.extract_number', 'extract_number', (['"""třicátý druhý"""'], {'ordinals': '(True)'}), "('třicátý druhý', ordinals=True)\n", (6191, 6223), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((6254, 6310), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je billiontý test"""'], {'ordinals': '(True)'}), "('tohle je billiontý test', ordinals=True)\n", (6268, 6310), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((6497, 6571), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je biliontý test"""'], {'ordinals': '(True)', 'short_scale': '(False)'}), "('tohle je biliontý test', ordinals=True, short_scale=False)\n", (6511, 6571), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((6921, 6951), 'lingua_franca.parse.extract_number', 'extract_number', (['"""dvacet tisíc"""'], {}), "('dvacet tisíc')\n", (6935, 6951), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((6985, 7018), 'lingua_franca.parse.extract_number', 'extract_number', (['"""padesát million"""'], {}), "('padesát million')\n", (6999, 7018), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((7179, 7374), 'lingua_franca.parse.extract_number', 'extract_number', (['"""dvacet billion tři sto million devět sto padesát tisíc šest sto sedmdesát pět tečka osm"""'], {}), "(\n 'dvacet billion tři sto million devět sto padesát tisíc šest sto sedmdesát pět tečka osm'\n )\n", (7193, 7374), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((7435, 7646), 'lingua_franca.parse.extract_number', 'extract_number', (['"""devět sto devadesát devět million devět sto devadesát devět tisíc devět sto devadesát devět tečka devět"""'], {}), "(\n 'devět sto devadesát devět million devět sto devadesát devět tisíc devět sto devadesát devět tečka devět'\n )\n", (7449, 7646), False, 'from 
lingua_franca.parse import extract_number, extract_numbers\n'), ((7759, 7864), 'lingua_franca.parse.extract_number', 'extract_number', (['"""osm sto trillion dva sto padesát sedm"""'], {}), "(\n 'osm sto trillion dva sto padesát sedm'\n )\n", (7773, 7864), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((8237, 8266), 'lingua_franca.parse.extract_number', 'extract_number', (['"""křehká nula"""'], {}), "('křehká nula')\n", (8251, 8266), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((8419, 8448), 'lingua_franca.parse.extract_number', 'extract_number', (['"""dvojice piv"""'], {}), "('dvojice piv')\n", (8433, 8448), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((8478, 8511), 'lingua_franca.parse.extract_number', 'extract_number', (['"""dvojice sto piv"""'], {}), "('dvojice sto piv')\n", (8492, 8511), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((8543, 8578), 'lingua_franca.parse.extract_number', 'extract_number', (['"""dvojice tisíc piv"""'], {}), "('dvojice tisíc piv')\n", (8557, 8578), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((8612, 8660), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je 7 test"""'], {'ordinals': '(True)'}), "('tohle je 7 test', ordinals=True)\n", (8626, 8660), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((8703, 8752), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je 7 test"""'], {'ordinals': '(False)'}), "('tohle je 7 test', ordinals=False)\n", (8717, 8752), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((8864, 8898), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je 1. test"""'], {}), "('tohle je 1. test')\n", (8878, 8898), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((8928, 8962), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je 2. test"""'], {}), "('tohle je 2. test')\n", (8942, 8962), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((8992, 9026), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je 3. test"""'], {}), "('tohle je 3. test')\n", (9006, 9026), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((9056, 9091), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je 31. test"""'], {}), "('tohle je 31. test')\n", (9070, 9091), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((9122, 9157), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je 32. test"""'], {}), "('tohle je 32. test')\n", (9136, 9157), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((9188, 9223), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je 33. test"""'], {}), "('tohle je 33. test')\n", (9202, 9223), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((9254, 9289), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je 34. test"""'], {}), "('tohle je 34. 
test')\n", (9268, 9289), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((9320, 9349), 'lingua_franca.parse.extract_number', 'extract_number', (['"""celkem 100%"""'], {}), "('celkem 100%')\n", (9334, 9349), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((9422, 9451), 'lingua_franca.parse.extract_duration', 'extract_duration', (['"""10 sekund"""'], {}), "('10 sekund')\n", (9438, 9451), False, 'from lingua_franca.parse import extract_duration\n'), ((9534, 9561), 'lingua_franca.parse.extract_duration', 'extract_duration', (['"""5 minut"""'], {}), "('5 minut')\n", (9550, 9561), False, 'from lingua_franca.parse import extract_duration\n'), ((9641, 9669), 'lingua_franca.parse.extract_duration', 'extract_duration', (['"""2 hodiny"""'], {}), "('2 hodiny')\n", (9657, 9669), False, 'from lingua_franca.parse import extract_duration\n'), ((9747, 9772), 'lingua_franca.parse.extract_duration', 'extract_duration', (['"""3 dny"""'], {}), "('3 dny')\n", (9763, 9772), False, 'from lingua_franca.parse import extract_duration\n'), ((9849, 9877), 'lingua_franca.parse.extract_duration', 'extract_duration', (['"""25 týdnů"""'], {}), "('25 týdnů')\n", (9865, 9877), False, 'from lingua_franca.parse import extract_duration\n'), ((9956, 9986), 'lingua_franca.parse.extract_duration', 'extract_duration', (['"""sedm hodin"""'], {}), "('sedm hodin')\n", (9972, 9986), False, 'from lingua_franca.parse import extract_duration\n'), ((10064, 10094), 'lingua_franca.parse.extract_duration', 'extract_duration', (['"""7.5 sekund"""'], {}), "('7.5 sekund')\n", (10080, 10094), False, 'from lingua_franca.parse import extract_duration\n'), ((10176, 10234), 'lingua_franca.parse.extract_duration', 'extract_duration', (['"""osm a polovina dne třicet devět sekund"""'], {}), "('osm a polovina dne třicet devět sekund')\n", (10192, 10234), False, 'from lingua_franca.parse import extract_duration\n'), ((10370, 10416), 'lingua_franca.parse.extract_duration', 'extract_duration', (['"""Nastav časovač na 30 minut"""'], {}), "('Nastav časovač na 30 minut')\n", (10386, 10416), False, 'from lingua_franca.parse import extract_duration\n'), ((10514, 10562), 'lingua_franca.parse.extract_duration', 'extract_duration', (['"""Čtyři a půl minuty do západu"""'], {}), "('Čtyři a půl minuty do západu')\n", (10530, 10562), False, 'from lingua_franca.parse import extract_duration\n'), ((10698, 10744), 'lingua_franca.parse.extract_duration', 'extract_duration', (['"""devatenáct minut po hodině"""'], {}), "('devatenáct minut po hodině')\n", (10714, 10744), False, 'from lingua_franca.parse import extract_duration\n'), ((10834, 10938), 'lingua_franca.parse.extract_duration', 'extract_duration', (['"""vzbuď mě za tři týdny, čtyři sto devadesát sedm dní, a tři sto 91.6 sekund"""'], {}), "(\n 'vzbuď mě za tři týdny, čtyři sto devadesát sedm dní, a tři sto 91.6 sekund'\n )\n", (10850, 10938), False, 'from lingua_franca.parse import extract_duration\n'), ((11164, 11238), 'lingua_franca.parse.extract_duration', 'extract_duration', (['"""film je jedna hodina, padesát sedm a půl minuty dlouhý"""'], {}), "('film je jedna hodina, padesát sedm a půl minuty dlouhý')\n", (11180, 11238), False, 'from lingua_franca.parse import extract_duration\n'), ((11421, 11450), 'lingua_franca.parse.extract_duration', 'extract_duration', (['"""10-sekund"""'], {}), "('10-sekund')\n", (11437, 11450), False, 'from lingua_franca.parse import extract_duration\n'), ((11533, 11560), 'lingua_franca.parse.extract_duration', 
'extract_duration', (['"""5-minut"""'], {}), "('5-minut')\n", (11549, 11560), False, 'from lingua_franca.parse import extract_duration\n'), ((11711, 11739), 'datetime.datetime', 'datetime', (['(2017)', '(6)', '(27)', '(13)', '(4)'], {}), '(2017, 6, 27, 13, 4)\n', (11719, 11739), False, 'from datetime import datetime, timedelta\n'), ((11810, 11838), 'lingua_franca.parse.extract_datetime', 'extract_datetime', (['text', 'date'], {}), '(text, date)\n', (11826, 11838), False, 'from lingua_franca.parse import extract_datetime\n'), ((33084, 33114), 'lingua_franca.parse.extract_datetime', 'extract_datetime', (['"""krmení ryb"""'], {}), "('krmení ryb')\n", (33100, 33114), False, 'from lingua_franca.parse import extract_datetime\n'), ((33160, 33183), 'lingua_franca.parse.extract_datetime', 'extract_datetime', (['"""den"""'], {}), "('den')\n", (33176, 33183), False, 'from lingua_franca.parse import extract_datetime\n'), ((33229, 33254), 'lingua_franca.parse.extract_datetime', 'extract_datetime', (['"""týden"""'], {}), "('týden')\n", (33245, 33254), False, 'from lingua_franca.parse import extract_datetime\n'), ((33300, 33325), 'lingua_franca.parse.extract_datetime', 'extract_datetime', (['"""měsíc"""'], {}), "('měsíc')\n", (33316, 33325), False, 'from lingua_franca.parse import extract_datetime\n'), ((33371, 33394), 'lingua_franca.parse.extract_datetime', 'extract_datetime', (['"""rok"""'], {}), "('rok')\n", (33387, 33394), False, 'from lingua_franca.parse import extract_datetime\n'), ((33440, 33461), 'lingua_franca.parse.extract_datetime', 'extract_datetime', (['""" """'], {}), "(' ')\n", (33456, 33461), False, 'from lingua_franca.parse import extract_datetime\n'), ((33576, 33607), 'datetime.datetime', 'datetime', (['(2017)', '(6)', '(27)', '(10)', '(0)', '(0)'], {}), '(2017, 6, 27, 10, 0, 0)\n', (33584, 33607), False, 'from datetime import datetime, timedelta\n'), ((33716, 33747), 'datetime.datetime', 'datetime', (['(2017)', '(6)', '(27)', '(22)', '(0)', '(0)'], {}), '(2017, 6, 27, 22, 0, 0)\n', (33724, 33747), False, 'from datetime import datetime, timedelta\n'), ((33854, 33885), 'datetime.datetime', 'datetime', (['(2017)', '(6)', '(27)', '(22)', '(0)', '(0)'], {}), '(2017, 6, 27, 22, 0, 0)\n', (33862, 33885), False, 'from datetime import datetime, timedelta\n'), ((34625, 34656), 'datetime.datetime', 'datetime', (['(2017)', '(6)', '(27)', '(10)', '(1)', '(2)'], {}), '(2017, 6, 27, 10, 1, 2)\n', (34633, 34656), False, 'from datetime import datetime, timedelta\n'), ((34697, 34725), 'lingua_franca.parse.extract_datetime', 'extract_datetime', (['text', 'date'], {}), '(text, date)\n', (34713, 34725), False, 'from lingua_franca.parse import extract_datetime\n'), ((36159, 36191), 'lingua_franca.parse.normalize', 'normalize', (['""" tohle je test"""'], {}), "(' tohle je test')\n", (36168, 36191), False, 'from lingua_franca.parse import normalize\n'), ((36260, 36296), 'lingua_franca.parse.normalize', 'normalize', (['""" tohle je test """'], {}), "(' tohle je test ')\n", (36269, 36296), False, 'from lingua_franca.parse import normalize\n'), ((36365, 36405), 'lingua_franca.parse.normalize', 'normalize', (['""" tohle je jedna test"""'], {}), "(' tohle je jedna test')\n", (36374, 36405), False, 'from lingua_franca.parse import normalize\n'), ((36505, 36546), 'lingua_franca.parse.normalize', 'normalize', (['"""tohle je jedna dva tři test"""'], {}), "('tohle je jedna dva tři test')\n", (36514, 36546), False, 'from lingua_franca.parse import normalize\n'), ((36621, 36662), 'lingua_franca.parse.normalize', 
'normalize', (['""" to je čtyři pět šest test"""'], {}), "(' to je čtyři pět šest test')\n", (36630, 36662), False, 'from lingua_franca.parse import normalize\n'), ((36734, 36774), 'lingua_franca.parse.normalize', 'normalize', (['"""to je sedum osum devět test"""'], {}), "('to je sedum osum devět test')\n", (36743, 36774), False, 'from lingua_franca.parse import normalize\n'), ((36846, 36885), 'lingua_franca.parse.normalize', 'normalize', (['"""to je sedm osm devět test"""'], {}), "('to je sedm osm devět test')\n", (36855, 36885), False, 'from lingua_franca.parse import normalize\n'), ((36957, 37006), 'lingua_franca.parse.normalize', 'normalize', (['"""tohle je deset jedenáct dvanáct test"""'], {}), "('tohle je deset jedenáct dvanáct test')\n", (36966, 37006), False, 'from lingua_franca.parse import normalize\n'), ((37084, 37126), 'lingua_franca.parse.normalize', 'normalize', (['"""tohle je třináct čtrnáct test"""'], {}), "('tohle je třináct čtrnáct test')\n", (37093, 37126), False, 'from lingua_franca.parse import normalize\n'), ((37201, 37248), 'lingua_franca.parse.normalize', 'normalize', (['"""tohle je patnáct šestnáct sedmnáct"""'], {}), "('tohle je patnáct šestnáct sedmnáct')\n", (37210, 37248), False, 'from lingua_franca.parse import normalize\n'), ((37321, 37368), 'lingua_franca.parse.normalize', 'normalize', (['"""tohle je osmnáct devatenáct dvacet"""'], {}), "('tohle je osmnáct devatenáct dvacet')\n", (37330, 37368), False, 'from lingua_franca.parse import normalize\n'), ((37441, 37490), 'lingua_franca.parse.normalize', 'normalize', (['"""tohle je jedna devatenáct dvacet dva"""'], {}), "('tohle je jedna devatenáct dvacet dva')\n", (37450, 37490), False, 'from lingua_franca.parse import normalize\n'), ((37564, 37595), 'lingua_franca.parse.normalize', 'normalize', (['"""tohle je jedna sto"""'], {}), "('tohle je jedna sto')\n", (37573, 37595), False, 'from lingua_franca.parse import normalize\n'), ((37665, 37707), 'lingua_franca.parse.normalize', 'normalize', (['"""tohle je jedna dva dvacet dva"""'], {}), "('tohle je jedna dva dvacet dva')\n", (37674, 37707), False, 'from lingua_franca.parse import normalize\n'), ((37780, 37813), 'lingua_franca.parse.normalize', 'normalize', (['"""tohle je jedna a půl"""'], {}), "('tohle je jedna a půl')\n", (37789, 37813), False, 'from lingua_franca.parse import normalize\n'), ((37885, 37929), 'lingua_franca.parse.normalize', 'normalize', (['"""tohle je jedna a půl a pět šest"""'], {}), "('tohle je jedna a půl a pět šest')\n", (37894, 37929), False, 'from lingua_franca.parse import normalize\n'), ((38045, 38091), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""tohle je jedna dva tři test"""'], {}), "('tohle je jedna dva tři test')\n", (38060, 38091), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((38160, 38204), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""to je čtyři pět šest test"""'], {}), "('to je čtyři pět šest test')\n", (38175, 38204), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((38273, 38328), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""tohle je deset jedenáct dvanáct test"""'], {}), "('tohle je deset jedenáct dvanáct test')\n", (38288, 38328), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((38400, 38451), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""tohle je jedna dvacet jedna test"""'], {}), "('tohle je jedna dvacet jedna test')\n", (38415, 38451), False, 'from 
lingua_franca.parse import extract_number, extract_numbers\n'), ((38516, 38593), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""1 pes, sedm prasat, macdonald měl farmu, 3 krát 5 makaréna"""'], {}), "('1 pes, sedm prasat, macdonald měl farmu, 3 krát 5 makaréna')\n", (38531, 38593), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((38703, 38746), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""dva piva pro dva medvědy"""'], {}), "('dva piva pro dva medvědy')\n", (38718, 38746), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((38810, 38845), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""dvacet 20 dvacet"""'], {}), "('dvacet 20 dvacet')\n", (38825, 38845), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((38911, 38942), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""dvacet 20 22"""'], {}), "('dvacet 20 22')\n", (38926, 38942), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((39014, 39057), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""dvacet dvacet dva dvacet"""'], {}), "('dvacet dvacet dva dvacet')\n", (39029, 39057), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((39123, 39150), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""dvacet 2"""'], {}), "('dvacet 2')\n", (39138, 39150), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((39210, 39247), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""dvacet 20 dvacet 2"""'], {}), "('dvacet 20 dvacet 2')\n", (39225, 39247), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((39313, 39345), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""třetina jedna"""'], {}), "('třetina jedna')\n", (39328, 39345), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((39409, 39448), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""třetí"""'], {'ordinals': '(True)'}), "('třetí', ordinals=True)\n", (39424, 39448), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((39480, 39530), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""šest trillion"""'], {'short_scale': '(True)'}), "('šest trillion', short_scale=True)\n", (39495, 39530), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((39590, 39640), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""šest trilion"""'], {'short_scale': '(False)'}), "('šest trilion', short_scale=False)\n", (39605, 39640), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((39700, 39774), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""dvě prasátka a šest trillion bakterií"""'], {'short_scale': '(True)'}), "('dvě prasátka a šest trillion bakterií', short_scale=True)\n", (39715, 39774), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((39853, 39927), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""dvě prasátka a šest trilion bakterií"""'], {'short_scale': '(False)'}), "('dvě prasátka a šest trilion bakterií', short_scale=False)\n", (39868, 39927), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((40006, 40064), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""třicátý druhý nebo první"""'], {'ordinals': '(True)'}), "('třicátý druhý nebo první', ordinals=True)\n", 
(40021, 40064), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((40141, 40194), 'lingua_franca.parse.extract_numbers', 'extract_numbers', (['"""tohle je sedm osm devět a půl test"""'], {}), "('tohle je sedm osm devět a půl test')\n", (40156, 40194), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((1292, 1325), 'lingua_franca.parse.fuzzy_match', 'fuzzy_match', (['"""ty a já"""', '"""ty a já"""'], {}), "('ty a já', 'ty a já')\n", (1303, 1325), False, 'from lingua_franca.parse import fuzzy_match\n'), ((1358, 1386), 'lingua_franca.parse.fuzzy_match', 'fuzzy_match', (['"""ty a já"""', '"""ty"""'], {}), "('ty a já', 'ty')\n", (1369, 1386), False, 'from lingua_franca.parse import fuzzy_match\n'), ((1418, 1441), 'lingua_franca.parse.fuzzy_match', 'fuzzy_match', (['"""Ty"""', '"""ty"""'], {}), "('Ty', 'ty')\n", (1429, 1441), False, 'from lingua_franca.parse import fuzzy_match\n'), ((1474, 1502), 'lingua_franca.parse.fuzzy_match', 'fuzzy_match', (['"""ty a já"""', '"""ty"""'], {}), "('ty a já', 'ty')\n", (1485, 1502), False, 'from lingua_franca.parse import fuzzy_match\n'), ((1530, 1558), 'lingua_franca.parse.fuzzy_match', 'fuzzy_match', (['"""ty"""', '"""ty a já"""'], {}), "('ty', 'ty a já')\n", (1541, 1558), False, 'from lingua_franca.parse import fuzzy_match\n'), ((1584, 1621), 'lingua_franca.parse.fuzzy_match', 'fuzzy_match', (['"""ty a já"""', '"""on nebo oni"""'], {}), "('ty a já', 'on nebo oni')\n", (1595, 1621), False, 'from lingua_franca.parse import fuzzy_match\n'), ((1771, 1798), 'lingua_franca.parse.match_one', 'match_one', (['"""frank"""', 'choices'], {}), "('frank', choices)\n", (1780, 1798), False, 'from lingua_franca.parse import match_one\n'), ((1837, 1863), 'lingua_franca.parse.match_one', 'match_one', (['"""fran"""', 'choices'], {}), "('fran', choices)\n", (1846, 1863), False, 'from lingua_franca.parse import match_one\n'), ((1902, 1928), 'lingua_franca.parse.match_one', 'match_one', (['"""enry"""', 'choices'], {}), "('enry', choices)\n", (1911, 1928), False, 'from lingua_franca.parse import match_one\n'), ((1967, 1993), 'lingua_franca.parse.match_one', 'match_one', (['"""katt"""', 'choices'], {}), "('katt', choices)\n", (1976, 1993), False, 'from lingua_franca.parse import match_one\n'), ((2134, 2161), 'lingua_franca.parse.match_one', 'match_one', (['"""frank"""', 'choices'], {}), "('frank', choices)\n", (2143, 2161), False, 'from lingua_franca.parse import match_one\n'), ((2194, 2220), 'lingua_franca.parse.match_one', 'match_one', (['"""enry"""', 'choices'], {}), "('enry', choices)\n", (2203, 2220), False, 'from lingua_franca.parse import match_one\n'), ((8032, 8073), 'lingua_franca.parse.extract_number', 'extract_number', (['"""Tenisový hráč je rychlý"""'], {}), "('Tenisový hráč je rychlý')\n", (8046, 8073), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((8108, 8132), 'lingua_franca.parse.extract_number', 'extract_number', (['"""křehký"""'], {}), "('křehký')\n", (8122, 8132), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((8168, 8197), 'lingua_franca.parse.extract_number', 'extract_number', (['"""křehká nula"""'], {}), "('křehká nula')\n", (8182, 8197), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((8794, 8828), 'lingua_franca.parse.extract_number', 'extract_number', (['"""tohle je n. test"""'], {}), "('tohle je n. 
test')\n", (8808, 8828), False, 'from lingua_franca.parse import extract_number, extract_numbers\n'), ((9479, 9502), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10.0)'}), '(seconds=10.0)\n', (9488, 9502), False, 'from datetime import datetime, timedelta\n'), ((9589, 9609), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (9598, 9609), False, 'from datetime import datetime, timedelta\n'), ((9697, 9715), 'datetime.timedelta', 'timedelta', ([], {'hours': '(2)'}), '(hours=2)\n', (9706, 9715), False, 'from datetime import datetime, timedelta\n'), ((9800, 9817), 'datetime.timedelta', 'timedelta', ([], {'days': '(3)'}), '(days=3)\n', (9809, 9817), False, 'from datetime import datetime, timedelta\n'), ((9905, 9924), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(25)'}), '(weeks=25)\n', (9914, 9924), False, 'from datetime import datetime, timedelta\n'), ((10014, 10032), 'datetime.timedelta', 'timedelta', ([], {'hours': '(7)'}), '(hours=7)\n', (10023, 10032), False, 'from datetime import datetime, timedelta\n'), ((10122, 10144), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(7.5)'}), '(seconds=7.5)\n', (10131, 10144), False, 'from datetime import datetime, timedelta\n'), ((10307, 10338), 'datetime.timedelta', 'timedelta', ([], {'days': '(8.5)', 'seconds': '(39)'}), '(days=8.5, seconds=39)\n', (10316, 10338), False, 'from datetime import datetime, timedelta\n'), ((10444, 10465), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (10453, 10465), False, 'from datetime import datetime, timedelta\n'), ((10635, 10657), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(4.5)'}), '(minutes=4.5)\n', (10644, 10657), False, 'from datetime import datetime, timedelta\n'), ((10772, 10793), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(19)'}), '(minutes=19)\n', (10781, 10793), False, 'from datetime import datetime, timedelta\n'), ((11046, 11089), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(3)', 'days': '(497)', 'seconds': '(391.6)'}), '(weeks=3, days=497, seconds=391.6)\n', (11055, 11089), False, 'from datetime import datetime, timedelta\n'), ((11311, 11343), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)', 'minutes': '(57.5)'}), '(hours=1, minutes=57.5)\n', (11320, 11343), False, 'from datetime import datetime, timedelta\n'), ((11478, 11501), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10.0)'}), '(seconds=10.0)\n', (11487, 11501), False, 'from datetime import datetime, timedelta\n'), ((11588, 11608), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (11597, 11608), False, 'from datetime import datetime, timedelta\n'), ((12058, 12073), 'lingua_franca.parse.normalize', 'normalize', (['text'], {}), '(text)\n', (12067, 12073), False, 'from lingua_franca.parse import normalize\n'), ((33507, 33559), 'lingua_franca.parse.extract_datetime', 'extract_datetime', (['"""nakrmit ryby v 10 hodin"""', 'morning'], {}), "('nakrmit ryby v 10 hodin', morning)\n", (33523, 33559), False, 'from lingua_franca.parse import extract_datetime\n'), ((33647, 33699), 'lingua_franca.parse.extract_datetime', 'extract_datetime', (['"""nakrmit ryby v 10 hodin"""', 'noonish'], {}), "('nakrmit ryby v 10 hodin', noonish)\n", (33663, 33699), False, 'from lingua_franca.parse import extract_datetime\n'), ((33787, 33837), 'lingua_franca.parse.extract_datetime', 'extract_datetime', (['"""nakrmit ryby v 10 hodin"""', 'večer'], {}), "('nakrmit ryby v 10 hodin', večer)\n", (33803, 33837), False, 'from 
lingua_franca.parse import extract_datetime\n'), ((34945, 34960), 'lingua_franca.parse.normalize', 'normalize', (['text'], {}), '(text)\n', (34954, 34960), False, 'from lingua_franca.parse import normalize\n')] |
WardenAllen/Uranus | src/net/pluto_ftp.py | 0d20cac631320b558254992c17678ddd1658587b | # !/usr/bin/python
# -*- coding: utf-8 -*-
# @Time : 2020/9/18 12:02
# @Author : WardenAllen
# @File : pluto_ftp.py
# @Brief :
import paramiko
class PlutoFtp:
    # Holds paramiko's SFTPClient object once one of the connect_* methods has been called.
    __sftp = None
def connect_by_pass(self, host, port, uname, pwd):
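        # Open an SSH transport to host:port, authenticate with the password, and wrap it in an SFTP client.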
transport = paramiko.Transport((host, port))
transport.connect(username=uname, password=pwd)
self.__sftp = paramiko.SFTPClient.from_transport(transport)
    def connect_by_key(self, host, port, uname, key_path, key_pass=''):
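        # Same as connect_by_pass, but authenticates with an RSA private key file (optionally passphrase-protected).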
key = paramiko.RSAKey.from_private_key_file(key_path, key_pass)
transport = paramiko.Transport((host, port))
transport.connect(username=uname, pkey=key)
self.__sftp = paramiko.SFTPClient.from_transport(transport)
    def get(self, remote, local, cb=None):
        self.__sftp.get(remote, local, cb)
    def put(self, local, remote, cb=None):
self.__sftp.put(local, remote, cb) | [((301, 333), 'paramiko.Transport', 'paramiko.Transport', (['(host, port)'], {}), '((host, port))\n', (319, 333), False, 'import paramiko\n'), ((412, 457), 'paramiko.SFTPClient.from_transport', 'paramiko.SFTPClient.from_transport', (['transport'], {}), '(transport)\n', (446, 457), False, 'import paramiko\n'), ((547, 604), 'paramiko.RSAKey.from_private_key_file', 'paramiko.RSAKey.from_private_key_file', (['key_path', 'key_pass'], {}), '(key_path, key_pass)\n', (584, 604), False, 'import paramiko\n'), ((625, 657), 'paramiko.Transport', 'paramiko.Transport', (['(host, port)'], {}), '((host, port))\n', (643, 657), False, 'import paramiko\n'), ((732, 777), 'paramiko.SFTPClient.from_transport', 'paramiko.SFTPClient.from_transport', (['transport'], {}), '(transport)\n', (766, 777), False, 'import paramiko\n')] |
alexbrasetvik/Piped | piped/processors/test/__init__.py | 0312c14d6c4c293df378c915cc9787bcc7faed36 | # Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
| [] |
reakfog/personal_computer_voice_assistant | assistance_bot/app.py | 3483f633c57cd2e930f94bcbda9739cde34525aa | import sys
sys.path = ['', '..'] + sys.path[1:]
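# NOTE: the line above swaps the script-dir entry for the current and parent directories so the
# project's own packages (assistance_bot, functionality) can be imported when run directly.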
import daemon
from assistance_bot import core
from functionality.voice_processing import speaking, listening
from functionality.commands import *
if __name__ == '__main__':
speaking.setup_assistant_voice(core.ttsEngine, core.assistant)
while True:
# start speech recording and speech recognition
recognized_speech = listening.get_listening_and_recognition_result(
core.recognizer,
core.microphone)
# executing the given command
execute_command(recognized_speech)
| [((229, 291), 'functionality.voice_processing.speaking.setup_assistant_voice', 'speaking.setup_assistant_voice', (['core.ttsEngine', 'core.assistant'], {}), '(core.ttsEngine, core.assistant)\n', (259, 291), False, 'from functionality.voice_processing import speaking, listening\n'), ((392, 477), 'functionality.voice_processing.listening.get_listening_and_recognition_result', 'listening.get_listening_and_recognition_result', (['core.recognizer', 'core.microphone'], {}), '(core.recognizer, core.microphone\n )\n', (438, 477), False, 'from functionality.voice_processing import speaking, listening\n')] |
tgodzik/intellij-community | python/testData/resolve/AssignmentExpressionsAndOuterVar.py | f5ef4191fc30b69db945633951fb160c1cfb7b6f | total = 0
partial_sums = [total := total + v for v in values]
print("Total:", total)
<ref> | [] |
florianhumblot/exhale | exhale/deploy.py | d6fa84fa32ee079c6b70898a1b0863a38e703591 | # -*- coding: utf8 -*-
########################################################################################
# This file is part of exhale. Copyright (c) 2017-2022, Stephen McDowell. #
# Full BSD 3-Clause license available here: #
# #
# https://github.com/svenevs/exhale/blob/master/LICENSE #
########################################################################################
'''
The deploy module is responsible for two primary actions:
1. Executing Doxygen (if requested in ``exhale_args``).
2. Launching the full API generation via the :func:`~exhale.deploy.explode` function.
'''
from __future__ import unicode_literals
from . import configs
from . import utils
from .graph import ExhaleRoot
import os
import sys
import six
import re
import codecs
import tempfile
import textwrap
from subprocess import PIPE, Popen, STDOUT
def _generate_doxygen(doxygen_input):
'''
This method executes doxygen based off of the specified input. By the time this
method is executed, it is assumed that Doxygen is intended to be run in the
**current working directory**. Search for ``returnPath`` in the implementation of
:func:`~exhale.configs.apply_sphinx_configurations` for handling of this aspect.
This method is intended to be called by :func:`~exhale.deploy.generateDoxygenXML`,
which is in turn called by :func:`~exhale.configs.apply_sphinx_configurations`.
Two versions of the
doxygen command can be executed:
1. If ``doxygen_input`` is exactly ``"Doxyfile"``, then it is assumed that a
``Doxyfile`` exists in the **current working directory**. Meaning the command
being executed is simply ``doxygen``.
    2. For all other values, ``doxygen_input`` represents the arguments to be
       specified on ``stdin`` to the process (both modes are sketched below).
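    A minimal sketch of the two call styles (illustrative only; the ``INPUT`` value is just an
    example path, not something this module prescribes):
    .. code-block:: py
        _generate_doxygen("Doxyfile")            # 1. use the Doxyfile in the current directory
        _generate_doxygen("INPUT = ../include")  # 2. pipe this configuration to doxygen on stdin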
**Parameters**
``doxygen_input`` (str)
Either the string ``"Doxyfile"`` to run vanilla ``doxygen``, or the
selection of doxygen inputs (that would ordinarily be in a ``Doxyfile``)
that will be ``communicate``d to the ``doxygen`` process on ``stdin``.
.. note::
If using Python **3**, the input **must** still be a ``str``. This
method will convert the input to ``bytes`` as follows:
.. code-block:: py
if sys.version[0] == "3":
doxygen_input = bytes(doxygen_input, "utf-8")
**Return**
``str`` or ``None``
If an error occurs, a string describing the error is returned with the
intention of the caller raising the exception. If ``None`` is returned,
then the process executed without error. Example usage:
.. code-block:: py
                status = _generate_doxygen("Doxyfile")
if status:
raise RuntimeError(status)
Though a little awkward, this is done to enable the intended caller of this
method to restore some state before exiting the program (namely, the working
directory before propagating an exception to ``sphinx-build``).
'''
if not isinstance(doxygen_input, six.string_types):
return "Error: the `doxygen_input` variable must be of type `str`."
doxyfile = doxygen_input == "Doxyfile"
try:
# Setup the arguments to launch doxygen
if doxyfile:
args = ["doxygen"]
kwargs = {}
else:
args = ["doxygen", "-"]
kwargs = {"stdin": PIPE}
if configs._on_rtd:
# On RTD, any capturing of Doxygen output can cause buffer overflows for
# even medium sized projects. So it is disregarded entirely to ensure the
# build will complete (otherwise, it silently fails after `cat conf.py`)
devnull_file = open(os.devnull, "w")
kwargs["stdout"] = devnull_file
kwargs["stderr"] = STDOUT
else:
# TL;DR: strictly enforce that (verbose) doxygen output doesn't cause the
# `communicate` to hang due to buffer overflows.
#
# See excellent synopsis:
# https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/
if six.PY2:
tempfile_kwargs = {}
else:
# encoding argument introduced in python 3
tempfile_kwargs = {"encoding": "utf-8"}
tempfile_kwargs["mode"] = "r+"
tmp_out_file = tempfile.TemporaryFile(
prefix="doxygen_stdout_buff", **tempfile_kwargs
)
tmp_err_file = tempfile.TemporaryFile(
prefix="doxygen_stderr_buff", **tempfile_kwargs
)
# Write to the tempfiles over PIPE to avoid buffer overflowing
kwargs["stdout"] = tmp_out_file
kwargs["stderr"] = tmp_err_file
# Note: overload of args / kwargs, Popen is expecting a list as the first
# parameter (aka no *args, just args)!
doxygen_proc = Popen(args, **kwargs)
# Communicate can only be called once, arrange whether or not stdin has value
if not doxyfile:
# In Py3, make sure we are communicating a bytes-like object which is no
# longer interchangeable with strings (as was the case in Py2).
if sys.version[0] == "3":
doxygen_input = bytes(doxygen_input, "utf-8")
comm_kwargs = {"input": doxygen_input}
else:
comm_kwargs = {}
# Waits until doxygen has completed
doxygen_proc.communicate(**comm_kwargs)
# Print out what was written to the tmpfiles by doxygen
if not configs._on_rtd and not configs.exhaleSilentDoxygen:
# Doxygen output (some useful information, mostly just enumeration of the
# configurations you gave it {useful for debugging...})
if tmp_out_file.tell() > 0:
tmp_out_file.seek(0)
print(tmp_out_file.read())
# Doxygen error (e.g. any warnings, or invalid input)
if tmp_err_file.tell() > 0:
# Making them stick out, ideally users would reduce this output to 0 ;)
# This will print a yellow [~] before every line, but not make the
# entire line yellow because it's definitively not helpful
prefix = utils._use_color(
utils.prefix("[~]", " "), utils.AnsiColors.BOLD_YELLOW, sys.stderr
)
tmp_err_file.seek(0)
sys.stderr.write(utils.prefix(prefix, tmp_err_file.read()))
# Close the file handles opened for communication with subprocess
if configs._on_rtd:
devnull_file.close()
else:
# Delete the tmpfiles
tmp_out_file.close()
tmp_err_file.close()
# Make sure we had a valid execution of doxygen
exit_code = doxygen_proc.returncode
if exit_code != 0:
raise RuntimeError("Non-zero return code of [{0}] from 'doxygen'...".format(exit_code))
except Exception as e:
return "Unable to execute 'doxygen': {0}".format(e)
# returning None signals _success_
return None
def _valid_config(config, required):
'''
.. todo:: add documentation of this method
    ``config``: the doxygen configuration variable we're looking for
    ``required``: if ``True``, it must be present; if ``False``, it is NOT ALLOWED to be present
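    A usage sketch (illustrative; both names below are configs this module actually checks):
    .. code-block:: py
        _valid_config("INPUT", True)              # INPUT must appear in exhaleDoxygenStdin
        _valid_config("OUTPUT_DIRECTORY", False)  # OUTPUT_DIRECTORY must *not* appear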
'''
re_template = r"\s*{config}\s*=.*".format(config=config)
found = re.search(re_template, configs.exhaleDoxygenStdin)
if required:
return found is not None
else:
return found is None
def generateDoxygenXML():
# If this happens, we really shouldn't be here...
if not configs.exhaleExecutesDoxygen:
return textwrap.dedent('''
`generateDoxygenXML` should *ONLY* be called internally. You should
set `exhaleExecutesDoxygen=True` in `exhale_args` in `conf.py`.
''')
# Case 1: the user has their own `Doxyfile`.
if configs.exhaleUseDoxyfile:
return _generate_doxygen("Doxyfile")
# Case 2: use stdin, with some defaults and potentially additional specs from user
else:
# There are two doxygen specs that we explicitly disallow
#
# 1. OUTPUT_DIRECTORY: this is *ALREADY* specified implicitly via breathe
# 2. STRIP_FROM_PATH: this is a *REQUIRED* config (`doxygenStripFromPath`)
#
# There is one doxygen spec that is REQUIRED to be given:
#
# 1. INPUT (where doxygen should parse).
#
# The below is a modest attempt to validate that these were / were not given.
if not isinstance(configs.exhaleDoxygenStdin, six.string_types):
return "`exhaleDoxygenStdin` config must be a string!"
if not _valid_config("OUTPUT_DIRECTORY", False):
# If we are hitting this code, these should both exist and be configured
# since this method is called **AFTER** the configuration verification code
# performed in configs.apply_sphinx_configurations
breathe_projects = configs._the_app.config.breathe_projects
breathe_default_project = configs._the_app.config.breathe_default_project
return textwrap.dedent('''
`exhaleDoxygenStdin` may *NOT* specify `OUTPUT_DIRECTORY`. Exhale does
this internally by reading what you provided to `breathe_projects` in
your `conf.py`.
Based on what you had in `conf.py`, Exhale will be using
- The `breathe_default_project`:
{default}
- The output path specfied (`breathe_projects[breathe_default_project]`):
{path}
NOTE: the above path has the `xml` portion removed from what you
provided. This path is what is sent to Doxygen, Breathe
requires you include the `xml` directory path; so Exhale simply
re-uses this variable and adapts the value for our needs.
'''.format(
default=breathe_default_project,
path=breathe_projects[breathe_default_project].rsplit("{sep}xml".format(sep=os.sep), 1)[0]
))
if not _valid_config("STRIP_FROM_PATH", False):
return textwrap.dedent('''
`exhaleDoxygenStdin` may *NOT* specify `STRIP_FROM_PATH`. Exhale does
this internally by using the value you provided to `exhale_args` in
your `conf.py` for the key `doxygenStripFromPath`.
Based on what you had in `conf.py`, Exhale will be using:
{strip}
NOTE: the above is what you specified directly in `exhale_args`. Exhale
will be using an absolute path to send to Doxygen. It is:
{absolute}
'''.format(
strip=configs._the_app.config.exhale_args["doxygenStripFromPath"],
absolute=configs.doxygenStripFromPath
))
if not _valid_config("INPUT", True):
return textwrap.dedent('''
`exhaleDoxygenStdin` *MUST* specify the `INPUT` doxygen config variable.
The INPUT variable is what tells Doxygen where to look for code to
extract documentation from. For example, if you had a directory layout
project_root/
docs/
conf.py
Makefile
... etc ...
include/
my_header.hpp
src/
my_header.cpp
Then you would include the line
INPUT = ../include
in the string provided to `exhale_args["exhaleDoxygenStdin"]`.
''')
# For these, we just want to warn them of the impact but still allow an override
re_template = r"\s*{config}\s*=\s*(.*)"
for cfg in ("ALIASES", "PREDEFINED"):
found = re.search(re_template.format(config=cfg), configs.exhaleDoxygenStdin)
if found:
sys.stderr.write(utils.info(textwrap.dedent('''
You have supplied to `exhaleDoxygenStdin` a configuration of:
{cfg} = {theirs}
This has an important impact, as it overrides a default setting that
Exhale is using.
1. If you are intentionally overriding this configuration, simply
ignore this message --- what you intended will happen.
2. If you meant to _continue_ adding to the defaults Exhale provides,
you need to use a `+=` instead of a raw `=`. So do instead
{cfg} += {theirs}
'''.format(cfg=cfg, theirs=found.groups()[0])), utils.AnsiColors.BOLD_YELLOW))
# Include their custom doxygen definitions after the defaults so that they can
# override anything they want to. Populate the necessary output dir and strip path.
doxy_dir = configs._doxygen_xml_output_directory.rsplit("{sep}xml".format(sep=os.sep), 1)[0]
internal_configs = textwrap.dedent('''
# Tell doxygen to output wherever breathe is expecting things
OUTPUT_DIRECTORY = "{out}"
# Tell doxygen to strip the path names (RTD builds produce long abs paths...)
STRIP_FROM_PATH = "{strip}"
'''.format(out=doxy_dir, strip=configs.doxygenStripFromPath))
external_configs = textwrap.dedent(configs.exhaleDoxygenStdin)
# Place external configs last so that if the _valid_config method isn't actually
# catching what it should be, the internal configs will override theirs
full_input = "{base}\n{external}\n{internal}\n\n".format(base=configs.DEFAULT_DOXYGEN_STDIN_BASE,
external=external_configs,
internal=internal_configs)
# << verboseBuild
if configs.verboseBuild:
msg = "[*] The following input will be sent to Doxygen:\n"
if not configs.alwaysColorize and not sys.stderr.isatty():
sys.stderr.write(msg)
sys.stderr.write(full_input)
else:
sys.stderr.write(utils.colorize(msg, utils.AnsiColors.BOLD_CYAN))
sys.stderr.write(utils.__fancy(full_input, "make", "console"))
return _generate_doxygen(full_input)
########################################################################################
#
##
###
####
##### Primary entry point.
####
###
##
#
########################################################################################
def explode():
'''
This method **assumes** that :func:`~exhale.configs.apply_sphinx_configurations` has
already been applied. It performs minimal sanity checking, and then performs in
order
1. Creates a :class:`~exhale.graph.ExhaleRoot` object.
2. Executes :func:`~exhale.graph.ExhaleRoot.parse` for this object.
3. Executes :func:`~exhale.graph.ExhaleRoot.generateFullAPI` for this object.
4. Executes :func:`~exhale.graph.ExhaleRoot.toConsole` for this object (which will
only produce output when :data:`~exhale.configs.verboseBuild` is ``True``).
This results in the full API being generated, and control is subsequently passed
back to Sphinx to now read in the source documents (many of which were just
generated in :data:`~exhale.configs.containmentFolder`), and proceed to writing the
final output.
'''
# Quick sanity check to make sure the bare minimum have been set in the configs
err_msg = "`configs.{config}` was `None`. Do not call `deploy.explode` directly."
if configs.containmentFolder is None:
raise RuntimeError(err_msg.format(config="containmentFolder"))
if configs.rootFileName is None:
raise RuntimeError(err_msg.format(config="rootFileName"))
if configs.doxygenStripFromPath is None:
raise RuntimeError(err_msg.format(config="doxygenStripFromPath"))
# From here on, we assume that everything else has been checked / configured.
try:
textRoot = ExhaleRoot()
except:
utils.fancyError("Unable to create an `ExhaleRoot` object:")
try:
sys.stdout.write("{0}\n".format(utils.info("Exhale: parsing Doxygen XML.")))
start = utils.get_time()
textRoot.parse()
end = utils.get_time()
sys.stdout.write("{0}\n".format(
utils.progress("Exhale: finished parsing Doxygen XML in {0}.".format(
utils.time_string(start, end)
))
))
except:
utils.fancyError("Exception caught while parsing:")
try:
sys.stdout.write("{0}\n".format(
utils.info("Exhale: generating reStructuredText documents.")
))
start = utils.get_time()
textRoot.generateFullAPI()
end = utils.get_time()
sys.stdout.write("{0}\n".format(
utils.progress("Exhale: generated reStructuredText documents in {0}.".format(
utils.time_string(start, end)
))
))
except:
utils.fancyError("Exception caught while generating:")
# << verboseBuild
# toConsole only prints if verbose mode is enabled
textRoot.toConsole()
# allow access to the result after-the-fact
configs._the_app.exhale_root = textRoot
| [((7789, 7839), 're.search', 're.search', (['re_template', 'configs.exhaleDoxygenStdin'], {}), '(re_template, configs.exhaleDoxygenStdin)\n', (7798, 7839), False, 'import re\n'), ((5253, 5274), 'subprocess.Popen', 'Popen', (['args'], {}), '(args, **kwargs)\n', (5258, 5274), False, 'from subprocess import PIPE, Popen, STDOUT\n'), ((8068, 8267), 'textwrap.dedent', 'textwrap.dedent', (['"""\n `generateDoxygenXML` should *ONLY* be called internally. You should\n set `exhaleExecutesDoxygen=True` in `exhale_args` in `conf.py`.\n """'], {}), '(\n """\n `generateDoxygenXML` should *ONLY* be called internally. You should\n set `exhaleExecutesDoxygen=True` in `exhale_args` in `conf.py`.\n """\n )\n', (8083, 8267), False, 'import textwrap\n'), ((14055, 14098), 'textwrap.dedent', 'textwrap.dedent', (['configs.exhaleDoxygenStdin'], {}), '(configs.exhaleDoxygenStdin)\n', (14070, 14098), False, 'import textwrap\n'), ((4699, 4770), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', ([], {'prefix': '"""doxygen_stdout_buff"""'}), "(prefix='doxygen_stdout_buff', **tempfile_kwargs)\n", (4721, 4770), False, 'import tempfile\n'), ((4828, 4899), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', ([], {'prefix': '"""doxygen_stderr_buff"""'}), "(prefix='doxygen_stderr_buff', **tempfile_kwargs)\n", (4850, 4899), False, 'import tempfile\n'), ((11483, 12282), 'textwrap.dedent', 'textwrap.dedent', (['"""\n `exhaleDoxygenStdin` *MUST* specify the `INPUT` doxygen config variable.\n The INPUT variable is what tells Doxygen where to look for code to\n extract documentation from. For example, if you had a directory layout\n\n project_root/\n docs/\n conf.py\n Makefile\n ... etc ...\n include/\n my_header.hpp\n src/\n my_header.cpp\n\n Then you would include the line\n\n INPUT = ../include\n\n in the string provided to `exhale_args["exhaleDoxygenStdin"]`.\n """'], {}), '(\n """\n `exhaleDoxygenStdin` *MUST* specify the `INPUT` doxygen config variable.\n The INPUT variable is what tells Doxygen where to look for code to\n extract documentation from. For example, if you had a directory layout\n\n project_root/\n docs/\n conf.py\n Makefile\n ... etc ...\n include/\n my_header.hpp\n src/\n my_header.cpp\n\n Then you would include the line\n\n INPUT = ../include\n\n in the string provided to `exhale_args["exhaleDoxygenStdin"]`.\n """\n )\n', (11498, 12282), False, 'import textwrap\n'), ((14776, 14797), 'sys.stderr.write', 'sys.stderr.write', (['msg'], {}), '(msg)\n', (14792, 14797), False, 'import sys\n'), ((14814, 14842), 'sys.stderr.write', 'sys.stderr.write', (['full_input'], {}), '(full_input)\n', (14830, 14842), False, 'import sys\n'), ((14739, 14758), 'sys.stderr.isatty', 'sys.stderr.isatty', ([], {}), '()\n', (14756, 14758), False, 'import sys\n')] |
rloganiv/bayesian-blackbox | src/bayesian_reliability_comparison.py | 6a111553200b6aa755149e08174abe1a61d37198 | import argparse
import multiprocessing
import os
import random
import numpy as np
from data_utils import DATAFILE_LIST, DATASET_LIST, prepare_data, RESULTS_DIR
from models import SumOfBetaEce
random.seed(2020)
num_cores = multiprocessing.cpu_count()
NUM_BINS = 10
NUM_RUNS = 100
N_list = [100, 200, 500, 1000, 2000, 5000, 10000]
OUTPUT_DIR = RESULTS_DIR + "bayesian_reliability_comparison/"
def main(args) -> None:
    # OUTPUT_DIR is re-bound below (the "online_weights/" suffix); without this declaration Python
    # would treat it as a local and every later read would raise UnboundLocalError.
    global OUTPUT_DIR
# load data
categories, observations, confidences, idx2category, category2idx, labels = prepare_data(
DATAFILE_LIST[args.dataset], False)
# train a ground_truth ece model
if args.ground_truth_type == 'bayesian':
ground_truth_model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=args.pseudocount)
else:
ground_truth_model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=1e-3)
ground_truth_model.update_batch(confidences, observations)
results = np.zeros((args.num_runs, len(N_list), 5))
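    # results columns: N, bayesian ECE, frequentist ECE, bayesian estimation error, frequentist estimation error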
for run_id in range(args.num_runs):
tmp = list(zip(confidences, observations))
random.shuffle(tmp)
confidences, observations = zip(*tmp)
model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=args.pseudocount)
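        # feed the shuffled stream to the model in increasing prefixes of N_list, scoring after each chunk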
for i in range(len(N_list)):
tmp = 0 if i == 0 else N_list[i - 1]
model.update_batch(confidences[tmp: N_list[i]], observations[tmp: N_list[i]])
results[run_id, i, 0] = N_list[i]
results[run_id, i, 1] = model.eval
results[run_id, i, 2] = model.frequentist_eval
results[run_id, i, 3] = model.calibration_estimation_error(ground_truth_model, args.weight_type)
results[run_id, i, 4] = model.frequentist_calibration_estimation_error(ground_truth_model, args.weight_type)
results_mean = np.mean(results, axis=0)
results_variance = np.std(results, axis=0)
if args.weight_type == 'online':
OUTPUT_DIR += "online_weights/"
try:
os.stat(OUTPUT_DIR)
    except OSError:
os.mkdir(OUTPUT_DIR)
if args.ground_truth_type == 'frequentist':
filename_mean = OUTPUT_DIR + "frequentist_ground_truth_%s_pseudocount%d.csv" % (args.dataset, args.pseudocount)
filename_std = OUTPUT_DIR + "frequentist_ground_truth_%s_pseudocount%d_std.csv" % (
args.dataset, args.pseudocount)
else:
filename_mean = OUTPUT_DIR + "bayesian_ground_truth_%s_pseudocount%d.csv" % (args.dataset, args.pseudocount)
filename_std = OUTPUT_DIR + "bayesian_ground_truth_%s_pseudocount%d_std.csv" % (
args.dataset, args.pseudocount)
header = 'N, bayesian_ece, frequentist_ece, bayesian_estimation_error, frequentist_estimation_error'
np.savetxt(filename_mean, results_mean, delimiter=',', header=header)
np.savetxt(filename_std, results_variance, delimiter=',', header=header)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('dataset', type=str, default='cifar100', help='input dataset')
parser.add_argument('-pseudocount', type=int, default=1, help='strength of prior')
parser.add_argument('-ground_truth_type', type=str, default='bayesian',
help='compute ground truth in a Bayesian or frequentist way, bayesian or frequentist')
parser.add_argument('-weight_type', type=str, default='pool',
help='weigh each bin with all data or only data seen so far, online or pool')
parser.add_argument('--num_runs', type=int, default=NUM_RUNS, help='number of runs')
parser.add_argument('--num_bins', type=int, default=NUM_BINS, help='number of bins in reliability diagram')
args, _ = parser.parse_known_args()
if args.dataset not in DATASET_LIST:
raise ValueError("%s is not in DATASET_LIST." % args.dataset)
main(args)
| [((195, 212), 'random.seed', 'random.seed', (['(2020)'], {}), '(2020)\n', (206, 212), False, 'import random\n'), ((225, 252), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (250, 252), False, 'import multiprocessing\n'), ((517, 565), 'data_utils.prepare_data', 'prepare_data', (['DATAFILE_LIST[args.dataset]', '(False)'], {}), '(DATAFILE_LIST[args.dataset], False)\n', (529, 565), False, 'from data_utils import DATAFILE_LIST, DATASET_LIST, prepare_data, RESULTS_DIR\n'), ((1798, 1822), 'numpy.mean', 'np.mean', (['results'], {'axis': '(0)'}), '(results, axis=0)\n', (1805, 1822), True, 'import numpy as np\n'), ((1846, 1869), 'numpy.std', 'np.std', (['results'], {'axis': '(0)'}), '(results, axis=0)\n', (1852, 1869), True, 'import numpy as np\n'), ((2701, 2770), 'numpy.savetxt', 'np.savetxt', (['filename_mean', 'results_mean'], {'delimiter': '""","""', 'header': 'header'}), "(filename_mean, results_mean, delimiter=',', header=header)\n", (2711, 2770), True, 'import numpy as np\n'), ((2775, 2847), 'numpy.savetxt', 'np.savetxt', (['filename_std', 'results_variance'], {'delimiter': '""","""', 'header': 'header'}), "(filename_std, results_variance, delimiter=',', header=header)\n", (2785, 2847), True, 'import numpy as np\n'), ((2891, 2916), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2914, 2916), False, 'import argparse\n'), ((686, 752), 'models.SumOfBetaEce', 'SumOfBetaEce', ([], {'num_bins': 'args.num_bins', 'pseudocount': 'args.pseudocount'}), '(num_bins=args.num_bins, pseudocount=args.pseudocount)\n', (698, 752), False, 'from models import SumOfBetaEce\n'), ((792, 847), 'models.SumOfBetaEce', 'SumOfBetaEce', ([], {'num_bins': 'args.num_bins', 'pseudocount': '(0.001)'}), '(num_bins=args.num_bins, pseudocount=0.001)\n', (804, 847), False, 'from models import SumOfBetaEce\n'), ((1068, 1087), 'random.shuffle', 'random.shuffle', (['tmp'], {}), '(tmp)\n', (1082, 1087), False, 'import random\n'), ((1151, 1217), 'models.SumOfBetaEce', 'SumOfBetaEce', ([], {'num_bins': 'args.num_bins', 'pseudocount': 'args.pseudocount'}), '(num_bins=args.num_bins, pseudocount=args.pseudocount)\n', (1163, 1217), False, 'from models import SumOfBetaEce\n'), ((1965, 1984), 'os.stat', 'os.stat', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (1972, 1984), False, 'import os\n'), ((2005, 2025), 'os.mkdir', 'os.mkdir', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (2013, 2025), False, 'import os\n')] |
SamKG/PsyNeuLink | psyneulink/core/components/functions/statefulfunctions/statefulfunction.py | 70558bcd870868e1688cb7a7c424d29ca336f2df | #
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#
#
# ***************************************** STATEFUL FUNCTION *********************************************************
"""
* `StatefulFunction`
* `IntegratorFunctions`
* `MemoryFunctions`
"""
import abc
import typecheck as tc
import warnings
import numbers
import numpy as np
from psyneulink.core import llvm as pnlvm
from psyneulink.core.components.component import DefaultsFlexibility, _has_initializers_setter
from psyneulink.core.components.functions.function import Function_Base, FunctionError
from psyneulink.core.components.functions.distributionfunctions import DistributionFunction
from psyneulink.core.globals.keywords import STATEFUL_FUNCTION_TYPE, STATEFUL_FUNCTION, NOISE, RATE
from psyneulink.core.globals.parameters import Parameter
from psyneulink.core.globals.utilities import parameter_spec, iscompatible, object_has_single_value, convert_to_np_array
from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set
from psyneulink.core.globals.context import ContextFlags, handle_external_context
__all__ = ['StatefulFunction']
class StatefulFunction(Function_Base): # ---------------------------------------------------------------------
"""
StatefulFunction( \
default_variable=None, \
initializer, \
rate=1.0, \
noise=0.0, \
params=None, \
owner=None, \
prefs=None, \
)
.. _StatefulFunction:
Abstract base class for Functions the result of which depend on their `previous_value
<StatefulFunction.previous_value>` attribute.
COMMENT:
NARRATIVE HERE THAT EXPLAINS:
A) initializers and stateful_attributes
B) initializer (note singular) is a prespecified member of initializers
           that contains the value with which to initialize previous_value
COMMENT
Arguments
---------
default_variable : number, list or array : default class_defaults.variable
specifies a template for `variable <StatefulFunction.variable>`.
initializer : float, list or 1d array : default 0.0
specifies initial value for `previous_value <StatefulFunction.previous_value>`. If it is a list or array,
it must be the same length as `variable <StatefulFunction.variable>` (see `initializer
<StatefulFunction.initializer>` for details).
rate : float, list or 1d array : default 1.0
specifies value used as a scaling parameter in a subclass-dependent way (see `rate <StatefulFunction.rate>` for
details); if it is a list or array, it must be the same length as `variable <StatefulFunction.default_variable>`.
noise : float, function, list or 1d array : default 0.0
specifies random value added in each call to `function <StatefulFunction.function>`; if it is a list or
array, it must be the same length as `variable <StatefulFunction.default_variable>` (see `noise
<StatefulFunction.noise>` for details).
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
owner : Component
`component <Component>` to which to assign the Function.
name : str : default see `name <Function.name>`
specifies the name of the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
Attributes
----------
variable : number or array
current input value.
initializer : float or 1d array
determines initial value assigned to `previous_value <StatefulFunction.previous_value>`. If `variable
<StatefulFunction.variable>` is a list or array, and initializer is a float or has a single element, it is
applied to each element of `previous_value <StatefulFunction.previous_value>`. If initializer is a list or
        array, each element is applied to the corresponding element of `previous_value <StatefulFunction.previous_value>`.
previous_value : 1d array
last value returned (i.e., for which state is being maintained).
initializers : list
stores the names of the initialization attributes for each of the stateful attributes of the function. The
index i item in initializers provides the initialization value for the index i item in `stateful_attributes
<StatefulFunction.stateful_attributes>`.
stateful_attributes : list
stores the names of each of the stateful attributes of the function. The index i item in stateful_attributes is
initialized by the value of the initialization attribute whose name is stored in index i of `initializers
<StatefulFunction.initializers>`. In most cases, the stateful_attributes, in that order, are the return values
of the function.
.. _Stateful_Rate:
rate : float or 1d array
on each call to `function <StatefulFunction.function>`, applied to `variable <StatefulFunction.variable>`,
`previous_value <StatefulFunction.previous_value>`, neither, or both, depending on implementation by
subclass. If it is a float or has a single value, it is applied to all elements of its target(s); if it has
more than one element, each element is applied to the corresponding element of its target(s).
.. _Stateful_Noise:
noise : float, function, list, or 1d array
random value added on each call to `function <StatefulFunction.function>`. If `variable
<StatefulFunction.variable>` is a list or array, and noise is a float or function, it is applied
for each element of `variable <StatefulFunction.variable>`. If noise is a function, it is executed and applied
separately for each element of `variable <StatefulFunction.variable>`. If noise is a list or array,
it is applied elementwise (i.e., in Hadamard form).
.. hint::
To generate random noise that varies for every execution, a probability distribution function should be
used (see `Distribution Functions <DistributionFunction>` for details), that generates a new noise value
from its distribution on each execution. If noise is specified as a float, a function with a fixed
output, or a list or array of either of these, then noise is simply an offset that remains the same
across all executions.
owner : Component
`component <Component>` to which the Function has been assigned.
name : str
the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict
the `PreferenceSet` for the Function; if it is not specified in the **prefs** argument of the Function's
constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
for details).
"""
componentType = STATEFUL_FUNCTION_TYPE
componentName = STATEFUL_FUNCTION
class Parameters(Function_Base.Parameters):
"""
Attributes
----------
initializer
see `initializer <StatefulFunction.initializer>`
:default value: numpy.array([0])
:type: ``numpy.ndarray``
noise
see `noise <StatefulFunction.noise>`
:default value: 0.0
:type: ``float``
previous_value
see `previous_value <StatefulFunction.previous_value>`
:default value: numpy.array([0])
:type: ``numpy.ndarray``
rate
see `rate <StatefulFunction.rate>`
:default value: 1.0
:type: ``float``
"""
noise = Parameter(0.0, modulable=True)
rate = Parameter(1.0, modulable=True)
previous_value = Parameter(np.array([0]), initializer='initializer', pnl_internal=True)
initializer = Parameter(np.array([0]), pnl_internal=True)
has_initializers = Parameter(True, setter=_has_initializers_setter, pnl_internal=True)
@handle_external_context()
@tc.typecheck
def __init__(self,
default_variable=None,
rate=None,
noise=None,
initializer=None,
params: tc.optional(tc.optional(dict)) = None,
owner=None,
prefs: tc.optional(is_pref_set) = None,
context=None,
**kwargs
):
if not hasattr(self, "initializers"):
self.initializers = ["initializer"]
if not hasattr(self, "stateful_attributes"):
self.stateful_attributes = ["previous_value"]
super().__init__(
default_variable=default_variable,
rate=rate,
initializer=initializer,
noise=noise,
params=params,
owner=owner,
prefs=prefs,
context=context,
**kwargs
)
def _validate(self, context=None):
self._validate_rate(self.defaults.rate)
self._validate_initializers(self.defaults.variable, context=context)
super()._validate(context=context)
def _validate_params(self, request_set, target_set=None, context=None):
# Handle list or array for rate specification
if RATE in request_set:
rate = request_set[RATE]
if isinstance(rate, (list, np.ndarray)) and not iscompatible(rate, self.defaults.variable):
if len(rate) != 1 and len(rate) != np.array(self.defaults.variable).size:
# If the variable was not specified, then reformat it to match rate specification
# and assign class_defaults.variable accordingly
# Note: this situation can arise when the rate is parametrized (e.g., as an array) in the
# StatefulFunction's constructor, where that is used as a specification for a function parameter
# (e.g., for an IntegratorMechanism), whereas the input is specified as part of the
# object to which the function parameter belongs (e.g., the IntegratorMechanism); in that
# case, the StatefulFunction gets instantiated using its class_defaults.variable ([[0]]) before
# the object itself, thus does not see the array specification for the input.
if self._variable_shape_flexibility is DefaultsFlexibility.FLEXIBLE:
self._instantiate_defaults(variable=np.zeros_like(np.array(rate)), context=context)
if self.verbosePref:
warnings.warn(
"The length ({}) of the array specified for the rate parameter ({}) of {} "
"must match the length ({}) of the default input ({}); "
"the default input has been updated to match".format(
len(rate),
rate,
self.name,
np.array(self.defaults.variable).size
),
self.defaults.variable,
)
else:
raise FunctionError(
"The length of the array specified for the rate parameter of {} ({}) "
"must match the length of the default input ({}).".format(
self.name,
# rate,
len(rate),
np.array(self.defaults.variable).size,
# self.defaults.variable,
)
)
super()._validate_params(request_set=request_set,
target_set=target_set,
context=context)
if NOISE in target_set:
noise = target_set[NOISE]
if isinstance(noise, DistributionFunction):
noise.owner = self
target_set[NOISE] = noise.execute
self._validate_noise(target_set[NOISE])
def _validate_initializers(self, default_variable, context=None):
for initial_value_name in self.initializers:
initial_value = self._get_current_parameter_value(initial_value_name, context=context)
if isinstance(initial_value, (list, np.ndarray)):
if len(initial_value) != 1:
# np.atleast_2d may not be necessary here?
if np.shape(np.atleast_2d(initial_value)) != np.shape(np.atleast_2d(default_variable)):
raise FunctionError("{}'s {} ({}) is incompatible with its default_variable ({}) ."
.format(self.name, initial_value_name, initial_value, default_variable))
elif not isinstance(initial_value, (float, int)):
raise FunctionError("{}'s {} ({}) must be a number or a list/array of numbers."
.format(self.name, initial_value_name, initial_value))
def _validate_rate(self, rate):
# FIX: CAN WE JUST GET RID OF THIS?
# kmantel: this duplicates much code in _validate_params above, but that calls _instantiate_defaults
# which I don't think is the right thing to do here, but if you don't call it in _validate_params
# then a lot of things don't get instantiated properly
if rate is not None:
if isinstance(rate, list):
rate = np.asarray(rate)
rate_type_msg = 'The rate parameter of {0} must be a number or an array/list of at most 1d (you gave: {1})'
if isinstance(rate, np.ndarray):
# kmantel: current test_gating test depends on 2d rate
# this should be looked at but for now this restriction is removed
# if rate.ndim > 1:
# raise FunctionError(rate_type_msg.format(self.name, rate))
pass
elif not isinstance(rate, numbers.Number):
raise FunctionError(rate_type_msg.format(self.name, rate))
if isinstance(rate, np.ndarray) and not iscompatible(rate, self.defaults.variable):
if len(rate) != 1 and len(rate) != np.array(self.defaults.variable).size:
if self._variable_shape_flexibility is DefaultsFlexibility.FLEXIBLE:
self.defaults.variable = np.zeros_like(np.array(rate))
if self.verbosePref:
warnings.warn(
"The length ({}) of the array specified for the rate parameter ({}) of {} "
"must match the length ({}) of the default input ({}); "
"the default input has been updated to match".format(
len(rate),
rate,
self.name,
np.array(self.defaults.variable).size
),
self.defaults.variable,
)
self._instantiate_value()
self._variable_shape_flexibility = DefaultsFlexibility.INCREASE_DIMENSION
else:
raise FunctionError(
"The length of the array specified for the rate parameter of {} ({})"
"must match the length of the default input ({}).".format(
len(rate),
# rate,
self.name,
np.array(self.defaults.variable).size,
# self.defaults.variable,
)
)
# Ensure that the noise parameter makes sense with the input type and shape; flag any noise functions that will
# need to be executed
def _validate_noise(self, noise):
# Noise is a list or array
if isinstance(noise, (np.ndarray, list)):
if len(noise) == 1:
pass
# Variable is a list/array
elif (not iscompatible(np.atleast_2d(noise), self.defaults.variable)
and not iscompatible(np.atleast_1d(noise), self.defaults.variable) and len(noise) > 1):
raise FunctionError(
"Noise parameter ({}) does not match default variable ({}). Noise parameter of {} "
"must be specified as a float, a function, or an array of the appropriate shape ({}).".format(
noise, self.defaults.variable, self.name,
np.shape(np.array(self.defaults.variable))
),
component=self
)
else:
for i in range(len(noise)):
if isinstance(noise[i], DistributionFunction):
noise[i] = noise[i].execute
# if not isinstance(noise[i], (float, int)) and not callable(noise[i]):
if not np.isscalar(noise[i]) and not callable(noise[i]):
raise FunctionError("The elements of a noise list or array must be scalars or functions. "
"{} is not a valid noise element for {}".format(noise[i], self.name))
def _try_execute_param(self, param, var, context=None):
# FIX: [JDC 12/18/18 - HACK TO DEAL WITH ENFORCEMENT OF 2D BELOW]
param_shape = np.array(param).shape
if not len(param_shape):
param_shape = np.array(var).shape
# param is a list; if any element is callable, execute it
if isinstance(param, (np.ndarray, list)):
# NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths
# FIX: WHY FORCE 2d??
param = np.atleast_2d(param)
for i in range(len(param)):
for j in range(len(param[i])):
try:
param[i][j] = param[i][j](context=context)
except TypeError:
try:
param[i][j] = param[i][j]()
except TypeError:
pass
try:
param = param.reshape(param_shape)
except ValueError:
if object_has_single_value(param):
param = np.full(param_shape, float(param))
# param is one function
elif callable(param):
# NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths
new_param = []
# FIX: WHY FORCE 2d??
for row in np.atleast_2d(var):
# for row in np.atleast_1d(var):
# for row in var:
new_row = []
for item in row:
try:
val = param(context=context)
except TypeError:
val = param()
new_row.append(val)
new_param.append(new_row)
param = np.asarray(new_param)
# FIX: [JDC 12/18/18 - HACK TO DEAL WITH ENFORCEMENT OF 2D ABOVE]
try:
if len(np.squeeze(param)):
param = param.reshape(param_shape)
except TypeError:
pass
return param
def _instantiate_attributes_before_function(self, function=None, context=None):
if not self.parameters.initializer._user_specified:
self._initialize_previous_value(np.zeros_like(self.defaults.variable), context)
# use np.broadcast_to to guarantee that all initializer type attributes take on the same shape as variable
if not np.isscalar(self.defaults.variable):
for attr in self.initializers:
param = getattr(self.parameters, attr)
param._set(
np.broadcast_to(
param._get(context),
self.defaults.variable.shape
).copy(),
context
)
# create all stateful attributes and initialize their values to the current values of their
# corresponding initializer attributes
for attr_name in self.stateful_attributes:
initializer_value = getattr(self.parameters, getattr(self.parameters, attr_name).initializer)._get(context).copy()
getattr(self.parameters, attr_name)._set(initializer_value, context)
super()._instantiate_attributes_before_function(function=function, context=context)
def _initialize_previous_value(self, initializer, context=None):
initializer = convert_to_np_array(initializer, dimension=1)
self.defaults.initializer = initializer.copy()
self.parameters.initializer._set(initializer.copy(), context)
self.defaults.previous_value = initializer.copy()
self.parameters.previous_value.set(initializer.copy(), context)
return initializer
@handle_external_context()
def _update_default_variable(self, new_default_variable, context=None):
if not self.parameters.initializer._user_specified:
self._initialize_previous_value(np.zeros_like(new_default_variable), context)
super()._update_default_variable(new_default_variable, context=context)
def _parse_value_order(self, **kwargs):
"""
Returns:
tuple: the values of the keyword arguments in the order
in which they appear in this Component's `value
<Component.value>`
"""
return tuple(v for k, v in kwargs.items())
@handle_external_context(fallback_most_recent=True)
def reset(self, *args, context=None, **kwargs):
"""
Resets `value <StatefulFunction.previous_value>` and `previous_value <StatefulFunction.previous_value>`
to the specified value(s).
If arguments are passed into the reset method, then reset sets each of the attributes in
`stateful_attributes <StatefulFunction.stateful_attributes>` to the value of the corresponding argument.
Next, it sets the `value <StatefulFunction.value>` to a list containing each of the argument values.
If reset is called without arguments, then it sets each of the attributes in `stateful_attributes
<StatefulFunction.stateful_attributes>` to the value of the corresponding attribute in `initializers
<StatefulFunction.initializers>`. Next, it sets the `value <StatefulFunction.value>` to a list containing
the values of each of the attributes in `initializers <StatefulFunction.initializers>`.
Often, the only attribute in `stateful_attributes <StatefulFunction.stateful_attributes>` is
`previous_value <StatefulFunction.previous_value>` and the only attribute in `initializers
<StatefulFunction.initializers>` is `initializer <StatefulFunction.initializer>`, in which case
the reset method sets `previous_value <StatefulFunction.previous_value>` and `value
<StatefulFunction.value>` to either the value of the argument (if an argument was passed into
reset) or the current value of `initializer <StatefulFunction.initializer>`.
For specific types of StatefulFunction functions, the reset method may carry out other
reinitialization steps.
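        A minimal sketch for the common single-attribute case (illustrative; ``my_fct`` stands for
        any StatefulFunction whose only stateful attribute is `previous_value
        <StatefulFunction.previous_value>`):
        .. code-block:: python
            my_fct.reset(0.5)    # previous_value (and value) are set to 0.5
            my_fct.reset()       # previous_value falls back to `initializer`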
"""
num_stateful_attrs = len(self.stateful_attributes)
if num_stateful_attrs >= 2:
# old args specification can be supported only in subclasses
# that explicitly define an order by overriding reset
if len(args) > 0:
raise FunctionError(
f'{self}.reset has more than one stateful attribute'
f' ({self.stateful_attributes}). You must specify reset'
' values by keyword.'
)
if len(kwargs) != num_stateful_attrs:
type_name = type(self).__name__
raise FunctionError(
'StatefulFunction.reset must receive a keyword argument for'
f' each item in {type_name}.stateful_attributes in the order in'
f' which they appear in {type_name}.value'
)
if num_stateful_attrs == 1:
try:
kwargs[self.stateful_attributes[0]]
except KeyError:
try:
kwargs[self.stateful_attributes[0]] = args[0]
except IndexError:
kwargs[self.stateful_attributes[0]] = None
invalid_args = []
# iterates in order arguments are sent in function call, so it
# will match their order in value as long as they are listed
# properly in subclass reset method signatures
for attr in kwargs:
try:
kwargs[attr]
except KeyError:
kwargs[attr] = None
if kwargs[attr] is not None:
# from before: unsure if conversion to 1d necessary
kwargs[attr] = np.atleast_1d(kwargs[attr])
else:
try:
kwargs[attr] = self._get_current_parameter_value(getattr(self.parameters, attr).initializer, context=context)
except AttributeError:
invalid_args.append(attr)
if len(invalid_args) > 0:
raise FunctionError(
f'Arguments {invalid_args} to reset are invalid because they do'
f" not correspond to any of {self}'s stateful_attributes"
)
# rebuilding value rather than simply returning reinitialization_values in case any of the stateful
# attrs are modified during assignment
value = []
for attr, v in kwargs.items():
# FIXME: HACK: Do not reinitialize random_state
if attr != "random_state":
getattr(self.parameters, attr).set(kwargs[attr],
context, override=True)
value.append(getattr(self.parameters, attr)._get(context))
self.parameters.value.set(value, context, override=True)
return value
def _gen_llvm_function_reset(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset):
assert "reset" in tags
for a in self.stateful_attributes:
initializer = getattr(self.parameters, a).initializer
source_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, initializer)
dest_ptr = pnlvm.helpers.get_state_ptr(builder, self, state, a)
if source_ptr.type != dest_ptr.type:
warnings.warn("Shape mismatch: stateful param does not match the initializer: {}({}) vs. {}({})".format(initializer, source_ptr.type, a, dest_ptr.type))
# Take a guess that dest just has an extra dimension
assert len(dest_ptr.type.pointee) == 1
dest_ptr = builder.gep(dest_ptr, [ctx.int32_ty(0),
ctx.int32_ty(0)])
builder.store(builder.load(source_ptr), dest_ptr)
return builder
@abc.abstractmethod
def _function(self, *args, **kwargs):
raise FunctionError("StatefulFunction is not meant to be called explicitly")
| [((9174, 9199), 'psyneulink.core.globals.context.handle_external_context', 'handle_external_context', ([], {}), '()\n', (9197, 9199), False, 'from psyneulink.core.globals.context import ContextFlags, handle_external_context\n'), ((22615, 22640), 'psyneulink.core.globals.context.handle_external_context', 'handle_external_context', ([], {}), '()\n', (22638, 22640), False, 'from psyneulink.core.globals.context import ContextFlags, handle_external_context\n'), ((23266, 23316), 'psyneulink.core.globals.context.handle_external_context', 'handle_external_context', ([], {'fallback_most_recent': '(True)'}), '(fallback_most_recent=True)\n', (23289, 23316), False, 'from psyneulink.core.globals.context import ContextFlags, handle_external_context\n'), ((8834, 8864), 'psyneulink.core.globals.parameters.Parameter', 'Parameter', (['(0.0)'], {'modulable': '(True)'}), '(0.0, modulable=True)\n', (8843, 8864), False, 'from psyneulink.core.globals.parameters import Parameter\n'), ((8880, 8910), 'psyneulink.core.globals.parameters.Parameter', 'Parameter', (['(1.0)'], {'modulable': '(True)'}), '(1.0, modulable=True)\n', (8889, 8910), False, 'from psyneulink.core.globals.parameters import Parameter\n'), ((9100, 9167), 'psyneulink.core.globals.parameters.Parameter', 'Parameter', (['(True)'], {'setter': '_has_initializers_setter', 'pnl_internal': '(True)'}), '(True, setter=_has_initializers_setter, pnl_internal=True)\n', (9109, 9167), False, 'from psyneulink.core.globals.parameters import Parameter\n'), ((22278, 22323), 'psyneulink.core.globals.utilities.convert_to_np_array', 'convert_to_np_array', (['initializer'], {'dimension': '(1)'}), '(initializer, dimension=1)\n', (22297, 22323), False, 'from psyneulink.core.globals.utilities import parameter_spec, iscompatible, object_has_single_value, convert_to_np_array\n'), ((28973, 29043), 'psyneulink.core.components.functions.function.FunctionError', 'FunctionError', (['"""StatefulFunction is not meant to be called explicitly"""'], {}), "('StatefulFunction is not meant to be called explicitly')\n", (28986, 29043), False, 'from psyneulink.core.components.functions.function import Function_Base, FunctionError\n'), ((8946, 8959), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (8954, 8959), True, 'import numpy as np\n'), ((9039, 9052), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (9047, 9052), True, 'import numpy as np\n'), ((9490, 9514), 'typecheck.optional', 'tc.optional', (['is_pref_set'], {}), '(is_pref_set)\n', (9501, 9514), True, 'import typecheck as tc\n'), ((19016, 19031), 'numpy.array', 'np.array', (['param'], {}), '(param)\n', (19024, 19031), True, 'import numpy as np\n'), ((19386, 19406), 'numpy.atleast_2d', 'np.atleast_2d', (['param'], {}), '(param)\n', (19399, 19406), True, 'import numpy as np\n'), ((21312, 21347), 'numpy.isscalar', 'np.isscalar', (['self.defaults.variable'], {}), '(self.defaults.variable)\n', (21323, 21347), True, 'import numpy as np\n'), ((27119, 27262), 'psyneulink.core.components.functions.function.FunctionError', 'FunctionError', (['f"""Arguments {invalid_args} to reset are invalid because they do not correspond to any of {self}\'s stateful_attributes"""'], {}), '(\n f"Arguments {invalid_args} to reset are invalid because they do not correspond to any of {self}\'s stateful_attributes"\n )\n', (27132, 27262), False, 'from psyneulink.core.components.functions.function import Function_Base, FunctionError\n'), ((28189, 28252), 'psyneulink.core.llvm.helpers.get_param_ptr', 'pnlvm.helpers.get_param_ptr', (['builder', 'self', 
'params', 'initializer'], {}), '(builder, self, params, initializer)\n', (28216, 28252), True, 'from psyneulink.core import llvm as pnlvm\n'), ((28276, 28328), 'psyneulink.core.llvm.helpers.get_state_ptr', 'pnlvm.helpers.get_state_ptr', (['builder', 'self', 'state', 'a'], {}), '(builder, self, state, a)\n', (28303, 28328), True, 'from psyneulink.core import llvm as pnlvm\n'), ((9410, 9427), 'typecheck.optional', 'tc.optional', (['dict'], {}), '(dict)\n', (9421, 9427), True, 'import typecheck as tc\n'), ((14889, 14905), 'numpy.asarray', 'np.asarray', (['rate'], {}), '(rate)\n', (14899, 14905), True, 'import numpy as np\n'), ((19097, 19110), 'numpy.array', 'np.array', (['var'], {}), '(var)\n', (19105, 19110), True, 'import numpy as np\n'), ((20243, 20261), 'numpy.atleast_2d', 'np.atleast_2d', (['var'], {}), '(var)\n', (20256, 20261), True, 'import numpy as np\n'), ((20656, 20677), 'numpy.asarray', 'np.asarray', (['new_param'], {}), '(new_param)\n', (20666, 20677), True, 'import numpy as np\n'), ((21133, 21170), 'numpy.zeros_like', 'np.zeros_like', (['self.defaults.variable'], {}), '(self.defaults.variable)\n', (21146, 21170), True, 'import numpy as np\n'), ((22821, 22856), 'numpy.zeros_like', 'np.zeros_like', (['new_default_variable'], {}), '(new_default_variable)\n', (22834, 22856), True, 'import numpy as np\n'), ((25354, 25503), 'psyneulink.core.components.functions.function.FunctionError', 'FunctionError', (['f"""{self}.reset has more than one stateful attribute ({self.stateful_attributes}). You must specify reset values by keyword."""'], {}), "(\n f'{self}.reset has more than one stateful attribute ({self.stateful_attributes}). You must specify reset values by keyword.'\n )\n", (25367, 25503), False, 'from psyneulink.core.components.functions.function import Function_Base, FunctionError\n'), ((25699, 25885), 'psyneulink.core.components.functions.function.FunctionError', 'FunctionError', (['f"""StatefulFunction.reset must receive a keyword argument for each item in {type_name}.stateful_attributes in the order in which they appear in {type_name}.value"""'], {}), "(\n f'StatefulFunction.reset must receive a keyword argument for each item in {type_name}.stateful_attributes in the order in which they appear in {type_name}.value'\n )\n", (25712, 25885), False, 'from psyneulink.core.components.functions.function import Function_Base, FunctionError\n'), ((26784, 26811), 'numpy.atleast_1d', 'np.atleast_1d', (['kwargs[attr]'], {}), '(kwargs[attr])\n', (26797, 26811), True, 'import numpy as np\n'), ((10573, 10615), 'psyneulink.core.globals.utilities.iscompatible', 'iscompatible', (['rate', 'self.defaults.variable'], {}), '(rate, self.defaults.variable)\n', (10585, 10615), False, 'from psyneulink.core.globals.utilities import parameter_spec, iscompatible, object_has_single_value, convert_to_np_array\n'), ((15549, 15591), 'psyneulink.core.globals.utilities.iscompatible', 'iscompatible', (['rate', 'self.defaults.variable'], {}), '(rate, self.defaults.variable)\n', (15561, 15591), False, 'from psyneulink.core.globals.utilities import parameter_spec, iscompatible, object_has_single_value, convert_to_np_array\n'), ((19902, 19932), 'psyneulink.core.globals.utilities.object_has_single_value', 'object_has_single_value', (['param'], {}), '(param)\n', (19925, 19932), False, 'from psyneulink.core.globals.utilities import parameter_spec, iscompatible, object_has_single_value, convert_to_np_array\n'), ((20796, 20813), 'numpy.squeeze', 'np.squeeze', (['param'], {}), '(param)\n', (20806, 20813), True, 'import 
numpy as np\n'), ((10668, 10700), 'numpy.array', 'np.array', (['self.defaults.variable'], {}), '(self.defaults.variable)\n', (10676, 10700), True, 'import numpy as np\n'), ((13889, 13917), 'numpy.atleast_2d', 'np.atleast_2d', (['initial_value'], {}), '(initial_value)\n', (13902, 13917), True, 'import numpy as np\n'), ((13931, 13962), 'numpy.atleast_2d', 'np.atleast_2d', (['default_variable'], {}), '(default_variable)\n', (13944, 13962), True, 'import numpy as np\n'), ((15644, 15676), 'numpy.array', 'np.array', (['self.defaults.variable'], {}), '(self.defaults.variable)\n', (15652, 15676), True, 'import numpy as np\n'), ((15835, 15849), 'numpy.array', 'np.array', (['rate'], {}), '(rate)\n', (15843, 15849), True, 'import numpy as np\n'), ((17662, 17682), 'numpy.atleast_2d', 'np.atleast_2d', (['noise'], {}), '(noise)\n', (17675, 17682), True, 'import numpy as np\n'), ((17747, 17767), 'numpy.atleast_1d', 'np.atleast_1d', (['noise'], {}), '(noise)\n', (17760, 17767), True, 'import numpy as np\n'), ((18169, 18201), 'numpy.array', 'np.array', (['self.defaults.variable'], {}), '(self.defaults.variable)\n', (18177, 18201), True, 'import numpy as np\n'), ((18579, 18600), 'numpy.isscalar', 'np.isscalar', (['noise[i]'], {}), '(noise[i])\n', (18590, 18600), True, 'import numpy as np\n'), ((11729, 11743), 'numpy.array', 'np.array', (['rate'], {}), '(rate)\n', (11737, 11743), True, 'import numpy as np\n'), ((12881, 12913), 'numpy.array', 'np.array', (['self.defaults.variable'], {}), '(self.defaults.variable)\n', (12889, 12913), True, 'import numpy as np\n'), ((17116, 17148), 'numpy.array', 'np.array', (['self.defaults.variable'], {}), '(self.defaults.variable)\n', (17124, 17148), True, 'import numpy as np\n'), ((12307, 12339), 'numpy.array', 'np.array', (['self.defaults.variable'], {}), '(self.defaults.variable)\n', (12315, 12339), True, 'import numpy as np\n'), ((16395, 16427), 'numpy.array', 'np.array', (['self.defaults.variable'], {}), '(self.defaults.variable)\n', (16403, 16427), True, 'import numpy as np\n')] |