id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses 1)
---|---|---|
122985
|
import numpy as np
from sklearn.model_selection import KFold
from sklearn.mixture import GaussianMixture
from routines import Routines
import configparser
config = configparser.ConfigParser()
config.read('../system.ini')
routines = Routines(config)
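# --- Hypothetical continuation (not part of the original snippet) ---
# A minimal sketch of how the imports above are commonly combined: choose the number
# of GaussianMixture components by k-fold cross-validated held-out log-likelihood.
# The data X and the candidate component counts are invented for illustration;
# the Routines/config objects from the original snippet are not used here.
X = np.random.default_rng(0).normal(size=(500, 2))
kf = KFold(n_splits=5, shuffle=True, random_state=0)
for n_components in (1, 2, 3):
    scores = []
    for train_idx, test_idx in kf.split(X):
        gmm = GaussianMixture(n_components=n_components, random_state=0).fit(X[train_idx])
        scores.append(gmm.score(X[test_idx]))  # mean per-sample log-likelihood on the held-out fold
    print(n_components, np.mean(scores))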
|
StarcoderdataPython
|
1792020
|
""" This module contains classes that implement a map->for loop transformation.
"""
import dace
from copy import deepcopy as dcpy
from dace import data, symbolic, dtypes, subsets
from dace.graph import edges, nodes, nxutil
from dace.transformation import pattern_matching
from math import ceil
import sympy
import networkx as nx
class MapToForLoop(pattern_matching.Transformation):
""" Implements the Map to for-loop transformation.
Takes a map and enforces a sequential schedule by transforming it into
a state-machine of a for-loop. Creates a nested SDFG, if necessary.
"""
_map_entry = nodes.MapEntry(nodes.Map("", [], []))
@staticmethod
def annotates_memlets():
return True
@staticmethod
def expressions():
return [nxutil.node_path_graph(MapToForLoop._map_entry)]
@staticmethod
def can_be_applied(graph, candidate, expr_index, sdfg, strict=False):
# Only uni-dimensional maps are accepted.
map_entry = graph.nodes()[candidate[MapToForLoop._map_entry]]
if len(map_entry.map.params) > 1:
return False
return True
@staticmethod
def match_to_str(graph, candidate):
map_entry = graph.nodes()[candidate[MapToForLoop._map_entry]]
return map_entry.map.label + ': ' + str(map_entry.map.params)
def apply(self, sdfg):
# Retrieve map entry and exit nodes.
graph = sdfg.nodes()[self.state_id]
map_entry = graph.nodes()[self.subgraph[MapToForLoop._map_entry]]
map_exits = graph.exit_nodes(map_entry)
loop_idx = map_entry.map.params[0]
loop_from, loop_to, loop_step = map_entry.map.range[0]
nested_sdfg = dace.SDFG(graph.label + '_' + map_entry.map.label)
# Construct nested SDFG
begin = nested_sdfg.add_state('begin')
guard = nested_sdfg.add_state('guard')
body = nested_sdfg.add_state('body')
end = nested_sdfg.add_state('end')
nested_sdfg.add_edge(
begin,
guard,
edges.InterstateEdge(assignments={str(loop_idx): str(loop_from)}))
nested_sdfg.add_edge(
guard,
body,
edges.InterstateEdge(condition = str(loop_idx) + ' <= ' + \
str(loop_to))
)
nested_sdfg.add_edge(
guard,
end,
edges.InterstateEdge(condition = str(loop_idx) + ' > ' + \
str(loop_to))
)
nested_sdfg.add_edge(
body,
guard,
edges.InterstateEdge(assignments = {str(loop_idx): str(loop_idx) + \
' + ' +str(loop_step)})
)
# Add map contents
map_subgraph = graph.scope_subgraph(map_entry)
for node in map_subgraph.nodes():
if node is not map_entry and node not in map_exits:
body.add_node(node)
for src, src_conn, dst, dst_conn, memlet in map_subgraph.edges():
if src is not map_entry and dst not in map_exits:
body.add_edge(src, src_conn, dst, dst_conn, memlet)
# Reconnect inputs
nested_in_data_nodes = {}
nested_in_connectors = {}
nested_in_memlets = {}
for i, edge in enumerate(graph.in_edges(map_entry)):
src, src_conn, dst, dst_conn, memlet = edge
data_label = '_in_' + memlet.data
memdata = sdfg.arrays[memlet.data]
if isinstance(memdata, data.Array):
data_array = sdfg.add_array(data_label, memdata.dtype, [
symbolic.overapproximate(r)
for r in memlet.bounding_box_size()
])
elif isinstance(memdata, data.Scalar):
data_array = sdfg.add_scalar(data_label, memdata.dtype)
else:
raise NotImplementedError()
data_node = nodes.AccessNode(data_label)
body.add_node(data_node)
nested_in_data_nodes.update({i: data_node})
nested_in_connectors.update({i: data_label})
nested_in_memlets.update({i: memlet})
for _, _, _, _, old_memlet in body.edges():
if old_memlet.data == memlet.data:
old_memlet.data = data_label
#body.add_edge(data_node, None, dst, dst_conn, memlet)
# Reconnect outputs
nested_out_data_nodes = {}
nested_out_connectors = {}
nested_out_memlets = {}
for map_exit in map_exits:
for i, edge in enumerate(graph.out_edges(map_exit)):
src, src_conn, dst, dst_conn, memlet = edge
data_label = '_out_' + memlet.data
memdata = sdfg.arrays[memlet.data]
if isinstance(memdata, data.Array):
data_array = sdfg.add_array(data_label, memdata.dtype, [
symbolic.overapproximate(r)
for r in memlet.bounding_box_size()
])
elif isinstance(memdata, data.Scalar):
data_array = sdfg.add_scalar(data_label, memdata.dtype)
else:
raise NotImplementedError()
data_node = nodes.AccessNode(data_label)
body.add_node(data_node)
nested_out_data_nodes.update({i: data_node})
nested_out_connectors.update({i: data_label})
nested_out_memlets.update({i: memlet})
for _, _, _, _, old_memlet in body.edges():
if old_memlet.data == memlet.data:
old_memlet.data = data_label
#body.add_edge(src, src_conn, data_node, None, memlet)
# Add nested SDFG and reconnect it
nested_node = graph.add_nested_sdfg(
nested_sdfg, sdfg, set(nested_in_connectors.values()),
set(nested_out_connectors.values()))
for i, edge in enumerate(graph.in_edges(map_entry)):
src, src_conn, dst, dst_conn, memlet = edge
graph.add_edge(src, src_conn, nested_node, nested_in_connectors[i],
nested_in_memlets[i])
for map_exit in map_exits:
for i, edge in enumerate(graph.out_edges(map_exit)):
src, src_conn, dst, dst_conn, memlet = edge
graph.add_edge(nested_node, nested_out_connectors[i], dst,
dst_conn, nested_out_memlets[i])
for src, src_conn, dst, dst_conn, memlet in graph.out_edges(map_entry):
i = int(src_conn[4:]) - 1
new_memlet = dcpy(memlet)
new_memlet.data = nested_in_data_nodes[i].data
body.add_edge(nested_in_data_nodes[i], None, dst, dst_conn,
new_memlet)
for map_exit in map_exits:
for src, src_conn, dst, dst_conn, memlet in graph.in_edges(
map_exit):
i = int(dst_conn[3:]) - 1
new_memlet = dcpy(memlet)
new_memlet.data = nested_out_data_nodes[i].data
body.add_edge(src, src_conn, nested_out_data_nodes[i], None,
new_memlet)
for node in map_subgraph:
graph.remove_node(node)
pattern_matching.Transformation.register_pattern(MapToForLoop)
|
StarcoderdataPython
|
12320
|
<reponame>zacharyt20/POCS
import os
import pytest
import yaml
from pocs.core import POCS
from pocs.observatory import Observatory
from pocs.utils import error
@pytest.fixture
def observatory():
observatory = Observatory(simulator=['all'])
yield observatory
def test_bad_state_machine_file():
with pytest.raises(error.InvalidConfig):
POCS.load_state_table(state_table_name='foo')
def test_load_bad_state(observatory):
pocs = POCS(observatory)
with pytest.raises(error.InvalidConfig):
pocs._load_state('foo')
def test_state_machine_absolute(temp_file):
state_table = POCS.load_state_table()
assert isinstance(state_table, dict)
with open(temp_file, 'w') as f:
f.write(yaml.dump(state_table))
file_path = os.path.abspath(temp_file)
assert POCS.load_state_table(state_table_name=file_path)
|
StarcoderdataPython
|
3301197
|
<reponame>Lh4cKg/task_2_zhuko
from django.urls import path, include
from rest_framework import routers
from server.apps.texts import views
router = routers.DefaultRouter()
router.register(r'texts', views.TextViewSet)
router.register(r'sentences', views.SentenceViewSet)
app_name = 'texts'
urlpatterns = [
path('', include(router.urls)),
]
|
StarcoderdataPython
|
1724380
|
from datetime import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class UserProfile(AbstractUser):
"""
    User profile.
"""
GRADE_TYPE = (
(1, "大学一年级"),
(2, "大学二年级"),
(3, "大学三年级"),
(4, "大学四年级"),
)
name = models.CharField(max_length=30, null=True, blank=True, verbose_name="姓名")
birthday = models.DateField(null=True, blank=True, verbose_name="出生年月")
gender = models.CharField(max_length=6, choices=(("male", u"男"), ("female", "女")), default="female", verbose_name="性别")
mobile = models.CharField(null=True, blank=True, max_length=11, verbose_name="电话")
email = models.EmailField(max_length=100, null=True, blank=True, verbose_name="邮箱")
is_teacher = models.BooleanField(default=False, verbose_name="是否为老师")
head_portrait = models.ImageField(max_length=200, blank=True, upload_to="head_portrait/", verbose_name="头像",
help_text="头像",
default="head_portrait/v2-97573b50437aac7ae71b73de012470ef_720w.jpg")
school = models.CharField(max_length=20, null=True, blank=True, verbose_name="学校")
grade = models.IntegerField(choices=GRADE_TYPE, default=1, verbose_name="年级", help_text="年级")
integral = models.IntegerField(default=0, verbose_name="积分", help_text="积分")
sign = models.TextField(null=True, blank=True, verbose_name="个性签名", help_text="个性签名")
class Meta:
verbose_name = "用户"
verbose_name_plural = verbose_name
def __str__(self):
if self.name:
return self.name
return self.username
class VerifyCode(models.Model):
"""
    SMS verification code.
"""
code = models.CharField(max_length=10, verbose_name="验证码")
mobile = models.CharField(max_length=11, verbose_name="电话")
add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
class Meta:
verbose_name = "短信验证码"
verbose_name_plural = verbose_name
def __str__(self):
return self.code
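# --- Hypothetical usage sketch (not part of the original app) ---
# Assumed helper functions showing how the VerifyCode model above might be used to
# issue and check an SMS code; the 6-digit format and the function names are invented.
import random

def issue_code(mobile: str) -> str:
    """Create and store a random 6-digit verification code for the given phone number."""
    code = "".join(random.choices("0123456789", k=6))
    VerifyCode.objects.create(code=code, mobile=mobile)
    return code

def check_code(mobile: str, code: str) -> bool:
    """Return True if the given code was previously issued for this phone number."""
    return VerifyCode.objects.filter(mobile=mobile, code=code).exists()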
|
StarcoderdataPython
|
109048
|
<reponame>alec-tschantz/action-oriented
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from core.config import *
plt.rc("text", usetex=True)
def create_heatmap(matrix, title, save_path, color_bar=True):
f, (ax1, ax2) = plt.subplots(2, 1, figsize=(2.7, 4))
x_labels = [r"$s_{t-1}^{neg}$", r"$s_{t-1}^{pos}$"]
y_labels = [r"$s_{t}^{neg}$", r"$s_{t}^{pos}$"]
g1 = sns.heatmap(
matrix[0, :, :] * 100,
cmap="OrRd",
ax=ax1,
vmin=0.0,
vmax=70.0,
linewidth=2.5,
annot=True,
xticklabels=x_labels,
yticklabels=y_labels,
cbar=color_bar,
)
g2 = sns.heatmap(
matrix[1, :, :] * 100,
cmap="OrRd",
ax=ax2,
vmin=0.0,
vmax=70.0,
linewidth=2.5,
annot=True,
xticklabels=x_labels,
yticklabels=y_labels,
cbar=color_bar,
)
g1.set_yticklabels(g1.get_yticklabels(), rotation=0, fontsize=14)
g1.set_xticklabels(g1.get_xticklabels(), fontsize=14)
g2.set_yticklabels(g2.get_yticklabels(), rotation=0, fontsize=14)
g2.set_xticklabels(g2.get_xticklabels(), fontsize=14)
f.savefig(save_path, dpi=600, bbox_inches="tight")
plt.show()
if __name__ == "__main__":
states = np.load(STATES_PATH)
create_heatmap(states[FULL_ID, :, :, :], AGENT_NAMES[FULL_ID], FULL_STATES, color_bar=False)
create_heatmap(states[INST_ID, :, :, :], AGENT_NAMES[INST_ID], INST_STATES, color_bar=False)
create_heatmap(states[EPIS_ID, :, :, :], AGENT_NAMES[EPIS_ID], EPIS_STATES, color_bar=False)
create_heatmap(states[RAND_ID, :, :, :], AGENT_NAMES[RAND_ID], RAND_STATES, color_bar=False)
create_heatmap(states[RAND_ID, :, :, :], AGENT_NAMES[RAND_ID], COLOR_BAR, color_bar=True)
reversed_states = np.load(REVERSED_STATES_PATH)
create_heatmap(
reversed_states[FULL_ID, :, :, :],
AGENT_NAMES[FULL_ID] + " (Reversed prior)",
FULL_STATES_REVERSED,
color_bar=False,
)
create_heatmap(
reversed_states[INST_ID, :, :, :],
AGENT_NAMES[INST_ID] + " (Reversed prior)",
INST_STATES_REVERSED,
color_bar=False,
)
create_heatmap(
reversed_states[EPIS_ID, :, :, :],
AGENT_NAMES[EPIS_ID] + " (Reversed prior)",
EPIS_STATES_REVERSED,
color_bar=False,
)
create_heatmap(
reversed_states[RAND_ID, :, :, :],
AGENT_NAMES[RAND_ID] + " (Reversed prior)",
RAND_STATES_REVERSED,
color_bar=False,
)
|
StarcoderdataPython
|
89740
|
from osziplotter.network.Headers import BeaconHeader, SampleTransmissionHeader, CommandHeader
from osziplotter.network.SampleCollector import SampleCollector
from osziplotter.modelcontroller.BoardEvents import BoardEvents
from socket import socket, AF_INET, SOCK_DGRAM, error
from errno import EAGAIN, EWOULDBLOCK
from typing import Tuple, ClassVar
class Network(BoardEvents):
_listen_port: ClassVar[int] = 7567
def __init__(self) -> None:
super(Network, self).__init__()
self._sample_collector = SampleCollector()
self._socket = socket(AF_INET, SOCK_DGRAM)
def listen(self) -> None:
self._socket.bind(("", Network._listen_port))
self._socket.setblocking(False)
def handle_events(self) -> None:
try:
buffer, address = self._socket.recvfrom(4096)
if len(buffer) > 0:
beacon = BeaconHeader()
if beacon.from_bytearray(buffer):
beacon.address, _ = address
self.put(beacon.to_board_info())
return
samples = SampleTransmissionHeader()
if samples.from_bytearray(buffer):
samples.address, _ = address
self._sample_collector.process_received_sample_transmission_header(samples)
except error as e:
err = e.args[0]
if err != EAGAIN and err != EWOULDBLOCK:
print("Fatal socket error!")
def send_trigger(self, target: Tuple[str, int], channel: int, active: bool, trigger_voltage: int) -> None:
command = CommandHeader()
command.port = Network._listen_port
command.active = active
command.trigger_voltage = trigger_voltage
command.channel = channel
command_bin = command.to_bytearray()
self._socket.sendto(command_bin, target)
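# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes the osziplotter package and its BoardEvents/SampleCollector dependencies are
# importable; the polling loop and sleep interval are invented for illustration.
if __name__ == "__main__":
    import time
    net = Network()
    net.listen()
    while True:
        net.handle_events()  # poll the non-blocking UDP socket for beacons and samples
        time.sleep(0.01)     # avoid busy-waiting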
|
StarcoderdataPython
|
1753034
|
<reponame>twetzel59/TinyShell
RED = "\033[31;1m"
GREEN = "\033[32;1m"
CYAN = "\033[36;1m"
RESET = "\033[0m"
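# --- Hypothetical usage (not part of the original file) ---
# The constants above are standard ANSI escape sequences; RESET restores the default style.
if __name__ == "__main__":
    print(GREEN + "ok" + RESET)
    print(RED + "something went wrong" + RESET)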
|
StarcoderdataPython
|
1775233
|
<filename>badcode/bblfshutil.py
import collections
import itertools
import typing
from typing import Iterable, Tuple
import bblfsh
from .tree import Tree
WILDCARD = '_MATCH_ANY_'
class UAST(Tree[Tuple[str,str]]):
def __init__(self, key: Tuple[str,str], children: Iterable['UAST']=tuple()) -> None:
super(UAST, self).__init__(key, children)
@staticmethod
def from_bblfsh(node: bblfsh.Node) -> 'UAST':
return UAST(
key=(node.internal_type, node.token),
children=(uast_to_tree(n) for n in node.children))
def match(self, other: 'UAST') -> bool:
return uast_eq_wildcards(self, other)
def __str__(self) -> str:
return uast_pretty_format(self)
def uast_to_tree(node: bblfsh.Node) -> Tree[Tuple[str, str]]:
children = (uast_to_tree(n) for n in node.children if n.internal_type != 'Position')
return Tree(
key=(node.internal_type, node.token),
children=children)
def uast_eq_node(a: UAST, b: UAST) -> bool:
return a.key == b.key
def uast_eq_node_wildcards(a: UAST, b: UAST) -> bool:
if a.key[0] != b.key[0] and a.key[0] != WILDCARD and b.key[0] != WILDCARD:
return False
if a.key[1] != b.key[1] and a.key[1] != WILDCARD and b.key[1] != WILDCARD:
return False
return True
def uast_eq(a: UAST, b: UAST, eqf=uast_eq_node) -> bool:
if b is None:
return False
for an, bn in itertools.zip_longest(a, b):
if an is None or bn is None:
return False
if not eqf(an, bn):
return False
return True
def uast_eq_wildcards(a: UAST, b: UAST) -> bool:
return uast_eq(a, b, eqf=uast_eq_node_wildcards)
def uast_pretty_format(n: UAST, indent=0) -> str:
s = '%stype: %s, token: %s' % ('. ' * indent, n.key[0], n.key[1])
for c in n.children:
s += '\n' + uast_pretty_format(c, indent=indent+1)
return s
def filter_node(uast: bblfsh.Node) -> None:
"""
Removes any data from the bblfsh.Node that is not used at all.
"""
while len(uast.roles) > 0:
uast.roles.pop()
uast.properties.clear()
for child in list(uast.children):
if child.internal_type == 'Position':
uast.children.remove(child)
continue
filter_node(child)
|
StarcoderdataPython
|
178848
|
"""Sinking point -
A discrete approximation of real numbers with explicit significance tracking.
Implemented really badly in one file.
TODO:
in digital.py:
- representation
- reading in from int / mpfr / string
- reading in from digital, clone and update
- comparison
- trivial bit manipulations like neg and abs
- rounding? round only via w / p
- output? to_mpfr? to_string? to_ieee? to_posit?
in conversion.py:
- to / from mantissa_exp form
- universal is_neg, etc. ?
- extract payload
Where to put OP / RM identifiers?
in xxxmath.py:
- numeric engine: given opcode, inputs as sink, precision,
produce another sink with rounding status
in arithmetic:
- round specifically to ieee / posit
- arith wrappers that can use either backend, and do the correct rounding / special case behavior
"""
import typing
import sys
import random
import re
from .integral import bitmask
from . import conversion
from .ops import RM
def _interval_scan_away(lower, upper, n):
"""Scan for a representative with n=n whose envelope encloses lower and upper.
Returns two things:
True, if the interval is provably too small for the bound, else False
(i.e. we found a representative whose envelope is totally enclosed between lower and upper).
None, if no enclosing representative is found, else the representative.
"""
if lower._inexact or upper._inexact:
raise ValueError('enclose: can only scan exact numbers')
elif lower.negative or upper.negative:
raise ValueError('enclose: can only scan positive numbers')
elif not lower < upper:
raise ValueError('enclose: can only scan ordered envelope, got [{}, {}]'.format(lower, upper))
rep = lower.trunc(n)
if rep.is_exactly_zero():
rep = rep.explode(sided=True, full=False)
else:
rep = rep.explode(sided=False, full=False)
# This loop will only make a small number of iterations.
# We will always hit the bottom of the interval after a short amount of time:
# if we truncated a lot of bits off, then the interval is large and we'll hit the
# exact value in one step. If we didn't truncate bits, the interval might be
# small, but we'll start exactly at lower.
    # Because we detect the case where the envelope size is provably smaller than the
# interval, we will abort after a few iterations in cases where the envelope
# is much smaller than the interval.
while True:
bound_lo, bound_hi = rep.bounds()
bottom_enclosed = bound_lo <= lower
top_enclosed = upper <= bound_hi
if bottom_enclosed and top_enclosed:
# representative encloses the interval: return it
return False, rep
elif not (bottom_enclosed or top_enclosed):
# interval encloses the representative: unless we were using the half envelope
# near zero, this is proof that this n is too small
if rep.interval_sided:
# try the next number to see if that gives us a proof
# TODO: sided -> sided will break everything
rep = rep.away(const_p=False)
else:
return True, None
elif bottom_enclosed:
# (top wasn't enclosed, or we'd have hit the first case)
# bottom of interval was good, top wasn't: move on to the next number to see what
# happens
rep = rep.away(const_p=False)
else:
# bottom of interval was no good, so we went too far.
return False, None
def enclose(lower, upper, min_n=None):
"""Return the sink with the smallest interval that encloses lower and upper.
    Upper and lower must be exact sinks, with lower <= upper.
TODO: auto bounds?
TODO: other kinds of intervals?
"""
if lower._inexact or upper._inexact:
raise ValueError('enclose: must have exact arguments, got [{} and {}]'.format(lower, upper))
elif lower == upper:
return Sink(lower) if lower.n < upper.n else Sink(upper)
elif not lower < upper:
raise ValueError('enclose: arguments out of order, not {} < {}'.format(lower, upper))
zero = Sink(0)
# because upper != lower, the distance between them must be larger than the interval size
# with this n
min_possible_n = min(lower.n, upper.n) - 1
if min_n is None:
min_n = min_possible_n
else:
min_n = max(min_possible_n, min_n)
if lower < zero and upper > zero:
# binsearch around zero
offset = 1
n_lo = n_hi = min_n
bound_lo, bound_hi = zero.trunc(n_hi).explode(sided=False, full=False).bounds()
# first expsearch for n_hi
while lower < bound_lo or bound_hi < upper:
n_lo = n_hi
n_hi = n_hi + offset
offset <<= 1
bound_lo, bound_hi = zero.trunc(n_hi).explode(sided=False, full=False).bounds()
# final condition: n_hi, bound_lo, bound_hi are all safe
while n_lo + 1 < n_hi:
n_mid = n_lo + ((n_hi - n_lo) // 2)
bound_lo, bound_hi = zero.trunc(n_mid).explode(sided=False, full=False).bounds()
if lower < bound_lo or bound_hi < upper:
# bound is unsafe, update n_lo
n_lo = n_mid
else:
# bound is safe, update n_hi
n_hi = n_mid
# final conditions: n_lo + 1 = n_hi, n_lo doesn't work, n_hi works
# OR, we never entered the loop, and n_lo = n_hi = min_n
return zero.trunc(n_hi).explode(sided=False, full=False)
else:
# First, reorder based on magnitude, as we can only trunc towards zero.
if lower.negative:
tmp = -lower
lower = -upper
upper = tmp
negative = True
else:
negative = False
# Binsearch for the largest interval that doesn't work.
# We know we've found it when we can demonstrate that the span
# of this interval is too small, but the demonstration fails for the next size up.
offset = 1
n_lo = n_hi = min_n
too_small, enclosing_rep = _interval_scan_away(lower, upper, n_hi)
# first expsearch for n_hi
while too_small:
n_lo = n_hi
n_hi = n_hi + offset
offset <<= 1
too_small, enclosing_rep = _interval_scan_away(lower, upper, n_hi)
# final condition: n_hi is not provably too small
while n_lo + 1 < n_hi:
n_mid = n_lo + ((n_hi - n_lo) // 2)
too_small, enclosing_rep = _interval_scan_away(lower, upper, n_mid)
if too_small:
# provably too small, update n_lo
n_lo = n_mid
else:
# not provable: update n_hi
n_hi = n_mid
# final conditions: n_lo + 1 = n_hi, n_lo is provably too small, n_hi has no such proof
# OR, we never entered the loops, and n_lo = n_hi = min_n
# We now perform a linear search, starting from n_lo, until we find the smallest n
# that can produce a representative. This should not take very long, as we are doubling
# the size of the envelope each time we increment n.
# TODO: We could save a few cycles by refusing to actually test n_lo if it is the same as n_hi.
n = n_lo
while True:
too_small, enclosing_rep = _interval_scan_away(lower, upper, n)
if enclosing_rep is None:
n += 1
else:
# remember to correct the sign
return Sink(enclosing_rep, negative=negative)
class PrecisionError(Exception):
"""Insufficient precision given to rounding operation."""
class Sink(object):
# for sinks with a real value, the value is exactly (sign) * _c * 2**_exp
_c : int = None # unsigned significand
_exp : int = None # exponent
# sign is stored separately, as is information about infiniteness or NaN
_negative : bool = None # sign bit
_isinf : bool = None # is this value infinite?
_isnan : bool = None # is this value NaN?
# _m and _exp are not directly visible; we expose them with attributes
@property
def m(self):
"""Signed integer significand.
The real value is m * 2**exp
"""
if self._negative:
return -self._c
else:
return self._c
@property
def exp(self):
"""Exponent."""
return self._exp
# we also present 4 views for the primary 'Titanic' properties
@property
def e(self):
"""IEEE 754 style exponent.
If the significand is interpreted as a binary-point number x between 1 and 2,
i.e. x = 1.10011100 etc. then the real value is x * 2**e.
"""
return (self._exp - 1) + self._c.bit_length()
@property
def n(self):
"""The 'sticky bit' or the binary place where digits are no longer significant.
I.e. -1 for an integer inexact beyond the binary point. Always equal to exp - 1.
"""
return self._exp - 1
@property
def p(self):
"""The precision of the significand.
Always equal to the number of bits in c; 0 for any zero.
"""
return self._c.bit_length()
@property
def c(self):
"""Unsigned integer significand."""
return self._c
# views of basic semantic flags
@property
def negative(self):
"""The sign bit - is this value negative?"""
return self._negative
@property
def isinf(self):
"""Is this value infinite?"""
return self._isinf
@property
def isnan(self):
"""Is this value NaN?"""
return self._isnan
# rounding envelopes and inexactness
_inexact : bool = None # approximate bit
_interval_full : bool = None # envelope interval size
_interval_sided : bool = None # envelope interval position
_interval_open_top : bool = None # is the top bound exclusive?
_interval_open_bottom : bool = None # ditto for the bottom bound
_rc : int = None # as MPFR result code. 0 if value is exact, -1 if rounded up, 1 if rounded down.
# views for interval properties
@property
def inexact(self):
"""Is this value inexact?"""
return self._inexact
@property
def interval_full(self):
"""Does the rounding envelope for this number extend a full ulp
on each side? (if false, it is a half ulp)
"""
return self._interval_full
@property
def interval_sided(self):
"""Does the rounding envelope only extend away from zero?
(if False, it is symmetric on both sides)
"""
return self._interval_sided
@property
def interval_open_top(self):
"""Is the top of the rounding envelope exclusive?
(if False, it is inclusive, or closed)
"""
return self._interval_open_top
@property
def interval_open_bottom(self):
"""Is the bottom of the rounding envelope exclusive?
(if False, it is inclusive, or closed)
"""
return self._interval_open_bottom
@property
def rc(self):
"""Result code. 1 if this value was rounded toward 0, -1 if it was rounded away.
"""
return self._rc
# other useful properties
def is_exactly_zero(self):
return self._c == 0 and not self._inexact
def is_zero(self):
return self._c == 0
def is_integer(self):
return self._exp >= 0 or self._c & bitmask(-self._exp) == 0
def is_identical_to(self, x):
return (
self._c == x._c
and self._exp == x._exp
and self._negative == x._negative
and self._isinf == x._isinf
and self._isnan == x._isnan
and self._inexact == x._inexact
and self._interval_full == x._interval_full
and self._interval_sided == x._interval_sided
and self._interval_open_top == x._interval_open_top
and self._interval_open_bottom == x._interval_open_bottom
and self._rc == x._rc
)
def __init__(self,
# The base value of the sink, either as a sink to copy
# or a string / float / mpfr to parse.
base=None,
# value information about the sink to construct
m=None,
exp=None,
# either m must be specified, or c and negative must be specified
c=None,
# negative can be specified alone to change the sign
negative=None,
# inf and nan can be set independently of other properties of the
# number, though having both set at once is not well defined
inf=None,
nan=None,
# inexactness information can be specified or modified independently
inexact=None,
full=None,
sided=None,
open_top=None,
open_bottom=None,
rc=None,
# rounding properties; ignored unless parsing a string
max_p=None,
min_n=None,
rm=conversion.ROUND_NEAREST_EVEN
):
"""Create a new sinking point number. The value can be specified in 3 ways:
If base is None, then the new number must have its value specified by exp
and either m, or c and negative. Note that since integer 0 in Python does
not have a sign, a signed zero must be specified with c and negative.
If base is an existing Sink, then that number is copied, and its fields can
be updated individually.
If base is a numeric type or a string, then that number is converted to a sink
with the closest possible value, as per the rounding specification. In practice,
rounding will only occur for strings. If the specified rounding is impossible
(i.e. rm is None, or both max_p and min_n are unspecified for a value such as
Pi with no finite representation) then an exception will be raised.
"""
# raw, non-converting forms
if base is None or isinstance(base, Sink):
# create from mantissa / exponent form
if base is None:
if not ((m is not None and (c is None and negative is None))
or (m is None and (c is not None and negative is not None))):
raise ValueError('must specify either m, or c and negative')
elif inf and nan:
raise ValueError('number cannot be simultaneously inf and nan')
if m is not None:
self._c = abs(m)
self._negative = (m < 0)
else:
self._c = c
self._negative = negative
self._exp = exp
self._isinf = bool(inf)
self._isnan = bool(nan)
self._inexact = bool(inexact)
self._interval_full = bool(full)
self._interval_sided = bool(sided)
self._interval_open_top = bool(open_top)
self._interval_open_bottom = bool(open_bottom)
if rc is None:
self._rc = 0
else:
self._rc = rc
# copy from existing sink
else:
if m is not None and (c is not None or negative is not None):
raise ValueError('cannot specify c or negative if m is specified')
if m is not None:
self._c = abs(m)
self._negative = (m < 0)
else:
self._c = c if c is not None else base.c
self._negative = negative if negative is not None else base.negative
self._exp = exp if exp is not None else base.exp
self._isinf = inf if inf is not None else base.isinf
self._isnan = nan if nan is not None else base.isnan
if self.isnan and self.isinf:
raise ValueError('cannot update number to simultaneously be inf and nan')
self._inexact = inexact if inexact is not None else base.inexact
self._interval_full = full if full is not None else base.interval_full
self._interval_sided = sided if sided is not None else base.interval_sided
self._interval_open_top = open_top if open_top is not None else base.interval_open_top
self._interval_open_bottom = open_bottom if open_bottom is not None else base.interval_open_bottom
self._rc = rc if rc is not None else base.rc
# convert another representation into sinking point
else:
if not (m is None and exp is None and c is None and negative is None and inf is None and nan is None):
raise ValueError('cannot specify numeric properties when converting another numeric type')
if isinstance(base, str):
# TODO unimplemented
base = float(base)
# TODO does not support inf and nan
negative, c, exp = conversion.numeric_to_signed_mantissa_exp(base)
self._c = c
self._negative = negative
self._exp = exp
self._isinf = False
self._isnan = False
# TODO conflict with rounding
self._inexact = bool(inexact)
self._interval_full = bool(full)
self._interval_sided = bool(sided)
self._interval_open_top = bool(open_top)
self._interval_open_bottom = bool(open_bottom)
# round to specified precision
def __repr__(self):
return 'Sink({}, c={}, exp={}, negative={}, inexact={}, full={}, sided={}, rc={})'.format(
repr(self.to_mpfr()), self.c, self.exp, self.negative, self.inexact, self.interval_full, self.interval_sided, self.rc,
)
def __str__(self):
"""yah"""
if self.c == 0:
sgn = '-' if self.negative else ''
if self._inexact:
return '{}0~@{:d}'.format(sgn, self.n)
else:
#print(repr(self))
return '{}0'.format(sgn)
else:
rep = re.search(r"'(.*)'", repr(self.to_mpfr())).group(1).split('e')
s = rep[0]
sexp = ''
if len(rep) > 1:
sexp = 'e' + 'e'.join(rep[1:])
return '{}{}{}'.format(s, '~' if self._inexact else '', sexp)
# return '{}{}'.format(rep, '~@{:d}'.format(self.n) if self._inexact else '')
def round_m(self, max_p, min_n=None, rm=RM.RNE):
"""Round the mantissa to at most max_p precision, or a least absolute digit
in position min_n, whichever is less precise. Exact numbers can always be rounded
to any precision, but rounding will fail if it would attempt to increase the
precision of an inexact number. Rounding respects the rc, and sets it accordingly
for the rounded result. Rounding can use any specified rounding mode, defaulting
to IEEE 754 style nearest even.
"""
# some values cannot be rounded; return unchanged
if self.is_zero() or self.isinf or self.isnan:
return Sink(self)
# determine where we're rounding to
if min_n is None:
n = self.e - max_p
else:
n = max(min_n, self.e - max_p)
offset = n - self.n
if offset < 0:
if self.inexact:
# If this number is inexact, then we'd have to make up bits to
# extend the precision.
raise PrecisionError('rounding inexact number cannot produce more precise result')
else:
# If the number is exact, then we can always extend with zeros. This is independent
# of the rounding mode.
return Sink(self, c=self.c << -offset, exp=self.exp + offset)
# Break up the significand
lost_bits = self.c & bitmask(offset)
left_bits = self.c >> offset
if offset > 0:
offset_m1 = offset - 1
low_bits = lost_bits & bitmask(offset_m1)
half_bit = lost_bits >> offset_m1
else:
# Rounding to the same precision is equivalent to having zero in the
# lower bits; the only interesting information will come from the result code.
low_bits = 0
half_bit = 0
# Determine which direction to round, based on rounding mode.
# 1 := round away from zero
# 0 := truncate towards zero
# -1 := round down towards zero (this is very unusual)
# Note that most rounding down will use truncation. Actual -1 direction
# "round down" can only happen with 0 lost_bits and a contrary rc, i.e. we rounded
# away but according to the new rounding mode we shouldn't have.
# Zero cannot be given -1 direction: we can only keep it by truncation, or round away.
direction = None
if rm == RM.RNE:
if half_bit == 0:
# always truncate
direction = 0
else: # half_bit == 1
if low_bits != 0:
# always round away
direction = 1
else: # low_bits == 0
# break tie
if self.rc > 0:
direction = 1
elif self.rc < 0:
direction = 0
else: # rc == 0
if self.inexact:
raise ValueError('unable to determine which way to round at this precision')
else: # not self.inexact
# round to even
if left_bits & 1 == 0:
direction = 0
else: # left_bits & 1 != 0
direction = 1
else:
raise ValueError('unimplemented: {}'.format(repr(rm)))
c = left_bits
exp = self.exp + offset
inexact = self.inexact or (lost_bits != 0)
if direction > 0:
# round away
c += 1
if c.bit_length() > max_p:
# we carried: shift over to preserve the right amount of precision
c >>= 1
exp += 1
rc = -1
elif direction == 0:
# truncate
if lost_bits != 0:
# if some bits were truncated off, the result code should indicate a round down
rc = 1
else:
# otherwise, preserve the old result code; nothing has changed
rc = self.rc
return Sink(self, c=c, exp=exp, inexact=inexact, rc=rc)
else: # direction < 0
# round down, towards zero
if direction is None:
raise ValueError('no rounding direction ???')
raise ValueError('unimplemented: round to previous')
return Sink(self, c=c, exp=exp, inexact=inexact, rc=rc)
# core envelope operations
# Adjacent interval logic.
# If const_p is True, then preserve the value of p (this is the behavior of IEEE754 FP).
# Otherwise, preserve n - this ensures intervals have the same size, as for fixed point.
# If strict is True, then always preserve interval properties - this may produce a disconnected interval
# for half intervals. Otherwise, sided half intervals will produce (connected) unsided half intervals,
# and unsided intervals will flow through sided intervals around zero.
# TODO: toward for sided half intervals produces a (still disconnected) unsided half interval.
# TODO: the low-level semantics of this are not necessarily reasonable nor important
def away(self, const_p = False, strict = False):
"""The sink with the next greatest magnitude at this precision, away from 0.
Preserves sign and exactness. Meaningless for non-sided zero.
"""
if self.is_zero() and (not self.interval_sided):
raise ValueError('away: cannot determine which direction to go from {}'.format(repr(self)))
next_c = self.c + 1
next_exp = self.exp
if next_c.bit_length() > self.p:
if const_p and next_c > 1:
# normalize precision, if we want to keep it constant
# only possible if we didn't start from 0
# TODO this definition of constant precision is broken, use IEEE 754 max_p / min_n
next_c >>= 1
next_exp += 1
if strict:
sided = self.interval_sided
else:
if next_c == 1:
sided = False
elif not self.interval_full:
sided = False
else:
sided = self.interval_sided
return Sink(self, c=next_c, exp=next_exp, sided=sided)
def toward(self, const_p = False, strict = False):
"""The sink with the next smallest magnitude at this precision, toward 0.
Preserves sign and exactness. Meaningless for any zero.
"""
if self.is_zero():
raise ValueError('toward: {} is already 0'.format(repr(self)))
prev_c = self.c - 1
prev_exp = self.exp
if prev_c.bit_length() < self.c.bit_length():
if const_p and prev_c > 0:
# normalize precision, if we want to keep it constant
# only possible if we didn't actually reach 0
# TODO this definition of constant precision is broken, use IEEE 754 max_p / min_n
prev_c <<= 1
prev_exp -= 1
if strict:
sided = self.interval_sided
else:
if prev_c == 0:
sided = True
elif not self.interval_full:
sided = False
else:
sided = self.interval_sided
return Sink(self, c=prev_c, exp=prev_exp, sided=sided)
def above(self, const_p = False, strict = False):
"""The sink with the next largest value, toward positive infinity.
"""
if self.is_zero():
if self.interval_sided:
if self.negative:
return -self
else:
return self.away(const_p=const_p, strict=strict)
else:
if strict:
sided = self.interval_sided
else:
sided = False
return Sink(self, c=1, negative=False, sided=sided)
elif self.negative:
return self.toward(const_p=const_p, strict=strict)
else:
return self.away(const_p=const_p, strict=strict)
def below(self, const_p = False, strict = False):
"""The sink with the next smallest value, toward negative infinity.
"""
if self.is_zero():
if self.interval_sided:
if self.negative:
return self.away(const_p=const_p, strict=strict)
else:
return -self
else:
if strict:
sided = self.interval_sided
else:
sided = False
return Sink(self, c=1, negative=True, sided=sided)
elif self.negative:
return self.away(const_p=const_p, strict=strict)
else:
return self.toward(const_p=const_p, strict=strict)
# Interval representatives and bounds.
# An interval's representative is the exact value used for arithmetic in traditional
# IEEE 754-like systems. An interval's bounds are [inclusive] limits on the values the interval
# can represent. For half intervals, they will have one more bit of precision than the
# interval's representative.
# TODO: bounds are always inclusive; this could be tracked, for example to actually do the right
# thing with <> and rounding modes.
def collapse(self, center=False):
"""Collapse an interval down to a representative point.
For sided intervals, can return the "bottom" of the interval, or its true center, which requires
1-2 bits more precision.
"""
if center and self.interval_sided and self._inexact:
extra_bits = 1 if self.interval_full else 2
return Sink(self.narrow(n=self.n - extra_bits), inexact=False, sided=False).away()
else:
return Sink(self, inexact=False)
def explode(self, sided=None, full=None):
"""Explode a representative point to an enclosing interval.
If provided, sided and full replace the corresponding properties of the original interval.
It is invalid to explode a larger interval to a smaller one, i.e. full to half or
unsided to sided.
"""
if self._inexact:
if sided and (not self.interval_sided):
raise ValueError('explode: cannot shrink unsided interval {} to sided'.format(repr(self)))
elif full and (not self.interval_full):
raise ValueError('explode: cannot shrink full interval {} to half'.format(repr(self)))
sided = self.interval_sided if sided is None else sided
full = self.interval_full if full is None else full
return Sink(self, inexact=True, sided=sided, full=full)
def bounds(self):
"""Upper and lower bounds on the value of this number.
Intervals are inclusive.
"""
if self._inexact:
if self.interval_full:
base = self
else:
base = self.narrow(n=self.n - 1)
if self.interval_sided:
if self.negative:
return base.away().collapse(), self.collapse()
else:
return self.collapse(), base.away().collapse()
else:
return base.below().collapse(), base.above().collapse()
else:
return Sink(self), Sink(self)
def trunc(self, n):
"""Round this number towards 0, throwing away the low bits, or append zeros
onto the end, to provide a lower bound on its absolute value at any n.
"""
if self._inexact:
# TODO
raise ValueError('trunc: unsupported: inexact value {}'.format(repr(self)))
if self.n == n:
return Sink(self)
else:
if self.n < n:
# get rid of bits
offset = n - self.n
c = self.c >> offset
exp = self.exp + offset
else:
# add bits
offset = self.n - n
c = self.c << offset
exp = self.exp - offset
return Sink(self, c=c, exp=exp)
def split(self, n=None, rm=0):
"""Split a number into an exact part and an uncertainty bound.
If we produce split(A, n) -> A', E, then we know:
- A' is exact
- E is zero
- lsb(A') == lsb(E) == max(n, lsb(A)) if A is inexact
- lsb(A') == lsb(E) == n if A is exact
TODO: is this correct????
"""
if n is None:
n = self.n
offset = n - self.n
if offset <= 0:
if offset == 0 or self.inexact:
return (Sink(self, inexact=False), Sink(self, c=0))
else:
return (Sink(self, c=self.c << -offset, exp=n+1), Sink(self, c=0, exp=n+1))
else:
lost_bits = self.c & bitmask(offset)
left_bits = self.c >> offset
low_bits = lost_bits & bitmask(offset - 1)
half_bit = lost_bits >> (offset - 1)
inexact = self._inexact or lost_bits != 0
if left_bits == 0 and lost_bits != 0:
sided = True
else:
sided = self.interval_sided
rounded = Sink(self, c=left_bits, exp=n+1, inexact=False, sided=sided)
# in all cases we copy the sign onto epsilon... is that right?
epsilon = Sink(self, c=0, exp=n+1, inexact=inexact, sided=sided)
# TODO use sane RM
if half_bit == 1:
# Note that if we're rounding an inexact number, then the new tight 1-ulp envelope
# of the result will not contain the entire envelope of the input.
if low_bits == 0:
# Exactly half way between, regardless of exactness.
# Use rounding mode to decide.
if rm == 0:
# round to even if rm is zero
if left_bits & bitmask(1) == 1:
return rounded.away(const_p=False), epsilon
else:
return rounded, epsilon
elif rm > 0:
# round away from zero if rm is positive
return rounded.away(const_p=False), epsilon
else:
# else, round toward zero if rm is negative
return rounded, epsilon
else:
return rounded.away(const_p=False), epsilon
else:
return rounded, epsilon
def widen(self, min_n = None, max_p = None):
"""Round this number, using split, so that n is >= min_n and p <= max_p.
By default, preserve n and p, returning this number unchanged.
"""
if min_n is None:
n = self.n
else:
n = min_n
if max_p is not None:
n = max(n, self.e - max_p)
rounded, epsilon = self.split(n)
if max_p is not None and rounded.p > max_p:
# If we rounded up and carried, we might have increased p by one.
# Split again to compensate; this should produce an epsilon of zero.
rounded, epsilon_correction = rounded.split(n + 1)
if not epsilon_correction.is_exactly_zero():
epsilon = epsilon_correction
raise ValueError('widen: unreachable')
return Sink(rounded, inexact=epsilon.inexact)
def narrow(self, n=None, p=None):
"""Force this number into a representation with either n or p.
By default, preserve n and p, returning this number unchanged.
Note that this may produce a smaller envelope that does not contain
the input value.
"""
if n is p is None:
return Sink(self)
elif n is None:
if self.c == 0:
# TODO what should be done here?
# specifying precision is meaningless for zero
n = self.n
else:
n = self.e - p
elif p is None:
# use n as provided
pass
else:
raise ValueError('narrow: can only specify one of n or p, got n={}, p={}'
.format(repr(n), repr(p)))
rounded, epsilon = self.split(n)
# There are two possibilities:
# Either we are trying to narrow the envelope, i.e. increase precision,
# and this split was a no-op;
# Or we are actually trying to widen the envelope, i.e. decrease precision,
# and this split may have rounded up, giving us more precision than we want.
if rounded.n > n:
# split was unable to provide a small enough n, so we have to force one
rounded = Sink(rounded, c=rounded.c << (rounded.n - n), exp=n+1)
elif p is not None and rounded.p > p:
# as for widening, round again to compensate
rounded, epsilon_correction = rounded.split(n + 1)
if not epsilon_correction.is_exactly_zero():
epsilon = epsilon_correction
raise ValueError('narrow: unreachable')
return Sink(rounded, inexact=epsilon.inexact)
def ieee_754(self, w, p):
emax = (1 << (w - 1)) - 1
emin = 1 - emax
max_p = p
min_n = emin - p
if self.c == 0:
return self.narrow(n=min_n)
elif self.n <= min_n or self.p <= max_p:
return self.widen(min_n=min_n, max_p=max_p)
else:
extra_bits = p - self.p
return self.narrow(n=max(min_n, self.n - extra_bits))
def to_mpfr(self):
if self.negative:
return conversion.mpfr_from_mantissa_exp(-self.c, self.n + 1)
else:
return conversion.mpfr_from_mantissa_exp(self.c, self.n + 1)
def to_float(self, ftype=float):
data = conversion.fdata(ftype)
w = data['w']
p = data['p']
rounded = self.ieee_754(w, p)
if rounded.negative:
return conversion.float_from_mantissa_exp(-rounded.c, rounded.n + 1, ftype=ftype)
else:
return conversion.float_from_mantissa_exp(rounded.c, rounded.n + 1, ftype=ftype)
def to_math(self):
# TODO assumes exactness
if self.is_zero():
return '0'
elif self.is_integer() and self.exp < 0:
return str(self.m >> -self.exp)
elif self.is_integer() and self.exp < 32: # TODO some reasonable threshold
return str(self.m << self.exp)
else:
return '{:d} * 2^{:d}'.format(self.m, self.exp)
# core arith and comparison
def __neg__(self):
return Sink(self, negative=not self.negative)
def __abs__(self):
return Sink(self, negative=False)
def compareto(self, x, strict=True):
"""Compare to another number.
Returns two different things: the ordering, and the sharpness.
For a.compareto(b), the ordering is:
-1 iff a < b
0 iff a = b
1 iff a > b
And the sharpness is:
True iff the intervals do not overlap, or a and b are the same point
False iff the intervals overlap at a single point (i.e. they are touching)
None iff the intervals overlap for a region larger than a single point
        Note that two identical points have a sharpness of True, rather than None.
"""
lower, upper = self.bounds()
xlower, xupper = x.bounds()
# normalize to smallest n
n = min(upper.n, lower.n, xupper.n, xlower.n)
lower = lower.narrow(n=n)
upper = upper.narrow(n=n)
xlower = xlower.narrow(n=n)
xupper = xupper.narrow(n=n)
# convert to ordinals
lower_ord = -lower.c if lower.negative else lower.c
upper_ord = -upper.c if upper.negative else upper.c
xlower_ord = -xlower.c if xlower.negative else xlower.c
xupper_ord = -xupper.c if xupper.negative else xupper.c
# integer comparison
if not (lower_ord <= upper_ord and xlower_ord <= xupper_ord):
# TODO: assertion
print(lower_ord, upper_ord, xlower_ord, xupper_ord)
raise ValueError('compareto: unreachable')
elif lower_ord == upper_ord == xlower_ord == xupper_ord:
# a == b
order = 0
sharp = True
elif upper_ord <= xlower_ord:
# a <= b
order = -1
sharp = upper_ord != xlower_ord
elif xupper_ord < lower_ord:
# b <= a
order = 1
sharp = xupper_ord != lower_ord
else:
# overlap: compare representatives
# TODO: center here? it makes comparisons fair...
center = False
rep = self.collapse(center=center)
xrep = x.collapse(center=center)
n = min(rep.n, xrep.n)
rep = rep.narrow(n=n)
xrep = xrep.narrow(n=n)
rep_ord = -rep.c if rep.negative else rep.c
xrep_ord = -xrep.c if xrep.negative else xrep.c
if rep == xrep:
# a == b
order = 0
elif rep < xrep:
# a < b
order = -1
else:
# b < a
order = 1
sharp = None
if strict and sharp is None:
# TODO: this will print warnings, but has no other teeth, and is otherwise unused
# in inline comparisons.
print('WARNING: compared overlapping intervals {} and {}'.format(self, x))
return order, sharp
def __lt__(self, x):
order, sharp = self.compareto(x)
if sharp is False:
# TODO: fangs
print('WARNING: {} < {} is not known to be sharp'.format(self, x))
return order < 0
def __le__(self, x):
order, sharp = self.compareto(x)
return order <= 0
def __eq__(self, x):
order, sharp = self.compareto(x)
return order == 0
def __ne__(self, x):
order, sharp = self.compareto(x)
return order != 0
def __ge__(self, x):
order, sharp = self.compareto(x)
return 0 <= order
def __gt__(self, x):
order, sharp = self.compareto(x)
if sharp is False:
# TODO: fangs
print('WARNING: {} > {} is not known to be sharp'.format(self, x))
return 0 < order
#TODO: arith
#TODO: precision explodes with unnecessary trailing zeros, which is probably bad...
def __add__(self, x):
"""Add this sink to another sink x, exactly. Fails if either is inexact."""
if self.inexact or x.inexact:
raise ValueError('add: can only add exact sinks, got {} + {}'.format(repr(self), repr(x)))
n = min(self.n, x.n)
c_norm = self.c << (self.n - n)
xc_norm = x.c << (x.n - n)
sign = -1 if self.negative else 1
xsign = -1 if x.negative else 1
signed_c = (sign * c_norm) + (xsign * xc_norm)
if signed_c >= 0:
c = signed_c
negative = False
else:
c = -signed_c
negative = True
#TODO: inf and nan
#TODO: sign of negative 0
#TODO: envelope properties
return Sink(self, c=c, exp=n+1, negative=negative, sided=False)
def __sub__(self, x):
"""Alias of self + (-x)"""
return self + (-x)
def __mul__(self, x):
"""Multiply this sink by another sink x, exactly. Fails if either is inexact."""
if self.inexact or x.inexact:
raise ValueError('mul: can only multiply exact sinks, got {} * {}'.format(repr(self), repr(x)))
n = self.n + x.n + 1 # equivalent to (self.n + 1) + (x.n + 1) - 1
c = self.c * x.c
# TODO assert
if self.negative is None or x.negative is None:
raise ValueError('mul {} * {}: negative is None'.format(repr(self), repr(x)))
negative = self.negative != x.negative
#TODO: inf and nan
#TODO: envelope properties
return Sink(self, c=c, exp=n+1, negative=negative, sided=False)
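# --- Hypothetical usage sketch of the Sink class above (not part of the original module) ---
# Assumes the package's relative imports (.integral, .conversion, .ops) resolve;
# the values and precisions are invented for illustration.
if __name__ == "__main__":
    a = Sink(m=3, exp=-1)           # exact value 3 * 2**-1 = 1.5
    b = Sink(m=5, exp=-2)           # exact value 5 * 2**-2 = 1.25
    c = a + b                       # exact addition: 11 * 2**-2 = 2.75
    print(c.m, c.exp)               # -> 11 -2
    x = Sink(m=11, exp=0)           # exact integer 11
    r = x.round_m(max_p=3)          # round-to-nearest-even at 3 bits: 11 -> 12, now inexact
    print(r.m, r.exp, r.inexact)    # -> 6 1 True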
|
StarcoderdataPython
|
1725372
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-20 10:15
from __future__ import unicode_literals
from django.db import migrations
def mark_all_renders_as_removed(apps, schema_editor):
Render = apps.get_model("papers", "Render")
Render.objects.all().update(container_is_removed=True)
class Migration(migrations.Migration):
dependencies = [("papers", "0008_render_container_is_removed")]
operations = [migrations.RunPython(mark_all_renders_as_removed)]
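# --- Hypothetical variant (not part of the original migration) ---
# Passing a reverse function makes the data migration reversible; RunPython.noop
# does nothing when migrating backwards.
# operations = [
#     migrations.RunPython(mark_all_renders_as_removed, migrations.RunPython.noop)
# ]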
|
StarcoderdataPython
|
1655466
|
import sys
def solve():
sys.setrecursionlimit(10**6)
read = sys.stdin.readline
n = int(read())
adj = [[] for _ in range(n + 1)]
for _ in range(n - 1):
u, v = map(int, read().split())
adj[u].append(v)
adj[v].append(u)
# dp[for_tree_rooted_at][w/_or_wo/_root_being_early_adopter] = min_num_early_adopters
dp = [[0, 1] for _ in range(n + 1)]
    # A bottom-up approach would be difficult, because we would have to traverse the
    # tree in order of decreasing depth, which requires a depth-labeling preprocess
    # before the traversal. Though not that hard, we'll do it top-down anyway.
visited = [False] * (n + 1)
def min_ea(node):
visited[node] = True
for child in adj[node]:
if not visited[child]:
min_ea(child)
dp[node][0] += dp[child][1]
dp[node][1] += dp[child][0] if dp[child][0] < dp[child][1] else dp[child][1]
min_ea(1)
print(min(dp[1]))
# Amazingly Simple!
solve()
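# --- Hypothetical worked example (assumed input format: n, then n-1 edges "u v") ---
# For the star tree below, making node 1 an early adopter covers every edge,
# so dp[1] = [3, 1] and the program prints 1.
#
#   4
#   1 2
#   1 3
#   1 4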
|
StarcoderdataPython
|
3251824
|
from dataclasses import dataclass, field
from typing import List, Optional, Union
from bindings.gmd.abstract_object_type import AbstractObjectType
from bindings.gmd.actuate_value import ActuateValue
from bindings.gmd.character_string_property_type import CharacterStringPropertyType
from bindings.gmd.ci_citation_type import CiCitationPropertyType
from bindings.gmd.ci_responsible_party_property_type import (
CiResponsiblePartyPropertyType,
)
from bindings.gmd.date_time_property_type import DateTimePropertyType
from bindings.gmd.ex_extent_property_type import ExExtentPropertyType
from bindings.gmd.md_reference_system_property_type import MdReferenceSystemPropertyType
from bindings.gmd.md_representative_fraction_property_type import (
MdRepresentativeFractionPropertyType,
)
from bindings.gmd.nil_reason_enumeration_value import NilReasonEnumerationValue
from bindings.gmd.show_value import ShowValue
__NAMESPACE__ = "http://www.isotc211.org/2005/gmd"
@dataclass
class LiProcessStepPropertyType:
class Meta:
name = "LI_ProcessStep_PropertyType"
li_process_step: Optional["LiProcessStep"] = field(
default=None,
metadata={
"name": "LI_ProcessStep",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
type: str = field(
init=False,
default="simple",
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
href: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
role: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
arcrole: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
title: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
show: Optional[ShowValue] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
actuate: Optional[ActuateValue] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
uuidref: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
nil_reason: Optional[Union[str, NilReasonEnumerationValue]] = field(
default=None,
metadata={
"name": "nilReason",
"type": "Attribute",
"namespace": "http://www.isotc211.org/2005/gco",
"pattern": r"other:\w{2,}",
},
)
@dataclass
class LiSourceType(AbstractObjectType):
class Meta:
name = "LI_Source_Type"
description: Optional[CharacterStringPropertyType] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
scale_denominator: Optional[MdRepresentativeFractionPropertyType] = field(
default=None,
metadata={
"name": "scaleDenominator",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
source_reference_system: Optional[MdReferenceSystemPropertyType] = field(
default=None,
metadata={
"name": "sourceReferenceSystem",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
source_citation: Optional[CiCitationPropertyType] = field(
default=None,
metadata={
"name": "sourceCitation",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
source_extent: List[ExExtentPropertyType] = field(
default_factory=list,
metadata={
"name": "sourceExtent",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
source_step: List[LiProcessStepPropertyType] = field(
default_factory=list,
metadata={
"name": "sourceStep",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
@dataclass
class LiSource(LiSourceType):
class Meta:
name = "LI_Source"
namespace = "http://www.isotc211.org/2005/gmd"
@dataclass
class LiSourcePropertyType:
class Meta:
name = "LI_Source_PropertyType"
li_source: Optional[LiSource] = field(
default=None,
metadata={
"name": "LI_Source",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
type: str = field(
init=False,
default="simple",
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
href: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
role: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
arcrole: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
title: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
show: Optional[ShowValue] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
actuate: Optional[ActuateValue] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
uuidref: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
nil_reason: Optional[Union[str, NilReasonEnumerationValue]] = field(
default=None,
metadata={
"name": "nilReason",
"type": "Attribute",
"namespace": "http://www.isotc211.org/2005/gco",
"pattern": r"other:\w{2,}",
},
)
@dataclass
class LiProcessStepType(AbstractObjectType):
class Meta:
name = "LI_ProcessStep_Type"
description: Optional[CharacterStringPropertyType] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
"required": True,
},
)
rationale: Optional[CharacterStringPropertyType] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
date_time: Optional[DateTimePropertyType] = field(
default=None,
metadata={
"name": "dateTime",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
processor: List[CiResponsiblePartyPropertyType] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
source: List[LiSourcePropertyType] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
@dataclass
class LiProcessStep(LiProcessStepType):
class Meta:
name = "LI_ProcessStep"
namespace = "http://www.isotc211.org/2005/gmd"
|
StarcoderdataPython
|
179530
|
<gh_stars>0
from django.test import TestCase
import datetime
from django.utils import timezone
from .models import Question
# Create your tests here.
class QuestionMethodTests(TestCase):
def test_was_published_recently_with_future_question(self):
time=timezone.now()+datetime.timedelta(days=30)
future_question=Question(pub_date=time)
self.assertEqual(future_question.was_published_recently(),False)
def test_was_published_recently_with_old_question(self):
""" was_published_recently() should return False for ques-
tions whose pub_date is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=30)
old_question =Question(pub_date=time)
self.assertEqual(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
""" was_published_recently() should return True
for questions whose pub_date is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=1)
recent_question = Question(pub_date=time)
self.assertEqual(recent_question.was_published_recently(), True)
|
StarcoderdataPython
|
1717857
|
import numpy as np
import tensorflow as tf
from embeddings import text_embeddings
from evaluation import appleveleval
from models import wordpair_model
from helpers import io_helper
from helpers import data_shaper
import random
import itertools
from ml import loss_functions
from ml import trainer
from evaluation import simple_stats
from scipy import stats
from evaluation import standard
from sys import stdin
import sys
from sts import faiss_sts
import os
import argparse
parser = argparse.ArgumentParser(description='Evaluates a specialization model on an evaluation dataset.')
parser.add_argument('embs', help='A path to the file containing the pre-trained (i.e., not specialized) distributional embeddings. The words in the embedding file need to be sorted by decreasing frequency in the corpus used for training the vectors.')
parser.add_argument('evaldata', help='A path to the file containing the evaluation dataset, e.g., SimLex-999 (format: word1 \t word2 \t score, one pair per line).')
parser.add_argument('modelpath', help='A path to the serialized specialization model that will be loaded and evaluated.')
args = parser.parse_args()
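# Example invocation (illustrative only; the script and file names below are placeholders):
#   python evaluate_specialization.py wiki.en.vec simlex999.txt specialization.model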
if not os.path.isfile(args.embs):
print("Error: File with the pretrained embeddings not found.")
exit(code = 1)
if not os.path.isfile(args.evaldata):
print("Error: File with the evaluation dataset not found.")
exit(code = 1)
if not os.path.isfile(args.modelpath):
print("Error: Model file not found.")
exit(code = 1)
embs_path = args.embs
simlex_path = args.evaldata
model_path = args.modelpath
# deserializing the model
hyps, vars = io_helper.deserialize(model_path)
print(hyps)
same_encoder, hidden_layer_sizes, distance_measure = hyps
# loading/merging word embeddings
t_embeddings = text_embeddings.Embeddings()
t_embeddings.load_embeddings(embs_path, 200000, language = 'en', print_loading = True, skip_first_line = True)
t_embeddings.inverse_vocabularies()
vocabulary_size = len(t_embeddings.lang_vocabularies["en"])
embeddings = t_embeddings.lang_embeddings["en"].astype(np.float64)
embedding_size = t_embeddings.emb_sizes["en"]
# loading simlex and evaluating initial embeddings
simlex_path_en = simlex_path
simlex_entries_en = io_helper.load_csv_lines(simlex_path_en, delimiter = '\t', indices = [0, 1, 3])
simlex_corr_en = appleveleval.evaluate_reps_simlex(t_embeddings, simlex_entries_en, lang = "en", lower = False)
print("Evaluation dataset correlation before specialization: " + str(simlex_corr_en))
# preparing simlex pairs for the computation of the new embeddings with the model
simlex_data = []
for sim_ent in simlex_entries_en:
if sim_ent[0] in t_embeddings.lang_vocabularies["en"] and sim_ent[1] in t_embeddings.lang_vocabularies["en"]:
simlex_data.append((t_embeddings.lang_vocabularies["en"][sim_ent[0]], t_embeddings.lang_vocabularies["en"][sim_ent[1]], float(sim_ent[2])))
simlex_data_x1s = [x[0] for x in simlex_data]
simlex_data_x2s = [x[1] for x in simlex_data]
simlex_golds = [x[2] for x in simlex_data]
model = wordpair_model.WordPairModel(embeddings, embedding_size, hidden_layer_sizes, same_mlp = same_encoder, activation = tf.nn.tanh, distance_measure = distance_measure)
session = tf.InteractiveSession()
session.run(tf.global_variables_initializer())
print("Setting model variables...")
model.set_variable_values(session, vars)
print("Obtaining transformed vectors for evaluation dataset entries...")
first_embs_transformed_simlex = model.mlp1.outputs.eval(session = session, feed_dict = { model.input_w1 : simlex_data_x1s, model.dropout : 1.0, model.mlp1.dropout : 1.0})
second_embs_transformed_simlex = model.mlp2.outputs.eval(session = session, feed_dict = { model.input_w2 : simlex_data_x2s, model.dropout : 1.0, model.mlp2.dropout : 1.0})
simlex_predicted = []
for i in range(len(first_embs_transformed_simlex)):
simlex_predicted.append(simple_stats.cosine(first_embs_transformed_simlex[i], second_embs_transformed_simlex[i]))
spearman_simlex = stats.spearmanr(simlex_predicted, simlex_golds)
pearson_simlex = stats.pearsonr(simlex_predicted, simlex_golds)
print("Evaluation dataset correlation after specialization: ")
print("Spearman: " + str(spearman_simlex[0]))
print("Pearson: " + str(pearson_simlex[0]))
|
StarcoderdataPython
|
1714133
|
<reponame>Fishkudda/fobot<gh_stars>0
from pony.orm import *
from datetime import datetime,timedelta
import re
from collections import OrderedDict
db = Database()
class Maps(db.Entity):
id = PrimaryKey(int, auto=True)
first_played = Required(datetime)
last_played = Required(datetime)
name = Required(str,unique=True)
mode_type = Required(str)
played = Required(int, sql_default=True, default=0)
status = Set('ServerStatus')
votes = Set('Votes')
down_votes = Set('DownVotes')
value = Required(int, sql_default=True, default=1000)
class Player(db.Entity):
id = PrimaryKey(int, auto=True)
first_saw = Required(datetime)
steam_id = Required(str,unique=True)
name = Required(str)
voted_up = Set('Votes')
down_voted = Set('DownVotes')
player_status = Set('PlayerStatus')
multi = Required(int, sql_default=True, default=1)
vip = Required(bool, sql_default=True, default=False)
class PlayerStatus(db.Entity):
id = PrimaryKey(int, auto=True)
    time = Required(datetime, sql_default=True, default=datetime.utcnow)  # pass the callable so the timestamp is computed per row, not at import time
player = Required('Player')
class Votes(db.Entity):
id = PrimaryKey(int, auto=True)
voted = Required('Maps')
player = Required('Player')
class DownVotes(db.Entity):
id = PrimaryKey(int,auto=True)
voted = Required('Maps')
player = Required('Player')
class ServerStatus(db.Entity):
id = PrimaryKey(int, auto=True)
date = Required(datetime)
bots = Required(int)
human = Required(int)
current_map = Required('Maps')
db.bind(provider="sqlite", filename='server.db', create_db=True)
db.generate_mapping(create_tables=True)
set_sql_debug(False)
@db_session
def get_likes_dislikes(the_map):
down_voted = len(select(map_v for map_v in DownVotes if map_v.voted == the_map))
up_voted = len(select(map_v for map_v in Votes if map_v.voted == the_map))
return (up_voted, down_voted)
@db_session
def create_votes(name,map,choice):
if name == "Console":
if not Player.exists(name=name):
player = add_player(datetime.utcnow(),name,steam_id='STEAM_TEST_CONSOLE')
else:
player = Player.get(name=name)
else:
player = Player.get(name=name)
map_voted = Maps.get(name=map)
if choice:
if DownVotes.exists(player=player, voted = map_voted):
ex_vote = DownVotes.get(player=player, voted=map_voted)
ex_vote.delete()
if Votes.exists(player=player, voted=map_voted):
ex_vote = Votes.get(player=player, voted=map_voted)
ex_vote.delete()
return "Player {} does not like to play {} anymore".format(player.name,map_voted.name)
Votes(voted=map_voted, player=player)
return "Player {} likes to play {}".format(player.name,map_voted.name)
else:
if Votes.exists(player=player,voted=map_voted):
ex_vote = Votes.get(player=player,voted=map_voted)
ex_vote.delete()
if DownVotes.exists(player=player, voted=map_voted):
ex_vote = DownVotes.get(player=player,voted=map_voted)
ex_vote.delete()
return "Player {} does not dislike to play {} anymore".format(player.name,map_voted.name)
DownVotes(voted=map_voted, player=player)
return "Player {} dislikes to play {}".format(player.name,map_voted.name)
@db_session
def set_vip(player):
if type(player) == str:
if re.match(r'^STEAM_',player):
player_steam_id = player
p = Player.get(steam_id= player_steam_id)
p.vip = True
return p
else:
player_name = player
p = Player.get(name=player_name)
p.vip = True
return p
elif type(player) == int:
player_id = player
p = Player.get(id=player_id)
p.vip = True
return p
elif type(player) == Player:
player_db = player
player_db.vip = True
return player_db
@db_session
def unset_vip(player):
if type(player) == str:
if re.match(r'^STEAM_', player):
player_steam_id = player
p = Player.get(steam_id=player_steam_id)
p.vip = False
return p
else:
player_name = player
p = Player.get(name=player_name)
p.vip = False
return p
elif type(player) == int:
player_id = player
p = Player.get(id=player_id)
p.vip = False
return p
elif type(player) == Player:
player_db = player
player_db.vip = False
return player_db
@db_session
def get_total_minutes_up():
res = select(status for status in ServerStatus)
return (len(res)*30)/60
@db_session
def get_minutes_players():
all_player = select(player for player in Player if player.steam_id[0] != 'N')
return sum([get_minutes_played(player)for player in all_player])
@db_session
def calculate_next_map_pool(list_of_players):
list_of_players = [Player.get(steam_id=player.steam_id) for player in list_of_players if not player.is_bot]
all_maps = get_maps_by_played()
all_times_played = sum([map_v.played for map_v in all_maps])
if all_times_played == 0:
return [Maps.get(name='de_dust2'),Maps.get(name='cs_office')]
maps_value_list =[]
for map_v in all_maps:
map_value = 0
list_of_players = sorted(list_of_players,key=lambda time: get_minutes_played(time),reverse=True)
for index,player in enumerate(list_of_players):
res = map_v.value
vip = player.vip
player_liked_map = Votes.exists(player=player, voted=map_v)
player_disliked_map = DownVotes.exists(player=player, voted=map_v)
if player_liked_map:
if vip:
res = res*1.3
else:
res = res*1.2
if player_disliked_map:
if vip:
res = res*0.7
else:
res = res*0.8
if index in [0,1,2,3]:
res = res * 1.2
elif index in [4 , 5, 6, 7]:
res = res * 1
elif index in [8,9,10,11]:
res = res * 0.9
elif index in [12,13,14,15,16]:
res = res*0.8
else:
res = res*0.7
map_value = map_value + res
maps_value_list.append((map_value,map_v))
    def take_value(entry):
        # Sort by the accumulated map value (first tuple element), not the Maps entity.
        return entry[0]
sorted_by_value = sorted(maps_value_list, key=take_value, reverse=True)
print(sorted_by_value)
    result_list = []
    for i in range(3):
        result_list.append(sorted_by_value[i][1])  # keep the Maps entity, not its score
print(result_list)
return result_list
@db_session
def time_weight_player(player):
all_player_time = get_minutes_players()
if all_player_time == 0:
return 0
if type(player) == str:
if re.match(r'^STEAM_', player):
p = Player.get(steam_id=player)
p_t = get_minutes_played(p)
return (p_t/all_player_time)*100
else:
p = Player.get(name=player)
p_t = get_minutes_played(p)
return (p_t / all_player_time) * 100
elif type(player) == int:
p = Player.get(id=player)
p_t = get_minutes_played(p)
return (p_t / all_player_time) * 100
elif type(player) == Player:
        p_t = get_minutes_played(player)
return (p_t / all_player_time) * 100
@db_session
def get_minutes_played(player, time_start=None, time_end=None):
    # Compute the default 30-day window at call time; default arguments would otherwise
    # be frozen when the module is imported.
    if time_start is None:
        time_start = datetime.utcnow() - timedelta(days=30)
    if time_end is None:
        time_end = datetime.utcnow()
    if type(player) == int:
player_index = player
res = select(status for status in PlayerStatus if (player_index == status.player.id) and (status.time < time_end) and (status.time > time_start))
if len(res) == 0:
return 0
return (len(res) * 30) / 60
elif type(player) == str:
if re.match(r'^STEAM_', player):
player_steam_id = player
res = select(status for status in PlayerStatus if (player_steam_id == status.player.steam_id) and (status.time < time_end) and (status.time > time_start))
if len(res) == 0:
return 0
return (len(res) * 30) / 60
else:
player_name = player
res = select(status for status in PlayerStatus if (player_name == status.player.name) and (status.time < time_end) and (status.time > time_start))
if len(res) == 0:
return 0
return (len(res)*30)/60
elif type(player) == Player:
player_db = player
res = select(status for status in PlayerStatus if (player_db == status.player) and (status.time < time_end) and (status.time > time_start))
if len(res) == 0:
return 0
return (len(res)*30)/60
else:
return -9999
@db_session
def get_all_player():
return Player.select()[0:]
@db_session
def get_maps_by_played():
return Maps.select().order_by(desc(Maps.played))[0:]
@db_session
def create_player_status(first_saw,name,steam_id):
if Player.exists(steam_id=steam_id):
player = Player.get(steam_id=steam_id)
else:
player = Player(first_saw=first_saw, name=name, steam_id=steam_id)
return PlayerStatus(player=player)
@db_session
def add_player(first_saw, name, steam_id):
if Player.exists(steam_id=steam_id):
player = Player.get(steam_id=steam_id)
if name != player.name:
player.name = name
return player
return Player(first_saw=first_saw, name=name, steam_id=steam_id)
@db_session
def get_all_server_status():
return ServerStatus.select()[0:]
@db_session
def create_server_status_ticker(server):
create = create_server_status(server)
old_id = create.id-1
if ServerStatus.exists(id=old_id):
before = ServerStatus[old_id]
if (before.current_map.id != create.current_map.id) and (create.human >= 2):
before.current_map.played = before.current_map.played + 1
return create
@db_session
def create_server_status(server):
date = datetime.utcnow()
bots = server.get_number_of_bots()
human = server.get_number_of_players()
maps = Maps.get(name=server.current_map)
server_status = ServerStatus(date=date,
bots=bots,
human=human,
current_map=maps)
return server_status
@db_session
def add_map(first_played, last_played, name):
if Maps.exists(name=name):
print("{} already exists".format(name))
return Maps.get(name=name)
try:
mode_type = name.split('_')
if mode_type[0] == "de":
mode_type = "bomb"
        elif mode_type[0] == "cs":
mode_type = "hossi"
else:
mode_type = "random"
Maps(first_played=first_played, last_played=last_played,
name=name, mode_type=mode_type)
    except Exception:
        print("Map: {} is not a proper CS casual map, ignoring".format(name))
@db_session
def get_all_maps():
return Maps.select()[0:]
@db_session
def get_map(ident):
if type(ident) == str:
return Maps.get(name=ident)
if type(ident) == int:
return Maps[ident]
@db_session
def print_all_server_status():
for x in get_all_server_status():
try:
msg = "Bots: {} Human: {} Date: {} Map: {} Played: {}".format(x.bots,x.human,x.date,x.current_map.name,x.current_map.played)
except Exception:
print("Error cant print status")
@db_session
def create_test_user():
for i in range(1000):
add_player(datetime.utcnow(),str(i),"STEAM_1_{}".format(i))
def database_create_maps(map_list):
dt = datetime.utcnow()
result = []
for m in map_list:
result.append(add_map(dt,dt,m))
def print_all_maps():
for x in get_all_maps():
msg = "{} {} {}".format(x.name, x.played, x.value)
|
StarcoderdataPython
|
45329
|
<filename>rdkit/Chem/UnitTestGraphDescriptors.2.py
# $Id$
#
# Copyright (C) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for graph-theoretical descriptors
"""
from __future__ import print_function
from rdkit import RDConfig
import unittest, os.path
from rdkit import Chem
from rdkit.Chem import GraphDescriptors, MolSurf, Lipinski, Crippen
def feq(n1, n2, tol=1e-4):
return abs(n1 - n2) <= tol
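# feq() example (illustrative): feq(1.00002, 1.0) -> True, feq(1.2, 1.0) -> False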
class TestCase(unittest.TestCase):
def setUp(self):
if doLong:
print('\n%s: ' % self.shortDescription(), end='')
def testBertzCTShort(self):
""" test calculation of Bertz 'C(T)' index
"""
data = [('C=CC=C', 21.01955), ('O=CC=O', 25.01955), ('FCC(=O)CF', 46.7548875),
('O=C1C=CC(=O)C=C1', 148.705216), ('C12C(F)=C(O)C(F)C1C(F)=C(O)C(F)2', 315.250442),
('C12CC=CCC1C(=O)C3CC=CCC3C(=O)2', 321.539522)]
for smi, CT in data:
m = Chem.MolFromSmiles(smi)
newCT = GraphDescriptors.BertzCT(m, forceDMat=1)
assert feq(newCT, CT, 1e-3), 'mol %s (CT calc = %f) should have CT = %f' % (smi, newCT, CT)
def _testBertzCTLong(self):
""" test calculation of Bertz 'C(T)' index
NOTE: this is a backwards compatibility test, because of the changes
w.r.t. the treatment of aromatic atoms in the new version, we need
to ignore molecules with aromatic rings...
"""
col = 1
with open(os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'PP_descrs_regress.2.csv'),
'r') as inF:
lineNum = 0
for line in inF:
lineNum += 1
if line[0] != '#':
splitL = line.split(',')
smi = splitL[0]
m = Chem.MolFromSmiles(smi)
assert m, 'line %d, smiles: %s' % (lineNum, smi)
useIt = 1
for atom in m.GetAtoms():
if atom.GetIsAromatic():
useIt = 0
break
if useIt:
tgtVal = float(splitL[col])
try:
val = GraphDescriptors.BertzCT(m)
except Exception:
val = 666
assert feq(val, tgtVal, 1e-4), 'line %d, mol %s (CT calc = %f) should have CT = %f' % (
lineNum, smi, val, tgtVal)
def __testDesc(self, fileN, col, func):
with open(os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', fileN), 'r') as inF:
lineNum = 0
for line in inF:
lineNum += 1
if line[0] != '#':
splitL = line.split(',')
smi = splitL[0]
m = Chem.MolFromSmiles(smi)
assert m, 'line %d, smiles: %s' % (lineNum, smi)
useIt = 1
if useIt:
tgtVal = float(splitL[col])
if not feq(tgtVal, 666.0):
try:
val = func(m)
except Exception:
val = 666
assert feq(val, tgtVal, 1e-4), 'line %d, mol %s (calc = %f) should have val = %f' % (
lineNum, smi, val, tgtVal)
def testChi0Long(self):
""" test calculation of Chi0
"""
col = 2
self.__testDesc('PP_descrs_regress.csv', col, GraphDescriptors.Chi0)
def _testChi0Long2(self):
""" test calculation of Chi0
"""
col = 2
self.__testDesc('PP_descrs_regress.2.csv', col, GraphDescriptors.Chi0)
def testHallKierAlphaLong(self):
""" test calculation of the Hall-Kier Alpha value
"""
col = 3
self.__testDesc('PP_descrs_regress.csv', col, GraphDescriptors.HallKierAlpha)
def _testHallKierAlphaLong2(self):
""" test calculation of the Hall-Kier Alpha value
"""
col = 3
self.__testDesc('PP_descrs_regress.2.csv', col, GraphDescriptors.HallKierAlpha)
def testIpc(self):
""" test calculation of Ipc.
"""
data = [('CCCCC', 1.40564, 11.24511), ('CCC(C)C', 1.37878, 9.65148),
('CC(C)(C)C', 0.72193, 3.60964), ('CN(CC)CCC', 1.67982, 31.91664),
('C1CCCCC1', 1.71997, 34.39946), ('CC1CCCCC1', 1.68562, 47.19725),
('Cc1ccccc1', 1.68562, 47.19725), ('CC(C)=C(C)C', 1.36096, 13.60964),
('C#N', 1.00000, 2.00000), ('OC#N', 0.91830, 2.75489)]
for smi, res1, res2 in data:
m = Chem.MolFromSmiles(smi)
Ipc = GraphDescriptors.Ipc(m, forceDMat=1)
Ipc_avg = GraphDescriptors.Ipc(m, avg=1, forceDMat=1)
assert feq(Ipc_avg, res1, 1e-3), 'mol %s (Ipc_avg=%f) should have Ipc_avg=%f' % (smi, Ipc_avg,
res1)
assert feq(Ipc, res2, 1e-3), 'mol %s (Ipc=%f) should have Ipc=%f' % (smi, Ipc, res2)
Ipc = GraphDescriptors.Ipc(m)
Ipc_avg = GraphDescriptors.Ipc(m, avg=1)
assert feq(Ipc_avg, res1, 1e-3), '2nd pass: mol %s (Ipc_avg=%f) should have Ipc_avg=%f' % (
smi, Ipc_avg, res1)
assert feq(Ipc, res2, 1e-3), '2nd pass: mol %s (Ipc=%f) should have Ipc=%f' % (smi, Ipc, res2)
def _testIpcLong(self):
""" test calculation of Ipc
"""
col = 4
self.__testDesc('PP_descrs_regress.csv', col, GraphDescriptors.Ipc)
def _testIpcLong2(self):
""" test calculation of Ipc
"""
col = 4
self.__testDesc('PP_descrs_regress.2.csv', col, GraphDescriptors.Ipc)
def testKappa1(self):
""" test calculation of the Hall-Kier kappa1 value
corrected data from Tables 3 and 6 of Rev. Comp. Chem. vol 2, 367-422, (1991)
"""
data = [('C12CC2C3CC13', 2.344), ('C1CCC12CC2', 3.061), ('C1CCCCC1', 4.167), ('CCCCCC', 6.000),
('CCC(C)C1CCC(C)CC1', 9.091), ('CC(C)CC1CCC(C)CC1', 9.091),
('CC(C)C1CCC(C)CCC1', 9.091)]
for smi, res in data:
m = Chem.MolFromSmiles(smi)
kappa = GraphDescriptors.Kappa1(m)
assert feq(kappa, res, 1e-3), 'mol %s (kappa1=%f) should have kappa1=%f' % (smi, kappa, res)
def testKappa2(self):
""" test calculation of the Hall-Kier kappa2 value
corrected data from Tables 5 and 6 of Rev. Comp. Chem. vol 2, 367-422, (1991)
"""
data = [
('[C+2](C)(C)(C)(C)(C)C', 0.667), ('[C+](C)(C)(C)(C)(CC)', 1.240),
('C(C)(C)(C)(CCC)', 2.3444), ('CC(C)CCCC', 4.167), ('CCCCCCC', 6.000), ('CCCCCC', 5.000),
('CCCCCCC', 6.000), ('C1CCCC1', 1.440), ('C1CCCC1C', 1.633), ('C1CCCCC1', 2.222),
('C1CCCCCC1', 3.061), ('CCCCC', 4.00), ('CC=CCCC', 4.740), ('C1=CN=CN1', 0.884),
('c1ccccc1', 1.606), ('c1cnccc1', 1.552), ('n1ccncc1', 1.500), ('CCCCF', 3.930),
('CCCCCl', 4.290), ('CCCCBr', 4.480), ('CCC(C)C1CCC(C)CC1', 4.133),
('CC(C)CC1CCC(C)CC1', 4.133), ('CC(C)C1CCC(C)CCC1', 4.133)
]
for smi, res in data:
#print smi
m = Chem.MolFromSmiles(smi)
kappa = GraphDescriptors.Kappa2(m)
assert feq(kappa, res, 1e-3), 'mol %s (kappa2=%f) should have kappa2=%f' % (smi, kappa, res)
def testKappa3(self):
""" test calculation of the Hall-Kier kappa3 value
corrected data from Tables 3 and 6 of Rev. Comp. Chem. vol 2, 367-422, (1991)
"""
data = [
('C[C+](C)(C)(C)C(C)(C)C', 2.000), ('CCC(C)C(C)(C)(CC)', 2.380), ('CCC(C)CC(C)CC', 4.500),
('CC(C)CCC(C)CC', 5.878), ('CC(C)CCCC(C)C', 8.000), ('CCC(C)C1CCC(C)CC1', 2.500),
('CC(C)CC1CCC(C)CC1', 3.265), ('CC(C)C1CCC(C)CCC1', 2.844)
]
for smi, res in data:
m = Chem.MolFromSmiles(smi)
kappa = GraphDescriptors.Kappa3(m)
assert feq(kappa, res, 1e-3), 'mol %s (kappa3=%f) should have kappa3=%f' % (smi, kappa, res)
def testKappa3Long(self):
""" test calculation of kappa3
"""
col = 5
self.__testDesc('PP_descrs_regress.csv', col, GraphDescriptors.Kappa3)
def _testKappa3Long2(self):
""" test calculation of kappa3
"""
col = 5
self.__testDesc('PP_descrs_regress.2.csv', col, GraphDescriptors.Kappa3)
def _testLabuteASALong(self):
""" test calculation of Labute's ASA value
"""
col = 6
self.__testDesc('PP_descrs_regress.csv', col, lambda x: MolSurf.LabuteASA(x, includeHs=1))
def _testLabuteASALong2(self):
""" test calculation of Labute's ASA value
"""
col = 6
self.__testDesc('PP_descrs_regress.2.csv', col, lambda x: MolSurf.LabuteASA(x, includeHs=1))
def _testTPSAShortNCI(self):
" Short TPSA test "
inName = RDConfig.RDDataDir + '/NCI/first_200.tpsa.csv'
with open(inName, 'r') as inF:
lines = inF.readlines()
for line in lines:
if line[0] != '#':
          line = line.strip()
smi, ans = line.split(',')
ans = float(ans)
mol = Chem.MolFromSmiles(smi)
calc = MolSurf.TPSA(mol)
assert feq(calc, ans), 'bad TPSA for SMILES %s (%.2f != %.2f)' % (smi, calc, ans)
def _testTPSALongNCI(self):
" Long TPSA test "
fileN = 'tpsa_regr.csv'
with open(os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', fileN), 'r') as inF:
lines = inF.readlines()
lineNo = 0
for line in lines:
lineNo += 1
if line[0] != '#':
          line = line.strip()
smi, ans = line.split(',')
ans = float(ans)
mol = Chem.MolFromSmiles(smi)
assert mol, "line %d, failed for smiles: %s" % (lineNo, smi)
calc = MolSurf.TPSA(mol)
assert feq(calc, ans), 'line %d: bad TPSA for SMILES %s (%.2f != %.2f)' % (lineNo, smi,
calc, ans)
def testTPSALong(self):
""" test calculation of TPSA
"""
col = 28
self.__testDesc('PP_descrs_regress.csv', col, MolSurf.TPSA)
def _testTPSALong2(self):
""" test calculation of TPSA
"""
col = 28
self.__testDesc('PP_descrs_regress.2.csv', col, MolSurf.TPSA)
def _testLipinskiLong(self):
""" test calculation of Lipinski params
"""
fName = 'PP_descrs_regress.csv'
# we can't do H Acceptors for these pyridine-containing molecules
# because the values will be wrong for EVERY one.
#col = 29
#self.__testDesc(fName,col,Lipinski.NumHAcceptors)
col = 30
self.__testDesc(fName, col, Lipinski.NumHDonors)
col = 31
self.__testDesc(fName, col, Lipinski.NumHeteroatoms)
col = 32
self.__testDesc(fName, col, Lipinski.NumRotatableBonds)
def _testHAcceptorsLong(self):
""" test calculation of Lipinski params
"""
fName = 'Block_regress.Lip.csv'
col = 1
self.__testDesc(fName, col, Lipinski.NumHAcceptors)
def _testHDonorsLong(self):
""" test calculation of Lipinski params
"""
fName = 'Block_regress.Lip.csv'
col = 2
self.__testDesc(fName, col, Lipinski.NumHDonors)
def _testHeterosLong(self):
""" test calculation of Lipinski params
"""
fName = 'Block_regress.Lip.csv'
col = 3
self.__testDesc(fName, col, Lipinski.NumHeteroatoms)
def _testRotBondsLong(self):
""" test calculation of Lipinski params
"""
fName = 'Block_regress.Lip.csv'
col = 4
self.__testDesc(fName, col, Lipinski.NumRotatableBonds)
def _testLogPLong(self):
""" test calculation of Lipinski params
"""
fName = 'PP_descrs_regress.csv'
col = 33
self.__testDesc(fName, col, lambda x: Crippen.MolLogP(x, includeHs=1))
def _testLogPLong2(self):
""" test calculation of Lipinski params
"""
fName = 'PP_descrs_regress.2.csv'
col = 33
self.__testDesc(fName, col, lambda x: Crippen.MolLogP(x, includeHs=1))
def _testMOELong(self):
""" test calculation of MOE-type descriptors
"""
fName = 'PP_descrs_regress.VSA.csv'
col = 1
self.__testDesc(fName, col, MolSurf.SMR_VSA1)
col = 2
self.__testDesc(fName, col, MolSurf.SMR_VSA10)
col = 3
self.__testDesc(fName, col, MolSurf.SMR_VSA2)
col = 4
self.__testDesc(fName, col, MolSurf.SMR_VSA3)
col = 5
self.__testDesc(fName, col, MolSurf.SMR_VSA4)
col = 6
self.__testDesc(fName, col, MolSurf.SMR_VSA5)
col = 7
self.__testDesc(fName, col, MolSurf.SMR_VSA6)
col = 8
self.__testDesc(fName, col, MolSurf.SMR_VSA7)
col = 9
self.__testDesc(fName, col, MolSurf.SMR_VSA8)
col = 10
self.__testDesc(fName, col, MolSurf.SMR_VSA9)
col = 11
self.__testDesc(fName, col, MolSurf.SlogP_VSA1)
col = 12
self.__testDesc(fName, col, MolSurf.SlogP_VSA10)
col = 13
self.__testDesc(fName, col, MolSurf.SlogP_VSA11)
col = 14
self.__testDesc(fName, col, MolSurf.SlogP_VSA12)
def _testMOELong2(self):
""" test calculation of MOE-type descriptors
"""
fName = 'PP_descrs_regress.VSA.2.csv'
col = 1
self.__testDesc(fName, col, MolSurf.SMR_VSA1)
col = 2
self.__testDesc(fName, col, MolSurf.SMR_VSA10)
col = 11
self.__testDesc(fName, col, MolSurf.SlogP_VSA1)
col = 12
self.__testDesc(fName, col, MolSurf.SlogP_VSA10)
col = 13
self.__testDesc(fName, col, MolSurf.SlogP_VSA11)
col = 14
self.__testDesc(fName, col, MolSurf.SlogP_VSA12)
def testBalabanJ(self):
""" test calculation of the Balaban J value
J values are from Balaban's paper and have had roundoff
errors and typos corrected.
"""
data = [ # alkanes
('CC', 1.0),
('CCC', 1.6330),
('CCCC', 1.9747),
('CC(C)C', 2.3238),
('CCCCC', 2.1906),
('CC(C)CC', 2.5396),
('CC(C)(C)C', 3.0237),
('CCCCCC', 2.3391),
('CC(C)CCC', 2.6272),
('CCC(C)CC', 2.7542),
('CC(C)(C)CC', 3.1685),
('CC(C)C(C)C', 2.9935),
# cycloalkanes
('C1CCCCC1', 2.0000),
('C1C(C)CCCC1', 2.1229),
('C1C(CC)CCCC1', 2.1250),
('C1C(C)C(C)CCC1', 2.2794),
('C1C(C)CC(C)CC1', 2.2307),
('C1C(C)CCC(C)C1', 2.1924),
('C1C(CCC)CCCC1', 2.0779),
('C1C(C(C)C)CCCC1', 2.2284),
('C1C(CC)C(C)CCC1', 2.2973),
('C1C(CC)CC(C)CC1', 2.2317),
('C1C(CC)CCC(C)C1', 2.1804),
('C1C(C)C(C)C(C)CC1', 2.4133),
('C1C(C)C(C)CC(C)C1', 2.3462),
('C1C(C)CC(C)CC1(C)', 2.3409),
# aromatics
('c1ccccc1', 3.0000),
('c1c(C)cccc1', 3.0215),
('c1c(CC)cccc1', 2.8321),
('c1c(C)c(C)ccc1', 3.1349),
('c1c(C)cc(C)cc1', 3.0777),
('c1c(C)ccc(C)c1', 3.0325),
('c1c(CCC)cccc1', 2.6149),
('c1c(C(C)C)cccc1', 2.8483),
('c1c(CC)c(C)ccc1', 3.0065),
('c1c(CC)cc(C)cc1', 2.9369),
('c1c(CC)ccc(C)c1', 2.8816),
('c1c(C)c(C)c(C)cc1', 3.2478),
('c1c(C)c(C)cc(C)c1', 3.1717),
('c1c(C)cc(C)cc1(C)', 3.1657)
]
for smi, res in data:
m = Chem.MolFromSmiles(smi)
j = GraphDescriptors.BalabanJ(m, forceDMat=1)
assert feq(j, res), 'mol %s (J=%f) should have J=%f' % (smi, j, res)
j = GraphDescriptors.BalabanJ(m)
assert feq(j, res), 'second pass: mol %s (J=%f) should have J=%f' % (smi, j, res)
def _testBalabanJLong(self):
""" test calculation of the balaban j value
"""
fName = 'PP_descrs_regress.rest.2.csv'
col = 1
self.__testDesc(fName, col, GraphDescriptors.BalabanJ)
def _testKappa1Long(self):
""" test calculation of kappa1
"""
fName = 'PP_descrs_regress.rest.2.csv'
col = 31
self.__testDesc(fName, col, GraphDescriptors.Kappa1)
def _testKappa2Long(self):
""" test calculation of kappa2
"""
fName = 'PP_descrs_regress.rest.2.csv'
col = 32
self.__testDesc(fName, col, GraphDescriptors.Kappa2)
def _testChi0Long(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 5
self.__testDesc(fName, col, GraphDescriptors.Chi0)
def _testChi1Long(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 8
self.__testDesc(fName, col, GraphDescriptors.Chi1)
def _testChi0v(self):
""" test calculation of Chi0v
"""
data = [('CCCCCC', 4.828), ('CCC(C)CC', 4.992), ('CC(C)CCC', 4.992), ('CC(C)C(C)C', 5.155),
('CC(C)(C)CC', 5.207), ('CCCCCO', 4.276), ('CCC(O)CC', 4.439), ('CC(O)(C)CC', 4.654),
('c1ccccc1O', 3.834), ('CCCl', 2.841), ('CCBr', 3.671), ('CCI', 4.242)]
for smi, res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi0v(m)
assert feq(chi, res, 1e-3), 'mol %s (Chi0v=%f) should have Chi0V=%f' % (smi, chi, res)
def _testChi0vLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 7
self.__testDesc(fName, col, GraphDescriptors.Chi0v)
def testChi1v(self):
""" test calculation of Chi1v
"""
data = [('CCCCCC', 2.914), ('CCC(C)CC', 2.808), ('CC(C)CCC', 2.770), ('CC(C)C(C)C', 2.643),
('CC(C)(C)CC', 2.561), ('CCCCCO', 2.523), ('CCC(O)CC', 2.489), ('CC(O)(C)CC', 2.284),
('c1ccccc1O', 2.134)]
for smi, res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi1v(m)
assert feq(chi, res, 1e-3), 'mol %s (Chi1v=%f) should have Chi1V=%f' % (smi, chi, res)
def _testChi1vLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 10
self.__testDesc(fName, col, GraphDescriptors.Chi1v)
def testPathCounts(self):
""" FIX: this should be in some other file
"""
data = [('CCCCCC', (6, 5, 4, 3, 2, 1)),
('CCC(C)CC', (6, 5, 5, 4, 1, 0)),
('CC(C)CCC', (6, 5, 5, 3, 2, 0)),
('CC(C)C(C)C', (6, 5, 6, 4, 0, 0)),
('CC(C)(C)CC', (6, 5, 7, 3, 0, 0)),
('CCCCCO', (6, 5, 4, 3, 2, 1)),
('CCC(O)CC', (6, 5, 5, 4, 1, 0)),
('CC(O)(C)CC', (6, 5, 7, 3, 0, 0)),
('c1ccccc1O', (7, 7, 8, 8, 8, 8)), ]
for smi, res in data:
m = Chem.MolFromSmiles(smi)
for i in range(1, 6):
cnt = len(Chem.FindAllPathsOfLengthN(m, i, useBonds=1))
assert cnt == res[i], (smi, i, cnt, res[i], Chem.FindAllPathsOfLengthN(m, i, useBonds=1))
cnt = len(Chem.FindAllPathsOfLengthN(m, i + 1, useBonds=0))
assert cnt == res[i], (smi, i, cnt, res[i], Chem.FindAllPathsOfLengthN(m, i + 1,
useBonds=1))
def testChi2v(self):
""" test calculation of Chi2v
"""
data = [('CCCCCC', 1.707),
('CCC(C)CC', 1.922),
('CC(C)CCC', 2.183),
('CC(C)C(C)C', 2.488),
('CC(C)(C)CC', 2.914),
('CCCCCO', 1.431),
('CCC(O)CC', 1.470),
('CC(O)(C)CC', 2.166),
('c1ccccc1O', 1.336), ]
for smi, res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi2v(m)
assert feq(chi, res, 1e-3), 'mol %s (Chi2v=%f) should have Chi2V=%f' % (smi, chi, res)
def _testChi2vLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 12
self.__testDesc(fName, col, GraphDescriptors.Chi2v)
def testChi3v(self):
""" test calculation of Chi3v
"""
data = [('CCCCCC', 0.957), ('CCC(C)CC', 1.394), ('CC(C)CCC', 0.866), ('CC(C)C(C)C', 1.333),
('CC(C)(C)CC', 1.061), ('CCCCCO', 0.762), ('CCC(O)CC', 0.943), ('CC(O)(C)CC', 0.865),
('c1ccccc1O', 0.756)]
for smi, res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi3v(m)
assert feq(chi, res, 1e-3), 'mol %s (Chi3v=%f) should have Chi3V=%f' % (smi, chi, res)
def _testChi3vLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 14
self.__testDesc(fName, col, GraphDescriptors.Chi3v)
def testChi4v(self):
""" test calculation of Chi4v
"""
data = [('CCCCCC', 0.500), ('CCC(C)CC', 0.289), ('CC(C)CCC', 0.577), ('CC(C)C(C)C', 0.000),
('CC(C)(C)CC', 0.000), ('CCCCCO', 0.362), ('CCC(O)CC', 0.289), ('CC(O)(C)CC', 0.000),
('c1ccccc1O', 0.428)]
for smi, res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi4v(m)
assert feq(chi, res, 1e-3), 'mol %s (Chi4v=%f) should have Chi4V=%f' % (smi, chi, res)
def testChi5v(self):
""" test calculation of Chi5v
"""
data = [('CCCCCC', 0.250), ('CCC(C)CC', 0.000), ('CC(C)CCC', 0.000), ('CC(C)C(C)C', 0.000),
('CC(C)(C)CC', 0.000), ('CCCCCO', 0.112), ('CCC(O)CC', 0.000), ('CC(O)(C)CC', 0.000),
('c1ccccc1O', 0.242)]
for smi, res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.ChiNv_(m, 5)
assert feq(chi, res, 1e-3), 'mol %s (Chi5v=%f) should have Chi5V=%f' % (smi, chi, res)
def testChi0n(self):
""" test calculation of Chi0n
"""
data = [('CCCCCC', 4.828),
('CCC(C)CC', 4.992),
('CC(C)CCC', 4.992),
('CC(C)C(C)C', 5.155),
('CC(C)(C)CC', 5.207),
('CCCCCO', 4.276),
('CCC(O)CC', 4.439),
('CC(O)(C)CC', 4.654),
('c1ccccc1O', 3.834),
('CCCl', 2.085),
('CCBr', 2.085),
('CCI', 2.085), ]
for smi, res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi0n(m)
assert feq(chi, res, 1e-3), 'mol %s (Chi0n=%f) should have Chi0n=%f' % (smi, chi, res)
def _testChi0nLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 6
self.__testDesc(fName, col, GraphDescriptors.Chi0n)
def testChi1n(self):
""" test calculation of Chi1n
"""
data = [('CCCCCC', 2.914), ('CCC(C)CC', 2.808), ('CC(C)CCC', 2.770), ('CC(C)C(C)C', 2.643),
('CC(C)(C)CC', 2.561), ('CCCCCO', 2.523), ('CCC(O)CC', 2.489), ('CC(O)(C)CC', 2.284),
('c1ccccc1O', 2.134)]
for smi, res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi1n(m)
assert feq(chi, res, 1e-3), 'mol %s (Chi1n=%f) should have Chi1N=%f' % (smi, chi, res)
def _testChi1nLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 9
self.__testDesc(fName, col, GraphDescriptors.Chi1n)
def testChi2n(self):
""" test calculation of Chi2n
"""
data = [('CCCCCC', 1.707), ('CCC(C)CC', 1.922), ('CC(C)CCC', 2.183), ('CC(C)C(C)C', 2.488),
('CC(C)(C)CC', 2.914), ('CCCCCO', 1.431), ('CCC(O)CC', 1.470), ('CC(O)(C)CC', 2.166),
('c1ccccc1O', 1.336)]
for smi, res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi2n(m)
assert feq(chi, res, 1e-3), 'mol %s (Chi2n=%f) should have Chi2N=%f' % (smi, chi, res)
def _testChi2nLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 11
self.__testDesc(fName, col, GraphDescriptors.Chi2n)
def testChi3n(self):
""" test calculation of Chi3n
"""
data = [('CCCCCC', 0.957), ('CCC(C)CC', 1.394), ('CC(C)CCC', 0.866), ('CC(C)C(C)C', 1.333),
('CC(C)(C)CC', 1.061), ('CCCCCO', 0.762), ('CCC(O)CC', 0.943), ('CC(O)(C)CC', 0.865),
('c1ccccc1O', 0.756)]
for smi, res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi3n(m)
assert feq(chi, res, 1e-3), 'mol %s (Chi3n=%f) should have Chi3N=%f' % (smi, chi, res)
def _testChi3nLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 13
self.__testDesc(fName, col, GraphDescriptors.Chi3n)
def testChi4n(self):
""" test calculation of Chi4n
"""
data = [('CCCCCC', 0.500), ('CCC(C)CC', 0.289), ('CC(C)CCC', 0.577), ('CC(C)C(C)C', 0.000),
('CC(C)(C)CC', 0.000), ('CCCCCO', 0.362), ('CCC(O)CC', 0.289), ('CC(O)(C)CC', 0.000),
('c1ccccc1O', 0.428)]
for smi, res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi4n(m)
assert feq(chi, res, 1e-3), 'mol %s (Chi4n=%f) should have Chi4N=%f' % (smi, chi, res)
def testIssue125(self):
""" test an issue with calculating BalabanJ
"""
smi = 'O=C(OC)C1=C(C)NC(C)=C(C(OC)=O)C1C2=CC=CC=C2[N+]([O-])=O'
m1 = Chem.MolFromSmiles(smi)
m2 = Chem.MolFromSmiles(smi)
Chem.MolToSmiles(m1)
j1 = GraphDescriptors.BalabanJ(m1)
j2 = GraphDescriptors.BalabanJ(m2)
assert feq(j1, j2)
def testOrderDepend(self):
""" test order dependence of some descriptors:
"""
data = [('C=CC=C', 21.01955, 2.73205), ('O=CC=O', 25.01955, 2.73205),
('FCC(=O)CF', 46.7548875, 2.98816), ('O=C1C=CC(=O)C=C1', 148.705216, 2.8265),
('C12C(F)=C(O)C(F)C1C(F)=C(O)C(F)2', 315.250442, 2.4509),
('C12CC=CCC1C(=O)C3CC=CCC3C(=O)2', 321.539522, 1.95986)]
for smi, CT, bal in data:
m = Chem.MolFromSmiles(smi)
newBal = GraphDescriptors.BalabanJ(m, forceDMat=1)
assert feq(newBal, bal, 1e-4), 'mol %s %f!=%f' % (smi, newBal, bal)
m = Chem.MolFromSmiles(smi)
newCT = GraphDescriptors.BertzCT(m, forceDMat=1)
assert feq(newCT, CT, 1e-4), 'mol %s (CT calc = %f) should have CT = %f' % (smi, newCT, CT)
m = Chem.MolFromSmiles(smi)
newCT = GraphDescriptors.BertzCT(m, forceDMat=1)
assert feq(newCT, CT, 1e-4), 'mol %s (CT calc = %f) should have CT = %f' % (smi, newCT, CT)
newBal = GraphDescriptors.BalabanJ(m, forceDMat=1)
assert feq(newBal, bal, 1e-4), 'mol %s %f!=%f' % (smi, newBal, bal)
m = Chem.MolFromSmiles(smi)
newBal = GraphDescriptors.BalabanJ(m, forceDMat=1)
assert feq(newBal, bal, 1e-4), 'mol %s %f!=%f' % (smi, newBal, bal)
newCT = GraphDescriptors.BertzCT(m, forceDMat=1)
assert feq(newCT, CT, 1e-4), 'mol %s (CT calc = %f) should have CT = %f' % (smi, newCT, CT)
if __name__ == '__main__':
import sys, getopt, re
doLong = 0
if len(sys.argv) > 1:
args, extras = getopt.getopt(sys.argv[1:], 'l')
for arg, val in args:
if arg == '-l':
doLong = 1
sys.argv.remove('-l')
if doLong:
for methName in dir(TestCase):
if re.match('_test', methName):
newName = re.sub('_test', 'test', methName)
exec('TestCase.%s = TestCase.%s' % (newName, methName))
unittest.main()
|
StarcoderdataPython
|
1780962
|
<gh_stars>0
from .GetTraceFootprints import GetTraceFootprints
from .GetOrbits import GetOrbits
from .GetPosition import GetPosition
from .PlotOrbit import PlotOrbit,PlotOrbitPlane
from .GetMercuryPos import GetMercuryPos
from .GetRegion import GetRegion
|
StarcoderdataPython
|
143189
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import numpy as np
from arch.api.utils import log_utils
from federatedml.model_base import ModelBase
from federatedml.param.onehot_encoder_param import OneHotEncoderParam
from federatedml.protobuf.generated import onehot_param_pb2, onehot_meta_pb2
from federatedml.statistic.data_overview import get_header
from federatedml.util import consts
LOGGER = log_utils.getLogger()
MODEL_PARAM_NAME = 'OneHotParam'
MODEL_META_NAME = 'OneHotMeta'
MODEL_NAME = 'OneHotEncoder'
class OneHotInnerParam(object):
def __init__(self):
self.col_name_maps = {}
self.header = []
self.transform_indexes = []
self.transform_names = []
self.result_header = []
def set_header(self, header):
self.header = header
for idx, col_name in enumerate(self.header):
self.col_name_maps[col_name] = idx
def set_result_header(self, result_header: list or tuple):
self.result_header = result_header.copy()
def set_transform_all(self):
self.transform_indexes = [i for i in range(len(self.header))]
self.transform_names = self.header
def add_transform_indexes(self, transform_indexes):
for idx in transform_indexes:
if idx >= len(self.header):
LOGGER.warning("Adding a index that out of header's bound")
continue
if idx not in self.transform_indexes:
self.transform_indexes.append(idx)
self.transform_names.append(self.header[idx])
def add_transform_names(self, transform_names):
for col_name in transform_names:
idx = self.col_name_maps.get(col_name)
if idx is None:
LOGGER.warning("Adding a col_name that is not exist in header")
continue
if idx not in self.transform_indexes:
self.transform_indexes.append(idx)
self.transform_names.append(self.header[idx])
class TransferPair(object):
def __init__(self, name):
self.name = name
self._values = set()
self._transformed_headers = {}
def add_value(self, value):
if value in self._values:
return
self._values.add(value)
if len(self._values) > consts.ONE_HOT_LIMIT:
raise ValueError("Input data should not have more than {} possible value when doing one-hot encode"
.format(consts.ONE_HOT_LIMIT))
self._transformed_headers[value] = self.__encode_new_header(value)
@property
def values(self):
return list(self._values)
@property
def transformed_headers(self):
return [self._transformed_headers[x] for x in self.values]
def query_name_by_value(self, value):
if value not in self._values:
return None
return self._transformed_headers.get(value)
def __encode_new_header(self, value):
return '_'.join([str(x) for x in [self.name, value]])
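# Illustrative note (not part of the original module): a TransferPair named "x1" that has
# seen the values 1 and 2 exposes transformed_headers such as ["x1_1", "x1_2"]
# (ordering follows the internal set of observed values).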
class OneHotEncoder(ModelBase):
def __init__(self):
super(OneHotEncoder, self).__init__()
self.col_maps = {}
self.schema = {}
self.output_data = None
self.model_param = OneHotEncoderParam()
self.inner_param: OneHotInnerParam = None
def _init_model(self, model_param):
self.model_param = model_param
# self.cols_index = model_param.cols
def fit(self, data_instances):
self._init_params(data_instances)
f1 = functools.partial(self.record_new_header,
inner_param=self.inner_param)
self.col_maps = data_instances.mapPartitions(f1).reduce(self.merge_col_maps)
LOGGER.debug("Before set_schema in fit, schema is : {}, header: {}".format(self.schema,
self.inner_param.header))
self._transform_schema()
data_instances = self.transform(data_instances)
LOGGER.debug("After transform in fit, schema is : {}, header: {}".format(self.schema,
self.inner_param.header))
return data_instances
def transform(self, data_instances):
self._init_params(data_instances)
LOGGER.debug("In Onehot transform, ori_header: {}, transfered_header: {}".format(
self.inner_param.header, self.inner_param.result_header
))
one_data = data_instances.first()[1].features
LOGGER.debug("Before transform, data is : {}".format(one_data))
f = functools.partial(self.transfer_one_instance,
col_maps=self.col_maps,
inner_param=self.inner_param)
new_data = data_instances.mapValues(f)
self.set_schema(new_data)
one_data = new_data.first()[1].features
LOGGER.debug("transfered data is : {}".format(one_data))
return new_data
def _transform_schema(self):
header = self.inner_param.header.copy()
LOGGER.debug("[Result][OneHotEncoder]Before one-hot, "
"data_instances schema is : {}".format(self.inner_param.header))
result_header = []
for col_name in header:
if col_name not in self.col_maps:
result_header.append(col_name)
continue
pair_obj = self.col_maps[col_name]
new_headers = pair_obj.transformed_headers
result_header.extend(new_headers)
self.inner_param.set_result_header(result_header)
LOGGER.debug("[Result][OneHotEncoder]After one-hot, data_instances schema is : {}".format(header))
def _init_params(self, data_instances):
if len(self.schema) == 0:
self.schema = data_instances.schema
if self.inner_param is not None:
return
self.inner_param = OneHotInnerParam()
# self.schema = data_instances.schema
LOGGER.debug("In _init_params, schema is : {}".format(self.schema))
header = get_header(data_instances)
self.inner_param.set_header(header)
if self.model_param.transform_col_indexes == -1:
self.inner_param.set_transform_all()
else:
self.inner_param.add_transform_indexes(self.model_param.transform_col_indexes)
self.inner_param.add_transform_names(self.model_param.transform_col_names)
@staticmethod
def record_new_header(data, inner_param: OneHotInnerParam):
"""
Generate a new schema based on data value. Each new value will generate a new header.
Returns
-------
        col_maps: a dict mapping each original column name to a TransferPair that
        records the observed feature values and the one-hot headers generated for them,
        e.g. value 1 of column "x1" yields the new header "x1_1".
"""
col_maps = {}
for col_name in inner_param.transform_names:
col_maps[col_name] = TransferPair(col_name)
for _, instance in data:
feature = instance.features
for col_idx, col_name in zip(inner_param.transform_indexes, inner_param.transform_names):
pair_obj = col_maps.get(col_name)
feature_value = int(feature[col_idx])
pair_obj.add_value(feature_value)
return col_maps
@staticmethod
def encode_new_header(col_name, feature_value):
return '_'.join([str(x) for x in [col_name, feature_value]])
@staticmethod
def merge_col_maps(col_map1, col_map2):
if col_map1 is None and col_map2 is None:
return None
if col_map1 is None:
return col_map2
if col_map2 is None:
return col_map1
for col_name, pair_obj in col_map2.items():
if col_name not in col_map1:
col_map1[col_name] = pair_obj
continue
else:
col_1_obj = col_map1[col_name]
for value in pair_obj.values:
col_1_obj.add_value(value)
return col_map1
@staticmethod
def transfer_one_instance(instance, col_maps, inner_param):
feature = instance.features
result_header = inner_param.result_header
# new_feature = [0 for _ in result_header]
_transformed_value = {}
for idx, col_name in enumerate(inner_param.header):
value = feature[idx]
if col_name in result_header:
_transformed_value[col_name] = value
elif col_name not in col_maps:
continue
else:
pair_obj = col_maps.get(col_name)
new_col_name = pair_obj.query_name_by_value(value)
if new_col_name is None:
continue
_transformed_value[new_col_name] = 1
new_feature = [_transformed_value[x] if x in _transformed_value else 0 for x in result_header]
feature_array = np.array(new_feature)
instance.features = feature_array
return instance
def set_schema(self, data_instance):
self.schema['header'] = self.inner_param.result_header
data_instance.schema = self.schema
def _get_meta(self):
meta_protobuf_obj = onehot_meta_pb2.OneHotMeta(transform_col_names=self.inner_param.transform_names,
header=self.inner_param.header,
need_run=self.need_run)
return meta_protobuf_obj
def _get_param(self):
pb_dict = {}
for col_name, pair_obj in self.col_maps.items():
values = [str(x) for x in pair_obj.values]
value_dict_obj = onehot_param_pb2.ColsMap(values=values,
transformed_headers=pair_obj.transformed_headers)
pb_dict[col_name] = value_dict_obj
result_obj = onehot_param_pb2.OneHotParam(col_map=pb_dict,
result_header=self.inner_param.result_header)
return result_obj
def export_model(self):
if self.model_output is not None:
LOGGER.debug("Model output is : {}".format(self.model_output))
return self.model_output
meta_obj = self._get_meta()
param_obj = self._get_param()
result = {
MODEL_META_NAME: meta_obj,
MODEL_PARAM_NAME: param_obj
}
return result
def _load_model(self, model_dict):
self._parse_need_run(model_dict, MODEL_META_NAME)
model_param = list(model_dict.get('model').values())[0].get(MODEL_PARAM_NAME)
model_meta = list(model_dict.get('model').values())[0].get(MODEL_META_NAME)
self.model_output = {
MODEL_META_NAME: model_meta,
MODEL_PARAM_NAME: model_param
}
self.inner_param = OneHotInnerParam()
self.inner_param.set_header(list(model_meta.header))
self.inner_param.add_transform_names(list(model_meta.transform_col_names))
col_maps = dict(model_param.col_map)
self.col_maps = {}
for col_name, cols_map_obj in col_maps.items():
if col_name not in self.col_maps:
self.col_maps[col_name] = TransferPair(col_name)
pair_obj = self.col_maps[col_name]
for feature_value in list(cols_map_obj.values):
pair_obj.add_value(eval(feature_value))
self.inner_param.set_result_header(list(model_param.result_header))
|
StarcoderdataPython
|
1716676
|
<gh_stars>0
from twilio.rest import Client
TWILIO_SID = "AC22a576bf0fa38cb70e45832024b35bfc"
TWILIO_AUTH_TOKEN = "<KEY>"
TWILIO_VIRTUAL_NUMBER = "+19286156986"
TWILIO_VERIFIED_NUMBER = "+13303226254"
# This class is responsible for sending notifications with the flight deal details.
class NotificationManager:
def __init__(self):
self.client = Client(TWILIO_SID, TWILIO_AUTH_TOKEN)
def send_sms(self, message):
message = self.client.messages.create(
body=message,
from_=TWILIO_VIRTUAL_NUMBER,
to=TWILIO_VERIFIED_NUMBER,
)
        # Prints the message SID if the SMS was sent successfully.
print(message.sid)
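# Usage sketch (illustrative only; assumes the credentials above are valid):
#   manager = NotificationManager()
#   manager.send_sms("Low fare alert: LON -> JFK for GBP 99.")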
|
StarcoderdataPython
|
3364685
|
<gh_stars>0
from pynput.keyboard import Listener, Key
from rlbot.agents.base_agent import SimpleControllerState
def deadzone(normalized_axis):
if abs(normalized_axis) < 0.1:
return 0.0
return normalized_axis
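# deadzone() example (illustrative): deadzone(0.05) -> 0.0, deadzone(-0.5) -> -0.5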
class HytakControllerInput(SimpleControllerState):
def __init__(self):
self._gas_pedal = 0.0
self._brake_pedal = 0.0
self._left = 0.0
self._right = 0.0
self._up = 0.0
self._down = 0.0
self._roll_left = 0.0
self._roll_right = 0.0
self.jump = False
self.boost = False
self.handbrake = False
self.listener = Listener(self.create_on_press(), self.create_on_release())
self.listener.start()
@property
def throttle(self):
return self._gas_pedal - self._brake_pedal
@property
def steer(self):
return self._right - self._left
@property
def yaw(self):
return self.steer
@property
def pitch(self):
return self._up - self._down
@property
def roll(self):
return self._roll_right - self._roll_left
def create_on_press(self):
def on_press(key):
            if key is None:
                return
            if key == Key.space:
                self.jump = True
                return
            # Ordinary character keys arrive as pynput KeyCode objects (not Key members),
            # so read their .char attribute; other special keys fall through harmlessly.
            char = getattr(key, 'char', None)
            if char == '8':
                self._gas_pedal = 1.0
            elif char == '5':
                self._brake_pedal = 1.0
            elif char == 'a':
                self._left = 1.0
            elif char == 'd':
                self._right = 1.0
            elif char == 's':
                self._up = 1.0
            elif char == 'w':
                self._down = 1.0
            elif char == '0':
                self.boost = True
            elif char == '4':
                self.handbrake = True
            elif char == 'q':
                self._roll_left = 1.0
            elif char == 'e':
                self._roll_right = 1.0
return on_press
def create_on_release(self):
def on_release(key):
            if key is None:
                return
            if key == Key.space:
                self.jump = False
                return
            # Character keys arrive as KeyCode objects; see on_press above.
            char = getattr(key, 'char', None)
            if char == '8':
                self._gas_pedal = 0.0
            elif char == '5':
                self._brake_pedal = 0.0
            elif char == 'a':
                self._left = 0.0
            elif char == 'd':
                self._right = 0.0
            elif char == 's':
                self._up = 0.0
            elif char == 'w':
                self._down = 0.0
            elif char == '0':
                self.boost = False
            elif char == '4':
                self.handbrake = False
            elif char == 'q':
                self._roll_left = 0.0
            elif char == 'e':
                self._roll_right = 0.0
return on_release
def __eq__(self, other):
return self.roll == other.roll and\
self.jump == other.jump and\
self.boost == other.boost and\
self.handbrake == other.handbrake and\
self.throttle == other.throttle and\
self.steer == other.steer and\
self.yaw == other.yaw and\
self.pitch == other.pitch
|
StarcoderdataPython
|
1798527
|
""" Test kw_only decorators """
from ..keywordonly import kw_only_func, kw_only_meth
import pytest
def test_kw_only_func():
# Test decorator
def func(an_arg):
"My docstring"
return an_arg
assert func(1) == 1
with pytest.raises(TypeError):
func(1, 2)
dec_func = kw_only_func(1)(func)
assert dec_func(1) == 1
with pytest.raises(TypeError):
dec_func(1, 2)
with pytest.raises(TypeError):
dec_func(1, akeyarg=3)
assert dec_func.__doc__ == 'My docstring'
@kw_only_func(1)
def kw_func(an_arg, a_kwarg='thing'):
"Another docstring"
return an_arg, a_kwarg
assert kw_func(1) == (1, 'thing')
with pytest.raises(TypeError):
kw_func(1, 2)
assert kw_func(1, a_kwarg=2) == (1, 2)
with pytest.raises(TypeError):
kw_func(1, akeyarg=3)
assert kw_func.__doc__ == 'Another docstring'
class C(object):
@kw_only_meth(1)
def kw_meth(self, an_arg, a_kwarg='thing'):
"Method docstring"
return an_arg, a_kwarg
c = C()
assert c.kw_meth(1) == (1, 'thing')
with pytest.raises(TypeError):
c.kw_meth(1, 2)
assert c.kw_meth(1, a_kwarg=2) == (1, 2)
with pytest.raises(TypeError):
c.kw_meth(1, akeyarg=3)
assert c.kw_meth.__doc__ == 'Method docstring'
|
StarcoderdataPython
|
1610947
|
#!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import wiredtiger, wttest
from wtscenario import make_scenarios
# test_hs19.py
# Ensure eviction doesn't clear the history store again after checkpoint has done so because of the same update without timestamp.
class test_hs19(wttest.WiredTigerTestCase):
conn_config = 'cache_size=5MB,eviction=(threads_max=1)'
key_format_values = [
('column', dict(key_format='r')),
('string-row', dict(key_format='S'))
]
scenarios = make_scenarios(key_format_values)
def create_key(self, i):
if self.key_format == 'S':
return str(i)
return i
def test_hs19(self):
uri = 'table:test_hs19'
junk_uri = 'table:junk'
self.session.create(uri, 'key_format={},value_format=S'.format(self.key_format))
session2 = self.conn.open_session()
session2.create(junk_uri, 'key_format={},value_format=S'.format(self.key_format))
cursor2 = session2.open_cursor(junk_uri)
cursor = self.session.open_cursor(uri)
self.conn.set_timestamp(
'oldest_timestamp=' + self.timestamp_str(1) +
',stable_timestamp=' + self.timestamp_str(1))
value1 = 'a' * 500
value2 = 'b' * 500
value3 = 'c' * 50000
# Insert an update without timestamp.
self.session.begin_transaction()
cursor[self.create_key(1)] = value1
self.session.commit_transaction()
# Do 2 modifies.
self.session.begin_transaction()
cursor.set_key(self.create_key(1))
mods = [wiredtiger.Modify('B', 100, 1)]
self.assertEqual(cursor.modify(mods), 0)
self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(2))
self.session.begin_transaction()
cursor.set_key(self.create_key(1))
mods = [wiredtiger.Modify('C', 101, 1)]
self.assertEqual(cursor.modify(mods), 0)
self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(3))
# Start a transaction to pin back the reconciliation last running value.
session2.begin_transaction()
cursor2[self.create_key(1)] = value3
# Insert a modify ahead of our reconstructed modify, this one will be used unintentionally
# to reconstruct the final value, corrupting the resulting value.
# The 0 at the end of the modify call indicates how many bytes to replace, we keep
# it as 0 here to not overwrite any of the existing value.
self.session.begin_transaction()
cursor.set_key(self.create_key(1))
mods = [wiredtiger.Modify('AAAAAAAAAA', 102, 0)]
self.assertEqual(cursor.modify(mods), 0)
self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(4))
# Insert a modify to get written as the on disk value by checkpoint.
self.session.begin_transaction()
cursor.set_key(self.create_key(1))
mods = [wiredtiger.Modify('D', 102, 1)]
self.assertEqual(cursor.modify(mods), 0)
self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(5))
# Checkpoint such that all modifies get written out to the history store and the latest
# modify gets written to the on disk value.
self.session.checkpoint('use_timestamp=true')
# Add an additional modify so that when eviction sees this page it will rewrite it as it's
# dirty.
self.session.begin_transaction()
cursor.set_key(self.create_key(1))
mods = [wiredtiger.Modify('E', 103, 1)]
self.assertEqual(cursor.modify(mods), 0)
self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(6))
        # First reset the position of the first cursor, so the page can be evicted.
cursor.reset()
evict_cursor = self.session.open_cursor(uri, None, "debug=(release_evict)")
# Search for the key so we position our cursor on the page that we want to evict.
evict_cursor.set_key(self.create_key(1))
evict_cursor.search()
evict_cursor.reset()
evict_cursor.close()
# Construct and test the value as at timestamp 2
expected = list(value1)
expected[100] = 'B'
expected = str().join(expected)
# Retrieve the value at timestamp 2.
self.session.begin_transaction('read_timestamp=' + self.timestamp_str(2))
cursor.set_key(self.create_key(1))
cursor.search()
# Assert that it matches our expected value.
self.assertEqual(cursor.get_value(), expected)
self.session.rollback_transaction()
        # Construct and test the value as at timestamp 3
expected = list(expected)
expected[101] = 'C'
expected = str().join(expected)
# Retrieve the value at timestamp 3.
self.session.begin_transaction('read_timestamp=' + self.timestamp_str(3))
cursor.set_key(self.create_key(1))
cursor.search()
# Assert that it matches our expected value.
self.assertEqual(cursor.get_value(), expected)
self.session.rollback_transaction()
# Construct and test the value as at timestamp 4
expected = list(expected)
for x in range(10):
expected[102 + x] = 'A'
expected.append('a')
expected = str().join(expected)
        # Retrieve the value at timestamp 4.
self.session.begin_transaction('read_timestamp=' + self.timestamp_str(4))
cursor.set_key(self.create_key(1))
cursor.search()
# Assert that it matches our expected value.
self.assertEqual(cursor.get_value(), expected)
self.session.rollback_transaction()
|
StarcoderdataPython
|
1768080
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 10 15:51:15 2021
@author: rosariouceda-sosa
"""
###########################################
# Extraction of Propbank, Verbnet and mappings
# It requires verbnet3.4, verbnet3.3 and verbnet3.2 in nltk_data directory,
# as well as the latest version of propbank
#
# In particular, it does require the last
###########################################
import json
import re
from nltk.corpus import treebank
from nltk.corpus.util import LazyCorpusLoader
from VerbnetCorpusReaderEx import VerbnetCorpusReaderEx
from nltk.corpus import PropbankCorpusReader
from semlinkEx import query_pb_vn_mapping, query_pb_vn_mapping_1_2
from xml.etree import ElementTree
from propbank_readerEx import PropbankCorpusReaderEx
#from nltk.corpus import propbank
propbank = LazyCorpusLoader(
"propbank-latest",
PropbankCorpusReaderEx,
"prop.txt",
r"frames/.*\.xml",
"verbs.txt",
lambda filename: re.sub(r"^wsj/\d\d/", "", filename),
treebank,
) # Must be defined *after* treebank corpus.
vn_dict = {
"verbnet3.2": LazyCorpusLoader("verbnet3.2", VerbnetCorpusReaderEx, r"(?!\.).*\.xml"),
"verbnet3.3": LazyCorpusLoader("verbnet3.3", VerbnetCorpusReaderEx, r"(?!\.).*\.xml"),
"verbnet3.4": LazyCorpusLoader("verbnet3.4", VerbnetCorpusReaderEx, r"(?!\.).*\.xml")
}
#The default is 3.4
current_vn = vn_dict["verbnet3.4"]
VN_FILES = "/Users/rosariouceda-sosa/Documents/usr/SemanticsSvces/verbnet/verbnet-master/verbnet3.4"
VN_DIR = "/Users/rosariouceda-sosa/Documents/usr/SemanticsSvces/verbnet/"
PB_DIR = "/Users/rosariouceda-sosa/Documents/usr/SemanticsSvces/propbank/"
outputFile = ""
logFile = ""
processedGroupingsVN = {}
processedMaps = []
#key is entity and the list of mappings they have. Keys for Verbnet, Propbank and WN are their id's
memberToMap = {}
#inverse of memberToMap
mapToMember = {}
#Each propbank has: [roleSet] : name, arguments, lemmas, provenance
pb_index = {}
#Each verbnet has: [CODE] : name, [arguments] variableName, variableType, lemmas, provenance,
vn_index = {}
#{roleset} admire-31.2': {'provenance': 'verbnet3.4', 'arguments' : {'ARG0' : {'description' : "XXX" , 'vnArg' : Agent}}]
map_index = {}
extended_semnlink_index = []
#
#vnCodeToLemma = {}
###### LOG
outLog = open("/Users/rosariouceda-sosa/Downloads/OutLog_ULKB_Clean.txt", "w")
###########################################################
# AUXILIARY FUNCTIONS
###########################################################
#IMPORTANT: used to standardize both verbnet names and codes.
def vn_standard(_verb: str) -> str:
# return _verb.replace(".", "-")
#do nothing
return _verb
def count_dict() -> int :
highest = 0
for thisMap in mapToMember :
if len(mapToMember[thisMap]) > highest:
highest = len(mapToMember[thisMap])
return highest
def compare_strs(_first : str, _second : str) -> bool :
    return _first.lower() == _second.lower()
def checkKeyStr(_dict : {}, _key: str) -> str :
if _key in _dict.keys():
return _dict[_key]
else:
return ""
def toRDFStr(_in :str) -> str :
#somewhat sloppy, but gets the data
_in = _in.replace("/", "_")
_in = _in.replace(":", "-")
_in = _in.replace(" ", "")
_in = _in.replace("(", "_")
_in = _in.replace(")", "_")
_in = _in.replace("'", "")
_in = _in.replace(".", "-")
_in = _in.replace(",", "_")
_in = _in.replace("__", "_")
_in = _in.replace(">", "")
_in = _in.replace("<", "")
_in = _in.replace("#", "-")
_in = _in.replace("%", "_")
_in = _in.replace("?", "_")
#ADD THIS FOR THE INCONSISTENT VERBNET NAMING IN SEMLINK AND NLTK
_in = _in.replace("-", "_")
return _in
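# Illustrative sketch (not part of the original pipeline): toRDFStr flattens the punctuation
# that differs between Semlink and the NLTK Verbnet readers, so the hypothetical identifier
# "admire-31.2" normalizes to "admire_31_2"; matchRDF and match_vn_codes compare on this form.
_TORDF_EXAMPLE = toRDFStr("admire-31.2")  # == "admire_31_2"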
def checkArray(_dict : {}, _name : str) -> [] :
if (_name) in _dict.keys() :
return _dict.get(_name)
else:
return []
# Whether the mapping points to 'nothing'
def wrong_mapping(_term :str) -> bool :
_term = _term.strip()
if len(_term) == 0 :
return True
if _term == 'NP' or _term == 'np' or _term == 'NP>' or _term == 'np>':
return True
if _term == 'NM' or _term == 'nm' or _term == 'NM>' or _term == 'nm>':
return True
return False
def clean_text(_text :str, _oneLine : bool) -> str :
_text = _text.replace("\"", "\\\"")
_text = _text.replace("'", "\\'")
_text = _text.replace("\/", "-")
_text = _text.replace("`", " ")
if _oneLine :
_text = _text.replace("\n", "")
return _text
def chunk(mappings: str) -> []:
    return [item.strip() for item in mappings.split(',')]
# from admire-31.2 to admire
def get_vn_lemma(_verb : str) -> str :
return _verb.split('-', 1)[0]
# from admire-31.2 to 31.2 -- The first hyphen is the one that counts
def get_vn_code(_verb: str) -> str :
stVerb = vn_standard(_verb)
return stVerb.split('-',1)[1]
#from admire.01 to admire
def get_pb_lemma(_verb : str) -> str :
return _verb.split('.', 1)[0]
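# Illustrative sketch (not part of the original pipeline): how the splitting helpers above carve
# up the two identifier formats, using the hypothetical ids "admire-31.2" (Verbnet) and
# "admire.01" (Propbank).
_ID_HELPER_EXAMPLES = {
    'vn_lemma': get_vn_lemma('admire-31.2'),  # 'admire'
    'vn_code': get_vn_code('admire-31.2'),    # '31.2'
    'pb_lemma': get_pb_lemma('admire.01'),    # 'admire'
}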
def get_vn_varName(_var : str) -> str :
if _var.startswith('?') :
_var = _var[1:]
return _var
def match_vn_codes(_first:str , _second: str) -> bool :
if toRDFStr(_first) == toRDFStr(_second):
return True
return False
def matchRDF(_item, _dict: {}) -> str :
toMatch = toRDFStr(_item)
for keyItem in _dict:
if toMatch == toRDFStr(keyItem):
return keyItem
return ""
# Also consider whether one starts with the other
def vn_in_dict(_item:str, _dict: {}, _name: str) -> bool :
for keyItem in _dict:
if len (_name ) == 0 :
compareTo = keyItem
else :
compareTo = _dict[keyItem][_name]
if compareTo == _item :
return True
if compareTo.startswith(_item) or compareTo.startswith(_name) :
return True
return False
def vn_to_swap(_item:str, _dict: {}, _name:str) -> str :
_itemCode = get_vn_code(_item)
if _itemCode not in vn_index :
return ""
_itemProvenance = vn_index[_itemCode]['provenance']
_itemVersion = _itemProvenance.split('.',1)[1]
for keyItem in _dict:
if len(_name) == 0 :
compareTo = keyItem
else :
compareTo = _dict[keyItem][_name]
compareToCode = get_vn_code(compareTo)
if _itemCode == compareToCode or compareToCode.startswith(_itemCode) or _itemCode.startswith(compareToCode) :
if compareToCode in vn_index :
compareToProvenance = vn_index[compareToCode]['provenance']
compareToVersion = compareToProvenance.split('.', 1)[1]
if compareToVersion < _itemVersion :
return compareTo
return ""
def unmatched_roles(_have : [], _want: []) -> [] :
result = []
for haveEntry in _have :
haveEntry = haveEntry.lower()
found = False
for wantEntry in _want :
if wantEntry.lower() == haveEntry :
found = True
if not found :
result.append(haveEntry)
return result
def wrongly_matched_roles(_have : [], _want: []) -> [] :
result = []
for haveEntry in _have :
haveEntry = haveEntry.lower()
found = False
for wantEntry in _want :
if wantEntry.lower() == haveEntry :
found = True
if not found :
result.append(haveEntry)
return result
def getRoleStrFrom(_list : []) -> str :
resultStr = ""
noDupList = []
for item in _list :
if item.startswith("?") :
item = item[1:]
if item not in noDupList :
noDupList.append(item)
for item in noDupList :
if len(resultStr) == 0 :
resultStr += item
else :
resultStr += ", " + item
return resultStr
def getRoleListFrom(_list : []) -> [[str]] :
noDupList = []
for item in _list :
if item.startswith("?") :
item = item[1:]
if item not in noDupList :
noDupList.append(item)
return noDupList
######################################################################
# SEMLINK INGESTION
######################################################################
#maps from verbnet class + argument a URL
#Check the variables that have been already mapped through Verbnet
def map_to_url(_class: str, _param : str) -> []:
global vnClassToVars, framesToVars
resultList = []
if _class not in vnClassToVars :
return resultList
argList = vnClassToVars[_class]
for argKey in argList :
if argKey.lower() == _param.lower():
resultList.append(argList[argKey])
# elif _class in framesToVars : #try the frames
# argList = framesToVars[_class]
# for frameKey in argList :
# for argKey in argList[frameKey] :
# if argKey.lower() == _param.lower() :
# resultList.append(argList[frameKey][argKey])
return resultList
def process_semlink_1_2() :
global provenance, pbToMap_params, pbToMap, semLinkFromPB
# from {'mapping': '51.2', 'source': 'verbnet3.4', 'arguments': {'ARG0': 'Theme'}}
# TO map_index
#[{'vnVerb': 'admire-31.2', 'provenance': 'verbnet3.4', 'arguments' : {'ARG0' : {'description' : "XXX" , 'vnArg' : Agent}}]
oldProvenance = provenance
provenance = "semlink 1.2.2"
for roleset in pb_index :
if "abound.01" in roleset:
print("DEBUG " + roleset)
semLinkmappingList = query_pb_vn_mapping_1_2(roleset)
#If there is no mapping, ignore.
if not semLinkmappingList or len(semLinkmappingList) == 0 :
# if outLog is not None :
# outLog.write("PROPBANK NO_SEMLINK_1_2," + roleset + "\n")
# outLog.flush()
continue
#If there is a mapping BUT we don't have the roleset, it's an issue.
if roleset not in map_index :
if outLog is not None :
outLog.write("NO_PROPBANK SEMLINK_1_2," + roleset + "\n")
outLog.flush()
map_index[roleset] = {}
#Grab the current map_index entry. We know it's there
ourMappings = map_index[roleset]
for mapping in semLinkmappingList :
vnRawCode = mapping['mapping']
vnRawCode = vn_standard(vnRawCode)
vnRawName = ""
if vnRawCode in vn_index :
vnRawName = vn_index[vnRawCode]['name']
else : # use a hack to substitute the first hyphen by a dot. Oh brother...
if outLog is not None :
outLog.write("NO VERBNET SEMLINK_1_2," + vnRawName + "," + vnRawCode + "\n")
outLog.flush()
continue #go to the next mapping
#If the verbnet class is already mapped, we ignore it.
arguments = mapping["arguments"]
toSwapVerb = vn_to_swap(vnRawName, ourMappings, "")
            #we swap in the new verbs but ONLY if we can get arguments in the new verb
if len(toSwapVerb) > 0 and len(arguments) > 0 :
ourMappings.pop(toSwapVerb, None)
if not vn_in_dict(vnRawName, ourMappings, "") :
print("SEMLINK1.2 Process " + roleset)
newMapping = {}
newMapping['provenance'] = 'Semlink 1.2.2'
ourMappings[vnRawName] = newMapping
newArguments = {}
newMapping['arguments'] = newArguments
if len(arguments) > 0 :
for pbArg in arguments :
vnArg = arguments[pbArg]
newArguments[pbArg] = {'description': "", "vnArg" : vnArg}
provenance = oldProvenance
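# Illustrative sketch (not produced by running this script): the shape of one map_index entry
# after Semlink ingestion; the roleset, class and thematic role below are made-up values.
_EXAMPLE_MAP_INDEX_ENTRY = {
    'admire.01': {
        'admire-31.2': {
            'provenance': 'Semlink 1.2.2',
            'arguments': {'ARG0': {'description': '', 'vnArg': 'Experiencer'}},
        },
    },
}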
def process_semlink_2() :
global provenance, pbToMap_params, pbToMap, semLinkFromPB
oldProvenance = provenance
provenance = "semlink 2"
foundMap = False
# FROM [{'mapping': 'admire-31.2', 'source': 'verbnet3.4', 'arguments' : {'ARG0' : Agent}}]
# TO map_index
#[{'vnVerb': 'admire-31.2', 'provenance': 'verbnet3.4', 'arguments' : {'ARG0' : {'description' : "XXX" , 'vnArg' : Agent}}]
for roleset in pb_index :
if "abound.01" in roleset :
print("DEBUG " + roleset)
if roleset not in map_index : #shouldn't happen
if outLog is not None :
outLog.write("NO_PROPBANK SEMLINK_2," + roleset + "\n")
outLog.flush()
map_index[roleset] = {}
semLinkmappingList = query_pb_vn_mapping(roleset)
if not semLinkmappingList or len(semLinkmappingList) == 0 :
if outLog is not None :
outLog.write("PROPBANK NO_SEMLINK_2," + roleset + "\n")
outLog.flush()
continue
print("SEMLINK2 Process " + roleset)
ourMappings = map_index[roleset]
for mapping in semLinkmappingList :
vnName = mapping['mapping']
            #Favor verbs in higher versions of Verbnet ONLY if they have arguments
toSwapVerb = vn_to_swap(vnName, ourMappings, "")
semLinkArgs = {}
if 'arguments' in mapping :
semLinkArgs = mapping['arguments']
#Substitute for new-er versions of Verbnet but only if they have arguments
if len(toSwapVerb) > 0 and len(semLinkArgs) > 0:
ourMappings.pop(toSwapVerb, None)
if not vn_in_dict(vnName, ourMappings, "") :
newMapping = {}
ourMappings[vnName] = newMapping
newMapping['provenance'] = "Semlink 2.0"
newMapping['arguments'] = {}
arguments = newMapping['arguments']
for pbArg in semLinkArgs :
arguments[pbArg] = {'description': "", "vnArg" : semLinkArgs[pbArg]}
provenance = oldProvenance
######################################################################
# PROPBANK INGESTION
######################################################################
def query_propbank_roles(propbank_id):
print("\nquery_propbank_roles for propbank_id {}".format(propbank_id))
try:
role_set = propbank.roleset(propbank_id)
except Exception as e:
print(e)
return None
role_mappings = dict()
# print("role_set:", ElementTree.tostring(role_set, encoding='unicode'))
for role in role_set.findall("roles/role"):
# print("role:", ElementTree.tostring(role, encoding='unicode'))
for vn_role in role.findall('vnrole'):
# print("vn_role:", ElementTree.tostring(vn_role, encoding='unicode'))
arg_key = "ARG{}".format(role.attrib['n'])
if arg_key not in role_mappings:
role_mappings[arg_key] = []
role_mappings[arg_key].append({
"vncls": vn_role.attrib["vncls"],
"vntheta": vn_role.attrib["vntheta"],
"description": role.attrib['descr']
})
#print("query_propbank_roles role_mappings:", role_mappings)
return role_mappings
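# Illustrative sketch (not a real query result): the shape query_propbank_roles returns, with
# made-up values for a hypothetical roleset.
_EXAMPLE_ROLE_MAPPINGS = {
    'ARG0': [{'vncls': '31.2', 'vntheta': 'Experiencer', 'description': 'admirer'}],
}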
#########################################
# TRANSFORM MAPPINGS IN PROPBANK TO SEMLINK
# "abash.01": {
# "31.1": {
# "ARG0": "stimulus",
# "ARG1": "experiencer"
# }
# },
# #[{'mapping': 'admire-31.2', 'source': 'verbnet3.4', 'arguments' : {'ARG0' : Agent}}]
def propbank_to_semlink(_roleset : str) :
global map_index
#The mappings, [roleSet] : vn_name, [[arguments] : [pb_arg] : vn_arg, provenances
raw = query_propbank_roles(_roleset)
if len(raw) == 0 :
return
# vn_mapping
#[{'vnVerb': 'admire-31.2', 'provenance': 'verbnet3.4', 'arguments' : {'ARG0' : {'description' : "XXX" , 'vnArg' : Agent}}]
rolesetMap = {}
if _roleset in map_index :
rolesetMap = map_index[_roleset]
else :
map_index[_roleset] = rolesetMap
for argX in raw:
argXMappings = raw[argX]
for argXMapsTo in argXMappings :
vnTheta = argXMapsTo['vntheta']
vnDescription = argXMapsTo['description']
vncls = argXMapsTo["vncls"]
# We need to match on underscores because Propbank's verbs
# sometimes don't use the right VN code!!
vnCode = matchRDF(vncls, vn_index)
if len(vnCode) == 0 :
print (vncls + " NOT FOUND IN PROPBANK MAPPING")
if outLog is not None :
outLog.write("NO_PROPBANK," + _roleset + "\n")
outLog.flush()
continue
vnMapping = vn_index[vnCode]
vnName = vnMapping['name']
if len(vnName) == 0 :
print (vncls + " NOT FOUND")
if outLog is not None :
outLog.write("NO_PROPBANK," + _roleset + "\n")
outLog.flush()
continue
if outLog is not None :
outLog.write("MAP PB," + _roleset + "," + vnName + "\n")
outLog.flush()
if vnName not in rolesetMap :
verbMapping = {}
rolesetMap[vnName] = verbMapping
verbMapping['provenance'] = "Propbank"
verbMapping['arguments'] = {}
thisMapping = rolesetMap[vnName]
thisMapping['arguments'][argX] = {'description' : vnDescription, 'vnArg' : vnTheta }
def process_propbank( _namespace : str) :
global outputFile, idCounter, provenance, outLog
#Each propbank has: [roleSet] : name, arguments, lemmas, provenance
provenance = "Propbank NLTK"
for rolesetData in propbank.rolesets():
roleset = rolesetData.attrib["id"]
if outLog is not None :
outLog.write("PB," + roleset + "\n")
outLog.flush()
#outFile.write(fileID + "#" + tagger + "#" + roleset + "#")
rRoleSet = "UL_PB:" + toRDFStr(roleset)
if roleset in pb_index :
continue
pb_index[roleset] = {}
pb_index[roleset]['name'] = roleset
pb_index[roleset]['provenance'] = 'Propbank'
pb_index[roleset]['arguments'] = {}
argumentDict = pb_index[roleset]['arguments']
for role in rolesetData.findall("roles/role"):
argID = "ARG" + role.attrib['n']
argDesc = role.attrib['descr']
argumentDict[argID] = argDesc
propbank_to_semlink(roleset)
#Whether it has data or not, it has been added to map_index
provenance = "automatic generation"
######################################################################
# VERBNET INGESTION
######################################################################
def process_mappings() :
global idCounter, provenance, mapToMember, memberToMap
provenance = "clustered mappings"
for mapEntity in mapToMember :
if "UL_WN:" in mapEntity or "UL_PB:" in mapEntity or "UL_VN:" in mapEntity :
temp2 = mapEntity.split(":")[1]
temp1 = mapEntity.split(":")[0]
rMapName = temp1 + ":" + toRDFStr(temp2)
#else if "UL_WN-" in mapEntity or "UL_PB-" in mapEntity or "UL_VN-" in mapEntity :
# temp2 = mapEntity[6:]
# temp1 = mapEntity[0:5]
# rMapName = temp1 + ":" + toRDFStr(temp2)
else :
rMapName = toRDFStr(mapEntity)
if mapEntity not in processedMaps:
print("MAP NOT PROCESSED " + rMapName)
memberList = mapToMember[mapEntity] #maybe we added more
for member in memberList:
if "UL_WN:" in member or "UL_PB:" in member or "UL_VN:" in member :
temp1 = member.split(":")[0]
temp2 = member.split(":")[1]
rMember = temp1 + ":" + toRDFStr(temp2)
# else if "UL_WN-" in member or "UL_PB-" in member or "UL_VN-" in member:
# temp2 = mapEntity[6:]
# temp1 = mapEntity[0:5]
# rMember = temp1 + ":" + toRDFStr(temp2)
else :
rMember = toRDFStr(member)
#if len(rMember) == 1 :
# print("DEBUG: rMember ")
provenance = "automatic reasoning"
#Returns the name of the map with all the entities there
def unify_maps(_mapList : {}) -> str :
global memberToMap, mapToMember
if len(_mapList) == 0 :
return None
theMapName = ""
theMap = []
counter = 0
for entity in _mapList:
if counter == 0 :
theMapName = entity
theMap = _mapList[entity]
counter += 1
continue
mapMembers = _mapList[entity]
for member in mapMembers :
if not member in theMap :
theMap.append(member)
memberToMap[member] = theMapName
mapToMember[theMapName] = theMap
return theMapName
def get_vn_classes_for(term :str ) -> [] :
    return current_vn.classids(term)
#def get_all_vn_classes() -> [] :
# return vn.classids()
def process_vn_frames(_className: str, _classKey:str) -> str:
#Each verbnet has: [CODE] : name, [arguments] variableName, variableType, lemmas, provenance
global idCounter, current_vn, framesToVars, vnClassToVars, provenance
theClass = current_vn.vnclass(_className)
vnframes = theClass.findall("FRAMES/FRAME")
if 'put' in _className :
print("DEBUG " + _className)
stClassKey = vn_standard(_classKey)
stClassName = vn_standard(_className)
vnIndexClass = vn_index[stClassKey] # it better work
if 'arguments' not in vnIndexClass :
vnIndexClass['arguments'] = {}
variables = vnIndexClass['arguments']
classRolesList = []
for frame in vnframes :
        #DESCRIPTION
descF = frame.find("DESCRIPTION")
numberF = descF.get("descriptionNumber")
#PROCESS SEMANTICS
semList = current_vn._get_semantics_within_frame(frame)
predCounter = 1
#Predicates per frame
for semPre in semList :
txt = ""
roleList = ""
predValue = semPre['predicate_value']
arguments = semPre['arguments']
is_negated = semPre["is_negated"]
if is_negated :
txt += "NOT "
txt += predValue + "("
argOrder = 1
for argument in arguments :
argValue = argument["value"]
argType = argument["type"]
argText = argType + "(" + argValue + ")"
if argType != 'Event' :
if argValue not in classRolesList :
classRolesList.append(argValue)
# argText = argument["type"] + "(" + argument["value"] + ")"
#writeStmt_toStr(rPredName, rdf_param_text, str(argOrder) + ", " + argText)
argOrder += 1
#if argValue not in framesToVars :
# framesToVars[argValue] = {}
#frameIndex = framesToVars[argValue]
#if rName not in frameIndex :
# frameIndex[rName] = {}
#paramIndex = frameIndex[rName]
#if argValue not in paramIndex :
# paramIndex[argValue] = argName
txt += argText + ","
txt = txt.rstrip(txt[-1]) + ")"
#The variable name must have the context of the frame so it can be
#identified. The text form can also be used.
#This is the whole text of the predicate
predCounter += 1
#print(_className + " --Sem--> " + txt + "\n\n")
#if len(txt)> 0 :
# writeStmt_toStr(name, vn_semantics, txt)
return getRoleListFrom(classRolesList)
def process_vn_thematic_roles(_className: str, _classCode:str) :
global idCounter, current_vn , vnClassToVars
theClass = current_vn.vnclass(_className)
vnthemRoles = theClass.findall("THEMROLES/THEMROLE")
mapping = vn_index[_classCode]
if 'arguments' not in mapping :
mapping['arguments'] = []
if 'caused_calibratable_cos' in _className :
print("DEBUG " + _className)
for themRole in vnthemRoles :
tType = themRole.get('type')
roleName = tType.strip()
if roleName not in mapping['arguments'] :
mapping['arguments'].append(roleName)
def process_vn_lemmas(_className: str, _classKey: str) :
#Each verbnet has: [CODE] : name, [arguments] variableName, variableType, lemmas, provenance
global current_vn, vn_index
theClass = current_vn.vnclass(_className)
stKey = vn_standard(_classKey)
stClassName = vn_standard(_className)
vnIndexClass = vn_index[stKey] # it better work
if 'lemmas' not in vnIndexClass :
vnIndexClass['lemmas'] = []
vnLemmas = vnIndexClass['lemmas']
for member in theClass.findall("MEMBERS/MEMBER") :
vnKey = member.get("name")
if vnKey in vnLemmas :
continue
vnLemmas.append(vnKey)
def read_verbnet_index( _namespace :str, _vn, _version) :
global logFile, vnClassToRDF, provenance, current_vn, vnCodeToVerb
#fill all the indexes
current_vn = _vn
allClasses = current_vn.classids()
for className in allClasses :
cCode = get_vn_code(className)
stCode = vn_standard(cCode)
stClassName = vn_standard(className)
if stCode not in vn_index:
print(_version + " , " + stClassName)
vn_index[stCode] = {}
vn_index[stCode]['name'] = stClassName
vn_index[stCode]['provenance'] = _version
if outLog is not None :
outLog.write(_version + "," + stClassName + "," + stCode + "\n")
outLog.flush()
process_vn_lemmas(className, cCode)
process_vn_thematic_roles(className, cCode)
roleList = process_vn_frames(className, cCode)
if 'arguments' not in vn_index[cCode] :
            vn_index[cCode]['arguments'] = []
args = vn_index[cCode]['arguments']
if len(roleList) > 0 :
for item in roleList :
if '?' in item:
item = item[1:]
if item not in args :
args.append(item)
subClasses = current_vn.subclasses(className)
for subClassName in subClasses :
cCode = get_vn_code(subClassName)
stCode = vn_standard(cCode)
stSubClassName = vn_standard(subClassName)
if stCode not in vn_index :
print(_version + " , " + stSubClassName)
vn_index[stCode] = {}
vn_index[stCode]['name'] = stSubClassName
vn_index[stCode]['provenance'] = _version
if outLog is not None :
outLog.write(_version + "," + stSubClassName+ "," + stCode + "\n")
outLog.flush()
process_vn_lemmas(subClassName, cCode)
process_vn_thematic_roles(subClassName, cCode)
roleList = process_vn_frames(subClassName, cCode)
if 'arguments' not in vn_index[cCode] :
                vn_index[cCode]['arguments'] = []
args = vn_index[stCode]['arguments']
if len(roleList) > 0 :
for item in roleList :
if '?' in item:
item = item[1:]
if item not in args :
args.append(item)
# Ingests Verbnet and Propbank
# (1) Verbnet (3.4, 3.3 and 3.2)
# (2) Propbank (from NLTK)
# (3) Mappings from PB to VN from Semlink
def process_ulkb_mappings(_output: str, _namespace = "ULKB" , _log = True) :
global outputFile, outLog, processedClasses, vn, provenance
outLog= open("/Users/rosariouceda-sosa/Downloads/UL_KB_LOG.txt", "w")
#STEP 1 -- READ THE VERBNET NAMES AND ROLES
print("-----> PROCESSING VERBNET 3.4 ")
read_verbnet_index( _namespace, vn_dict["verbnet3.4"], "verbnet3.4")
print("-----> PROCESSING VERBNET 3.3")
read_verbnet_index( _namespace, vn_dict["verbnet3.3"], "verbnet3.3")
print("-----> PROCESSING VERBNET 3.2")
read_verbnet_index( _namespace, vn_dict["verbnet3.2"], "verbnet3.2")
#STEP 2 -- BRING PROPBANK
#_namespace = "UL_PB"
print("-----> PROCESSING PROPBANK")
process_propbank(_namespace)
#STEP 3 -- From Semlink, depends on Propbank to fill pbIndex
print("-----> PROCESSING SEMLINK 2")
process_semlink_2()
print("-----> PROCESSING SEMLINK 1.2")
process_semlink_1_2()
#Process all mappings at this time if there are any left
print("-----> PROCESSING MAPPINGS")
process_mappings()
outputFile = open(_output, "w")
for pb_verb in map_index :
mapping = map_index[pb_verb]
for key in mapping:
vn_code = get_vn_code(key)
vn_args = vn_index[vn_code]['arguments']
mapping_args = mapping[key]['arguments']
mapping_vn_args = []
for arg in mapping_args :
mapping_vn_args.append(mapping_args[arg]['vnArg'])
not_matched = unmatched_roles(vn_args, mapping_vn_args)
wrongly_matched = unmatched_roles(mapping_vn_args, vn_args)
outputFile.write(pb_verb + "," +
key +"," +
mapping[key]['provenance'] + "," +
vn_index[vn_code]['provenance'] + "," +
str(mapping_args).replace(",", ";") + "," +
str(not_matched).replace(",", ";") + "," +
str(wrongly_matched).replace(",", ";") + "," +
str(vn_args).replace(",", ";") +
"\n")
outputFile.close()
    #Write to json file to be ingested in the graph
jsonFile = _output.replace(".txt", ".json")
with open(jsonFile, 'w') as outfile:
json.dump(map_index, outfile)
outfile.close()
print("-----> DONE")
if __name__ == '__main__':
workDir = "/Users/rosariouceda-sosa/Downloads/cleanULKB/"
outFileName = "/Users/rosariouceda-sosa/Downloads/ULKB_UNIFIED.txt"
namespace = "UL_KB" # THE GENERAL NAMESPACE
inputGroupings = VN_DIR + "otherdata/all_verbnet_grouping.json"
# INGESTION PROCESS
process_ulkb_mappings(outFileName)
#INDIVIDUAL TESTS
#print(query_propbank_roles("make_up.08"))
if outLog is not None :
outLog.close()
# for vn_Class in vnClassToRDF :
# if vn_Class not in processedGroupingsVN :
# listFile.write(vn_Class + "," + "NO GROUP MAPPING" + "\n")
# listFile.close()
#SIMPLE TEST -- GET ALL CLASSES
#all_verb_classes = get_all_classes()
#for verbClass in all_verb_classes:
# print(verbClass)
#SIMPLE TEST - GET INFO FOR A LEMMA
#sampleClasses = get_classes_for('leave')
#for className in sampleClasses :
# v = vn.vnclass(className)
# print("CLASS = " + className)
# print('\t' + vn.pprint_themroles(className))
# for t in v.findall('THEMROLES/THEMROLE/SELRESTRS/SELRESTR') :
# print('\t' + str(t))
#export_verbnet_instances(workDir + "verbnet_output.txt")
|
StarcoderdataPython
|
3664
|
from unittest.mock import MagicMock, Mock
from i3ipc.aio import Con
import i3_live_tree.tree_serializer # noqa: F401
class MockConSerializer(Mock, Con):
"""Mock a generic i3ipc.aio.Con for serialization purposes
This Mock is meant to ease testing of i3ipc.aio.Con serialization methods,
    which are monkey patched in i3_live_tree.tree_serializer.
In order to achieve this, the mock inherits all the method implementations
of i3ipc.aio.Con, most importantly the serialization ones. However,
whatever is needed for serialization, both properties and methods, is
mocked and can be injected in the constructor, in order to ease the
creation of mock instances.
"""
def __init__(self, *args, name=None, layout=None, focused=False,
nodes=iter(()), **kwargs):
Mock.__init__(self, *args, **kwargs)
self.focused = focused
self.layout = layout
self.name = name
self.nodes = nodes
class MockConNavigation(MagicMock):
"""Mock an i3ipc.aio.Con for navigation purposes
This Mock is meant to be used when testing i3ipc event handlers. It mocks
all the necessary methods and properties, by returning `self` when an
i3ipc.aio.Con instance is needed for the sake of simplicity.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def find_focused(self):
"""Return the focused window"""
return self
def workspace(self):
"""Return the containing workspace"""
return self
class MockI3(Mock):
"""Mock an i3ipc.aio.Connection"""
def __init__(self, *args, tree, **kwargs):
super().__init__(*args, **kwargs)
self.tree = tree
async def get_tree(self):
"""Return the i3 tree asynchronously"""
return self.tree
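# Illustrative sketch (not part of the test suite): one way the mocks above might be wired
# together in a test; the window name, workspace name and layout are made up.
def _example_mock_tree():
    leaf = MockConSerializer(name='editor', layout=None, focused=True)
    root = MockConSerializer(name='workspace 1', layout='splith', nodes=iter((leaf,)))
    return MockI3(tree=root)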
|
StarcoderdataPython
|
98274
|
import xlrd
import typing
import re
# from termcolor import colored
class Controle:
def __init__(self) -> None:
self.__planilha_esocial = None
self.__planilha_candidata = None
self.__relatorio:typing.IO = None
def set_planilha_esocial(self, nome_pasta_esocial:str) -> None:
try:
workbook_esocial = xlrd.open_workbook(nome_pasta_esocial)
self.__planilha_esocial = workbook_esocial.sheet_by_name('Sheet1')
except:
            # print("\n\n--- look, there's that other error, friend --- \n\n")
raise Exception("Falha ao abrir planilha padrão (eSocial).")
def set_planilha_candidata(self, nome_pasta_candidata: str) -> None:
try:
workbook_candidata = xlrd.open_workbook(nome_pasta_candidata)
self.__planilha_candidata = workbook_candidata.sheet_by_name('Mapeamento Final')
except:
raise Exception("Falha ao abrir planilha candidata.")
def set_relatorio(self, relatorio: typing.IO) -> None:
self.__relatorio = relatorio
def escreve_relatorio(self):
total_de_linhas = min([len(list(self.__planilha_esocial.get_rows())),
len(list(self.__planilha_candidata.get_rows()))])
linhas_que_importam = list(range(1, total_de_linhas))
colunas_que_importam = list(range(0,9))
espacosExtrasPorCelula = 5
colunas = "ABCDEFGHIJKLMNOPQRSTUWXYZ"
def get_string_celula(worksheet_esocial, worksheet_arterh, i:int, j:int, ordinal:int):
caracteresExibidosPorCelula = [5, 15, 15, 2, 5, 5, 5, 5, 2000]
conteudo_esocial = ""
conteudo_arterh = ""
try:
conteudo_esocial = int(worksheet_esocial.cell(i, j).value)
except:
conteudo_esocial = str(worksheet_esocial.cell(i, j).value)
finally:
conteudo_esocial = str(conteudo_esocial)
conteudo_esocial = re.sub(r"\W", "", conteudo_esocial)[0:caracteresExibidosPorCelula[ordinal]]\
.ljust(caracteresExibidosPorCelula[ordinal]+espacosExtrasPorCelula, ' ')
try:
conteudo_arterh = int(worksheet_arterh.cell(i, j).value)
except:
conteudo_arterh = str(worksheet_arterh.cell(i, j).value)
finally:
conteudo_arterh = str(conteudo_arterh)
conteudo_arterh = re.sub(r"\W", "", conteudo_arterh)[0:caracteresExibidosPorCelula[ordinal]]\
.ljust(caracteresExibidosPorCelula[ordinal]+espacosExtrasPorCelula, ' ')
return str(conteudo_esocial), str(conteudo_arterh)
for i in linhas_que_importam:
for ordinal, j in enumerate(colunas_que_importam):
conteudo_esocial, conteudo_arterh = get_string_celula(self.__planilha_esocial,
self.__planilha_candidata, i, j, ordinal)
if conteudo_esocial != conteudo_arterh:
self.__relatorio.write(f"{colunas[j]}{i+1}")
self.__relatorio.write("||")
self.__relatorio.write("\n")
for ordinal, j in enumerate(colunas_que_importam):
conteudo_esocial, conteudo_arterh = get_string_celula(self.__planilha_esocial,
self.__planilha_candidata, i, j, ordinal)
if conteudo_esocial != conteudo_arterh:
self.__relatorio.write(f"{conteudo_esocial}")
self.__relatorio.write("||")
self.__relatorio.write("\n")
for ordinal, j in enumerate(colunas_que_importam):
conteudo_esocial, conteudo_arterh = get_string_celula(self.__planilha_esocial,
self.__planilha_candidata, i, j, ordinal)
if conteudo_esocial != conteudo_arterh:
self.__relatorio.write(f"{conteudo_arterh}")
self.__relatorio.write("||")
self.__relatorio.write("\n")
        # I don't have control over the report, so I don't close it at this point of the execution.
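# Illustrative sketch (not part of the original script): the intended call order for Controle,
# with made-up file names.
def _example_usage():
    controle = Controle()
    controle.set_planilha_esocial('esocial.xls')
    controle.set_planilha_candidata('candidata.xls')
    with open('relatorio.txt', 'w') as relatorio:
        controle.set_relatorio(relatorio)
        controle.escreve_relatorio()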
|
StarcoderdataPython
|
3284672
|
<reponame>smbapps/chinup
from __future__ import absolute_import, unicode_literals
from collections import OrderedDict
import logging
import threading
from .lowlevel import batch_request
from .conf import settings
from .util import get_proof
logger = logging.getLogger(__name__)
_threadlocals = threading.local()
class ChinupQueue(object):
"""
List of pending Chinups with a common app token.
"""
def __new__(cls, app_token, **kwargs):
try:
qs = _threadlocals.chinup_queues
except AttributeError:
qs = _threadlocals.chinup_queues = {}
try:
q = qs[app_token]
except KeyError:
            # object.__new__() must not receive the constructor arguments;
            # __init__() consumes app_token/app_secret itself.
            q = qs[app_token] = super(ChinupQueue, cls).__new__(cls)
q.chinups = []
return q
def __init__(self, app_token, app_secret=None):
self.app_token = app_token
self.appsecret_proof = (get_proof(key=app_secret, msg=app_token)
if app_secret else None)
# self.chinups set in __new__ for per-token singleton
def __repr__(self):
return '<{0.__class__.__name__} id={1} len={2} app_token={0.app_token}>'.format(
self, id(self), len(self.chinups))
def append(self, chinup, dedup=None):
"""
Adds chinup to the queue.
"""
logger.debug("Queuing %r", chinup)
self.chinups.append(chinup)
def sync(self, caller=None):
"""
Builds and sends batch request, then populates Chinup responses.
"""
if caller:
assert caller in self.chinups
# Take the existing queue from self.chinups. This is the max we will
# try to accomplish in this sync, even if more are added during
# processing (this can happen in chinup callback, or for paged
# responses).
chinups, self.chinups = self.chinups, []
# Run prepare_batch() over the entire queue once before starting on
# batches. This is an opportunity to replace users with tokens the most
# efficiently, for example.
if chinups:
chinups, _ = chinups[0].prepare_batch(chinups)
# Deduplicate to get the list of unique chinups.
if settings.DEDUP:
chinups, dups = self.dedup(chinups)
else:
dups = None
self._sync(chinups, caller)
# Reduplicate the responses into the dups.
if dups:
chinups = self.redup(chinups, dups)
if caller and not caller.completed:
# Ugh, this means we timed out without making progress.
caller.exception = QueueTimedOut("Couldn't make enough progress to complete request.")
# Drop completed chinups from the queue to prevent clogging with
# completed chinups. Put them on the front of the queue, rather than
# replacing it entirely, in case there were callbacks (in the response
# setter) that added to self.chinups.
self.chinups[:0] = [cu for cu in chinups if not cu.completed]
def _sync(self, chinups, caller):
# Some requests in the batch might time out rather than completing.
# Continue batching until the calling chinup is satisfied, or until we
# stop making progress.
progress = 1
while chinups and progress and not (caller and caller.completed):
# Ask the first chinup to process the chinups into a list of
# request dicts. This is a classmethod, but calling via the first
# chinup doesn't require us to know if Chinup has been subclassed.
chinups, requests = chinups[0].prepare_batch(chinups)
# It's possible that prepare_batch() decided all the chinups
# were invalid, so make sure that we actually have requests.
if not requests:
assert not chinups
logger.debug("No requests in batch after calling make_request_dicts()")
break
# Make the batch request.
assert len(requests) <= 50
logger.log(logging.INFO if settings.DEBUG_REQUESTS else logging.DEBUG,
"Making batch request len=%s/%s queue=%s",
len(requests), len(chinups), id(self))
responses = batch_request(self.app_token, requests,
appsecret_proof=self.appsecret_proof)
# Populate responses into chinups.
for cu, r in zip(chinups, responses):
# Don't set response for timeouts, so they'll be automatically
# tried again when .data is accessed.
if r is not None:
cu.response = r
logger.log(logging.INFO if settings.DEBUG_REQUESTS else logging.DEBUG,
'%s%r', 'TIMEOUT ' if r is None else '', cu)
# Check for progress.
progress = sum(1 for cu in chinups if cu.completed)
# Filter out the completed chinups for the next pass.
chinups = [cu for cu in chinups if not cu.completed]
@classmethod
def dedup(cls, chinups):
"""
Returns (uniques, dups) where the latter is a dict of lists indexed by
the former.
"""
dups = OrderedDict()
for c in chinups:
clist = dups.setdefault(c, [])
if clist:
logger.debug("Dedup %r", c)
clist.append(c)
uniques = [clist[0] for clist in dups.values()]
logger.debug("Deduping reduced from %s to %s.", len(chinups), len(uniques))
return uniques, dups
@classmethod
def redup(cls, chinups, dups):
"""
Returns full suite of chinups, integrating dups by setting their responses.
"""
for cu in chinups:
if not cu.completed:
continue
for i, dup in enumerate(dups[cu]):
if i == 0:
assert dup is cu
else:
assert not dup.completed
dup.response = cu.response
return [cu for v in dups.values() for cu in v]
def __getstate__(self):
d = dict(self.__dict__)
del d['chinups']
return d
def __getnewargs__(self):
return (self.app_token,)
def __setstate__(self, d):
self.__dict__.update(d)
def delete_queues():
try:
del _threadlocals.chinup_queues
except AttributeError:
pass
__all__ = ['ChinupQueue', 'delete_queues']
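# Illustrative sketch (not part of the package): ChinupQueue is designed as a per-thread,
# per-token singleton, so constructing it twice with the same (made-up) token yields the
# same underlying queue object.
def _example_queue_singleton():
    first = ChinupQueue('example-app-token')
    second = ChinupQueue('example-app-token')
    return first is second  # True within a single thread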
|
StarcoderdataPython
|
176669
|
<reponame>Grieverwzn/network-design-qap<gh_stars>1-10
import time
import multiprocessing as mp
import numpy as np
import sys
from queue import PriorityQueue
from assignment import *
sys.setrecursionlimit(10000)
class TreeNode:  # This is the node for the tree search
def __init__(self, nb_unassigned_buildings, assigned_locations, assigned_buildings, location_status, building_status):
self.lower_value = None
self.upper_value = None
self.assignment_mat_1 = None
self.assignment_mat_2 = None
self.nb_unassigned_buildings = nb_unassigned_buildings
self.assigned_locations = assigned_locations
self.assigned_buildings = assigned_buildings
self.dest_location_assignment_status = location_status # True: available, False: assigned
self.dest_building_assignment_status = building_status
class BAB:
def __init__(self, instance, args):
self.instance = instance
self.args = args
self.LB_BFS, self.UB_BFS = None, None
self.best_solution_BFS_1, self.best_solution_BFS_2 = None, None
self.best_solution_LCS_1, self.best_solution_LCS_2 = None, None
# for quick access
self.branch_list = instance.branch_list
self.target_relative_gap = args['target_relative_gap']
self.M = args['M']
self.time_limit = args['time_limit']
self.start_time_breadth = 0.0
self.valid_time_breadth = 0.0
self.start_time_lb = 0.0
self.valid_time_lb = 0.0
self.nb_of_orig_building = instance.nb_of_orig_building
self.nb_of_orig_location = instance.nb_of_orig_location
self.nb_of_dest_building = instance.nb_of_dest_building
self.nb_of_dest_location = instance.nb_of_dest_location
self.flow_mat = instance.flow_mat
self.trans_cost_mat = instance.trans_cost_mat
self.build_cost_orig_mat = instance.build_cost_orig_mat
self.build_cost_dest_mat = instance.build_cost_dest_mat
def calculateGLB(self, node):
GLB_cost_mat = np.zeros([self.nb_of_orig_building, self.nb_of_orig_location])
for branch in self.branch_list:
trans_cost_array_temp = branch.trans_cost_array[node.dest_location_assignment_status]
flow_array_temp = branch.flow_array[node.dest_building_assignment_status]
value = sum(np.sort(trans_cost_array_temp) * np.sort(flow_array_temp)[::-1])
assigned_cost = sum(branch.trans_cost_array[node.assigned_locations] * branch.flow_array[node.assigned_buildings])
cost_ik = value + assigned_cost
GLB_cost_mat[branch.i_ind,branch.k_ind] = cost_ik
lower_solution_1 = Hungarian_1(GLB_cost_mat + self.build_cost_orig_mat)
assignment_mat_1 = np.zeros([self.nb_of_orig_building, self.nb_of_orig_location])
assignment_mat_1[lower_solution_1['building_ind'], lower_solution_1['location_ind']] = 1
build_cost_dest_mat_multiplier = self.build_cost_dest_mat.copy()
build_cost_dest_mat_multiplier[node.assigned_locations,node.assigned_buildings] -= self.M
lower_solution_2 = Hungarian_2(build_cost_dest_mat_multiplier)
assignment_mat_2 = np.zeros([self.nb_of_dest_location, self.nb_of_dest_building])
assignment_mat_2[lower_solution_2['location_ind'], lower_solution_2['building_ind']] = 1
node.assignment_mat_1 = assignment_mat_1
node.assignment_mat_2 = assignment_mat_2
lv = lower_solution_1['value'] + lower_solution_2['value'] + len(node.assigned_buildings) * self.M
uv = np.sum(self.flow_mat * np.matmul(np.matmul(assignment_mat_1, self.trans_cost_mat), assignment_mat_2)) + \
np.sum(self.build_cost_orig_mat * assignment_mat_1) + np.sum(self.build_cost_dest_mat * assignment_mat_2)
node.lower_value = lv
node.upper_value = uv
def solveNode(self, live_node):
tree_nodes = []
if live_node.assigned_buildings:
live_building_id = live_node.assigned_buildings[-1] + 1
else:
live_building_id = 0
dest_building_assignment_status = live_node.dest_building_assignment_status.copy()
dest_building_assignment_status[live_building_id] = False
assigned_buildings = live_node.assigned_buildings.copy()
assigned_buildings.append(live_building_id)
for i in range(self.nb_of_dest_location):
dest_location_assignment_status = live_node.dest_location_assignment_status.copy()
if dest_location_assignment_status[i]:
dest_location_assignment_status[i] = False
assigned_locations = live_node.assigned_locations.copy()
assigned_locations.append(i)
tree_node = TreeNode(live_node.nb_unassigned_buildings-1,
assigned_locations,
assigned_buildings,
dest_location_assignment_status,
dest_building_assignment_status)
self.calculateGLB(tree_node)
tree_nodes.append(tree_node)
return tree_nodes
def solveNodes(self, nodes):
child_node_list = []
lb, ub = np.inf, self.UB_BFS
best_node = None
for live_node in nodes:
if time.time() > self.valid_time_breadth: break
tree_nodes = self.solveNode(live_node)
for tree_node in tree_nodes:
if tree_node.upper_value < ub:
ub = tree_node.upper_value
best_node = tree_node
if tree_node.nb_unassigned_buildings > 1:
if tree_node.lower_value <= ub:
if tree_node.lower_value < lb: lb = tree_node.lower_value
child_node_list.append(tree_node)
return child_node_list, lb, ub, best_node
def createRoot(self):
root = TreeNode(self.nb_of_dest_building, [], [], [True] * self.nb_of_dest_location, [True] * self.nb_of_dest_building)
self.calculateGLB(root)
return root
def finishCurrentIter_BFS(self, branch_iter, number_of_nodes, solver_status):
GAP = (self.UB_BFS - self.LB_BFS) / self.UB_BFS
print(f'**BNB-BF iter {branch_iter}: Best Lower bound = ', self.LB_BFS)
print(f'**BNB-BF iter {branch_iter}: Best Upper bound = ', self.UB_BFS)
print(f'**BNB-BF iter {branch_iter}: GAP = ', GAP)
if number_of_nodes == 0:
print('**BNB-BF branch and bound complete')
solver_status.value = 1
return True
if GAP <= self.target_relative_gap:
print('**BNB-BF target relative gap reached')
solver_status.value = 1
return True
if time.time() >= self.valid_time_breadth:
print('**BNB-BF time limit reached')
return True
return False
def solve_breadth(self, solver_status, lock):
if self.args['threads'] == -1:
cores = mp.cpu_count()
else:
cores = self.args['threads']
p = mp.Pool(processes=cores)
self.start_time_breadth = time.time()
self.valid_time_breadth = self.start_time_breadth + self.time_limit
root = self.createRoot()
self.LB_BFS, self.UB_BFS = root.lower_value, root.upper_value
task_list = [[root]] + [[] for _ in range(cores-1)]
number_of_nodes = 1
branch_iter = 0
while True:
# new iter
branch_iter += 1
print(f'**BNB-BF iter {branch_iter}: nodes {number_of_nodes}')
# solve nodes
result_list = p.map(self.solveNodes, task_list)
# update lb and ub
result_with_new_lb = min(result_list, key=lambda x: x[1])
new_lb = result_with_new_lb[1]
if self.LB_BFS < new_lb < np.inf:
self.LB_BFS = new_lb
result_with_new_ub = min(result_list, key=lambda x: x[2])
new_ub = result_with_new_ub[2]
if new_ub < self.UB_BFS:
self.UB_BFS = new_ub
self.best_solution_BFS_1 = result_with_new_ub[3].assignment_mat_1
self.best_solution_BFS_2 = result_with_new_ub[3].assignment_mat_2
# check child nodes
all_node_list = []
for result in result_list:
for node in result[0]:
if node.lower_value < self.UB_BFS:
all_node_list.append(node)
number_of_nodes = len(all_node_list)
# end current iter
t1 = time.time()
print(f'**BNB-BF iter {branch_iter}: elapsed time {t1 - self.start_time_breadth}')
stop_flag = self.finishCurrentIter_BFS(branch_iter, number_of_nodes, solver_status)
if stop_flag: break
# prepare next iter
ave_load = int(np.ceil(number_of_nodes / cores))
            task_list = [all_node_list[i * ave_load:(i + 1) * ave_load] for i in range(cores - 1)]
            task_list.append(all_node_list[(cores - 1) * ave_load:])
lock.acquire()
print(f'**BNB-BF Best Solution')
print('assignment matrix 1')
print(self.best_solution_BFS_1)
print('assignment matrix 2')
print(self.best_solution_BFS_2)
lock.release()
def solve_lb(self, solver_status, lock):
self.start_time_lb = time.time()
self.valid_time_lb = self.start_time_lb + self.time_limit
root = self.createRoot()
lb, ub = root.lower_value, root.upper_value
pq = PriorityQueue()
node_no = 0
pq.put((root.lower_value, node_no, root))
node_no += 1
while (pq.queue):
if solver_status.value == 1:
print('--BNB-LB stopped as BNB-BF has completed')
break
if time.time() > self.valid_time_lb:
print('--BNB-LB time limit reached')
break
lower_value, _, live_node = pq.get()
if lower_value > ub:
print('--BNB-LB branch and bound complete')
break
lb = lower_value
tree_nodes = self.solveNode(live_node)
for tree_node in tree_nodes:
if tree_node.upper_value < ub:
ub = tree_node.upper_value
self.best_solution_LCS_1 = tree_node.assignment_mat_1
self.best_solution_LCS_2 = tree_node.assignment_mat_2
if tree_node.nb_unassigned_buildings > 1:
if tree_node.lower_value <= ub:
pq.put((tree_node.lower_value, node_no, tree_node))
node_no += 1
gap = (ub - lb) / ub
print(f'--BNB-LB Best Lower bound = {lb}, Best Upper bound = {ub}, GAP = {gap}')
lock.acquire()
print(f'**BNB-LB Best Solution')
print('assignment matrix 1')
print(self.best_solution_LCS_1)
print('assignment matrix 2')
print(self.best_solution_LCS_2)
lock.release()
def solve(self):
solver_status = mp.Manager().Value('i',0)
lock = mp.Manager().Lock()
p1 = mp.Process(target=self.solve_breadth, args=(solver_status,lock))
p2 = mp.Process(target=self.solve_lb, args=(solver_status,lock))
p1.start()
p2.start()
p1.join()
p2.join()
# self.solve_breadth(solver_status)
# self.solve_lb(0,0)
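# Illustrative sketch (not part of the original solver): the keys the constructor and the two
# search strategies read from the args dictionary, with made-up values.
_EXAMPLE_ARGS = {
    'target_relative_gap': 0.01,  # stop once (UB - LB) / UB drops below this
    'M': 1e6,                     # big-M used to pin already-assigned pairs in the Hungarian step
    'time_limit': 600,            # seconds allowed for each search strategy
    'threads': -1,                # -1 lets solve_breadth use every available core
}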
|
StarcoderdataPython
|
14241
|
<gh_stars>1-10
from enum import Enum
class TestShopNames(Enum):
AMAZON = ("AMAZON",)
TARGET = ("TARGET",)
WALMART = ("WALMART",)
TJMAXX = ("TJMAXX",)
GOOGLE = ("GOOGLE",)
NEWEGG = ("NEWEGG",)
HM = ("HM",)
MICROCENTER = ("MICROCENTER",)
FASHIONNOVA = ("FASHIONNOVA",)
SIXPM = ("SIXPM",)
POSHMARK = ("POSHMARK",)
MACYS = ("MACYS",)
ASOS = ("ASOS",)
JCPENNEY = ("JCPENNEY",)
KOHLS = "KOHLS"
FOOTLOCKER = ("FOOTLOCKER",)
BESTBUY = ("BESTBUY",)
EBAY = ("EBAY",)
KMART = ("KMART",)
BIGLOTS = ("BIGLOTS",)
BURLINGTON = ("BURLINGTON",)
MVMTWATCHES = ("MVMTWATCHES",)
BOOHOO = ("BOOHOO",)
FOREVER21 = ("FOREVER21",)
STYLERUNNER = ("STYLERUNNER",)
LEVI = ("LEVI",)
ZARA = ("ZARA",)
NORDSTROM = "NORDSTROM"
NORDSTROMRACK = "NORDSTROMRACK"
HAUTELOOK = "HAUTELOOK"
SAKSFIFTHAVENUE = "SAKSFIFTHAVENUE"
EXPRESS = "EXPRESS"
CHARLOTTERUSSE = "CHARLOTTERUSSE"
ALDO = "ALDO"
SHOPQUEEN = "SHOPQUEEN"
NIKE = "NIKE"
ADIDAS = "ADIDAS"
DICKSSPORTINGGOODS = "DICKSSPORTINGGOODS"
BINK = "BINK"
|
StarcoderdataPython
|
4841155
|
from collections import Counter
from scattertext.emojis.EmojiExtractor import extract_emoji
from scattertext.features.FeatsFromSpacyDoc import FeatsFromSpacyDoc
class FeatsFromSpacyDocOnlyEmoji(FeatsFromSpacyDoc):
'''
Strips away everything but emoji tokens from spaCy
'''
def get_feats(self, doc):
'''
Parameters
----------
        doc : spaCy Doc

        Returns
        -------
        Counter, emoji -> count
'''
return Counter(extract_emoji(str(doc)))
|
StarcoderdataPython
|
3235183
|
__author__ = "<NAME>"
__copyright__ = "(C) 2021 Coalfire"
__contributors__ = ["<NAME>"]
__status__ = "Production"
__license__ = "MIT"
from ...API import API
class ActuatorAPI(API):
def __init__(self, host, api_key, verify_ssl, timeout, headers, user_agent, cert, debug):
"""
Initialize a ThreadFix Pro API instance.
:param host: The URL for the ThreadFix Pro server. (e.g., http://localhost:8080) NOTE: must include http://
:param api_key: The API key generated on the ThreadFix Pro API Key page.
:param verify_ssl: Specify if API requests will verify the host's SSL certificate, defaults to true.
:param timeout: HTTP timeout in seconds, default is 30.
:param headers: Headers are done automatically so feel free to leave this as None unless you really need custom headers
:param user_agent: HTTP user agent string, default is "threadfix_pro_api/[version]".
:param cert: You can also specify a local cert to use as client side certificate, as a single file (containing
        the private key and the certificate) or as a tuple of both files' paths
:param debug: Prints requests and responses, useful for debugging.
"""
super().__init__(host, api_key, verify_ssl, timeout, headers, user_agent, cert, debug)
def get_version_info(self, page):
"""
        Gets the given ThreadFix actuator page (the request goes to /actuator/{page}).
        :param page: name of the actuator page to retrieve.
"""
return super().request('GET', f'/actuator/{page}')
|
StarcoderdataPython
|
1624834
|
<gh_stars>0
"""Backend class."""
import numpy as np
class Backend():
"""Defaults are currently ARTS observing properties."""
def __init__(self,
n_channels: int = 1536,
channel_bandwidth: float = 0.1953125, # MHz
fmin: float = 1219.700927734375, # MHz
sampling_time: float = 0.00008192, # second
samples_per_second: int = 12500):
"""Initiale of Backend class.
Parameters
----------
n_channels:int = 1536,
channel_bandwidth:float = 0.1953125, # MHz
fmin:float = 1219.700927734375, # MHz
sampling_time:float = 0.00008192, # second
samples_per_second:int = 12500
"""
self.n_channels = n_channels
self.channel_bandwidth = channel_bandwidth
self.fmin = fmin
self.fmax = self.fmin+self.n_channels*self.channel_bandwidth # MHz
self.sampling_time = sampling_time
self.samples_per_second = samples_per_second
self.freq_to_index = lambda frequency : int((frequency-self.fmin)/self.channel_bandwidth)
self.next_freq = lambda i : self.fmin + i * self.channel_bandwidth
self.frequencies = np.array([self.next_freq(i) for i in range(self.n_channels)])
self.freq_indices = np.array([self.freq_to_index(f) for f in self.frequencies])
self.frequency_range_to_n_channels = lambda range : np.ceil(range/self.channel_bandwidth).astype(int)
|
StarcoderdataPython
|
4819268
|
#!/usr/bin/python
import numpy as np
import json
import csv
from pyspark import SparkConf, SparkContext
conf = (SparkConf()
.setMaster("local[*]")
.setAppName("My app")
.set("spark.executor.memory", "8g")
.set("spark.executor.cores", "8"))
sc = SparkContext(conf = conf)
file1 = '/home/sir/Neighborhoods/Data/Test/2014-aggregated/departures/csv-out/lines-of-fit.json'
outfile1 = '/home/sir/Neighborhoods/Data/Test/2014-aggregated/departures/csv-out/ranked-differences.csv'
data1 = json.loads(open(file1).read())[1:]
data = sc.parallelize(data1)
#Compare each poly, point by point, from 0 to range. Basically we just take the squared
#difference calculated at each point and add them up; that gives us the 'distance'.
def comparePolys(poly1, poly2, r):
first = np.array(poly1)
second = np.array(poly2)
a = np.poly1d(first)
b = np.poly1d(second)
diff = 0
for i in range(r):
diff += np.square(a(i) - b(i))
return diff
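# Illustrative sketch (not part of the original job): with toy coefficient lists (highest power
# first, as np.poly1d expects), x vs. x + 1 over the first 3 sample points gives 1 + 1 + 1 = 3.0.
_COMPARE_POLYS_EXAMPLE = comparePolys([1.0, 0.0], [1.0, 1.0], 3)  # == 3.0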
def calcDifferences(data):
differences = []
for i in range(len(data)):
for j in range(i+1, len(data)):
diff = comparePolys(data[i][2], data[j][2], 96)
differences.append([str(data[j][0])+"-"+str(data[j][1]), str(data[i][0])+'-'+str(data[i][1]), diff])
return differences
weekdayGrouping = data.map(lambda x: (x[1], [x]))
weekdayGrouping = weekdayGrouping.reduceByKey(lambda x, y: x+y)
differences = weekdayGrouping.map(lambda x: calcDifferences(x[1]))
sortedDifferences = differences.map(lambda x: sorted(x, key=lambda y: y[2]))
mixedDifferences = sorted(sortedDifferences.flatMap(lambda x: x).collect(), key=lambda y: y[2])
#print out to a file
with open(outfile1, 'w+') as f:
writer = csv.writer(f)
writer.writerow(["tract-weekday-1", "tract-weekday-1", "squared-difference"])
for record in mixedDifferences:
writer.writerow(record)
|
StarcoderdataPython
|
3342377
|
<filename>gnes/indexer/vector/annoy.py
import os
from typing import List, Tuple
import numpy as np
from ..base import BaseVectorIndexer
from ..key_only import ListKeyIndexer
class AnnoyIndexer(BaseVectorIndexer):
lock_work_dir = True
def __init__(self, num_dim: int, data_path: str, metric: str = 'angular', n_trees=10, *args, **kwargs):
super().__init__(*args, **kwargs)
self.num_dim = num_dim
self.work_dir = data_path
self.indexer_file_path = os.path.join(self.work_dir, self.internal_index_path)
self.metric = metric
self.n_trees = n_trees
self._key_info_indexer = ListKeyIndexer()
def post_init(self):
from annoy import AnnoyIndex
self._index = AnnoyIndex(self.num_dim, self.metric)
try:
self._index.load(self.indexer_file_path)
except:
self.logger.warning('fail to load model from %s, will create an empty one' % self.indexer_file_path)
def add(self, keys: List[Tuple[int, int]], vectors: np.ndarray, weights: List[float], *args, **kwargs):
last_idx = self._key_info_indexer.size
if len(vectors) != len(keys):
raise ValueError('vectors length should be equal to doc_ids')
if vectors.dtype != np.float32:
raise ValueError("vectors should be ndarray of float32")
for idx, vec in enumerate(vectors):
self._index.add_item(last_idx + idx, vec)
self._key_info_indexer.add(keys, weights)
def query(self, keys: 'np.ndarray', top_k: int, *args, **kwargs) -> List[List[Tuple]]:
self._index.build(self.n_trees)
if keys.dtype != np.float32:
raise ValueError('vectors should be ndarray of float32')
res = []
for k in keys:
ret, relevance_score = self._index.get_nns_by_vector(k, top_k, include_distances=True)
chunk_info = self._key_info_indexer.query(ret)
res.append([(*r, -s) for r, s in zip(chunk_info, relevance_score)])
return res
@property
def size(self):
return self._index.get_n_items()
def __getstate__(self):
d = super().__getstate__()
self._index.save(self.indexer_file_path)
return d
|
StarcoderdataPython
|
194616
|
<reponame>quantummind/quantum
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that specifically target tfq_inner_product."""
import copy
import numpy as np
from absl.testing import parameterized
import tensorflow as tf
import cirq
from tensorflow_quantum.core.ops.math_ops import fidelity_op
from tensorflow_quantum.python import util
class FidelityTest(tf.test.TestCase, parameterized.TestCase):
"""Tests tfq_fidelity_op."""
@parameterized.parameters([
{
'n_qubits': 5,
'batch_size': 1,
'inner_dim_size': 5
},
{
'n_qubits': 5,
'batch_size': 10,
'inner_dim_size': 1
},
{
'n_qubits': 10,
'batch_size': 10,
'inner_dim_size': 2
},
{
'n_qubits': 5,
'batch_size': 10,
'inner_dim_size': 5
},
])
def test_correctness_with_symbols(self, n_qubits, batch_size,
inner_dim_size):
"""Tests that inner_product works with symbols."""
symbol_names = ['alpha', 'beta', 'gamma']
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
qubits, symbol_names, batch_size)
other_batch = [
util.random_circuit_resolver_batch(qubits, inner_dim_size)[0]
for i in range(batch_size)
]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
programs = util.convert_to_tensor(circuit_batch)
other_programs = util.convert_to_tensor(other_batch)
symbol_names = tf.convert_to_tensor(symbol_names,
dtype=tf.dtypes.string)
symbol_values = tf.convert_to_tensor(symbol_values_array)
out = fidelity_op.fidelity(programs, symbol_names, symbol_values,
other_programs)
out_arr = np.empty((batch_size, inner_dim_size), dtype=np.complex64)
for i in range(batch_size):
final_circuit = cirq.resolve_parameters(circuit_batch[i],
resolver_batch[i])
final_wf = cirq.final_state_vector(final_circuit)
for j in range(inner_dim_size):
internal_wf = cirq.final_state_vector(other_batch[i][j])
out_arr[i][j] = np.abs(np.vdot(final_wf, internal_wf))**2
self.assertAllClose(out, out_arr, atol=1e-5)
self.assertDTypeEqual(out, tf.float32.as_numpy_dtype)
@parameterized.parameters([
{
'n_qubits': 5,
'batch_size': 1,
'inner_dim_size': 5
},
{
'n_qubits': 5,
'batch_size': 2,
'inner_dim_size': 1
},
{
'n_qubits': 10,
'batch_size': 3,
'inner_dim_size': 2
},
{
'n_qubits': 5,
'batch_size': 10,
'inner_dim_size': 5
},
])
def test_correctness_without_symbols(self, n_qubits, batch_size,
inner_dim_size):
"""Tests that inner_product works without symbols."""
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, _ = \
util.random_circuit_resolver_batch(
qubits, batch_size)
other_batch = [
util.random_circuit_resolver_batch(qubits, inner_dim_size)[0]
for i in range(batch_size)
]
programs = util.convert_to_tensor(circuit_batch)
other_programs = util.convert_to_tensor(other_batch)
symbol_names = tf.convert_to_tensor([], dtype=tf.dtypes.string)
symbol_values = tf.convert_to_tensor([[] for _ in range(batch_size)])
out = fidelity_op.fidelity(programs, symbol_names, symbol_values,
other_programs)
out_arr = np.empty((batch_size, inner_dim_size), dtype=np.complex64)
for i in range(batch_size):
final_wf = cirq.final_state_vector(circuit_batch[i])
for j in range(inner_dim_size):
internal_wf = cirq.final_state_vector(other_batch[i][j])
out_arr[i][j] = np.abs(np.vdot(final_wf, internal_wf))**2
self.assertAllClose(out, out_arr, atol=1e-5)
self.assertDTypeEqual(out, tf.float32.as_numpy_dtype)
def test_correctness_empty(self):
"""Tests the fidelity with empty circuits."""
empty_circuit = util.convert_to_tensor([cirq.Circuit()])
empty_symbols = tf.convert_to_tensor([], dtype=tf.dtypes.string)
empty_values = tf.convert_to_tensor([[]])
other_program = util.convert_to_tensor([[cirq.Circuit()]])
out = fidelity_op.fidelity(empty_circuit, empty_symbols, empty_values,
other_program)
expected = np.array([[1.0]], dtype=np.complex64)
self.assertAllClose(out, expected)
self.assertDTypeEqual(out, tf.float32.as_numpy_dtype)
qubit = cirq.GridQubit(0, 0)
non_empty_circuit = util.convert_to_tensor(
[cirq.Circuit(cirq.X(qubit))])
empty_symbols = tf.convert_to_tensor([], dtype=tf.dtypes.string)
empty_values = tf.convert_to_tensor([[]])
other_program = util.convert_to_tensor([[cirq.Circuit()]])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'qubits not found'):
fidelity_op.fidelity(non_empty_circuit, empty_symbols, empty_values,
other_program)
@parameterized.parameters([
{
'n_qubits': 5,
'batch_size': 1,
'inner_dim_size': 1
},
{
'n_qubits': 5,
'batch_size': 3,
'inner_dim_size': 1
},
])
def test_tf_gradient_correctness_with_symbols(self, n_qubits, batch_size,
inner_dim_size):
"""Tests that tf.gradient of inner_product works with symbols."""
symbol_names = ['alpha', 'beta', 'gamma']
n_params = len(symbol_names)
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
qubits, symbol_names, batch_size)
other_batch = [0 for i in range(batch_size)]
for i in range(len(other_batch)):
other_batch[i] = copy.deepcopy(circuit_batch)
for j in range(len(other_batch[i])):
other_batch[i][j] = cirq.resolve_parameters(
circuit_batch[i], resolver_batch[i])
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
programs = util.convert_to_tensor(circuit_batch)
other_programs = util.convert_to_tensor(other_batch)
symbol_names_tensor = tf.convert_to_tensor(symbol_names,
dtype=tf.dtypes.string)
symbol_values = tf.convert_to_tensor(symbol_values_array)
with tf.GradientTape() as tape:
tape.watch(symbol_values)
ip = fidelity_op.fidelity(programs, symbol_names_tensor,
symbol_values, other_programs)
out = tape.gradient(ip, symbol_values)
out_arr = np.zeros((batch_size, n_params), dtype=np.complex64)
# dx came from _GRAD_EPS of core/src/adj_util.cc
dx = 5e-3
for i in range(batch_size):
for k, name in enumerate(symbol_names):
if name in resolver_batch[i].param_dict:
new_resolver = copy.deepcopy(resolver_batch[i])
new_resolver.param_dict[name] += dx
final_circuit_p = cirq.resolve_parameters(
circuit_batch[i], new_resolver)
new_resolver = copy.deepcopy(resolver_batch[i])
new_resolver.param_dict[name] -= dx
final_circuit_m = cirq.resolve_parameters(
circuit_batch[i], new_resolver)
final_wf_p = cirq.final_state_vector(final_circuit_p)
final_wf_m = cirq.final_state_vector(final_circuit_m)
# Performs central finite difference.
for j in range(inner_dim_size):
internal_wf = cirq.final_state_vector(other_batch[i][j])
fid_p = cirq.fidelity(final_wf_p, internal_wf)
fid_m = cirq.fidelity(final_wf_m, internal_wf)
grad_fid = 0.5 * (fid_p - fid_m) / dx
out_arr[i][k] += grad_fid
self.assertAllClose(out, out_arr, atol=1e-3)
self.assertDTypeEqual(out, tf.float32.as_numpy_dtype)
@parameterized.parameters([
{
'n_qubits': 5,
'batch_size': 1,
'inner_dim_size': 5
},
{
'n_qubits': 5,
'batch_size': 3,
'inner_dim_size': 2
},
])
def test_tf_gradient_correctness_without_symbols(self, n_qubits, batch_size,
inner_dim_size):
"""Tests that tf.gradient of inner_product works without symbols."""
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, _ = \
util.random_circuit_resolver_batch(
qubits, batch_size)
other_batch = [
util.random_circuit_resolver_batch(qubits, inner_dim_size)[0]
for i in range(batch_size)
]
programs = util.convert_to_tensor(circuit_batch)
other_programs = util.convert_to_tensor(other_batch)
symbol_names = tf.convert_to_tensor([], dtype=tf.dtypes.string)
symbol_values = tf.convert_to_tensor([[] for _ in range(batch_size)])
with tf.GradientTape() as tape:
tape.watch(symbol_values)
ip = fidelity_op.fidelity(programs, symbol_names, symbol_values,
other_programs)
out = tape.gradient(ip, symbol_values)
self.assertAllClose(out, tf.zeros_like(symbol_values), atol=1e-3)
self.assertDTypeEqual(out, tf.float32.as_numpy_dtype)
def test_correctness_no_circuit(self):
"""Test the inner product between no circuits."""
empty_circuit = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_symbols = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
other_program = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)
out = fidelity_op.fidelity(empty_circuit, empty_symbols, empty_values,
other_program)
self.assertShapeEqual(np.zeros((0, 0)), out)
self.assertDTypeEqual(out, tf.float32.as_numpy_dtype)
def test_tf_gradient_correctness_no_circuit(self):
"""Test the inner product grad between no circuits."""
empty_circuit = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_symbols = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
other_program = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)
with tf.GradientTape() as tape:
tape.watch(empty_values)
out = fidelity_op.fidelity(empty_circuit, empty_symbols,
empty_values, other_program)
self.assertShapeEqual(np.zeros((0, 0)), out)
self.assertDTypeEqual(out, tf.float32.as_numpy_dtype)
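# --- Illustrative sketch (not part of the original test suite) ----------------
# test_tf_gradient_correctness_with_symbols above validates the op against a
# central finite difference of the state fidelity.  The helper below restates
# that reference calculation in plain cirq for a single symbol of one circuit
# pair; the step size `dx` and the helper's name are choices made for this
# sketch, not values exposed by fidelity_op itself.
def _finite_difference_fidelity_gradient(circuit, other_circuit, resolver,
                                         symbol_name, dx=5e-3):
    """Central finite difference of |<other|psi(symbol)>|^2 w.r.t. one symbol."""
    resolver_p = copy.deepcopy(resolver)
    resolver_p.param_dict[symbol_name] += dx
    resolver_m = copy.deepcopy(resolver)
    resolver_m.param_dict[symbol_name] -= dx
    wf_p = cirq.final_state_vector(cirq.resolve_parameters(circuit, resolver_p))
    wf_m = cirq.final_state_vector(cirq.resolve_parameters(circuit, resolver_m))
    other_wf = cirq.final_state_vector(other_circuit)
    return 0.5 * (cirq.fidelity(wf_p, other_wf) - cirq.fidelity(wf_m, other_wf)) / dx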
if __name__ == "__main__":
tf.test.main()
# desktop/libs/notebook/src/notebook/connectors/jdbc_vertica.py
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from past.utils import old_div
from librdbms.jdbc import query_and_fetch
from notebook.connectors.jdbc import JdbcApi
from notebook.connectors.jdbc import Assist
import time
import logging
LOG = logging.getLogger(__name__)
class JdbcApiVertica(JdbcApi):
def _createAssist(self, db):
return VerticaAssist(db)
class VerticaAssist(Assist):
cached_data = {} # {key: {time: 123, result: some result}}
freeze_time = 30 # sec
cache_use_stat = {"query": 0, "cache": 0}
def get_databases(self):
cache_key = str(self.db.db_url)
if (
cache_key not in self.cached_data
or time.time() - self.cached_data[cache_key]["time"] > self.freeze_time
):
dbs, description = query_and_fetch(
self.db,
"select schema_name FROM v_catalog.schemata where is_system_schema=0 and schema_name not in ('v_func', 'v_txtindex') order by 1",
)
list_of_db = [db[0] and db[0].strip() for db in dbs]
VerticaAssist.cached_data[cache_key] = {
"time": time.time(),
"result": list_of_db,
}
VerticaAssist.cache_use_stat["query"] += 1
else:
VerticaAssist.cache_use_stat["cache"] += 1
if self.cache_use_stat["cache"] % 5 == 0:
LOG.info(
"Autocomplete data, vertica: "
+ str(self.cache_use_stat["query"])
+ " cache: "
+ str(self.cache_use_stat["cache"])
+ ", cache is used in "
+ "%.2f"
% (
old_div(100
* float(self.cache_use_stat["cache"]), (self.cache_use_stat["query"] + self.cache_use_stat["cache"]))
)
+ "% cases"
)
return self.cached_data[cache_key]["result"]
def get_tables_full(self, database, table_names=[]):
cache_key = str(self.db.db_url) + str(database)
if (
cache_key not in self.cached_data
or time.time() - self.cached_data[cache_key]["time"] > self.freeze_time
):
tables, description = query_and_fetch(
self.db,
"SELECT table_name, '' FROM v_catalog.tables WHERE table_schema='%s' order by 1"
% database,
)
list_of_tables = [
{
"comment": table[1] and table[1].strip(),
"type": "Table",
"name": table[0] and table[0].strip(),
}
for table in tables
]
VerticaAssist.cached_data[cache_key] = {
"time": time.time(),
"result": list_of_tables,
}
VerticaAssist.cache_use_stat["query"] += 1
else:
VerticaAssist.cache_use_stat["cache"] += 1
return self.cached_data[cache_key]["result"]
def get_columns_full(self, database, table):
cache_key = str(self.db.db_url) + str(database) + str(table)
if (
cache_key not in self.cached_data
or time.time() - self.cached_data[cache_key]["time"] > self.freeze_time
):
columns, description = query_and_fetch(
self.db,
"select column_name, data_type, '' from v_catalog.columns where table_schema='%s' and table_name='%s' order by 1"
% (database, table),
)
list_of_columns = [
{
"comment": col[2] and col[2].strip(),
"type": col[1],
"name": col[0] and col[0].strip(),
}
for col in columns
]
VerticaAssist.cached_data[cache_key] = {
"time": time.time(),
"result": list_of_columns,
}
VerticaAssist.cache_use_stat["query"] += 1
else:
VerticaAssist.cache_use_stat["cache"] += 1
return VerticaAssist.cached_data[cache_key]["result"]
def get_sample_data(self, database, table, column=None):
column = column or "*"
return query_and_fetch(
self.db, "SELECT %s FROM %s.%s limit 10" % (column, database, table)
)
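# --- Illustrative sketch (not part of the original connector) ------------------
# VerticaAssist memoises its catalog queries per (db_url[, database[, table]])
# key and refreshes an entry once it is older than `freeze_time` seconds.  The
# helper below shows the same time-to-live pattern in isolation; `fetch` stands
# in for query_and_fetch and is a hypothetical callable, not part of this module.
def _ttl_cached(cache, key, fetch, freeze_time=30):
    if key not in cache or time.time() - cache[key]["time"] > freeze_time:
        cache[key] = {"time": time.time(), "result": fetch()}
    return cache[key]["result"]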
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-03-30 09:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('music', '0005_file_obfuscate_filename'),
]
operations = [
migrations.AlterField(
model_name='track',
name='order',
field=models.PositiveIntegerField(db_index=True, default=0, editable=False, verbose_name='Order'),
),
]
#!/usr/bin/env python
# coding: utf-8
""""
Usage: python show_data.py
"""
# In[1]:
import os
import numpy as np
from scipy import spatial
import glob
from multiprocessing import Process
from tqdm import tqdm
from vedo import load, show
import sys
# ## Part 1: Helper functions
# ### 1. Extract model information
# In[2]:
def get_edges(faces):
"""
    Derive the unique edges of the mesh from its faces.
    @faces: all faces of the mesh
    return: edges of the mesh
"""
edge2key = dict()
edges = []
edges_count = 0
for face_id, face in enumerate(faces):
faces_edges = []
for i in range(3):
cur_edge = (face[i], face[(i + 1) % 3])
faces_edges.append(cur_edge)
for idx, edge in enumerate(faces_edges):
edge = tuple(sorted(list(edge)))
if edge not in edge2key:
edge2key[edge] = edges_count
edges_count += 1
edges.append(list(edge))
return edges
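# --- Illustrative example (not called anywhere in this script) ----------------
# Two triangles that share the edge (1, 2) yield five unique edges; edges are
# listed in the order they are first encountered while walking the faces.
def _get_edges_example():
    faces = [[0, 1, 2], [1, 2, 3]]
    return get_edges(faces)  # [[0, 1], [1, 2], [0, 2], [2, 3], [1, 3]]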
def parse_obje(obj_file):
"""
    Parse an .obj file and extract its vertices, faces and edges.
    @obj_file: path to the .obj model file
    return: vertices, faces and edges of the model
"""
vs = []
faces = []
edges = []
with open(obj_file) as f:
for line in f:
line = line.strip()
splitted_line = line.split()
if not splitted_line:
continue
elif splitted_line[0] == 'v':
vs.append([float(v) for v in splitted_line[1:]])
elif splitted_line[0] == 'f':
try:
faces.append([int(c) - 1 for c in splitted_line[1:]])
except ValueError:
faces.append([int(c.split('/')[0]) - 1 for c in splitted_line[1:]])
elif splitted_line[0] == 'e':
if len(splitted_line) >= 4:
edge_v = [int(c) - 1 for c in splitted_line[1:-1]]
edge_c = int(splitted_line[-1])
edge_v.append(edge_c) # class
edges.append(edge_v)
else:
continue
vs = np.array(vs)
faces = np.array(faces, dtype=int)
# if len(edges) == 0:
# edges = get_edges(faces)
edges = np.array(edges)
return vs, faces, edges
# ### 2. Label faces from edge labels
# In[3]:
def label_face_by_edge(faces, edges, edge_labels):
"""
    Label the faces using the edge labels.
    @faces: faces of the mesh
    @edges: edges of the mesh
    @edge_labels: labels of the mesh edges
    return: face labels
"""
edge_dict = {} # key: str([pt1, pt2]) value: label
for ei, edge in enumerate(edges):
key = tuple(edge)
edge_dict[key] = edge_labels[ei]
# print(edge_dict)
face_labels = np.array(len(faces) * [[-1, -1, -1]])
for i, face in enumerate(faces):
# faces_edges = []
for j in range(3):
cur_edge = [face[j], face[(j + 1) % 3]]
cur_label = edge_dict[tuple(sorted(cur_edge))]
face_labels[i][j] = cur_label
# face_labels.append(faces_edges)
face_labels = np.where(np.sum(face_labels, axis=1) < 2, 1, 2)
return face_labels
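# --- Illustrative example (not called anywhere in this script) ----------------
# With binary edge labels, a face whose three edge labels sum to less than 2 is
# assigned class 1, otherwise class 2; for two triangles sharing edge (1, 2):
def _label_face_by_edge_example():
    faces = [[0, 1, 2], [1, 2, 3]]
    edges = [[0, 1], [1, 2], [0, 2], [2, 3], [1, 3]]
    edge_labels = [0, 0, 0, 1, 1]
    return label_face_by_edge(faces, edges, edge_labels)  # array([1, 2])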
# ### 3. Label points from edges
# In[4]:
def label_pts_by_edges(vs, edges, edge_labels):
"""
    Label the points according to the edge labels.
    @vs: vertices of the mesh
    @edges: edges of the mesh
    @edge_labels: labels of the mesh edges
    return: labels of the mesh vertices
"""
pts_labels = np.array(len(vs) * [[-1, -1]])
for ei, edge in enumerate(edges):
edge_label = edge_labels[ei]
pt1 = edge[0]
pt2 = edge[1]
pts_labels[pt1][edge_label] = edge_label
pts_labels[pt2][edge_label] = edge_label
return pts_labels
# In[5]:
def find_faces_by_2point(faces, id1, id2):
"""
    Find the two faces that share the edge defined by two points.
    @faces: all faces, N*3; values are vertex ids
    @id1: id of the first vertex
    @id2: id of the second vertex
    return: 2*3, [face id, position of the first vertex, position of the second vertex]
"""
    p1_faces = np.argwhere(faces == id1)  # row id, column id
p2_faces = np.argwhere(faces == id2)
intersection_faces = []
for val1 in p1_faces:
for val2 in p2_faces:
if val1[0] == val2[0]:
intersection_faces.append([val1[0], val1[1], val2[1]])
return intersection_faces
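# --- Illustrative example (not called anywhere in this script) ----------------
# For two triangles sharing the edge (1, 2), both faces are returned together
# with the column position of each endpoint inside the corresponding face row.
def _find_faces_by_2point_example():
    faces = np.array([[0, 1, 2], [1, 2, 3]])
    return find_faces_by_2point(faces, 1, 2)  # [[0, 1, 2], [1, 0, 1]]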
# In[6]:
def label_pts_by_edges_and_faces(vs, edges, faces, face_labels):
"""
    Label the points using the edges and face labels: each edge borders two faces,
    and a point is kept when those two faces carry different labels.
    @vs: vertices of the mesh
    @edges: edges of the mesh
    @faces: faces of the mesh
    @face_labels: labels of the mesh faces
    return: labels of the mesh vertices
"""
pts_labels = np.array(len(vs) * [False])
for ei, edge in enumerate(edges):
pt1 = edge[0]
pt2 = edge[1]
face_ids = find_faces_by_2point(faces, pt1, pt2)
if len(face_ids) == 2:
if face_labels[face_ids[0][0]] != face_labels[face_ids[1][0]]:
pts_labels[pt1] = True
pts_labels[pt2] = True
return pts_labels
# ### 4. Project edge labels onto the original model
# In[7]:
def label_origin_edge(predict_edges, predict_labels, predict_vs, origin_edges, origin_vs):
"""
    Label the edges of the original model from the predicted edges and their labels.
    @predict_edges: edges of the predicted model
    @predict_labels: labels of the predicted edges
    @predict_vs: vertices of the predicted model
    @origin_edges: edges of the original model
    @origin_vs: vertices of the original model
    return: labels of the original model's edges
"""
predict_edge_pts = predict_vs[predict_edges].reshape(-1, 6)
tree = spatial.KDTree(predict_edge_pts)
origin_edge_pts = origin_vs[origin_edges].reshape(-1, 6)
origin_labels = []
for i, edge in enumerate(origin_edge_pts):
# if i % 50000 == 0:
# print(i, "is finded!")
dist, idx = tree.query(edge)
origin_labels.append(predict_labels[idx])
return origin_labels
# ### 5. Project points onto the original model
# In[8]:
def project_points(predict_pts, origin_vs):
"""
    Project the predicted boundary points back onto the original model.
    @predict_pts: boundary points of the predicted model
    @origin_vs: all vertices of the original model
    return: boundary points of the original model
"""
tree = spatial.KDTree(origin_vs)
origin_pts = []
for i, pt in enumerate(predict_pts):
dist, idx = tree.query(pt)
origin_pts.append(origin_vs[idx])
origin_pts = np.asarray(origin_pts)
return origin_pts
# ### 6. Save the model parts separately for display
# In[9]:
def save_model_part(save_path, vs, faces, face_labels, model1_name="mesh1.obj", model2_name="mesh2.obj"):
"""
    Save the labelled parts of the model as two separate .obj files.
    @save_path: output directory
    @vs: vertices of the mesh
    @faces: faces of the mesh
    @face_labels: face labels (1 or 2)
    @model1_name / @model2_name: output file names for the two parts
    return: None
"""
mesh1 = open(os.path.join(save_path, model1_name), "w")
mesh2 = open(os.path.join(save_path, model2_name), "w")
for v in vs:
mesh1.write("v " + str(v[0]) + " " + str(v[1]) + " " + str(v[2]) + "\n")
mesh2.write("v " + str(v[0]) + " " + str(v[1]) + " " + str(v[2]) + "\n")
for idx, face in enumerate(faces):
if face_labels[idx] == 1:
mesh1.write("f " + str(face[0]+1) + " " + str(face[1]+1) + " " + str(face[2]+1) + "\n")
if face_labels[idx] == 2:
mesh2.write("f " + str(face[0]+1) + " " + str(face[1]+1) + " " + str(face[2]+1) + "\n")
mesh1.close()
mesh2.close()
# ### 7. Export boundary points
# In[10]:
def save_pts_to_vtk(pts, save_path="./test.vtk"):
"""
    Convert the gum-line points from a pts array to vtk format.
    @pts: point set [[x, y, z], [x, y, z], ...]
    @save_path: output path
    return: None
"""
import vtkplotter as vtkp
vtk_point = vtkp.Points(pts.reshape(-1, 3))
vtkp.write(vtk_point, save_path, binary=False)
# print("vtk file is saved in ", save_path)
# ## Part 2: Main function
#
# In[12]:
def save_predict(predict_model, predict_path):
"""
    Analyse a predicted model, extract the gum-line points and segment the model.
    @predict_model: path to the predicted model
    @predict_path: directory in which the results are saved
    return: None
"""
    # ------ Load the model and collect its information ------
    # ## Predicted model
predict_vs, predict_faces, predict_edges = parse_obje(predict_model)
if len(predict_edges) == 0:
print("{} is no result!".format(predict_model))
return
origin_model_basename = os.path.basename(predict_model)[:-6]
save_path = os.path.join(predict_path, origin_model_basename)
if not os.path.exists(save_path):
os.makedirs(save_path)
predict_labels = predict_edges[:, -1]
predict_edges = predict_edges[:, :-1]
    # ## Label the predicted faces
predict_face_labels = label_face_by_edge(predict_faces, predict_edges, predict_labels)
save_model_part(save_path, predict_vs, predict_faces, predict_face_labels, "predict1.obj", "predict2.obj")
    # ------ Process the predicted model ------
    # # Option 1: derive the boundary points directly from the edges
# predict_pts_labels = label_pts_by_edges(predict_vs, predict_edges, predict_labels)
# predict_gum_pt_ids = np.where((predict_pts_labels[:,0]==0) & (predict_pts_labels[:,1]==1))[0]
# predict_gum_pts = predict_vs[predict_gum_pt_ids]
# print("predict_gum_pts: ", len(predict_gum_pts))
# save_pts_to_vtk(predict_gum_pts, os.path.join(save_path, "predict.vtk"))
    # ## Option 2: decide from the face labels
pts_labels = label_pts_by_edges_and_faces(predict_vs, predict_edges, predict_faces, predict_face_labels)
predict_gum_pts = predict_vs[pts_labels]
# print("predict_gum_pts: ", len(predict_gum_pts))
save_pts_to_vtk(predict_gum_pts, os.path.join(save_path, "predict.vtk"))
# ## Part 3: Batch processing
# In[15]:
def show_predict_batch(predict_model_list, predict_path):
"""
    Process the predicted models in batch.
    @predict_model_list: list of predicted model paths
    @predict_path: directory containing the predicted models
    return: None
"""
for i, predict_model in enumerate(tqdm(predict_model_list)):
try:
save_predict(predict_model, predict_path)
except KeyError:
print("predict_model: ", predict_model)
except Exception as e:
raise e
# In[16]:
def parallel_show_predict(model_list, predict_path, n_workers=8):
"""
    Process the predicted models in parallel using multiple worker processes.
"""
if len(model_list) < n_workers:
n_workers = len(model_list)
chunk_len = len(model_list) // n_workers
chunk_lists = [model_list[i:i+chunk_len] for i in range(0, (n_workers-1)*chunk_len, chunk_len)]
chunk_lists.append(model_list[(n_workers - 1)*chunk_len:])
process_list = [Process(target=show_predict_batch, args=(chunk_list, predict_path, )) for chunk_list in chunk_lists]
for process in process_list:
process.start()
for process in process_list:
process.join()
# In[17]:
def show_predict(predict1, predict2, pts):
"""
    Display a prediction result.
    @predict1: tooth part of the mesh
    @predict2: gum part of the mesh
    @pts: gum-line points
    return: None
"""
a = load(predict1).c(('blue'))
b = load(predict2).c(('magenta'))
c = load(pts).pointSize(10).c(('green'))
show(a, b, c)
if __name__ == "__main__":
# predict_dir = "/home/heygears/work/github/MeshCNN/checkpoints/tooth_seg_20201231_add_data_with_curvature/meshes/"
predict_dir = "/home/heygears/work/github/MeshCNN/inference/results"
    # # Parse the results
predict_model_list = glob.glob(os.path.join(predict_dir, "*.obj"))
parallel_show_predict(predict_model_list, predict_dir, n_workers=8)
    # Display the results
file_list = [os.path.join(predict_dir, file_path) for file_path in os.listdir(predict_dir)
if os.path.isdir(os.path.join(predict_dir, file_path))]
# print(file_list, len(file_list))
for i, file in enumerate(file_list):
print("{} file path is: {}".format(i+1, file))
predict1_path = os.path.join(file, "predict1.obj")
predict2_path = os.path.join(file, "predict2.obj")
predict_pts = os.path.join(file, "predict.vtk")
show_predict(predict1_path, predict2_path, predict_pts)
    # ----- Keyboard-controlled browsing (alternative loop) --------
# length = len(file_list)
# i = 0
# while True:
# file = file_list[i]
# print("\n第{}个 file path is: {}".format(i + 1, file))
# predict1_path = os.path.join(file, "predict1.obj")
# predict2_path = os.path.join(file, "predict2.obj")
# predict_pts = os.path.join(file, "predict.vtk")
# show_predict(predict1_path, predict2_path, predict_pts)
#
# print("*****A(a):上一张; D(d):下一张; Q(q):退出; 按完键后回车表示确定!!!")
# line = sys.stdin.readline()
# if line == "a\n" or line == "A\n":
# if i > 0:
# i -= 1
# else:
# print("已经到最前面一张了,请按D(d)到下一张,按Q(q)退出")
# i = 0
# if line == "d\n" or line == "D\n":
# if i < length - 1:
# i += 1
# else:
# print("已经到最后面一张了,请按A(a)到下一张,按Q(q)退出")
# i = length - 1
# if line == "q\n" or line == "Q\n":
# break
"""
Copyright 2017 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
THIS FILE IS DEPRECATED AND MAY BE REMOVED WITHOUT WARNING!
DO NOT CALL THESE FUNCTIONS IN YOUR CODE!
ALL SOLVER INTERFACES ARE BEING MIGRATED TO REDUCTIONS.
"""
import cvxpy.interface as intf
import cvxpy.settings as s
from cvxpy.problems.solvers.solver import Solver
class ECOS(Solver):
"""An interface for the ECOS solver.
"""
# Solver capabilities.
LP_CAPABLE = True
SOCP_CAPABLE = True
SDP_CAPABLE = False
EXP_CAPABLE = True
MIP_CAPABLE = False
# EXITCODES from ECOS
# ECOS_OPTIMAL (0) Problem solved to optimality
# ECOS_PINF (1) Found certificate of primal infeasibility
# ECOS_DINF (2) Found certificate of dual infeasibility
# ECOS_INACC_OFFSET (10) Offset exitflag at inaccurate results
# ECOS_MAXIT (-1) Maximum number of iterations reached
# ECOS_NUMERICS (-2) Search direction unreliable
# ECOS_OUTCONE (-3) s or z got outside the cone, numerics?
# ECOS_SIGINT (-4) solver interrupted by a signal/ctrl-c
# ECOS_FATAL (-7) Unknown problem in solver
# Map of ECOS status to CVXPY status.
STATUS_MAP = {0: s.OPTIMAL,
1: s.INFEASIBLE,
2: s.UNBOUNDED,
10: s.OPTIMAL_INACCURATE,
11: s.INFEASIBLE_INACCURATE,
12: s.UNBOUNDED_INACCURATE,
-1: s.SOLVER_ERROR,
-2: s.SOLVER_ERROR,
-3: s.SOLVER_ERROR,
-4: s.SOLVER_ERROR,
-7: s.SOLVER_ERROR}
def import_solver(self):
"""Imports the solver.
"""
import ecos
ecos # For flake8
def name(self):
"""The name of the solver.
"""
return s.ECOS
def matrix_intf(self):
"""The interface for matrices passed to the solver.
"""
return intf.DEFAULT_SPARSE_INTF
def vec_intf(self):
"""The interface for vectors passed to the solver.
"""
return intf.DEFAULT_INTF
def split_constr(self, constr_map):
"""Extracts the equality, inequality, and nonlinear constraints.
Parameters
----------
constr_map : dict
A dict of the canonicalized constraints.
Returns
-------
tuple
(eq_constr, ineq_constr, nonlin_constr)
"""
return (constr_map[s.EQ], constr_map[s.LEQ], [])
def solve(self, objective, constraints, cached_data,
warm_start, verbose, solver_opts):
"""Returns the result of the call to the solver.
Parameters
----------
objective : LinOp
The canonicalized objective.
constraints : list
            The list of canonicalized constraints.
cached_data : dict
A map of solver name to cached problem data.
warm_start : bool
Not used.
verbose : bool
Should the solver print output?
solver_opts : dict
Additional arguments for the solver.
Returns
-------
tuple
(status, optimal value, primal, equality dual, inequality dual)
"""
import ecos
data = self.get_problem_data(objective, constraints, cached_data)
data[s.DIMS]['e'] = data[s.DIMS][s.EXP_DIM]
results_dict = ecos.solve(data[s.C], data[s.G], data[s.H],
data[s.DIMS], data[s.A], data[s.B],
verbose=verbose,
**solver_opts)
return self.format_results(results_dict, data, cached_data)
def format_results(self, results_dict, data, cached_data):
"""Converts the solver output into standard form.
Parameters
----------
results_dict : dict
The solver output.
data : dict
Information about the problem.
cached_data : dict
A map of solver name to cached problem data.
Returns
-------
dict
The solver output in standard form.
"""
new_results = {}
status = self.STATUS_MAP[results_dict['info']['exitFlag']]
new_results[s.STATUS] = status
# Timing data
new_results[s.SOLVE_TIME] = results_dict["info"]["timing"]["tsolve"]
new_results[s.SETUP_TIME] = results_dict["info"]["timing"]["tsetup"]
new_results[s.NUM_ITERS] = results_dict["info"]["iter"]
if new_results[s.STATUS] in s.SOLUTION_PRESENT:
primal_val = results_dict['info']['pcost']
new_results[s.VALUE] = primal_val + data[s.OFFSET]
new_results[s.PRIMAL] = results_dict['x']
new_results[s.EQ_DUAL] = results_dict['y']
new_results[s.INEQ_DUAL] = results_dict['z']
return new_results
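# --- Illustrative sketch (not part of this deprecated interface) ---------------
# format_results() keys the CVXPY status off the raw ECOS exit flag through
# STATUS_MAP.  The helper below shows that translation in isolation; the nested
# ['info']['exitFlag'] layout matches what ecos.solve() returns, while the
# helper itself is only an example and is not used by CVXPY.
def _status_from_ecos_exitflag(results_dict):
    return ECOS.STATUS_MAP[results_dict['info']['exitFlag']]
# e.g. _status_from_ecos_exitflag({'info': {'exitFlag': 10}}) == s.OPTIMAL_INACCURATE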
#!/usr/bin/env python
# ===============================================================================
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
'''
Created on 8 Mar 2020
@author: <NAME> <<EMAIL>>
'''
import argparse
import numpy as np
import re
import os
import sys
from datetime import datetime
from pprint import pformat
import tempfile
import logging
import locale
from math import log10
from collections import OrderedDict
from functools import reduce
import zipfile
import zipstream
from geophys_utils import get_spatial_ref_from_wkt
from geophys_utils import NetCDFPointUtils
locale.setlocale(locale.LC_ALL, '') # Use '' for auto, or force e.g. to 'en_US.UTF-8'
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO) # Logging level for this module
# Dynamically adjust integer field widths to fit all data values if True
ADJUST_INTEGER_FIELD_WIDTH = True
# Truncate ASEG-GDF2 field names to eight characters if True
TRUNCATE_VARIABLE_NAMES = False
STRING_VAR_NULL_VALUE = "NULL"
# Set this to non zero to limit string field width in .dat file.
# WARNING - string truncation may corrupt data!
# N.B: Must be >= all numeric field widths defined in ASEG_GDF_FORMAT dict below (enforced by assertion)
MAX_FIELD_WIDTH = 0
# Maximum width of comment fields in .des file
MAX_COMMENT_WIDTH = 128
# Character encoding for .dfn, .dat & .des files
CHARACTER_ENCODING = 'utf-8'
# Default number of rows to read from netCDF before outputting a chunk of lines.
CACHE_CHUNK_ROWS = 32768
# Buffer size per-line for 64-bit zipfile
LINE_BUFFER_SIZE = 4096 # Conservative (biggest) line size in bytes
TEMP_DIR = tempfile.gettempdir()
# TEMP_DIR = 'C:\Temp'
# Set this to zero for no limit - only set a non-zero value for testing when debug = True
DEBUG_POINT_LIMIT = 0
# List of regular expressions for variable names to exclude from output
EXCLUDE_NAME_REGEXES = (['.*_index$', 'ga_.*metadata', 'latitude.+', 'longitude.+', 'easting.+', 'northing.+'] +
NetCDFPointUtils.CRS_VARIABLE_NAMES
)
# List of regular expressions for variable attributes to include in .dfn file
INCLUDE_VARIABLE_ATTRIBUTE_REGEXES = ['Intrepid.+']
# From <NAME>'s email to <NAME>, sent: Monday, 24 February 2020 4:27 PM
ASEG_GDF_FORMAT = {
'float64': {
'width': 18,
'null': -9.9999999999e+32,
'aseg_gdf_format': 'E18.10',
'python_format': '{:>18.10e}',
},
'float32': {
'width': 14,
'null': -9.999999e+32,
'aseg_gdf_format': 'E14.6',
'python_format': '{:>14.6e}',
},
'int64': {
'width': 21,
'null': -9223372036854775808,
'aseg_gdf_format': 'I21',
'python_format': '{:>21d}',
},
'uint64': {
'width': 21,
        'null': 18446744073709551615,
'aseg_gdf_format': 'I21',
'python_format': '{:>21d}',
},
'int32': {
'width': 12,
'null': -2147483647,
'aseg_gdf_format': 'I12',
'python_format': '{:>12d}',
},
'uint32': {
'width': 12,
'null': 4294967295,
'aseg_gdf_format': 'I12',
'python_format': '{:>12d}',
},
'int16': {
'width': 7,
'null': -32767,
'aseg_gdf_format': 'I7',
'python_format': '{:>7d}',
},
'uint16': {
'width': 7,
'null': 65535,
'aseg_gdf_format': 'I7',
'python_format': '{:>7d}',
},
'int8': {
'width': 5,
'null': -127,
'aseg_gdf_format': 'I5',
'python_format': '{:>5d}',
},
'uint8': {
'width': 5,
'null': 255,
'aseg_gdf_format': 'I5',
'python_format': '{:>5d}',
},
}
# Check to ensure that MAX_FIELD_WIDTH will not truncate numeric fields
assert not MAX_FIELD_WIDTH or all([format_specification['width'] <= MAX_FIELD_WIDTH for format_specification in
ASEG_GDF_FORMAT.values()]), 'Invalid MAX_FIELD_WIDTH {}'.format(MAX_FIELD_WIDTH)
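# --- Illustrative sketch (not used by the converter) ---------------------------
# Each ASEG_GDF_FORMAT entry pairs an ASEG-GDF2 format code with the Python
# format string used to render a single .dat column; e.g. the float32 null
# value renders as a right-aligned 14-character field.
def _format_example():
    spec = ASEG_GDF_FORMAT['float32']
    return spec['python_format'].format(spec['null'])  # ' -9.999999e+32'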
class RowValueCache(object):
'''\
Class to manage cache of row data from netCDF file
'''
def __init__(self, nc2aseggdf):
'''
Constructor
'''
self.nc2aseggdf = nc2aseggdf
self.total_points = nc2aseggdf.total_points
self.field_definitions = nc2aseggdf.field_definitions
self.netcdf_dataset = nc2aseggdf.netcdf_dataset
self.clear_cache()
def clear_cache(self):
'''
Clear cache
'''
self.index_range = 0
self.cache = {}
def read_points(self, start_index, end_index, point_mask=None):
'''
Function to read points from start_index to end_index
'''
self.index_range = end_index - start_index
if point_mask is None: # No point_mask defined - take all points in range
subset_mask = np.ones(shape=(self.index_range,), dtype='bool')
else:
subset_mask = point_mask[start_index:end_index]
self.index_range = np.count_nonzero(subset_mask)
# If no points to retrieve, don't read anything
if not self.index_range:
logger.debug('No points to retrieve - all masked out')
return
# Build cache of data value slices keyed by field_name
self.cache = {field_name: self.nc2aseggdf.get_data_values(field_name, slice(start_index, end_index))
for field_name in self.field_definitions.keys()
}
# logger.debug('self.cache: {}'.format(pformat(self.cache)))
def chunk_row_data_generator(self, clear_cache=True):
'''
Generator yielding chunks of all values from cache, expanding 2D variables to multiple columns
'''
if not self.index_range:
logger.debug('Cache is empty - nothing to yield')
return
for index in range(self.index_range):
row_value_list = []
for field_name, field_definition in self.field_definitions.items():
data = self.cache[field_name][index]
# Convert array to string if required (OPeNDAP behaviour with string arrays?)
if type(data) == np.ndarray and data.dtype == object:
data = str(data)
if field_definition['columns'] == 1: # Element from 1D variable
row_value_list.append(data)
else: # Row from 2D variable
row_value_list += [element for element in data]
# logger.debug('row_value_list: {}'.format(row_value_list))
yield row_value_list
if clear_cache:
self.clear_cache() # Clear cache after outputting all lines
class NC2ASEGGDF2(object):
def __init__(self,
netcdf_dataset,
debug=False,
verbose=False,
):
'''
'''
def build_field_definitions():
'''\
Helper function to build self.field_definitions as an OrderedDict of field definitions keyed by ASEG-GDF2 field name
'''
self.field_definitions = OrderedDict()
for variable_name, variable in self.netcdf_dataset.variables.items():
# Check for any name exclusion matches
if any([re.match(exclude_name_regex, variable_name, re.IGNORECASE)
for exclude_name_regex in EXCLUDE_NAME_REGEXES]):
logger.debug('Excluding variable {}'.format(variable_name))
continue
if variable_name in self.field_definitions.keys(): # already processed
continue
if len(variable.dimensions) == 1 and variable.dimensions != (
'point',): # Non-point indexed array variable, e.g.flight(line)
# Need to work backwards from variable to point indexed variable
try:
try:
index_variable = self.netcdf_dataset.variables[
variable.index] # Try to use explicit index attribute
except AttributeError:
variable_dimension_name = variable.dimensions[0]
index_variable = self.netcdf_dataset.variables[variable_dimension_name + '_index']
assert index_variable.dimensions == ('point',), 'Invalid dimensions for variable {}: {}'.format(
index_variable.name,
index_variable.dimensions)
variables = [index_variable, variable]
logger.debug(
'Found index variable {} for lookup variable {}'.format(index_variable.name, variable_name))
except:
logger.debug('Index variable not found for lookup variable {}'.format(variable_name))
continue # Can't find lookup variable - ignore this one
elif (
len(variable.dimensions)
and variable.dimensions[0] == 'point'
and not (
variable.dimensions == ('point',)
and (
variable_name.endswith('_index')
or hasattr(variable, 'lookup')
)
)
):
logger.debug('Found point-wise array data variable {}'.format(variable_name))
variables = [variable] # Not an index variable - just use primary variable values
elif not len(variable.dimensions) and variable_name != self.ncpu.crs_variable.name:
logger.debug('Found point-wise scalar data variable {}'.format(variable_name))
variables = [variable] # Scalar variable - broadcast out to all points
else:
logger.debug('Unable to deal with variable {} - ignoring'.format(variable_name))
continue
variable_attributes = dict(variables[-1].__dict__)
# logger.debug('variable_attributes = {}'.format(pformat(variable_attributes)))
dtype = variables[-1].dtype
logger.debug('Variable is of dtype {}'.format(dtype))
format_dict = dict(ASEG_GDF_FORMAT.get(str(dtype)) or {})
if not format_dict: # Unrecognised format. Treat as string
width = max([len(str(element).strip()) for element in variables[-1][:]]) + 1
if MAX_FIELD_WIDTH and width > MAX_FIELD_WIDTH:
logger.warning(
'WARNING: String variable "{}" data will be truncated from a width of {} to {}'.format(
variable_name, width, MAX_FIELD_WIDTH))
width = MAX_FIELD_WIDTH
format_dict = {
'width': width,
'null': STRING_VAR_NULL_VALUE,
'aseg_gdf_format': 'A{}'.format(width),
'python_format': '{{:>{}s}}'.format(width),
}
try:
column_count = reduce(lambda x, y: x * y, variable.shape[
1:]) # This will work for (2+)D, even if we only support 1D or 2D
except: # Scalar or 1D
column_count = 1
variable_definition = {
'variable_name': variable_name,
'variables': variables,
'attributes': variable_attributes,
'dtype': dtype,
'format': format_dict,
'columns': column_count
}
if TRUNCATE_VARIABLE_NAMES:
# Sanitise field name, truncate to 8 characters and ensure uniqueness
field_name = re.sub('(\W|_)+', '', variable_name)[:8].upper()
field_name_count = 0
while field_name in [variable_definition.get('field_name')
for variable_definition in self.field_definitions.values()]:
field_name_count += 1
field_name = field_name[:-len(str(field_name_count))] + str(field_name_count)
else:
field_name = re.sub('\W+', '_',
variable_name) # Sanitisation shouldn't be necessary, but we'll do it anyway
variable_definition['field_name'] = field_name
# Add definition to allow subsequent self.get_data_values(field_name) call
self.field_definitions[field_name] = variable_definition
if ADJUST_INTEGER_FIELD_WIDTH and 'int' in str(
dtype): # Field is some kind of integer - adjust format for data
# logger.debug('\tChecking values to adjust integer field width for variable {}'.format(variable_name))
max_abs_value = np.nanmax(np.abs(self.get_data_values(field_name)))
min_value = np.nanmin(self.get_data_values(field_name))
# logger.debug('\tMaximum absolute value = {}, minimum value = {}'.format(max_abs_value, min_value))
if max_abs_value > 0:
width = int(log10(max_abs_value)) + 2 # allow for leading space
if min_value < 0:
width += 1 # allow for "-"
else:
width = 2
if width != format_dict['width']:
logger.debug(
'\tAdjusting integer field width from {} to {} for variable {}'.format(format_dict['width'],
width,
variable_name))
format_dict['width'] = width
format_dict['aseg_gdf_format'] = 'I{}'.format(width)
format_dict['python_format'] = '{{:>{}d}}'.format(width)
# logger.debug(self.field_definitions)
# Start of __init__
# TODO: Make this a property
self.debug = debug
log_level = logging.DEBUG if debug else logging.INFO
logger.setLevel(level=log_level)
if verbose:
logger.debug('Enabling info level output')
self.info_output = logger.info # Verbose
else:
self.info_output = logger.debug # Non-verbose
self.ncpu = NetCDFPointUtils(netcdf_dataset, debug=debug)
self.netcdf_dataset = self.ncpu.netcdf_dataset
self.netcdf_path = self.ncpu.netcdf_dataset.filepath()
self.netcdf_dataset.set_auto_mask(False) # Turn auto-masking off to allow substitution of new null values
assert 'point' in self.netcdf_dataset.dimensions.keys(), '"point" not found in dataset dimensions'
self.info_output('Opened netCDF dataset {}'.format(self.netcdf_path))
self.total_points = self.ncpu.point_count
self.spatial_ref = get_spatial_ref_from_wkt(self.ncpu.wkt)
# set self.field_definitions
build_field_definitions()
# set reporting increment to nice number giving 100 - 199 progress reports
self.line_report_increment = (10.0 ** int(log10(self.ncpu.point_count / 50))) / 2.0
def get_data_values(self, field_name, point_slice=slice(None, None, None)):
'''\
Function to return data values as an array, expanding lookups and broadcasting scalars if necessary
@param field_name: Variable name to query (key in self.field_definitions)
@param point_slice: slice to apply to point (i.e. first) dimension
@return data_array: Array of data values
'''
variables = self.field_definitions[field_name]['variables']
# logger.debug('Field {} represents variable {}({})'.format(field_name, variables[-1].name, ','.join(variables[0].dimensions)))
if len(variables) == 1: # Single variable => no lookup
if len(variables[0].dimensions): # Array
assert variables[0].dimensions[0] == 'point', 'First array dimension must be "point"'
data = variables[0][point_slice]
else: # Scalar
# Broadcast scalar to required array shape
data = np.array([variables[0][:]] * (
((point_slice.stop or self.total_points) - (point_slice.start or 0)) // (
point_slice.step or 1)))
elif len(variables) == 2: # Index & Lookup variables
mask_value_index_var = getattr(variables[0], "_FillValue")
# mask_value_lookup_var = getattr(variables[1], "_FillValue")
# check if the index variable contains any masked values
            # If so, return a list in which masked values are replaced with STRING_VAR_NULL_VALUE
            # and non-masked values take their corresponding value from the lookup table
#TODO this may be needed for lines also if any line variables contain masked values for lookup tables
if np.any(variables[0][:] == mask_value_index_var):
logger.debug("Variable '{}' contains one or more masked values. Converting masked value/s to {}".format(variables[0].name, STRING_VAR_NULL_VALUE))
i = 0
lookup_len = len(variables[0][:])
data = [None] * (lookup_len) # create a list of the required size to fill in with correct values
while i < lookup_len:
if variables[0][i] != mask_value_index_var:
data[i] = variables[1][variables[0][i]]
else:
data[i] = str(STRING_VAR_NULL_VALUE)
i = i + 1
# if no masked values, make a list with the index values converted to their corresponding lookup table
# values
else:
data = variables[1][:][variables[0][:][point_slice]] # Use first array to index second one
else:
raise BaseException(
'Unable to resolve chained lookups (yet): {}'.format([variable.name for variable in variables]))
        # Substitute null_value for _FillValue if required.
null_value = self.field_definitions[field_name]['format']['null']
if null_value is not None and hasattr(variables[-1], '_FillValue'):
data[(data == (variables[-1]._FillValue))] = null_value
return data
def create_dfn_line(
self,
rt,
name,
aseg_gdf_format,
definition=None,
defn=None,
st='RECD'
):
'''
Helper function to write line to .dfn file.
self.defn is used to track the DEFN number, which can be reset using the optional defn parameter
@param rt: value for "RT=<rt>" portion of DEFN line, e.g. '' or 'PROJ'
@param name: Name of DEFN
        @param aseg_gdf_format: ASEG-GDF2 format string, e.g. 'I5' or 'E18.10'
@param definition=None: Definition string
@param defn=None: New value of DEFN number. Defaults to self.defn+1
@param st: value for "RT=<rt>" portion of DEFN line. Default = 'RECD'
@return line: output line
'''
if defn is None:
self.defn += 1 # Increment last DEFN value (initialised to 0 in constructor)
else:
self.defn = defn
line = 'DEFN {defn} ST={st},RT={rt}; {name}'.format(defn=self.defn,
st=st,
rt=rt,
name=name,
)
if aseg_gdf_format:
line += ': {aseg_gdf_format}'.format(aseg_gdf_format=aseg_gdf_format)
if definition:
line += ': ' + definition
# logger.debug('dfn file line: {}'.format(line))
return line
def create_dfn_file(self, dfn_out_path, zipstream_zipfile=None):
'''
Helper function to output .dfn file
'''
if zipstream_zipfile:
dfn_basename = os.path.basename(dfn_out_path)
zipstream_zipfile.write_iter(dfn_basename,
self.encoded_dfn_line_generator(encoding=CHARACTER_ENCODING),
)
else:
# Create, write and close .dfn file
with open(dfn_out_path, 'w') as dfn_file:
for dfn_line in self.dfn_line_generator():
dfn_file.write(dfn_line)
dfn_file.close()
self.info_output('Finished writing .dfn file {}'.format(self.dfn_out_path))
def encoded_dfn_line_generator(self, encoding=CHARACTER_ENCODING):
'''
Helper generator to yield encoded bytestrings of all lines in .dfn file
'''
for line_string in self.dfn_line_generator():
yield line_string.encode(encoding)
def dfn_line_generator(self):
'''
Helper generator to yield all lines in .dfn file
'''
def variable_defns_generator():
"""
Helper function to write a DEFN line for each variable
"""
self.defn = 0 # reset DEFN number
# for variable_name, variable_attributes in self.field_definitions.items():
for field_name, field_definition in self.field_definitions.items():
optional_attribute_list = []
units = field_definition['attributes'].get('units')
if units:
optional_attribute_list.append('UNITS={units}'.format(units=units))
# fill_value = field_definition['attributes'].get('_FillValue')
null_value = field_definition['format'].get('null')
if null_value is not None:
optional_attribute_list.append(
'NULL=' + field_definition['format']['python_format'].format(null_value).strip())
long_name = field_definition['attributes'].get('long_name') or re.sub('(\W|_)+', ' ',
field_definition['variable_name'])
if long_name:
optional_attribute_list.append('NAME={long_name}'.format(long_name=long_name))
# Include any variable attributes which match regexes in INCLUDE_VARIABLE_ATTRIBUTE_REGEXES
for attribute_name, attribute_value in field_definition['attributes'].items():
if any([re.match(variable_attribute_regex, attribute_name, re.IGNORECASE)
for variable_attribute_regex in INCLUDE_VARIABLE_ATTRIBUTE_REGEXES]):
optional_attribute_list.append('{}={}'.format(attribute_name,
attribute_value))
# ===========================================================
# # Check for additional ASEG-GDF attributes defined in settings
# variable_attributes = field_definition.get('variable_attributes')
# if variable_attributes:
# for aseg_gdf_attribute, netcdf_attribute in self.settings['attributes'].items():
# attribute_value = variable_attributes.get(netcdf_attribute)
# if attribute_value is not None:
# optional_attribute_list.append('{aseg_gdf_attribute}={attribute_value}'.format(aseg_gdf_attribute=aseg_gdf_attribute,
# attribute_value=attribute_value
# ))
# ===========================================================
if optional_attribute_list:
definition = ', '.join(optional_attribute_list)
else:
definition = None
aseg_gdf_format = field_definition['format']['aseg_gdf_format']
if field_definition['columns'] > 1: # Need to pre-pend number of columns to format string
aseg_gdf_format = '{}{}'.format(field_definition['columns'], aseg_gdf_format)
yield self.create_dfn_line(rt='',
name=field_name,
aseg_gdf_format=aseg_gdf_format,
definition=definition,
)
# Write 'END DEFN'
yield self.create_dfn_line(rt='',
name='END DEFN',
aseg_gdf_format=None
)
def proj_defns_generator():
"""
Helper function to write PROJ lines
From standard:
DEFN 1 ST=RECD,RT=PROJ; RT: A4
DEFN 2 ST=RECD,RT=PROJ; COORDSYS: A40: NAME=projection name, POSC projection name
DEFN 3 ST=RECD,RT=PROJ; DATUM: A40: NAME=datum name, EPSG compliant ellipsoid name
DEFN 4 ST=RECD,RT=PROJ; MAJ_AXIS: D12.1: UNIT=m, NAME=major_axis, Major axis in units
relevant to the ellipsoid definition
DEFN 5 ST=RECD,RT=PROJ; INVFLATT: D14.9: NAME=inverse flattening, 1/f inverse of flattening
DEFN 6 ST=RECD,RT=PROJ; PRIMEMER: F10.1: UNIT=deg, NAME=prime_meridian, Location of prime
meridian relative to Greenwich
DEFN 7 ST=RECD,RT=PROJ; PROJMETH: A30: NAME=projection_method, eg. Transverse Mercator,
Lambert etc
DEFN 8 ST=RECD,RT=PROJ; PARAM1: D14.0: NAME=Proj_par1, 1st projecton paramater See Table 1
DEFN 9 ST=RECD,RT=PROJ; PARAM2: D14.0: NAME=Proj_par2, 2nd projection parameter
DEFN 10 ST=RECD,RT=PROJ; PARAM3: D14.0: NAME=Proj_par3, 3rd projection parameter
DEFN 11 ST=RECD,RT=PROJ; PARAM4: D14.0: NAME=Proj_par4, 4th projection parameter
DEFN 12 ST=RECD,RT=PROJ; PARAM5: D14.0: NAME=Proj_par5, 5th projection parameter
DEFN 13 ST=RECD,RT=PROJ; PARAM6: D14.0: NAME=Proj_par6, 6th projection parameter
DEFN 14 ST=RECD,RT=PROJ; PARAM7: D14.0: NAME=Proj_par7, 7th projection parameter
DEFN 15 ST=RECD,RT=PROJ; END DEFN
From sample file:
DEFN 1 ST=RECD,RT=PROJ; RT:A4
DEFN 2 ST=RECD,RT=PROJ; PROJNAME:A30: COMMENT=GDA94 / MGA zone 54
DEFN 3 ST=RECD,RT=PROJ; ELLPSNAM:A30: COMMENT=GRS 1980
DEFN 4 ST=RECD,RT=PROJ; MAJ_AXIS: D12.1: UNIT=m, COMMENT=6378137.000000
DEFN 5 ST=RECD,RT=PROJ; ECCENT: D12.9: COMMENT=298.257222
DEFN 6 ST=RECD,RT=PROJ; PRIMEMER: F10.1: UNIT=deg, COMMENT=0.000000
DEFN 7 ST=RECD,RT=PROJ; PROJMETH: A30: COMMENT=Transverse Mercator
DEFN 8 ST=RECD,RT=PROJ; PARAM1: D14.0: COMMENT= 0.000000
DEFN 9 ST=RECD,RT=PROJ; PARAM2: D14.0: COMMENT= 141.000000
DEFN 10 ST=RECD,RT=PROJ; PARAM3: D14.0: COMMENT= 0.999600
DEFN 11 ST=RECD,RT=PROJ; PARAM4: D14.0: COMMENT= 500000.000000
DEFN 12 ST=RECD,RT=PROJ; PARAM5: D14.0: COMMENT=10000000.00000
DEFN 13 ST=RECD,RT=PROJ; PARAM6: D14.0:
DEFN 14 ST=RECD,RT=PROJ; PARAM7: D14.0:
DEFN 15 ST=RECD,RT=PROJ; END DEFN
PROJGDA94 / MGA zone 54 GRS 1980 6378137.0000 298.257222 0.000000 Transverse Mercator 0.000000 141.000000 0.999600 500000.000000 10000000.00000
"""
geogcs = self.spatial_ref.GetAttrValue('geogcs') # e.g. 'GDA94'
projcs = self.spatial_ref.GetAttrValue('projcs') # e.g. 'UTM Zone 54, Southern Hemisphere'
ellipse_name = self.spatial_ref.GetAttrValue('spheroid', 0)
major_axis = float(self.spatial_ref.GetAttrValue('spheroid', 1))
prime_meridian = float(self.spatial_ref.GetAttrValue('primem', 1))
inverse_flattening = float(self.spatial_ref.GetInvFlattening())
# eccentricity = self.spatial_ref.GetAttrValue('spheroid', 2) # Non-standard definition same as inverse_flattening?
if self.spatial_ref.IsProjected():
if projcs.startswith(geogcs):
projection_name = projcs
else:
projection_name = geogcs + ' / ' + re.sub('[\:\,\=]+', '',
projcs) # e.g. 'GDA94 / UTM Zone 54, Southern Hemisphere'
projection_method = self.spatial_ref.GetAttrValue('projection').replace('_', ' ')
projection_parameters = [(key, float(value))
for key, value in re.findall('PARAMETER\["(.+)",(\d+\.?\d*)\]',
self.spatial_ref.ExportToPrettyWkt())
]
else: # Unprojected CRS
projection_name = geogcs
projection_method = None
projection_parameters = None
self.defn = 0 # reset DEFN number
# write 'DEFN 1 ST=RECD,RT=PROJ; RT:A4'
yield self.create_dfn_line(rt='PROJ',
name='RT',
aseg_gdf_format='A4'
)
yield self.create_dfn_line(rt='PROJ',
name='COORDSYS',
aseg_gdf_format='A40',
definition='NAME={projection_name}, Projection name'.format(
projection_name=projection_name)
)
yield self.create_dfn_line(rt='PROJ',
name='DATUM',
aseg_gdf_format='A40',
definition='NAME={ellipse_name}, Ellipsoid name'.format(
ellipse_name=ellipse_name)
)
yield self.create_dfn_line(rt='PROJ',
name='MAJ_AXIS',
aseg_gdf_format='D12.1',
definition='UNIT={unit}, NAME={major_axis}, Major axis'.format(unit='m',
major_axis=major_axis)
)
yield self.create_dfn_line(rt='PROJ',
name='INVFLATT',
aseg_gdf_format='D14.9',
definition='NAME={inverse_flattening}, 1/f inverse of flattening'.format(
inverse_flattening=inverse_flattening)
)
yield self.create_dfn_line(rt='PROJ',
name='PRIMEMER',
aseg_gdf_format='F10.1',
definition='UNIT={unit}, NAME={prime_meridian}, Location of prime meridian'.format(
unit='degree', prime_meridian=prime_meridian)
)
# ===============================================================================
# # Non-standard definitions
# yield self.create_dfn_line(rt='PROJ',
# name='ELLPSNAM',
# aseg_gdf_format='A30',
# definition='NAME={ellipse_name}, Non-standard definition for ellipse name'.format(ellipse_name=ellipse_name)
# )
#
# yield self.create_dfn_line(rt='PROJ',
# name='PROJNAME',
# aseg_gdf_format='A40',
# definition='NAME={projection_name}, Non-standard definition for projection name'.format(projection_name=projection_name)
# )
#
# yield self.create_dfn_line(rt='PROJ',
# name='ECCENT',
# aseg_gdf_format='D12.9',
# definition='NAME={eccentricity}, Non-standard definition for ellipsoidal eccentricity'.format(eccentricity=eccentricity)
# )
# ===============================================================================
if projection_method:
yield self.create_dfn_line(rt='PROJ',
name='PROJMETH',
aseg_gdf_format='A30',
definition='NAME={projection_method}, projection method'.format(
projection_method=projection_method)
)
# Write all projection parameters starting from DEFN 8
param_no = 0
for param_name, param_value in projection_parameters:
param_no += 1
yield self.create_dfn_line(rt='PROJ',
name='PARAM{param_no}'.format(param_no=param_no),
aseg_gdf_format='D14.0',
# TODO: Investigate whether this is OK - it looks dodgy to me
definition='NAME={param_value}, {param_name}'.format(
param_value=param_value, param_name=param_name)
)
# Write 'END DEFN'
yield self.create_dfn_line(rt='PROJ',
name='END DEFN',
aseg_gdf_format=''
)
# TODO: Write fixed length PROJ line at end of file
return # End of function proj_defns_generator
yield 'DEFN ST=RECD,RT=COMM;RT:A4;COMMENTS:A{}\n'.format(MAX_COMMENT_WIDTH) # TODO: Check this first line
for defn_line in variable_defns_generator():
yield defn_line + '\n'
for proj_line in proj_defns_generator():
yield proj_line + '\n'
def create_dat_file(self, dat_out_path, cache_chunk_rows=None, point_mask=None, zipstream_zipfile=None):
'''
Helper function to output .dat file
'''
def chunk_buffer_generator(row_value_cache, python_format_list, cache_chunk_rows, point_mask=None,
encoding=None):
'''
Generator to yield all line strings across all point variables for specified row range
'''
def chunk_line_generator(row_value_cache, python_format_list, start_index, end_index, point_mask=None):
'''
Helper Generator to yield line strings for specified rows across all point variables
'''
logger.debug('Reading rows {:n} - {:n}'.format(start_index + 1, end_index))
row_value_cache.read_points(start_index, end_index, point_mask=point_mask)
logger.debug('Preparing ASEG-GDF lines for rows {:n} - {:n}'.format(start_index + 1, end_index))
for row_value_list in row_value_cache.chunk_row_data_generator():
# logger.debug('row_value_list: {}'.format(row_value_list))
# Turn list of values into a string using python_formats
# Truncate fields to maximum width with leading space - only string fields should be affected
yield ''.join([' ' + python_format_list[value_index].format(row_value_list[value_index])[
1 - MAX_FIELD_WIDTH::]
for value_index in range(len(
python_format_list))]) # .lstrip() # lstrip if we want to discard leading spaces from line
# Process all chunks
point_count = 0
for chunk_index in range(self.total_points // cache_chunk_rows + 1):
chunk_line_list = []
for line in chunk_line_generator(row_value_cache, python_format_list,
start_index=chunk_index * cache_chunk_rows,
end_index=min((chunk_index + 1) * cache_chunk_rows,
self.total_points
),
point_mask=point_mask
):
point_count += 1
if not (point_count % self.line_report_increment):
self.info_output(
'{:n} / {:n} ASEG-GDF2 rows converted to text'.format(point_count, self.total_points))
# logger.debug('line: "{}"'.format(line))
chunk_line_list.append(line)
if self.debug and DEBUG_POINT_LIMIT and (
point_count >= DEBUG_POINT_LIMIT): # Don't process more lines
break
chunk_buffer_string = '\n'.join(chunk_line_list) + '\n' # Yield a chunk of lines
if encoding:
encoded_bytestring = chunk_buffer_string.encode(encoding)
line_size = sys.getsizeof(encoded_bytestring)
assert line_size < LINE_BUFFER_SIZE * CACHE_CHUNK_ROWS, 'Line size of {} exceeds buffer size of {}'.format(
line_size,
LINE_BUFFER_SIZE * CACHE_CHUNK_ROWS)
logger.debug('Writing ASEG-GDF line buffer of size {:n} bytes'.format(line_size))
yield (encoded_bytestring)
else:
logger.debug('Writing ASEG-GDF line buffer')
yield (chunk_buffer_string)
if self.debug and DEBUG_POINT_LIMIT and (point_count >= DEBUG_POINT_LIMIT): # Don't process more chunks
logger.warning('WARNING: Output limited to {:n} points in debug mode'.format(DEBUG_POINT_LIMIT))
break
self.info_output('A total of {:n} rows were output'.format(point_count))
# Start of create_dat_file function
cache_chunk_rows = cache_chunk_rows or CACHE_CHUNK_ROWS
# Start of chunk_buffer_generator
row_value_cache = RowValueCache(self) # Create cache for multiple chunks of data
python_format_list = []
for field_definition in self.field_definitions.values():
for _column_index in range(field_definition['columns']):
python_format_list.append(field_definition['format']['python_format'])
# logger.debug('python_format_list: {}'.format(python_format_list))
if zipstream_zipfile:
# Write to zip file
dat_basename = os.path.basename(dat_out_path)
zipstream_zipfile.write_iter(
dat_basename,
chunk_buffer_generator(row_value_cache, python_format_list, cache_chunk_rows, point_mask,
encoding=CHARACTER_ENCODING),
buffer_size=self.ncpu.point_count * LINE_BUFFER_SIZE # Need this to force 64-bit zip
)
else: # No zip
# Create, write and close .dat file
dat_out_file = open(dat_out_path, mode='w')
for chunk_buffer in chunk_buffer_generator(row_value_cache, python_format_list, cache_chunk_rows,
point_mask):
dat_out_file.write(chunk_buffer + '\n')
dat_out_file.close()
self.info_output('Finished writing .dat file {}'.format(dat_out_path))
def create_des_file(self, des_out_path, zipstream_zipfile=None):
'''
Helper function to output .des file
'''
def des_line_generator(encoding=None):
'''
Helper Generator to yield line strings for .des file
'''
# Ignore netCDF system attributes
global_attributes_dict = {key: str(value).strip()
for key, value in self.netcdf_dataset.__dict__.items()
if not key.startswith('_')
}
# Determine maximum key length for fixed field width
max_key_length = max([len(key) for key in global_attributes_dict.keys()])
global_attributes_dict['ASEG_GDF2'] = 'Generated at {} from {} using nc2aseg.py'.format(
datetime.now().isoformat(),
os.path.basename(self.netcdf_path))
# Show dimension sizes
for dimension_name, dimension in self.netcdf_dataset.dimensions.items():
global_attributes_dict[dimension_name + '_count'] = str(dimension.size)
#logger.debug('global_attributes_dict = {}'.format(pformat(global_attributes_dict)))
for key in sorted(global_attributes_dict.keys()):
value = global_attributes_dict[key]
key_string = (' {{:<{}s}} : '.format(max_key_length)).format(key) # Include leading space
for value_line in value.split('\n'):
# Split long values into multiple lines. Need to be careful with leading & trailing spaces when reassembling
while value_line:
comment_line = 'COMM{}{}'.format(key_string,
value_line[:MAX_COMMENT_WIDTH - len(key_string)]) + '\n'
if encoding:
yield comment_line.encode(encoding)
else:
yield comment_line
value_line = value_line[MAX_COMMENT_WIDTH - len(key_string):]
if zipstream_zipfile:
# Write to zip file
des_basename = os.path.basename(des_out_path)
zipstream_zipfile.write_iter(des_basename,
des_line_generator(encoding=CHARACTER_ENCODING),
)
else: # No zip
# Create, write and close .dat file
des_out_file = open(des_out_path, mode='w')
            logger.debug('Writing lines to .des file {}'.format(des_out_path))
for des_line in des_line_generator():
logger.debug('Writing "{}" to .des file'.format(des_line))
des_out_file.write(des_line)
des_out_file.close()
self.info_output('Finished writing .des file {}'.format(des_out_path))
def convert2aseg_gdf(self,
dat_out_path=None,
zip_out_path=None,
stride=1,
point_mask=None):
'''
Function to convert netCDF file to ASEG-GDF
'''
start_time = datetime.now()
self.dat_out_path = dat_out_path or os.path.splitext(self.netcdf_dataset.filepath())[0] + '.dat'
        self.dfn_out_path = os.path.splitext(self.dat_out_path)[0] + '.dfn'
        self.des_out_path = os.path.splitext(self.dat_out_path)[0] + '.des'
if zip_out_path:
zipstream_zipfile = zipstream.ZipFile(compression=zipfile.ZIP_DEFLATED,
allowZip64=True
)
zipstream_zipfile.comment = ('ASEG-GDF2 files generated at {} from {}'.format(datetime.now().isoformat(),
os.path.basename(
self.netcdf_path))
).encode(CHARACTER_ENCODING)
try:
os.remove(zip_out_path)
except:
pass
else:
zipstream_zipfile = None
try:
self.create_dfn_file(self.dfn_out_path, zipstream_zipfile=zipstream_zipfile)
self.create_dat_file(self.dat_out_path, zipstream_zipfile=zipstream_zipfile)
self.create_des_file(self.des_out_path, zipstream_zipfile=zipstream_zipfile)
if zipstream_zipfile:
zip_out_file = open(zip_out_path, 'wb')
self.info_output('Writing zip file {}'.format(zip_out_path))
for data in zipstream_zipfile:
zip_out_file.write(data)
self.info_output('Closing zip file {}'.format(zip_out_path))
zipstream_zipfile.close()
except:
# Close and remove incomplete zip file
try:
zipstream_zipfile.close()
except:
pass
try:
zip_out_file.close()
except:
pass
try:
os.remove(zip_out_path)
logger.debug('Removed failed zip file {}'.format(zip_out_path))
except:
pass
raise
elapsed_time = datetime.now() - start_time
self.info_output(
'ASEG-GDF output completed in {}'.format(str(elapsed_time).split('.')[0])) # Discard partial seconds
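# --- Illustrative sketch (not executed; the paths below are placeholders) ------
# Programmatic use mirrors main(): open a netCDF point dataset and convert it,
# optionally streaming the .dat/.dfn/.des outputs into a 64-bit zip archive.
def _convert_example():
    converter = NC2ASEGGDF2('/data/survey.nc', debug=False, verbose=True)
    converter.convert2aseg_gdf(dat_out_path='/data/survey.dat',
                               zip_out_path='/data/survey_ASEG_GDF2.zip')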
def main():
'''
Main function
'''
def get_args():
"""
Handles all the arguments that are passed into the script
:return: Returns a parsed version of the arguments.
"""
parser = argparse.ArgumentParser(description='Convert netCDF file to ASEG-GDF2')
parser.add_argument("-r", "--crs",
help="Coordinate Reference System string (e.g. GDA94, EPSG:4283) for output",
type=str,
dest="crs")
parser.add_argument('-z', '--zip', action='store_const', const=True, default=False,
help='Zip directly to an archive file. Default is no zip')
parser.add_argument('-d', '--debug', action='store_const', const=True, default=False,
help='output debug information. Default is no debug info')
parser.add_argument('-v', '--verbose', action='store_const', const=True, default=False,
help='output verbosity. Default is non-verbose')
parser.add_argument('positional_args',
nargs=argparse.REMAINDER,
help='<nc_in_path> [<dat_out_path>] [<zip_out_path>]')
return parser.parse_args()
args = get_args()
# Setup Logging
log_level = logging.DEBUG if args.debug else logging.INFO
logger.setLevel(level=log_level)
assert 1 <= len(args.positional_args) <= 3, 'Invalid number of positional arguments.\n\
Usage: python {} <options> <nc_in_path> [<dat_out_path>] [<zip_out_path>]'.format(os.path.basename(sys.argv[0]))
nc_in_path = args.positional_args[0]
if len(args.positional_args) >= 2:
dat_out_path = args.positional_args[1]
else:
dat_out_path = os.path.splitext(nc_in_path)[0] + '.dat'
if args.zip:
if len(args.positional_args) == 3:
zip_out_path = args.positional_args[2]
else:
zip_out_path = os.path.splitext(nc_in_path)[0] + '_ASEG_GDF2.zip'
else:
zip_out_path = None
logger.debug('args: {}'.format(args.__dict__))
nc2aseggdf2 = NC2ASEGGDF2(nc_in_path, debug=args.debug, verbose=args.verbose)
nc2aseggdf2.convert2aseg_gdf(dat_out_path, zip_out_path)
if __name__ == '__main__':
# Setup logging handlers if required
if not logger.handlers:
# Set handler for root logger to standard output
console_handler = logging.StreamHandler(sys.stdout)
# console_handler.setLevel(logging.INFO)
console_handler.setLevel(logging.DEBUG)
console_formatter = logging.Formatter('%(message)s')
console_handler.setFormatter(console_formatter)
logger.addHandler(console_handler)
logger.debug('Logging handlers set up for logger {}'.format(logger.name))
main()
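# Example invocation (hypothetical file names, shown for illustration only):
#     python nc2aseggdf2.py -z survey_lines.nc survey_lines.dat survey_lines.zip
# which writes survey_lines.dat/.dfn/.des and bundles them into the zip archive.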
|
StarcoderdataPython
|
1703380
|
<filename>experiments.py
import pandas as pd
import numpy as np
from src.util import transform_datasets
from src.preprocessing import full_data, split_data
from sklearn.model_selection import StratifiedKFold
from src.model import cov_detector, dual_cov_detector
from sklearn.mixture import GaussianMixture
from sklearn.decomposition import FactorAnalysis, PCA
import argparse
import sys
def train_valid_split(X, y, shuffle=True):
    # A 2-fold stratified split yields two (train_idx, test_idx) pairs; the first
    # fold's train indices become the training set and the second fold's train
    # indices (i.e. the first fold's test indices) become the validation set.
    skf = StratifiedKFold(n_splits=2, shuffle=shuffle)
    train, valid = skf.split(X, y)
    train_index = train[0]
    valid_index = valid[0]
    return X[train_index], y[train_index], X[valid_index], y[valid_index]
def experiment(exp, seed, dual=False):
np.random.seed(seed)
safe_dataset, train_dataset, test_dataset = transform_datasets(full_data)
X_safe, y_safe = split_data(safe_dataset)
X_train, y_train = split_data(train_dataset)
X_test, y_test = split_data(test_dataset)
X_train, y_train, X_valid, y_valid = train_valid_split(X_train, y_train)
if dual is True:
exp.fit_cov(X_safe, X_train)
else:
exp.fit_cov(X_safe)
exp.fit(X_train, y_train)
print("Safe fp: {}".format(exp.false_positives(X_safe, y_safe)))
print("Valid MCC: {:.3f}".format(exp.score(X_valid, y_valid)))
print("Valid fp: {}".format(exp.false_positives(X_valid, y_valid)))
def single_estimator_experiment(args):
if args.estimator[0] == "GMM":
estimator = GaussianMixture(n_components=args.components[0], covariance_type='full')
elif args.estimator[0] == "FA":
estimator = FactorAnalysis(n_components=args.components[0])
elif args.estimator[0] == "PCA":
estimator = PCA(n_components=args.components[0], whiten=True)
else:
print("Invalid estimator:", args.estimator)
print("Supported estimators: 'GMM', 'FA', 'PCA'")
error_exit()
estimator_experiment = cov_detector(estimator, neighbors=args.neighbors, weights='uniform')
experiment(estimator_experiment, seed=args.seed)
def dual_estimator_experiment(args):
if args.estimator[0] == "GMM":
first_est = GaussianMixture(n_components=args.components[0], covariance_type='full')
elif args.estimator[0] == "FA":
first_est = FactorAnalysis(n_components=args.components[0])
elif args.estimator[0] == "PCA":
first_est = PCA(n_components=args.components[0], whiten=True)
else:
print("Invalid estimator:", args.estimator[0])
print("Supported estimators: 'GMM', 'FA', or 'PCA'")
error_exit()
if len(args.components) < 2:
print("Error: missing second estimator components.")
error_exit()
if args.estimator[1] == "GMM":
second_est = GaussianMixture(n_components=args.components[1], covariance_type='full')
elif args.estimator[1] == "FA":
second_est = FactorAnalysis(n_components=args.components[1])
elif args.estimator[1] == "PCA":
second_est = PCA(n_components=args.components[1], whiten=True)
else:
print("Invalid estimator:", args.estimator[1])
print("Supported estimators: 'GMM', 'FA', or 'PCA'")
error_exit()
dual_estimator = dual_cov_detector(first_est, second_est, neighbors=args.neighbors, weights='uniform')
experiment(dual_estimator, seed=args.seed, dual=True)
def error_exit():
parser.print_usage()
sys.exit(-1)
def main(args):
if args.estimator is None:
print("Error: missing an estimator. Valid options are: 'GMM', 'FA', or 'PCA'")
error_exit()
elif args.components is None:
print("Error: missing number of components.")
error_exit()
elif len(args.estimator) == 1:
single_estimator_experiment(args)
elif len(args.estimator) == 2:
dual_estimator_experiment(args)
else:
error_exit()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--estimator", help="the covariance matrix estimator", nargs='+')
parser.add_argument("-c", "--components", help="the number of components", type=int, nargs='+')
parser.add_argument("-n", "--neighbors", help="the number of neighbors", type=int, nargs='?', default=10)
parser.add_argument("-s", "--seed", help="initial seed", type=int, nargs='?', default=0)
main(args=parser.parse_args())
|
StarcoderdataPython
|
134961
|
<filename>helpers/graph_algorithms.py
"""
<NAME>
April 19, 2019
California Institute of Technology
Simple algorithm to transitive reduce an (acyclic) graph
"""
import numpy as np
def transitive_reduce(A):
"""
input: A is a reachability Boolean numpy array
output: transitive reduced version of A
"""
n = A.shape[0]
A_red = A.copy()
for i in range(n):
for j in range(n):
if A[i][j] and i != j:
for k in range(n):
if k != i and k != j and A[i][k] and A[k][j]:
A_red[i][j] = False
return A_red
#test case
#A = np.array([[1,0,0,0],[1,1,0,0],[1,0,1,0],[1,0,1,1]])
#print(transitive_reduce(A))
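# Minimal usage sketch (arbitrary 3-node example, not project data):
# B = np.array([[1, 1, 1], [0, 1, 1], [0, 0, 1]], dtype=bool)
# transitive_reduce(B) drops the direct 0->2 edge because 0->1->2 already implies it.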
|
StarcoderdataPython
|
3385112
|
# Copyright 2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the Hilbert-Schmidt templates.
"""
import pytest
import pennylane as qml
class TestHilbertSchmidt:
"""Tests for the Hilbert-Schmidt template."""
def test_hs_decomposition_1_qubit(self):
"""Test if the HS operation is correctly decomposed for a 1 qubit unitary."""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.Hadamard(wires=0)
def v_circuit(params):
qml.RZ(params[0], wires=1)
op = qml.HilbertSchmidt([0.1], v_function=v_circuit, v_wires=[1], u_tape=U)
with qml.tape.QuantumTape() as tape_dec:
op.decomposition()
expected_operations = [
qml.Hadamard(wires=[0]),
qml.CNOT(wires=[0, 1]),
qml.Hadamard(wires=[0]),
qml.RZ(-0.1, wires=[1]),
qml.CNOT(wires=[0, 1]),
qml.Hadamard(wires=[0]),
]
for i, j in zip(tape_dec.operations, expected_operations):
assert i.name == j.name
assert i.data == j.data
assert i.wires == j.wires
def test_hs_adjoint_method(self):
"""Test the adjoint method of the HS operation for a 1 qubit unitary."""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.RX(0.3, wires=0)
def v_circuit(params):
qml.RZ(params[0], wires=1)
adj = qml.HilbertSchmidt([0.1], v_function=v_circuit, v_wires=[1], u_tape=U).adjoint()
operations = adj.expand().operations
expected_operations = [
qml.Hadamard(wires=[0]),
qml.CNOT(wires=[0, 1]),
qml.RZ(0.1, wires=[1]),
qml.RX(-0.3, wires=[0]),
qml.CNOT(wires=[0, 1]),
qml.Hadamard(wires=[0]),
]
for i, j in zip(operations, expected_operations):
assert i.name == j.name
assert i.data == j.data
assert i.wires == j.wires
def test_hs_decomposition_2_qubits(self):
"""Test if the HS operation is correctly decomposed for 2 qubits."""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.SWAP(wires=[0, 1])
def v_circuit(params):
qml.RZ(params[0], wires=2)
qml.CNOT(wires=[2, 3])
op = qml.HilbertSchmidt([0.1], v_function=v_circuit, v_wires=[2, 3], u_tape=U)
with qml.tape.QuantumTape() as tape_dec:
op.decomposition()
expected_operations = [
qml.Hadamard(wires=[0]),
qml.Hadamard(wires=[1]),
qml.CNOT(wires=[0, 2]),
qml.CNOT(wires=[1, 3]),
qml.SWAP(wires=[0, 1]),
qml.RZ(-0.1, wires=[2]),
qml.CNOT(wires=[2, 3]),
qml.CNOT(wires=[1, 3]),
qml.CNOT(wires=[0, 2]),
qml.Hadamard(wires=[0]),
qml.Hadamard(wires=[1]),
]
for i, j in zip(tape_dec.operations, expected_operations):
assert i.name == j.name
assert i.data == j.data
assert i.wires == j.wires
def test_hs_decomposition_2_qubits_custom_wires(self):
"""Test if the HS operation is correctly decomposed for 2 qubits with custom wires."""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.SWAP(wires=["a", "b"])
def v_circuit(params):
qml.RZ(params[0], wires="c")
qml.CNOT(wires=["c", "d"])
op = qml.HilbertSchmidt([0.1], v_function=v_circuit, v_wires=["c", "d"], u_tape=U)
with qml.tape.QuantumTape() as tape_dec:
op.decomposition()
expected_operations = [
qml.Hadamard(wires=["a"]),
qml.Hadamard(wires=["b"]),
qml.CNOT(wires=["a", "c"]),
qml.CNOT(wires=["b", "d"]),
qml.SWAP(wires=["a", "b"]),
qml.RZ(-0.1, wires=["c"]),
qml.CNOT(wires=["c", "d"]),
qml.CNOT(wires=["b", "d"]),
qml.CNOT(wires=["a", "c"]),
qml.Hadamard(wires=["a"]),
qml.Hadamard(wires=["b"]),
]
for i, j in zip(tape_dec.operations, expected_operations):
assert i.name == j.name
assert i.data == j.data
assert i.wires == j.wires
def test_hs_adjoint_method_2_qubits(self):
"""Test the adjoint method of the HS operations for 2 qubits."""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.SWAP(wires=[0, 1])
def v_circuit(params):
qml.RZ(params[0], wires=2)
qml.CNOT(wires=[2, 3])
adj = qml.HilbertSchmidt([0.1], v_function=v_circuit, v_wires=[2, 3], u_tape=U).adjoint()
operations = adj.expand().operations
expected_operations = [
qml.Hadamard(wires=[1]),
qml.Hadamard(wires=[0]),
qml.CNOT(wires=[0, 2]),
qml.CNOT(wires=[1, 3]),
qml.CNOT(wires=[2, 3]),
qml.RZ(0.1, wires=[2]),
qml.SWAP(wires=[0, 1]),
qml.CNOT(wires=[1, 3]),
qml.CNOT(wires=[0, 2]),
qml.Hadamard(wires=[1]),
qml.Hadamard(wires=[0]),
]
for i, j in zip(operations, expected_operations):
assert i.name == j.name
assert i.data == j.data
assert i.wires == j.wires
def test_v_not_quantum_function(self):
"""Test that we cannot pass a non quantum function to the HS operation"""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.Hadamard(wires=0)
with qml.tape.QuantumTape(do_queue=False) as v_circuit:
qml.RZ(0.1, wires=1)
with pytest.raises(
qml.QuantumFunctionError,
match="The argument v_function must be a callable quantum " "function.",
):
qml.HilbertSchmidt([0.1], v_function=v_circuit, v_wires=[1], u_tape=U)
def test_u_v_same_number_of_wires(self):
"""Test that U and V must have the same number of wires."""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.CNOT(wires=[0, 1])
def v_circuit(params):
qml.RZ(params[0], wires=1)
with pytest.raises(
qml.QuantumFunctionError, match="U and V must have the same number of wires."
):
qml.HilbertSchmidt([0.1], v_function=v_circuit, v_wires=[2], u_tape=U)
def test_u_quantum_tape(self):
"""Test that U must be a quantum tape."""
def u_circuit():
qml.CNOT(wires=[0, 1])
def v_circuit(params):
qml.RZ(params[0], wires=1)
with pytest.raises(
qml.QuantumFunctionError, match="The argument u_tape must be a QuantumTape."
):
qml.HilbertSchmidt([0.1], v_function=v_circuit, v_wires=[1], u_tape=u_circuit)
def test_v_wires(self):
"""Test that all wires in V are also in v_wires."""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.Hadamard(wires=0)
def v_circuit(params):
qml.RZ(params[0], wires=2)
with pytest.raises(
qml.QuantumFunctionError, match="All wires in v_tape must be in v_wires."
):
qml.HilbertSchmidt([0.1], v_function=v_circuit, v_wires=[1], u_tape=U)
def test_distinct_wires(self):
"""Test that U and V have distinct wires."""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.Hadamard(wires=0)
def v_circuit(params):
qml.RZ(params[0], wires=0)
with pytest.raises(
qml.QuantumFunctionError, match="u_tape and v_tape must act on distinct wires."
):
qml.HilbertSchmidt([0.1], v_function=v_circuit, v_wires=[0], u_tape=U)
class TestLocalHilbertSchmidt:
"""Tests for the Local Hilbert-Schmidt template."""
def test_lhs_decomposition_1_qubit(self):
"""Test if the LHS operation is correctly decomposed"""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.Hadamard(wires=0)
def v_circuit(params):
qml.RZ(params[0], wires=1)
op = qml.LocalHilbertSchmidt([0.1], v_function=v_circuit, v_wires=[1], u_tape=U)
with qml.tape.QuantumTape() as tape_dec:
op.decomposition()
expected_operations = [
qml.Hadamard(wires=[0]),
qml.CNOT(wires=[0, 1]),
qml.Hadamard(wires=[0]),
qml.RZ(-0.1, wires=[1]),
qml.CNOT(wires=[0, 1]),
qml.Hadamard(wires=[0]),
]
for i, j in zip(tape_dec.operations, expected_operations):
assert i.name == j.name
assert i.data == j.data
assert i.wires == j.wires
def test_lhs_decomposition_1_qubit_custom_wires(self):
"""Test if the LHS operation is correctly decomposed with custom wires."""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.Hadamard(wires="a")
def v_circuit(params):
qml.RZ(params[0], wires="b")
op = qml.LocalHilbertSchmidt([0.1], v_function=v_circuit, v_wires=["b"], u_tape=U)
with qml.tape.QuantumTape() as tape_dec:
op.decomposition()
expected_operations = [
qml.Hadamard(wires=["a"]),
qml.CNOT(wires=["a", "b"]),
qml.Hadamard(wires=["a"]),
qml.RZ(-0.1, wires=["b"]),
qml.CNOT(wires=["a", "b"]),
qml.Hadamard(wires=["a"]),
]
for i, j in zip(tape_dec.operations, expected_operations):
assert i.name == j.name
assert i.data == j.data
assert i.wires == j.wires
def test_lhs_adjoint_method_1_qubit(self):
"""Test the adjoint method of the LHS operation for 1 qubit."""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.RX(0.3, wires=0)
def v_circuit(params):
qml.RZ(params[0], wires=1)
adj = qml.HilbertSchmidt([0.1], v_function=v_circuit, v_wires=[1], u_tape=U).adjoint()
operations = adj.expand().operations
expected_operations = [
qml.Hadamard(wires=[0]),
qml.CNOT(wires=[0, 1]),
qml.RZ(0.1, wires=[1]),
qml.RX(-0.3, wires=[0]),
qml.CNOT(wires=[0, 1]),
qml.Hadamard(wires=[0]),
]
for i, j in zip(operations, expected_operations):
assert i.name == j.name
assert i.data == j.data
assert i.wires == j.wires
def test_lhs_decomposition_2_qubits(self):
"""Test if the LHS operation is correctly decomposed for 2 qubits."""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.SWAP(wires=[0, 1])
def v_circuit(params):
qml.RZ(params[0], wires=2)
qml.CNOT(wires=[2, 3])
op = qml.LocalHilbertSchmidt([0.1], v_function=v_circuit, v_wires=[2, 3], u_tape=U)
with qml.tape.QuantumTape() as tape_dec:
op.decomposition()
expected_operations = [
qml.Hadamard(wires=[0]),
qml.Hadamard(wires=[1]),
qml.CNOT(wires=[0, 2]),
qml.CNOT(wires=[1, 3]),
qml.SWAP(wires=[0, 1]),
qml.RZ(-0.1, wires=[2]),
qml.CNOT(wires=[2, 3]),
qml.CNOT(wires=[0, 2]),
qml.Hadamard(wires=[0]),
]
for i, j in zip(tape_dec.operations, expected_operations):
assert i.name == j.name
assert i.data == j.data
assert i.wires == j.wires
def test_lhs_adjoint_method_2_qubits(self):
"""Test the adjoint method of the LHS operation for 2 qubits."""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.SWAP(wires=[0, 1])
def v_circuit(params):
qml.RZ(params[0], wires=2)
qml.CNOT(wires=[2, 3])
adj = qml.LocalHilbertSchmidt(
[0.1], v_function=v_circuit, v_wires=[2, 3], u_tape=U
).adjoint()
operations = adj.expand().operations
expected_operations = [
qml.Hadamard(wires=[0]),
qml.CNOT(wires=[0, 2]),
qml.CNOT(wires=[2, 3]),
qml.RZ(0.1, wires=[2]),
qml.SWAP(wires=[0, 1]),
qml.CNOT(wires=[1, 3]),
qml.CNOT(wires=[0, 2]),
qml.Hadamard(wires=[1]),
qml.Hadamard(wires=[0]),
]
for i, j in zip(operations, expected_operations):
assert i.name == j.name
assert i.data == j.data
assert i.wires == j.wires
def test_v_not_quantum_function(self):
"""Test that we cannot pass a non quantum function to the HS operation"""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.Hadamard(wires=0)
with qml.tape.QuantumTape(do_queue=False) as v_circuit:
qml.RZ(0.1, wires=1)
with pytest.raises(
qml.QuantumFunctionError,
match="The argument v_function must be a callable quantum " "function.",
):
qml.LocalHilbertSchmidt([0.1], v_function=v_circuit, v_wires=[1], u_tape=U)
def test_u_v_same_number_of_wires(self):
"""Test that U and V must have the same number of wires."""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.CNOT(wires=[0, 1])
def v_circuit(params):
qml.RZ(params[0], wires=1)
with pytest.raises(
qml.QuantumFunctionError, match="U and V must have the same number of wires."
):
qml.LocalHilbertSchmidt([0.1], v_function=v_circuit, v_wires=[2], u_tape=U)
def test_u_quantum_tape(self):
"""Test that U must be a quantum tape."""
def u_circuit():
qml.CNOT(wires=[0, 1])
def v_circuit(params):
qml.RZ(params[0], wires=1)
with pytest.raises(
qml.QuantumFunctionError, match="The argument u_tape must be a QuantumTape."
):
qml.LocalHilbertSchmidt([0.1], v_function=v_circuit, v_wires=[1], u_tape=u_circuit)
def test_v_wires(self):
"""Test that all wires in V are also in v_wires."""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.Hadamard(wires=0)
def v_circuit(params):
qml.RZ(params[0], wires=2)
with pytest.raises(
qml.QuantumFunctionError, match="All wires in v_tape must be in v_wires."
):
qml.LocalHilbertSchmidt([0.1], v_function=v_circuit, v_wires=[1], u_tape=U)
def test_distinct_wires(self):
"""Test that U and V have distinct wires."""
with qml.tape.QuantumTape(do_queue=False) as U:
qml.Hadamard(wires=0)
def v_circuit(params):
qml.RZ(params[0], wires=0)
with pytest.raises(
qml.QuantumFunctionError, match="u_tape and v_tape must act on distinct wires."
):
qml.LocalHilbertSchmidt([0.1], v_function=v_circuit, v_wires=[0], u_tape=U)
|
StarcoderdataPython
|
188598
|
# Parse the header file to generate a Python wrapper around the rszvb DLL for the Rohde&Schwarz ZVA 40 Network Analyzer
fout = open("rszvb_v2.py",'w')
# First load the DLL
print >>fout , """import numpy as numpy
from ctypes import *
# Prerequisition: installed rszvb driver 32-bit
# Reference to rszvb dll
rszvbDLL = windll.rszvb_32
class ZVBDLLERROR(Exception):
pass
iStringBufferLen = 1024
sStringBuffer = create_string_buffer(iStringBufferLen)
def __errorcheck__(iCode, func, args):
if iCode<0:
iHandle = args[0]
rszvbDLL.rszvb_error_message(iHandle, iCode, sStringBuffer)
msg = " {} : {}".format(func.name, sStringBuffer.value)
eqcode = c_int(0)
rszvbDLL.rszvb_error_query (iHandle, byref(eqcode), sStringBuffer)
if eqcode.value != 0:
msg += " - ZVB Instrument Error : {}".format(sStringBuffer.value)
raise ZVBDLLERROR(msg)
else:
return args
"""
# Load all constants
print >>fout,"\n#DLL constants\n"
fin = open("rszvb_header_file.h",'r')
for l in fin:
if l.startswith("#define RSZVB") and "0x" not in l :
vals = l[8:].split(' ',1)
print >>fout, vals[0],"=",vals[1].strip()
fin.close()
# Prototype the DLL functions
print >>fout,"\n#DLL functions\n"
type_convert = {'ViSession' : 'c_int',
'ViSession*' : 'POINTER(c_int)',
'ViStatus' : 'c_int',
'ViInt16' : 'c_int16',
'ViInt16*' : 'POINTER(c_int16)',
'ViInt32' : 'c_int32',
'ViInt32*' : 'POINTER(c_int32)',
'ViBoolean' : 'c_bool',
'ViBoolean*' : 'POINTER(c_bool)',
'ViString' : 'c_char_p',
'ViRsrc' : 'c_char_p',
'ViReal64' : 'c_double',
'ViReal64*' : 'POINTER(c_double)',
'ViReal64 _VI_FAR' : 'numpy.ctypeslib.ndpointer(dtype=numpy.float64)',
'ViInt32 _VI_FAR' : 'numpy.ctypeslib.ndpointer(dtype=numpy.int32)',
'ViChar _VI_FAR' : 'c_char_p',
}
with open("rszvb_header_file.h",'r') as fin:
while True:
l = fin.readline()
if not l: break
if l.startswith("ViStatus _VI_FUNC"):
func_block = l[17:]
if ';' not in func_block:
l = ''
while ';' not in l:
l = fin.readline()
func_block += l
func_name,func_args = [s.strip('\n );') for s in func_block.split('(')]
func_args = [s.strip() for s in func_args.split(',')]
print >>fout, "#",func_name,func_args
arg_types = []
arg_names = []
for s in func_args:
s = s.split(' ')
arg_types.append(s[0]+' '+s[1] if len(s)>2 else s[0])
arg_names.append(s[-1])
print >>fout, "prototype = WINFUNCTYPE(c_int, {0})".format( ','.join([type_convert.get(typ, 'Unknown') for typ in arg_types]) )
print >>fout, "paramflags = ({0},)".format(','.join(["({0}, '{1}')".format(2 if '*' in typ else 1 ,nam) for (typ,nam) in zip(arg_types,arg_names)]))
print >>fout, func_name," = prototype(('{0}', rszvbDLL), paramflags)".format(func_name)
print >>fout, "{0}.name = '{0}'".format(func_name)
print >>fout, "{0}.errcheck = __errorcheck__".format(func_name)
print >>fout, "{0}.output = {1}".format(func_name, any(['*' in typ for typ in arg_types]))
fin.close()
fout.close()
|
StarcoderdataPython
|
4800484
|
<reponame>vlsantos-bit/Meteograma_GFS_GRIB2
# -*- coding: utf-8 -*-
"""Meteograma_gfs_data.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/136EV_t3AaGttb8F43aTEQt5BRAYOVR1x
"""
# Downloading the libraries
!sudo apt-get install python-grib
!sudo python setup.py install
!sudo apt-get install libgeos-dev
!sudo pip3 install -U git+https://github.com/matplotlib/basemap.git
!apt install libgrib-api-dev libgrib2c-dev
!pip install pyproj==1.9.6
!pip install pygrib
# Importing the libraries
import pygrib
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import shiftgrid
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from datetime import datetime
# 3 days of forecast (3-hourly GFS files)
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_000.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_003.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_006.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_009.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_012.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_015.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_018.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_021.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_024.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_027.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_030.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_033.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_036.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_039.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_042.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_045.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_048.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_051.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_054.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_057.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_060.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_063.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_066.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_069.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_072.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_075.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_078.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_081.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_084.grb2
!wget https://www.ncei.noaa.gov/data/global-forecast-system/access/grid-004-0.5-degree/forecast/202107/20210714/gfs_4_20210714_0000_087.grb2
# Opening the data and selecting the variables
# (use one of the files downloaded above)
gr = pygrib.open('gfs_4_20210714_0000_006.grb2')
t = gr.select(name='Temperature')[0]
# Defining the Rio Grande do Sul (RS) region
data, lats, lons = t.data(lat1=-35,lat2=-25,lon1=220,lon2=320)
# Visualizing the selected grid
m = Basemap(projection='cyl',llcrnrlat=-35,urcrnrlat=-25,\
llcrnrlon=302,urcrnrlon=315,resolution='i')
# Matplotlib function to set the figure size.
plt.figure(figsize=(8,10))
# Projecting the lat/lon values onto the Basemap figure and binding them to x and y
x, y = m(lons, lats)
# Basemap functions to draw coastlines, countries and states on the map.
m.drawcoastlines()
m.drawcountries()
m.drawstates()
la = np.arange(-35,-25,3.)
lo = np.arange(302.,315.,3.)
m.drawparallels(la,labels=[False,True,True,False])
m.drawmeridians(lo,labels=[True,False,False,True])
#m.scatter(lons, lats, marker = 'o', color='r', zorder=2)
# Basemap function to interpolate and draw filled contours
contourf = m.contourf(x, y, np.squeeze(data),cmap='jet')
m.colorbar(contourf, location='right', pad="10%")
# Marking the city of Pelotas
m.scatter(307.6, -31.6, marker = 'o', color='r', zorder=3)
# Finding the nearest grid point to a given lat/lon
# (returns the array indices of the selected lat/lon)
def encontra_lat_lon(la,lo):
stn_lat = la
stn_lon = lo
lat = lats
lon = lons
abslat = np.abs(lat-stn_lat)
abslon= np.abs(lon-stn_lon)
c = np.maximum(abslon,abslat)
latlon_idx = np.argmin(c)
x, y = np.where(c == np.min(c))
return(x,y)
# Applying it to find the indices of the nearest grid point
encontra_lat_lon(-31.6,307.6)
# Getting the data at the Pelotas grid point
data[13,175]
# Opening 3 days of data
gr = pygrib.open('gfs_4_20210714_0000_000.grb2')
gr2 = pygrib.open('gfs_4_20210714_0000_003.grb2')
gr3 = pygrib.open('gfs_4_20210714_0000_006.grb2')
gr4 = pygrib.open('gfs_4_20210714_0000_009.grb2')
gr5 = pygrib.open('gfs_4_20210714_0000_012.grb2')
gr6 = pygrib.open('gfs_4_20210714_0000_015.grb2')
gr7 = pygrib.open('gfs_4_20210714_0000_018.grb2')
gr8 = pygrib.open('gfs_4_20210714_0000_021.grb2')
gr9 = pygrib.open('gfs_4_20210714_0000_024.grb2')
gr10 = pygrib.open('gfs_4_20210714_0000_027.grb2')
gr11 = pygrib.open('gfs_4_20210714_0000_030.grb2')
gr12 = pygrib.open('gfs_4_20210714_0000_033.grb2')
gr13 = pygrib.open('gfs_4_20210714_0000_036.grb2')
gr14 = pygrib.open('gfs_4_20210714_0000_039.grb2')
gr15 = pygrib.open('gfs_4_20210714_0000_042.grb2')
gr16 = pygrib.open('gfs_4_20210714_0000_045.grb2')
gr17 = pygrib.open('gfs_4_20210714_0000_048.grb2')
gr18 = pygrib.open('gfs_4_20210714_0000_051.grb2')
gr19 = pygrib.open('gfs_4_20210714_0000_054.grb2')
gr20 = pygrib.open('gfs_4_20210714_0000_057.grb2')
gr21 = pygrib.open('gfs_4_20210714_0000_060.grb2')
gr22 = pygrib.open('gfs_4_20210714_0000_063.grb2')
gr23 = pygrib.open('gfs_4_20210714_0000_066.grb2')
gr24 = pygrib.open('gfs_4_20210714_0000_069.grb2')
gr25 = pygrib.open('gfs_4_20210714_0000_072.grb2')
gr26 = pygrib.open('gfs_4_20210714_0000_075.grb2')
gr27 = pygrib.open('gfs_4_20210714_0000_078.grb2')
gr28 = pygrib.open('gfs_4_20210714_0000_081.grb2')
gr29 = pygrib.open('gfs_4_20210714_0000_084.grb2')
gr30 = pygrib.open('gfs_4_20210714_0000_087.grb2')
# 1 - Get the data for the requested variable at each model pressure level
#     (indx_lat/index_lon default to the Pelotas grid point found above).
def selec_t(dadoo, name_v, indx_lat=13, index_lon=175):
    dadt = []
    # GRIB message indices 41..22 correspond to the levels
    # 1013 (surface), 1000, 975, 950, ..., 250, 200 hPa.
    for level_index in range(41, 21, -1):
        msg = dadoo.select(name=name_v)[level_index]
        data, lats, lons = msg.data(lat1=-35, lat2=-25, lon1=220, lon2=320)
        dadt.append(data[indx_lat, index_lon])
    return dadt
# 2 - Build one DataFrame per time step and concatenate them column-wise
#     (for temperature, subtract 273.15 afterwards to convert K to degrees C).
def transf_ajust(*series):
    index = ['1013', '1000', '975', '950', '925', '900', '850', '800', '750', '700',
             '650', '600', '550', '500', '450', '400', '350', '300', '250', '200']
    ind = pd.DataFrame(index, columns=['niveis'])
    frames = [pd.DataFrame(s) for s in series]
    return pd.concat([ind] + frames, axis=1)
# Applying the function (grib-format data, 'name of the desired variable')
na='Relative humidity'
dad1=selec_t(gr,na)
dad2=selec_t(gr2,na)
dad3=selec_t(gr3,na)
dad4=selec_t(gr4,na)
dad5=selec_t(gr5,na)
dad6=selec_t(gr6,na)
dad7=selec_t(gr7,na)
dad8=selec_t(gr8,na)
dad9=selec_t(gr9,na)
dad10=selec_t(gr10,na)
dad11=selec_t(gr11,na)
dad12=selec_t(gr12,na)
dad13=selec_t(gr13,na)
dad14=selec_t(gr14,na)
dad15=selec_t(gr15,na)
dad16=selec_t(gr16,na)
dad17=selec_t(gr17,na)
dad18=selec_t(gr18,na)
dad19=selec_t(gr19,na)
dad20=selec_t(gr20,na)
dad21=selec_t(gr21,na)
dad22=selec_t(gr22,na)
dad23=selec_t(gr23,na)
dad24=selec_t(gr24,na)
dad25=selec_t(gr25,na)
dad26=selec_t(gr26,na)
dad27=selec_t(gr27,na)
dad28=selec_t(gr28,na)
dad29=selec_t(gr29,na)
dad30=selec_t(gr30,na)
# Building and concatenating the DataFrames (temperature adjustment: subtract 273.15)
df=transf_ajust(dad1,dad2,dad3,dad4,dad5,dad6,dad7,dad8,dad9,dad10,dad11,dad12,dad13,dad14,dad15,dad16,dad17,dad18,dad19,dad20,dad21,dad22,dad23,dad24,dad25,dad26,dad27,dad28,dad29,dad30)
df.columns = ['niveis','00', '03', '06','09','12','15','18','21','24','27','30','33','36','39','42','45','48','51','54','57','60', '63', '66','69','72','75','78','81','84','87']
Y = df['niveis'].astype(float)
del df['niveis']
df.iloc[::-1]
df.reset_index(drop=True)
X = df.columns.values[:].astype(float)
X, Y = np.meshgrid(X, Y)
# Plotting the figure
fig, ax = plt.subplots(1, 1)
fig.set_size_inches(12, 6)
plt.gca().invert_yaxis()
plt.title('Meteograma altura geopotencial para Cidade de Pelotas')
plt.xlabel('14/07/2021 -- 17/07/2021 (Z)')
plt.ylabel('Níveis Hpa')
cp= ax.contourf(X,Y,df,1000,cmap='jet')
fig.colorbar(cp)
for g in gr:
print(g)
|
StarcoderdataPython
|
129793
|
<reponame>hieu1999210/image_compression
"""
from https://github.com/facebookresearch/detectron2/blob/master/detectron2/modeling/meta_arch/build.py
"""
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from utils import Registry
META_ARCH_REGISTRY = Registry("META_ARCH")
META_ARCH_REGISTRY.__doc__ = """
Registry for meta-architectures, i.e. the whole model.
The registered object will be called with `obj(cfg)`
and expected to return a `nn.Module` object.
"""
def build_model(cfg):
"""
Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``.
Note that it does not load any weights from ``cfg``.
"""
meta_arch = cfg.MODEL.META_ARCHITECTURE
return META_ARCH_REGISTRY.get(meta_arch)(cfg)
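# Usage sketch (hypothetical architecture name; assumes the Registry in `utils`
# follows the detectron2-style decorator API referenced above):
#
# @META_ARCH_REGISTRY.register()
# class MyCompressor(torch.nn.Module):
#     def __init__(self, cfg):
#         super().__init__()
#
# With cfg.MODEL.META_ARCHITECTURE = "MyCompressor", build_model(cfg)
# returns MyCompressor(cfg).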
|
StarcoderdataPython
|
37672
|
<reponame>xiangnan-fan/proj01<gh_stars>0
#!/bin/python3
# encoding: utf-8
import tensorflow as tf
tf.enable_eager_execution()
class CNN(tf.keras.Model):
def __init__(self):
super().__init__()
self.conv1 = tf.keras.layers.Conv2D(
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu
)
self.pool1 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
self.conv2 = tf.keras.layers.Conv2D(
filters=64,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu
)
self.pool2 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
self.flatten = tf.keras.layers.Reshape(target_shape=(7 * 7 * 64,))
self.dense1 = tf.keras.layers.Dense(units=1024, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(units=10)
def call(self, inputs):
inputs = tf.reshape(inputs, [-1, 28, 28, 1])
x = self.conv1(inputs) # [batch_size, 28, 28, 32]
x = self.pool1(x) # [batch_size, 14, 14, 32]
x = self.conv2(x) # [batch_size, 14, 14, 64]
x = self.pool2(x) # [batch_size, 7, 7, 64]
x = self.flatten(x) # [batch_size, 7 * 7 * 64]
x = self.dense1(x) # [batch_size, 1024]
x = self.dense2(x) # [batch_size, 10]
return x
def predict(self, inputs):
logits = self(inputs)
return tf.argmax(logits, axis=-1)
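if __name__ == '__main__':
    # Minimal smoke test (random data standing in for MNIST-style 28x28 images;
    # this block is illustrative and not part of the original script).
    model = CNN()
    dummy_images = tf.random_uniform([4, 28, 28, 1])
    print(model.predict(dummy_images))  # four predicted class indices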
|
StarcoderdataPython
|
3256910
|
__version__ = '0.0.1'
import mongoengine as mongo
from bson import DBRef
#
# # https://hack.close.io/posts/mongomallard
class SafeReferenceField(mongo.ReferenceField):
"""
Like a ReferenceField, but doesn't return non-existing references when
dereferencing, i.e. no DBRefs are returned. This means that the next time
an object is saved, the non-existing references are removed and application
code can rely on having only valid dereferenced objects.
When the field is referenced, the referenced object is loaded from the
database.
"""
def __init__(self, field, **kwargs):
self.__class__.__name__ = "ReferenceField"
# Fake as a plain ReferenceField so form generation works.
# Removing the line above will cause errors in Jinja complaining
# that it cannot find fields to render HTML.
# Check code in: env/lib/python2.7/site-packages/flask_admin/contrib/mongoengine/form.py:57
# The convert function would otherwise skip SafeReferenceField because it does not know how to convert it.
# if not isinstance(field, mongo.ReferenceField):
# raise ValueError('Field argument must be a ReferenceField instance.')
super(SafeReferenceField, self).__init__(field, **kwargs)
def to_python(self, value):
"""Convert a MongoDB-compatible type to a Python type."""
try:
# import pdb
# from pprint import pprint
# print value
# pdb.set_trace()
if (not self.dbref and
not isinstance(value, (DBRef, mongo.Document, mongo.EmbeddedDocument))):
collection = self.document_type._get_collection_name()
value = DBRef(collection, self.document_type.id.to_python(value))
return value
except Exception as exc:
    raise Exception("SafeReferenceField could not convert value {!r}: {}".format(value, exc))
def __get__(self, instance, owner):
toRet = None
try:
toRet = super(SafeReferenceField, self).__get__(instance, owner)
except:
pass
return toRet
# https://hack.close.io/posts/mongomallard
class SafeReferenceListField(mongo.ListField):
"""
Like a ListField, but doesn't return non-existing references when
dereferencing, i.e. no DBRefs are returned. This means that the next time
an object is saved, the non-existing references are removed and application
code can rely on having only valid dereferenced objects.
When the field is referenced, all referenced objects are loaded from the
database.
Must use ReferenceField as its field class.
"""
def __init__(self, field, **kwargs):
self.__class__.__name__ = "ListField"
# Fake as a ListField so form generation works.
# Removing the line above will cause errors in Jinja complaining
# that it cannot find fields to render HTML.
# Check code in: env/lib/python2.7/site-packages/flask_admin/contrib/mongoengine/form.py:57
# Convert function will remove SafeReferenceListField because it does not know how to convert them.
if not isinstance(field, mongo.ReferenceField):
raise ValueError('Field argument must be a ReferenceField instance.')
super(SafeReferenceListField, self).__init__(field, **kwargs)
def to_python(self, value):
result = super(SafeReferenceListField, self).to_python(value)
if result:
# for item in result:
# print "before >>>>>", type(item)
objs = self.field.document_type.objects.in_bulk([obj.id for obj in result])
# tmp = filter(None, [objs.get(obj.id) for obj in result])
tmp = [_f for _f in [objs.get(obj.id) for obj in result] if _f]
# for item in tmp:
# print "after >>>>>", type(item), item.name
# print "Before:", len(result), " After:", len(tmp)
return tmp
|
StarcoderdataPython
|
3340935
|
from time import sleep
import logging
class DialogBox():
"""Implements a dialog box with given values (or some default ones if chosen)."""
value_selected = False
pointer = 0
default_options = {"y":["Yes", True], 'n':["No", False], 'c':["Cancel", None]}
def __init__(self, values, i, o, message="Are you sure?", name="DialogBox"):
"""Initialises the DialogBox object.
Args:
* ``values``: values to be used. Should be a list of ``[label, returned_value]`` pairs.
* You can also pass a string "yn" to get "Yes(True), No(False)" options, or "ync" to get "Yes(True), No(False), Cancel(None)" options.
* Values put together with spaces between them shouldn't be longer than the screen's width.
* ``i``, ``o``: input&output device objects
Kwargs:
* ``message``: Message to be shown on the first line of the screen when UI element is activated
* ``name``: UI element name which can be used internally and for debugging.
"""
self.i = i
self.o = o
self.name = name
if isinstance(values, str):
self.values = []
for char in values:
self.values.append(self.default_options[char])
#value_str = " ".join([value[0] for value in values])
#assert(len(value_str) <= o.cols, "Resulting string too long for the display!")
else:
assert type(values) in (list, tuple), "Unsupported 'values' argument!"
assert values, "DialogBox: Empty/invalid 'values' argument!"
self.values = values
self.message = message
self.process_values()
self.generate_keymap()
def to_foreground(self):
self.in_foreground = True
self.refresh()
self.set_keymap()
def activate(self):
logging.info("{0} activated".format(self.name))
self.to_foreground()
self.value_selected = False
self.pointer = 0
self.o.cursor()
while self.in_foreground: #All the work is done in input callbacks
sleep(0.1)
self.o.noCursor()
logging.debug(self.name+" exited")
if self.value_selected:
return self.values[self.pointer][1]
else:
return None
def deactivate(self):
self.in_foreground = False
logging.info("{0} deactivated".format(self.name))
def generate_keymap(self):
self.keymap = {
"KEY_RIGHT":lambda: self.move_right(),
"KEY_LEFT":lambda: self.move_left(),
"KEY_KPENTER":lambda: self.accept_value(),
"KEY_ENTER":lambda: self.accept_value()
}
def set_keymap(self):
self.i.stop_listen()
self.i.clear_keymap()
self.i.keymap = self.keymap
self.i.listen()
def move_left(self):
if self.pointer == 0:
self.deactivate()
return
self.pointer -= 1
self.refresh()
def move_right(self):
if self.pointer == len(self.values)-1:
return
self.pointer += 1
self.refresh()
def accept_value(self):
self.value_selected = True
self.deactivate()
def process_values(self):
self.labels = [label for label, value in self.values]
label_string = " ".join(self.labels)
if len(label_string) > self.o.cols:
raise ValueError("DialogBox {}: all values combined are longer than screen's width".format(self.name))
self.right_offset = (self.o.cols - len(label_string))//2  # integer offset for string padding
self.displayed_label = " "*self.right_offset+label_string
#Need to go through the string to mark the first places because we need to remember where to put the cursors
current_position = self.right_offset
self.positions = []
for label in self.labels:
self.positions.append(current_position)
current_position += len(label) + 1
def refresh(self):
self.o.noCursor()
self.o.display_data(self.message, self.displayed_label)
self.o.cursor()
self.o.setCursor(1, self.positions[self.pointer])
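# Usage sketch (assumes `i` and `o` are already-initialised input/output device
# objects from the surrounding UI framework; they are not created here):
# box = DialogBox("ync", i, o, message="Overwrite file?")
# choice = box.activate()  # True for Yes, False for No, None for Cancel or left-exit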
|
StarcoderdataPython
|
4805753
|
# -*- coding: utf-8 -*-
u"""
.. _qook_tutorial:
Using xrtQook for script generation
-----------------------------------
- Start xrtQook: type ``python xrtQookStart.pyw`` from xrt/gui or, if you have
installed xrt by running setup.py, type ``xrtQookStart.pyw`` from any
location.
.. note::
The scaling of GUI may behave differently in different systems and Qt
versions. If it looks wrong, try the scaling options in
``xrtQookStart.pyw``.
.. note::
If you want to start xrtQook from Spyder, select the run option
"Execute in an external system terminal".
- Rename beamLine to myTestBeamline by double clicking on it (you do not have
to, only for demonstration).
- Right-click on myTestBeamline and Add Source → BendingMagnet. The same can be
done from the icon buttons on the left.
.. imagezoom:: _images/qookTutor01.png
:scale: 60 %
- In its properties change eMin to 10000-10 and eMax to 10000+10. The middle of
this range will be used to automatically align crystals (one crystal in this
example) unless the parameter myTestBeamline.alignE sets another value. Blue
color indicates non-default values. These will be included into the generated
script. All the default-valued parameters do not propagate into the script.
.. imagezoom:: _images/qookTutor02.png
:scale: 60 %
- Create a crystalline material CrystalSi. This will create a Si111 crystal at
room temperature.
.. imagezoom:: _images/qookTutor03.png
:scale: 60 %
- Add a generic OE -> OE. This will add an optical element with a flat surface.
.. note::
The sequence of the inserted optical elements does matter! This sequence
determines the order of beam propagation.
.. imagezoom:: _images/qookTutor04.png
:scale: 60 %
- In its properties select the created crystal as 'material', put [0, 20000, 0]
as 'center' (i.e. 20 m from source) and "auto" (with or without quotes) as
'pitch'.
.. imagezoom:: _images/qookTutor05.png
:scale: 60 %
- Add a screen to the beamline.
.. imagezoom:: _images/qookTutor06.png
:scale: 60 %
- Give it [0, 21000, auto] as 'center'. Its height -- the last coordinate --
will be automatically calculated from the previous elements.
.. imagezoom:: _images/qookTutor07.png
:scale: 60 %
- Check the beamline layout with xrtGlow.
.. imagezoom:: _images/qookTutor08.png
:scale: 60 %
- Add a plot and select the local screen beam.
.. imagezoom:: _images/qookTutor09.png
:scale: 60 %
- Define an offset to the color (energy) axis.
.. imagezoom:: _images/qookTutor10.png
:scale: 60 %
- Save the beamline layout as xml.
- Generate python script (the button with a code page and the python logo),
save the script and run it.
- In the console output you can read the actual pitch (Bragg angle) for the
crystal and the screen position.
.. imagezoom:: _images/qookTutor11.png
:scale: 60 %
"""
|
StarcoderdataPython
|
3342212
|
#!/usr/bin/env python
"""A module that maps unique strings to the same objects.
This is useful when, for example, you have some complicated structures that
are identified and referenced by names.
"""
__docformat__ = "restructuredtext"
import os
import warnings
import inspect
csPath = os.path.join("cleversheep3", "Test", "Tester")
for count, frame in enumerate(inspect.stack()):
if count == 0:
continue
if csPath not in frame[1]:
break
warnings.warn("""
The 'cleversheep3.Prog.Ustr' module is deprecated. It will no longer be
supported from version 0.5 onwards and will be removed in version 0.6.
Please use the 'Intern' module instead :
----------------------------------------------------------------------
""", PendingDeprecationWarning, stacklevel=count + 1)
from cleversheep3.Prog import Intern
Ustr = Intern.intern
ustrStr = Intern.internIfStr
ustr_property = Intern.internProperty
ustr_func = Intern.internFuncStrings
ustr_method = Intern.internMethodStrings
def reset():
"""Reset the module, forgetting previously defined Ustr instances.
This exists for backward compatibility only. It does nothing.
"""
|
StarcoderdataPython
|
3238759
|
<reponame>justdjango/django-nft-sniper<filename>djsniper/sniper/admin.py<gh_stars>10-100
from django.contrib import admin
from .models import NFTProject, NFT, NFTTrait, NFTAttribute
class NFTAdmin(admin.ModelAdmin):
list_display = ["nft_id", "rank", "rarity_score"]
search_fields = ["nft_id__exact"]
class NFTAttributeAdmin(admin.ModelAdmin):
list_display = ["name", "value"]
list_filter = ["name"]
admin.site.register(NFTProject)
admin.site.register(NFTTrait)
admin.site.register(NFT, NFTAdmin)
admin.site.register(NFTAttribute, NFTAttributeAdmin)
|
StarcoderdataPython
|
1653336
|
<reponame>konflic/sqlite_database
import sqlite3
con3 = sqlite3.connect(":memory:")
con3.close()
def example_db():
sql = """
CREATE TABLE
IF NOT EXISTS users
(
id INTEGER PRIMARY KEY,
nickname TEXT,
email TEXT,
reg_date DATETIME
);
"""
con = sqlite3.connect("example.db")
con.execute(sql)
return con
def simple_db():
con = sqlite3.connect("simple.sqlite")
with open("sql/create_simple_table.sql", "r") as script:
con.execute(script.read())
con.close()
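if __name__ == "__main__":
    # Usage sketch (illustrative only): create the example DB and insert one row.
    con = example_db()
    con.execute(
        "INSERT INTO users (nickname, email, reg_date) VALUES (?, ?, datetime('now'))",
        ("alice", "alice@example.com"),
    )
    con.commit()
    con.close()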
|
StarcoderdataPython
|
3358167
|
<reponame>doctsystems/obsCovidTja
from django.urls import reverse
from core.models import ClaseModelo
from django.contrib.gis.db import models  # GeoDjango models (supersedes django.db.models here)
class Persona(ClaseModelo):
nombres=models.CharField(max_length=20)
apellidos=models.CharField(max_length=30)
carnet=models.CharField(max_length=10)
celular=models.CharField(max_length=8, default='00111222')
direccion=models.CharField(max_length=100)
fecha_nacimiento=models.DateField()
OPositivo='O +'
ONegativo='O -'
APositivo='A +'
ANegativo='A -'
BPositivo='B +'
BNegativo='B -'
ABPositivo='AB +'
ABNegativo='AB -'
TIPO_SANGRE=[
(OPositivo, 'O +'),
(ONegativo, 'O -'),
(APositivo, 'A +'),
(ANegativo, 'A -'),
(BPositivo, 'B +'),
(BNegativo, 'B -'),
(ABPositivo, 'AB +'),
(ABNegativo, 'AB -'),
]
grupo_sanguineo=models.CharField(max_length=5, choices=TIPO_SANGRE, default=OPositivo)
def __str__(self):
return '{} {}'.format(self.nombres, self.apellidos)
def save(self):
self.nombres=self.nombres.upper()
self.apellidos=self.apellidos.upper()
super(Persona, self).save()
class Meta:
verbose_name_plural='Personas'
def get_absolute_url(self):
return reverse('persona-detalle', args=[str(self.id)])
class Especialidad(ClaseModelo):
nombre=models.CharField(max_length=50)
descripcion=models.CharField(max_length=100)
observaciones=models.TextField(blank=True, null=True)
def __str__(self):
return '{}'.format(self.nombre)
def save(self):
self.nombre=self.nombre.upper()
super(Especialidad, self).save()
class Meta():
verbose_name_plural='Especialidades'
class Medicamento(ClaseModelo):
nombre=models.CharField(max_length=30)
descripcion=models.CharField(max_length=100)
def __str__(self):
return '{}'.format(self.nombre)
def save(self):
self.nombre=self.nombre.upper()
super(Medicamento, self).save()
class Meta():
verbose_name_plural='Medicamentos'
class EnfermedadBase(ClaseModelo):
nombre=models.CharField(max_length=20)
descripcion=models.CharField(max_length=50)
def __str__(self):
return '{}'.format(self.nombre)
def save(self):
self.nombre=self.nombre.upper()
super(EnfermedadBase, self).save()
class Meta():
verbose_name_plural='Enfermedades de Base'
class Sintomatologia(ClaseModelo):
nombre=models.CharField(max_length=30)
descripcion=models.CharField(max_length=50)
observaciones=models.TextField(blank=True, null=True)
def __str__(self):
return '{}'.format(self.nombre)
def save(self):
self.nombre=self.nombre.upper()
super(Sintomatologia, self).save()
class Meta():
verbose_name_plural='Sintomas'
class Departamento(ClaseModelo):
nombre=models.CharField(max_length=10)
def __str__(self):
return '{}'.format(self.nombre)
def save(self):
self.nombre=self.nombre.upper()
super(Departamento, self).save()
def get_absolute_url(self):
return reverse('departamento-detail-view', args=[str(self.id)])
class Meta():
verbose_name_plural='Departamentos'
class Municipio(ClaseModelo):
nombre=models.CharField(max_length=30)
departamento=models.ForeignKey(Departamento, null=True, on_delete=models.SET_NULL)
def __str__(self):
return '{}'.format(self.nombre)
def save(self):
self.nombre=self.nombre.upper()
super(Municipio, self).save()
class Meta():
verbose_name_plural='Municipios'
class Entidad(ClaseModelo):
nombre=models.CharField(max_length=100)
direccion=models.CharField(max_length=100)
telefono=models.CharField(max_length=8, default='0011122')
observaciones=models.TextField(blank=True, null=True)
ciudad=models.ForeignKey(Municipio, null=True, on_delete=models.SET_NULL)
ubicacion=models.PointField(srid=4326)
def __str__(self):
return '{}'.format(self.nombre)
def save(self):
self.nombre=self.nombre.upper()
super(Entidad, self).save()
class Meta():
verbose_name_plural='Entidades'
class Contacto(ClaseModelo):
datos_personales=models.ForeignKey(Persona, null=True, on_delete=models.SET_NULL)
def __str__(self):
return '{} {}'.format(self.datos_personales.nombres, self.datos_personales.apellidos)
class Meta():
verbose_name_plural='Contactos Directos'
class Paciente(ClaseModelo):
datos_personales=models.ForeignKey(Persona, null=True, on_delete=models.SET_NULL)
contactos_directos=models.ManyToManyField(Contacto)
ciudad=models.ForeignKey(Municipio, null=True, on_delete=models.SET_NULL)
ubicacion=models.PointField(srid=4326)
def __str__(self):
return '{} {}'.format(self.datos_personales.nombres, self.datos_personales.apellidos)
class Meta():
verbose_name_plural='Pacientes'
class Medico(ClaseModelo):
datos_personales=models.ForeignKey(Persona, null=True, on_delete=models.SET_NULL)
matricula=models.CharField(max_length=20)
especialidad=models.ForeignKey(Especialidad, null=True, on_delete=models.SET_NULL)
entidad_medica=models.ForeignKey(Entidad, null=True, on_delete=models.SET_NULL)
def __str__(self):
return '{} {} {}'.format(self.matricula, self.datos_personales.nombres,self.datos_personales.apellidos)
class Meta():
verbose_name_plural='Medicos'
class Usuario(ClaseModelo):
info=models.OneToOneField(Medico, on_delete=models.CASCADE)
nombre_usuario=models.CharField(max_length=20)
contraseña=models.CharField(max_length=20)
def __str__(self):
return '{}'.format(self.nombre_usuario)
class Meta():
verbose_name_plural='Usuarios del Sistema'
class Receta(ClaseModelo):
medicamento=models.ManyToManyField(Medicamento, help_text="Seleccione un medicamento.")
dosificacion=models.CharField(max_length=50)
observaciones=models.TextField(blank=True, null=True)
def __str__(self):
return '{} {}'.format(self.id, self.dosificacion)
class Meta():
verbose_name_plural='Recetas'
class HistoriaClinica(ClaseModelo):
fecha=models.DateField()
paciente=models.ForeignKey(Paciente, null=True, on_delete=models.SET_NULL)
enfermedades=models.ManyToManyField(EnfermedadBase, help_text="Seleccione una enfermedad.")
sintomas=models.ManyToManyField(Sintomatologia, help_text="Seleccione un sintoma.")
medico=models.ForeignKey(Medico, null=True, on_delete=models.SET_NULL)
def __str__(self):
return '{} {}'.format(self.fecha, self.paciente.datos_personales.nombres)
class Meta():
verbose_name_plural='Historiales Medicos'
class Tratamiento(ClaseModelo):
fecha_inicio=models.DateField()
fecha_final=models.DateField()
lista_medicacion=models.ForeignKey(Receta, null=True, on_delete=models.SET_NULL)
historial=models.ForeignKey(HistoriaClinica, null=True, on_delete=models.SET_NULL)
def __str__(self):
return '{} {}'.format(self.fecha_inicio, self.fecha_final)
class Meta():
verbose_name_plural='Tratamientos Medicos'
|
StarcoderdataPython
|
3231585
|
import traceback
from flask import current_app
from urllib.parse import urljoin
from ..lib import utils
from .base import db
from .setting import Setting
from .user import User
from .account_user import AccountUser
class Account(db.Model):
__tablename__ = 'account'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(40), index=True, unique=True, nullable=False)
description = db.Column(db.String(128))
contact = db.Column(db.String(128))
mail = db.Column(db.String(128))
domains = db.relationship("Domain", back_populates="account")
apikeys = db.relationship("ApiKey",
secondary="apikey_account",
back_populates="accounts")
def __init__(self, name=None, description=None, contact=None, mail=None):
self.name = name
self.description = description
self.contact = contact
self.mail = mail
# PDNS configs
self.PDNS_STATS_URL = Setting().get('pdns_api_url')
self.PDNS_API_KEY = Setting().get('pdns_api_key')
self.PDNS_VERSION = Setting().get('pdns_version')
self.API_EXTENDED_URL = utils.pdns_api_extended_uri(self.PDNS_VERSION)
if self.name is not None:
self.name = ''.join(c for c in self.name.lower()
if c in "abcdefghijklmnopqrstuvwxyz0123456789")
def __repr__(self):
        return '<Account {0!r}>'.format(self.name)
def get_name_by_id(self, account_id):
"""
Convert account_id to account_name
"""
account = Account.query.filter(Account.id == account_id).first()
if account is None:
return ''
return account.name
def get_id_by_name(self, account_name):
"""
Convert account_name to account_id
"""
# Skip actual database lookup for empty queries
if account_name is None or account_name == "":
return None
account = Account.query.filter(Account.name == account_name).first()
if account is None:
return None
return account.id
def create_account(self):
"""
Create a new account
"""
# Sanity check - account name
if self.name == "":
return {'status': False, 'msg': 'No account name specified'}
# check that account name is not already used
account = Account.query.filter(Account.name == self.name).first()
if account:
return {'status': False, 'msg': 'Account already exists'}
db.session.add(self)
db.session.commit()
return {'status': True, 'msg': 'Account created successfully'}
def update_account(self):
"""
Update an existing account
"""
# Sanity check - account name
if self.name == "":
return {'status': False, 'msg': 'No account name specified'}
# read account and check that it exists
account = Account.query.filter(Account.name == self.name).first()
if not account:
return {'status': False, 'msg': 'Account does not exist'}
account.description = self.description
account.contact = self.contact
account.mail = self.mail
db.session.commit()
return {'status': True, 'msg': 'Account updated successfully'}
def delete_account(self, commit=True):
"""
Delete an account
"""
# unassociate all users first
self.grant_privileges([])
try:
Account.query.filter(Account.name == self.name).delete()
if commit:
db.session.commit()
return True
except Exception as e:
db.session.rollback()
current_app.logger.error(
'Cannot delete account {0} from DB. DETAIL: {1}'.format(
self.name, e))
return False
def get_user(self):
"""
Get users (id) associated with this account
"""
user_ids = []
query = db.session.query(
AccountUser,
Account).filter(User.id == AccountUser.user_id).filter(
Account.id == AccountUser.account_id).filter(
Account.name == self.name).all()
for q in query:
user_ids.append(q[0].user_id)
return user_ids
def grant_privileges(self, new_user_list):
"""
Reconfigure account_user table
"""
account_id = self.get_id_by_name(self.name)
account_user_ids = self.get_user()
new_user_ids = [
u.id
for u in User.query.filter(User.username.in_(new_user_list)).all()
] if new_user_list else []
removed_ids = list(set(account_user_ids).difference(new_user_ids))
added_ids = list(set(new_user_ids).difference(account_user_ids))
try:
for uid in removed_ids:
AccountUser.query.filter(AccountUser.user_id == uid).filter(
AccountUser.account_id == account_id).delete()
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error(
'Cannot revoke user privileges on account {0}. DETAIL: {1}'.
format(self.name, e))
try:
for uid in added_ids:
au = AccountUser(account_id, uid)
db.session.add(au)
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error(
'Cannot grant user privileges to account {0}. DETAIL: {1}'.
format(self.name, e))
def revoke_privileges_by_id(self, user_id):
"""
Remove a single user from privilege list based on user_id
"""
new_uids = [u for u in self.get_user() if u != user_id]
users = []
for uid in new_uids:
users.append(User(id=uid).get_user_info_by_id().username)
self.grant_privileges(users)
def add_user(self, user):
"""
Add a single user to Account by User
"""
try:
au = AccountUser(self.id, user.id)
db.session.add(au)
db.session.commit()
return True
except Exception as e:
db.session.rollback()
current_app.logger.error(
'Cannot add user privileges on account {0}. DETAIL: {1}'.
format(self.name, e))
return False
def remove_user(self, user):
"""
Remove a single user from Account by User
"""
# TODO: This func is currently used by SAML feature in a wrong way. Fix it
try:
AccountUser.query.filter(AccountUser.user_id == user.id).filter(
AccountUser.account_id == self.id).delete()
db.session.commit()
return True
except Exception as e:
db.session.rollback()
current_app.logger.error(
'Cannot revoke user privileges on account {0}. DETAIL: {1}'.
format(self.name, e))
return False
def update(self):
"""
Fetch accounts from PowerDNS and syncs them into DB
"""
db_accounts = Account.query.all()
list_db_accounts = [d.name for d in db_accounts]
current_app.logger.info("Found {} accounts in PowerDNS-Admin".format(
len(list_db_accounts)))
headers = {'X-API-Key': self.PDNS_API_KEY}
try:
jdata = utils.fetch_json(
urljoin(self.PDNS_STATS_URL,
self.API_EXTENDED_URL + '/servers/localhost/zones'),
headers=headers,
timeout=int(Setting().get('pdns_api_timeout')),
verify=Setting().get('verify_ssl_connections'))
list_jaccount = set(d['account'] for d in jdata if d['account'])
current_app.logger.info("Found {} accounts in PowerDNS".format(
len(list_jaccount)))
try:
# Remove accounts that don't exist any more
should_removed_db_account = list(
set(list_db_accounts).difference(list_jaccount))
for account_name in should_removed_db_account:
account_id = self.get_id_by_name(account_name)
if not account_id:
continue
current_app.logger.info("Deleting account for {0}".format(account_name))
account = Account.query.get(account_id)
account.delete_account(commit=False)
except Exception as e:
current_app.logger.error(
'Can not delete account from DB. DETAIL: {0}'.format(e))
current_app.logger.debug(traceback.format_exc())
for account_name in list_jaccount:
account_id = self.get_id_by_name(account_name)
if account_id:
continue
current_app.logger.info("Creating account for {0}".format(account_name))
account = Account(name=account_name)
db.session.add(account)
db.session.commit()
current_app.logger.info('Update accounts finished')
return {
'status': 'ok',
'msg': 'Account table has been updated successfully'
}
except Exception as e:
db.session.rollback()
current_app.logger.error(
'Cannot update account table. Error: {0}'.format(e))
return {'status': 'error', 'msg': 'Cannot update account table'}
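def _demo_privilege_diff():
    # A minimal sketch (illustration only, no database involved) of the
    # set-difference logic used by grant_privileges(): ids present before but
    # not in the new list are revoked, ids present only in the new list are
    # granted.
    current_user_ids = [1, 2, 3]
    new_user_ids = [2, 3, 4]
    removed_ids = list(set(current_user_ids).difference(new_user_ids))
    added_ids = list(set(new_user_ids).difference(current_user_ids))
    return removed_ids, added_ids  # ([1], [4])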
|
StarcoderdataPython
|
169346
|
<reponame>Leonardo-H/DR-PG
import numpy as np
class PerformanceEstimate(object):
"""
A helper function to compute gradient of the form
E_{d_\pi} (\nabla E_{\pi}) [ A ]
where
the unnormalized state distribution is
d_{\pi} = \sum_{t=1}^\infty \gamma^t * d_{pi,t}
and for \lambda in [0,1],
A_t = (1-\lambda) \sum_{k=0}^\infty \lambda^k A_{k,t}
with
A_{k,t} = c_t - v_t + \delta * V_{k,t+1}
V_{k,t} = w_t * c_t + \delta * w_t * w_{t+1} * c_{t+1} + ...
+ \delta^{k-1} * w_t * ... * w_{t+k-1} * c_{t+k-1}
+ \delta^k * w_t * ... * w_{t+k-1} * v_{t+k}
c is the instantaneous cost,
w is the importance weight
v is the baseline
    In the implementation, A_t is computed as
A_t = x_t + (\lambda*\delta) * X_{t+1} + Y_t
where x_t = c_t - v_t + \delta * v_{t+1}
X_t = (w*c)_t - v_t + \delta * (wv)_{t+1}
Y_t = \sum_{k=2}^\infty (\lambda*\delta)^k * w_{t+1} * ... * w_{t+k-1} X_{t+k}
=========================================================================================
\gamma in [0,1] is the discount factor in the problem
\lambda in [0,1] defines the \lambda-mixing of a family of estimates
\delta in [0, \gamma], w, and V define the reshaped costs
    The features of the estimator are determined by the following criteria:
1) \delta==\gamma or \delta<\gamma:
whether to use the same discount factor as the problem's definition
        for estimating the value function. Using a smaller delta simplifies the
estimation problem but introduces additional bias.
2) \lambda==1 or \lambda <1:
whether to use Monte-Carlo rollouts or a \lambda-weighted estimate,
        which has larger bias but smaller variance.
3) w==1, or w==p(\pi*)/p(\pi):
whether to use importance sampling to estimate the advantage function
with respect to some other policy \pi* using the samples from the
        exploration policy \pi. The use of a non-identity w allows A_{V, k}
        to estimate the advantage function with respect to \pi* even when the
rollouts are collected by \pi.
Some examples (and their imposed constraints):
1) Actor-Critic Family (\delta==\gamma) (w=1)
a) \lambda==1, unbiased Monte-Carlo rollout with costs reshaped by some
arbitrary function V
b) \lambda==0, basic Actor-Critic when V is the current value estimate
        c) \lambda in (0,1), lambda-weighted Actor-Critic, when V is the current
value estimate.
2) GAE Family (\delta<\gamma) (w=1)
a) \gamma==1, (\delta, \lambda)-GAE estimator for undiscounted problems
when V is the current value estimate (Schulmann et al.,
2016)
b) \gamma in (0,1], (\delta, \lambda)-GAE for \gamma-discounted
problems, when V is the current value estimate
3) Imitation Family (V=expert value estimate) (\delta=\gamma) (w=1):
a) \lambda==0, AggreVaTe(D) (Sun et al., 2017)
b) only using A_{V,k}, k-THOR (Sun et al, 2018) #TODO
c) \lambda in (0,1] lambda-weighted truncated-gradient (Happy UAI)
4) PDE (Performance Difference Estimate) Family (w = p(\pi') / p(\pi) ):
PDE builds an estimate of E_{d_\pi} (\nabla E_{\pi}) [ A_{\pi'} ]
where A_{\pi'} is the (dis)advantage function wrt \pi', in which
V is the value estimate of some arbitrary policy \pi', \lambda in [0,
        1] and \delta in [0, \gamma] are bias-variance trade-off parameters.
"""
def __init__(self, gamma, lambd=0., delta=None, default_v=0.0):
delta = np.min([delta, gamma]) if delta is not None else np.min([gamma, 0.9999])
self.gamma = np.clip(gamma, 0., 1.)
self.delta = np.clip(delta, 0., 1.)
self.lambd = np.clip(lambd, 0., 1.)
self.default_v = default_v # the value function of absorbing states
@staticmethod
def shift_l(v, padding=0.):
return np.append(v[1:], padding) if np.array(v).size > 1 else v
def reshape_cost(self, c, V, done, w=1., padding=0.):
v, v_next = V[:-1], V[1:]
if done: # ignore the last element
v_next[-1] = padding
return w * (c + self.delta * v_next) - v
def dynamic_program(self, a, b, c, d, w):
        # Compute the expression below recursively from the end
# val_t = d^t * ( a_t + \sum_{k=1}^infty c^k w_{t+1} ... w_{t+k} b_{t+k} )
# = d^t * ( a_t + e_t )
#
# in which e_t is computed recursively from the end as
#
# e_t = \sum_{k=1}^infty c^k w_{t+1} ... w_{t+k} b_{t+k} )
# = c w_{t+1} b_{t+1} + \sum_{k=2}^infty c^k w_{t+1} ... w_{t+k} b_{t+k} )
# = c w_{t+1} b_{t+1} + c w_{t+1} \sum_{k=1}^infty c^k w_{t+1+1} ... w_{t+1+k} b_{t+1+k} )
# = c w_{t+1} b_{t+1} + c w_{t+1} e_{t+1}
# = c w_{t+1} (b_{t+1} + e_{t+1})
#
# where the boundary condition of e is zero.
assert len(a) == len(b), 'Lengths of the two sequences do not match.'
horizon = len(a)
if type(w) is not np.ndarray:
            w = np.full_like(a, w)  # broadcast a scalar weight to an array
e = np.zeros_like(a) # last val is 0
cw = c * w
for i in reversed(range(1, len(e))):
e[i - 1] = cw[i] * (b[i] + e[i])
val = (d**np.arange(horizon)) * (a + e)
return val
def adv(self, c, V, done, w=1., lambd=None, gamma=None):
# compute pde such that
# \sum_{s,a in traj_pi} \nabla \log p(a|s) pde(s,a)
# is unbiased estimate of
# E_{d_\pi} (\nabla E_{\pi}) [ A_V ]
#
# V is an np.ndarray with length equal to len(c)+1
# w can be int, float, or np.ndarray with length equal to len(c)
# if done is True, the last element of V is set to the default value
assert len(c) + 1 == len(V), 'V needs to be one element longer than c.'
assert type(done) is bool
gamma = gamma if gamma is not None else self.gamma
lambd = lambd if lambd is not None else self.lambd
X = self.reshape_cost(c, V, done, w=w, padding=self.default_v)
x = self.reshape_cost(c, V, done, w=1.0, padding=self.default_v)
Y = self.shift_l(X) * self.delta * lambd # this always pads 0
a = x + Y
b = Y
c = lambd * self.delta
d = gamma
return self.dynamic_program(a, b, c, d, w)
def qfn(self, c, V, done, w=1., lambd=None, gamma=None):
# compute the qfn such that
# \sum_{s,a in traj_pi} qfn(s,a)
# is unbiased estimate of
# A_V + V
# under policy pi
# w is the importance sampling weight
return V + self.adv(c, V, done, w, lambd, gamma)
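# Example usage (a small sketch with arbitrary numbers): c holds the per-step
# costs of a length-3 trajectory and V holds baseline values, which must be one
# element longer than c.
if __name__ == '__main__':
    pe = PerformanceEstimate(gamma=0.99, lambd=0.95)
    c = np.array([1.0, 0.5, 0.25])
    V = np.array([0.9, 0.8, 0.6, 0.0])
    print(pe.adv(c, V, done=True))  # one advantage estimate per step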
|
StarcoderdataPython
|
1648268
|
from .calculate import *
from .file import *
|
StarcoderdataPython
|
1618570
|
#!/usr/bin/python3
import random
import string
import os
from dotenv import load_dotenv
class CouponAPI:
@staticmethod
def get_random_string(length):
""" Generate a random string (upper and lower case letters) to be used as a coupon code
:param length: the length of the coupon code
:return: the coupon code
"""
# Random string with the combination of lower and upper case
letters = string.ascii_letters
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
@staticmethod
def create(storeModel, song_name, user, code):
""" Gets called by the API when a coupon form is submitted
:param storeModel: the database object
:param code: -1 if the coupon code has not been established for one of the stores or the coupon code to be used
to sync local and global stores
:param song_name: song for the coupon to be made for
:param user: the user who uploaded the song
:return: the coupon code if it was successful or an error message
"""
load_dotenv()
        coupon_code_length = int(os.getenv('COUPON_CODE_LENGTH'))  # env values are strings
woocommerce = storeModel
coupon_code = code
if coupon_code == -1:
coupon_code = CouponAPI.get_random_string(coupon_code_length)
# print(coupon_code)
exists = True
while exists:
if woocommerce.check_if_coupon_code_exists(woocommerce, coupon_code):
coupon_code = CouponAPI.get_random_string(coupon_code_length)
else:
exists = False
product_id = woocommerce.get_product_id(woocommerce, user, song_name)
if product_id != -1:
coupon_data = {
"code": coupon_code,
"discount_type": "percent",
"amount": "100",
"product_ids": [product_id],
"usage_limit": 1
}
data = woocommerce.create_coupon(woocommerce, coupon_data)
return coupon_code
else:
return "Cannot find the song: " + song_name + " with the user name: " + user
|
StarcoderdataPython
|
112032
|
<filename>oarepo_model_builder/invenio/invenio_views.py
from .invenio_base import InvenioBaseClassPythonBuilder
class InvenioViewsBuilder(InvenioBaseClassPythonBuilder):
TYPE = 'invenio_views'
class_config = 'create_blueprint_from_app'
template = 'views'
|
StarcoderdataPython
|
3369461
|
<reponame>gujralsanyam22/pyrobot<gh_stars>1000+
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Example for commanding robot with position control using moveit planner
"""
from pyrobot import Robot
from pyrobot.utils.util import MoveitObjectHandler
import time
import numpy as np
def main():
config = dict(moveit_planner="ESTkConfigDefault")
bot = Robot(
"sawyer",
use_arm=True,
use_base=False,
use_camera=False,
use_gripper=True,
arm_config=config,
)
obstacle_handler = MoveitObjectHandler()
# Add a table
# position and orientation (quaternion: x, y, z, w) of the table
pose = [0.8, 0.0, -0.23, 0.0, 0.0, 0.0, 1.0]
# size of the table (x, y, z)
size = (1.35, 2.0, 0.1)
obstacle_handler.add_table(pose, size)
target_poses = [
{
"position": np.array([0.8219, 0.0239, -0.1]),
"orientation": np.array(
[
[-0.3656171, 0.6683861, 0.6477531],
[0.9298826, 0.2319989, 0.2854731],
[0.0405283, 0.7067082, -0.7063434],
]
),
},
{
"position": np.array([0.7320, 0.1548, -0.15]),
"orientation": np.array([0.1817, 0.9046, -0.1997, 0.3298]),
},
]
bot.arm.go_home()
time.sleep(1)
for pose in target_poses:
bot.arm.set_ee_pose(plan=True, **pose)
time.sleep(1)
bot.arm.go_home()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
33382
|
import numpy as np
import math
import fatpack
import matplotlib.pyplot as plt
import pandas as pd
# Create a function that returns the Goodman correction:
def Goodman_method_correction(M_a,M_m,M_max):
M_u = 1.5*M_max
M_ar = M_a/(1-M_m/M_u)
return M_ar
def Equivalent_bending_moment(M_ar,Neq,m):
P = M_ar.shape
M_sum = 0
j = P[0]
for i in range(j):
M_sum = math.pow(M_ar[i],m) + M_sum
M_eq = math.pow((M_sum/Neq),(1/m))
return M_eq
def get_DEL(y,Neq,m):
S, Sm = fatpack.find_rainflow_ranges(y.flatten(), return_means=True, k=256)
data_arr = np.array([Sm , S ]).T
M_ar = Goodman_method_correction(data_arr[:,1],data_arr[:,0],np.max(S))
print(sum(M_ar.shape))
M_eq = Equivalent_bending_moment(M_ar,Neq,m)
return M_eq
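# Example usage (a toy load signal; Neq and the S-N exponent m below are
# arbitrary illustrative values): compute a damage-equivalent bending moment.
if __name__ == '__main__':
    t = np.linspace(0.0, 600.0, 60000)
    y = 1000.0 * np.sin(2 * np.pi * 0.1 * t) + 200.0 * np.sin(2 * np.pi * 1.3 * t)
    print(get_DEL(y, Neq=1.0e7, m=10))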
|
StarcoderdataPython
|
3323071
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for constructing messages for instance configs requests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from googlecloudsdk.api_lib.compute import path_simplifier
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute.instance_groups.flags import AutoDeleteFlag
from googlecloudsdk.command_lib.compute.instance_groups.flags import STATEFUL_IP_DEFAULT_INTERFACE_NAME
from googlecloudsdk.command_lib.compute.instance_groups.managed.instance_configs import instance_disk_getter
import six
def GetMode(messages, mode):
"""Returns mode message based on short user friendly string."""
enum_class = messages.PreservedStatePreservedDisk.ModeValueValuesEnum
if isinstance(mode, six.string_types):
return {
'ro': enum_class.READ_ONLY,
'rw': enum_class.READ_WRITE,
}[mode]
else:
# handle converting from AttachedDisk.ModeValueValuesEnum
return enum_class(mode.name)
def MakePreservedStateDiskEntry(messages, stateful_disk_data, disk_getter):
"""Prepares disk preserved state entry, combining with params from the instance."""
if stateful_disk_data.get('source'):
source = stateful_disk_data.get('source')
mode = stateful_disk_data.get('mode', 'rw')
else:
disk = disk_getter.get_disk(
device_name=stateful_disk_data.get('device-name'))
if disk is None:
if disk_getter.instance_exists:
error_message = ('[source] is required because the disk with the '
'[device-name]: `{0}` is not yet configured in the '
'instance config'.format(
stateful_disk_data.get('device-name')))
else:
error_message = ('[source] must be given while defining stateful disks '
'in instance configs for new instances')
raise exceptions.BadArgumentException('stateful_disk', error_message)
source = disk.source
mode = stateful_disk_data.get('mode') or disk.mode
preserved_disk = (
messages.PreservedStatePreservedDisk(
autoDelete=(stateful_disk_data.get('auto-delete') or
AutoDeleteFlag.NEVER).GetAutoDeleteEnumValue(
messages.PreservedStatePreservedDisk
.AutoDeleteValueValuesEnum),
source=source,
mode=GetMode(messages, mode)))
return messages.PreservedState.DisksValue.AdditionalProperty(
key=stateful_disk_data.get('device-name'), value=preserved_disk)
def MakePreservedStateMetadataEntry(messages, key, value):
return messages.PreservedState.MetadataValue.AdditionalProperty(
key=key,
value=value
)
def _CreateIpAddress(messages, ip_address):
# Checking if the address is not an IP v4 address, assumed to be a URL then.
if re.search('[A-Za-z]', ip_address):
return messages.PreservedStatePreservedNetworkIpIpAddress(
address=ip_address)
else:
return messages.PreservedStatePreservedNetworkIpIpAddress(
literal=ip_address)
def _MakePreservedStateNetworkIpEntry(messages, stateful_ip):
"""Prepares stateful ip preserved state entry."""
auto_delete = (stateful_ip.get('auto-delete') or
AutoDeleteFlag.NEVER).GetAutoDeleteEnumValue(
messages.PreservedStatePreservedNetworkIp
.AutoDeleteValueValuesEnum)
address = None
if stateful_ip.get('address'):
ip_address = stateful_ip.get('address')
address = _CreateIpAddress(messages, ip_address)
return messages.PreservedStatePreservedNetworkIp(
autoDelete=auto_delete,
ipAddress=address)
def PatchPreservedStateNetworkIpEntry(messages, stateful_ip_to_patch,
update_stateful_ip):
"""Prepares stateful ip preserved state entry."""
auto_delete = update_stateful_ip.get('auto-delete')
if auto_delete:
stateful_ip_to_patch.autoDelete = auto_delete.GetAutoDeleteEnumValue(
messages.PreservedStatePreservedNetworkIp.AutoDeleteValueValuesEnum)
ip_address = update_stateful_ip.get('address')
if ip_address:
stateful_ip_to_patch.ipAddress = _CreateIpAddress(messages, ip_address)
return stateful_ip_to_patch
def MakePreservedStateInternalNetworkIpEntry(messages, stateful_ip):
return messages.PreservedState.InternalIPsValue.AdditionalProperty(
key=stateful_ip.get('interface-name',
STATEFUL_IP_DEFAULT_INTERFACE_NAME),
value=_MakePreservedStateNetworkIpEntry(messages, stateful_ip)
)
def MakePreservedStateExternalNetworkIpEntry(messages, stateful_ip):
return messages.PreservedState.ExternalIPsValue.AdditionalProperty(
key=stateful_ip.get('interface-name',
STATEFUL_IP_DEFAULT_INTERFACE_NAME),
value=_MakePreservedStateNetworkIpEntry(messages, stateful_ip)
)
def CreatePerInstanceConfigMessage(holder,
instance_ref,
stateful_disks,
stateful_metadata,
disk_getter=None):
"""Create per-instance config message from the given stateful disks and metadata."""
if not disk_getter:
disk_getter = instance_disk_getter.InstanceDiskGetter(
instance_ref=instance_ref, holder=holder)
messages = holder.client.messages
preserved_state_disks = []
for stateful_disk in stateful_disks or []:
preserved_state_disks.append(
MakePreservedStateDiskEntry(messages, stateful_disk, disk_getter))
preserved_state_metadata = []
# Keeping the metadata sorted to maintain consistency across commands
for metadata_key, metadata_value in sorted(six.iteritems(stateful_metadata)):
preserved_state_metadata.append(
MakePreservedStateMetadataEntry(
messages, key=metadata_key, value=metadata_value))
per_instance_config = messages.PerInstanceConfig(
name=path_simplifier.Name(six.text_type(instance_ref)))
per_instance_config.preservedState = messages.PreservedState(
disks=messages.PreservedState.DisksValue(
additionalProperties=preserved_state_disks),
metadata=messages.PreservedState.MetadataValue(
additionalProperties=preserved_state_metadata))
return per_instance_config
def CreatePerInstanceConfigMessageWithIPs(holder,
instance_ref,
stateful_disks,
stateful_metadata,
stateful_internal_ips,
stateful_external_ips,
disk_getter=None):
"""Create per-instance config message from the given stateful attributes."""
messages = holder.client.messages
per_instance_config = CreatePerInstanceConfigMessage(holder,
instance_ref,
stateful_disks,
stateful_metadata,
disk_getter)
preserved_state_internal_ips = []
for stateful_internal_ip in stateful_internal_ips or []:
preserved_state_internal_ips.append(
MakePreservedStateInternalNetworkIpEntry(messages,
stateful_internal_ip))
per_instance_config.preservedState.internalIPs = (
messages.PreservedState.InternalIPsValue(
additionalProperties=preserved_state_internal_ips))
preserved_state_external_ips = []
for stateful_external_ip in stateful_external_ips or []:
preserved_state_external_ips.append(
MakePreservedStateExternalNetworkIpEntry(messages,
stateful_external_ip))
per_instance_config.preservedState.externalIPs = (
messages.PreservedState.ExternalIPsValue(
additionalProperties=preserved_state_external_ips))
return per_instance_config
def CallPerInstanceConfigUpdate(holder, igm_ref, per_instance_config_message):
"""Calls proper (zonal or regional) resource for instance config update."""
messages = holder.client.messages
if igm_ref.Collection() == 'compute.instanceGroupManagers':
service = holder.client.apitools_client.instanceGroupManagers
request = (
messages.ComputeInstanceGroupManagersUpdatePerInstanceConfigsRequest)(
instanceGroupManager=igm_ref.Name(),
instanceGroupManagersUpdatePerInstanceConfigsReq=messages.
InstanceGroupManagersUpdatePerInstanceConfigsReq(
perInstanceConfigs=[per_instance_config_message]),
project=igm_ref.project,
zone=igm_ref.zone,
)
operation_collection = 'compute.zoneOperations'
elif igm_ref.Collection() == 'compute.regionInstanceGroupManagers':
service = holder.client.apitools_client.regionInstanceGroupManagers
request = (
messages.
ComputeRegionInstanceGroupManagersUpdatePerInstanceConfigsRequest)(
instanceGroupManager=igm_ref.Name(),
regionInstanceGroupManagerUpdateInstanceConfigReq=messages.
RegionInstanceGroupManagerUpdateInstanceConfigReq(
perInstanceConfigs=[per_instance_config_message]),
project=igm_ref.project,
region=igm_ref.region,
)
operation_collection = 'compute.regionOperations'
else:
raise ValueError('Unknown reference type {0}'.format(igm_ref.Collection()))
operation = service.UpdatePerInstanceConfigs(request)
operation_ref = holder.resources.Parse(
operation.selfLink, collection=operation_collection)
return operation_ref
def CallCreateInstances(holder, igm_ref, per_instance_config_message):
"""Make CreateInstances API call using the given per-instance config messages."""
messages = holder.client.messages
if igm_ref.Collection() == 'compute.instanceGroupManagers':
service = holder.client.apitools_client.instanceGroupManagers
request = (
messages.ComputeInstanceGroupManagersCreateInstancesRequest(
instanceGroupManager=igm_ref.Name(),
instanceGroupManagersCreateInstancesRequest=
messages.InstanceGroupManagersCreateInstancesRequest(
instances=[per_instance_config_message]),
project=igm_ref.project,
zone=igm_ref.zone))
operation_collection = 'compute.zoneOperations'
elif igm_ref.Collection() == 'compute.regionInstanceGroupManagers':
service = holder.client.apitools_client.regionInstanceGroupManagers
request = (
messages.ComputeRegionInstanceGroupManagersCreateInstancesRequest(
instanceGroupManager=igm_ref.Name(),
regionInstanceGroupManagersCreateInstancesRequest=
messages.RegionInstanceGroupManagersCreateInstancesRequest(
instances=[per_instance_config_message]),
project=igm_ref.project,
region=igm_ref.region))
operation_collection = 'compute.regionOperations'
else:
raise ValueError('Unknown reference type {0}'.format(igm_ref.Collection()))
operation = service.CreateInstances(request)
operation_ref = holder.resources.Parse(
operation.selfLink, collection=operation_collection)
return operation_ref, service
def GetApplyUpdatesToInstancesRequestsZonal(holder, igm_ref, instances,
minimal_action):
"""Immediately applies updates to instances (zonal case)."""
messages = holder.client.messages
request = messages.InstanceGroupManagersApplyUpdatesRequest(
instances=instances,
minimalAction=minimal_action,
mostDisruptiveAllowedAction=messages
.InstanceGroupManagersApplyUpdatesRequest
.MostDisruptiveAllowedActionValueValuesEnum.REPLACE)
return messages.ComputeInstanceGroupManagersApplyUpdatesToInstancesRequest(
instanceGroupManager=igm_ref.Name(),
instanceGroupManagersApplyUpdatesRequest=request,
project=igm_ref.project,
zone=igm_ref.zone,
)
def GetApplyUpdatesToInstancesRequestsRegional(holder, igm_ref, instances,
minimal_action):
"""Immediately applies updates to instances (regional case)."""
messages = holder.client.messages
request = messages.RegionInstanceGroupManagersApplyUpdatesRequest(
instances=instances,
minimalAction=minimal_action,
mostDisruptiveAllowedAction=messages
.RegionInstanceGroupManagersApplyUpdatesRequest
.MostDisruptiveAllowedActionValueValuesEnum.REPLACE)
return (
messages.ComputeRegionInstanceGroupManagersApplyUpdatesToInstancesRequest
)(
instanceGroupManager=igm_ref.Name(),
regionInstanceGroupManagersApplyUpdatesRequest=request,
project=igm_ref.project,
region=igm_ref.region,
)
def CallApplyUpdatesToInstances(holder, igm_ref, instances, minimal_action):
"""Calls proper (zonal or reg.) resource for applying updates to instances."""
if igm_ref.Collection() == 'compute.instanceGroupManagers':
operation_collection = 'compute.zoneOperations'
service = holder.client.apitools_client.instanceGroupManagers
minimal_action = (
holder.client.messages.InstanceGroupManagersApplyUpdatesRequest
.MinimalActionValueValuesEnum(minimal_action.upper()))
apply_request = GetApplyUpdatesToInstancesRequestsZonal(
holder, igm_ref, instances, minimal_action)
elif igm_ref.Collection() == 'compute.regionInstanceGroupManagers':
operation_collection = 'compute.regionOperations'
service = holder.client.apitools_client.regionInstanceGroupManagers
minimal_action = (
holder.client.messages.RegionInstanceGroupManagersApplyUpdatesRequest
.MinimalActionValueValuesEnum(minimal_action.upper()))
apply_request = GetApplyUpdatesToInstancesRequestsRegional(
holder, igm_ref, instances, minimal_action)
else:
raise ValueError('Unknown reference type {0}'.format(igm_ref.Collection()))
apply_operation = service.ApplyUpdatesToInstances(apply_request)
apply_operation_ref = holder.resources.Parse(
apply_operation.selfLink, collection=operation_collection)
return apply_operation_ref
|
StarcoderdataPython
|
3375817
|
<reponame>barsgroup/barsup-core
# coding:utf-8
"""Функционал для работы уровня WSGI."""
from datetime import datetime
import json
from os import path
from sys import stderr, exc_info
import traceback
from uuid import uuid4
from simplejson.scanner import JSONDecodeError
from webob import Response, exc
from webob.dec import wsgify
from webob.static import DirectoryApp
from pynch import core, exceptions
from pynch.router import RoutingError
from pynch.util import serialize_to_json
def handler(config_file_name, catch_cookies=None):
"""Обработчик HTTP-запросов к приложению."""
fend = core.init(config=config_file_name)
catch_cookies = catch_cookies or []
@wsgify
def app(request):
params = {}
try:
params.update(request.json)
except JSONDecodeError:
params.update(request.params)
for cookie in catch_cookies:
            # cookies are not declared as parameters,
            # so their names are prefixed with "_"
params["_" + cookie] = request.cookies.get(cookie, None)
status = 200
try:
result = fend.populate(
request.method,
request.path,
**params
)
except (exceptions.BadRequest, RoutingError) as e:
status, result = 400, e
except exceptions.Unauthorized as e:
status, result = 401, e
except exceptions.Forbidden as e:
status, result = 403, e
except exceptions.NotFound as e:
status, result = 404, e
if status == 200:
body = json.dumps({'data': result,
'success': True},
default=serialize_to_json)
else:
body = json.dumps(getattr(result, 'values', str(result)))
return Response(
content_type='application/json',
body=body,
status=status
)
return app
def static_server(url_prefix, static_path):
"""MW для статики."""
static_app = DirectoryApp(
path.expandvars(static_path),
hide_index_with_redirect=True
)
@wsgify.middleware
def mware(request, app):
url = request.path
if url == '/':
raise exc.HTTPMovedPermanently(location=url_prefix)
elif url.startswith(url_prefix):
return static_app
else:
return app
return mware
@wsgify.middleware
def catch_errors(request, app, debug=False):
"""MW для конвертации неотловленных исключений."""
try:
return request.get_response(app)
except Exception:
trace = ''.join(traceback.format_exception(*exc_info(), limit=20))
stderr.write('%s\n%s\n' % (
datetime.now().strftime('%Y.%m.%d/%H:%M:%S'),
trace
))
if debug:
params = {'body_template': '<pre>%s</pre>' % trace}
else:
params = {}
raise exc.HTTPInternalServerError(**params)
@wsgify.middleware
def with_session(request, app, cookie_name, generator):
"""MW для контроля cookies."""
if cookie_name not in request.cookies:
request.cookies[cookie_name] = generator()
value = request.cookies[cookie_name]
res = request.get_response(app)
res.set_cookie(cookie_name, value)
return res
def make_application():
"""
    Return a preconfigured WSGI app.
    Can be used by end applications.
"""
return with_session(
static_server(
url_prefix='/pynch',
static_path=path.join('$PYNCH_PATH', 'static')
)(
catch_errors(
handler(
config_file_name='$PYNCH_CONFIG',
catch_cookies=('web_session_id',),
),
debug=True
)
),
cookie_name='web_session_id',
generator=lambda: uuid4().hex
)
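def _demo_application():
    # A minimal sketch (illustration only): compose the middlewares defined in
    # this module around a trivial handler instead of the real `handler(...)`,
    # mirroring the wrapping order used in make_application() below.
    @wsgify
    def hello(request):
        return Response(content_type='application/json',
                        body=json.dumps({'data': 'hello', 'success': True}))
    return with_session(
        catch_errors(hello, debug=True),
        cookie_name='web_session_id',
        generator=lambda: uuid4().hex,
    )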
__all__ = ('make_application', 'handler', 'catch_errors',
'static_server', 'with_session')
|
StarcoderdataPython
|
1705297
|
import anodos.tools
import swarm.models
import pflops.models
import distributors.models
import swarm.workers.worker
class Worker(swarm.workers.worker.Worker):
name = 'Центральный банк России'
urls = {'base': 'http://cbr.ru',
'data': 'https://cbr.ru/currency_base/daily/'}
cols = {'Цифр. код': None,
'Букв. код': None,
'Единиц': None,
'Валюта': None,
'Курс': None}
def __init__(self):
self.source = swarm.models.Source.objects.take(name=self.name)
self.count_of_currencies = 0
self.message = None
super().__init__()
def run(self):
if self.command == 'update_currencies':
self.update_currencies()
self.message = f'- валют: {self.count_of_currencies}.'
if self.message:
anodos.tools.send(content=f'{self.name}: {self.command} finish at {self.delta()}:\n'
f'{self.message}')
else:
anodos.tools.send(content=f'{self.name}: {self.command} finish at {self.delta()}.\n')
def update_currencies(self):
        # Fetch the data
tree = self.load(self.urls['data'], result_type='html')
table = tree.xpath('.//table[@class="data"]')[0]
        # Iterate over the rows of the fetched table
trs = table.xpath('.//tr')
for n, tr in enumerate(trs):
if n == 0:
                # Determine the column indices
ths = tr.xpath('.//th//text()')
for m, th in enumerate(ths):
self.cols[th] = m
else:
tds = tr.xpath('.//td//text()')
key = tds[self.cols['Букв. код']]
key_digit = tds[self.cols['Цифр. код']]
name = tds[self.cols['Валюта']]
quantity = tds[self.cols['Единиц']]
rate = tds[self.cols['Курс']]
currency = distributors.models.Currency.objects.take(key=key,
name=name,
key_digit=key_digit,
quantity=quantity,
rate=rate)
currency = pflops.models.Currency.objects.take(key=key,
name=name,
key_digit=key_digit,
quantity=quantity,
rate=rate)
print(currency)
self.count_of_currencies += 1
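def _demo_column_mapping():
    # A self-contained sketch (assuming lxml is available, as implied by the
    # xpath calls above) of the header-to-index mapping used in
    # update_currencies(): the first row's <th> texts become column keys and
    # data rows are then read through those indices.
    from lxml import html
    table = html.fromstring('<table class="data">'
                            '<tr><th>Букв. код</th><th>Курс</th></tr>'
                            '<tr><td>USD</td><td>90,0</td></tr>'
                            '</table>')
    trs = table.xpath('.//tr')
    cols = {th: m for m, th in enumerate(trs[0].xpath('.//th//text()'))}
    tds = trs[1].xpath('.//td//text()')
    return tds[cols['Курс']]  # '90,0'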
|
StarcoderdataPython
|
1720643
|
import os
import sys
import json
import socket
import threading
import numpy as np  # needed by Tracer.simplify()
_foo = None
class LineTracer:
def __init__(self, target):
self.sourcefiles = {}
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect(("localhost", 5050))
# read in the configuration
self.target = target
self.last_filename = ""
def trace_calls(self, frame, event, arg):
global _foo
current_filename = os.path.abspath(frame.f_code.co_filename)
if _foo != current_filename:
#print(current_filename)
pass
_foo = current_filename
if current_filename.find(self.target) != -1:
print (current_filename)
payload = {"line": frame.f_lineno}
if current_filename != self.last_filename:
payload['file'] = current_filename
self.socket.sendall(json.dumps(payload) + "\n")
self.socket.recv(32)
return self.trace_calls
class Tracer:
def __init__(self, config_file):
# Every target has a filename, a func name,
# a variable name, and a line number
self.targets = []
self.sourcefiles = {}
self.record_flag = -1
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect(("localhost", 5050))
# read in the configuration
self.targets = json.load(open(config_file))
for i, _ in enumerate(self.targets):
self.targets[i]['values'] = []
def trace_calls(self, frame, event, arg):
counter = 0
next_func = self.trace_calls
for i, target in enumerate(self.targets):
try:
current_filename = os.path.abspath(frame.f_code.co_filename)
except:
continue
if current_filename != target['filename']:
if not current_filename.startswith('/') and \
not current_filename.startswith('<'):
print 'skipping', current_filename, 'for', i
continue
counter += 1
if current_filename not in self.sourcefiles:
self.sourcefiles[current_filename] = \
open(current_filename).readlines()
#if frame.f_code.co_name == target['func']:
# get the current line being executed to see if it has a
# target variable
line = self.sourcefiles[current_filename][frame.f_lineno - 1]
if line.find(target['varname']) != -1 and frame.f_lineno == target['lineno']:
next_func = self.observe_single_line
return next_func
def observe_single_line(self, frame, event, arg):
for i, target in enumerate(self.targets):
if target['varname'] in frame.f_locals:
self.targets[i]['values'].append(frame.f_locals[target['varname']])
value = frame.f_locals[target['varname']]
value = self.simplify(value)
message = {"varname": target['varname'], "value": value}
self.socket.sendall(json.dumps(message))
self.socket.recv(32)
# give control back to trace_calls
return self.trace_calls
def simplify(self, value):
if type(value) is np.ndarray:
return value.tolist()
return value
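# A minimal sketch of the sys.settrace mechanism used above (illustration only,
# not wired to the socket protocol): the trace function receives 'call'/'line'
# events and must return itself to keep receiving line events for a frame.
def _demo_settrace():
    lines = []
    def tracefunc(frame, event, arg):
        if event == 'line':
            lines.append((frame.f_code.co_filename, frame.f_lineno))
        return tracefunc
    def work():
        x = 1
        y = x + 1
        return y
    sys.settrace(tracefunc)
    work()
    sys.settrace(None)
    return lines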
if __name__ == '__main__':
mode = sys.argv[1]
config_filename = sys.argv[2]
filename_to_run = sys.argv[3]
sys.argv.pop(0)
sys.argv.pop(0)
sys.argv.pop(0)
if mode == "line":
        tracer = LineTracer(config_filename)  # in line mode, the config argument is the path prefix to trace
if mode == "variable":
tracer = Tracer(config_filename)
threading.settrace(tracer.trace_calls)
sys.settrace(tracer.trace_calls)
execfile(filename_to_run, globals(), locals())
|
StarcoderdataPython
|
112373
|
from flourish import Flourish
from flourish.generators.base import SourceGenerator
from flourish.source import SourceFile
import pytest
class TestFlourishPaths:
@classmethod
def setup_class(cls):
with pytest.warns(None) as warnings:
cls.flourish = Flourish('tests/source')
def test_homepage_resolves(self):
assert self.flourish.resolve_path('homepage') == '/'
def test_homepage_resolves_even_with_arguments(self):
assert self.flourish.resolve_path('homepage', tag='series') == '/'
def test_tag_index_with_arguments_resolves(self):
assert(self.flourish.resolve_path('tags-tag-page', tag='series') ==
'/tags/series/')
assert(self.flourish.resolve_path('tags-tag-page', tag='css') ==
'/tags/css/')
def test_tag_index_without_arguments_raises(self):
with pytest.raises(KeyError):
_ = self.flourish.resolve_path('tags-tag-page')
def test_homepage_has_one_valid_filter(self):
assert self.flourish.all_valid_filters_for_path('homepage') == [
{}
]
def test_post_detail_has_many_valid_filters(self):
assert self.flourish.all_valid_filters_for_path('source') == [
{'slug': 'basic-page'},
{'slug': 'markdown-page'},
{'slug': 'nothing'},
{'slug': 'series/index'},
{'slug': 'series/part-one'},
{'slug': 'series/part-three'},
{'slug': 'series/part-two'},
{'slug': 'thing-one'},
{'slug': 'thing-two'},
]
def test_tag_index_has_many_valid_filters(self):
assert self.flourish.all_valid_filters_for_path('tags-tag-page') == [
{'tag': 'basic-page'},
{'tag': 'basically'},
{'tag': 'first'},
{'tag': 'index'},
{'tag': 'one'},
{'tag': 'second'},
{'tag': 'series'},
{'tag': 'three'},
{'tag': 'two'},
]
def test_tag_post_detail_resolves_to_many_with_only_one_source_each(self):
_filters = self.flourish.all_valid_filters_for_path('tag-post-detail')
assert _filters == [
{'tag': 'basic-page', 'slug': 'basic-page'},
{'tag': 'basically', 'slug': 'thing-one'},
{'tag': 'basically', 'slug': 'thing-two'},
{'tag': 'first', 'slug': 'thing-one'},
{'tag': 'index', 'slug': 'series/index'},
{'tag': 'one', 'slug': 'series/part-one'},
{'tag': 'one', 'slug': 'thing-one'},
{'tag': 'second', 'slug': 'thing-two'},
{'tag': 'series', 'slug': 'series/index'},
{'tag': 'series', 'slug': 'series/part-one'},
{'tag': 'series', 'slug': 'series/part-three'},
{'tag': 'series', 'slug': 'series/part-two'},
{'tag': 'three', 'slug': 'series/part-three'},
{'tag': 'two', 'slug': 'series/part-two'},
{'tag': 'two', 'slug': 'thing-two'},
]
# as the filters include `slug` (which is unique),
# each should only match one source
for _filter in _filters:
assert self.flourish.sources.filter(**_filter).count() == 1
def test_year_index(self):
_filters = self.flourish.all_valid_filters_for_path('year-index')
assert _filters == [
{'year': '2015'},
{'year': '2016'},
]
sources = self.flourish.sources
assert sources.filter(**_filters[0]).count() == 1 # 2015
assert sources.filter(**_filters[1]).count() == 7 # 2016
assert sources.filter(**_filters[0]).count() == 1 # 2015
def test_month_index(self):
_filters = self.flourish.all_valid_filters_for_path('month-index')
assert _filters == [
{'month': '12', 'year': '2015'},
{'month': '02', 'year': '2016'},
{'month': '06', 'year': '2016'},
]
sources = self.flourish.sources
assert sources.filter(**_filters[0]).count() == 1 # 2015/12
assert sources.filter(**_filters[1]).count() == 1 # 2016/02
assert sources.filter(**_filters[2]).count() == 6 # 2016/06
def test_day_index(self):
_filters = self.flourish.all_valid_filters_for_path('day-index')
assert _filters == [
{'day': '25', 'month': '12', 'year': '2015'},
{'day': '29', 'month': '02', 'year': '2016'},
{'day': '04', 'month': '06', 'year': '2016'},
{'day': '06', 'month': '06', 'year': '2016'},
]
sources = self.flourish.sources
assert sources.filter(**_filters[0]).count() == 1 # 2015/12/25
assert sources.filter(**_filters[1]).count() == 1 # 2016/02/29
assert sources.filter(**_filters[2]).count() == 5 # 2016/06/04
assert sources.filter(**_filters[3]).count() == 1 # 2016/06/06
def test_no_such_keyword_has_no_filters(self):
assert self.flourish.all_valid_filters_for_path('no-such-keyword') \
== []
def test_not_configured_has_no_filters(self):
with pytest.raises(SourceFile.DoesNotExist):
_ = self.flourish.all_valid_filters_for_path('awooga')
def test_paths_for_sources(self):
assert [
'/basic-page',
'/markdown-page',
'/nothing',
'/thing-one',
'/thing-two',
'/series/part-one',
'/series/part-three',
'/series/part-two',
'/series/',
] == [source.path for source in self.flourish.sources.all()]
def test_lookup_path_handler(self):
paths = (
('/', ('homepage', {})),
('/tags/first/', ('tags-tag-page', {'tag': 'first'})),
('/index.atom', ('atom-feed', {})),
('/thing-one', ('source', {'slug': 'thing-one'})),
)
for path, args in paths:
matches = self.flourish.get_handler_for_path(path)
assert matches[0] == args
assert [] == self.flourish.get_handler_for_path('/rabble')
def test_lookup_path_handler_wildcard(self):
expected = [
('tags-tag-page', {'tag': 'first'}),
('tag-post-detail', {'slug': 'thing-one', 'tag': 'first'}),
('tags-atom-feed', {'tag': 'first'}),
]
assert expected == self.flourish.get_handler_for_path('/tags/first/?')
def test_lookup_path_handler_wildcard_submatches(self):
expected = [
('year-index', {'year': '2016'}),
('month-index', {'month': '02', 'year': '2016'}),
('month-index', {'month': '06', 'year': '2016'}),
('day-index', {'day': '29', 'month': '02', 'year': '2016'}),
('day-index', {'day': '04', 'month': '06', 'year': '2016'}),
('day-index', {'day': '06', 'month': '06', 'year': '2016'}),
]
assert expected == self.flourish.get_handler_for_path('/2016?')
class TestFlourishSourcesPath:
def test_category_prefixed_sources(self):
with pytest.warns(None) as _warnings:
_flourish = Flourish('tests/source')
_flourish.add_path(
SourceGenerator(
path = '/#category/#slug',
name = 'source',
),
)
assert [
'/static/basic-page',
'/post/markdown-page',
None,
'/thing/thing-one',
'/thing/thing-two',
'/article/series/part-one',
'/article/series/part-three',
'/article/series/part-two',
'/article/series/',
] == [source.path for source in _flourish.sources.all()]
def test_invalid_prefixed_sources(self):
with pytest.warns(None) as _warnings:
_flourish = Flourish('tests/source')
_flourish.add_path(
SourceGenerator(
path = '/#page_type/#slug',
name = 'source',
),
)
assert [
None,
None,
None,
'/post/thing-one',
'/post/thing-two',
'/post/series/part-one',
'/post/series/part-three',
'/post/series/part-two',
'/series_index/series/',
] == [source.path for source in _flourish.sources.all()]
# FIXME catch third warning
def test_multiple_option_prefixed_sources(self):
with pytest.warns(None) as _warnings:
_flourish = Flourish('tests/source')
_flourish.add_path(
SourceGenerator(
path = '/#tag/#slug',
name = 'source',
),
)
assert [
'/basic-page/basic-page',
None,
None,
'/basically/thing-one',
'/basically/thing-two',
'/series/series/part-one',
'/three/series/part-three',
'/series/series/part-two',
'/series/series/',
] == [source.path for source in _flourish.sources.all()]
# FIXME catch third warning
|
StarcoderdataPython
|
3233256
|
# SPDX-FileCopyrightText: 2021-present <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: MIT
def fibonacci(n: int) -> int:
if n <= 1:
return n
else:
return fibonacci(n - 2) + fibonacci(n - 1)
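# Example usage (a small sanity check): the first ten Fibonacci numbers.
if __name__ == "__main__":
    print([fibonacci(n) for n in range(10)])  # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]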
|
StarcoderdataPython
|
1762416
|
import re
import six
import unittest
from geodata.addresses.entrances import *
from geodata.addresses.floors import *
from geodata.intersections.query import *
from geodata.addresses.po_boxes import *
from geodata.addresses.postcodes import *
from geodata.addresses.staircases import *
from geodata.addresses.units import *
from geodata.categories.query import *
from geodata.math.floats import isclose
invalid_phrase_re = re.compile(r'\b(None|False|True)\b')
class TestAddressConfigs(unittest.TestCase):
def valid_phrase(self, phrase):
return phrase is None or not invalid_phrase_re.search(phrase)
def check_components(self, language, country):
conf = address_config.get_property('components', language, country=country)
for component, value in six.iteritems(conf):
if component == 'combinations':
continue
total_prob = 0.0
for k, v in six.iteritems(value):
if k.endswith('probability'):
total_prob += v
self.assertTrue(isclose(total_prob, 1.0), six.u('language: {}, country: {}, component: {}'.format(language, country, component)))
def check_entrance_phrases(self, language, country=None):
for i in xrange(1000):
phrase = Entrance.phrase(Entrance.random(language, country=country), language, country=country)
self.assertTrue(self.valid_phrase(phrase), six.u('phrase was: {}').format(phrase))
def check_staircase_phrases(self, language, country=None):
for i in xrange(1000):
phrase = Entrance.phrase(Entrance.random(language, country=country), language, country=country)
self.assertTrue(self.valid_phrase(phrase), six.u('phrase was: {}').format(phrase))
def check_floor_phrases(self, language, country=None):
for i in xrange(10000):
phrase = Floor.phrase(Floor.random(language, country=country), language, country=country)
self.assertTrue(self.valid_phrase(phrase), six.u('phrase was: {}').format(phrase))
for i in xrange(1000):
phrase = Floor.phrase(None, language, country=country)
self.assertTrue(self.valid_phrase(phrase), six.u('phrase was: {}').format(phrase))
for i in xrange(1000):
phrase = Floor.phrase(None, language, country=country, num_floors=3)
self.assertTrue(self.valid_phrase(phrase), six.u('phrase was: {}').format(phrase))
def check_unit_phrases(self, language, country=None):
for i in xrange(10000):
phrase = Unit.phrase(Unit.random(language, country=country), language, country=country)
self.assertTrue(self.valid_phrase(phrase), six.u('phrase was: {}').format(phrase))
for i in xrange(1000):
phrase = Unit.phrase(None, language, country=country)
self.assertTrue(self.valid_phrase(phrase), six.u('phrase was: {}').format(phrase))
for i in xrange(1000):
phrase = Unit.phrase(Unit.random(language, country=country, num_floors=3, num_basements=1), language, country=country)
self.assertTrue(self.valid_phrase(phrase), six.u('phrase was: {}').format(phrase))
for zone in ('commercial', 'industrial', 'university'):
for i in xrange(1000):
phrase = Unit.phrase(Unit.random(language, country=country), language, country=country, zone=zone)
self.assertTrue(self.valid_phrase(phrase), six.u('phrase was: {}').format(phrase))
def check_po_boxes(self, language, country=None):
for i in xrange(1000):
phrase = POBox.phrase(POBox.random(language, country=country), language, country=country)
if phrase is None:
break
self.assertTrue(self.valid_phrase(phrase), six.u('phrase was: {}').format(phrase))
def check_postcodes(self, language, country=None):
for i in xrange(1000):
phrase = PostCode.phrase('12345', language, country=country)
if phrase is None:
break
self.assertTrue(self.valid_phrase(phrase), six.u('phrase was: {}').format(phrase))
def check_intersection_phrases(self, language, country=None):
for i in xrange(1000):
phrase = Intersection.phrase(language, country=country)
if phrase is None:
break
self.assertTrue(self.valid_phrase(phrase), six.u('phrase was: {}').format(phrase))
def check_category_phrases(self, language, country=None):
for i in xrange(1000):
phrase = Category.phrase(language, 'amenity', 'restaurant', country=country)
if phrase.category is None:
break
def check_config(self, language, country=None):
print('Doing lang={}, country={}'.format(language, country))
print('Checking components')
self.check_components(language, country=country)
print('Checking entrances')
self.check_entrance_phrases(language, country=country)
print('Checking staircases')
self.check_staircase_phrases(language, country=country)
print('Checking floors')
self.check_floor_phrases(language, country=country)
print('Checking units')
self.check_unit_phrases(language, country=country)
print('Checking intersections')
self.check_intersection_phrases(language, country=country)
print('Checking categories')
self.check_category_phrases(language, country=country)
print('Checking PO boxes')
self.check_po_boxes(language, country=country)
print('Checking postcodes')
self.check_postcodes(language, country=country)
def test_configs(self):
for lang, value in six.iteritems(address_config.address_configs):
self.check_config(lang)
for country in value.get('countries', []):
self.check_config(lang, country)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3325463
|
<filename>testing/train_cb.py
import pdb
import sys
import subprocess
import argparse
p = argparse.ArgumentParser()
p.add_argument('model_file', help="model specification", default="cb_hand2_v_shuffle_3areas")
p.add_argument('-g', '--gpus', nargs='?', type=int, default=1)
p.add_argument('-s', '--seed', nargs='?', type=int, default=100)
p.add_argument('-lambdar', '--lambdar', type=float, default=0)
p.add_argument('-lambdaw', '--lambdaw', type=float, default=1)
p.add_argument('-clean', '--clean', type=bool, default=False)
p.add_argument('-suffix', '--suffix', nargs='?', type=str, default=' ')
p.add_argument('-lr', '--lr', type=float, default=5e-5)
# a is an argparse.Namespace; a.model_file is the model specification
a = p.parse_args()
def call(s):
rv = subprocess.call(s.split())
if rv != 0:
sys.stdout.flush()
print("Something went wrong (return code {}).".format(rv)
+ " We're probably out of memory.")
sys.exit(1)
if a.clean:
call("python ../examples/do.py ../examples/models/{} clean --suffix {}".format(a.model_file, a.suffix))
# train
call("python ../examples/do.py ../examples/models/{}.py train -s {} -g {} -lr {} -lambdar {} -lambdaw {} --suffix {}".format(a.model_file, a.seed, a.gpus, a.lr, a.lambdar, a.lambdaw, a.suffix))
# call("python ../examples/do.py ../examples/models/{}.py train -s {} -g {}".format(a.model_file, a.seed, a.gpus))
# structure
#call("python ../examples/do.py ../examples/models/{}.py structure -s 1".format(a.model_file))
|
StarcoderdataPython
|
3322344
|
import tensorflow as tf
import numpy as np
import PIL as pil
import scipy
import skimage.measure
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, ZeroPadding2D, Convolution2D, Activation, AveragePooling2D, Flatten, Reshape
from keras.layers import Deconvolution2D as Conv2DTranspose
from keras.layers.normalization import BatchNormalization
from keras.applications.resnet50 import conv_block, identity_block
from keras.models import Model
from keras.optimizers import SGD, RMSprop
from keras import backend as K
from keras import regularizers
def pixel_weighted_loss(x_p,y):
x=x_p[:,:,:,:1]
weights=x_p[:,:,:,1:]
return K.mean(weights * K.square(y - x), axis=-1)
def mse_evbyev0(x,y):
return K.mean(K.square(y-x),axis=0)
def mse_evbyev1(x,y):
return K.mean(K.square(y-x),axis=1)
def mse_evbyev2(x,y):
return K.mean(K.square(y-x),axis=2)
def mse_evbyev3(x,y):
return K.mean(K.square(y-x),axis=3)
def mse_evbyev(x,y):
return K.mean(K.square(y-x),axis=(1,2,3))
def mse_evbyev_w(x_p,y):
x=x_p[:,:,:,:1]
weights=x_p[:,:,:,1:]
return K.mean(weights * K.square(y-x),axis=(1,2,3))
base_wh = 512
input_img = Input(shape=(base_wh, base_wh, 1)) # adapt this if using `channels_first` image data format
if True:
x = ZeroPadding2D((3, 3))(input_img)
print x.name, x.get_shape()
x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
print x.name, x.get_shape()
x = BatchNormalization(axis=3, name='bn_conv1')(x)
print x.name, x.get_shape()
x = Activation('relu')(x)
print x.name, x.get_shape()
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
print x.name, x.get_shape()
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
print x.name, x.get_shape()
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
print x.name, x.get_shape()
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
print x.name, x.get_shape()
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
print x.name, x.get_shape()
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
print x.name, x.get_shape()
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
print x.name, x.get_shape()
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
print x.name, x.get_shape()
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
print x.name, x.get_shape()
x = AveragePooling2D((7, 7), name='avg_pool')(x)
print x.name, x.get_shape()
x = Flatten()(x)
print x.name, x.get_shape()
x = Dense(2*32*32)(x)
print x.get_shape()
encoded = x
#decoded = Reshape((32,32,2))(x)
x = Dense(2*2*2048)(x)
print x.name, x.get_shape()
x = Reshape((2,2,2048))(x)
print x.name, x.get_shape()
x = Conv2DTranspose(2048,1,1,(None,16,16,2048),subsample=(8,8))(x)
print x.name, x.get_shape()
x = conv_block(x, 3, [512, 512, 2048], strides=(1,1), stage=6, block='a')
print x.name, x.get_shape()
x = identity_block(x, 3, [512, 512, 2048], stage=6, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [512, 512, 2048], stage=6, block='c')
print x.name, x.get_shape()
x = Conv2DTranspose(1024,1,1,(None,32,32,1024),subsample=(2,2))(x)
print x.name, x.get_shape()
x = conv_block(x, 3, [256, 256, 1024], strides=(1,1), stage=7, block='a')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=7, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=7, block='c')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=7, block='d')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=7, block='e')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=7, block='f')
print x.name, x.get_shape()
x = Conv2DTranspose(512,1,1,(None,64,64,512),subsample=(2,2))(x)
print x.name, x.get_shape()
x = conv_block(x, 3, [128, 128, 512], stage=8, strides=(1,1), block='a')
print x.name, x.get_shape()
x = identity_block(x, 3, [128, 128, 512], stage=8, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [128, 128, 512], stage=8, block='c')
print x.name, x.get_shape()
x = identity_block(x, 3, [128, 128, 512], stage=8, block='d')
print x.name, x.get_shape()
x = Conv2DTranspose(256,1,1,(None,128,128,256),subsample=(2,2))(x)
print x.name, x.get_shape()
x = conv_block(x, 3, [64, 64, 256], stage=9, block='a', strides=(1, 1))
print x.name, x.get_shape()
x = identity_block(x, 3, [64, 64, 256], stage=9, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [64, 64, 256], stage=9, block='c')
print x.name, x.get_shape()
x = Conv2DTranspose(128,1,1,(None,256,256,128),subsample=(2,2))(x)
print x.name, x.get_shape()
x = conv_block(x, 3, [32, 32, 128], stage=10, block='a', strides=(1, 1))
print x.name, x.get_shape()
x = identity_block(x, 3, [32, 32, 128], stage=10, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [32, 32, 128], stage=10, block='c')
print x.name, x.get_shape()
x = Conv2DTranspose(64,1,1,(None,512,512,64),subsample=(2,2))(x)
print x.name, x.get_shape()
x = ZeroPadding2D((3, 3))(x)
print x.name, x.get_shape()
x = Convolution2D(64, 7, 7, subsample=(1, 1), name='conv3')(x)
print x.name, x.get_shape()
x = ZeroPadding2D((3, 3))(x)
print x.name, x.get_shape()
x = Convolution2D(3, 7, 7, subsample=(1, 1), name='conv4')(x)
print x.name, x.get_shape()
x = ZeroPadding2D((3, 3))(x)
print x.name, x.get_shape()
x = Convolution2D(1, 7, 7, subsample=(1, 1), name='conv5')(x)
print x.name, x.get_shape()
#x = Activation('softmax')(x)
#print x.name, x.get_shape()
decoded = x
autoencoder = Model(input_img, decoded,)
autoencoder.compile(
#optimizer='adadelta',
optimizer=RMSprop(lr=0.0003),
#optimizer=SGD(lr=0.1, decay=1e-6, momentum=1.9),
#loss='mse',
#loss='binary_crossentropy',
loss=pixel_weighted_loss,
#metrics=[mse_evbyev,mse_evbyev1,mse_evbyev2,mse_evbyev3,mse_evbyev4]
metrics=[mse_evbyev_w]
)
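# Data pipeline note (added for orientation): _parse_function below loads one sparse
# 'plane2' array from an .npz file, crops a random base_wh x base_wh patch that has
# non-zero content, min-max normalises it, and builds a per-pixel weight map from an
# occupancy histogram; the weight map is stacked onto the target so the
# pixel-weighted loss can use it.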
def _parse_function(filename):
X=np.load(filename)['plane2'].reshape((1,))[0]
z00 = X.astype(np.float32).toarray().reshape((3456,1008,1));
while True:
i = np.random.randint(3456-base_wh)
j = np.random.randint(1008-base_wh)
z0 = z00[i:i+base_wh,j:j+base_wh,:]
if z0.max() > 0. or z0.min() < 0.: break
#print 'z0 shape:', z0.shape
z = z0
if z.max() > z.min(): z = (z0-np.min(z0))/(np.max(z0)-np.min(z0))
#zwh,edg = np.histogram(z0,bins=[0,1,13])
maxneg=-0.5
minpos=0.5
#print z0.min(),z0.max(),z0[z0<0.].shape,z0[z0>0.].shape
if z0.min()<0.: maxneg = np.max(z0[z0<0.])
if z0.max()>0.: minpos = np.min(z0[z0>0.])
zwh,edg = np.histogram(z0,bins=[-5000,maxneg/2,minpos/2,5000])
zwh=zwh.sum().astype(np.float32)/(zwh+1e-10)
zw = np.piecewise(z0,[(z0>=edg[i]-0.5)&(z0<edg[i+1]-0.5) for i in xrange(len(edg)-1)],zwh)
sumw = np.sum(zw) / zw.shape[0] / zw.shape[1]
return z, np.dstack([z,zw/sumw])
def randint(filename):
X=np.load(filename)['plane2'].reshape((1,))[0]
z00 = X.astype(np.float32).toarray().reshape((3456,1008,1));
i = np.random.randint(3456-base_wh)
j = np.random.randint(1008-base_wh)
while True:
z0 = z00[i:i+base_wh,j:j+base_wh,:]
if z0.max() > 0. or z0.min() < 0.: break
i = np.random.randint(3456-base_wh)
j = np.random.randint(1008-base_wh)
return (i, j)
def _parse_function_v(arg):
filename,(i,j) = arg
X=np.load(filename)['plane2'].reshape((1,))[0]
z0 = X.astype(np.float32).toarray().reshape((3456,1008,1));
z0 = z0[i:i+base_wh,j:j+base_wh,:]
z = z0
if z.max() > z.min(): z = (z0-np.min(z0))/(np.max(z0)-np.min(z0))
#zwh,edg = np.histogram(z0,bins=[0,1,13])
maxneg=-0.5
minpos=0.5
if z0.min()<0.: maxneg = np.max(z0[z0<0.])
if z0.max()>0.: minpos = np.min(z0[z0>0.])
zwh,edg = np.histogram(z0,bins=[-5000,maxneg/2,minpos/2,5000])
zwh=zwh.sum().astype(np.float32)/(zwh+1e-10)
zw = np.piecewise(z0,[(z0>=edg[i]-0.5)&(z0<edg[i+1]-0.5) for i in xrange(len(edg)-1)],zwh)
sumw = np.sum(zw) / zw.shape[0] / zw.shape[1]
return z, np.dstack([z,zw/sumw])
if False:
#z = (z0+4096.)/4096./2.
z = (z0-np.min(z0))/(np.max(z0)-np.min(z0))
zz = skimage.measure.block_reduce(z,(6,2),np.max)
zz2 = skimage.measure.block_reduce(z,(6,2),np.min)
zzm = skimage.measure.block_reduce(z,(6,2),np.mean)
zzw = skimage.measure.block_reduce(z0,(6,2),np.count_nonzero)
zzwh,edg = np.histogram(zzw,bins=[0,1,5,13])
zzwh = zzwh.sum().astype(np.float32)/(zzwh+1e-10)
#zzwh[0] = zzwh[0]/100.
zzw = zzw.astype(np.float32)
zzw = np.piecewise(zzw,[(zzw>=edg[i]-0.5)&(zzw<edg[i+1]-0.5) for i in xrange(len(edg)-1)],zzwh)
#zzw = v_reweight(x=zzw,hist=zzwh,bins=edg)
sumw = np.sum(zzw) / zzw.shape[0] / zzw.shape[1]
zzw = zzw / sumw
zz3 = np.dstack([zz,zz2,zzm])
zz4 = np.dstack([zz,zz2,zzm,zzw])
#return zz3,zz4
# A vector of filenames.
import os
filenames = ['output7/%s'%f for f in os.listdir('output7') if f.endswith('.npz') ]
valid_filenames = ['outputV/%s'%f for f in os.listdir('outputV') if f.endswith('.npz') ]
valid_starts = [randint(f) for f in valid_filenames]
np.random.shuffle(filenames)
epochs=350
steps_per_epoch=25
batch_size=4
valid_batch_size=4
valid_steps=640/valid_batch_size
min_mean_valid_loss = float('inf')  # the original 1e10000 overflows to +inf anyway; make the intent explicit
alllosses=[]
try:
for epoch in xrange(epochs):
for step in xrange(steps_per_epoch):
startev = (epoch * steps_per_epoch + step * batch_size) % len(filenames)
stopev = (epoch * steps_per_epoch + (step+1) * batch_size) % len(filenames)
if(startev > stopev):
a = filenames[startev:]
np.random.shuffle(filenames)
dataset=map(_parse_function,filenames[:stopev]+a)
else:
dataset=map(_parse_function,filenames[startev:stopev])
x,y = zip(*dataset)
loss = autoencoder.train_on_batch(np.stack(x),np.stack(y))
#print loss
#print loss[1].shape
#print loss[2].shape
#print loss[3].shape
#print loss[4].shape
#print loss[5].shape
#print len(y)
#print len(dataset)
#print np.stack(y).shape
#raise Exception
#print epoch, step, loss
mean_valid_loss = 0.;
alllosses=[]
for step in xrange(valid_steps):
startev = (step * valid_batch_size) % len(valid_filenames)
stopev = ((step+1) * valid_batch_size) % len(valid_filenames)
if(startev > stopev):
dataset=map(_parse_function_v,zip(valid_filenames[:stopev]+valid_filenames[startev:],valid_starts[:stopev]+valid_starts[startev:]))
else:
dataset=map(_parse_function_v,zip(valid_filenames[startev:stopev],valid_starts[startev:stopev]))
x,y = zip(*dataset)
losses=autoencoder.test_on_batch(np.stack(x),np.stack(y))
mean_valid_loss+=losses[0]
alllosses+=[losses[1]]
print epoch,'VALID',mean_valid_loss/valid_steps#,alllosses
if mean_valid_loss < min_mean_valid_loss:
min_mean_valid_loss = mean_valid_loss
autoencoder.save('autoencoder.min.mdl')
np.save('alllosses.min.npy',np.concatenate(alllosses))
except KeyboardInterrupt:
pass
finally:
autoencoder.save('autoencoder.mdl')
if len(alllosses) >0: np.save('alllosses.npy',np.concatenate(alllosses))
#print dataset
#print dataset
#autoencoder.fit(x,y,epochs=50,steps_per_epoch=25,validation_data = (xv,yv),validation_steps=10)
if False:
input_img = Input(shape=(576, 504, 3)) # adapt this if using `channels_first` image data format
x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)
#encoded = MaxPooling2D((2, 2), padding='same')(x)
#print encoded.shape
# at this point the representation is (4, 4, 8) i.e. 128-dimensional
x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(3, (3,3), activation='sigmoid', padding='same')(x)
decoded = Cropping2D(cropping=(0,4),data_format='channels_last')(x)
autoencoder = Model(input_img, decoded,)
autoencoder.compile(
optimizer='adadelta',
#optimizer=SGD(lr=0.1, decay=1e-6, momentum=1.9),
#loss='mse',
#loss='binary_crossentropy',
loss=pixel_weighted_loss,
)
# 2018 09 18
if False:
print input_img.get_shape()
x = Conv2D(8, 7, 7, activation='relu',border_mode='same')(input_img)
print x.get_shape()
x = MaxPooling2D((2, 2))(x)
print x.get_shape()
x = Conv2D(16, 7, 7, activation='relu',border_mode='same')(x)
print x.get_shape()
x = MaxPooling2D((2, 2))(x)
print x.get_shape()
x = Conv2D(24, 7, 7, activation='relu',border_mode='same')(x)
print x.get_shape()
x = MaxPooling2D((2, 2))(x)
print x.get_shape()
x = Conv2D(32, 7, 7, activation='relu',border_mode='same')(x)
print x.get_shape()
encoded = MaxPooling2D((2, 2))(x)
print encoded.get_shape()
x = Conv2D(32, 7, 7, activation='relu',border_mode='same')(encoded)
print x.get_shape()
x = UpSampling2D((2, 2))(x)
print x.get_shape()
x = Conv2D(24, 7, 7, activation='relu',border_mode='same')(x)
print x.get_shape()
x = UpSampling2D((2, 2))(x)
print x.get_shape()
x = Conv2D(16, 7, 7, activation='relu',border_mode='same')(x)
print x.get_shape()
x = UpSampling2D((2, 2))(x)
print x.get_shape()
x = Conv2D(8, 7, 7, activation='relu',border_mode='same')(x)
print x.get_shape()
x = UpSampling2D((2, 2))(x)
print x.get_shape()
decoded = Conv2D(1, 7, 7, activation='sigmoid',border_mode='same')(x)
print decoded.get_shape()
|
StarcoderdataPython
|
1774886
|
<filename>data/test_waypoints_show.py<gh_stars>1-10
#!/usr/bin/env python
import os
import csv
import tf
import numpy as np
import matplotlib.pyplot as plt
"""
self.lights.len=8
lights[0] : [1172.183, 1186.299],
lights[1] : [1584.065, 1156.953],
lights[2] : [2126.353, 1550.636],
lights[3] : [2178.291, 1819.328],
lights[4] : [1469.499, 2946.97],
lights[5] : [797.9147, 2905.59],
lights[6] : [160.8088, 2279.929],
lights[7] : [363.378, 1553.731],
"""
simu_traffic_lights = np.array([
[1172.183, 1186.299],
[1584.065, 1156.953],
[2126.353, 1550.636],
[2178.291, 1819.328],
[1469.499, 2946.97],
[797.9147, 2905.59],
[160.8088, 2279.929],
[363.378, 1553.731]])
simu_stop_lines=np.array([
[1148.56, 1184.65],
[1559.2, 1158.43],
[2122.14, 1526.79],
[2175.237, 1795.71],
[1493.29, 2947.67],
[821.96, 2905.8],
[161.76, 2303.82],
[351.84, 1574.65]])
site_stop_lines=np.array([
#[20.991, 22.837]
[8.0, 16.2]
])
CSV_HEADER = ['x', 'y', 'z', 'yaw']
waypointsX = []
waypointsY = []
waypointsYaw = []
def quaternion_from_yaw(yaw):
return tf.transformations.quaternion_from_euler(0., 0., yaw)
def load_waypoints(fname):
with open(fname) as wfile:
wpsX = []
wpsY = []
wpsYaw = []
reader = csv.DictReader(wfile, CSV_HEADER)
for wp in reader:
wpsX.append(float(wp['x']))
wpsY.append(float(wp['y']))
wpsYaw.append(quaternion_from_yaw(float(wp['yaw'])))
return wpsX, wpsY, wpsYaw
def ShowSimulatorDatas(path):
if os.path.isfile(path):
print
print "Load Simulator waypoints: path=", path
waypointsX, waypointsY, waypointsYaw = load_waypoints(path)
print "waypointsX.len=", len(waypointsX)
print "waypointsY.len=", len(waypointsY)
print "waypointsYaw.len=", len(waypointsYaw)
tl_x = simu_traffic_lights[:,0]
tl_y = simu_traffic_lights[:,1]
stop_x = simu_stop_lines[:,0]
stop_y = simu_stop_lines[:,1]
plt.axes().set(xlabel='x', ylabel='y', title='Simulator')
plt.plot(tl_x, tl_y, 'o', color='red')
plt.plot(stop_x, stop_y, 'x', color='blue')
plt.plot(waypointsX, waypointsY, color='green')
plt.show()
def ShowSiteDatas(path):
if os.path.isfile(path):
print
print "Load Site waypoints: path=", path
waypointsX, waypointsY, waypointsYaw = load_waypoints(path)
print "waypointsX.len=", len(waypointsX)
print "waypointsY.len=", len(waypointsY)
print "waypointsYaw.len=", len(waypointsYaw)
#tl_x = simu_traffic_lights[:,0]
#tl_y = simu_traffic_lights[:,1]
stop_x = site_stop_lines[:,0]
stop_y = site_stop_lines[:,1]
plt.axes().set(xlabel='x', ylabel='y', title='Site')
#plt.plot(tl_x, tl_y, 'o', color='red')
plt.plot(stop_x, stop_y, 'o', color='red')
plt.plot(waypointsX, waypointsY, color='green')
plt.show()
#-------- Simulator base waypoints -------------#
path = "wp_yaw_const.csv"
ShowSimulatorDatas(path)
#-------- Site base waypoints -------------#
path = "churchlot_with_cars.csv"
ShowSiteDatas(path)
|
StarcoderdataPython
|
3246739
|
<reponame>plilja/project-euler
from common.primes import *
from common.functions import *
def largest_pandigital_primes(n):
for i in range(n, 1, -1):
for pandigital_number in sorted(_all_pandigital_numbers(i), reverse=True):
if is_prime(pandigital_number):
return pandigital_number
    raise ValueError("No pandigital prime exists for n=%s" % n)
def _all_pandigital_numbers(n):
assert n <= 9
_permutations = permutations(range(1, n + 1))
_pandigital_numbers_as_strings = map(lambda x: "".join(map(str, x)), _permutations)
return map(int, _pandigital_numbers_as_strings)
|
StarcoderdataPython
|
4822555
|
# Same as the classic "no two consecutive 1's allowed" counting problem;
# here the roles of 0s and 1s are swapped, but the count comes out the same.
# lets say n=3
# so binary strings = 2^3 = 8
# 000 not allowed
# 001 not allowed
# 010
# 011
# 100 not allowed
# 101
# 110
# 111
# so output : 5
def countStrings(n):
if n==0: return 0 # if n=0 then no string can be formed
zeros = ones = 1 # for base length of 1
for i in range(2, n+1):
newZeros = ones
newOnes = ones + zeros
zeros = newZeros
ones = newOnes
return zeros + ones
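# Trace of the recurrence for n=3 (zeros/ones = counts of valid strings ending in 0/1):
#   length 1: zeros=1, ones=1 -> 2 strings ("0", "1")
#   length 2: zeros=1, ones=2 -> 3 strings ("01", "10", "11")
#   length 3: zeros=2, ones=3 -> 5 strings, matching the listing above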
if __name__ == "__main__":
n = 3
print(countStrings(n))
|
StarcoderdataPython
|
157850
|
#!/usr/bin/env python
""" AER1415 Computer Optimization - Assignment 1
Author: <NAME>
Submitted: Feb 25, 2021
Email: <EMAIL>
Description:
"""
from numpy import *
import os
from matplotlib import pyplot as plt
from IPython import embed
from mpl_toolkits import mplot3d
from matplotlib import cm
import matplotlib
import cmath
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
matplotlib.use('TkAgg')
class Particle:
def __init__(self, x0, bounds, params):
self.pos = array(x0[:])
self.vel = array([random.uniform(*i) for i in bounds])
self.posBest = None
self.valBest = None
self.val = None
self.params = params
self.bounds = bounds
self.penaltyParam = 1
def calc(self,costFunc,iterNum):
#setup exponential penalty function
if self.params.get('penalty',False):
self.penaltyParam = self.params['penalty']**iterNum
elif self.params.get('penaltyStatic',False):
self.penaltyParam = self.params['penaltyStatic']
#Call cost function with current particle position
self.val = costFunc(self.pos,iterNum=iterNum,penaltyParam=self.penaltyParam,params=self.params)
#if new val is less than stored minimum particle val, update value and position
if self.valBest is None or self.val < self.valBest:
self.posBest = self.pos
self.valBest = self.val
def update_position(self,posGlobBest):
r1, r2 = random.uniform(size=2)
vel_cognitive = self.params['c1']*r1*(self.posBest-self.pos.copy())
vel_social = self.params['c2']*r2*(posGlobBest-self.pos.copy())
        #calculate new particle velocity
self.vel = self.params['w']*self.vel + vel_cognitive + vel_social
#Hyperbolic bound approach
if self.params['boundMethod'] == 'hyperbolic':
for idx,xNew in enumerate(self.pos):
if self.vel[idx] > 0:
self.vel[idx] = self.vel[idx] / (1. + abs(self.vel[idx]/(self.bounds[idx][1]-self.pos[idx])))
else:
self.vel[idx] = self.vel[idx] / (1. + abs(self.vel[idx]/(self.pos[idx]-self.bounds[idx][0])))
        #set new particle position by adding the velocity
self.pos = add(self.pos,self.vel)
#Reflect bound approach
if self.params['boundMethod'] == 'reflect':
for idx,xNew in enumerate(self.pos):
if xNew < self.bounds[idx][0]:
self.pos[idx] = self.bounds[idx][0] + (self.bounds[idx][0] - self.pos[idx])
elif xNew > self.bounds[idx][1]:
self.pos[idx] = self.bounds[idx][1] - (self.bounds[idx][1] - self.pos[idx])
#Nearest bound approach
if self.params['boundMethod'] == 'nearest':
for idx,xNew in enumerate(self.pos):
if xNew < self.bounds[idx][0]:
self.pos[idx] = self.bounds[idx][0]
#self.vel[idx] = 0
elif xNew > self.bounds[idx][1]:
self.pos[idx] = self.bounds[idx][1]
#self.vel[idx] = 0
class PSO:
def __init__(self,costFunc,x0,bounds,numParticles,maxRepeat,maxIter,params):
self.costBestVal = None
self.posGlobBest = None
self.iterGlobBest = None
self.costFunc = costFunc
self.numParticles = numParticles
self.maxIter = maxIter
self.maxRepeat = maxRepeat
self.bounds = bounds
self.params = params
self.x0 = x0
def optimize(self,verbose=False):
allResultsDict = {}
print(self.params)
#repeat M times to get mean values for parameter set
for repeatIter in range(self.maxRepeat):
self.swarm = [Particle(self.x0,self.bounds,self.params) for i in range(self.numParticles)]
self.currentDiff = None
self.costBestVal = None
self.posGlobBest = None
iterResults = []
#for N number of iterations per independent run
for idx in range(self.maxIter):
for particle in self.swarm:
#for every particle, calculate new particle val and position at current iteration step
particle.calc(self.costFunc, idx)
                    #update global cost function value and position based on all the new particle positions and values
if self.costBestVal is None or particle.val < self.costBestVal:
                        #calculate the current iteration's global best value differential for convergence
self.currentDiff = abs(subtract(self.costBestVal,particle.val)) if idx != 0 else None
self.costBestVal = particle.val
self.posGlobBest = particle.pos
iterResults.append(append(self.posGlobBest,[self.costBestVal,self.currentDiff]))
                #store the index at which differential convergence first happens
try:
if idx != 0 and self.currentDiff is not None and abs(self.currentDiff) <= self.params['rel_tol'] and self.iterGlobBest is None:
self.iterGlobBest = idx
except:
embed()
#update all particles with new global best value
for particle in self.swarm:
particle.update_position(self.posGlobBest)
if verbose:
print('Iter: {}/{} - CostFunc: {}, val: {}, df: {}'.format(idx,self.maxIter,self.posGlobBest,self.costBestVal,self.currentDiff))
print('{} / {} - CostFunc: {}, val: {}'.format(repeatIter,self.maxRepeat,self.posGlobBest,self.costBestVal))
allResultsDict[repeatIter] = array(iterResults)
#calculate mean and std values for later plotting
repeatResults = array([v.T[-2].T for v in allResultsDict.values()]).T.astype(float)
bestRun, bestIter = divmod(repeatResults.T.argmin(),repeatResults.T.shape[1])
meanVals = mean(repeatResults,axis=1)
meanPos = array([mean(i,axis=1) for i in array([v.T[:-2].T for v in allResultsDict.values()]).T])
meanPosVal = vstack([meanPos,meanVals]).astype(float)
results = {'minVal': float(repeatResults.T[bestRun][bestIter]), 'x*': (allResultsDict[bestRun][bestIter][:-2]).astype(float), 'iter': bestIter, 'relTolPass': True, 'meanPosVal': meanPosVal,'meanRepeatValues': meanVals, 'stdRepeatValues': std(repeatResults,axis=1)}
if self.iterGlobBest is None:
results['iter'] = idx
results['relTolPass'] = False
#if getAllResults:
# results['allResultsDict'] = allResultsDict
return results
def P1(x,**kwargs):
x0 = x[:-1]
x1 = x[1:]
return sum(100.0*(x1 - x0**2.0)**2.0 + (1 - x0)**2.0)
def P2(x,**kwargs):
return (10*len(x) + sum(x**2 - 10*cos(2*pi*x)))
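# Quick sanity checks for the two unconstrained benchmarks above (known analytic minima,
# added as a reference, not called by the code):
#   P1 is the Rosenbrock function: P1(array([1., 1.])) == 0.0, minimised at x* = (1, ..., 1)
#   P2 is the Rastrigin function:  P2(zeros(2))        == 0.0, minimised at x* = (0, ..., 0)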
def P3(x,**kwargs):
#https://www.wolframalpha.com/input/?i=extrema+%5B%2F%2Fmath%3Ax%5E2+%2B+0.5*x+%2B+3*x*y+%2B+5*y%5E2%2F%2F%5D+subject+to+%5B%2F%2Fmath%3A3*x%2B2*y%2B2%3C%3D0%2C15*x-3*y-1%3C%3D0%2C-1%3C%3Dx%3C%3D1%2C-1%3C%3Dy%3C%3D1%2F%2F%5D++
#(-0.8064516129032258, 0.20967741935483872)
fx = x[0]**2 + 0.5*x[0] + 3*x[0]*x[1] + 5*x[1]**2
gx1 = 3*x[0] + 2*x[1] + 2
gx2 = 15*x[0] - 3*x[1] - 1
psi = max([0,gx1])**2 + max([0,gx2])**2
return (fx + kwargs['penaltyParam']*psi)
def P4(x,**kwargs):
t1 = sum(cos(x)**4)
t2 = prod(cos(x)**2)
t3 = sum((arange(len(x))+1)*x**2)
gx1 = 0.75 - prod(x)
gx2 = sum(x) - (7.5*len(x))
fx = divide(-abs(t1-2*t2),sqrt(t3))
if isnan(fx):
fx = 0
psi = max([0,gx1])**2 + max([0,gx2])**2
if 'onlyFx' in kwargs.keys() and kwargs['onlyFx']:
return fx
return fx + kwargs['penaltyParam']*psi
class P5:
def __init__(self,**kwargs):
self.time, self.displacement = loadtxt('MeasuredResponse.dat').T
self.m = 1.0
self.F0 = 1.0
self.w = 0.1
def groundTruth(self):
return array([self.time,self.displacement])
def evaluate(self,x):
c,k = x
alpha = arctan2(c*self.w,(k-self.m*self.w**2))
C = sqrt((k-self.m*self.w**2)**2 + (c*self.w)**2)
w_d = cmath.sqrt((k/self.m)-(c/(2*self.m))**2).real
A = -self.F0/C*cos(alpha)
B = -(divide(self.F0,(C*w_d)))*(self.w*sin(alpha) + (c/(2*self.m))*cos(alpha))
u_t = (A*cos(w_d*self.time) + B*sin(w_d*self.time))*exp(divide(-(c*self.time),(2*(self.m)))) + (self.F0/C)*cos(self.w*self.time - alpha)
if isnan(u_t).any():
u_t = 0
return u_t
def costFuncVal(self,x,**kwargs):
u_t = self.evaluate(x)
RMSE = sqrt(square(subtract(u_t,self.displacement)).mean())
return RMSE
# c1 – cognitive parameter (confidence in itself)
# c2 – social parameter (confidence in the swarm)
# w – inertial weight (inertia of a particle)
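# These map onto the standard PSO velocity update used in Particle.update_position:
#   v_i <- w*v_i + c1*r1*(p_best_i - x_i) + c2*r2*(g_best - x_i)
#   x_i <- x_i + v_i
# with r1, r2 drawn independently from U(0, 1) on every update.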
# P1
def runPS1(xSize,params):
print('PS1:\n')
bounds = array([(-5,5)]*xSize)
x0 = random.uniform(-5,5,xSize)
return PSO(P1,x0,bounds,numParticles=max([200,xSize*10]),maxRepeat=10,maxIter=100,params=params).optimize(verbose=False)
def runPS2(xSize,params):
print('PS2:\n')
bounds = array([(-5,5)]*xSize)
x0 = random.uniform(-5,5,xSize)
return PSO(P2,x0,bounds,numParticles=max([200,xSize*10]),maxRepeat=10,maxIter=100,params=params).optimize(verbose=False)
#(-0.8064516129032258, 0.20967741935483872)
def runPS3(xSize,params):
print('PS3:\n')
bounds = array([(-1,1)]*xSize)
x0 = random.uniform(-1,1,xSize)
return PSO(P3,x0,bounds,numParticles=max([200,xSize*10]),maxRepeat=10,maxIter=100,params=params).optimize(verbose=False)
def runPS4(xSize, params):
print('PS4:\n')
bounds = array([(0,10)]*xSize)
x0 = random.uniform(0.5,4,xSize)
return PSO(P4,x0,bounds,numParticles=max([200,xSize*10]),maxRepeat=10,maxIter=100,params=params).optimize(verbose=False)
def runPS5(xSize, params):
print('PS5:\n')
bounds = array([(0,100)]*xSize)
x0 = random.uniform(0,20,xSize)
oscil = P5()
return PSO(oscil.costFuncVal,x0,bounds,numParticles=max([200,xSize*10]),maxRepeat=10,maxIter=100,params=params).optimize(verbose=False)
#runPS1()
#runPS3()
# P2
#n = 2
#bounds = [(-5,5)]*n
# P3
def plotPS4_3D(ps4Results):
x = linspace(0, 10, 100)
y = linspace(0, 10, 100)
X, Y = meshgrid(x, y)
Z = []
xx = X.flatten()
yy = Y.flatten()
for idx in range(xx.size):
Z.append(P4(x=array([xx[idx], yy[idx]]),onlyFx=True))
Z = array(Z).reshape(X.shape)
fig = plt.figure()
ax = plt.axes(projection="3d")
#ax.plot_wireframe(X, Y, Z, color='green')
ax = plt.axes(projection='3d')
ax.plot3D(*ps4Results['meanPosVal'],'ro',alpha=0.7,zorder=2)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap='viridis', edgecolor='none', antialiased=True, zorder=1, alpha=0.5)
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_zlabel('$f (x_n)$')
ax.set_title('P4: Bump Function')
plt.show()
#grid searched parameter combinations
paramCombinations = [
{'w': 0.3, 'c1': 0.5, 'c2': 1.5, 'rel_tol': 1e-9, 'penalty': 5, 'boundMethod': 'reflect'},
{'w': 0.3, 'c1': 3.5, 'c2': 0.5, 'rel_tol': 1e-9, 'penalty': 5, 'boundMethod': 'reflect'},
{'w': 0.3, 'c1': 1.5, 'c2': 1.5, 'rel_tol': 1e-9, 'penalty': 5, 'boundMethod': 'reflect'},
{'w': 0.5, 'c1': 0.5, 'c2': 3.5, 'rel_tol': 1e-9, 'penalty': 5, 'boundMethod': 'reflect'},
{'w': 0.5, 'c1': 3.5, 'c2': 0.5, 'rel_tol': 1e-9, 'penalty': 5, 'boundMethod': 'reflect'},
{'w': 0.5, 'c1': 1.5, 'c2': 1.5, 'rel_tol': 1e-9, 'penalty': 5, 'boundMethod': 'reflect'},
{'w': 0.7, 'c1': 0.5, 'c2': 3.5, 'rel_tol': 1e-9, 'penalty': 5, 'boundMethod': 'reflect'},
{'w': 0.7, 'c1': 3.5, 'c2': 0.5, 'rel_tol': 1e-9, 'penalty': 5, 'boundMethod': 'reflect'},
{'w': 0.7, 'c1': 1.5, 'c2': 1.5, 'rel_tol': 1e-9, 'penalty': 5, 'boundMethod': 'reflect'},
{'w': 0.9, 'c1': 0.5, 'c2': 3.5, 'rel_tol': 1e-9, 'penalty': 5, 'boundMethod': 'reflect'},
{'w': 0.9, 'c1': 3.5, 'c2': 0.5, 'rel_tol': 1e-9, 'penalty': 5, 'boundMethod': 'reflect'},
{'w': 0.9, 'c1': 1.5, 'c2': 1.5, 'rel_tol': 1e-9, 'penalty': 5, 'boundMethod': 'reflect'},
]
def plotPS4_iter(dim,paramCombinations):
fig, ax = plt.subplots()
for paramsCombo in paramCombinations:
ps4Results = runPS4(dim,paramsCombo)
penalty = 'rho: {}'.format(paramsCombo['penalty']) if paramsCombo.get('penalty',False) else 'rho_static: {}'.format(paramsCombo['penaltyStatic'])
bound = 'boundMethod: {}'.format(paramsCombo['boundMethod'])
ax.plot(ps4Results['meanRepeatValues'],label='minVal:{:.8f}, cIter: {} - w: {:.5f}, c1: {:.5f}, c2: {:.5f}, {}, {}'.format(ps4Results['minVal'],ps4Results['iter'],paramsCombo['w'],paramsCombo['c1'],paramsCombo['c2'],penalty,bound))
ax.fill_between(range(len(ps4Results['meanRepeatValues'])), ps4Results['meanRepeatValues']-ps4Results['stdRepeatValues'], ps4Results['meanRepeatValues']+ps4Results['stdRepeatValues'], alpha=0.2)
ax.legend()
ax.set_title('PS4 Bump Test n={} - Objective Func. Val (w/ Quad. Penalty) vs. Iteration Number'.format(dim))
ax.set_xlabel('Iteration #')
ax.set_ylabel(r'$\pi(x,\rho)$')
ax.minorticks_on()
ax.grid(True, which='both')
plt.show()
def plotPS5(results):
fig, ax = plt.subplots()
oscil = P5()
ax.plot(*oscil.groundTruth(),label='Measured Response')
ax.plot(oscil.time,oscil.evaluate(results['x*']),label='Estimated Response - c: {:.6f}, k: {:.6f}'.format(*results['x*']))
ax.legend()
ax.set_title('PS5 Dynamic Response - Displacement Response vs. Time')
ax.set_xlabel('Time [t]')
ax.set_ylabel('Displace response [u]')
ax.minorticks_on()
ax.grid(True, which='both')
plt.show()
return results
#--------------------------------------------------------------------------------------------------------------
plotPS4_iter(2,paramCombinations)
#plotPS4_iter(50,paramCombinations)
selectParams = {'w': 0.5, 'c1': 1.5, 'c2': 1.5, 'rel_tol': 1e-9, 'penalty': 5, 'boundMethod': 'reflect'}
ps4_n2 = runPS4(2,selectParams)
print('\nPS4 N=2 Soln:\nx* = {}\nf(x*) = {:.6f}'.format(ps4_n2['x*'],ps4_n2['minVal']))
plotPS4_3D(ps4_n2)
ps4_n10 = runPS4(10,selectParams)
print('\nPS4 N=10 Soln:\nx* = {}\nf(x*) = {:.6f}'.format(ps4_n10['x*'],ps4_n10['minVal']))
ps4_n50 = runPS4(50,selectParams)
print('\nPS4 N=50 Soln:\nx* = {}\nf(x*) = {:.6f}'.format(ps4_n50['x*'],ps4_n50['minVal']))
#--------------------------------------------------------------------------------------------------------------
for n in [2,5]:
ps1 = runPS1(n,selectParams)
print('\nPS1 N={} Soln:\nx* = {}\nf(x*) = {:.6f}\n'.format(n,ps1['x*'],ps1['minVal']))
#--------------------------------------------------------------------------------------------------------------
for n in [2,5]:
ps2 = runPS2(n,selectParams)
print('\nPS2 N={} Soln:\nx* = {}\nf(x*) = {:.6f}\n'.format(n,ps2['x*'],ps2['minVal']))
#--------------------------------------------------------------------------------------------------------------
ps3 = runPS3(2,selectParams)
print('\nPS3 N={} Soln:\nx* = {}\nf(x*) = {:.6f}\n'.format(2,ps3['x*'],ps3['minVal']))
#--------------------------------------------------------------------------------------------------------------
ps5 = plotPS5(runPS5(2,selectParams))
print('\nPS5 N={} Soln:\nx* = {}\nf(x*) = {:.6f}\n'.format(2,ps5['x*'],ps5['minVal']))
embed()
#
|
StarcoderdataPython
|
1664639
|
<filename>dev/python/2018-12-08 discontinuous sweeps.py
"""
Ensure discontinuous sweeps (sweeps whose sweep length is less than their inter-
sweep interval) are loaded properly and work with comments.
I confirmed there is a problem on 2018_11_16_sh_0006.abf: print(abf) says total
length of 0.10 min when it should be 5 minutes.
Inter-sweep interval seems to be defined by fEpisodeStartToStart.
"""
import os
import sys
PATH_HERE = os.path.abspath(os.path.dirname(__file__))
PATH_DATA = os.path.abspath(PATH_HERE+"../../../data/abfs/")
PATH_SRC = os.path.abspath(PATH_HERE+"../../../src/")
sys.path.insert(0, PATH_SRC)
import pyabf
import glob
if __name__ == "__main__":
print("ABFs where sweep length != sweep interval:")
print("interval\tlength\t\tABF")
for abfFilePath in glob.glob(PATH_DATA+"/*.abf"):
abf = pyabf.ABF(abfFilePath)
if abf.sweepIntervalSec != abf.sweepLengthSec:
print("%.02f\t\t%.02f\t\t%s" %
(abf.sweepIntervalSec, abf.sweepLengthSec, abf.abfFilePath))
abf.setSweep(1, absoluteTime=True)
assert(abf.sweepX[0])==abf.sweepIntervalSec
|
StarcoderdataPython
|
84836
|
import pronto, six, csv
from sys import *
reader = csv.DictReader(open('goid.tab', 'r'), delimiter='\t')
efs = {}
for item in reader:
go = item.get('goid')
iturl = item.get('p')
it = iturl[iturl.rfind('/')+1:]
git = efs.get(go)
if git is None:
efs[go] = it
else:
print('============= {}'.format(go))
reader = csv.DictReader(open('t.tab', 'r'), delimiter='\t')
mfs = {}
for item in reader:
ec = item.get('ecLabel')
iturl = item.get('p')
it = iturl[iturl.rfind('/')+1:]
git = mfs.get(it)
if git is None:
mfs[it] = ec
else:
print(it)
reader = csv.DictReader(open('efnames.tab', 'r'), delimiter='\t')
ns = {}
for item in reader:
ec = item.get('pLabel')
iturl = item.get('p')
it = iturl[iturl.rfind('/')+1:]
git = ns.get(it)
if git is None:
ns[it] = ec
else:
print(it)
reader = csv.DictReader(open('mf.tab', 'r'), delimiter='\t')
ms = {}
for item in reader:
lab = item.get('pLabel')
iturl = item.get('p')
it = iturl[iturl.rfind('/')+1:]
git = ms.get(it)
if git is None:
ms[it] = lab
else:
print(it)
reader = csv.DictReader(open('ec2go.tab', 'r'), delimiter='\t')
ecgo = {}
for item in reader:
ec = item.get('ec')
go = item.get('goid')
git = ecgo.get(ec)
if git is None:
ecgo[ec] = [go]
else:
git.append(go)
for tup in mfs.items():
red = False
ec = tup[1].replace('.-', '')
goecl = ecgo.get(ec)
while goecl is None:
red = True
ec = ec[:ec.rfind('.')]
goecl = ecgo.get(ec)
if None is efs.get(goecl[0]):
print(tup[0])
continue
if len(goecl) == 1:
pass #print('{}|P680|{}'.format(tup[0], efs.get(goecl[0]), goecl[0]))
else:
for e in goecl:
print('{}|P680|{} x {} {}'.format(tup[0], efs.get(e), ns.get(tup[0]), ms.get(efs.get(e))))
|
StarcoderdataPython
|
45983
|
"""
Present both functional and object-oriented interfaces for executing
lookups in Hesiod, Project Athena's service name resolution protocol.
"""
from _hesiod import bind, resolve
from pwd import struct_passwd
from grp import struct_group
class HesiodParseError(Exception):
pass
class Lookup(object):
"""
A Generic Hesiod lookup
"""
def __init__(self, hes_name, hes_type):
self.results = resolve(hes_name, hes_type)
self.parseRecords()
def parseRecords(self):
pass
class FilsysLookup(Lookup):
def __init__(self, name):
Lookup.__init__(self, name, 'filsys')
def parseRecords(self):
Lookup.parseRecords(self)
self.filsys = []
self.multiRecords = (len(self.results) > 1)
for result in self.results:
priority = 0
if self.multiRecords:
result, priority = result.rsplit(" ", 1)
priority = int(priority)
parts = result.split(" ")
type = parts[0]
if type == 'AFS':
self.filsys.append(dict(type=type,
location=parts[1],
mode=parts[2],
mountpoint=parts[3],
priority=priority))
elif type == 'NFS':
self.filsys.append(dict(type=type,
remote_location=parts[1],
server=parts[2],
mode=parts[3],
mountpoint=parts[4],
priority=priority))
elif type == 'ERR':
self.filsys.append(dict(type=type,
message=parts[1],
priority=priority))
elif type == 'UFS':
self.filsys.append(dict(type=type,
device=parts[1],
mode=parts[2],
mountpoint=parts[3],
priority=priority))
elif type == 'LOC':
self.filsys.append(dict(type=type,
location=parts[1],
mode=parts[2],
mountpoint=parts[3],
priority=priority))
else:
raise HesiodParseError('Unknown filsys type: %s' % type)
self.filsys.sort(key=(lambda x: x['priority']))
class PasswdLookup(Lookup):
def __init__(self, name):
Lookup.__init__(self, name, 'passwd')
def parseRecords(self):
passwd_info = self.results[0].split(':')
passwd_info[2] = int(passwd_info[2])
passwd_info[3] = int(passwd_info[3])
self.passwd = struct_passwd(passwd_info)
class UidLookup(PasswdLookup):
def __init__(self, uid):
Lookup.__init__(self, uid, 'uid')
class GroupLookup(Lookup):
def __init__(self, group):
Lookup.__init__(self, group, 'group')
def parseRecords(self):
group_info = self.results[0].split(':')
group_info[2] = int(group_info[2])
members = group_info[3]
if members != '':
members = members.split(',')
else:
members = []
group_info[3] = members
self.group = struct_group(group_info)
class GidLookup(GroupLookup):
def __init__(self, gid):
Lookup.__init__(self, gid, 'gid')
__all__ = ['bind', 'resolve',
'Lookup', 'FilsysLookup', 'PasswdLookup', 'UidLookup',
'GroupLookup', 'GidLookup',
'HesiodParseError']
|
StarcoderdataPython
|
1664159
|
# -*- coding: utf-8 -*-
N = int(input())
MONEY = [100, 50, 20, 10, 5, 2, 1]
print(N)
for i in range(7):
print("%d nota(s) de R$ %d,00" % (N / MONEY[i], MONEY[i]))
N = N % MONEY[i]
|
StarcoderdataPython
|
1752188
|
from platypush.backend.sensor import SensorBackend
class SensorSerialBackend(SensorBackend):
"""
This backend listens for new events from sensors connected through a serial
interface (like Arduino) acting as a wrapper for the ``serial`` plugin.
Requires:
* The :mod:`platypush.plugins.serial` plugin configured
"""
def __init__(self, **kwargs):
super().__init__(plugin='serial', **kwargs)
# vim:sw=4:ts=4:et:
|
StarcoderdataPython
|
140693
|
from abc import ABCMeta, abstractmethod
from tgt_grease.core import Logging, GreaseContainer
from datetime import datetime
import sys
import os
import traceback
class Command(object):
"""Abstract class for commands in GREASE
Attributes:
__metaclass__ (ABCMeta): Metadata class object
purpose (str): The purpose of the command
help (str): Help string for the command line
__author__ (str): Authorship string
__version__ (str): Command Version
os_needed (str): If a specific OS is needed then set this
ioc (GreaseContainer): IOC container for access to system resources
variable_storage (pymongo.collection): collection object for command
"""
###
# Command Metadata information
###
purpose = "Default"
help = """
No Help Information Provided
"""
__author__ = "<NAME>"
__version__ = "1.0.0"
os_needed = None
__metaclass__ = ABCMeta
def __init__(self, Logger=None):
if Logging and isinstance(Logger, Logging):
self.ioc = GreaseContainer(Logger)
else:
self.ioc = GreaseContainer()
self.variable_storage = self.ioc.getMongo()\
.Client()\
.get_database(self.ioc.getConfig().get('Connectivity', 'MongoDB').get('db', 'grease'))\
.get_collection(self.__class__.__name__)
self.start_time = datetime.utcnow()
self.exec_data = {'execVal': False, 'retVal': False, 'data': {}}
self.__failures = 0
@property
def failures(self):
return self.__failures
@failures.setter
def failures(self, val):
self.__failures = val
def getExecVal(self):
"""Get the execution attempt success
Returns:
bool: If the command executed without exception
"""
return self.exec_data.get('execVal', False)
def getRetVal(self):
"""Get the execution boolean return state
Returns:
bool: the boolean return value of execute
"""
return self.exec_data.get('retVal', False)
def getData(self):
"""Get any data the execute method wanted to put into telemetry
Returns:
dict: The Key/Value pairs from the execute method execution
"""
return self.exec_data.get('data', {})
def setData(self, Key, Data):
"""Put Data into the data object to be inserted into telemetry
Args:
Key (str): Key for the data to be stored
Data (object): JSON-able object to store
Returns:
None: Void Method to put data
"""
self.exec_data['data'][Key] = Data
def __del__(self):
# close mongo connection
self.ioc.getMongo().Close()
def safe_execute(self, context=None):
"""Attempt execution and prevent MOST exceptions
Args:
context (dict): context for the command to use
Returns:
None: Void method to attempt exceptions
"""
if not context:
context = {}
try:
try:
self.exec_data['execVal'] = True
self.exec_data['retVal'] = bool(self.execute(context))
except BaseException:
self.exec_data['execVal'] = False
exc_type, exc_obj, exc_tb = sys.exc_info()
tb = traceback.format_exception(exc_type, exc_obj, exc_tb)
self.ioc.getLogger().error(
"Failed to execute [{0}] execute got exception!".format(self.__class__.__name__),
additional={
'file': os.path.split(str(str(tb[2]).split('"')[1]))[1],
'type': str(exc_type),
'line': str(str(tb[2]).split(",")[1]).split(' ')[2]
}
)
except:
self.ioc.getLogger().error(
"Failed to execute [{0}] execute got exception!".format(self.__class__.__name__),
)
except:
self.ioc.getLogger().error(
"Failed to execute [{0}] execute major exception".format(self.__class__.__name__),
)
@abstractmethod
def execute(self, context):
"""Base Execute Method
This method should *always* be overridden in child classes. This is the code that will run when your command
is called. If this method is not implemented then the class will fail loading.
Args:
context (dict): context for the command to use
Returns:
bool: Command Success
"""
pass
def prevent_retries(self):
"""
Sets a flag in the command's return data that will signal to stop retrying, even before the default
retry limit is met.
"""
self.setData("no_retry", True)
|
StarcoderdataPython
|
159219
|
<reponame>gabrielpetersson/rnngen
import numpy as np
def vec_word(word_vecs, dic, dim=2, rev=False):
if rev:
dic = {value: letter for letter, value in dic.items()}
if dim == 1:
res = dic[np.argmax(word_vecs)]
return res
if dim == 2:
res = ''
for letter in word_vecs:
res = res + dic[np.argmax(letter)] + ' '
return res
if dim == 3:
res = ''
for letter in word_vecs:
for let in letter:
res = res + dic[np.argmax(let)] + ' '
res = res + '\n'
return res
def id_word(letters, dic, dim=2):
dic = {value: letter for letter, value in dic.items()}
if dim == 1:
res = dic[letters]
return res
if dim == 2:
res = ''
for letter in letters:
res = res + dic[letter] + ' '
return res
if dim == 3:
res = ''
for letter in letters:
for let in letter:
res = res + dic[let]
res = res + '\n\n'
return res
def word_id(letters, dic, dim=2):
if dim == 1:
res = dic[letters]
return res
if dim == 2:
res = ''
for letter in letters:
res = res + dic[letter]
return res
if dim == 3:
res = ''
for letter in letters:
for let in letter:
res = res + dic[let]
res = res + '\n\n'
return res
|
StarcoderdataPython
|
164241
|
# -*- coding: utf-8 -*-
from pymongo import MongoClient
import settings
import datetime
# connect to MongoDB
client=MongoClient('mongodb://127.0.0.1:27017/')
db = client[settings.DBNAME]
def getCollection(collection):
if(collection):
return db[collection]
else:
return None
def saveWeibo(article):
articleCollection = getCollection('tempArticle')
    if articleCollection.find_one({"itemid": article['itemid']}):
        print {"_id": None, "message": "a matching record already exists", "isQuery": "false"}
        return
    _id = articleCollection.insert_one(article).inserted_id
    print {"_id": str(_id), "message": "saved successfully"}
|
StarcoderdataPython
|
3316836
|
# Simple Math
# We're going to work with print statements to output the results to the screen.
# You can separate multiple print items with a comma, as shown below:
print("Four times four is ", 4 * 4)
# Addition
print("5 + 3 is ", 5 + 3)
# Subtraction
print("6 - 2 is ", 6 - 2)
# Multiplication
print("10 * 20 is ", 10 * 20)
# Division
print("55 / 2 is ", 55 / 2) # Hmm ... !
print("By default, Python treats numbers as whole numbers (integers)")
print("So in a way, Python's answer of 55 / 2 = 27 makes sense, even if it's not quite what we're looking for.")
print("Luckily, there are other ways to get the answer we want.")
# Precise Division (using floats)
print("55.0 / 2 is ", 55.0 / 2)
print("55 / 2.0 is ", 55 / 2.0)
print("55.0 / 2.0 is ", 55.0 / 2.0)
# Remainder Division, %, the "modulo"
print("55 % 2 is ", 55 % 2) # This is super useful for determining whether a number is odd or even
# Powers, **
print("2 ** 10 is ", 2 ** 10)
|
StarcoderdataPython
|
4804748
|
class Solution:
def numIdenticalPairs(self, nums: List[int]) -> int:
def nCr(n):
import math
f = math.factorial
return f(n) / f(2) / f(n-2)
s=set(nums)
b=0
for i in s:
x=nums.count(i)
if x>=2:
b=b+nCr(x)
return int(b)
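# Worked example (illustrative only, not part of the submitted solution):
# nums = [1, 2, 3, 1, 1, 3] -> good pairs are (0,3), (0,4), (3,4) and (2,5),
# i.e. C(3,2) pairs of 1s plus C(2,2) pairs of 3s, so numIdenticalPairs returns 4.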
|
StarcoderdataPython
|
3304432
|
from django.db import models
class Task(models.Model):
"""
    Abstract task parent. Tasks are sometimes generated to run asynchronously.
    A worker will run these tasks one by one when enough resources are free.
"""
VERY_LOW = 0
LOW = 1
NORMAL = 2
HIGH = 3
VERY_HIGH = 4
PRIORITIES = (
#bulk data updates
(VERY_LOW, "Very Low"),
(LOW, "Low"),
#startup some cache user has not visited yet
(NORMAL, "Normal"),
#user requests that need to update cache
(HIGH, "High"),
#user requests that have no cache yet
(VERY_HIGH, "Very High"),
)
active = models.BooleanField(default=False)
date = models.DateTimeField(auto_now_add=True)
priority = models.IntegerField(choices=PRIORITIES)
class Meta:
abstract = True
ordering = ["-date"]
|
StarcoderdataPython
|
1605289
|
<reponame>naokishibuya/simple_transformer
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class MultiHeadAttention(nn.Module):
""" Multi-head attention runs multiple attention calculations in parallel.
"""
def __init__(self, num_heads: int, dim_embed: int, drop_prob: float) -> None:
super().__init__()
assert dim_embed % num_heads == 0
# num_head x dim_head = dim_embed
self.num_heads = num_heads
self.dim_embed = dim_embed
self.dim_head = dim_embed // num_heads
# Linear operations and dropout
self.query = nn.Linear(dim_embed, dim_embed)
self.key = nn.Linear(dim_embed, dim_embed)
self.value = nn.Linear(dim_embed, dim_embed)
self.output = nn.Linear(dim_embed, dim_embed)
self.dropout = nn.Dropout(drop_prob)
def forward(self, x: Tensor, y: Tensor, mask: Tensor=None) -> Tensor:
""" The main idea is to apply multiple attention operations on the same set of inputs.
        Instead of using separate linear operations and concatenating them later, we do a single
        linear operation per query, key, value and rearrange the result into independent heads by
        partitioning dim_embed into num_heads x dim_head.
        The method applies the linear operations to extract query, key, and value tensors,
        then splits dim_embed into num_heads x dim_head.
"""
# linear transformation in one shot per query, key, value
query = self.query(x)
key = self.key (y)
value = self.value(y)
# Note: max here is within a batch and it's either for target or source batch
# (batch_size, max_sequence_length, dim_embed) =>
# (batch_size, max_sequence_length, num_heads, dim_head) =>
# (batch_size, num_heads, max_sequence_length, dim_head)
batch_size = x.size(0)
query = query.view(batch_size, -1, self.num_heads, self.dim_head).transpose(1, 2)
key = key .view(batch_size, -1, self.num_heads, self.dim_head).transpose(1, 2)
value = value.view(batch_size, -1, self.num_heads, self.dim_head).transpose(1, 2)
if mask is not None:
# Mask needs to have an extra dimension to be broadcastable across multiple heads
# - Encoder self-attention: (batch_size, 1, 1, max_source_sequence_length)
# - Decoder self-attention: (batch_size, 1, max_target_sequence_length, max_target_sequence_length)
mask = mask.unsqueeze(1)
# Applies the attention function on all heads in parallel
attn = attention(query, key, value, mask)
# Restores the original shapes:
# (batch_size, num_heads, max_sequence_length, dim_head) =>
# (batch_size, max_sequence_length, num_heads, dim_head) =>
# (batch_size, max_sequence_length, dim_embed)
attn = attn.transpose(1, 2).contiguous().view(batch_size, -1, self.dim_embed)
# Finally, applies one more linear operation and dropout
out = self.dropout(self.output(attn))
return out
def attention(query: Tensor, key: Tensor, value: Tensor, mask: Tensor=None) -> Tensor:
""" Attention calculator used by the multi-headed attention.
[1] For self-attention, query (Q), key (K), value (V) all have the same shape:
- Q, K, V: (batch_size, num_heads, max_sequence_length, dim_head)
Note: these max sequence length is determined per batch.
Attention scores will be calculated by the scaled dot-product divided by the square-root of dim_head.
- Scores : (batch_size, num_heads, max_sequence_length, max_sequence_length)
It tells us which token is relevant to which tokens within the same sequence.
[2] For target-source attention, Q and K may have different max sequence length:
- Q : (batch_size, num_heads, max_target_sequence_length, dim_head)
- K, V : (batch_size, num_heads, max_source_sequence_length, dim_head)
Note: these max sequence lengths are determined per batch.
- Scores : (batch_size, num_heads, max_target_sequence_length, max_source_sequence_length)
It tells us which token in the target sequence is relevant to which tokens in the source sequence.
[3] Mask is used to make certain tokens excluded from attention scores.
For Encoder, PAD_IDX tokens are masked which has the following broadcastable shape:
- Encoder self-attention : (batch_size, 1, 1, max_source_sequence_length)
Note:
- The second dimension has 1 which is broadcasted across the number of heads.
- The third dimension has 1 which is broadcasted across the number of source sequence tokens.
For Decoder, PAD_IDX and subsequent tokens are masked:
- Decoder self-attention : (batch_size, 1, max_target_sequence_length, max_target_sequence_length)
Note:
- The second dimension has 1 which is broadcasted across the number of heads.
For Decoder-Encoder link, PAD_IDX tokens are masked in the source tokens:
- Target-source attention: (batch_size, 1, 1, max_source_sequence_length)
Note:
- The second dimension has 1 which is broadcasted across the number of heads.
- The third dimension has 1 which is broadcasted across the number of target sequence tokens.
"""
sqrt_dim_head = query.shape[-1]**0.5 # sqrt(dim_head)
# Scaled Dot-Product by matrix operation: Q K^T / sqrt(dim_head)
scores = torch.matmul(query, key.transpose(-2, -1))
scores = scores / sqrt_dim_head
if mask is not None:
# Sets large negative value to masked token positions - softmax will give effectively zero probability to them.
scores = scores.masked_fill(mask==0, -1e9)
# Attention weighted value
weight = F.softmax(scores, dim=-1)
return torch.matmul(weight, value)
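# Minimal shape sanity check (added for illustration; not part of the original module).
# Assumes self-attention with batch_size=2, sequence length 10, dim_embed=512, 8 heads.
if __name__ == "__main__":
    _mha = MultiHeadAttention(num_heads=8, dim_embed=512, drop_prob=0.1)
    _x = torch.rand(2, 10, 512)
    _out = _mha(_x, _x)  # query source == key/value source, no mask
    assert _out.shape == (2, 10, 512)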
|
StarcoderdataPython
|
3340531
|
<reponame>totorigolo/genetic_snake
import logging
import os
import signal
import time
from ga_snake.snake_game_executor import SnakeGameExecutor
log = logging.getLogger("training")
class Training(object):
def __init__(self, args):
self.args = args
self.run_counter = 0
self.main_pid = None
self.execution_stopped = False
def initial_batch(self):
raise NotImplementedError()
def create_batch_from_results(self, results):
""" Create the next batch and return an estimate of the work
done in the previous batch.
:param results: the list of all the results for the previous batch
:return: (new_batch, estimate_work_previous_batch)
"""
raise NotImplementedError()
def training_interrupted(self):
pass
def train(self):
if self.execution_stopped:
log.warning("Can't train: the execution has been stopped.")
return
# Set our own signal handler for SIGINT
signal.signal(signal.SIGINT, self.__signal_handler)
self.main_pid = os.getpid()
global_start_time = time.time()
with SnakeGameExecutor(self.args) as executor:
batch = self.initial_batch()
while len(batch) > 0:
start_time = time.time()
batch_size = len(batch)
log.info('Running new batch: %d jobs.', batch_size)
self.run_counter += batch_size
results = executor.run_batch(batch)
batch, qty_work_prev = self.create_batch_from_results(results)
batch_duration = time.time() - start_time
log.info('Batch: %d simulations (%g W) in %g sec:'
'\n => %g sim/sec'
'\n => %g W/sec.',
batch_size, qty_work_prev, batch_duration,
batch_size / batch_duration,
qty_work_prev / batch_duration)
if self.execution_stopped:
self.training_interrupted()
break
training_duration = time.time() - global_start_time
log.info('Trained with %d simulations in %g sec.',
self.run_counter, training_duration)
# Unregister the signal handler
signal.signal(signal.SIGINT, signal.SIG_DFL)
def __signal_handler(self, sig, frame):
if os.getpid() != self.main_pid:
return
if sig == signal.SIGINT:
if not self.execution_stopped:
log.critical('SIGINT received, stopping...')
self.execution_stopped = True
# Unregister the signal handler
signal.signal(signal.SIGINT, signal.SIG_DFL)
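# Minimal subclass sketch (illustrative only; the exact job objects handed to
# SnakeGameExecutor are assumptions -- the real batch format is defined elsewhere
# in this package):
#
#     class OneShotTraining(Training):
#         def initial_batch(self):
#             return [...]                     # first generation of jobs
#         def create_batch_from_results(self, results):
#             return [], len(results)          # empty batch ends train(); second value is the work estimate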
|
StarcoderdataPython
|
1656946
|
<filename>src/zenml/utils/secrets_manager_utils.py
# Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import base64
from typing import Dict, Tuple
from zenml.constants import ZENML_SCHEMA_NAME
from zenml.secret.base_secret import BaseSecretSchema
def encode_string(string: str) -> str:
"""Base64 encode a string.
Args:
string: String to encode
Returns:
Encoded string
"""
encoded_bytes = base64.b64encode(string.encode("utf-8"))
return str(encoded_bytes, "utf-8")
def encode_secret(secret: BaseSecretSchema) -> Dict[str, str]:
"""Base64 encode all values within a secret.
Args:
secret: Secret containing key-value pairs
Returns:
Encoded secret Dict containing key-value pairs
"""
encoded_secret = {k: encode_string(v) for k, v in secret.content.items()}
encoded_secret[ZENML_SCHEMA_NAME] = secret.schema_type.value
return encoded_secret
def decode_string(string: str) -> str:
"""Base64 decode a string.
Args:
string: String to decode
Returns:
Decoded string
"""
decoded_bytes = base64.b64decode(string)
return str(decoded_bytes, "utf-8")
def decode_secret_dict(
secret_dict: Dict[str, str]
) -> Tuple[Dict[str, str], str]:
"""Base64 decode a Secret.
Args:
secret_dict: dict containing key-value pairs to decode
Returns:
Decoded secret Dict containing key-value pairs
"""
zenml_schema_name = secret_dict.pop(ZENML_SCHEMA_NAME)
decoded_secret = {k: decode_string(v) for k, v in secret_dict.items()}
return decoded_secret, zenml_schema_name
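# Round-trip illustration (added for clarity; not part of the original module).
# Only the plain string helpers are exercised, since they need no secret schema.
if __name__ == "__main__":
    _encoded = encode_string("my-secret-value")
    assert decode_string(_encoded) == "my-secret-value"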
|
StarcoderdataPython
|
3359032
|
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2020-2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Utility to create compressed, encrypted and signed DFU files.
"""
import argparse
import os
import re
import struct
import sys
import zlib
sys.path.append(os.path.dirname(__file__) + "/../../../tools")
import dfu
try:
import pyhy
except ImportError:
raise SystemExit(
"ERROR: pyhy not found. Please install python pyhy for encrypted mboot support: pip3 install pyhy"
)
# Currenty supported version of a packed DFU file.
MBOOT_PACK_HEADER_VERSION = 1
# Must match MBOOT_PACK_HYDRO_CONTEXT in mboot/pack.h
MBOOT_PACK_HYDRO_CONTEXT = "mbootenc"
# Must match enum in mboot/pack.h.
MBOOT_PACK_CHUNK_META = 0
MBOOT_PACK_CHUNK_FULL_SIG = 1
MBOOT_PACK_CHUNK_FW_RAW = 2
MBOOT_PACK_CHUNK_FW_GZIP = 3
class Keys:
def __init__(self, filename):
self.filename = filename
def generate(self):
kp = pyhy.hydro_sign_keygen()
self.sign_sk = kp.sk
self.sign_pk = kp.pk
self.secretbox = pyhy.hydro_secretbox_keygen()
def _save_data(self, name, data, file_, hide=False):
prefix = "//" if hide else ""
data = ",".join("0x{:02x}".format(b) for b in data)
file_.write("{}const uint8_t {}[] = {{{}}};\n".format(prefix, name, data))
def _load_data(self, name, line):
line = line.split(name + "[] = ")
if len(line) != 2:
raise Exception("malformed input keys: {}".format(line))
data = line[1].strip()
return bytes(int(value, 16) for value in data[1:-2].split(","))
def save(self):
with open(self.filename, "w") as f:
self._save_data("mboot_pack_sign_secret_key", self.sign_sk, f, hide=True)
self._save_data("mboot_pack_sign_public_key", self.sign_pk, f)
self._save_data("mboot_pack_secretbox_key", self.secretbox, f)
def load(self):
with open(self.filename) as f:
self.sign_sk = self._load_data("mboot_pack_sign_secret_key", f.readline())
self.sign_pk = self._load_data("mboot_pack_sign_public_key", f.readline())
self.secretbox = self._load_data("mboot_pack_secretbox_key", f.readline())
def dfu_read(filename):
elems = []
with open(filename, "rb") as f:
hdr = f.read(11)
sig, ver, size, num_targ = struct.unpack("<5sBIB", hdr)
file_offset = 11
for i in range(num_targ):
hdr = f.read(274)
sig, alt, has_name, name, t_size, num_elem = struct.unpack("<6sBi255sII", hdr)
file_offset += 274
file_offset_t = file_offset
for j in range(num_elem):
hdr = f.read(8)
addr, e_size = struct.unpack("<II", hdr)
data = f.read(e_size)
elems.append((addr, data))
file_offset += 8 + e_size
if t_size != file_offset - file_offset_t:
raise Exception("corrupt DFU {} {}".format(t_size, file_offset - file_offset_t))
if size != file_offset:
raise Exception("corrupt DFU {} {}".format(size, file_offset))
hdr = f.read(16)
hdr = struct.unpack("<HHHH3sBI", hdr)
vid_pid = "0x{:04x}:0x{:04x}".format(hdr[2], hdr[1])
return vid_pid, elems
def compress(data):
c = zlib.compressobj(level=9, memLevel=9, wbits=-15) # wsize=15, no header
return c.compress(data) + c.flush()
def encrypt(keys, data):
return pyhy.hydro_secretbox_encrypt(data, 0, MBOOT_PACK_HYDRO_CONTEXT, keys.secretbox)
def sign(keys, data):
return pyhy.hydro_sign_create(data, MBOOT_PACK_HYDRO_CONTEXT, keys.sign_sk)
def pack_chunk(keys, format_, chunk_addr, chunk_payload):
header = struct.pack(
"<BBBBII", MBOOT_PACK_HEADER_VERSION, format_, 0, 0, chunk_addr, len(chunk_payload)
)
chunk = header + chunk_payload
sig = sign(keys, chunk)
chunk = chunk + sig
return chunk
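# Packed chunk layout produced by pack_chunk above (all fields little-endian):
#   +0   1 byte   header version (MBOOT_PACK_HEADER_VERSION)
#   +1   1 byte   chunk format (one of the MBOOT_PACK_CHUNK_* values)
#   +2   2 bytes  reserved, zero
#   +4   4 bytes  chunk address
#   +8   4 bytes  payload length
#   +12  payload  (optionally raw-deflate compressed, then secretbox encrypted)
#   followed by a 64-byte libhydrogen signature over header + payload
#   (verify_pack_dfu relies on this fixed size when it slices chunk[-64:]).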
def data_chunks(data, n):
for i in range(0, len(data), n):
yield data[i : i + n]
def generate_keys(keys, args):
keys.generate()
keys.save()
def pack_dfu(keys, args):
chunk_size = int(args.chunk_size[0])
# Load previously generated keys.
keys.load()
# Read the input DFU file.
vid_pid, elems = dfu_read(args.infile[0])
# Ensure firmware sections are processed in order of destination memory address.
elems = sorted(elems, key=lambda e: e[0])
# Build list of packed chunks.
target = []
full_fw = b""
full_signature_payload = b""
for address, fw in elems:
# Update full firmware and full signature chunk.
full_fw += fw
full_signature_payload += struct.pack("<II", address, len(fw))
# Split the firmware into chunks, encrypt and sign the chunks
# then register them as individual DFU targets.
for i, chunk in enumerate(data_chunks(fw, chunk_size)):
chunk_addr = address + i * chunk_size
if args.gzip:
chunk = compress(chunk)
chunk = encrypt(keys, chunk)
chunk = pack_chunk(
keys,
MBOOT_PACK_CHUNK_FW_GZIP if args.gzip else MBOOT_PACK_CHUNK_FW_RAW,
chunk_addr,
chunk,
)
target.append({"address": chunk_addr, "data": chunk})
# Add full signature to targets, at location following the last chunk.
chunk_addr += chunk_size
sig = sign(keys, full_fw)
full_signature_payload += sig
full_signature_chunk = pack_chunk(
keys, MBOOT_PACK_CHUNK_FULL_SIG, chunk_addr, full_signature_payload
)
target.append({"address": chunk_addr, "data": full_signature_chunk})
# Build the packed DFU file of all the encrypted and signed chunks.
dfu.build(args.outfile[0], [target], vid_pid)
# Verify the packed DFU file.
verify_pack_dfu(keys, args.outfile[0])
def verify_pack_dfu(keys, filename):
"""Verify packed dfu file against keys. Gathers decrypted binary data."""
full_sig = pyhy.hydro_sign(MBOOT_PACK_HYDRO_CONTEXT)
_, elems = dfu_read(filename)
base_addr = None
binary_data = b""
for addr, data in elems:
if base_addr is None:
base_addr = addr
header = struct.unpack("<BBBBII", data[:12])
chunk = data[12 : 12 + header[5]]
sig = data[12 + header[5] :]
sig_pass = pyhy.hydro_sign_verify(
sig, data[:12] + chunk, MBOOT_PACK_HYDRO_CONTEXT, keys.sign_pk
)
assert sig_pass
if header[1] == MBOOT_PACK_CHUNK_FULL_SIG:
actual_sig = chunk[-64:]
else:
chunk = pyhy.hydro_secretbox_decrypt(
chunk, 0, MBOOT_PACK_HYDRO_CONTEXT, keys.secretbox
)
assert chunk is not None
if header[1] == MBOOT_PACK_CHUNK_FW_GZIP:
chunk = zlib.decompress(chunk, wbits=-15)
full_sig.update(chunk)
assert addr == base_addr + len(binary_data)
binary_data += chunk
full_sig_pass = full_sig.final_verify(actual_sig, keys.sign_pk)
assert full_sig_pass
return [{"address": base_addr, "data": binary_data}]
def unpack_dfu(keys, args):
# Load previously generated keys.
keys.load()
# Build a DFU file from the decrypted binary data.
data = verify_pack_dfu(keys, args.infile[0])
dfu.build(args.outfile[0], [data])
def main():
cmd_parser = argparse.ArgumentParser(description="Build signed/encrypted DFU files")
cmd_parser.add_argument("-k", "--keys", default="mboot_keys.h", help="filename for keys")
subparsers = cmd_parser.add_subparsers()
parser_gk = subparsers.add_parser("generate-keys", help="generate keys")
parser_gk.set_defaults(func=generate_keys)
parser_ed = subparsers.add_parser("pack-dfu", help="encrypt and sign a DFU file")
parser_ed.add_argument("-z", "--gzip", action="store_true", help="compress chunks")
parser_ed.add_argument("chunk_size", nargs=1, help="maximum size in bytes of each chunk")
parser_ed.add_argument("infile", nargs=1, help="input DFU file")
parser_ed.add_argument("outfile", nargs=1, help="output DFU file")
parser_ed.set_defaults(func=pack_dfu)
parser_dd = subparsers.add_parser("unpack-dfu", help="decrypt a signed/encrypted DFU file")
parser_dd.add_argument("infile", nargs=1, help="input packed DFU file")
parser_dd.add_argument("outfile", nargs=1, help="output DFU file")
parser_dd.set_defaults(func=unpack_dfu)
args = cmd_parser.parse_args()
keys = Keys(args.keys)
args.func(keys, args)
if __name__ == "__main__":
main()
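
# Illustrative command lines for this tool (assumptions: the script is saved as
# mboot_pack_dfu.py and the firmware/chunk-size values below are placeholders;
# only the sub-command names, options and the default key filename come from the
# argument parser above):
#
#   python mboot_pack_dfu.py -k mboot_keys.h generate-keys
#   python mboot_pack_dfu.py -k mboot_keys.h pack-dfu --gzip 2048 firmware.dfu firmware.pack.dfu
#   python mboot_pack_dfu.py -k mboot_keys.h unpack-dfu firmware.pack.dfu firmware.dfu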
|
StarcoderdataPython
|
3344428
|
# -*- coding: utf-8 -*-
from . import ln_koko_sd_trip
from . import koko_sd_shop_detail
from . import res_config_settings
|
StarcoderdataPython
|
3285213
|
<reponame>mikemalinowski/carapace
"""
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
factories is a module which exposes a take on the Factory/Plugin design
pattern. The idea behind this pattern is to be able to define a structure
which your functionality sits within - allowing you to call that
functionality without ever really knowing what it is doing.
This approach is particularly useful when building systems which are
likely to expand in unknown ways over time. Example use cases might include:
* Toolboxes, where each tool is represented as a plugin - and an
interface which is arbitrarily populated with those tools
* Node graphs, where we have no up-front knowledge of what nodes
may be available to use
* Data parsers which include data that changes format over time due
to deprecation, meaning each data type can be represented by a
plugin allowing the framework to never care about the storage
details of the data
The commonality between all these structures is that the core of each
system needs to do something but it does not have to care about the
detail of how that task is achieved. Instead the detail is held within
plugin libraries which can be expanded and contracted over time.
This pattern is incredibly useful but tends to come with an overhead
of writing dynamic loading mechanisms and functionality to easily
interact and query the plugins. The Factories module aims to diminish
that overhead - allowing you to focus on your end goal and the
development of plugins.
This library was written based on the information here:
https://sourcemaking.com/design_patterns/factory_method
It is also designed based on the principles given during the
GDC 2018 Talk - A Practical Approach to Developing Forward-Facing Rigs, Tools and
Pipelines - which can be explored in more detail here:
https://www.gdcvault.com/play/1025427/A-Practical-Approach-to-Developing
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2019 <NAME>"
__license__ = "MIT"
__version__ = "1.2.0"
from .factory import (
Factory,
enable_debugging,
)
from .constants import (
log,
)
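
# A minimal sketch of the factory/plugin pattern described in the docstring above.
# This is illustrative only: the Tool base class, the discovery path and the
# Factory(abstract=..., paths=...) call signature are assumptions and may not
# match this package's actual API.
#
#   class Tool(object):
#       """Abstract plugin: every tool advertises a name and implements run()."""
#       name = ''
#
#       def run(self):
#           raise NotImplementedError
#
#   # Plugins live in their own modules under the given paths and are loaded
#   # dynamically, so the host application never imports them directly.
#   toolbox = Factory(abstract=Tool, paths=['/path/to/plugin/modules'])
#   for plugin_class in toolbox.plugins():
#       plugin_class().run()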
|
StarcoderdataPython
|
3311698
|
<reponame>ewiger/len
from .defaults import DefaultScope
class AppScope(DefaultScope):
@property
def kind(self):
return "app"
|
StarcoderdataPython
|
13566
|
import pytest
import ast
from pytest_mock import MockerFixture
from pystratis.api.node import Node
from pystratis.api.node.responsemodels import *
from pystratis.api import FullNodeState, FeatureInitializationState, LogRule
from pystratis.core.networks import StraxMain, CirrusMain
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_status_no_publish(mocker: MockerFixture, network):
data = {
'agent': 'nodeagent',
'version': 'nodeversion',
'externalAddress': '[::0.0.0.0]',
'network': network.name,
'coin_ticker': 'STRAX' if 'Strax' in network.name else 'CRS',
'processId': '0',
'consensusHeight': 10,
'blockStoreHeight': 10,
'bestPeerHeight': 10,
'inboundPeers': [
{
'version': 1,
'remoteSocketEndpoint': '[::0.0.0.0]',
'tipHeight': 10
}
],
'outboundPeers': [
{
'version': 1,
'remoteSocketEndpoint': '[::0.0.0.0]',
'tipHeight': 10
}
],
'featuresData': [
{
'namespace': 'node.feature',
'state': FeatureInitializationState.Initialized
}
],
'dataDirectoryPath': '/my/data/dir',
'runningTime': 'a long time',
'difficulty': 100000.0000,
'protocolVersion': 123,
'testnet': False,
'relayFee': 0,
'state': FullNodeState.Initialized,
'inIbd': False,
'headerHeight': 1
}
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.status(publish=False)
assert response == StatusModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_status_publish(mocker: MockerFixture, network):
data = {
'agent': 'nodeagent',
'version': 'nodeversion',
'externalAddress': '[::0.0.0.0]',
'network': network.name,
'coin_ticker': 'STRAX' if 'Strax' in network.name else 'CRS',
'processId': '0',
'consensusHeight': 10,
'blockStoreHeight': 10,
'bestPeerHeight': 10,
'inboundPeers': [
{
'version': 1,
'remoteSocketEndpoint': '[::0.0.0.0]',
'tipHeight': 10
}
],
'outboundPeers': [
{
'version': 1,
'remoteSocketEndpoint': '[::0.0.0.0]',
'tipHeight': 10
}
],
'featuresData': [
{
'namespace': 'node.feature',
'state': FeatureInitializationState.Initialized
}
],
'dataDirectoryPath': '/my/data/dir',
'runningTime': 'a long time',
'difficulty': 100000.0000,
'protocolVersion': 123,
'testnet': False,
'relayFee': 0,
'state': FullNodeState.Initialized,
'inIbd': False,
'headerHeight': 1
}
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.status(publish=True)
assert response == StatusModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_get_blockheader(mocker: MockerFixture, network, generate_uint256):
data = {
'version': 1,
'merkleroot': generate_uint256,
'nonce': 0,
'bits': 'bits',
'previousblockhash': generate_uint256,
'time': 1,
}
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.get_blockheader(
block_hash=generate_uint256,
is_json_format=True
)
assert response == BlockHeaderModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_get_raw_transaction_verbose(mocker: MockerFixture, network, generate_coinbase_transaction, generate_uint256):
trxid = generate_uint256
data = generate_coinbase_transaction(trxid)
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.get_raw_transaction(trxid=trxid, verbose=True)
assert response == TransactionModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_get_raw_transaction_nonverbose(mocker: MockerFixture, network, generate_coinbase_transaction, generate_uint256):
trxid = generate_uint256
data = generate_coinbase_transaction(trxid)
hexified_data = bytes(str(data), 'ascii').hex()
mocker.patch.object(Node, 'get', return_value=hexified_data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.get_raw_transaction(trxid=trxid, verbose=False)
assert response == hexified_data
unserialized_response = ast.literal_eval(bytes.fromhex(hexified_data).decode('ascii'))
assert data == unserialized_response
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_decode_raw_transaction(mocker: MockerFixture, network, generate_uint256, generate_coinbase_transaction):
trxid = generate_uint256
data = generate_coinbase_transaction(trxid)
hexified_data = bytes(str(data), 'ascii').hex()
mocker.patch.object(Node, 'post', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.decode_raw_transaction(raw_hex=hexified_data)
assert response == TransactionModel(**data)
# noinspection PyUnresolvedReferences
node.post.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_validate_address(mocker: MockerFixture, network, generate_p2pkh_address):
address = generate_p2pkh_address(network=network)
data = {
'isvalid': True,
'address': address,
'scriptPubKey': 'a scriptPubKey',
'isscript': False,
'iswitness': False
}
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.validate_address(address=address)
assert response == ValidateAddressModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_get_txout(mocker: MockerFixture, network, generate_uint256, generate_hexstring, generate_p2pkh_address):
data = {
'bestblock': generate_uint256,
'confirmations': 1,
'value': 5,
'scriptPubKey': {
'asm': generate_hexstring(128),
'hex': generate_hexstring(128),
'type': 'pubkey',
'reqSigs': 1,
"addresses": [
generate_p2pkh_address(network=network)
]
},
'coinbase': False
}
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.get_txout(trxid=generate_uint256, vout=0, include_mempool=False)
assert response == GetTxOutModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_get_txout_proof(mocker: MockerFixture, network, generate_uint256, generate_hexstring):
data = generate_hexstring(128)
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.get_txout_proof(
txids=[
generate_uint256,
generate_uint256
],
block_hash=generate_uint256
)
assert response == data
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_shutdown(mocker: MockerFixture, network):
data = None
mocker.patch.object(Node, 'post', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
node.shutdown()
# noinspection PyUnresolvedReferences
node.post.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_stop(mocker: MockerFixture, network):
data = None
mocker.patch.object(Node, 'post', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
node.stop()
# noinspection PyUnresolvedReferences
node.post.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_log_levels(mocker: MockerFixture, network):
data = None
mocker.patch.object(Node, 'put', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
node.log_levels(log_rules=[LogRule(rule_name='TestRule', log_level='Debug', filename='filename')])
# noinspection PyUnresolvedReferences
node.put.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_log_rules(mocker: MockerFixture, network):
data = [
{
'ruleName': 'TestRule',
'logLevel': 'Debug',
'filename': 'filename'
}
]
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.log_rules()
assert response == [LogRule(**x) for x in data]
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_async_loops(mocker: MockerFixture, network):
data = [
{
'loopName': 'Loop1',
'status': 'Running'
}
]
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.async_loops()
assert response == [AsyncLoopsModel(**x) for x in data]
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_rewind(mocker: MockerFixture, network):
data = "Rewind flag set, please restart the node."
mocker.patch.object(Node, 'put', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.rewind(height=2)
assert isinstance(response, str)
# noinspection PyUnresolvedReferences
node.put.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_delete_datafolder_chain(mocker: MockerFixture, network):
data = None
mocker.patch.object(Node, 'delete', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
node.delete_datafolder_chain()
# noinspection PyUnresolvedReferences
node.delete.assert_called_once()
|
StarcoderdataPython
|
3203856
|
<filename>tests/test_settings.py
import logging
from pathlib import Path
def test_get_exception(caplog, settings):
with caplog.at_level(logging.INFO):
assert settings.get("RANDOM_KEY") is False
assert "No Value" in caplog.text
def test_update_exception(caplog, settings):
# set collection to something that is not a collection
collection = 1
with caplog.at_level(logging.INFO):
assert settings.update(collection) is False
assert "Uncaught error" in caplog.text
def test_reset_data_exception(caplog, monkeypatch, settings):
def fake_store_data():
1 / 0
monkeypatch.setattr(
"pygluu.kubernetes.settings.ValuesHandler.store_data",
fake_store_data,
)
with caplog.at_level(logging.INFO):
assert settings.reset_data() is False
assert "Uncaught error" in caplog.text
|
StarcoderdataPython
|
1788767
|
"Unit test for the game-board class"
import unittest
import numpy as np
from .board import *
def place_stone(board, color, x, y):
board[x,y] = color
class TestBoard(unittest.TestCase):
def test_creation(self):
width = 20
height = 40
board = Board(height, width)
self.assertEqual(board.shape, (height,width))
for i in range(height):
for j in range(width):
# empty refers to "no stone laid" and should be defined in the module ``board``
self.assertEqual(board[i,j], empty)
def test_reset(self):
width = 20
height = 40
board = Board(height, width)
place_stone(board, white, 5, 5)
place_stone(board, black, 4, 5)
place_stone(board, white, 4, 3)
self.assertEqual(board.in_turn, black)
self.assertFalse( (board.board == np.zeros([height, width]) ).all() )
board.reset()
self.assertEqual(board.in_turn, white)
self.assertEqual(board.shape, (height,width))
for i in range(height):
for j in range(width):
# empty refers to "no stone laid" and should be defined in the module ``board``
self.assertEqual(board[i,j], empty)
def test_lay_stone(self):
width = height= 20
board = Board(width, height)
# try "place a black stone at 5,5" --> white starts therefore expect error
self.assertRaisesRegexp(InvalidMoveError, 'White is in turn', place_stone, board, black, 5, 5)
# "place a white stone at 5,5" should be OK
place_stone(board, white, 5, 5)
# "place another white stone" is an invalid move
self.assertRaisesRegexp(InvalidMoveError, 'Black is in turn', place_stone, board, white, 5, 4)
        # placing another stone at 5,5 is invalid since 5,5 is already occupied
self.assertRaisesRegexp(InvalidMoveError, r'Position \(5, 5\) is already taken', place_stone, board, white, 5, 5)
def test_log(self):
width = height= 20
board = Board(width, height)
self.assertEqual(board.log, [])
place_stone(board, white, 5, 5)
self.assertEqual(board.log, [(5, 5)])
place_stone(board, black, 1, 19)
self.assertEqual(board.log, [(5, 5), (1, 19)])
place_stone(board, white, 2, 8)
self.assertEqual(board.log, [(5, 5), (1, 19), (2, 8)])
board.reset()
self.assertEqual(board.log, [])
def test_full(self):
width = height= 4
board = Board(height, width)
in_turn = white
for i in range(width):
for j in range(height):
board[i,j] = in_turn
if in_turn == white:
in_turn = black
else:
in_turn = white
if not (i,j) == (width-1, height-1):
self.assertFalse(board.full())
else:
self.assertTrue(board.full())
self.assertTrue(board.full())
def test_winner(self):
width = height= 10
board = Board(width, height)
self.assertEqual(board.winner(), (None, []))
place_stone(board, white, 1,2)
self.assertEqual(board.winner(), (None, []))
place_stone(board, black, 0,2)
self.assertEqual(board.winner(), (None, []))
place_stone(board, white, 1,3)
self.assertEqual(board.winner(), (None, []))
place_stone(board, black, 0,3)
self.assertEqual(board.winner(), (None, []))
place_stone(board, white, 1,4)
self.assertEqual(board.winner(), (None, []))
place_stone(board, black, 0,4)
self.assertEqual(board.winner(), (None, []))
place_stone(board, white, 1,5)
self.assertEqual(board.winner(), (None, []))
place_stone(board, black, 0,5)
self.assertEqual(board.winner(), (None, []))
place_stone(board, white, 1,6)
self.assertEqual(board.winner()[0], white)
self.assertEqual(board.winner()[1], [(1,2), (1,3), (1,4), (1,5), (1,6)])
class TestGetLine(unittest.TestCase):
def setUp(self):
self.target_shape = (5,)
width = 7
height = 7
self.board = Board(width=width, height=height)
# make row
place_stone(self.board, white, 1,2)
place_stone(self.board, black, 1,3)
place_stone(self.board, white, 1,4)
place_stone(self.board, black, 1,5)
place_stone(self.board, white, 1,6)
# make column
place_stone(self.board, black, 2,6)
place_stone(self.board, white, 3,6)
place_stone(self.board, black, 4,6)
place_stone(self.board, white, 5,6)
# leave (6,6) empty
# make diagonal upleft to lowright
place_stone(self.board, black, 0,0)
place_stone(self.board, white, 1,1)
place_stone(self.board, black, 2,2)
place_stone(self.board, white, 3,3)
place_stone(self.board, black, 4,4)
# make diagonal lowleft to upright
place_stone(self.board, white, 5,0)
# leave (4,1) empty
place_stone(self.board, black, 3,2)
place_stone(self.board, white, 2,3)
# (1,4) is already white from "make column"
def test_get_column(self):
column, positions = self.board.get_column(2,6)
target_positions = [(2,6), (3,6), (4,6), (5,6), (6,6)]
self.assertEqual(column.shape, self.target_shape)
np.testing.assert_equal(column, np.array([black,white,black,white,empty]))
self.assertEqual(positions, target_positions)
def test_get_row(self):
row, positions = self.board.get_row(1,2)
target_positions = [(1,2), (1,3), (1,4), (1,5), (1,6)]
self.assertEqual(row.shape, self.target_shape)
np.testing.assert_equal(row, np.array([white,black,white,black,white]))
self.assertEqual(positions, target_positions)
def test_get_diagonal_upleft_to_lowright(self):
diagonal, positions = self.board.get_diagonal_upleft_to_lowright(0,0)
target_positions = [(0,0), (1,1), (2,2), (3,3), (4,4)]
self.assertEqual(diagonal.shape, self.target_shape)
np.testing.assert_equal(diagonal, np.array([black,white,black,white,black]))
self.assertEqual(positions, target_positions)
def test_diagonal_lowleft_to_upright(self):
diagonal, positions = self.board.get_diagonal_lowleft_to_upright(5,0)
target_positions = [(5,0), (4,1), (3,2), (2,3), (1,4)]
self.assertEqual(diagonal.shape, self.target_shape)
np.testing.assert_equal(diagonal, np.array([white,empty,black,white,white]))
self.assertEqual(positions, target_positions)
# no negative Y-index?
width = 7
height = 7
self.board = Board(width=width, height=height)
place_stone(self.board, white, 3,0)
place_stone(self.board, black, 2,1)
place_stone(self.board, white, 1,2)
place_stone(self.board, black, 0,3)
place_stone(self.board, white, -1,4)
self.assertRaises(IndexError, self.board.get_diagonal_lowleft_to_upright, 3,0)
# reach upmost row?
width = 7
height = 7
self.board = Board(width=width, height=height)
place_stone(self.board, white, 4,0)
place_stone(self.board, black, 3,1)
place_stone(self.board, white, 2,2)
place_stone(self.board, black, 1,3)
place_stone(self.board, white, 0,4)
line, positions = self.board.get_diagonal_lowleft_to_upright(4,0)
np.testing.assert_equal(line, [white, black, white, black, white])
np.testing.assert_equal(positions, [(4,0), (3,1), (2,2), (1,3), (0,4)])
|
StarcoderdataPython
|
154552
|
'''Write a program that helps a MEGA SENA player create bets.
The program asks how many games to generate and draws 6 numbers between 1 and 60 for each game, storing everything in a compound list.'''
from random import sample
from time import sleep
print('-='*15)
print('{:=^30}'.format('MEGA SENA game'))
print('-='*15)
njogos = int(input('\nHow many games do you want to play? '))
print()
jogos = []
temp = []
print("Ok, let's get started!")
print('\nloading...')
print()
sleep(2)
print('-='*15)
for j in range(0, njogos):
    tipo = str(input(f'\n[Game {j+1}/{njogos}].\n\nDo you want to type the numbers yourself or play in "surprise" mode? [D=Type / S=Surprise]: ')).strip().upper()[0]
    while tipo not in ('D', 'S'):
        print('Invalid option.')
        tipo = str(input(
            f'\n[Game {j + 1}/{njogos}].\n\nDo you want to type the numbers yourself or play in "surprise" mode? [D=Type / S=Surprise]: ')).strip().upper()[0]
    if tipo == "D":
        for c in range(0, 6):
            num = int(input(f'\nEnter a number between 1 and 60. (Number {c+1}/6): '))
            while num > 60 or num < 1 or (num in temp):
                print('Invalid number.')
                num = int(input(f'\nEnter a number between 1 and 60. (Number {c + 1}/6): '))
            temp.append(num)
        jogos.append(temp[:])
        print(f'\nYour final game was: {temp}')
        temp.clear()
        print('-='*15)
    if tipo == 'S':
        # range(1, 61) so that 60 itself can be drawn (the original range(1, 60) excluded it)
        temp = list(sample(range(1, 61), 6))
        jogos.append(temp[:])
        print(f'\nYour final game was: {temp}')
        temp.clear()
        print('-=' * 15)
print('\nYour games were: ')
for q in range(0, njogos):
    print(f'Game {q+1}: {jogos[q]}')
cpu = list(sample(range(1, 61), 6))
print('\nThe drawn numbers were: ', end='')
for pos, a in enumerate(cpu):
    if pos != 5:
        print(f'{a},', end='')
    else:
        print(f'{a}.')
sleep(2)
for j in range(0, njogos):
    # collect the drawn numbers that appear in this game
    for c in range(0, 6):
        if cpu[c] in jogos[j]:
            temp.append(cpu[c])
    print(f'\nIn game {j+1} you matched {len(temp)} number(s).')
    if len(temp) > 0:
        print(f'The matched numbers were {temp}')
    if len(temp) == 4:
        print('Congratulations! You hit the "quadra" (4 numbers)!')
    elif len(temp) == 5:
        print('Congratulations! You hit the "quina" (5 numbers)!')
    elif len(temp) == 6:
        print('CONGRATULATIONS! You won the MEGA-SENA! UNBELIEVABLE!!!!!!!')
    temp.clear()
|
StarcoderdataPython
|
136155
|
<filename>src/zveronics/__main__.py<gh_stars>0
import logging.config
import pkg_resources
import shutil
from pathlib import Path
import yaml
from zveronics import serve_zveronics
def load_cfg():
cfg_dir = Path.home() / '.config' / 'zveronics'
cfg_dir.mkdir(parents=True, exist_ok=True)
logging_cfg_file = 'logging.yaml'
logging_cfg_path = cfg_dir / logging_cfg_file
stream = pkg_resources.resource_stream('zveronics', 'etc/logging.yaml')
if not logging_cfg_path.is_file():
with stream as src, open(logging_cfg_path, 'wb') as dst:
shutil.copyfileobj(src, dst)
with open(logging_cfg_path, 'r') as f:
return yaml.safe_load(f)
def main():
logging.config.dictConfig(load_cfg())
serve_zveronics('0.0.0.0', 50000)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
4828657
|
<filename>global-ip-pool/global-ip-pool-functions.py
# Modules import
import requests
from requests.auth import HTTPBasicAuth
import time
# Disable SSL warnings. Not needed in production environments with valid certificates
import urllib3
urllib3.disable_warnings()
# Authentication
BASE_URL = 'https://<IP ADDRESS or FQDN>' # Example BASE_URL = 'https://sandboxdnac.cisco.com'
AUTH_URL = '/dna/system/api/v1/auth/token'
USERNAME = '<USERNAME>' # Example USERNAME = 'devnetuser'
PASSWORD = '<PASSWORD>' # Example PASSWORD = '<PASSWORD>!'
# URLs
GLOBAL_IP_POOLS_URL='/dna/intent/api/v1/global-pool'
# Get Authentication token
def get_dnac_jwt_token():
response = requests.post(BASE_URL + AUTH_URL,
auth=HTTPBasicAuth(USERNAME, PASSWORD),
verify=False)
token = response.json()['Token']
return token
# Get Global pools
def get_global_ip_pools(headers, query_params):
response = requests.get(BASE_URL + GLOBAL_IP_POOLS_URL,
params=query_params,
headers=headers, verify=False)
return response.json()['response']
# Create Global pools
def create_global_ip_pools(headers, pool_information):
response = requests.post(BASE_URL + GLOBAL_IP_POOLS_URL,
json=pool_information,
headers=headers, verify=False)
return response.json()
def main():
# obtain the Cisco DNA Center Auth Token
token = get_dnac_jwt_token()
headers = {'X-Auth-Token': token, 'Content-Type': 'application/json'}
pool_information = {
"settings": {
"ippool": [
{
"ipPoolName": "DNAC-Guide_Pool",
"type": "Generic",
"ipPoolCidr": "172.30.200.0/24",
"gateway": "172.30.200.1",
"dhcpServerIps": ["10.255.3.50"],
"dnsServerIps": ["10.255.3.50"],
"IpAddressSpace":"IPv4"
}
]
}
}
response = create_global_ip_pools(headers, pool_information)
time.sleep(5)
response = get_global_ip_pools(headers, {})
for credential in response:
print(credential['id'], credential['ipPoolName'], credential['ipPoolCidr'])
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
166951
|
<reponame>johnscancella/open-oni<gh_stars>0
import logging
from django.core.management.base import BaseCommand
from core.management.commands import configure_logging
from core.solr_index import index_titles
configure_logging("index_titles_logging.config", "index_titles.log")
_logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, **options):
_logger.info("indexing titles")
index_titles()
_logger.info("finished indexing titles")
|
StarcoderdataPython
|
1626695
|
<filename>sleeper_ff_bot/slack.py
import requests
from bot_interface import BotInterface
class Slack(BotInterface):
def __init__(self, webhook):
self.webhook = webhook
def send_message(self, message):
requests.post(self.webhook, json={"text": message})
|
StarcoderdataPython
|