the-stack_106_25144 | import copy
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.transforms.functional as tv_tf
from scipy import signal
from torchvision.utils import make_grid
matplotlib.use("agg")
plt.style.use("bmh")
MARKERS = [
".",
",",
"o",
"v",
"^",
"<",
">",
"1",
"2",
"3",
"4",
"8",
"s",
"p",
"P",
"*",
"h",
"H",
"+",
"x",
"X",
"D",
"d",
"|",
]
@torch.no_grad()
def plot_results(data_container, save_path=None):
"""Plot the results conresponding to the batched images based on the `make_grid` method from `torchvision`.
Args:
data_container (dict): Dict containing data you want to plot.
save_path (str): Path of the exported image.
"""
axes = plt.subplots(nrows=len(data_container), ncols=1)[1].ravel()
plt.subplots_adjust(hspace=0.03, left=0.05, bottom=0.01, right=0.99, top=0.99)
for subplot_id, (name, data) in enumerate(data_container.items()):
grid = make_grid(data, nrow=data.shape[0], padding=2, normalize=False)
grid_image = np.asarray(tv_tf.to_pil_image(grid))
axes[subplot_id].imshow(grid_image)
axes[subplot_id].set_ylabel(name)
axes[subplot_id].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
if save_path is not None:
plt.savefig(save_path, dpi=300)
else:
plt.show()
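# Hedged usage sketch (not part of the original file): `data_container` is assumed
# to map row labels to batched image tensors of shape (N, C, H, W), for example:
#
#     batch = {"image": torch.rand(4, 3, 64, 64), "pred": torch.rand(4, 1, 64, 64)}
#     plot_results(batch, save_path="grid.png")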
def plot_lr_coef_curve(lr_lambda, num_steps, save_path=None):
fig, ax = plt.subplots()
# give plot a title
ax.set_title("Learning Rate Coefficient Curve")
# make axis labels
ax.set_xlabel("Index")
ax.set_ylabel("Coefficient")
# set ticks
ax.set_xticks(np.linspace(0, num_steps, 11))
ax.set_yticks(np.linspace(0, 1, 11))
# set lim
ax.set_xlim((-int(num_steps * 0.1), int(num_steps * 1.5)))
ax.set_ylim((-0.1, 1))
x_data = np.arange(num_steps)
y_data = np.array([lr_lambda(x) for x in x_data])
ax.plot(x_data, y_data, linewidth=2)
maximum_xs = signal.argrelextrema(y_data, comparator=np.greater_equal)[0]
maximum_ys = y_data[maximum_xs]
minimum_xs = signal.argrelextrema(y_data, comparator=np.less_equal)[0]
minimum_ys = y_data[minimum_xs]
end_point_xs = np.array([x_data[0], x_data[-1]])
end_point_ys = np.array([y_data[0], y_data[-1]])
for pt in zip(
np.concatenate((maximum_xs, minimum_xs, end_point_xs)),
np.concatenate((maximum_ys, minimum_ys, end_point_ys)),
):
ax.text(pt[0], pt[1], s=f"x={pt[0]:d}")
ax.text(pt[0], pt[1] - 0.05, s=f"y={pt[1]:.3e}")
if save_path:
fig.savefig(save_path, dpi=300)
else:
plt.show()
def plot_lr_curve_for_scheduler(scheduler, num_steps, save_path=None):
scheduler = copy.deepcopy(scheduler)
fig, ax = plt.subplots()
# give plot a title
ax.set_title("Learning Rate Curve")
# make axis labels
ax.set_xlabel("Iter")
ax.set_ylabel("LR")
x_data = np.arange(num_steps)
ys = []
for _ in x_data:
scheduler.step()
ys.append(max(scheduler.get_last_lr()))
y_data = np.array(ys)
# set lim
ax.set_xlim((-int(num_steps * 0.1), int(num_steps * 1.5)))
ax.set_ylim((y_data.min(), y_data.max()))
ax.plot(x_data, y_data, linewidth=2)
maximum_xs = signal.argrelextrema(y_data, comparator=np.greater_equal)[0]
maximum_ys = y_data[maximum_xs]
minimum_xs = signal.argrelextrema(y_data, comparator=np.less_equal)[0]
minimum_ys = y_data[minimum_xs]
end_point_xs = np.array([x_data[0], x_data[-1]])
end_point_ys = np.array([y_data[0], y_data[-1]])
x_ticks = [0, num_steps]
for pt in zip(
np.concatenate((maximum_xs, minimum_xs, end_point_xs)),
np.concatenate((maximum_ys, minimum_ys, end_point_ys)),
):
ax.text(pt[0], pt[1], s=f"x={pt[0]:d}")
ax.text(pt[0], pt[1] - 0.05, s=f"y={pt[1]:.3e}")
x_ticks.append(pt[0])
# set ticks
ax.set_xticks(list(sorted(list(set(x_ticks)))))
# ax.set_yticks(np.linspace(0, 1, 11))
if save_path:
fig.savefig(save_path, dpi=300)
else:
plt.show()
def plot_lr_curve(log_path):
lrs = []
with open(log_path, encoding="utf-8", mode="r") as f:
for line in f:
if "Lr:" not in line:
continue
line = line.rstrip()
# [Train@0/13160 0/329 0/40] | Lr:[0.0005, 0.0005] | M:0.65033/C:0.65033 | [32, 3, 384, 384] | bce: 0.650327
lrs.append([float(f) for f in line.split(" | ")[1][4:-1].split(", ")])
_, ax = plt.subplots()
# give plot a title
ax.set_title("Learning Rate Curve")
# make axis labels
ax.set_xlabel("Index")
ax.set_ylabel("LR")
# set ticks
ax.set_xticks(np.linspace(0, len(lrs), 11))
ax.set_yticks(np.linspace(0, 0.1, 11))
# set lim
ax.set_xlim((-int(len(lrs) * 0.1), int(len(lrs) * 1.5)))
ax.set_ylim((-0.01, 0.1))
x_data = np.arange(len(lrs))
for y_idx, y_data in enumerate(zip(*lrs)):
y_data = np.array(y_data)
print(y_data)
ax.plot(x_data, y_data, linewidth=1, label=str(y_idx), marker=MARKERS[y_idx])
maximum_xs = signal.argrelextrema(y_data, comparator=np.greater_equal)[0]
maximum_ys = y_data[maximum_xs]
minimum_xs = signal.argrelextrema(y_data, comparator=np.less_equal)[0]
minimum_ys = y_data[minimum_xs]
end_point_xs = np.array([x_data[0], x_data[-1]])
end_point_ys = np.array([y_data[0], y_data[-1]])
for pt in zip(
np.concatenate((maximum_xs, minimum_xs, end_point_xs)),
np.concatenate((maximum_ys, minimum_ys, end_point_ys)),
):
ax.text(pt[0], pt[1], s=f"x={pt[0]:d}")
ax.text(pt[0], pt[1] - 0.005, s=f"y={pt[1]:.3e}")
ax.legend()
plt.show()
|
the-stack_106_25145 | # camera-ready
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from resnet import ResNet18_OS16, ResNet34_OS16, ResNet50_OS16, ResNet101_OS16, ResNet152_OS16, ResNet18_OS8, ResNet34_OS8
from aspp import ASPP, ASPP_Bottleneck
class DeepLabV3(nn.Module):
def __init__(self, model_id, project_dir):
super(DeepLabV3, self).__init__()
self.num_classes = 13
self.model_id = model_id
self.project_dir = project_dir
self.resnet = ResNet18_OS8() # NOTE! specify the type of ResNet here
self.aspp = ASPP(num_classes=self.num_classes) # NOTE! if you use ResNet50-152, set self.aspp = ASPP_Bottleneck(num_classes=self.num_classes) instead
def forward(self, x):
# (x has shape (batch_size, 3, h, w))
h = x.size()[2]
w = x.size()[3]
feature_map = self.resnet(x) # (shape: (batch_size, 512, h/16, w/16)) (assuming self.resnet is ResNet18_OS16 or ResNet34_OS16. If self.resnet is ResNet18_OS8 or ResNet34_OS8, it will be (batch_size, 512, h/8, w/8). If self.resnet is ResNet50-152, it will be (batch_size, 4*512, h/16, w/16))
output = self.aspp(feature_map) # (shape: (batch_size, num_classes, h/16, w/16))
output = F.upsample(output, size=(h, w), mode="bilinear") # (shape: (batch_size, num_classes, h, w))
return output
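# Hedged usage sketch (not part of the original file; `model_id` and `project_dir`
# are placeholder values):
#
#     net = DeepLabV3(model_id="1", project_dir="./")
#     logits = net(torch.rand(2, 3, 256, 256))  # -> (2, 13, 256, 256)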
|
the-stack_106_25148 | # @lc app=leetcode id=488 lang=python3
#
# [488] Zuma Game
#
# https://leetcode.com/problems/zuma-game/description/
#
# algorithms
# Hard (37.71%)
# Likes: 309
# Dislikes: 341
# Total Accepted: 18.7K
# Total Submissions: 49.4K
# Testcase Example: '"WRRBBW"\n"RB"'
#
# You are playing a variation of the game Zuma.
#
# In this variation of Zuma, there is a single row of colored balls on a board,
# where each ball can be colored red 'R', yellow 'Y', blue 'B', green 'G', or
# white 'W'. You also have several colored balls in your hand.
#
# Your goal is to clear all of the balls from the board. On each turn:
#
#
# Pick any ball from your hand and insert it in between two balls in the row or
# on either end of the row.
# If there is a group of three or more consecutive balls of the same color,
# remove the group of balls from the board.
#
# If this removal causes more groups of three or more of the same color to
# form, then continue removing each group until there are none
# left.
#
#
# If there are no more balls on the board, then you win the game.
# Repeat this process until you either win or do not have any more balls in
# your hand.
#
#
# Given a string board, representing the row of balls on the board, and a
# string hand, representing the balls in your hand, return the minimum number
# of balls you have to insert to clear all the balls from the board. If you
# cannot clear all the balls from the board using the balls in your hand,
# return -1.
#
#
# Example 1:
#
#
# Input: board = "WRRBBW", hand = "RB"
# Output: -1
# Explanation: It is impossible to clear all the balls. The best you can do is:
# - Insert 'R' so the board becomes WRRRBBW. WRRRBBW -> WBBW.
# - Insert 'B' so the board becomes WBBBW. WBBBW -> WW.
# There are still balls remaining on the board, and you are out of balls to
# insert.
#
# Example 2:
#
#
# Input: board = "WWRRBBWW", hand = "WRBRW"
# Output: 2
# Explanation: To make the board empty:
# - Insert 'R' so the board becomes WWRRRBBWW. WWRRRBBWW -> WWBBWW.
# - Insert 'B' so the board becomes WWBBBWW. WWBBBWW -> WWWW -> empty.
# 2 balls from your hand were needed to clear the board.
#
#
# Example 3:
#
#
# Input: board = "G", hand = "GGGGG"
# Output: 2
# Explanation: To make the board empty:
# - Insert 'G' so the board becomes GG.
# - Insert 'G' so the board becomes GGG. GGG -> empty.
# 2 balls from your hand were needed to clear the board.
#
#
# Example 4:
#
#
# Input: board = "RBYYBBRRB", hand = "YRBGB"
# Output: 3
# Explanation: To make the board empty:
# - Insert 'Y' so the board becomes RBYYYBBRRB. RBYYYBBRRB -> RBBBRRB -> RRRB
# -> B.
# - Insert 'B' so the board becomes BB.
# - Insert 'B' so the board becomes BBB. BBB -> empty.
# 3 balls from your hand were needed to clear the board.
#
#
#
# Constraints:
#
#
# 1 <= board.length <= 16
# 1 <= hand.length <= 5
# board and hand consist of the characters 'R', 'Y', 'B', 'G', and 'W'.
# The initial row of balls on the board will not have any groups of three or
# more consecutive balls of the same color.
#
#
#
# @lc tags=depth-first-search
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# Zuma game: a single row of balls on the board, plus balls in hand that can be
# inserted in any order. Find the minimum number of insertions needed to clear
# the board; return -1 if it cannot be cleared.
# Tried plain brute force first; surprisingly it was accepted.
# The removal logic was wrong at first (see the fix in remove()).
#
# @lc idea=end
# @lc group=depth-first-search
# @lc rank=10
# @lc code=start
class Solution:
def findMinStep(self, board: str, hand: str) -> int:
buffer = {}
ds = 'RYBGW'
hand = [hand.count(c) for c in ds]
ds = list(enumerate(ds))
def remove(s, i):
l, r = i, i
length = len(s)
while True:
if l < 0 or r >= length:
break
if s[l] != s[r]:
break
c = s[l]
n = 1 if l == r else 2
lt, rt = l, r
while lt - 1 >= 0 and s[lt - 1] == c:
lt -= 1
n += 1
while rt + 1 < length and s[rt + 1] == c:
rt += 1
n += 1
if n >= 3:
l = lt - 1
r = rt + 1
else:
break
# This was the buggy spot: without this adjustment the result keeps one extra character.
if l == r:
l -= 1
return s[:l + 1] + s[r:]
def recur(b: str):
if b == '':
return 0
if sum(hand) == 0:
return -1
key = tuple([b, *hand])
if key in buffer:
return buffer[key]
res = 6
for i, hc in ds:
if hand[i] == 0:
continue
hand[i] -= 1
t = recur(remove(hc + b, 0))
if t != -1 and t < res:
res = t
for j in range(1, len(b) + 1):
t = recur(remove(b[:j] + hc + b[j:], j))
if t != -1 and t < res:
res = t
hand[i] += 1
if res == 6:
res = -1
else:
res += 1
buffer[key] = res
return res
return recur(board)
pass
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('board = "WRRBBW", hand = "RB"')
print('Expected :')
print('-1')
print('Output :')
print(str(Solution().findMinStep("WRRBBW", "RB")))
print()
print('Example 2:')
print('Input : ')
print('board = "WWRRBBWW", hand = "WRBRW"')
print('Expected :')
print('2')
print('Output :')
print(str(Solution().findMinStep("WWRRBBWW", "WRBRW")))
print()
print('Example 3:')
print('Input : ')
print('board = "G", hand = "GGGGG"')
print('Expected :')
print('2')
print('Output :')
print(str(Solution().findMinStep("G", "GGGGG")))
print()
print('Example 4:')
print('Input : ')
print('board = "RBYYBBRRB", hand = "YRBGB"')
print('Expected :')
print('3')
print('Output :')
print(str(Solution().findMinStep("RBYYBBRRB", "YRBGB")))
print()
pass
# @lc main=end |
the-stack_106_25150 | from configs.models.backbone_2stream import backbone
from configs.models.neck import neck
from configs.models.bbox_head import set_num_classes
from configs.models.ca_motion_head import set_params
from configs.models.panoptic_head import panoptic_head
from configs.experiments.general import *
from configs.data.cscapesvps_motion_supp import data as cscapesvps_data
from configs.data.kittimots_motion_supp import data as kittimots_data
from configs.data.cscapesvps_motion_supp import *
# model settings
bbox_head = set_num_classes(num_classes=9)
ca_head = set_params(num_classes=3)
# model settings
model = dict(
type='SOLO',
pretrained='torchvision://resnet50',
backbone=backbone,
neck=neck,
panoptic_head=panoptic_head,
bbox_head=bbox_head,
ca_head=ca_head,
)
data = dict(
imgs_per_gpu=2,
workers_per_gpu=0,
train=[kittimots_data['train'], cscapesvps_data['train']],
val=cscapesvps_data['val'],
test=cscapesvps_data['test'],)
# optimizer
total_epochs = 15
optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[6, 8])
checkpoint_config = dict(interval=5)
# yapf:enable
work_dir = './work_dirs/ca_motion/'
pretrain_weights = './trained_models/panopticseg_cscapesvps.pth'
ignore_clf = False
same_nclasses = True
freeze_vars={'backbone.appearance_stream':True, 'neck':True, 'bbox_head':True, 'panoptic_head':True}
|
the-stack_106_25151 | """Tests for aiida_optimade.entry_collections."""
# pylint: disable=protected-access
from typing import Any, Callable, Dict
import pytest
def test_insert():
"""Test AiidaCollection.insert() raises NotImplentedError."""
from aiida_optimade.routers.structures import STRUCTURES
with pytest.raises(
NotImplementedError, match="The insert method is not implemented.*"
):
STRUCTURES.insert([])
@pytest.mark.parametrize("attribute", ["data_available", "data_returned"])
def test_causation_errors(attribute: str):
"""Test CausationError is returned if requesting `data_available` or `data_returned`
before setting them."""
from aiida_optimade.common.exceptions import CausationError
from aiida_optimade.routers.structures import STRUCTURES
with pytest.raises(
CausationError, match=f"{attribute} MUST be set before it can be retrieved."
):
getattr(STRUCTURES, attribute)
def test_bad_fields(
get_good_response: Callable[[str], Dict[str, Any]],
check_error_response: Callable[[str, int, str, str], None],
):
"""Test a UnknownProviderProperty warning is emitted for unrecognized provider
fields."""
from optimade.server.config import CONFIG
from optimade.server.warnings import UnknownProviderProperty
# Ignore this unknown provider field
response = get_good_response(
"/structures?response_fields=_exmpl_test_provider_field"
)
assert (
response.get("meta", {}).get("warnings", []) == []
), f"Warnings found: {response}"
# Warn about this provider-specific unknown field
with pytest.warns(UnknownProviderProperty):
response = get_good_response(
f"/structures?response_fields=_{CONFIG.provider.prefix}_unknown_provider_"
"field"
)
assert response.get("meta", {}).get(
"warnings", []
), f"No warnings found: {response}"
# Raise for unknown non-provider field
bad_field = "unknown_non_provider_field"
check_error_response(
request=f"/structures?response_fields={bad_field}",
expected_status=400,
expected_title="Bad Request",
expected_detail=(
"Unrecognised OPTIMADE field(s) in requested `response_fields`: "
f"{set([bad_field,])}."
),
)
def test_prepare_query_kwargs():
"""Check only valid QueryBuilder arguments are allowed for _prepare_query()."""
from aiida_optimade.routers.structures import STRUCTURES
with pytest.raises(ValueError):
STRUCTURES._prepare_query(node_types=[], **{"wrong_arg": "some_value"})
def test_array_sort_type():
"""Check TypeError is raised if sorting on list value types."""
from aiida_optimade.routers.structures import STRUCTURES
with pytest.raises(TypeError):
STRUCTURES.parse_sort_params("cartesian_site_positions")
|
the-stack_106_25152 | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='FCOS',
pretrained='open-mmlab://detectron/resnet50_caffe',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
extra_convs_on_inputs=False, # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=10,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
norm_cfg=None,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=0,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
lr=0.001, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='constant',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
total_epochs = 12
|
the-stack_106_25153 | __all__ = [
"Node"
, "Comment"
, "NewLine"
, "MacroBranch"
, "Ifdef"
, "CNode"
, "Label"
, "LoopWhile"
, "LoopDoWhile"
, "LoopFor"
, "BranchIf"
, "BranchSwitch"
, "BranchElse"
, "SwitchCase"
, "SwitchCaseDefault"
, "StrConcat"
# SemicolonPresence
, "Break"
, "Call"
, "Goto"
, "Declare"
, "MCall"
, "Return"
# Operator
, "OpIndex"
, "OpSDeref"
# UnaryOperator
, "OpAddr"
, "OpDec"
, "OpInc"
, "OpPostDec"
, "OpPostInc"
, "OpPreDec"
, "OpPreInc"
, "OpDeref"
, "OpNot"
, "OpCast"
, "OpSizeOf"
# BinaryOperator
, "OpAssign"
, "OpDeclareAssign"
, "OpCombAssign"
, "OpAdd"
, "OpSub"
, "OpMul"
, "OpDiv"
, "OpRem"
, "OpAnd"
, "OpOr"
, "OpXor"
, "OpLShift"
, "OpRShift"
, "OpLogAnd"
, "OpLogOr"
, "OpLogNot"
, "OpEq"
, "OpNEq"
, "OpGE"
, "OpLE"
, "OpGreater"
, "OpLess"
, "CaseRange"
]
from ..c_const import (
CConst,
CINT
)
from ..model import (
Type,
Pointer,
Macro,
NodeVisitor,
Function,
Variable
)
from ..type_container import (
TypeContainer,
)
from common import (
ee,
BreakVisiting,
lazy
)
from six import (
integer_types
)
# OpSDeref is automatically redirected to the definition of the structure, if
# available.
OPSDEREF_FROM_DEFINITION = ee("QDT_OPSDEREF_FROM_DEFINITION", "True")
class DeclarationSearcher(NodeVisitor):
def __init__(self, root):
super(DeclarationSearcher, self).__init__(root)
self.have_declaration = False
def on_visit(self):
if isinstance(self.cur, Declare):
self.have_declaration = True
raise BreakVisiting()
class Node(TypeContainer):
# traverse order indicator for `ObjectVisitor`
__node__ = ("children",)
__type_references__ = __node__
def __init__(self,
val = "",
new_line = "",
indent_children = True,
children = []
):
super(Node, self).__init__()
self.val = val
self.new_line = new_line
self.indent_children = indent_children
self.children = []
for child in children:
self.add_child(child)
def __call__(self, *children):
for c in children:
self.add_child(c)
return self
def add_child(self, child):
self.children.append(child)
def out_children(self, writer):
if self.indent_children:
writer.push_indent()
for child in self.children:
child.__c__(writer)
if child.new_line is not None:
writer.line(child.new_line)
if self.indent_children:
writer.pop_indent()
def __c__(self, writer):
writer.write(self.val)
self.out_children(writer)
class Ifdef(Node):
def __init__(self, val, *args):
if isinstance(val, Macro):
val = val.c_name
super(Ifdef, self).__init__(
# Since the macro can be undefined and unknown to the model,
# we refer it using its string name.
val = str(val),
indent_children = False,
children = args
)
self.new_line = None
def __c__(self, writer):
with writer.cpp:
writer.line("ifdef@b" + self.val)
writer.push_indent()
self.out_children(writer)
with writer.cpp:
writer.pop_indent()
writer.line("endif")
class CNode(Node):
def add_child(self, child):
if isinstance(child, str):
child = CConst.parse(child)
elif isinstance(child, integer_types):
child = CINT(child)
super(CNode, self).add_child(child)
@staticmethod
def out_child(child, writer):
child.__c__(writer)
class Comment(Node):
def __init__(self, text):
super(Comment, self).__init__(
val = "/*@s" + text.replace(" ", "@s") + "@s*/"
)
class Label(CNode):
def __init__(self, name):
super(Label, self).__init__()
self.name = name
def __c__(self, writer):
# A label must be written without an indent.
writer.save_indent()
writer.write(self.name + ":")
writer.load_indent()
class NewLine(Node):
pass
class MacroBranch(Node):
""" MacroBranch describes construction like MACRO(x, y) { ... } """
__node__ = ("children", "macro_call")
__type_references__ = ("macro_call",)
def __init__(self, macro_call):
super(MacroBranch, self).__init__()
self.macro_call = macro_call
def __c__(self, writer):
self.macro_call.__c__(writer)
writer.line("@b{")
self.out_children(writer)
writer.write("}")
class LoopWhile(CNode):
__node__ = ("children", "cond")
__type_references__ = ("cond",)
def __init__(self, cond):
super(LoopWhile, self).__init__()
self.cond = cond
def __c__(self, writer):
writer.write("while (")
self.cond.__c__(writer)
writer.line(")@b{")
self.out_children(writer)
writer.write("}")
class LoopDoWhile(CNode):
__node__ = ("children", "cond")
__type_references__ = ("cond",)
def __init__(self, cond):
super(LoopDoWhile, self).__init__()
self.cond = cond
def __c__(self, writer):
writer.line("do@b{")
self.out_children(writer)
writer.write("}@bwhile@b(")
self.cond.__c__(writer)
writer.write(");")
class LoopFor(CNode):
__node__ = ("children", "init", "cond", "step")
__type_references__ = ("init", "cond", "step")
def __init__(self, init = None, cond = None, step = None):
super(LoopFor, self).__init__()
self.init = init
self.cond = cond
self.step = step
def __c__(self, writer):
writer.write("for@b(")
if self.init is not None:
self.init.__c__(writer)
writer.write(";")
if self.cond is not None:
writer.write("@b")
self.cond.__c__(writer)
writer.write(";")
if self.step is not None:
writer.write("@b")
self.step.__c__(writer)
writer.line(")@b{")
self.out_children(writer)
writer.write("}")
class BranchIf(CNode):
__node__ = ("children", "cond", "else_blocks")
__type_references__ = ("cond", "else_blocks")
def __init__(self, cond):
super(BranchIf, self).__init__()
self.cond = cond
self.else_blocks = []
def add_else(self, else_bl):
self.else_blocks.append(else_bl)
def __call__(self, *children_and_elses):
for ce in children_and_elses:
if isinstance(ce, BranchElse):
self.add_else(ce)
else:
self.add_child(ce)
return self
def __c__(self, writer):
writer.write("if@b(")
self.cond.__c__(writer)
writer.line(")@b{")
self.out_children(writer)
for e in self.else_blocks:
e.__c__(writer)
writer.write("}")
class BranchElse(CNode):
""" BranchElse must be added to parent BranchIf node using `add_else`. """
__node__ = ("children", "cond")
__type_references__ = ("cond",)
def __init__(self, cond = None):
super(BranchElse, self).__init__()
self.cond = cond
def __c__(self, writer):
if self.cond is not None:
writer.write("}@belse@bif@b(")
self.cond.__c__(writer)
writer.line(")@b{")
else:
writer.line("}@belse@b{")
self.out_children(writer)
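# Hedged sketch (not part of the original file): BranchIf.__call__ routes
# BranchElse arguments to add_else() and everything else to add_child(), so an
# if/else chain can be built in one expression (`cond`, `on_true` and `on_false`
# are placeholder nodes here):
#
#     BranchIf(cond)(
#         on_true,
#         BranchElse()(on_false),
#     )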
class BranchSwitch(CNode):
__node__ = ("children", "var")
__type_references__ = ("var",)
def __init__(self, var,
add_break_in_default = True,
cases = [],
child_indent = False,
separate_cases = False
):
super(BranchSwitch, self).__init__(indent_children = child_indent)
self.default_case = None
self.add_break_in_default = add_break_in_default
self.var = var
self.separate_cases = separate_cases
self.add_cases(cases)
def add_child(self, case):
if isinstance(case, SwitchCaseDefault):
if self.default_case:
raise ValueError("Multiple default labels in one switch")
self.default_case = case
self.children.append(case)
def add_cases(self, cases):
for case in cases:
self.add_child(case)
def __call__(self, *cases):
self.add_cases(cases)
return self
def __c__(self, writer):
if not self.default_case:
self.add_child(SwitchCaseDefault(self.add_break_in_default))
if self.separate_cases and self.children:
self._add_empty_lines(self.children)
writer.write("switch@b(")
self.var.__c__(writer)
writer.line(")@b{")
self.out_children(writer)
writer.write("}")
@staticmethod
def _add_empty_lines(children):
new_ch = [ children[0] ]
need_nl = not isinstance(new_ch[0], NewLine)
for ch in children[1:]:
is_not_nl = not isinstance(ch, NewLine)
if need_nl and is_not_nl:
new_ch.append(NewLine())
new_ch.append(ch)
need_nl = is_not_nl
children[:] = new_ch
class SwitchCase(CNode):
def __init__(self, const, add_break = True):
super(SwitchCase, self).__init__()
self.add_break = add_break
if isinstance(const, integer_types):
const = CINT(const)
elif isinstance(const, tuple):
const = CaseRange(*const)
self.const = const
def __c__(self, writer):
if ( self.add_break
and ( self.children
and not isinstance(self.children[-1], Break)
or not self.children
)
):
self.add_child(Break())
writer.write("case@b")
self.const.__c__(writer)
if DeclarationSearcher(self).visit().have_declaration:
writer.line(":@b{")
self.out_children(writer)
self.new_line = "}"
else:
writer.line(":")
self.out_children(writer)
self.new_line = None
class SwitchCaseDefault(CNode):
def __init__(self, add_break = True):
super(SwitchCaseDefault, self).__init__()
self.add_break = add_break
def __c__(self, writer):
if ( self.add_break
and ( self.children
and not isinstance(self.children[-1], Break)
or not self.children
)
):
self.add_child(Break())
if DeclarationSearcher(self).visit().have_declaration:
writer.line("default:@b{")
self.out_children(writer)
self.new_line = "}"
else:
writer.line("default:")
self.out_children(writer)
self.new_line = None
class StrConcat(CNode):
def __init__(self, *args, **kw_args):
super(StrConcat, self).__init__(children = args)
self.delim = kw_args.get("delim", "")
def __c__(self, writer):
writer.join(self.delim, self.children, self.out_child)
class SemicolonPresence(CNode):
"SemicolonPresence class is used to decide when to print semicolon."
def __init__(self, *args, **kw_args):
kw_args["new_line"] = ";"
super(SemicolonPresence, self).__init__(*args, **kw_args)
class Break(SemicolonPresence):
def __init__(self):
super(Break, self).__init__(val = "break")
class Call(SemicolonPresence):
def __init__(self, func, *args):
if isinstance(func, str):
func = Type[func]
elif not isinstance(func, (Variable, Function, CNode)):
raise ValueError(
"Invalid type of func in Call: " + type(func).__name__
)
super(Call, self).__init__(children = (func,) + args)
@property
def func(self):
return self.children[0]
@property
def args(self):
return self.children[1:]
def __c__(self, writer):
self.func.__c__(writer)
writer.write("(@a")
writer.join(",@s", self.args, self.out_child)
writer.write("@c)")
class Declare(SemicolonPresence):
def __init__(self, *variables):
super(Declare, self).__init__(children = variables)
def iter_variables(self):
for child in self.children:
if isinstance(child, OpDeclareAssign):
yield child.variable
else:
yield child
def add_child(self, child):
if isinstance(child, OpDeclareAssign):
if not isinstance(child.children[0], Variable):
raise TypeError(
"Wrong child type: expected Variable"
)
var = child.children[0]
elif isinstance(child, Variable):
var = child
else:
raise TypeError(
"Wrong child type: expected Variable or OpDeclareAssign"
)
if self.children:
first_child = self.children[0]
if isinstance(first_child, OpDeclareAssign):
v = first_child.children[0]
else:
v = first_child
if ( v.full_deref != var.full_deref
or v.static != var.static
or v.const != var.const
):
raise TypeError("All variables in Declare must have the same"
" type and qualifiers"
)
super(Declare, self).add_child(child)
def __c__(self, writer):
child = self.children[0]
if isinstance(child, OpDeclareAssign):
v = child.children[0]
else:
v = child
if v.static:
writer.write("static@b")
if v.const:
writer.write("const@b")
writer.write(v.full_deref.c_name + "@b" + v.asterisks)
self._write_child(child, writer)
for child in self.children[1:]:
if isinstance(child, OpDeclareAssign):
v = child.children[0]
else:
v = child
writer.write(",@s" + v.asterisks)
self._write_child(child, writer)
@staticmethod
def _write_child(child, writer):
if isinstance(child, Variable):
if child.array_size is not None:
if not child.used:
writer.write("__attribute__((unused))@b")
child.__c__(writer)
writer.write("[%d]" % child.array_size)
else:
child.__c__(writer)
if not child.used:
writer.write("@b__attribute__((unused))")
if child.initializer:
writer.write("@b=@s")
writer.write(child.type.gen_usage_string(child.initializer))
else:
child.__c__(writer)
class MCall(SemicolonPresence):
__type_references__ = ("type",)
def __init__(self, macro, *args):
super(MCall, self).__init__(children = args)
self.type = Type[macro]
def __c__(self, writer):
writer.write(self.type.c_name)
if self.children:
writer.write("(@a")
writer.join(",@s", self.children, self.out_child)
writer.write("@c)")
class Return(SemicolonPresence):
def __init__(self, arg = None):
super(Return, self).__init__()
if arg is not None:
self.val = "return" + "@b"
self.add_child(arg)
else:
self.val = "return"
def __c__(self, writer):
writer.write(self.val)
if self.children:
self.children[0].__c__(writer)
class Goto(SemicolonPresence):
def __init__(self, label):
super(Goto, self).__init__(val = "goto@b" + label.name)
class Operator(SemicolonPresence):
prefix = ""
delim = "@s"
suffix = ""
def __init__(self, *args, **kw_args):
self.prior = op_priority[type(self)]
super(Operator, self).__init__(children = args)
self.parenthesis = kw_args.get("parenthesis", False)
def add_child(self, child):
super(Operator, self).add_child(child)
if isinstance(child, Operator):
if self.prior < child.prior:
child.parenthesis = True
def __c__(self, writer):
if self.parenthesis:
writer.write("(")
writer.write(self.prefix)
writer.join(self.delim, self.children, self.out_child)
writer.write(self.suffix)
if self.parenthesis:
writer.write(")")
class OpIndex(Operator):
def __init__(self, var, index):
super(OpIndex, self).__init__(var, index)
self.delim = "["
self.suffix = "]"
class OpSDeref(Operator):
__type_references__ = ("struct",)
def __init__(self, value, field):
super(OpSDeref, self).__init__(value)
if not isinstance(field, str):
raise ValueError(
"Invalid type of field in OpSDeref: " + type(field).__name__
)
self.field = field
struct = value.type
# Note, pointer nesting must be at most 1.
if isinstance(struct, Pointer):
struct = struct.type
if OPSDEREF_FROM_DEFINITION:
struct = struct.definition
# for type collection
self.struct = struct
try:
struct.fields[field]
except KeyError:
raise RuntimeError('Structure "%s" has no field "%s"' % (
struct, field
))
@lazy
def type(self):
return self.struct.fields[self.field].type
@property
def suffix(self):
if isinstance(self.container.type, Pointer):
return "->" + self.field
else:
return "." + self.field
@property
def container(self):
return self.children[0]
class UnaryOperator(Operator):
def __init__(self, op_str, arg1, suffix_op = False):
super(UnaryOperator, self).__init__(arg1)
if suffix_op:
self.suffix = op_str
else:
self.prefix = op_str
class OpInc(UnaryOperator):
def __init__(self, var):
super(OpInc, self).__init__("++", var, suffix_op = True)
class OpDec(UnaryOperator):
def __init__(self, var):
super(OpDec, self).__init__("--", var, suffix_op = True)
OpPostDec = OpDec
OpPostInc = OpInc
class OpPreDec(UnaryOperator):
def __init__(self, var):
super(OpPreDec, self).__init__("--", var, suffix_op = False)
class OpPreInc(UnaryOperator):
def __init__(self, var):
super(OpPreInc, self).__init__("++", var, suffix_op = False)
class OpCast(UnaryOperator):
__type_references__ = ("type",)
def __init__(self, type_name, arg):
super(OpCast, self).__init__("(" + type_name + ")", arg)
self.type = Type[type_name]
class OpSizeOf(UnaryOperator):
def __init__(self, arg):
super(OpSizeOf, self).__init__("sizeof(", arg, suffix_op = False)
self.suffix = ")"
class OpAddr(UnaryOperator):
def __init__(self, arg1):
super(OpAddr, self).__init__("&", arg1)
class OpDeref(UnaryOperator):
def __init__(self, arg1):
super(OpDeref, self).__init__("*", arg1)
class OpLogNot(UnaryOperator):
def __init__(self, arg1):
super(OpLogNot, self).__init__("!", arg1)
class OpNot(UnaryOperator):
def __init__(self, arg1):
super(OpNot, self).__init__("~", arg1)
class BinaryOperator(Operator):
def __init__(self, op_str, arg1, arg2, parenthesis):
super(BinaryOperator, self).__init__(arg1, arg2,
parenthesis = parenthesis
)
self.delim = "@b" + op_str + "@s"
class OpAssign(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpAssign, self).__init__("=", arg1, arg2, parenthesis)
class OpDeclareAssign(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpDeclareAssign, self).__init__("=", arg1, arg2, parenthesis)
@staticmethod
def out_child(child, writer):
if isinstance(child, Variable):
if child.array_size is not None:
if not child.used:
writer.write("__attribute__((unused))@b")
child.__c__(writer)
writer.write("[%d]" % child.array_size)
else:
child.__c__(writer)
if not child.used:
writer.write("@b__attribute__((unused))")
else:
child.__c__(writer)
@property
def variable(self):
return self.children[0]
class OpCombAssign(BinaryOperator):
def __init__(self, arg1, arg2, op_str, parenthesis = False):
super(OpCombAssign, self).__init__(op_str + "=",
arg1, arg2, parenthesis
)
class OpAdd(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpAdd, self).__init__("+", arg1, arg2, parenthesis)
class OpSub(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpSub, self).__init__("-", arg1, arg2, parenthesis)
class OpMul(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpMul, self).__init__("*", arg1, arg2, parenthesis)
class OpDiv(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpDiv, self).__init__("/", arg1, arg2, parenthesis)
class OpRem(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpRem, self).__init__("%", arg1, arg2, parenthesis)
class OpAnd(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpAnd, self).__init__("&", arg1, arg2, parenthesis)
class OpOr(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpOr, self).__init__("|", arg1, arg2, parenthesis)
class OpXor(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpXor, self).__init__("^", arg1, arg2, parenthesis)
class OpLShift(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpLShift, self).__init__("<<", arg1, arg2, parenthesis)
class OpRShift(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpRShift, self).__init__(">>", arg1, arg2, parenthesis)
class OpLogAnd(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpLogAnd, self).__init__("&&", arg1, arg2, parenthesis)
class OpLogOr(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpLogOr, self).__init__("||", arg1, arg2, parenthesis)
class OpEq(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpEq, self).__init__("==", arg1, arg2, parenthesis)
class OpNEq(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpNEq, self).__init__("!=", arg1, arg2, parenthesis)
class OpGE(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpGE, self).__init__(">=", arg1, arg2, parenthesis)
class OpLE(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpLE, self).__init__("<=", arg1, arg2, parenthesis)
class OpGreater(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpGreater, self).__init__(">", arg1, arg2, parenthesis)
class OpLess(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(OpLess, self).__init__("<", arg1, arg2, parenthesis)
class CaseRange(BinaryOperator):
def __init__(self, arg1, arg2, parenthesis = False):
super(CaseRange, self).__init__("...", arg1, arg2, parenthesis)
op_priority = {
CaseRange: 1,
OpIndex: 1,
OpSDeref: 1,
OpDec: 1,
OpInc: 1,
OpPreDec: 1,
OpPreInc: 1,
OpDeref: 2,
OpAddr: 2,
OpNot: 2,
OpLogNot: 2,
OpCast: 2,
OpSizeOf: 2,
OpMul: 3,
OpDiv: 3,
OpRem: 3,
OpAdd: 4,
OpSub: 4,
OpLShift: 5,
OpRShift: 5,
OpGE: 6,
OpLE: 6,
OpGreater: 6,
OpLess: 6,
OpEq: 7,
OpNEq: 7,
OpAnd: 8,
OpXor: 9,
OpOr: 10,
OpLogAnd: 11,
OpLogOr: 12,
OpAssign: 13,
OpDeclareAssign: 13,
OpCombAssign: 13,
}
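# Hedged note (not part of the original file): Operator.add_child() compares these
# priorities and sets `parenthesis = True` on any child operator whose priority
# number is larger (binds more loosely) than its parent's, so for example
#
#     OpMul(OpAdd(1, 2), 3)
#
# renders the addition inside parentheses, roughly "(1 + 2) * 3".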
|
the-stack_106_25154 | import sys
import sqlite3
import datetime
from random import randint
connection = None
cursor = None
class Interface:
def __init__(self):
self.exit_app = False # set to TRUE when user attempts to quit application
self.logged_in = False # set to TRUE when user has successfully logged in
self.get_database()
self.run()
def get_database(self):
self.database = input("Name of database: ")
def run(self):
while not self.exit_app:
self.login()
while self.logged_in:
if self.user_type == 'a':
self.agent()
elif self.user_type == 'o':
self.officer()
print("You have been logged out.")
print("You have exited the application.")
def login(self):
global connection, cursor
while not self.exit_app and not self.logged_in:
print("\nPlease insert valid username and password\n")
username = input("Username: ")
password = input("Password: ")
connection = sqlite3.connect(self.database) # establish connection to database
cursor = connection.cursor()
login_query = ''' SELECT *
FROM users
WHERE uid=:user_field AND pwd=:pass_field;
'''
cursor.execute(login_query, {"user_field":username, "pass_field":password})
found = cursor.fetchone() # find user in database
if found:
self.user_type = found[2]
self.user_city = found[5]
self.logged_in = True
else:
print("\nInvalid username and/or password\nPlease try again")
def agent(self):
print('1 - Register a birth',
'2 - Register a marriage',
'3 - Renew a vehicle registration',
'4 - Process a bill of sale',
'5 - Process a payment',
'6 - Get a driver abstract',
'X - Logout',
'XX - Exit Application', sep='\n')
selection = input("\nSelect an option from 1-6, X, XX: ")
if selection == '1':
self.register_birth()
elif selection == '2':
self.register_marriage()
elif selection == '3':
self.renew_registration()
elif selection == '4':
self.process_sale()
elif selection == '5':
self.process_payment()
elif selection == '6':
self.get_driver_abstract()
elif selection == 'X':
self.logged_in = False
elif selection == "XX":
self.logged_in = False
self.exit_app = True
else:
print("\nInvalid Input\nPlease try again\n")
def officer(self):
print('1 - Issue a ticket',
'2 - Find a car owner',
'X - Logout',
'XX - Exit Application', sep='\n')
selection = input("\nSelect an option from 1-2, X, XX: ")
if selection == '1':
self.issue_ticket()
elif selection == '2':
self.find_owner()
elif selection == 'X':
self.logged_in = False
elif selection == "XX":
self.logged_in = False
self.exit_app = True
else:
print("\nInvalid Input\nPlease try again\n")
def register_birth(self):
insert_birth_query = ''' INSERT INTO births values
(:r_no, :f_name, :l_name, :r_date, :r_place, :g, :f_fname, :f_lname, :m_fname, :m_lname);
'''
insert_person_query = ''' INSERT INTO persons VALUES
(:fname_field, :lname_field, :bdate_field, :bplace_field, :address_field, :phone_field);
'''
check_person_query = ''' SELECT *
FROM persons
WHERE fname=:fname_field COLLATE NOCASE AND lname=:lname_field COLLATE NOCASE;
'''
b_fname = input("Enter newborn's first name: ")
b_lname = input("Enter newborn's last name: ")
cursor.execute(check_person_query, {"fname_field":b_fname, "lname_field":b_lname})
found = cursor.fetchall()
while found:
print("Record already exists. Please try again.")
b_fname = input("Enter newborn's first name: ")
b_lname = input("Enter newborn's last name: ")
cursor.execute(check_person_query, {"fname_field":b_fname, "lname_field":b_lname})
found = cursor.fetchall()
gender = input("Enter newborn's gender (M/F): ")
bir_date = input("Enter newborn's birth date (YYYY-MM-DD): ")
bir_place = self.user_city
m_fname = input("Enter mother's first name: ")
m_lname = input("Enter mother's last name: ")
f_fname = input("Enter father's first name: ")
f_lname = input("Enter father's last name: ")
reg_date = str(datetime.date.today())
while True: # generate unique registration number
reg_no = randint(0, sys.maxsize)
cursor.execute("SELECT * FROM births where regno=?;", (reg_no,))
found = cursor.fetchone()
if not found: # if registration number is unique
break
cursor.execute(check_person_query, {"fname_field":m_fname, "lname_field":m_lname})
found = cursor.fetchall()
if not found: # no person found matching 'Mother'
print("Mother not found. Please provide the following information: \n")
bdate = input("Birth Date: ")
if bdate == "":
bdate = "NULL"
bplace = input("Birth Place: ")
if bplace == "":
bplace = "NULL"
address = input("Address: ")
if address == "":
address = "NULL"
phone = input("Contact Number: ")
if phone == "":
phone = "NULL"
cursor.execute(insert_person_query, {"fname_field":m_fname, "lname_field":m_lname, "bdate_field":bdate, "bplace_field":bplace, "address_field":address, "phone_field":phone})
connection.commit()
cursor.execute("SELECT * FROM persons WHERE fname=:m_fname COLLATE NOCASE AND lname=:m_lname COLLATE NOCASE;", {'m_fname':m_fname,'m_lname':m_lname})
found = cursor.fetchone()
m_address = found[4]
m_phone = found[5]
cursor.execute(check_person_query, {"fname_field":f_fname, "lname_field":f_lname})
found = cursor.fetchall()
if not found: # no person found matching 'Father'
print("Father not found. Please provide the following information: \n")
bdate = input("Birth Date: ")
if bdate == "":
bdate = "NULL"
bplace = input("Birth Place: ")
if bplace == "":
bplace = "NULL"
address = input("Address: ")
if address == "":
address = "NULL"
phone = input("Contact Number: ")
if phone == "":
phone = "NULL"
cursor.execute(insert_person_query, {"fname_field":f_fname, "lname_field":f_lname, "bdate_field":bdate, "bplace_field":bplace, "address_field":address, "phone_field":phone})
connection.commit()
cursor.execute(insert_birth_query, {"r_no":reg_no, "f_name":b_fname, "l_name":b_lname, "r_date":reg_date, "r_place":bir_place,
"g":gender, "f_fname": f_fname, "f_lname":f_lname, "m_fname": m_fname, "m_lname":m_lname})
connection.commit()
cursor.execute(insert_person_query, {"fname_field":b_fname, "lname_field":b_lname, "bdate_field":bir_date, "bplace_field":bir_place, "address_field":m_address, "phone_field":m_phone})
connection.commit()
print("Birth has been successfully recorded!\n")
def register_marriage(self):
p1_fname = input("Enter Partner 1 First Name: ")
p1_lname = input("Enter Partner 1 Last Name: ")
p2_fname = input("Enter Partner 2 First Name: ")
p2_lname = input("Enter Partner 2 Last Name: ")
while True: # generate unique registration number
reg_no = randint(0, sys.maxsize)
cursor.execute("SELECT * FROM marriages where regno=?;", (reg_no,))
found = cursor.fetchone()
if not found: # if registration number is unique
break
reg_date = str(datetime.date.today())
reg_place = self.user_city
check_person_query = ''' SELECT *
FROM persons
WHERE fname=:fname_field COLLATE NOCASE AND lname=:lname_field COLLATE NOCASE;
'''
insert_person_query = ''' INSERT INTO persons VALUES
(:fname_field, :lname_field, :bdate_field, :bplace_field, :address_field, :phone_field);
'''
insert_marriage_query = ''' INSERT INTO marriages VALUES
(:reg_no_field, :reg_date_field, :reg_place_field, :p1_fname_field, :p1_lname_field, :p2_fname_field, :p2_lname_field);
'''
cursor.execute(check_person_query, {"fname_field":p1_fname, "lname_field":p1_lname})
found = cursor.fetchall()
if not found: # no person found matching 'Partner 1'
print("Partner 1 not found. Please provide the following information: \n")
bdate = input("Birth Date: ")
if bdate == "":
bdate = "NULL"
bplace = input("Birth Place: ")
if bplace == "":
bplace = "NULL"
address = input("Address: ")
if address == "":
address = "NULL"
phone = input("Contact Number: ")
if phone == "":
phone = "NULL"
cursor.execute(insert_person_query, {"fname_field":p1_fname, "lname_field":p1_lname, "bdate_field":bdate, "bplace_field":bplace, "address_field":address, "phone_field":phone})
connection.commit()
cursor.execute(check_person_query, {"fname_field":p2_fname, "lname_field":p2_lname})
found = cursor.fetchall()
if not found: # no person found matching 'Partner 2'
print("Partner 2 not found. Please provide the following information: \n")
bdate = input("Birth Date: ")
if bdate == "":
bdate = "NULL"
bplace = input("Birth Place: ")
if bplace == "":
bplace = "NULL"
address = input("Address: ")
if address == "":
address = "NULL"
phone = input("Contact Number: ")
if phone == "":
phone = "NULL"
cursor.execute(insert_person_query, {"fname_field":p2_fname, "lname_field":p2_lname, "bdate_field":bdate, "bplace_field":bplace, "address_field":address, "phone_field":phone})
connection.commit()
cursor.execute(insert_marriage_query, {"reg_no_field":reg_no, "reg_date_field":reg_date, "reg_place_field":reg_place,
"p1_fname_field":p1_fname, "p1_lname_field":p1_lname, "p2_fname_field":p2_fname, "p2_lname_field":p2_lname})
connection.commit()
print("Marriage has been successfully recorded!\n")
def renew_registration(self):
reg_no = int(input("Enter registration number: "))
cursor.execute("SELECT * FROM registrations WHERE regno=?;", (reg_no,))
found = cursor.fetchone()
while not found:
print("Invalid registration number. Please try again.")
reg_no = int(input("Enter registration number: "))
cursor.execute("SELECT * FROM registrations WHERE regno=?;", (reg_no,))
found = cursor.fetchone()
curr_expiry = found[2]
today = str(datetime.date.today())
if (curr_expiry <= today): # if registration has expired
cursor.execute("UPDATE registrations SET expiry=date('now', '+1 year') WHERE regno=?", (reg_no,))
connection.commit()
else:
cursor.execute("UPDATE registrations SET expiry=date(expiry, '+1 year') WHERE regno=?", (reg_no,))
connection.commit()
print("Registration has been renewed!\n")
def process_sale(self):
vehicle_id = int(input("Enter Vehicle Identification Number (VIN): "))
cursor.execute("SELECT * FROM registrations WHERE vin=?;", (vehicle_id,))
found = cursor.fetchone()
while not found:
print("Invalid VIN. Please try again.")
vehicle_id = int(input("Enter Vehicle Identification Number (VIN): "))
cursor.execute("SELECT * FROM registrations WHERE vin=?;", (vehicle_id,))
found = cursor.fetchone()
curr_owner_fname = input("Enter Current Owner's First Name: ")
curr_owner_lname = input("Enter Current Owner's Last Name: ")
new_owner_fname = input("Enter New Owner's First Name: ")
new_owner_lname = input("Enter New Owner's Last Name: ")
plate_no = input("Enter Plate Number: ")
if found[5]!=curr_owner_fname or found[6]!=curr_owner_lname:
print("Incorrect current owner. Please try again.")
return self.process_sale()
reg_no = found[0]
cursor.execute("UPDATE registrations SET expiry=date('now') WHERE regno=?;", (reg_no,)) # update current registration to expire today
connection.commit()
while True: # generate unique registration number
reg_no = randint(0, sys.maxsize)
cursor.execute("SELECT * FROM registrations where regno=?;", (reg_no,))
found = cursor.fetchone()
if not found: # if registration number is unique
break
insert_reg_query = ''' INSERT INTO registrations VALUES
(:reg_no_field, date('now'), date('now', '+1 year'), :plate_field, :vin_field, :fname_field, :lname_field);
'''
cursor.execute(insert_reg_query, {"reg_no_field":reg_no, "plate_field":plate_no, "vin_field":vehicle_id, "fname_field":new_owner_fname, "lname_field":new_owner_lname})
connection.commit()
print("Sale has been processed!\n")
def process_payment(self):
ticket_no = int(input("Enter the ticket number: "))
cursor.execute("SELECT * FROM tickets WHERE tno=?;", (ticket_no,))
found = cursor.fetchone()
# check for valid ticket number
while not found:
print("Invalid Ticket Number. Please try again.")
ticket_no = int(input("Enter the ticket number: "))
cursor.execute("SELECT * FROM tickets WHERE tno=?;", (ticket_no,))
found = cursor.fetchone()
fine = found[2]
pay_date = str(datetime.date.today())
# check for multiple payments in same day
cursor.execute("SELECT * FROM payments WHERE tno=:ticket_no_field AND pdate=:date_field;", {"ticket_no_field":ticket_no, "date_field":pay_date})
found = cursor.fetchone()
if found:
print("Cannot process multiple payments on same day. Please try again tomorrow.")
sys.exit()
insert_amt_query = ''' INSERT INTO payments
VALUES (:t_no, :t_date, :t_amt );
'''
while True:
amt_paying = int(input("Please enter the payment amount: "))
if amt_paying > fine:
amt_paying = int(input("Error! Amount Remaining:{}.\n"))
else:
break
cursor.execute("SELECT SUM(amount) FROM payments WHERE tno=:t_no GROUP BY tno;", {"t_no":ticket_no})
found = cursor.fetchone()
# if first payment
if not found:
print("Thank you! Amount Remaining:{}.\n".format(fine-amt_paying))
cursor.execute(insert_amt_query, {"t_no":ticket_no,"t_date":pay_date,"t_amt":amt_paying})
connection.commit()
# if partial payment has been made before
else:
amt_paid = found[0]
while (amt_paid + amt_paying > fine):
print("Error! Amount Remaining:{}.\n".format(fine - amt_paid))
amt_paying = int(input("Please enter the payment amount: "))
sum_amt = amt_paid + amt_paying
cursor.execute(insert_amt_query, {"t_no":ticket_no,"t_date":pay_date,"t_amt":amt_paying})
connection.commit()
if sum_amt < fine:
print("Thank you! Amount Remaining:{}.\n".format(fine - sum_amt))
elif sum_amt == fine:
print("You have paid off your ticket.")
def get_driver_abstract(self):
p_fname = input("Driver's First name: ")
p_lname = input("Driver's Last name: ")
# finds the number of tickets
cursor.execute("SELECT COUNT(*)\
FROM registrations r, tickets t\
WHERE r.fname = ? COLLATE NOCASE AND r.lname = ? COLLATE NOCASE\
AND r.regno = t.regno;", (p_fname, p_lname))
found = cursor.fetchone()
num_tickets = found[0]
# finds the number of demerit points
cursor.execute("SELECT COUNT(*)\
FROM demeritNotices\
WHERE fname=:f_name COLLATE NOCASE AND lname=:l_name COLLATE NOCASE;", {"f_name":p_fname, "l_name":p_lname})
found = cursor.fetchone()
num_dmp = found[0]
# finds the sum of demerit points of last 2years, if nothing found then sets to 0
cursor.execute("SELECT SUM(points)\
FROM demeritNotices\
WHERE fname=:f_name COLLATE NOCASE AND lname=:l_name COLLATE NOCASE\
AND ddate >= date('now','-2 years');", {"f_name":p_fname, "l_name":p_lname})
found = cursor.fetchone()
dmp_2years = found[0] if found and found[0] is not None else 0
# finds the sum of demerit points of lifetime, if nothing found then sets sum to 0
cursor.execute("SELECT SUM(points)\
FROM demeritNotices\
WHERE fname=:f_name COLLATE NOCASE AND lname=:l_name COLLATE NOCASE;", {"f_name":p_fname, "l_name":p_lname})
found = cursor.fetchone()
dmp_lifetime = found[0] if found and found[0] is not None else 0
print("Driver's Abstract\n")
print ("Number of Tickets {} | Number of Demerit Notices {} | Demerit Points (Last 2 years) {} | Demerit Points (Lifetime) {}".format(num_tickets, num_dmp, dmp_2years, dmp_lifetime))
check = input("Would you like to see the tickets associated with this driver (Y/N): ")
if check == 'Y':
cursor.execute("SELECT t.tno, t.vdate, t.violation, t.fine, r.regno, v.make, v.model\
FROM tickets AS t\
LEFT OUTER JOIN registrations AS r on t.regno = r.regno\
LEFT OUTER JOIN vehicles AS v on r.vin = v.vin\
WHERE r.fname =:f_name and r.lname =:l_name\
ORDER by t.vdate DESC;", {"f_name":p_fname, "l_name":p_lname})
found = cursor.fetchall()
first = 0
last = 5
# less than 5 tickets
if num_tickets <= 5:
for t_no in range(first, min(last, num_tickets)):
print( "Ticket Number: {} | Violation Date:{} | Violation: {} | Amount: {} | Registration No.: {} | Make: {} | Model : {}"
.format(found[t_no][0], found[t_no][1], found[t_no][2], found[t_no][3], found[t_no][4], found[t_no][5], found[t_no][6]))
# more than 5 tickets
# display 5 at a time
else:
while True:
for t_no in range(first, last):
print( "Ticket Number: {} | Violation Date:{} | Violation: {} | Amount: {} | Registration No.: {} | Make: {} | Model : {}"
.format(found[t_no][0], found[t_no][1], found[t_no][2], found[t_no][3], found[t_no][4], found[t_no][5], found[t_no][6]))
check_more = input("Would you like to see more tickets (Y/N): ")
if check_more == 'Y':
first = last
if (last + 5 < num_tickets):
last += 5
else:
last = num_tickets
else:
break
elif check == 'N':
sys.exit()
def issue_ticket(self):
reg_no = input("Please enter the registration number: ")
cursor.execute("SELECT * FROM registrations where regno=:reg", {"reg":reg_no})
found = cursor.fetchone()
while not found:
print("Invalid Registration Number. Please try again.")
reg_no = input("Please enter the registration number: ")
cursor.execute("SELECT * FROM registrations where regno=:reg", {"reg":reg_no})
found = cursor.fetchone()
search_registration_query = ''' SELECT r.fname, r.lname, v.make, v.model, v.year, v.color
FROM vehicles as v, registrations as r
WHERE r.regno=:reg_no_field
AND r.vin = v.vin;
'''
cursor.execute(search_registration_query, {"reg_no_field":reg_no})
found = cursor.fetchone()
print( "Driver Name: {} {} | Make : {} | Model : {} | Year : {} | Color : {}"
.format(found[0], found[1], found[2], found[3], found[4], found[5]))
v_date = input("Please enter the violation date (YYYY-MM-DD): ")
if not v_date:
v_date = str(datetime.date.today())
v_text = input("Enter description: ")
v_fine = int(input("Enter fine amount: "))
while True: # generate unique ticket number
t_no = randint(0, sys.maxsize)
cursor.execute("SELECT * FROM tickets where tno=?;", (t_no,))
found = cursor.fetchone()
if not found: # if ticket number is unique
break
cursor.execute("INSERT INTO tickets VALUES (?,?,?,?,?)", (t_no, reg_no, v_fine, v_text, v_date))
connection.commit()
print("Ticket issued successfully!")
def find_owner(self):
make_input = input("Enter the make of the car: ")
model_input = input("Enter the model of the car: ")
year_input = input("Enter the model year of the car: ")
color_input = input("Enter the color of the car: ")
plate_input = input("Enter the plate number of the car: ")
search_query = ''' SELECT v.make, v.model, v.year, v.color, r.plate, r.regdate, r.expiry, r.fname, r.lname
FROM registrations r, vehicles v
WHERE r.vin = v.vin'''
# add to query if field has been provided
if make_input != "":
search_query += " AND v.make='{}'".format(make_input)
if model_input != "":
search_query += " AND v.model='{}'".format(model_input)
if year_input != "":
search_query += " AND v.year='{}'".format(year_input)
if color_input != "":
search_query += " AND v.color='{}'".format(color_input)
if plate_input != "":
search_query += " AND r.plate='{}'".format(plate_input)
search_query += " COLLATE NOCASE;"
cursor.execute(search_query)
found = cursor.fetchall()
matches = len(found)
# if number of matches are more than 4
if (matches > 4):
for i in range(len(found)):
print("{0} - {1} | {2} | {3} | {4} | {5}\n".format(i+1, found[i][0], found[i][1], found [i][2], found [i][3], found [i][4]))
# allow user to select
user_select = int(matches + 1)
while (user_select > matches or user_select <= 0):
user_select = int(input("Choose from 1 - {}: ".format(matches)))
if (user_select > matches or user_select <= 0):
print("Invalid Input. Please try again.\n")
index = user_select - 1
print( "Make: {0} | Model: {1} | Year: {2} | Color: {3} | Plate: {4} | Registration Date: {5} | Expiry Date: {6} | Owner Name: {7} {8}\n"
.format(found[index][0], found[index][1], found [index][2], found [index][3], found [index][4], found[index][5], found[index][6], found [index][7], found [index][8]))
# if number of matches is between 1-4
elif (matches > 0 and matches <= 4):
for i in range(len(found)):
print( "Make: {0} | Model: {1} | Year: {2} | Color: {3} | Plate: {4} | Registration Date: {5} | Expiry Date: {6} | Owner Name: {7} {8}\n"
.format(found[i][0], found[i][1], found [i][2], found [i][3], found [i][4], found[i][5], found[i][6], found [i][7], found [i][8]))
elif (matches == 0):
print("No matches found.\n")
def main():
Interface()
if __name__ == "__main__":
main() |
the-stack_106_25156 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import collections
import uuid
from eventlet import event
from eventlet import greenthread
from eventlet.support import greenlets as greenlet
from neutron.common import exceptions
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.nicira.vshield.tasks.constants import TaskState
from neutron.plugins.nicira.vshield.tasks.constants import TaskStatus
DEFAULT_INTERVAL = 1000
LOG = logging.getLogger(__name__)
def nop(task):
return TaskStatus.COMPLETED
class TaskException(exceptions.NeutronException):
def __init__(self, message=None, **kwargs):
if message is not None:
self.message = message
super(TaskException, self).__init__(**kwargs)
class InvalidState(TaskException):
message = _("Invalid state %(state)d")
class TaskStateSkipped(TaskException):
message = _("State %(state)d skipped. Current state %(current)d")
class Task():
def __init__(self, name, resource_id, execute_callback,
status_callback=nop, result_callback=nop, userdata=None):
self.name = name
self.resource_id = resource_id
self._execute_callback = execute_callback
self._status_callback = status_callback
self._result_callback = result_callback
self.userdata = userdata
self.id = None
self.status = None
self._monitors = {
TaskState.START: [],
TaskState.EXECUTED: [],
TaskState.RESULT: []
}
self._states = [None, None, None, None]
self._state = TaskState.NONE
def _add_monitor(self, action, func):
self._monitors[action].append(func)
return self
def _move_state(self, state):
self._state = state
if self._states[state] is not None:
e = self._states[state]
self._states[state] = None
e.send()
for s in range(state):
if self._states[s] is not None:
e = self._states[s]
self._states[s] = None
e.send_exception(
TaskStateSkipped(state=s, current=self._state))
def _invoke_monitor(self, state):
for func in self._monitors[state]:
try:
func(self)
except Exception:
msg = _("Task %(task)s encountered exception in %(func)s "
"at state %(state)s") % {
'task': str(self),
'func': str(func),
'state': state}
LOG.exception(msg)
self._move_state(state)
return self
def _start(self):
return self._invoke_monitor(TaskState.START)
def _executed(self):
return self._invoke_monitor(TaskState.EXECUTED)
def _update_status(self, status):
if self.status == status:
return self
self.status = status
def _finished(self):
return self._invoke_monitor(TaskState.RESULT)
def add_start_monitor(self, func):
return self._add_monitor(TaskState.START, func)
def add_executed_monitor(self, func):
return self._add_monitor(TaskState.EXECUTED, func)
def add_result_monitor(self, func):
return self._add_monitor(TaskState.RESULT, func)
def wait(self, state):
if (state < TaskState.START or
state > TaskState.RESULT or
state == TaskState.STATUS):
raise InvalidState(state=state)
if state <= self._state:
# we already passed this current state, so no wait
return
e = event.Event()
self._states[state] = e
e.wait()
def __repr__(self):
return "Task-%s-%s-%s" % (
self.name, self.resource_id, self.id)
class TaskManager():
_instance = None
_default_interval = DEFAULT_INTERVAL
def __init__(self, interval=None):
self._interval = interval or TaskManager._default_interval
# A queue to pass tasks from other threads
self._tasks_queue = collections.deque()
# A dict to store resource -> resource's tasks
self._tasks = {}
# New request event
self._req = event.Event()
# TaskHandler stopped event
self._stopped = False
# Periodic function trigger
self._monitor = None
self._monitor_busy = False
# Thread handling the task request
self._thread = None
def _execute(self, task):
"""Execute task."""
msg = _("Start task %s") % str(task)
LOG.debug(msg)
task._start()
try:
status = task._execute_callback(task)
except Exception:
msg = _("Task %(task)s encountered exception in %(cb)s") % {
'task': str(task),
'cb': str(task._execute_callback)}
LOG.exception(msg)
status = TaskStatus.ERROR
LOG.debug(_("Task %(task)s return %(status)s"), {
'task': str(task),
'status': status})
task._update_status(status)
task._executed()
return status
def _result(self, task):
"""Notify task execution result."""
try:
task._result_callback(task)
except Exception:
msg = _("Task %(task)s encountered exception in %(cb)s") % {
'task': str(task),
'cb': str(task._result_callback)}
LOG.exception(msg)
LOG.debug(_("Task %(task)s return %(status)s"),
{'task': str(task), 'status': task.status})
task._finished()
def _check_pending_tasks(self):
"""Check all pending tasks status."""
for resource_id in self._tasks.keys():
if self._stopped:
# Task manager is stopped, return now
return
tasks = self._tasks[resource_id]
# only the first task is executed and pending
task = tasks[0]
try:
status = task._status_callback(task)
except Exception:
msg = _("Task %(task)s encountered exception in %(cb)s") % {
'task': str(task),
'cb': str(task._status_callback)}
LOG.exception(msg)
status = TaskStatus.ERROR
task._update_status(status)
if status != TaskStatus.PENDING:
self._dequeue(task, True)
def _enqueue(self, task):
if task.resource_id in self._tasks:
# append to existing resource queue for ordered processing
self._tasks[task.resource_id].append(task)
else:
# put the task to a new resource queue
tasks = collections.deque()
tasks.append(task)
self._tasks[task.resource_id] = tasks
def _dequeue(self, task, run_next):
self._result(task)
tasks = self._tasks[task.resource_id]
tasks.remove(task)
if not tasks:
# no more tasks for this resource
del self._tasks[task.resource_id]
return
if run_next:
# process next task for this resource
while tasks:
task = tasks[0]
status = self._execute(task)
if status == TaskStatus.PENDING:
break
self._dequeue(task, False)
def _abort(self):
"""Abort all tasks."""
# put all tasks haven't been received by main thread to queue
# so the following abort handling can cover them
for t in self._tasks_queue:
self._enqueue(t)
self._tasks_queue.clear()
for resource_id in self._tasks.keys():
tasks = list(self._tasks[resource_id])
for task in tasks:
task._update_status(TaskStatus.ABORT)
self._dequeue(task, False)
def _get_task(self):
"""Get task request."""
while True:
for t in self._tasks_queue:
return self._tasks_queue.popleft()
self._req.wait()
self._req.reset()
def run(self):
while True:
try:
if self._stopped:
# Somehow greenlet.GreenletExit exception is ignored
# during unit-test when self._execute() is making db
# access. This makes this thread not terminating and
# stop() caller wait indefinitely. So we added a check
# here before trying to do a block call on getting a
# task from queue
break
# get a task from queue, or timeout for periodic status check
task = self._get_task()
if task.resource_id in self._tasks:
# this resource already has some tasks under processing,
# append the task to same queue for ordered processing
self._enqueue(task)
continue
try:
self._execute(task)
finally:
if task.status is None:
# The thread is killed during _execute(). To guarantee
# the task been aborted correctly, put it to the queue.
self._enqueue(task)
elif task.status != TaskStatus.PENDING:
self._result(task)
else:
self._enqueue(task)
except greenlet.GreenletExit:
break
except Exception:
LOG.exception(_("TaskManager terminated"))
break
self._monitor.stop()
if self._monitor_busy:
self._monitor.wait()
self._abort()
def add(self, task):
task.id = uuid.uuid1()
self._tasks_queue.append(task)
if not self._req.ready():
self._req.send()
return task.id
def stop(self):
if self._thread is None:
return
self._stopped = True
self._thread.kill()
self._thread.wait()
self._thread = None
def has_pending_task(self):
if self._tasks_queue:
return True
if self._tasks:
return True
return False
def show_pending_tasks(self):
for task in self._tasks_queue:
print(str(task))
for resource, tasks in self._tasks.iteritems():
for task in tasks:
print(str(task))
def count(self):
count = 0
for resource_id, tasks in self._tasks.iteritems():
count += len(tasks)
return count
def start(self, interval=None):
def _inner():
self.run()
def _loopingcall_callback():
self._monitor_busy = True
try:
self._check_pending_tasks()
except Exception:
LOG.exception(_("Exception in _check_pending_tasks"))
self._monitor_busy = False
if self._thread is not None:
return self
if interval is None or interval == 0:
interval = self._interval
self._stopped = False
self._thread = greenthread.spawn(_inner)
self._monitor = loopingcall.FixedIntervalLoopingCall(
_loopingcall_callback)
self._monitor.start(interval / 1000.0,
interval / 1000.0)
# To allow the created thread start running
greenthread.sleep(0)
return self
@classmethod
def set_default_interval(cls, interval):
cls._default_interval = interval
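# One possible way to drive TaskManager (illustrative; the callback names below are
# placeholders, not part of this module):
#     manager = TaskManager(interval=DEFAULT_INTERVAL).start()
#     task = Task('update-edge', 'edge-1', my_execute,
#                 status_callback=my_status, result_callback=my_result)
#     manager.add(task)
#     task.wait(TaskState.RESULT)   # blocks until the result monitors have run
#     manager.stop()
# Tasks sharing a resource_id are queued and executed in order; callbacks return
# TaskStatus values (PENDING keeps the task polled by the periodic status check).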
|
the-stack_106_25157 | from model.group import Group
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of groups", "file"])
except getopt.GetoptError as err:
    print(err)  # the getopt module has no usage() helper; report the parse error instead
sys.exit(2)
n = 5
f = "data/groups.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbol = string.ascii_letters + string.digits + string.punctuation + " "*10
return prefix + "".join([random.choice(symbol) for i in range(random.randrange(maxlen))])
testdata = [Group(name="", header="", footer="")] + [
Group(name=random_string("name", 10), header=random_string("header", 20), footer=random_string("footer", 20))
for i in range(n)
]
# testdata = [
# Group(name=name, header=header, footer=footer)
# for name in ["", random_string("name", 10)]
# for header in ["", random_string("header", 20)]
# for footer in ["", random_string("footer", 20)]
#
# ]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata)) |
the-stack_106_25158 | import os, sys
import cv2
import math
import numpy as np
import _pickle as cPickle
from PIL import Image
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from .utils import *
#import open3d as o3d
import matplotlib.pyplot as plt
CLASS_MAP_FOR_CATEGORY = {'bottle':1, 'bowl':2, 'camera':3, 'can':4, 'laptop':5, 'mug':6}
class SymDataset(data.Dataset):
def __init__(self, mode, category, data_dir, n_pts, m_pts, img_size, mask_size):
self.mode = mode
self.category = CLASS_MAP_FOR_CATEGORY[category]
self.data_dir = data_dir
self.n_pts = n_pts
self.m_pts = m_pts
self.img_size = img_size
self.mask_size = mask_size
assert mode in ['train', 'test']
img_list_path = []
model_file_path = []
code_file_path = []
if mode == 'train':
# sampled data to max iters 50000 for one epoch
#img_list_path = 'CAMERA/split/sym_train_' + category + '_remv_list.txt'
img_list_path = 'param/'+ category + '_remv/split/sym_train_' + category + '_remv_sampled_list.txt'
model_file_path = 'obj_models/camera_train.pkl'
code_file_path = 'param/'+ category + '_remv/' + category + '_train_remv_lat.pkl'
else:
img_list_path = 'param/'+ category + '_remv/split/sym_val_' + category + '_list.txt'
model_file_path = 'obj_models/camera_val.pkl'
code_file_path = 'param/'+ category + '_remv/' + category + '_val_remv_lat.pkl'
self.img_list = [os.path.join('CAMERA', line.rstrip('\n')) for line in open(img_list_path)]
self.length = len(self.img_list)
models = {}
with open(os.path.join(data_dir, model_file_path), 'rb') as f:
models.update(cPickle.load(f))
self.models = models
codes = {}
with open(code_file_path, 'rb') as f:
codes.update(cPickle.load(f))
self.codes = codes
self.camera_intrinsics = [577.5, 577.5, 319.5, 239.5]
self.norm_scale = 1000.0
self.xmap = np.array([[i for i in range(640)] for j in range(480)])
self.ymap = np.array([[j for i in range(640)] for j in range(480)])
self.shift_range = 0.01
self.colorjitter = transforms.ColorJitter(0.2, 0.2, 0.2, 0.05)
self.transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
self.sym_list = [1, 2, 4]
print('CAMERA dataset ' + self.mode + ': {} images found.'.format(self.length))
def __len__(self):
return self.length
def __getitem__(self, index):
img_path = os.path.join(self.data_dir, self.img_list[index])
rgb = cv2.imread(img_path + '_color.png')[:, :, :3]
rgb = rgb[:, :, ::-1]
depth = load_depth(img_path)
mask_raw = cv2.imread(img_path + '_mask.png')[:, :, 2]
cam_fx, cam_fy, cam_cx, cam_cy = self.camera_intrinsics
with open(img_path + '_label.pkl', 'rb') as f:
gts = cPickle.load(f)
idx = -1
for i in range(len(list(gts['class_ids']))):
if self.category == gts['class_ids'][i] and gts['model_list'][i] in self.codes:
idx = i
break
if idx == -1:
# for i in range(len(list(gts['class_ids']))):
# if self.category == gts['class_ids'][i]:
# print(self.category)
# print(gts['class_ids'][i])
# print(gts['model_list'][i])
# print(self.codes.keys())
raise "Dataset Error!"
#idx = list(gts['class_ids']).index(self.category)
inst_id = gts['instance_ids'][idx]
rmin, rmax, cmin, cmax = get_bbox(gts['bboxes'][idx], rgb.shape[0], rgb.shape[1])
mask_raw = np.equal(mask_raw, inst_id)
mask = np.logical_and(mask_raw, depth > 0)
choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
if len(choose) >= self.n_pts:
c_mask = np.zeros(len(choose), dtype=int)
c_mask[:self.n_pts] = 1
np.random.shuffle(c_mask)
choose = choose[c_mask.nonzero()]
else:
choose = np.pad(choose, (0, self.n_pts-len(choose)), 'wrap')
depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis]
xmap_masked = self.xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis]
ymap_masked = self.ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis]
pt2 = depth_masked / self.norm_scale
pt0 = (xmap_masked - cam_cx) * pt2 / cam_fx
pt1 = (ymap_masked - cam_cy) * pt2 / cam_fy
points = np.concatenate((pt0, pt1, pt2), axis=1)
rgb = rgb[rmin:rmax, cmin:cmax, :]
rgb = cv2.resize(rgb, (self.img_size, self.img_size), interpolation=cv2.INTER_LINEAR)
crop_w = rmax - rmin
ratio = self.img_size / crop_w
col_idx = choose % crop_w
row_idx = choose // crop_w
choose = (np.floor(row_idx * ratio) * self.img_size + np.floor(col_idx * ratio)).astype(np.int64)
model = self.models[gts['model_list'][idx]].astype(np.float32)
sample_index = np.random.choice(model.shape[0], self.m_pts)
        model = model[sample_index, :]
code = self.codes[gts['model_list'][idx]].astype(np.float32)
scale = gts['scales'][idx]
rotation = gts['rotations'][idx]
translation = gts['translations'][idx]
if self.mode == 'train':
rgb = self.colorjitter(Image.fromarray(np.uint8(rgb)))
rgb = np.array(rgb)
# add_t = np.random.uniform(-self.shift_range, self.shift_range, (1, 3))
# translation = translation + add_t[0]
add_t = np.clip(0.001*np.random.randn(points.shape[0], 3), -0.005, 0.005)
points = np.add(points, add_t)
rgb = self.transform(rgb)
points = points.astype(np.float32)
if self.category in self.sym_list:
rotation = gts['rotations'][idx]
theta_x = rotation[0, 0] + rotation[2, 2]
theta_y = rotation[0, 2] - rotation[2, 0]
r_norm = math.sqrt(theta_x**2 + theta_y**2)
s_map = np.array([[theta_x/r_norm, 0.0, -theta_y/r_norm],
[0.0, 1.0, 0.0 ],
[theta_y/r_norm, 0.0, theta_x/r_norm]])
rotation = rotation @ s_map
sRT = np.identity(4, dtype=np.float32)
sRT[:3, :3] = scale * rotation
sRT[:3, 3] = translation
#target = np.dot(model, sRT[:3, :3].T) + sRT[:3, 3]
intrix = np.array([cam_fx, cam_fy, cam_cx, cam_cy], dtype=np.float32)
mask_gt = mask_raw.astype(np.float32)
mask_gt = cv2.resize(mask_gt[rmin:rmax, cmin:cmax], (self.mask_size, self.mask_size), interpolation=cv2.INTER_NEAREST)
sample = dict()
sample['points'] = points
sample['rgb'] = rgb
sample['choose'] = choose
sample['sRT'] = sRT
#sample['target'] = target
sample['model'] = model
sample['code'] = code
sample['scale'] = scale
sample['rotation'] = rotation
sample['translation'] = translation
sample['bbox'] = np.array([rmin, rmax, cmin, cmax])
sample['intrix'] = intrix
sample['mask'] = mask_gt
sample['model_name'] = gts['model_list'][idx]
return sample
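# Each sample produced by __getitem__ above bundles the back-projected depth points,
# the cropped/normalized RGB patch with its 'choose' pixel indices, the ground-truth
# scale/rotation/translation (and combined sRT), the sampled CAD model points plus
# their latent shape code, the camera intrinsics, and the resized instance mask.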
if __name__ == "__main__":
    dataset = SymDataset('train', 'laptop', 'data/nocs/', 1024, 512, 192, 96)  # mask_size (96 here, an assumed value) is required by the constructor
dataloader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True, num_workers=10)
for i, data in enumerate(dataloader):
points = data['points']
rgb = data['rgb']
choose = data['choose']
        # 'target' is not populated by __getitem__ (it is commented out in the sample dict), so it is not read here
model = data['model']
code = data['code']
points = points.numpy()[0]
rgb = rgb.numpy()[0].transpose(1,2,0)
plt.imshow(rgb)
plt.show()
color_red = np.array([[1.0,0.0,0.0]]*512)
color_blue = np.array([[0.0,0.0,1.0]]*512)
break |
the-stack_106_25159 | #!/usr/bin/env python
#
# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
#
# Based on the software developed by:
# Copyright (c) 2008,2016 david decotigny (Pool of threads)
# Copyright (c) 2006-2008, R Oudkerk (multiprocessing.Pool)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
from __future__ import print_function
import time
import threading
from .api import *
from .pool import *
def test(arg=None):
if arg == "-v":
def say(*x):
print(*x)
else:
def say(*x):
pass
say("Start Pool testing")
get_tid = lambda: threading.current_thread().ident
def return42():
return 42
def f(x):
return x * x
def work(mseconds):
res = str(mseconds)
if mseconds < 0:
mseconds = -mseconds
say("[%d] Start to work for %fms..." % (get_tid(), mseconds*10))
time.sleep(mseconds/100.)
say("[%d] Work done (%fms)." % (get_tid(), mseconds*10))
return res
### Test copy/pasted from multiprocessing
pool = Pool(4) # start worker threads
# edge cases
assert pool.map(return42, []) == []
assert pool.apply_async(return42, []).get() == 42
assert pool.apply(return42, []) == 42
assert list(pool.imap(return42, iter([]))) == []
assert list(pool.imap_unordered(return42, iter([]))) == []
assert pool.map_async(return42, []).get() == []
assert list(pool.imap_async(return42, iter([])).get()) == []
assert list(pool.imap_unordered_async(return42, iter([])).get()) == []
# basic tests
result = pool.apply_async(f, (10,)) # evaluate "f(10)" asynchronously
assert result.get(timeout=1) == 100 # ... unless slow computer
assert list(pool.map(f, range(10))) == list(map(f, range(10)))
it = pool.imap(f, range(10))
assert next(it) == 0
assert next(it) == 1
assert next(it) == 4
# Test apply_sync exceptions
result = pool.apply_async(time.sleep, (3,))
try:
say(result.get(timeout=1)) # raises `TimeoutError`
except TimeoutError:
say("Good. Got expected timeout exception.")
else:
assert False, "Expected exception !"
assert result.get() is None # sleep() returns None
def cb(s):
say("Result ready: %s" % s)
# Test imap()
assert list(pool.imap(work, range(10, 3, -1), chunksize=4)) == list(map(
str, range(10, 3, -1)))
# Test imap_unordered()
assert sorted(pool.imap_unordered(work, range(10, 3, -1))) == sorted(map(
str, range(10, 3, -1)))
# Test map_async()
result = pool.map_async(work, range(10), callback=cb)
try:
result.get(timeout=0.01) # raises `TimeoutError`
except TimeoutError:
say("Good. Got expected timeout exception.")
else:
assert False, "Expected exception !"
say(result.get())
# Test imap_async()
result = pool.imap_async(work, range(3, 10), callback=cb)
try:
result.get(timeout=0.01) # raises `TimeoutError`
except TimeoutError:
say("Good. Got expected timeout exception.")
else:
assert False, "Expected exception !"
for i in result.get():
say("Item:", i)
say("### Loop again:")
for i in result.get():
say("Item2:", i)
# Test imap_unordered_async()
result = pool.imap_unordered_async(work, range(10, 3, -1), callback=cb)
try:
say(result.get(timeout=0.01)) # raises `TimeoutError`
except TimeoutError:
say("Good. Got expected timeout exception.")
else:
assert False, "Expected exception !"
for i in result.get():
say("Item1:", i)
for i in result.get():
say("Item2:", i)
r = result.get()
for i in r:
say("Item3:", i)
for i in r:
say("Item4:", i)
for i in r:
say("Item5:", i)
#
# The case for the exceptions
#
# Exceptions in imap_unordered_async()
result = pool.imap_unordered_async(work, range(2, -10, -1), callback=cb)
time.sleep(3)
try:
for i in result.get():
say("Got item:", i)
except (IOError, ValueError):
say("Good. Got expected exception")
# Exceptions in imap_async()
result = pool.imap_async(work, range(2, -10, -1), callback=cb)
time.sleep(3)
try:
for i in result.get():
say("Got item:", i)
except (IOError, ValueError):
say("Good. Got expected exception")
# Stop the test: need to stop the pool !!!
pool.terminate()
pool.join()
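# To run this self-test directly, a small guard could be appended (illustrative):
#     if __name__ == "__main__":
#         test("-v")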
|
the-stack_106_25161 | """
Support for interface with a Sony Bravia TV.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.braviatv/
"""
import logging
import re
import voluptuous as vol
from homeassistant.components.media_player import (
PLATFORM_SCHEMA, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF,
SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP, MediaPlayerDevice)
from homeassistant.const import CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
REQUIREMENTS = ['braviarc-homeassistant==0.3.7.dev0']
BRAVIA_CONFIG_FILE = 'bravia.conf'
CLIENTID_PREFIX = 'HomeAssistant'
DEFAULT_NAME = 'Sony Bravia TV'
NICKNAME = 'Home Assistant'
# Map ip to request id for configuring
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
SUPPORT_BRAVIA = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | \
SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | \
SUPPORT_SELECT_SOURCE | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def _get_mac_address(ip_address):
"""Get the MAC address of the device."""
from subprocess import Popen, PIPE
pid = Popen(["arp", "-n", ip_address], stdout=PIPE)
pid_component = pid.communicate()[0]
match = re.search(r"(([a-f\d]{1,2}\:){5}[a-f\d]{1,2})".encode('UTF-8'),
pid_component)
if match is not None:
return match.groups()[0]
return None
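# The MAC address above is scraped from the local ARP cache via `arp -n`, so it only
# resolves when the TV has recently exchanged traffic with this host; otherwise None
# is passed through to the device setup.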
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Sony Bravia TV platform."""
host = config.get(CONF_HOST)
if host is None:
return
pin = None
bravia_config = load_json(hass.config.path(BRAVIA_CONFIG_FILE))
while bravia_config:
# Set up a configured TV
host_ip, host_config = bravia_config.popitem()
if host_ip == host:
pin = host_config['pin']
mac = host_config['mac']
name = config.get(CONF_NAME)
add_entities([BraviaTVDevice(host, mac, name, pin)])
return
setup_bravia(config, pin, hass, add_entities)
def setup_bravia(config, pin, hass, add_entities):
"""Set up a Sony Bravia TV based on host parameter."""
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
if pin is None:
request_configuration(config, hass, add_entities)
return
mac = _get_mac_address(host)
if mac is not None:
mac = mac.decode('utf8')
# If we came here and configuring this host, mark as done
if host in _CONFIGURING:
request_id = _CONFIGURING.pop(host)
configurator = hass.components.configurator
configurator.request_done(request_id)
_LOGGER.info("Discovery configuration done")
# Save config
save_json(
hass.config.path(BRAVIA_CONFIG_FILE),
{host: {'pin': pin, 'host': host, 'mac': mac}})
add_entities([BraviaTVDevice(host, mac, name, pin)])
def request_configuration(config, hass, add_entities):
"""Request configuration steps from the user."""
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
configurator = hass.components.configurator
# We got an error if this method is called while we are configuring
if host in _CONFIGURING:
configurator.notify_errors(
_CONFIGURING[host], "Failed to register, please try again.")
return
def bravia_configuration_callback(data):
"""Handle the entry of user PIN."""
from braviarc import braviarc
pin = data.get('pin')
braviarc = braviarc.BraviaRC(host)
braviarc.connect(pin, CLIENTID_PREFIX, NICKNAME)
if braviarc.is_connected():
setup_bravia(config, pin, hass, add_entities)
else:
request_configuration(config, hass, add_entities)
_CONFIGURING[host] = configurator.request_config(
name, bravia_configuration_callback,
        description='Enter the Pin shown on your Sony Bravia TV. ' +
                    'If no Pin is shown, enter 0000 to let TV show you a Pin.',
description_image="/static/images/smart-tv.png",
submit_caption="Confirm",
fields=[{'id': 'pin', 'name': 'Enter the pin', 'type': ''}]
)
class BraviaTVDevice(MediaPlayerDevice):
"""Representation of a Sony Bravia TV."""
def __init__(self, host, mac, name, pin):
"""Initialize the Sony Bravia device."""
from braviarc import braviarc
self._pin = pin
self._braviarc = braviarc.BraviaRC(host, mac)
self._name = name
self._state = STATE_OFF
self._muted = False
self._program_name = None
self._channel_name = None
self._channel_number = None
self._source = None
self._source_list = []
self._original_content_list = []
self._content_mapping = {}
self._duration = None
self._content_uri = None
self._id = None
self._playing = False
self._start_date_time = None
self._program_media_type = None
self._min_volume = None
self._max_volume = None
self._volume = None
self._braviarc.connect(pin, CLIENTID_PREFIX, NICKNAME)
if self._braviarc.is_connected():
self.update()
else:
self._state = STATE_OFF
def update(self):
"""Update TV info."""
if not self._braviarc.is_connected():
if self._braviarc.get_power_status() != 'off':
self._braviarc.connect(self._pin, CLIENTID_PREFIX, NICKNAME)
if not self._braviarc.is_connected():
return
# Retrieve the latest data.
try:
if self._state == STATE_ON:
# refresh volume info:
self._refresh_volume()
self._refresh_channels()
power_status = self._braviarc.get_power_status()
if power_status == 'active':
self._state = STATE_ON
playing_info = self._braviarc.get_playing_info()
self._reset_playing_info()
if playing_info is None or not playing_info:
self._channel_name = 'App'
else:
self._program_name = playing_info.get('programTitle')
self._channel_name = playing_info.get('title')
self._program_media_type = playing_info.get(
'programMediaType')
self._channel_number = playing_info.get('dispNum')
self._source = playing_info.get('source')
self._content_uri = playing_info.get('uri')
self._duration = playing_info.get('durationSec')
self._start_date_time = playing_info.get('startDateTime')
else:
self._state = STATE_OFF
except Exception as exception_instance: # pylint: disable=broad-except
_LOGGER.error(exception_instance)
self._state = STATE_OFF
def _reset_playing_info(self):
self._program_name = None
self._channel_name = None
self._program_media_type = None
self._channel_number = None
self._source = None
self._content_uri = None
self._duration = None
self._start_date_time = None
def _refresh_volume(self):
"""Refresh volume information."""
volume_info = self._braviarc.get_volume_info()
if volume_info is not None:
self._volume = volume_info.get('volume')
self._min_volume = volume_info.get('minVolume')
self._max_volume = volume_info.get('maxVolume')
self._muted = volume_info.get('mute')
def _refresh_channels(self):
if not self._source_list:
self._content_mapping = self._braviarc. \
load_source_list()
self._source_list = []
for key in self._content_mapping:
self._source_list.append(key)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def source(self):
"""Return the current input source."""
return self._source
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
if self._volume is not None:
return self._volume / 100
return None
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_BRAVIA
@property
def media_title(self):
"""Title of current playing media."""
return_value = None
        if self._channel_name is not None:
            return_value = self._channel_name
            if self._program_name is not None:
                return_value += ': ' + self._program_name
return return_value
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self._channel_name
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._duration
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._braviarc.set_volume_level(volume)
def turn_on(self):
"""Turn the media player on."""
self._braviarc.turn_on()
def turn_off(self):
"""Turn off media player."""
self._braviarc.turn_off()
def volume_up(self):
"""Volume up the media player."""
self._braviarc.volume_up()
def volume_down(self):
"""Volume down media player."""
self._braviarc.volume_down()
def mute_volume(self, mute):
"""Send mute command."""
self._braviarc.mute_volume(mute)
def select_source(self, source):
"""Set the input source."""
if source in self._content_mapping:
uri = self._content_mapping[source]
self._braviarc.play_content(uri)
def media_play_pause(self):
"""Simulate play pause media player."""
if self._playing:
self.media_pause()
else:
self.media_play()
def media_play(self):
"""Send play command."""
self._playing = True
self._braviarc.media_play()
def media_pause(self):
"""Send media pause command to media player."""
self._playing = False
self._braviarc.media_pause()
def media_next_track(self):
"""Send next track command."""
self._braviarc.media_next_track()
def media_previous_track(self):
"""Send the previous track command."""
self._braviarc.media_previous_track()
|
the-stack_106_25162 | # ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from enum import Enum
class SlideKey(str, Enum):
SLIDE_ID = 'slide_id'
IMAGE = 'image'
IMAGE_PATH = 'image_path'
MASK = 'mask'
MASK_PATH = 'mask_path'
LABEL = 'label'
SPLIT = 'split'
SCALE = 'scale'
ORIGIN = 'origin'
FOREGROUND_THRESHOLD = 'foreground_threshold'
METADATA = 'metadata'
LOCATION = 'location'
class TileKey(str, Enum):
TILE_ID = 'tile_id'
SLIDE_ID = 'slide_id'
IMAGE = 'image'
IMAGE_PATH = 'image_path'
MASK = 'mask'
MASK_PATH = 'mask_path'
LABEL = 'label'
SPLIT = 'split'
TILE_X = 'tile_x'
TILE_Y = 'tile_y'
OCCUPANCY = 'occupancy'
FOREGROUND_THRESHOLD = 'foreground_threshold'
SLIDE_METADATA = 'slide_metadata'
@staticmethod
def from_slide_metadata_key(slide_metadata_key: str) -> str:
return 'slide_' + slide_metadata_key
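# Example (illustrative): TileKey.from_slide_metadata_key('scanner') == 'slide_scanner'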
class ResultsKey(str, Enum):
SLIDE_ID = 'slide_id'
TILE_ID = 'tile_id'
IMAGE = 'image'
IMAGE_PATH = 'image_path'
LOSS = 'loss'
PROB = 'prob'
CLASS_PROBS = 'prob_class'
PRED_LABEL = 'pred_label'
TRUE_LABEL = 'true_label'
BAG_ATTN = 'bag_attn'
TILE_X = "x"
TILE_Y = "y"
class MetricsKey(str, Enum):
ACC = 'accuracy'
ACC_MACRO = 'macro_accuracy'
ACC_WEIGHTED = 'weighted_accuracy'
CONF_MATRIX = 'confusion_matrix'
AUROC = 'auroc'
PRECISION = 'precision'
RECALL = 'recall'
F1 = 'f1score'
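# Usage sketch (illustrative): because these are str-valued Enums they can serve as
# stable dictionary keys, e.g. results[ResultsKey.SLIDE_ID] or metrics[MetricsKey.AUROC],
# while still comparing equal to their underlying strings.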
|
the-stack_106_25163 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import gc
import itertools as it
import operator
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import core
from jax import lax
from jax import numpy as jnp
from jax import test_util as jtu
from jax.abstract_arrays import make_shaped_array
from jax.api import jvp, linearize, vjp, jit, make_jaxpr
from jax.core import UnshapedArray, ShapedArray
from jax.tree_util import tree_flatten, tree_unflatten, tree_multimap, tree_reduce, tree_leaves
from jax._src.util import partial
from jax.interpreters import partial_eval as pe
from jax.config import config
config.parse_flags_with_absl()
_ = pe.PartialVal.unknown(UnshapedArray(np.float32))
__ = pe.PartialVal.unknown(ShapedArray((), np.float32))
def call(f, *args):
return jit(f)(*args)
def simple_fun(x, y):
return jnp.sin(x * y)
def simple_fun_fanout(x, y):
return jnp.sin(x * y) * x
def fun_with_call(x):
return call(jnp.sin, x)
def fun_with_nested_calls(x):
def f(y):
y2 = jnp.sin(y) + 1.0 + (2.0 * x)
@jit
def g(z):
return y2 * z * x + (x * y)
return call(g, y)
return call(f, x)
def error(*args):
def f(*args):
assert False
return f
def fun_with_nested_calls_2(x):
def bar(y):
def baz(w):
q = call(lambda x: y, x)
q = q + call(lambda: y)
q = q + call(lambda y: w + y, y)
q = call(lambda w: call(jnp.sin, x) * y, 1.0) + q
return q
p, t = jvp(baz, (x + 1.0,), (y,))
return t + (x * p)
return call(bar, x)
def fun_call_jitted(x):
@jit
def g(z):
return x * z
return call(g, x)
def fun_with_two_calls(x):
return call(jnp.sin, x) + call(jnp.cos, x)
def fun_with_call_closure(x):
def foo(y, z):
return (x * x) * jnp.sin(y) * z
return call(foo, x, jnp.cos(x)) + x
def product_io_fun(x, y):
xa = x['a']
xb = x['b']
y1, (y2, y3) = y
return jnp.sin(xa + y2), [xb, (y1, y3)]
_rng = np.random.RandomState(42)
R = _rng.randn
CallSpec = namedtuple('CallSpec', ['fun', 'args'])
test_specs_base = [
CallSpec(simple_fun, (R(3, 2), R(3, 2))),
CallSpec(simple_fun_fanout, (R(3, 2), R(3, 2))),
CallSpec(product_io_fun, ({'a': R(2, 2), 'b': R(2, 2)},
(R(2, 2), (R(2, 2), R(2, 2))))),
CallSpec(fun_with_call, (R(3, 2),)),
CallSpec(fun_with_two_calls, (R(3, 2),)),
CallSpec(fun_with_call_closure, (R(3, 2),)),
CallSpec(fun_call_jitted, (R(1,),)),
CallSpec(fun_with_nested_calls, (R(),)),
CallSpec(fun_with_nested_calls, (R(3, 2),)),
CallSpec(fun_with_nested_calls_2, (R(1, 2),)),
]
def jvp_unlinearized(f, primals, tangents):
out, jvp = linearize(f, *primals)
return out, jvp(*tangents)
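# linearize(f, *primals) returns the primal output together with a linear function of
# tangents, so the pair of calls above reproduces jvp in two separate steps.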
test_specs = []
for ts in test_specs_base:
test_specs.append(ts)
test_specs.append(CallSpec(partial(jvp, ts.fun), (ts.args, ts.args)))
test_specs.append(CallSpec(jit(ts.fun), ts.args))
test_specs.append(CallSpec(jit(jit(ts.fun)), ts.args))
test_specs.append(CallSpec(partial(jvp_unlinearized, ts.fun),
(ts.args, ts.args)))
def fwd_deriv(f):
def df(x):
return jvp(f, (x,), (1.0,))[1]
return df
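# e.g. fwd_deriv(jnp.sin)(0.0) == 1.0: the derivative is obtained by pushing a unit
# tangent through jvp (see test_jvp_repeated_fwd below for the chained case).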
class CoreTest(jtu.JaxTestCase):
def test_tree_multimap(self):
xs = ({'a': 1}, [2, 3])
ys = ({'a': 10}, [20, 30])
ys_bad = ({'a': 10, 'b': 10}, [20, 30])
zs = ({'a': 11}, [22, 33])
f = lambda x, y: x + y
assert tree_multimap(f, xs, ys) == zs
try:
tree_multimap(f, xs, ys_bad)
assert False
except (TypeError, ValueError):
pass
def test_tree_flatten(self):
flat, _ = tree_flatten(({'a': 1}, [2, 3], 4))
assert flat == [1, 2, 3, 4]
def test_tree_unflatten(self):
tree = [(1, 2), {"roy": (3, [4, 5, ()])}]
flat, treedef = tree_flatten(tree)
assert flat == [1, 2, 3, 4, 5]
tree2 = tree_unflatten(treedef, flat)
nodes_equal = tree_multimap(operator.eq, tree, tree2)
assert tree_reduce(operator.and_, nodes_equal)
@parameterized.named_parameters(
(str(i), *spec) for i, spec in enumerate(test_specs))
def test_jit(self, f, args):
jtu.check_close(jit(f)(*args), f(*args))
@parameterized.named_parameters(
(str(i), *spec) for i, spec in enumerate(test_specs))
def test_jvp(self, f, args):
jtu.check_jvp(f, partial(jvp, f), args, rtol={np.float32: 3e-2})
def test_jvp_zeros(self):
def foo(x):
def bar(y):
return jnp.sin(x * y)
return jvp(bar, (3 * x,), (2 * x,))
jtu.check_eq(jit(foo)(0.5), foo(0.5))
@parameterized.parameters(test_specs)
def test_jvp_linearized(self, f, args):
jtu.check_jvp(f, partial(jvp_unlinearized, f), args,
rtol={np.float32: 3e-2})
@parameterized.named_parameters(
(str(i), *spec) for i, spec in enumerate(test_specs))
def test_vjp(self, f, args):
jtu.check_vjp(f, partial(vjp, f), args,
rtol={np.float32: 3e-1, np.float64: 1e-5},
atol={np.float32: 1e-2, np.float64: 1e-5})
def test_jvp_closure(self):
def foo(x):
def bar(y):
return jnp.multiply(x, y)
return jvp(bar, (3.0,), (1.0,))[1]
ans = jvp(foo, (1.0,), (2.0,))
assert ans == (1.0, 2.0), ans
def test_jit_closure(self):
def foo(x):
@jit
def bar(y):
return x + y
return bar(0.0)
assert jvp(foo, (1.0,), (2.0,)) == (1.0, 2.0)
def test_simple_jit(self):
def foo(x):
if x.shape == ():
return x + 1.
else:
return x + 2.
foo2 = jit(foo)
foo3 = jit(foo2)
x1, y1 = np.array(1.0), np.array(2.0)
assert foo(x1) == y1
assert foo2(x1) == y1
assert foo3(x1) == y1
x2, y2 = np.array([1.0, 2.0]), np.array([3.0, 4.0])
assert np.all(foo(x2) == y2)
assert np.all(foo2(x2) == y2)
assert np.all(foo3(x2) == y2)
def test_product_jit(self):
def foo(x, tup):
y, z = tup
w = x + z
return (w, {'x': y}), z
foo2 = jit(foo)
foo3 = jit(foo2)
args = (1.0, (2.0, 3.0))
expected_output = ((4.0, {'x': 2.0}), 3.0)
assert foo(*args) == expected_output
assert foo2(*args) == expected_output
assert foo3(*args) == foo(*args)
def test_jvp_repeated_fwd(self):
d_sin = fwd_deriv(jnp.sin)
d2_sin = fwd_deriv(d_sin)
d3_sin = fwd_deriv(d2_sin)
assert d_sin(0.0) == 1.0
assert d2_sin(0.0) == 0.0
assert d3_sin(0.0) == -1.0
def test_reference_cycles(self):
gc.collect()
def f(x):
return x.sum()
fn = partial(linearize, f)
params = jnp.zeros([])
debug = gc.get_debug()
try:
fn(params)
gc.set_debug(gc.DEBUG_SAVEALL)
self.assertEqual(gc.collect(), 0)
finally:
gc.set_debug(debug)
def test_comparing_var(self):
newsym = core.gensym()
a = newsym(core.abstract_unit)
b = newsym(core.abstract_unit)
c = newsym(core.abstract_unit)
assert a < b < c
assert c > b > a
assert a != b and b != c and a != c
def test_var_ordering(self):
newsym = core.gensym()
a = newsym(core.abstract_unit)
b = newsym(core.abstract_unit)
c = newsym(core.abstract_unit)
for ordering in it.permutations([a, b, c]):
assert sorted(list(ordering)) == [a, b, c]
def test_var_compared_by_identity(self):
a1 = core.gensym()(core.abstract_unit)
a2 = core.gensym()(core.abstract_unit)
assert str(a1) == str(a2)
assert a1 != a2
def test_var_tree_flatten(self):
newsym = core.gensym()
a, b, c, d = (
newsym(core.abstract_unit), newsym(core.abstract_unit),
newsym(core.abstract_unit), newsym(core.abstract_unit))
syms = {c: d, a: b}
assert 'bd' == ''.join(map(str, tree_leaves(syms)))
def test_device_put_unit(self):
def f(x, y):
return x, 2 * y
args_maker = lambda: (core.unit, 1)
self._CompileAndCheck(f, args_maker)
class JaxprTypeChecks(jtu.JaxTestCase):
def setUp(self):
super().setUp()
jax._src.lax.control_flow._initial_style_open_jaxpr.cache_clear()
jax._src.lax.control_flow._initial_style_jaxpr.cache_clear()
jax._src.lax.control_flow._initial_style_jaxprs_with_common_consts.cache_clear()
def test_check_jaxpr_correct(self):
jaxpr = make_jaxpr(lambda x: jnp.sin(x) + jnp.cos(x))(1.).jaxpr
core.check_jaxpr(jaxpr)
def test_check_jaxpr_cond_correct(self):
jaxpr = make_jaxpr(lambda x: lax.switch(0, [jnp.sin, jnp.cos], x))(1.).jaxpr
core.check_jaxpr(jaxpr)
def test_check_jaxpr_cond_invalid(self):
jaxpr = make_jaxpr(lambda x: lax.switch(0, [jnp.sin, jnp.cos], x))(1.).jaxpr
cond = next(eqn for eqn in jaxpr.eqns if eqn.primitive.name == 'cond')
cond.params['branches'][0].jaxpr.invars = ()
self.assertRaisesRegex(
core.JaxprTypeError,
'cond branch 0 takes 0 inputs, branch 1 takes 1',
lambda: core.check_jaxpr(jaxpr))
def test_check_jaxpr_scan_correct(self):
def f(c, x):
b = jnp.cos(jnp.sum(jnp.sin(x)) + jnp.sum(jnp.cos(c)))
c = jnp.sin(c * b)
return c, b
xs = jnp.ones((5, 3))
c = jnp.ones(4)
jaxpr = make_jaxpr(partial(lax.scan, f))(c, xs).jaxpr
core.check_jaxpr(jaxpr)
def test_check_jaxpr_invalid_long(self):
# jaxprs can be large, and this tests that when large ones are printed for
# context in jaxpr typechecking errors, they're not printed entirely
def enlarge(f, n):
def g(x):
for _ in range(n):
x = x + x
x = f(x)
for _ in range(n):
x = x + x
return x
return g
jaxpr = make_jaxpr(enlarge(
lambda x: lax.switch(0, [jnp.sin, jnp.cos], x), 100))(1.).jaxpr
cond = next(eqn for eqn in jaxpr.eqns if eqn.primitive.name == 'cond')
cond.params['branches'][0].jaxpr.invars = ()
msg = ''
try:
core.check_jaxpr(jaxpr)
except core.JaxprTypeError as e:
msg, = e.args
self.assertIn('cond branch 0 takes 0 inputs, branch 1 takes 1', msg)
self.assertIn('in equation:', msg)
self.assertIn('from source:', msg)
self.assertIn('while checking jaxpr:', msg)
self.assertLess(msg.count('\n'), 200)
def test_check_jaxpr_eqn_mismatch(self):
def f(x):
return jnp.sin(x) + jnp.cos(x)
def new_jaxpr():
return make_jaxpr(f)(1.).jaxpr
# jaxpr is:
#
# { lambda ; a.
# let b = sin a
# c = cos a
# d = add b c
# in (d,) }
#
# NB: eqns[0].outvars[0] and eqns[2].invars[0] are both 'b'
jaxpr = new_jaxpr()
jaxpr.eqns[0].outvars[0].aval = make_shaped_array(2) # int, not float!
self.assertRaisesRegex(
core.JaxprTypeError,
r"Variable '.' inconsistently typed as ShapedArray(.*), "
r"bound as ShapedArray(.*)\n\nin equation:\n\n . = sin .",
lambda: core.check_jaxpr(jaxpr))
jaxpr = new_jaxpr()
jaxpr.eqns[0].outvars[0].aval = make_shaped_array(np.ones((2, 3)))
self.assertRaisesRegex(
core.JaxprTypeError,
r"Variable '.' inconsistently typed as ShapedArray(.*), "
r"bound as ShapedArray(.*)\n\nin equation:\n\n . = sin .",
lambda: core.check_jaxpr(jaxpr))
def test_jaxpr_dropvar_from_jit_call(self):
def inner(x):
return x + 1, x + 2
def f(x):
_, y = jit(inner)(x)
return y + 3
jaxpr = make_jaxpr(f)(1).jaxpr
assert jaxpr.eqns[0].outvars[0] is core.dropvar
core.check_jaxpr(jaxpr)
def test_jaxpr_dropvar_from_loop(self):
def f(x):
_, y = lax.while_loop(lambda s: s[0] < 0.,
lambda s: (jnp.sin(s[0]), jnp.cos(s[1])),
(x, x))
return y + 1.
jaxpr = make_jaxpr(f)(1.).jaxpr
assert jaxpr.eqns[0].outvars[0] is core.dropvar
core.check_jaxpr(jaxpr)
def test_jaxpr_dropvar_from_cond(self):
def f(x):
_, y = lax.cond(x < 0.,
lambda x: (jnp.sin(x), x + 1.),
lambda x: (jnp.cos(x), x + 2.),
x)
return y
jaxpr = make_jaxpr(f)(1.).jaxpr
assert jaxpr.eqns[-1].outvars[0] is core.dropvar
core.check_jaxpr(jaxpr)
def test_jaxpr_undefined_eqn_invar(self):
jaxpr = make_jaxpr(lambda x: jnp.sin(x) + jnp.cos(x))(1.).jaxpr
cos = next(eqn for eqn in jaxpr.eqns if eqn.primitive.name == 'cos')
cos.invars[0] = core.gensym([jaxpr], suffix='_test')(cos.invars[0].aval)
self.assertRaisesRegex(
core.JaxprTypeError,
r"Variable '.+_test' not defined\n\nin equation:",
lambda: core.check_jaxpr(jaxpr))
@parameterized.parameters(
{'value': 0, 'weak_type': True},
{'value': np.int32(0), 'weak_type': False},
{'value': np.array([0]), 'weak_type': False}
)
def test_raise_to_shaped_weak_type(self, value, weak_type):
aval = core.raise_to_shaped(core.get_aval(value))
self.assertEqual(aval.weak_type, weak_type)
def test_lattice_join_named_shape(self):
aval1 = core.ShapedArray((2, 3), np.float32, False, {'i': 10})
self.assertEqual(core.lattice_join(aval1, aval1), aval1)
aval2 = core.ShapedArray((2, 3), np.float32, False, {'j': 5})
expected = core.ShapedArray((2, 3), np.float32, False, {'i': 10, 'j': 5})
self.assertEqual(core.lattice_join(aval1, aval2), expected)
aval3 = core.ShapedArray((2, 3), np.float32, False, {'i': 5})
self.assertRaises(TypeError, lambda: core.lattice_join(aval1, aval3))
def test_typecompat_named_shape(self):
aval1 = core.ShapedArray((2, 3), np.float32, False, {'i': 10})
aval2 = core.ShapedArray((2, 3), np.float32, False, {'j': 5})
self.assertTrue(core.typecompat(aval1, aval2))
aval3 = core.ShapedArray((2, 3), np.float32, False, {'i': 5})
self.assertFalse(core.typecompat(aval1, aval3))
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
|
the-stack_106_25164 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fixtures
import futurist
import mock
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_log import log
from oslo_utils import uuidutils
from ironic_inspector.common import i18n
# Import configuration options
from ironic_inspector import conf # noqa
from ironic_inspector import db
from ironic_inspector import node_cache
from ironic_inspector.plugins import base as plugins_base
from ironic_inspector import utils
CONF = cfg.CONF
class BaseTest(fixtures.TestWithFixtures):
IS_FUNCTIONAL = False
def setUp(self):
super(BaseTest, self).setUp()
if not self.IS_FUNCTIONAL:
self.init_test_conf()
self.session = db.get_session()
engine = db.get_engine()
db.Base.metadata.create_all(engine)
engine.connect()
self.addCleanup(db.get_engine().dispose)
plugins_base._HOOKS_MGR = None
node_cache._SEMAPHORES = lockutils.Semaphores()
for name in ('_', '_LI', '_LW', '_LE', '_LC'):
patch = mock.patch.object(i18n, name, lambda s: s)
patch.start()
# 'p=patch' magic is due to how closures work
self.addCleanup(lambda p=patch: p.stop())
utils._EXECUTOR = futurist.SynchronousExecutor(green=True)
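        # SynchronousExecutor runs submitted callables inline instead of in a worker
        # thread, which keeps the unit tests below deterministic.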
def init_test_conf(self):
CONF.reset()
log.register_options(CONF)
self.cfg = self.useFixture(config_fixture.Config(CONF))
self.cfg.set_default('connection', "sqlite:///", group='database')
self.cfg.set_default('slave_connection', False, group='database')
self.cfg.set_default('max_retries', 10, group='database')
def assertPatchEqual(self, expected, actual):
expected = sorted(expected, key=lambda p: p['path'])
actual = sorted(actual, key=lambda p: p['path'])
self.assertEqual(expected, actual)
def assertCalledWithPatch(self, expected, mock_call):
def _get_patch_param(call):
try:
return call[0][1]
except IndexError:
return call[0][0]
actual = sum(map(_get_patch_param, mock_call.call_args_list), [])
self.assertPatchEqual(actual, expected)
class NodeTest(BaseTest):
def setUp(self):
super(NodeTest, self).setUp()
self.uuid = uuidutils.generate_uuid()
self.bmc_address = '1.2.3.4'
self.macs = ['11:22:33:44:55:66', '66:55:44:33:22:11']
fake_node = {
'driver': 'pxe_ipmitool',
'driver_info': {'ipmi_address': self.bmc_address},
'properties': {'cpu_arch': 'i386', 'local_gb': 40},
'uuid': self.uuid,
'power_state': 'power on',
'provision_state': 'inspecting',
'extra': {},
'instance_uuid': None,
'maintenance': False
}
mock_to_dict = mock.Mock(return_value=fake_node)
self.node = mock.Mock(**fake_node)
self.node.to_dict = mock_to_dict
self.ports = []
self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0,
node=self.node, ports=self.ports)
self.node_info.node = mock.Mock(return_value=self.node)
|
the-stack_106_25168 | from setuptools import setup, find_packages
from setuptools.command.install import install
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="hyperparameter",
version="0.2.0",
description=
"A hyper-parameter library for researchers, data scientists and machine learning engineers.",
long_description=long_description,
long_description_content_type="text/markdown",
author="Reiase",
author_email="[email protected]",
url="https://github.com/reiase/hyperparameter",
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
packages=find_packages(),
license="http://www.apache.org/licenses/LICENSE-2.0",
)
|
the-stack_106_25169 | from django.shortcuts import render
from django.forms import formset_factory
from .forms import PizzaForm, MultiplePizzaForm
def home(request):
return render(request, 'pizza/home.html')
def order(request):
multiple_form = MultiplePizzaForm()
form = PizzaForm()
context = {
'pizzaform': form,
'multiple_form': multiple_form
}
if request.method == 'POST':
filled_form = PizzaForm(request.POST)
if filled_form.is_valid():
filled_form.save()
note = f'Thanks for ordering! Your ' \
f'{filled_form.cleaned_data["size"]} ' \
f'{filled_form.cleaned_data["topping1"]} ' \
f'and ' \
f'{filled_form.cleaned_data["topping2"]} ' \
f'pizza is on its way'
context.update({'note': note})
return render(request, 'pizza/order.html', context)
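# formset_factory below builds a formset class that renders `extra` copies of PizzaForm
# under a single management form, so one POST can carry several pizza orders at once.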
def pizzas(request):
number_of_pizzas = 2
    filled_multiple_pizza_form = MultiplePizzaForm(request.GET)
    if filled_multiple_pizza_form.is_valid():
        number_of_pizzas = filled_multiple_pizza_form.cleaned_data['number']
PizzaFormSet = formset_factory(PizzaForm, extra=number_of_pizzas)
formset = PizzaFormSet()
context = {'formset': formset}
if request.method == 'POST':
filled_formset = PizzaFormSet(request.POST)
if filled_formset.is_valid():
for form in filled_formset:
print(form.cleaned_data['topping1'])
note = 'Pizzas have been ordered!'
else:
note = 'Order was not created, please try again'
context.update({
'note': note,
})
return render(request, 'pizza/pizzas.html', context)
else:
return render(request, 'pizza/pizzas.html', context) |
the-stack_106_25171 | from Okta_v2 import Client, get_user_command, get_group_members_command, create_user_command, \
verify_push_factor_command, get_groups_for_user_command, get_user_factors_command, get_logs_command, \
get_zone_command, list_zones_command, update_zone_command, list_users_command
import pytest
import json
import io
client = Client(base_url="demisto.com")
user_data = {
"id": "TestID",
"status": "PROVISIONED",
"created": "2020-02-19T08:18:20.000Z",
"activated": "2020-02-20T11:44:43.000Z",
"statusChanged": "2020-02-20T11:45:24.000Z",
"lastLogin": "2020-02-23T11:45:24.000Z",
"lastUpdated": "2020-02-20T11:45:24.000Z",
"passwordChanged": "2020-02-19T08:18:21.000Z",
"type": {
"id": "oty66lckcvDyVcGzS0h7"
},
"profile": {
"firstName": "test",
"lastName": "this",
"mobilePhone": 'null',
"city": "Tel-Aviv",
"displayName": "test1",
"secondEmail": "[email protected]",
"login": "[email protected]",
"email": "[email protected]"
},
"credentials": {
"provider": {
"type": "OKTA",
"name": "OKTA"
}
},
"_links": {
}
}
factors_data = [
{
"id": "mblpt21nffaaN5F060h7",
"factorType": "sms",
"provider": "OKTA",
"vendorName": "OKTA",
"status": "PENDING_ACTIVATION",
"created": "2020-02-18T11:48:16.000Z",
"lastUpdated": "2020-02-18T11:48:16.000Z",
"profile": {
"phoneNumber": "+12025550191"
},
"_links": {}
},
{
"id": "uftpt24kdrDJ7fDOq0h7",
"factorType": "token:software:totp",
"provider": "GOOGLE",
"vendorName": "GOOGLE",
"status": "PENDING_ACTIVATION",
"created": "2020-02-18T11:45:14.000Z",
"lastUpdated": "2020-02-18T11:45:14.000Z",
"profile": {
"credentialId": "[email protected]"
},
"_links": {}
},
{
"id": "opfpt1joeaArlg27g0h7",
"factorType": "push",
"provider": "OKTA",
"vendorName": "OKTA",
"status": "PENDING_ACTIVATION",
"created": "2020-02-18T11:45:03.000Z",
"lastUpdated": "2020-02-18T11:45:03.000Z",
"_links": {
"self": {},
"poll": {},
"user": {}
},
"_embedded": {
"activation": {
"factorResult": "TIMEOUT",
"_links": {}
}
}
}
]
group_data = [
{
"id": "00g66lckcsAJpLcNc0h7",
"created": "2016-04-12T15:01:50.000Z",
"lastUpdated": "2016-04-12T15:01:50.000Z",
"lastMembershipUpdated": "2020-02-19T09:01:32.000Z",
"objectClass": [
"okta:user_group"
],
"type": "BUILT_IN",
"profile": {
"name": "Everyone",
"description": "All users in your organization"
},
"_links": {}
}
]
verify_push_factor_response = {
"factorResult": "WAITING",
"profile": {
"credentialId": "[email protected]",
"deviceType": "SmartPhone_IPhone",
"keys": [
{
"kty": "EC",
"use": "sig",
"kid": "default",
"x": "3Y53lDoQYwzzVbjsbsPnqOnVaotIrVByQh5Sa-RwOHQ",
"y": "0zHY_y9rVh-bq_-lR-MrmzNtUZrrIMbTrsjtxUyUT2Q",
"crv": "P-256"
}
],
"name": "iPhone (5)",
"platform": "IOS",
"version": "13.1.3"
},
"expiresAt": "2020-02-24T11:37:08.000Z",
"_links": {
"cancel": {
"href": "https://test.com/api/v1/users/TestID/factors/FactorID/transactions/TransactionID",
"hints": {
"allow": [
"DELETE"
]
}
},
"poll": {
"href": "https://test.com/api/v1/users/TestID/factors/FactorID/transactions/TransactionID",
"hints": {
"allow": [
"GET"
]
}
}
}
}
polling_response_success = {
"factorResult": "SUCCESS"
}
polling_response_rejected = {
"factorResult": "REJECTED",
"_links": {
"verify": {
"href": "https://test.com/api/v1/users/TestID/factors/FactorID/verify",
"hints": {
"allow": [
"POST"
]
}
},
"factor": {}
}
}
create_user_response = {
"id": "00ub0oNGTSWTBKOLGLNR",
"status": "STAGED",
"created": "2013-07-02T21:36:25.344Z",
"activated": '',
"statusChanged": '',
"lastLogin": '',
"lastUpdated": "2013-07-02T21:36:25.344Z",
"passwordChanged": "2013-07-02T21:36:25.344Z",
"profile": {
"firstName": "Testush",
"lastName": "Test",
"email": "[email protected]",
"login": "[email protected]",
"mobilePhone": "555-415-1337"
},
"credentials": {
"password": {},
"provider": {
"type": "OKTA",
"name": "OKTA"
}
},
"_links": {
"activate": {
"href": "https://test.com/api/v1/users/TestID/lifecycle/activate"
},
"self": {
"href": "https://test.com/api/v1/users/TestID"
}
}
}
group_members = [
{
"id": "TestID1",
"status": "ACTIVE",
"created": "2016-04-12T15:01:52.000Z",
"activated": '',
"statusChanged": "2020-02-12T15:05:06.000Z",
"lastLogin": "2020-02-24T11:40:36.000Z",
"lastUpdated": "2020-02-24T11:42:22.000Z",
"passwordChanged": "2020-02-24T11:40:08.000Z",
"type": {
"id": "oty66lckcvDyVcGzS0h7"
},
"profile": {
"firstName": "Test1",
"lastName": "Test1",
"primaryPhone": "8888888888",
"mobilePhone": "",
"secondEmail": "",
"department": "everyone,admin,bla",
"login": "[email protected]",
"email": "[email protected]"
},
"credentials": {
"password": {},
"recovery_question": {
"question": "born city"
},
"provider": {
"type": "OKTA",
"name": "OKTA"
}
},
"_links": {}
},
{
"id": "TestID2",
"status": "STAGED",
"created": "2018-07-24T20:20:04.000Z",
"activated": '',
"statusChanged": '',
"lastLogin": '',
"lastUpdated": "2018-07-24T20:20:04.000Z",
"passwordChanged": '',
"type": {
"id": "oty66lckcvDyVcGzS0h7"
},
"profile": {
"firstName": "Test2",
"lastName": "Test2",
"mobilePhone": '',
"secondEmail": "",
"login": "[email protected]",
"email": "[email protected]"
},
"credentials": {
"provider": {
"type": "OKTA",
"name": "OKTA"
}
},
"_links": {}
},
{
"id": "TestID3",
"status": "PROVISIONED",
"created": "2018-07-31T12:48:33.000Z",
"activated": "2020-02-19T12:33:20.000Z",
"statusChanged": "2020-02-19T12:33:20.000Z",
"lastLogin": '',
"lastUpdated": "2020-02-19T12:33:20.000Z",
"passwordChanged": "2020-02-06T13:32:56.000Z",
"type": {
"id": "oty66lckcvDyVcGzS0h7"
},
"profile": {
"firstName": "test",
"lastName": "that",
"manager": "MegaTester",
"mobilePhone": '',
"city": "TLV",
"displayName": "alsotest",
"secondEmail": "[email protected]",
"login": "[email protected]",
"email": "[email protected]",
"employeeNumber": "123427"
},
"credentials": {
"provider": {
"type": "OKTA",
"name": "OKTA"
}
},
"_links": {}
}
]
logs = [
{
"actor": {
"id": "UserTestID1",
"type": "User",
"alternateId": "[email protected]",
"displayName": "Test1 Testush",
"detailEntry": ''
},
"client": {
"userAgent": {
"rawUserAgent": "python-requests/2.22.0",
"os": "Windows",
"browser": "Chrome"
},
"zone": "null",
"device": "Computer",
"id": '',
"ipAddress": "8.8.8.8",
"geographicalContext": {
"city": "Tel Aviv",
"state": "Tel Aviv",
"country": "Israel",
"postalCode": '',
"geolocation": {
"lat": 32.0678,
"lon": 34.7647
}
}
},
"authenticationContext": {
"authenticationProvider": '',
"credentialProvider": '',
"credentialType": '',
"issuer": '',
"interface": '',
"authenticatio'nStep": 0,
"externalSessionId": "trsGDHiJe2ISM2GneNwg_tIWw"
},
"displayMessage": "Add user to application membership",
"eventType": "application.user_membership.add",
"outcome": {
"result": "SUCCESS",
"reason": ''
},
"published": "2020-02-18T11:23:05.066Z",
"securityContext": {
"asNumber": '',
"asOrg": '',
"isp": '',
"domain": '',
"isProxy": ''
},
"severity": "INFO",
"debugContext": {
"debugData": {
"requestId": "XkvJGFsS6hsPnC7KoFliVAAABzI",
"requestUri": "/api/v1/users",
"threatSuspected": "false",
"url": "/api/v1/users?activate=true"
}
},
"legacyEventType": "app.generic.provision.assign_user_to_app",
"transaction": {
"type": "WEB",
"id": "XkvJGFsS6hsPnC7KoFliVAAABzI",
"detail": {}
},
"uuid": "081c84f9-5241-11ea-ad7c-6125e916db06",
"version": "0",
"request": {
"ipChain": [
{
"ip": "8.8.8.8",
"geographicalContext": {
"city": "Tel Aviv",
"state": "Tel Aviv",
"country": "Israel",
"postalCode": '',
"geolocation": {
"lat": 32.0678,
"lon": 34.7647
}
},
"version": "V4",
"source": ''
}
]
},
"target": [
{
"id": "UserTestID2",
"type": "AppUser",
"alternateId": "[email protected]",
"displayName": "Test 1 that",
"detailEntry": ''
},
{
"id": "0oabfkvxe1npBRdow0h7",
"type": "AppInstance",
"alternateId": "Demisto-SAML-OKTA",
"displayName": "Demisto-SAML-OKTA",
"detailEntry": ''
},
{
"id": "00upt1h0w93PALT9v0h7",
"type": "User",
"alternateId": "[email protected]",
"displayName": "Test 1 that",
"detailEntry": ''
}
]
},
{
"actor": {
"id": "UserTestID2",
"type": "User",
"alternateId": "[email protected]",
"displayName": "Testush2 test",
"detailEntry": ''
},
"client": {
"userAgent": {
"rawUserAgent": "python-requests/2.22.0",
"os": "Unknown",
"browser": "UNKNOWN"
},
"zone": "null",
"device": "Unknown",
"id": '',
"ipAddress": "8.8.8.8",
"geographicalContext": {
"city": "Tel Aviv",
"state": "Tel Aviv",
"country": "Israel",
"postalCode": '',
"geolocation": {
"lat": 32.0678,
"lon": 34.7647
}
}
},
"authenticationContext": {
"authenticationProvider": '',
"credentialProvider": '',
"credentialType": '',
"issuer": '',
"interface": '',
"authenticationStep": 0,
"externalSessionId": "trsGDHiJe2ISM2GneNwg_tIWw"
},
"displayMessage": "Add user to application membership",
"eventType": "application.user_membership.add",
"outcome": {
"result": "SUCCESS",
"reason": ''
},
"published": "2020-02-18T11:23:04.791Z",
"securityContext": {
"asNumber": '',
"asOrg": '',
"isp": '',
"domain": '',
"isProxy": ''
},
"severity": "INFO",
"debugContext": {
"debugData": {
"requestId": "XkvJGFsS6hsPnC7KoFliVAAABzI",
"requestUri": "/api/v1/users",
"threatSuspected": "false",
"url": "/api/v1/users?activate=true"
}
},
"legacyEventType": "app.generic.provision.assign_user_to_app",
"transaction": {
"type": "WEB",
"id": "XkvJGFsS6hsPnC7KoFliVAAABzI",
"detail": {}
},
"uuid": "07f28ec5-5241-11ea-ad7c-6125e916db06",
"version": "0",
"request": {
"ipChain": [
{
"ip": "127.0.0.1",
"geographicalContext": {
"city": "Tel Aviv",
"state": "Tel Aviv",
"country": "Israel",
"postalCode": '',
"geolocation": {
"lat": 32.0678,
"lon": 34.7647
}
},
"version": "V4",
"source": ''
}
]
},
"target": [
{
"id": "0uapt1h0wbuz8uWvb0h7",
"type": "AppUser",
"alternateId": "[email protected]",
"displayName": "Test 1 that",
"detailEntry": ''
},
{
"id": "0oabe0e2jruaQccDf0h7",
"type": "AppInstance",
"alternateId": "ShrikSAML",
"displayName": "ShrikSAML",
"detailEntry": ''
},
{
"id": "00upt1h0w93PALT9v0h7",
"type": "User",
"alternateId": "[email protected]",
"displayName": "Test 1 that",
"detailEntry": ''
}
]
}]
okta_zone = {
"_links": {
"deactivate": {
"hints": {
"allow": [
"POST"
]
},
"href": "https://dev-530328.oktapreview.com/api/v1/zones/nzoqsmcx1qWYJ6wYF7q0/lifecycle/deactivate"
},
"self": {
"hints": {
"allow": [
"GET",
"PUT",
"DELETE"
]
},
"href": "https://dev-530328.oktapreview.com/api/v1/zones/nzoqsmcx1qWYJ6wYF7q0"
}
},
"created": "2020-04-06T22:23:12.000Z",
"gateways": [
{
"type": "CIDR",
"value": "4.5.3.2/16"
},
{
"type": "CIDR",
"value": "1.2.1.2/32"
}
],
"id": "nzoqsmcx1qWYJ6wYF7q0",
"lastUpdated": "2020-05-15T05:13:06.000Z",
"name": "Test Zone",
"proxies": None,
"status": "ACTIVE",
"system": False,
"type": "IP"
}
def util_load_json(path: str):
"""
Utility to load json data from a local folder.
"""
with io.open(path, mode='r', encoding='utf-8') as file:
return json.loads(file.read())
@pytest.mark.parametrize(
# Write and define the expected
"args ,expected_context, expected_readable",
[
({"userId": "TestID", "username": "", "verbose": 'false'},
{'ID': 'TestID', 'Username': '[email protected]', 'DisplayName': 'test this', 'Email': '[email protected]',
'Status': 'PROVISIONED', 'Type': 'Okta', 'Created': "2020-02-19T08:18:20.000Z",
'Activated': "2020-02-20T11:44:43.000Z",
'StatusChanged': "2020-02-20T11:45:24.000Z",
'PasswordChanged': "2020-02-19T08:18:21.000Z"}, '[email protected]'),
({"userId": "", "username": "[email protected]", "verbose": 'true'},
{'ID': 'TestID', 'Username': '[email protected]', 'DisplayName': 'test this', 'Email': '[email protected]',
'Status': 'PROVISIONED', 'Type': 'Okta', 'Created': "2020-02-19T08:18:20.000Z",
'Activated': "2020-02-20T11:44:43.000Z",
'StatusChanged': "2020-02-20T11:45:24.000Z",
'PasswordChanged': "2020-02-19T08:18:21.000Z"}, 'Additional Data'),
]
)
def test_get_user_command(mocker, args, expected_context, expected_readable):
mocker.patch.object(client, 'get_user', return_value=user_data)
readable, outputs, _ = get_user_command(client, args)
assert outputs.get('Account(val.ID && val.ID === obj.ID)')[0] == expected_context
assert expected_readable in readable
@pytest.mark.parametrize(
# Write and define the expected
"args ,expected_context, expected_readable",
[
({"userId": "TestID", "username": "", "verbose": 'false'},
{'ID': 'TestID', 'Username': '[email protected]', 'DisplayName': 'test this', 'Email': '[email protected]',
'Status': 'PROVISIONED', 'Type': 'Okta', 'Created': "2020-02-19T08:18:20.000Z",
'Activated': "2020-02-20T11:44:43.000Z",
'StatusChanged': "2020-02-20T11:45:24.000Z",
'PasswordChanged': "2020-02-19T08:18:21.000Z"}, '[email protected]'),
({"userId": "", "username": "[email protected]", "verbose": 'true'},
{'ID': 'TestID', 'Username': '[email protected]', 'DisplayName': 'test this', 'Email': '[email protected]',
'Status': 'PROVISIONED', 'Type': 'Okta', 'Created': "2020-02-19T08:18:20.000Z",
'Activated': "2020-02-20T11:44:43.000Z",
'StatusChanged': "2020-02-20T11:45:24.000Z",
'PasswordChanged': "2020-02-19T08:18:21.000Z"}, 'Additional Data'),
]
)
def test_list_user_command(mocker, args, expected_context, expected_readable):
mocker.patch.object(client, 'list_users', return_value=user_data)
readable, outputs, _ = list_users_command(client, args)
assert outputs.get('Account(val.ID && val.ID == obj.ID)')[0] == expected_context
assert expected_readable in readable
@pytest.mark.parametrize(
"args ,expected_context",
[
({"userId": "TestID"}, {'ID': 'uftpt24kdrDJ7fDOq0h7', 'FactorType': 'token:software:totp', 'Provider': 'GOOGLE',
'Status': 'PENDING_ACTIVATION', 'Profile': {'credentialId': '[email protected]'}}),
({"username": "[email protected]"},
{'ID': 'uftpt24kdrDJ7fDOq0h7', 'FactorType': 'token:software:totp', 'Provider': 'GOOGLE',
'Status': 'PENDING_ACTIVATION', 'Profile': {'credentialId': '[email protected]'}}),
]
)
def test_get_user_factors_command(mocker, args, expected_context):
mocker.patch.object(client, 'get_user_id', return_value='TestID')
mocker.patch.object(client, 'get_user_factors', return_value=factors_data)
readable, outputs, _ = get_user_factors_command(client, args)
assert expected_context == outputs.get('Account(val.ID && val.ID === obj.ID)').get('Factor')[1]
assert outputs.get('Account(val.ID && val.ID === obj.ID)').get('ID') == (args.get('userId') or 'TestID')
@pytest.mark.parametrize("args", [{'username': '[email protected]'}])
def test_get_groups_for_user_command(mocker, args):
expected_context = {'ID': '00g66lckcsAJpLcNc0h7',
'Created': "2016-04-12T15:01:50.000Z",
'ObjectClass': ["okta:user_group"],
'LastUpdated': '2016-04-12T15:01:50.000Z',
'LastMembershipUpdated': "2020-02-19T09:01:32.000Z",
'Type': "BUILT_IN", 'Name': "Everyone",
'Description': "All users in your organization"}
mocker.patch.object(client, 'get_user_id', return_value='TestID')
mocker.patch.object(client, 'get_groups_for_user', return_value=group_data)
_, outputs, _ = get_groups_for_user_command(client, args)
assert outputs.get('Account(val.ID && val.ID === obj.ID)').get('Group')[0] == expected_context
assert 'TestID' == outputs.get('Account(val.ID && val.ID === obj.ID)').get('ID')
@pytest.mark.parametrize(
"args, polling_response, result",
[({'userId': 'TestID', 'factorId': 'FactorID'}, polling_response_rejected, 'REJECTED'),
({'userId': 'TestID', 'factorId': 'FactorID'}, polling_response_success, 'SUCCESS')])
def test_verify_push_factor_command(mocker, args, polling_response, result):
mocker.patch.object(client, 'verify_push_factor', return_value=verify_push_factor_response)
mocker.patch.object(client, 'poll_verify_push', return_value=polling_response)
_, outputs, _ = verify_push_factor_command(client, args)
assert outputs.get('Account(val.ID && val.ID === obj.ID)').get('ID') == 'TestID'
assert outputs.get('Account(val.ID && val.ID === obj.ID)').get('VerifyPushResult') == result
@pytest.mark.parametrize(
"args",
[({'firstName': 'Testush',
'lastName': 'Test',
'email': '[email protected]',
'login': '[email protected]',
'password': 'Aa123456'})])
def test_create_user_command(mocker, args):
mocker.patch.object(client, 'create_user', return_value=create_user_response)
readable, outputs, _ = create_user_command(client, args)
assert 'STAGED' in readable
assert outputs.get('Account(val.ID && val.ID === obj.ID)')[0].get('Status') == 'STAGED'
@pytest.mark.parametrize(
"args, expected",
[
({'groupId': 'Test Group', 'limit': 5},
{'ID': 'TestID2', 'Username': '[email protected]', 'DisplayName': 'Test2 Test2',
'Email': '[email protected]', 'Status': 'STAGED', 'Type': 'Okta', 'Created': "2018-07-24T20:20:04.000Z"})
])
def test_get_group_members_command(mocker, args, expected):
mocker.patch.object(client, 'get_group_members', return_value=group_members)
readable, outputs, _ = get_group_members_command(client, args)
assert 'Test Group' in readable
assert expected == outputs.get('Account(val.ID && val.ID === obj.ID)')[1]
def test_get_logs_command(mocker):
mocker.patch.object(client, 'get_logs', return_value=logs)
readable, outputs, _ = get_logs_command(client, {})
assert logs == outputs.get('Okta.Logs.Events(val.uuid && val.uuid === obj.uuid)')
assert 'Unknown browser on Unknown OS Unknown device' in readable
assert 'Chrome on Windows Computer' in readable
@pytest.mark.parametrize(
"args",
[
({'zoneID': 'nzoqsmcx1qWYJ6wYF7q0'})
])
def test_get_zone_command(mocker, args):
mocker.patch.object(client, 'get_zone', return_value=okta_zone)
readable, outputs, _ = get_zone_command(client, args)
assert 'Test Zone' in readable
assert 'nzoqsmcx1qWYJ6wYF7q0' == outputs.get('Okta.Zone(val.id && val.id === obj.id)').get('id', '')
def test_list_zones_command(mocker):
mocker.patch.object(client, 'list_zones', return_value=okta_zone)
readable, outputs, _ = list_zones_command(client, {})
assert 'Test Zone' in readable
assert 'nzoqsmcx1qWYJ6wYF7q0' == outputs.get('Okta.Zone(val.id && val.id === obj.id)').get('id', '')
@pytest.mark.parametrize(
"args",
[
({'zoneID': 'nzoqsmcx1qWYJ6wYF7q0', 'zoneName': 'NewZoneName'})
])
def test_update_zone_command(mocker, args):
my_okta_zone = okta_zone
my_okta_zone['name'] = 'NewZoneName'
mocker.patch.object(client, 'get_zone', return_value=okta_zone)
mocker.patch.object(client, 'update_zone', return_value=my_okta_zone)
readable, outputs, _ = update_zone_command(client, args)
assert 'NewZoneName' == outputs.get('Okta.Zone(val.id && val.id === obj.id)').get('name', '')
EXPECTED_LOGS_RESULT = \
[
{
"Actor": "dummy name (User)",
"ActorAlternaneId": "example",
"EventInfo": "Remove user from group membership",
"EventOutcome": "SUCCESS",
"EventSeverity": "INFO",
"Client": "Unknown browser on Unknown OS Unknown device",
"RequestIP": "8.8.8.8",
"ChainIP": [
"8.8.8.8"
],
"Targets": "test this (User)\ntest1 (UserGroup)\n",
"Time": "12/13/2021, 01:47:08"
},
{
"Actor": "dummy name (User)",
"ActorAlternaneId": "example",
"EventInfo": "Remove user from group membership",
"EventOutcome": "SUCCESS",
"EventSeverity": "INFO",
"Client": "Unknown browser on Unknown OS Unknown device",
"RequestIP": "8.8.8.8",
"ChainIP": [],
"Targets": "test this (User)\ntest1 (UserGroup)\n",
"Time": "12/13/2021, 01:47:08"
}
]
def test_get_readable_logs():
logs_raw_response = util_load_json('test_data/get_logs_response.json')
result = client.get_readable_logs(logs_raw_response)
assert len(result) == 2
assert result == EXPECTED_LOGS_RESULT
|
the-stack_106_25172 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2017 Jesús Espino <[email protected]>
# Copyright (C) 2014-2017 David Barragán <[email protected]>
# Copyright (C) 2014-2017 Alejandro Alonso <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.conf import settings
from taiga.importers.jira.agile import JiraAgileImporter
from taiga.importers.jira.normal import JiraNormalImporter
from taiga.users.models import User
import json
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--token', dest="token", type=str,
help='Auth token')
parser.add_argument('--server', dest="server", type=str,
help='Server address (default: https://jira.atlassian.com)',
default="https://jira.atlassian.com")
parser.add_argument('--project-id', dest="project_id", type=str,
help='Project ID or full name (ex: taigaio/taiga-back)')
parser.add_argument('--project-type', dest="project_type", type=str,
help='Project type in jira: project or board')
parser.add_argument('--template', dest='template', default="scrum",
help='template to use: scrum or kanban (default scrum)')
parser.add_argument('--ask-for-users', dest='ask_for_users', const=True,
action="store_const", default=False,
help='Interactively ask for the Taiga user matching each Jira user')
parser.add_argument('--closed-data', dest='closed_data', const=True,
action="store_const", default=False,
help='Import closed data')
parser.add_argument('--keep-external-reference', dest='keep_external_reference', const=True,
action="store_const", default=False,
help='Store external reference of imported data')
def handle(self, *args, **options):
admin = User.objects.get(username="admin")
server = options.get("server")
if options.get('token', None) == "anon":
token = None
elif options.get('token', None):
token = json.loads(options.get('token'))
else:
(rtoken, rtoken_secret, url) = JiraNormalImporter.get_auth_url(
server,
settings.IMPORTERS.get('jira', {}).get('consumer_key', None),
settings.IMPORTERS.get('jira', {}).get('cert', None),
True
)
print(url)
input("Go to the url, allow the user and get back and press enter")
token = JiraNormalImporter.get_access_token(
server,
settings.IMPORTERS.get('jira', {}).get('consumer_key', None),
settings.IMPORTERS.get('jira', {}).get('cert', None),
rtoken,
rtoken_secret,
True
)
print("Auth token: {}".format(json.dumps(token)))
if options.get('project_type', None) is None:
print("Select the type of project to import (project or board): ")
project_type = input("Project type: ")
else:
project_type = options.get('project_type')
if project_type not in ["project", "board"]:
print("ERROR: Bad project type.")
return
if project_type == "project":
importer = JiraNormalImporter(admin, server, token)
else:
importer = JiraAgileImporter(admin, server, token)
if options.get('project_id', None):
project_id = options.get('project_id')
else:
print("Select the project to import:")
for project in importer.list_projects():
print("- {}: {}".format(project['id'], project['name']))
project_id = input("Project id or key: ")
users_bindings = {}
if options.get('ask_for_users', None):
print("Add the username or email for next jira users:")
for user in importer.list_users():
try:
users_bindings[user['key']] = User.objects.get(Q(email=user['email']))
# matched automatically by email; move on to the next Jira user
continue
except User.DoesNotExist:
pass
while True:
username_or_email = input("{}: ".format(user['full_name']))
if username_or_email == "":
break
try:
users_bindings[user['key']] = User.objects.get(Q(username=username_or_email) | Q(email=username_or_email))
break
except User.DoesNotExist:
print("ERROR: Invalid username or email")
options = {
"template": options.get('template'),
"import_closed_data": options.get("closed_data", False),
"users_bindings": users_bindings,
"keep_external_reference": options.get('keep_external_reference'),
}
if project_type == "project":
print("Bind jira issue types to (epic, us, issue)")
types_bindings = {
"epic": [],
"us": [],
"task": [],
"issue": [],
}
for issue_type in importer.list_issue_types(project_id):
while True:
if issue_type['subtask']:
types_bindings['task'].append(issue_type)
break
taiga_type = input("{}: ".format(issue_type['name']))
if taiga_type not in ['epic', 'us', 'issue']:
print("use a valid taiga type (epic, us, issue)")
continue
types_bindings[taiga_type].append(issue_type)
break
options["types_bindings"] = types_bindings
importer.import_project(project_id, options)
|
the-stack_106_25174 | import optparse
from scrapy.commands import ScrapyCommand
from scrapy.commands.check import Command
from scrapy.settings import BaseSettings
parser = optparse.OptionParser(prog="Command", formatter=optparse.TitledHelpFormatter(), \
conflict_handler='resolve')
parser.add_option("--logfile", metavar="FILE",
help="log file. if omitted stderr will be used")
parser.add_option("--nolog", action="store_true",
help="disable logging completely")
parser.add_option("--profile", metavar="FILE", default=None,
help="write python cProfile stats to FILE")
parser.add_option("--pidfile", metavar="FILE",
help="write process ID to FILE")
parser.add_option("-s", "--set", action="append", default=[], metavar="NAME=VALUE",
help="set/override setting (may be repeated)")
arg = ['--logfile=test.txt', '--profile=test2.txt','-sa=b']
opts, args = parser.parse_args(arg)
print(args)
print(opts)
com = Command()
com.settings = BaseSettings()
sc = ScrapyCommand()
com.process_options(args, opts)
p1 = com.settings.attributes
p2 = com.settings.get("LOG_FILE")
cs = com.settings
print(type(cs.attributes["LOG_FILE"]))
|
the-stack_106_25176 | import sys
import json
from aiohttp.client_exceptions import ClientError
from kivy import base, utils
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.factory import Factory
from kivy.lang import Builder
from kivy.uix.label import Label
from kivy.utils import platform
from electrum_bynd.gui.kivy.i18n import _
from electrum_bynd.base_crash_reporter import BaseCrashReporter
from electrum_bynd.logging import Logger
Builder.load_string('''
<CrashReporter@Popup>
BoxLayout:
orientation: 'vertical'
Label:
id: crash_message
text_size: root.width, None
size: self.texture_size
size_hint: None, None
Label:
id: request_help_message
text_size: root.width*.95, None
size: self.texture_size
size_hint: None, None
BoxLayout:
size_hint: 1, 0.1
Button:
text: 'Show report contents'
height: '48dp'
size_hint: 1, None
on_release: root.show_contents()
BoxLayout:
size_hint: 1, 0.1
Label:
id: describe_error_message
text_size: root.width, None
size: self.texture_size
size_hint: None, None
TextInput:
id: user_message
size_hint: 1, 0.3
BoxLayout:
size_hint: 1, 0.7
BoxLayout:
size_hint: 1, None
height: '48dp'
orientation: 'horizontal'
Button:
height: '48dp'
text: 'Send'
on_release: root.send_report()
Button:
text: 'Never'
on_release: root.show_never()
Button:
text: 'Not now'
on_release: root.dismiss()
<CrashReportDetails@Popup>
BoxLayout:
orientation: 'vertical'
ScrollView:
do_scroll_x: False
Label:
id: contents
text_size: root.width*.9, None
size: self.texture_size
size_hint: None, None
Button:
text: 'Close'
height: '48dp'
size_hint: 1, None
on_release: root.dismiss()
''')
class CrashReporter(BaseCrashReporter, Factory.Popup):
issue_template = """[b]Traceback[/b]
[i]{traceback}[/i]
[b]Additional information[/b]
* Electrum version: {app_version}
* Operating system: {os}
* Wallet type: {wallet_type}
* Locale: {locale}
"""
def __init__(self, main_window, exctype, value, tb):
BaseCrashReporter.__init__(self, exctype, value, tb)
Factory.Popup.__init__(self)
self.main_window = main_window
self.title = BaseCrashReporter.CRASH_TITLE
self.title_size = "24sp"
self.ids.crash_message.text = BaseCrashReporter.CRASH_MESSAGE
self.ids.request_help_message.text = BaseCrashReporter.REQUEST_HELP_MESSAGE
self.ids.describe_error_message.text = BaseCrashReporter.DESCRIBE_ERROR_MESSAGE
def show_contents(self):
details = CrashReportDetails(self.get_report_string())
details.open()
def show_popup(self, title, content):
popup = Factory.Popup(title=title,
content=Label(text=content, text_size=(Window.size[0] * 3/4, None)),
size_hint=(3/4, 3/4))
popup.open()
def send_report(self):
try:
loop = self.main_window.network.asyncio_loop
proxy = self.main_window.network.proxy
# FIXME network request in GUI thread...
response = json.loads(BaseCrashReporter.send_report(self, loop, proxy,
"/crash.json", timeout=10))
except (ValueError, ClientError):
#self.logger.debug("", exc_info=True)
self.show_popup(_('Unable to send report'), _("Please check your network connection."))
else:
self.show_popup(_('Report sent'), response["text"])
if response["location"]:
self.open_url(response["location"])
self.dismiss()
def open_url(self, url):
if platform != 'android':
return
from jnius import autoclass, cast
String = autoclass("java.lang.String")
url = String(url)
PythonActivity = autoclass('org.kivy.android.PythonActivity')
activity = PythonActivity.mActivity
Intent = autoclass('android.content.Intent')
Uri = autoclass('android.net.Uri')
browserIntent = Intent()
# This line crashes the app:
# browserIntent.setAction(Intent.ACTION_VIEW)
# Luckily we don't need it because the OS is smart enough to recognize the URL
browserIntent.setData(Uri.parse(url))
currentActivity = cast('android.app.Activity', activity)
currentActivity.startActivity(browserIntent)
def show_never(self):
self.main_window.electrum_config.set_key(BaseCrashReporter.config_key, False)
self.dismiss()
def get_user_description(self):
return self.ids.user_message.text
def get_wallet_type(self):
return self.main_window.wallet.wallet_type
class CrashReportDetails(Factory.Popup):
def __init__(self, text):
Factory.Popup.__init__(self)
self.title = "Report Details"
self.ids.contents.text = text
print(text)
class ExceptionHook(base.ExceptionHandler, Logger):
def __init__(self, main_window):
base.ExceptionHandler.__init__(self)
Logger.__init__(self)
self.main_window = main_window
if not main_window.electrum_config.get(BaseCrashReporter.config_key, default=True):
return
# For exceptions in Kivy:
base.ExceptionManager.add_handler(self)
# For everything else:
sys.excepthook = lambda exctype, value, tb: self.handle_exception(value)
def handle_exception(self, _inst):
exc_info = sys.exc_info()
self.logger.error('exception caught by crash reporter', exc_info=exc_info)
# Check if this is an exception from within the exception handler:
import traceback
for item in traceback.extract_tb(exc_info[2]):
if item.filename.endswith("crash_reporter.py"):
return
e = CrashReporter(self.main_window, *exc_info)
# Open in main thread:
Clock.schedule_once(lambda _: e.open(), 0)
return base.ExceptionManager.PASS
|
the-stack_106_25178 | from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
import os
from .models import ChunkedUpload
from .settings import ABSTRACT_ADMIN_MODEL
class ChunkedUploadAdmin(admin.ModelAdmin):
list_display = ['file_type', 'id', 'creator', 'status', 'created_at', 'completed_at']
search_fields = ['filename']
list_display_links = ['id']
list_filter = ['status', 'created_at', 'completed_at']
readonly_fields = ['offset', 'creator', 'created_at', 'completed_at']
ordering = ['-created_at']
fieldsets = (
(_('status'), {'fields': ('status', 'created_at', 'completed_at')}),
(_('general'), {'fields': ('offset', 'filename', 'file')}),
(_('owner'), {'fields': ('creator', 'owner_type', 'owner_id')}),
)
def file_type(self, obj):
return os.path.splitext(obj.file.name)[1][1:]
file_type.short_description = _('file type')
if not ABSTRACT_ADMIN_MODEL: # If the model exists
admin.site.register(ChunkedUpload, ChunkedUploadAdmin)
|
the-stack_106_25181 | import dash
import dash_labs as dl
import plotly.express as px
import plotly.graph_objects as go
app = dash.Dash(__name__, plugins=[dl.plugins.FlexibleCallbacks()])
# Load gapminder dataset
df = px.data.gapminder()
years = sorted(df.year.drop_duplicates())
continents = list(df.continent.drop_duplicates())
# # Build Themed Template
# theme_name = "cerulean"
# theme_name = "cosmo"
# theme_name = "cyborg"
theme_name = "darkly"
# theme_name = "flatly"
# theme_name = "journal"
# theme_name = "litera"
# theme_name = "lumen"
# theme_name = "lux"
# theme_name = "materia"
# theme_name = "minty"
# theme_name = "pulse"
# theme_name = "sandstone"
# theme_name = "simplex"
# theme_name = "sketchy"
# theme_name = "slate"
# theme_name = "solar"
# theme_name = "spacelab"
# theme_name = "superhero"
# theme_name = "united"
# theme_name = "yeti"
css_url = f"https://bootswatch.com/4/{theme_name}/bootstrap.css"
tpl = dl.templates.DbcSidebarTabs(
["Scatter", "Histogram"],
title=f"Dash Labs - {theme_name.title()} Theme",
theme=css_url,
figure_template=True,
)
@app.callback(
args=dict(
continent=tpl.checklist_input(continents, value=continents, label="Continents"),
year=tpl.slider_input(
years[0], years[-1], step=5, value=years[-1], label="Year"
),
logs=tpl.checklist_input(
["log(x)"], value="log(x)", label="Axis Scale", role="Scatter"
),
tab=tpl.tab_input(),
),
output=[
tpl.graph_output(role="Scatter"),
tpl.graph_output(role="Histogram"),
],
template=tpl,
)
def callback(year, continent, logs, tab):
print(f"Active Tab: {tab}")
logs = logs or []
# Let parameterize infer output component
year_df = df[df.year == year]
if continent:
year_df = year_df[year_df.continent.isin(continent)]
if not len(year_df):
return [go.Figure(), go.Figure()]
title = f"Life Expectancy ({year})"
scatter_fig = (
px.scatter(
year_df,
x="gdpPercap",
y="lifeExp",
size="pop",
color="continent",
hover_name="country",
log_x="log(x)" in logs,
size_max=60,
)
.update_layout(title_text=title, margin=dict(l=0, r=0, b=0))
.update_traces(marker_opacity=0.8)
)
hist_fig = px.histogram(
year_df, x="lifeExp", color="continent", barnorm=""
).update_layout(
title_text=title,
)
return scatter_fig, hist_fig
app.layout = tpl.layout(app)
if __name__ == "__main__":
app.run_server(debug=True)
|
the-stack_106_25182 | '''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web are copy-pasted from
torchvision's resnet and have the wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparison, etc.) have the following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in your work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
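# A quick sanity check of the table above (informal arithmetic derived from the code below,
# not quoted from the paper): each network stacks 3 stages of n BasicBlocks with 2 convs per
# block, plus the stem conv and the final linear layer, so depth = 6n + 2. For resnet20,
# num_blocks = [3, 3, 3] gives 6*3 + 2 = 20 layers, and the 3x3 convs dominate the parameter
# count (roughly the 0.27M listed above).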
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
__all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
affine_status = False
def _weights_init(m):
classname = m.__class__.__name__
#print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option='A'):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, affine = affine_status)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, affine = affine_status)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(lambda x:
F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes, affine = affine_status)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16, affine = affine_status)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.linear = nn.Linear(64, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def resnet20():
return ResNet(BasicBlock, [3, 3, 3])
def resnet32():
return ResNet(BasicBlock, [5, 5, 5])
def resnet44():
return ResNet(BasicBlock, [7, 7, 7])
def resnet56():
return ResNet(BasicBlock, [9, 9, 9])
def resnet110():
return ResNet(BasicBlock, [18, 18, 18])
def resnet1202():
return ResNet(BasicBlock, [200, 200, 200])
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters()))))
if __name__ == "__main__":
for net_name in __all__:
if net_name.startswith('resnet'):
print(net_name)
test(globals()[net_name]())
print() |
the-stack_106_25185 | from django.conf.urls import url
from apps.hotelapp.views import Index, UsuarioCreate, UsuarioList, UsuarioDelete
from apps.hotelapp.views import UsuarioUpdate, UsuarioShow, search, SucursalCreate
from apps.hotelapp.views import SucursalList, SucursalDelete, SucursalUpdate, SucursalShow
from apps.hotelapp.views import search_sucursal
urlpatterns = [
url(r'^$', Index.as_view(), name='index'),
url(r'^nuevo/', UsuarioCreate.as_view(), name='usuario_crear'),
url(r'^listar', UsuarioList.as_view(), name='usuario_listar'),
url(r'^eliminar/(?P<pk>\d+)/$', UsuarioDelete.as_view(), name='usuario_eliminar'),
url(r'^editar/(?P<pk>\d+)/$', UsuarioUpdate.as_view(), name='usuario_editar'),
url(r'^mostrar/(?P<pk>\d+)/$', UsuarioShow.as_view(), name='usuario_mostrar'),
url(r'^buscar/$', search, name='usuario_buscar'),
url(r'^nuevasuc/', SucursalCreate.as_view(), name='sucursal_crear'),
url(r'^listasuc', SucursalList.as_view(), name='sucursal_listar'),
url(r'^eliminasuc/(?P<pk>\d+)/$', SucursalDelete.as_view(), name='sucursal_eliminar'),
url(r'^editasuc/(?P<pk>\d+)/$', SucursalUpdate.as_view(), name='sucursal_editar'),
url(r'^muestrasuc/(?P<pk>\d+)/$', SucursalShow.as_view(), name='sucursal_mostrar'),
url(r'^buscar_sucursal/$', search_sucursal, name='sucursal_buscar'),
]
|
the-stack_106_25186 | # import libraries
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark.sql.types import *
from pyspark.sql.functions import col, count, lit, rand, when
import pandas as pd
from math import ceil
#################################################
# spark config
#################################################
mtaMaster = "spark://192.168.0.182:7077"
conf = SparkConf()
conf.setMaster(mtaMaster)
conf.set("spark.executor.memory", "24g")
conf.set("spark.driver.memory", "26g")
conf.set("spark.cores.max", 96)
conf.set("spark.driver.cores", 8)
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
conf.set("spark.kryoserializer.buffer", "256m")
conf.set("spark.kryoserializer.buffer.max", "256m")
conf.set("spark.default.parallelism", 24)
conf.set("spark.eventLog.enabled", "true")
conf.set("spark.eventLog.dir", "hdfs://192.168.0.182:9000/eventlog")
conf.set("spark.history.fs.logDirectory", "hdfs://192.168.0.182:9000/eventlog")
conf.set("spark.driver.maxResultSize", "4g")
conf.getAll()
#################################################
# create spark session
#################################################
spark = SparkSession.builder.appName('ML2_HV_v1_NYT_sim1_and_sim2_to_sim3_round4_human_validation').config(conf=conf).getOrCreate()
sc = spark.sparkContext
# check things are working
print(sc)
print(sc.defaultParallelism)
print("SPARK CONTEXT IS RUNNING")
#################################################
# define major topic codes
#################################################
# major topic codes for loop (NO 23 IN THE NYT CORPUS)
majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 100]
#majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 100]
#################################################
# read result data from round 3
#################################################
df_results = spark.read.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_r4_classified.parquet").repartition(50)
# verdict to integer for the comparison with majortopic later
df_results = df_results.withColumn('verdict', df_results.verdict.cast(IntegerType()))
#################################################
# create table to store sample and validation numbers
#################################################
columns = ["num_classified", "num_sample", "num_non_sample", "num_correct", "num_incorrect", "precision_in_sample", "num_added_to_training"]
df_numbers = pd.DataFrame(index=majortopic_codes, columns=columns)
df_numbers = df_numbers.fillna(0)
#################################################
# create table of samples from results
#################################################
# constants for sample size calculation for 95% confidence with +-0.05 precision confidence interval:
z = 1.96
delta = 0.05
z_delta = z*z*0.5*0.5/(delta*delta)
print("z_delta :", z_delta)
for i in majortopic_codes:
df_classified = df_results.where(col('verdict') == i)
num_classified = df_classified.count()
df_numbers["num_classified"].loc[i] = num_classified
print("MTC:", i, "num_classified: ", num_classified)
if num_classified > 100:
sample_size = ceil(z_delta/(1+1/num_classified*(z_delta-1)))
print("sample_size: ", sample_size)
if sample_size < 100:
sample_size = 100
df_sample = df_classified.sort('doc_id').withColumn('random', rand()).sort('random').limit(sample_size).drop('random')
df_sample_num = df_sample.count()
print("df_sample: ", df_sample_num)
# separate non-sample from sample elements
ids_drop = df_sample.select("doc_id")
df_non_sample = df_classified.join(ids_drop, "doc_id", "left_anti")
df_numbers["num_sample"].loc[i] = df_sample_num
df_numbers["num_non_sample"].loc[i] = df_non_sample.count()
else:
df_numbers["num_sample"].loc[i] = num_classified
df_sample = df_classified
df_non_sample = None
# create table of all samples and add new sample to it
if i == 1:
df_sample_all = df_sample
else:
df_sample_all = df_sample_all.union(df_sample)
#print("MTC:", i, "df_sample_all: ", df_sample_all.count())
# create table of all non-samples and add new non-sample to it
if i == 1:
df_non_sample_all = None
if df_non_sample != None and df_non_sample_all == None:
df_non_sample_all = df_non_sample
elif df_non_sample != None and df_non_sample_all != None:
df_non_sample_all = df_non_sample_all.union(df_non_sample)
#print("MTC:", i, "df_non_sample_all: ", df_non_sample_all.count())
print("MTC:", i)
#################################################
# check precision by majortopic codes
#################################################
# count correctly classified and precision for each majortopic code and write to table of numbers
df_correctly_classified = df_sample_all.where(col('majortopic') == col('verdict'))
for i in majortopic_codes:
num_correct = df_correctly_classified.where(col('verdict') == i).count()
df_numbers["num_correct"].loc[i] = num_correct
df_numbers["precision_in_sample"].loc[i] = num_correct/df_numbers["num_sample"].loc[i]
# count incorrectly classified for debugging and checking
df_incorrectly_classified = df_sample_all.where(col('majortopic') != col('verdict'))
for i in majortopic_codes:
num_incorrect = df_incorrectly_classified.where(col('verdict') == i).count()
df_numbers["num_incorrect"].loc[i] = num_incorrect
print(df_numbers)
#################################################
# create tables of elements based on precision
#################################################
# create tables for sorting elements based on precision results
# where precision is equal to or greater than 75%
# NOTE: validated wrongly classified elements will NOT be added to the results with the wrong major
# topic code, instead they will be added to the unclassified elements as in rounds 1&2
df_replace_all = None
# where precision is less than 75%
df_non_sample_replace = None
df_correct_replace = None
df_wrong_replace = None
for i in majortopic_codes:
print("create tables MTC:", i)
if df_numbers["precision_in_sample"].loc[i] >= 0.75:
# in this case add all elements from sample and non-sample to the training set with
# new major topic code i, EXCEPT for validated negatives, those are added to back into the
# test set
# first add wrong sample elements to their table
df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') != col('verdict'))
if df_wrong_replace == None:
df_wrong_replace = df_lemma
else:
df_wrong_replace = df_wrong_replace.union(df_lemma)
# get doc_ids for these elements to remove them from the rest of the elements classified as
# belonging to major topic i
ids_drop = df_lemma.select("doc_id")
# get all elements classified as belonging to major topic code i
df_lemma = df_results.where(col('verdict') == i)
# remove wrongly classified from df_lemma
df_lemma = df_lemma.join(ids_drop, "doc_id", "left_anti")
# add df_lemma to df_replace_all
if df_replace_all == None:
df_replace_all = df_lemma
else:
df_replace_all = df_replace_all.union(df_lemma)
# write numbers to df_numbers
df_numbers["num_added_to_training"].loc[i] = df_lemma.count()
#print("MTC:", i, "df_replace_all: ", df_replace_all.count())
else:
# in this case add only correct elements from sample to training set, the rest go back in
# the test set
# first add non-sample elements to their table, BUT we have to check whether non-sample elements
# exist
if df_non_sample_all != None:
df_lemma = df_non_sample_all.where(col('verdict') == i)
if df_non_sample_replace == None:
df_non_sample_replace = df_lemma
else:
df_non_sample_replace = df_non_sample_replace.union(df_lemma)
else:
df_non_sample_replace = None
#print("MTC:", i, "df_non_sample_replace: ", df_non_sample_replace.count())
# second add correct sample elements to their table
df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') == col('verdict'))
if df_correct_replace == None:
df_correct_replace = df_lemma
else:
df_correct_replace = df_correct_replace.union(df_lemma)
df_numbers["num_added_to_training"].loc[i] = df_lemma.count()
#print("MTC:", i, "df_correct_replace: ", df_correct_replace.count())
# finally add wrong sample elements to their table
df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') != col('verdict'))
if df_wrong_replace == None:
df_wrong_replace = df_lemma
else:
df_wrong_replace = df_wrong_replace.union(df_lemma)
#print("MTC:", i, "df_wrong_replace: ", df_wrong_replace.count())
# sometimes there will be no major topic code with precision => 75%
if df_replace_all == None:
df_replace_all = "empty"
# sometimes there will be no non-sample elements
if df_non_sample_replace == None:
df_non_sample_replace = "empty"
# the reason for creating these "empty" values, is because they will persist after we clear the
# cache, and we can use them later in the workflow control
# write all tables to parquet before clearing memory
df_correct_replace.write.parquet("hdfs://192.168.0.182:9000/input/df_correct_replace_temp.parquet", mode="overwrite")
df_wrong_replace.write.parquet("hdfs://192.168.0.182:9000/input/df_wrong_replace_temp.parquet", mode="overwrite")
# sometimes there will be no non-sample elements
if df_non_sample_replace != "empty":
df_non_sample_replace.write.parquet("hdfs://192.168.0.182:9000/input/df_non_sample_replace_temp.parquet", mode="overwrite")
# sometimes there will be no major topic code with precision >= 75%
if df_replace_all != "empty":
df_replace_all.write.parquet("hdfs://192.168.0.182:9000/input/df_replace_all_temp.parquet", mode="overwrite")
# write df_numbers to csv
df_numbers.to_csv("ML2_HV_v1_NYT_human_validation_numbers_r4.csv", index=True)
# empty memory
spark.catalog.clearCache()
print("cache cleared")
#################################################
# prepare df_original to add tables to it
#################################################
df_original = spark.read.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_r4_train_and_remaining_NOTclassified.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels
df_original = df_original.withColumnRenamed('majortopic', 'mtc_after_r3')
df_original = df_original.withColumn('majortopic', df_original['mtc_after_r3'])
# finally, create the new train id column
df_original = df_original.withColumn("train_r5", when(df_original["train_r4"] == 1, 1).otherwise(0))
#################################################
# add df_replace_all back to df_original
#################################################
if df_replace_all != "empty":
print("df_replace_all is NOT empty")
df_replace_all = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_replace_all_temp.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels
df_replace_all = df_replace_all.withColumnRenamed('majortopic', 'mtc_after_r3')
df_replace_all = df_replace_all.withColumn('majortopic', df_replace_all['verdict'])
# create the new train id column
df_replace_all = df_replace_all.withColumn("train_r5", lit(1))
# drop the extra columns to be able to add it back to df_original
df_replace_all = df_replace_all.drop('verdict')
# add df_replace_all elements to df_original
df_original = df_original.union(df_replace_all)
else:
print("df_replace_all is empty")
#################################################
# add df_non_sample_replace back to df_original
#################################################
if df_non_sample_replace != "empty":
print("df_non_sample_replace is NOT empty")
df_non_sample_replace = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_non_sample_replace_temp.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels
df_non_sample_replace = df_non_sample_replace.withColumnRenamed('majortopic', 'mtc_after_r3')
df_non_sample_replace = df_non_sample_replace.withColumn('majortopic', df_non_sample_replace['mtc_after_r3'])
# create the new train id column
df_non_sample_replace = df_non_sample_replace.withColumn("train_r5", lit(0))
# drop the extra columns to be able to add it back to df_original
df_non_sample_replace = df_non_sample_replace.drop('verdict')
# add df_non_sample_replace elements to df_original
df_original = df_original.union(df_non_sample_replace)
else:
print("df_non_sample_replace is empty")
#################################################
# add df_correct_replace back to df_original
#################################################
df_correct_replace = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_correct_replace_temp.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels
df_correct_replace = df_correct_replace.withColumnRenamed('majortopic', 'mtc_after_r3')
df_correct_replace = df_correct_replace.withColumn('majortopic', df_correct_replace['verdict'])
# create the new train id column
df_correct_replace = df_correct_replace.withColumn("train_r5", lit(1))
# drop the extra columns to be able to add it back to df_original
df_correct_replace = df_correct_replace.drop('verdict')
# add df_correct_replace elements to df_original
df_original = df_original.union(df_correct_replace)
#################################################
# add df_wrong_replace back to df_original
#################################################
df_wrong_replace = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_wrong_replace_temp.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels
df_wrong_replace = df_wrong_replace.withColumnRenamed('majortopic', 'mtc_after_r3')
df_wrong_replace = df_wrong_replace.withColumn('majortopic', df_wrong_replace['mtc_after_r3'])
# create the new train id column
df_wrong_replace = df_wrong_replace.withColumn("train_r5", lit(0))
# drop the extra columns to be able to add it back to df_original
df_wrong_replace = df_wrong_replace.drop('verdict')
# add df_wrong_replace elements to df_original
df_original = df_original.union(df_wrong_replace)
#################################################
# final write operations
#################################################
df_original.write.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_round5_start.parquet", mode="overwrite")
df_original.groupBy("train_r5").count().show(n=30)
# empty memory
spark.catalog.clearCache()
print("cache cleared")
# write to pandas and export to csv for debugging
df_original = spark.read.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_round5_start.parquet").repartition(50)
df_original = df_original.drop('text', 'words', 'features', 'raw_features').toPandas()
df_original.to_csv("ML2_HV_v1_NYT_round5_starting_table.csv", index=False)
sc.stop()
spark.stop()
|
the-stack_106_25187 | # the feature extraction script
# include a complete list of features in the Tor website fingerprinting literature
import os
import sys
from . import util
from .Param import *
# for CUMUL features and std
import itertools
import numpy
# for int/int to be float
from .FeatureUtil import *
import multiprocessing
#keyword = sys.argv[1]
# used for celltrace function
#CelltracePath_crawl = util.PathReader(keyword)
CelltracePath_List = []
#for each in CelltracePath_crawl:
# CelltracePath_List.append(each)
# Content Analysis
CelltracePath_List.append("******************")
CelltracePath = ""
FeaturePath = CelltracePath
# create a celltrace file in corresponding folder
# return the path of the created file
def CreateFile(DirFile):
global FeaturePath
global CelltracePath
Filename = os.path.basename(DirFile)
AbsPath = os.path.dirname(DirFile)
# RelPath = os.path.relpath(AbsPath, FeaturePath)
# print Filename, AbsPath, RelPath
NewName = Filename.split(".")[0] + ".feature"
DestPath = os.path.join(FeaturePath, NewName)
# create folder if nonexist
DestDir = os.path.dirname(DestPath)
if not os.path.exists(DestDir):
os.makedirs(DestDir)
fd = open(DestPath, "w+")
fd.close()
return DestPath
def Enumerate(Dir):
FileList = []
for dirname, dirnames, filenames in os.walk(Dir):
# skip logs directory
if "logs" in dirnames:
dirnames.remove("logs")
# if file exists
if len(filenames) != 0:
for filename in filenames:
fulldir = os.path.join(dirname, filename)
FileList.append(fulldir)
return FileList
def extract(times, sizes, features):
if DEBUG_FLAG:
FeaturePos = dict()
#Transmission size features
if PACKET_NUMBER == True:
PktNum.PacketNumFeature(times, sizes, features)
if DEBUG_FLAG:
FeaturePos['PACKET_NUMBER'] = len(features)
# inter packet time + transmission time feature
if PKT_TIME == True:
Time.TimeFeature(times, sizes, features)
if DEBUG_FLAG:
FeaturePos['PKT_TIME'] = len(features)
#Unique packet lengths
if UNIQUE_PACKET_LENGTH == True:
PktLen.PktLenFeature(times, sizes, features)
if DEBUG_FLAG:
FeaturePos['UNIQUE_PACKET_LENGTH'] = len(features)
# n-gram feature for ordering
if NGRAM_ENABLE == True:
buckets = Ngram.NgramExtract(sizes, 2)
features.extend(buckets)
buckets = Ngram.NgramExtract(sizes, 3)
features.extend(buckets)
buckets = Ngram.NgramExtract(sizes, 4)
features.extend(buckets)
buckets = Ngram.NgramExtract(sizes, 5)
features.extend(buckets)
buckets = Ngram.NgramExtract(sizes, 6)
features.extend(buckets)
if DEBUG_FLAG:
FeaturePos['NGRAM'] = len(features)
# trans position features
if TRANS_POSITION == True:
TransPosition.TransPosFeature(times, sizes, features)
if DEBUG_FLAG:
FeaturePos['TRANS_POSITION'] = len(features)
if INTERVAL_KNN == True:
Interval.IntervalFeature(times, sizes, features, 'KNN')
if DEBUG_FLAG:
FeaturePos['INTERVAL_KNN'] = len(features)
if INTERVAL_ICICS == True:
Interval.IntervalFeature(times, sizes, features, 'ICICS')
if DEBUG_FLAG:
FeaturePos['INTERVAL_ICICS'] = len(features)
if INTERVAL_WPES11 == True:
Interval.IntervalFeature(times, sizes, features, 'WPES11')
if DEBUG_FLAG:
FeaturePos['INTERVAL_WPES11'] = len(features)
#Packet distributions (where are the outgoing packets concentrated) (knn + k-anonymity)
if PACKET_DISTRIBUTION == True:
PktDistribution.PktDistFeature(times, sizes, features)
if DEBUG_FLAG:
FeaturePos['PKT_DISTRIBUTION'] = len(features)
#Bursts (knn)
if BURSTS == True:
Burst.BurstFeature(times, sizes, features)
if DEBUG_FLAG:
FeaturePos['BURST'] = len(features)
# first 20 packets (knn)
if FIRST20 == True:
HeadTail.First20(times, sizes, features)
if DEBUG_FLAG:
FeaturePos['FIRST20'] = len(features)
# first 30: outgoing/incoming packet number (k-anonymity)
if FIRST30_PKT_NUM:
HeadTail.First30PktNum(times, sizes, features)
if DEBUG_FLAG:
FeaturePos['FIRST30_PKT_NUM'] = len(features)
# last 30: outgoing/incoming packet number (k-anonymity)
if LAST30_PKT_NUM:
HeadTail.Last30PktNum(times, sizes, features)
if DEBUG_FLAG:
FeaturePos['LAST30_PKT_NUM'] = len(features)
# packets per second (k-anonymity)
# plus alternative list
if PKT_PER_SECOND:
PktSec.PktSecFeature(times, sizes, features, howlong)
if DEBUG_FLAG:
FeaturePos['PKT_PER_SECOND'] = len(features)
# CUMUL features
if CUMUL == True:
features.extend( Cumul.CumulFeatures(sizes, featureCount) )
if DEBUG_FLAG:
FeaturePos['CUMUL'] = len(features)
if DEBUG_FLAG:
# output FeaturePos
fd = open('FeaturePos', 'w')
newfp = sorted(list(FeaturePos.items()), key=lambda i:i[1])
for each_key, pos in newfp:
fd.write(each_key + ':' + str(pos) + '\n')
fd.close()
def BatchHandler(FileList):
for each_file in FileList:
if ".feature" in each_file:
continue
# print each_file
f = open(each_file, "r")
try:
times = []
sizes = []
for x in f:
if TRAFFIC_REFORMAT:
x = util.TrafficReformat(x)
else:
x = x.split("\t")
# print x
# print x[0]
# print x[1]
times.append(float(x[0]))
sizes.append(int(x[1]))
except:
f.close()
continue
f.close()
# whether times or size is empty
if len(times) == 0 or len(sizes) == 0:
continue
# whether normalize traffic
if NormalizeTraffic == 1:
util.NormalizeTraffic(times, sizes)
features = []
try:
extract(times, sizes, features)
except:
print("error occured:", each_file)
continue
Dest = CreateFile(each_file)
#print Dest
fout = open(Dest, "w")
for x in features:
# x could be str (DEBUG)
if isinstance(x, str):
if '\n' in x:
fout.write(x)
else:
fout.write(x + " ")
else:
fout.write(repr(x) + " ")
fout.close()
def main():
global CelltracePath
FileList = Enumerate(CelltracePath)
# split into BATCH_NUM files
BATCH_NUM = 32
# use independent lists per batch; [[]] * BATCH_NUM would alias a single shared list
FlistBatch = [[] for _ in range(BATCH_NUM)]
for idx, each_file in enumerate(FileList):
bdx = idx % BATCH_NUM
FlistBatch[bdx].append(each_file)
# start BATCH_NUM processes for computation
pjobs = []
for i in range(BATCH_NUM):
p = multiprocessing.Process(target=BatchHandler, args=(FlistBatch[i],))
pjobs.append(p)
p.start()
for eachp in pjobs:
eachp.join()
print("finished!")
if __name__ == "__main__":
for each_path in CelltracePath_List:
CelltracePath = each_path
print(each_path)
FeaturePath = each_path
main()
|
the-stack_106_25188 | import bisect
import typing
def main() -> typing.NoReturn:
n = int(input())
a = [i * (i + 1) // 2 for i in range(1, 10000)]
s = []
n0 = n
tot = 0
while n:
i = bisect.bisect_right(a, n)
n -= a[i - 1]
s.append('7' * i)
s = '1'.join(s)
print(s)
main()
|
the-stack_106_25190 | """Represents optimization strategy for group in PSO."""
# pylint: disable=redefined-variable-type
import numpy as np
from grortir.main.model.core.optimization_status import OptimizationStatus
from grortir.main.pso.group_optimization_strategy import \
GroupOptimizationStrategy
class CallsGroupOptimizationStrategy(GroupOptimizationStrategy):
"""Represents optimization strategy for group in PSO."""
def __init__(self, stages_in_group):
self.stages_in_group = stages_in_group
self.max_cost = 0
self.expected_quality = np.inf
def initialize(self):
"""
Initialize strategy.
"""
max_cost = 0
for stage in self.stages_in_group:
max_cost += stage.get_maximal_acceptable_cost()
if self.expected_quality > stage.maximum_acceptable_quality:
self.expected_quality = stage.maximum_acceptable_quality
self.max_cost = max_cost
def should_continue(self, best_particle):
"""
Return true if optimization should be continued for Calls Process.
Args:
best_particle Particle: best particle in swarm.
Returns:
bool: true if continuation is required.
"""
return self._is_safe_cost() and not self._is_enough_quality(
best_particle)
def finalize(self, best_particle):
"""
Set proper status after finished group optimization.
Args:
best_particle (Particle): best particle in Swarm
"""
optimization_status = OptimizationStatus.failed
if self._is_safe_cost() and self._is_enough_quality(
best_particle):
optimization_status = OptimizationStatus.success
for stage in self.stages_in_group:
stage.optimization_status = optimization_status
def _is_safe_cost(self):
current_cost = 0
for stage in self.stages_in_group:
current_cost += stage.get_cost()
return current_cost < self.max_cost
def _is_enough_quality(self, best_particle):
return best_particle.best_quality <= self.expected_quality
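

# Hedged usage sketch (assumption): `stages` and `best` are hypothetical objects exposing
# the attributes used above (get_maximal_acceptable_cost, maximum_acceptable_quality,
# get_cost, optimization_status, best_quality). The intended lifecycle is:
#
#     strategy = CallsGroupOptimizationStrategy(stages)
#     strategy.initialize()
#     while strategy.should_continue(best):
#         ...  # run one PSO iteration, updating `best`
#     strategy.finalize(best)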
|
the-stack_106_25191 | import logging
import time
from typing import Any, Dict, List, Optional, Set
from blspy import G1Element
from chaingreen.consensus.cost_calculator import calculate_cost_of_program, NPCResult
from chaingreen.full_node.bundle_tools import simple_solution_generator
from chaingreen.full_node.mempool_check_conditions import get_name_puzzle_conditions
from chaingreen.types.blockchain_format.coin import Coin
from chaingreen.types.blockchain_format.program import Program, SerializedProgram
from chaingreen.types.announcement import Announcement
from chaingreen.types.blockchain_format.sized_bytes import bytes32
from chaingreen.types.coin_spend import CoinSpend
from chaingreen.types.generator_types import BlockGenerator
from chaingreen.types.spend_bundle import SpendBundle
from chaingreen.util.ints import uint8, uint32, uint64, uint128
from chaingreen.util.hash import std_hash
from chaingreen.wallet.derivation_record import DerivationRecord
from chaingreen.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
DEFAULT_HIDDEN_PUZZLE_HASH,
calculate_synthetic_secret_key,
puzzle_for_pk,
solution_for_conditions,
)
from chaingreen.wallet.puzzles.puzzle_utils import (
make_assert_coin_announcement,
make_assert_puzzle_announcement,
make_assert_my_coin_id_condition,
make_assert_absolute_seconds_exceeds_condition,
make_create_coin_announcement,
make_create_puzzle_announcement,
make_create_coin_condition,
make_reserve_fee_condition,
)
from chaingreen.wallet.secret_key_store import SecretKeyStore
from chaingreen.wallet.sign_coin_spends import sign_coin_spends
from chaingreen.wallet.transaction_record import TransactionRecord
from chaingreen.wallet.util.transaction_type import TransactionType
from chaingreen.wallet.util.wallet_types import WalletType
from chaingreen.wallet.wallet_coin_record import WalletCoinRecord
from chaingreen.wallet.wallet_info import WalletInfo
class Wallet:
wallet_state_manager: Any
log: logging.Logger
wallet_id: uint32
secret_key_store: SecretKeyStore
cost_of_single_tx: Optional[int]
@staticmethod
async def create(
wallet_state_manager: Any,
info: WalletInfo,
name: str = None,
):
self = Wallet()
self.log = logging.getLogger(name if name else __name__)
self.wallet_state_manager = wallet_state_manager
self.wallet_id = info.id
self.secret_key_store = SecretKeyStore()
self.cost_of_single_tx = None
return self
async def get_max_send_amount(self, records=None):
spendable: List[WalletCoinRecord] = list(
await self.wallet_state_manager.get_spendable_coins_for_wallet(self.id(), records)
)
if len(spendable) == 0:
return 0
spendable.sort(reverse=True, key=lambda record: record.coin.amount)
if self.cost_of_single_tx is None:
coin = spendable[0].coin
tx = await self.generate_signed_transaction(
coin.amount, coin.puzzle_hash, coins={coin}, ignore_max_send_amount=True
)
program: BlockGenerator = simple_solution_generator(tx.spend_bundle)
# npc contains names of the coins removed, puzzle_hashes and their spend conditions
result: NPCResult = get_name_puzzle_conditions(
program,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=self.wallet_state_manager.constants.COST_PER_BYTE,
safe_mode=True,
)
cost_result: uint64 = calculate_cost_of_program(
program.program, result, self.wallet_state_manager.constants.COST_PER_BYTE
)
self.cost_of_single_tx = cost_result
self.log.info(f"Cost of a single tx for standard wallet: {self.cost_of_single_tx}")
max_cost = self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM / 5 # avoid full block TXs
current_cost = 0
total_amount = 0
total_coin_count = 0
for record in spendable:
current_cost += self.cost_of_single_tx
total_amount += record.coin.amount
total_coin_count += 1
if current_cost + self.cost_of_single_tx > max_cost:
break
return total_amount
@classmethod
def type(cls) -> uint8:
return uint8(WalletType.STANDARD_WALLET)
def id(self) -> uint32:
return self.wallet_id
async def get_confirmed_balance(self, unspent_records=None) -> uint128:
return await self.wallet_state_manager.get_confirmed_balance_for_wallet(self.id(), unspent_records)
async def get_unconfirmed_balance(self, unspent_records=None) -> uint128:
return await self.wallet_state_manager.get_unconfirmed_balance(self.id(), unspent_records)
async def get_spendable_balance(self, unspent_records=None) -> uint128:
spendable = await self.wallet_state_manager.get_confirmed_spendable_balance_for_wallet(
self.id(), unspent_records
)
return spendable
async def get_pending_change_balance(self) -> uint64:
unconfirmed_tx = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(self.id())
addition_amount = 0
for record in unconfirmed_tx:
if not record.is_in_mempool():
self.log.warning(f"Record: {record} not in mempool")
continue
our_spend = False
for coin in record.removals:
if await self.wallet_state_manager.does_coin_belong_to_wallet(coin, self.id()):
our_spend = True
break
if our_spend is not True:
continue
for coin in record.additions:
if await self.wallet_state_manager.does_coin_belong_to_wallet(coin, self.id()):
addition_amount += coin.amount
return uint64(addition_amount)
def puzzle_for_pk(self, pubkey: bytes) -> Program:
return puzzle_for_pk(pubkey)
async def hack_populate_secret_key_for_puzzle_hash(self, puzzle_hash: bytes32) -> G1Element:
maybe = await self.wallet_state_manager.get_keys(puzzle_hash)
if maybe is None:
error_msg = f"Wallet couldn't find keys for puzzle_hash {puzzle_hash}"
self.log.error(error_msg)
raise ValueError(error_msg)
# Get puzzle for pubkey
public_key, secret_key = maybe
# HACK
synthetic_secret_key = calculate_synthetic_secret_key(secret_key, DEFAULT_HIDDEN_PUZZLE_HASH)
self.secret_key_store.save_secret_key(synthetic_secret_key)
return public_key
async def hack_populate_secret_keys_for_coin_spends(self, coin_spends: List[CoinSpend]) -> None:
"""
This hack forces secret keys into the `_pk2sk` lookup. This should eventually be replaced
by a persistent DB table that can do this look-up directly.
"""
for coin_spend in coin_spends:
await self.hack_populate_secret_key_for_puzzle_hash(coin_spend.coin.puzzle_hash)
async def puzzle_for_puzzle_hash(self, puzzle_hash: bytes32) -> Program:
public_key = await self.hack_populate_secret_key_for_puzzle_hash(puzzle_hash)
return puzzle_for_pk(bytes(public_key))
async def get_new_puzzle(self) -> Program:
dr = await self.wallet_state_manager.get_unused_derivation_record(self.id())
return puzzle_for_pk(bytes(dr.pubkey))
async def get_puzzle_hash(self, new: bool) -> bytes32:
if new:
return await self.get_new_puzzlehash()
else:
record: Optional[
DerivationRecord
] = await self.wallet_state_manager.get_current_derivation_record_for_wallet(self.id())
if record is None:
return await self.get_new_puzzlehash()
return record.puzzle_hash
async def get_new_puzzlehash(self, in_transaction: bool = False) -> bytes32:
return (await self.wallet_state_manager.get_unused_derivation_record(self.id(), in_transaction)).puzzle_hash
def make_solution(
self,
primaries: Optional[List[Dict[str, Any]]] = None,
min_time=0,
me=None,
coin_announcements: Optional[Set[bytes32]] = None,
coin_announcements_to_assert: Optional[Set[bytes32]] = None,
puzzle_announcements: Optional[Set[bytes32]] = None,
puzzle_announcements_to_assert: Optional[Set[bytes32]] = None,
fee=0,
) -> Program:
assert fee >= 0
condition_list = []
if primaries:
for primary in primaries:
condition_list.append(make_create_coin_condition(primary["puzzlehash"], primary["amount"]))
if min_time > 0:
condition_list.append(make_assert_absolute_seconds_exceeds_condition(min_time))
if me:
condition_list.append(make_assert_my_coin_id_condition(me["id"]))
if fee:
condition_list.append(make_reserve_fee_condition(fee))
if coin_announcements:
for announcement in coin_announcements:
condition_list.append(make_create_coin_announcement(announcement))
if coin_announcements_to_assert:
for announcement_hash in coin_announcements_to_assert:
condition_list.append(make_assert_coin_announcement(announcement_hash))
if puzzle_announcements:
for announcement in puzzle_announcements:
condition_list.append(make_create_puzzle_announcement(announcement))
if puzzle_announcements_to_assert:
for announcement_hash in puzzle_announcements_to_assert:
condition_list.append(make_assert_puzzle_announcement(announcement_hash))
return solution_for_conditions(condition_list)
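    # Hedged usage sketch (assumption): `ph` stands for a hypothetical 32-byte puzzle hash
    # and `message` for a bytes32 announcement message. make_solution() consumes plain
    # dicts/sets and returns the Program built by solution_for_conditions(), e.g.
    #     solution = self.make_solution(
    #         primaries=[{"puzzlehash": ph, "amount": 1000}],
    #         fee=10,
    #         coin_announcements={message},
    #     )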
async def select_coins(self, amount, exclude: List[Coin] = None) -> Set[Coin]:
"""
Returns a set of coins that can be used for generating a new transaction.
Note: This must be called under a wallet state manager lock
"""
if exclude is None:
exclude = []
spendable_amount = await self.get_spendable_balance()
if amount > spendable_amount:
error_msg = (
f"Can't select amount higher than our spendable balance. Amount: {amount}, spendable: "
f" {spendable_amount}"
)
self.log.warning(error_msg)
raise ValueError(error_msg)
self.log.info(f"About to select coins for amount {amount}")
unspent: List[WalletCoinRecord] = list(
await self.wallet_state_manager.get_spendable_coins_for_wallet(self.id())
)
sum_value = 0
used_coins: Set = set()
        # Use larger coins first (sorted by amount, descending)
unspent.sort(reverse=True, key=lambda r: r.coin.amount)
# Try to use coins from the store, if there isn't enough of "unused"
# coins use change coins that are not confirmed yet
unconfirmed_removals: Dict[bytes32, Coin] = await self.wallet_state_manager.unconfirmed_removals_for_wallet(
self.id()
)
for coinrecord in unspent:
if sum_value >= amount and len(used_coins) > 0:
break
if coinrecord.coin.name() in unconfirmed_removals:
continue
if coinrecord.coin in exclude:
continue
sum_value += coinrecord.coin.amount
used_coins.add(coinrecord.coin)
self.log.debug(f"Selected coin: {coinrecord.coin.name()} at height {coinrecord.confirmed_block_height}!")
# This happens when we couldn't use one of the coins because it's already used
# but unconfirmed, and we are waiting for the change. (unconfirmed_additions)
if sum_value < amount:
raise ValueError(
"Can't make this transaction at the moment. Waiting for the change from the previous transaction."
)
self.log.debug(f"Successfully selected coins: {used_coins}")
return used_coins
async def _generate_unsigned_transaction(
self,
amount: uint64,
newpuzzlehash: bytes32,
fee: uint64 = uint64(0),
origin_id: bytes32 = None,
coins: Set[Coin] = None,
primaries_input: Optional[List[Dict[str, Any]]] = None,
ignore_max_send_amount: bool = False,
announcements_to_consume: Set[Announcement] = None,
) -> List[CoinSpend]:
"""
        Generates an unsigned transaction in the form of a list of coin spends (puzzle, solution pairs).
Note: this must be called under a wallet state manager lock
"""
if primaries_input is None:
primaries: Optional[List[Dict]] = None
total_amount = amount + fee
else:
primaries = primaries_input.copy()
primaries_amount = 0
for prim in primaries:
primaries_amount += prim["amount"]
total_amount = amount + fee + primaries_amount
if not ignore_max_send_amount:
max_send = await self.get_max_send_amount()
if total_amount > max_send:
raise ValueError(f"Can't send more than {max_send} in a single transaction")
if coins is None:
coins = await self.select_coins(total_amount)
assert len(coins) > 0
self.log.info(f"coins is not None {coins}")
spend_value = sum([coin.amount for coin in coins])
change = spend_value - total_amount
assert change >= 0
spends: List[CoinSpend] = []
primary_announcement_hash: Optional[bytes32] = None
# Check for duplicates
if primaries is not None:
all_primaries_list = [(p["puzzlehash"], p["amount"]) for p in primaries] + [(newpuzzlehash, amount)]
if len(set(all_primaries_list)) != len(all_primaries_list):
raise ValueError("Cannot create two identical coins")
for coin in coins:
self.log.info(f"coin from coins {coin}")
puzzle: Program = await self.puzzle_for_puzzle_hash(coin.puzzle_hash)
# Only one coin creates outputs
if primary_announcement_hash is None and origin_id in (None, coin.name()):
if primaries is None:
primaries = [{"puzzlehash": newpuzzlehash, "amount": amount}]
else:
primaries.append({"puzzlehash": newpuzzlehash, "amount": amount})
if change > 0:
change_puzzle_hash: bytes32 = await self.get_new_puzzlehash()
primaries.append({"puzzlehash": change_puzzle_hash, "amount": change})
message_list: List[bytes32] = [c.name() for c in coins]
for primary in primaries:
message_list.append(Coin(coin.name(), primary["puzzlehash"], primary["amount"]).name())
message: bytes32 = std_hash(b"".join(message_list))
solution: Program = self.make_solution(
primaries=primaries,
fee=fee,
coin_announcements={message},
coin_announcements_to_assert=announcements_to_consume,
)
primary_announcement_hash = Announcement(coin.name(), message).name()
else:
solution = self.make_solution(coin_announcements_to_assert={primary_announcement_hash})
spends.append(
CoinSpend(
coin, SerializedProgram.from_bytes(bytes(puzzle)), SerializedProgram.from_bytes(bytes(solution))
)
)
self.log.info(f"Spends is {spends}")
return spends
async def sign_transaction(self, coin_spends: List[CoinSpend]) -> SpendBundle:
return await sign_coin_spends(
coin_spends,
self.secret_key_store.secret_key_for_public_key,
self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
)
async def generate_signed_transaction(
self,
amount: uint64,
puzzle_hash: bytes32,
fee: uint64 = uint64(0),
origin_id: bytes32 = None,
coins: Set[Coin] = None,
primaries: Optional[List[Dict[str, bytes32]]] = None,
ignore_max_send_amount: bool = False,
announcements_to_consume: Set[Announcement] = None,
) -> TransactionRecord:
"""
Use this to generate transaction.
Note: this must be called under a wallet state manager lock
"""
if primaries is None:
non_change_amount = amount
else:
non_change_amount = uint64(amount + sum(p["amount"] for p in primaries))
transaction = await self._generate_unsigned_transaction(
amount, puzzle_hash, fee, origin_id, coins, primaries, ignore_max_send_amount, announcements_to_consume
)
assert len(transaction) > 0
self.log.info("About to sign a transaction")
await self.hack_populate_secret_keys_for_coin_spends(transaction)
spend_bundle: SpendBundle = await sign_coin_spends(
transaction,
self.secret_key_store.secret_key_for_public_key,
self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
)
now = uint64(int(time.time()))
add_list: List[Coin] = list(spend_bundle.additions())
rem_list: List[Coin] = list(spend_bundle.removals())
assert sum(a.amount for a in add_list) + fee == sum(r.amount for r in rem_list)
return TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=now,
to_puzzle_hash=puzzle_hash,
amount=uint64(non_change_amount),
fee_amount=uint64(fee),
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=add_list,
removals=rem_list,
wallet_id=self.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=spend_bundle.name(),
)
async def push_transaction(self, tx: TransactionRecord) -> None:
"""Use this API to send transactions."""
await self.wallet_state_manager.add_pending_transaction(tx)
# This is to be aggregated together with a coloured coin offer to ensure that the trade happens
async def create_spend_bundle_relative_chaingreen(self, chaingreen_amount: int, exclude: List[Coin]) -> SpendBundle:
list_of_solutions = []
utxos = None
# If we're losing value then get coins with at least that much value
# If we're gaining value then our amount doesn't matter
if chaingreen_amount < 0:
utxos = await self.select_coins(abs(chaingreen_amount), exclude)
else:
utxos = await self.select_coins(0, exclude)
assert len(utxos) > 0
# Calculate output amount given sum of utxos
spend_value = sum([coin.amount for coin in utxos])
chaingreen_amount = spend_value + chaingreen_amount
# Create coin solutions for each utxo
output_created = None
for coin in utxos:
puzzle = await self.puzzle_for_puzzle_hash(coin.puzzle_hash)
if output_created is None:
newpuzhash = await self.get_new_puzzlehash()
primaries = [{"puzzlehash": newpuzhash, "amount": chaingreen_amount}]
solution = self.make_solution(primaries=primaries)
output_created = coin
list_of_solutions.append(CoinSpend(coin, puzzle, solution))
await self.hack_populate_secret_keys_for_coin_spends(list_of_solutions)
spend_bundle = await sign_coin_spends(
list_of_solutions,
self.secret_key_store.secret_key_for_public_key,
self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
)
return spend_bundle
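
    # Hedged usage sketch (assumption): `wallet` is an initialized Wallet and `ph` a
    # destination puzzle hash; both names are hypothetical. A typical send is:
    #     tx = await wallet.generate_signed_transaction(uint64(1000), ph, fee=uint64(10))
    #     await wallet.push_transaction(tx)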
|
the-stack_106_25192 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import codecs
from setuptools import setup, find_packages
def read(fname):
file_path = os.path.join(os.path.dirname(__file__), fname)
return codecs.open(file_path, encoding='utf-8').read()
# Add your dependencies in requirements.txt
# Note: you can add test-specific requirements in tox.ini
requirements = []
with open('requirements.txt') as f:
for line in f:
stripped = line.split("#")[0].strip()
if len(stripped) > 0:
requirements.append(stripped)
# https://github.com/pypa/setuptools_scm
use_scm = {"write_to": "napari_label_picker/_version.py"}
setup(
name='napari-label-picker',
author='Kevin Yamauchi',
author_email='[email protected]',
license='BSD-3',
url='https://github.com/kevinyamauchi/napari-label-picker',
description='a plugin for selecting labels in label images',
long_description=read('README.md'),
long_description_content_type='text/markdown',
packages=find_packages(),
python_requires='>=3.6',
install_requires=requirements,
use_scm_version=use_scm,
setup_requires=['setuptools_scm'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Framework :: napari',
'Topic :: Software Development :: Testing',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Operating System :: OS Independent',
'License :: OSI Approved :: BSD License',
],
entry_points={
'napari.plugin': [
'napari-label-picker = napari_label_picker',
],
},
)
|
the-stack_106_25199 | #coding=utf-8
# Copyright (c) 2018 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import layers.paddle_layers as layers
class CNN(object):
"""
CNN
"""
def __init__(self, conf_dict):
"""
initialize
"""
self.dict_size = conf_dict["dict_size"]
self.task_mode = conf_dict["task_mode"]
self.emb_dim = conf_dict["net"]["emb_dim"]
self.filter_size = conf_dict["net"]["filter_size"]
self.num_filters = conf_dict["net"]["num_filters"]
self.hidden_dim = conf_dict["net"]["hidden_dim"]
def predict(self, left, right):
"""
Forward network
"""
# embedding layer
emb_layer = layers.EmbeddingLayer(self.dict_size, self.emb_dim, "emb")
left_emb = emb_layer.ops(left)
right_emb = emb_layer.ops(right)
# Presentation context
cnn_layer = layers.SequenceConvPoolLayer(
self.filter_size, self.num_filters, "conv")
left_cnn = cnn_layer.ops(left_emb)
right_cnn = cnn_layer.ops(right_emb)
# matching layer
if self.task_mode == "pairwise":
relu_layer = layers.FCLayer(self.hidden_dim, "relu", "relu")
left_relu = relu_layer.ops(left_cnn)
right_relu = relu_layer.ops(right_cnn)
cos_sim_layer = layers.CosSimLayer()
pred = cos_sim_layer.ops(left_relu, right_relu)
return left_relu, pred
else:
concat_layer = layers.ConcatLayer(1)
concat = concat_layer.ops([left_cnn, right_cnn])
relu_layer = layers.FCLayer(self.hidden_dim, "relu", "relu")
concat_fc = relu_layer.ops(concat)
softmax_layer = layers.FCLayer(2, "softmax", "cos_sim")
pred = softmax_layer.ops(concat_fc)
return left_cnn, pred
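
# Hedged configuration sketch (assumption): the numeric values below are illustrative,
# but the keys mirror exactly what CNN.__init__ reads from conf_dict.
EXAMPLE_CONF = {
    "dict_size": 100000,
    "task_mode": "pairwise",
    "net": {
        "emb_dim": 128,
        "filter_size": 3,
        "num_filters": 256,
        "hidden_dim": 128,
    },
}
# net = CNN(EXAMPLE_CONF); left_repr, pred = net.predict(left, right)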
|
the-stack_106_25203 | import os
from dotenv import load_dotenv
load_dotenv()
IMDB_CAST = "cast"
IMDB_NAME = "name"
MIN_NAME_SIZE = 0
SRC_ID = "src_id"
DST_ID = "dst_id"
WEIGHT = "weight"
SUBTITLE_SLEEP_TIME = 3
EPISODE_ID = "id"
EPISODE_NAME = "title"
EPISODE_NUMBER = "episode"
EPISODE_RATING = "rating"
SEASON_ID = "seasonid"
SEASON_NUMBER = "SeasonNumber"
DVD_SEASON = "DVD_season"
DVD_EPISODE = "DVD_episodenumber"
SERIES_ID = "seriesid"
EPISODE_GUEST_STARTS = "GuestStars"
SERIES_NAME = "Series_name"
IMDB_ID = "imdb_id"
VIDEO_NAME = "movie_name"
IMDB_RATING = "imdb_rating"
SUBTITLE_PATH = "subtitle_path"
ROLES_PATH = "roles_path"
MOVIE_YEAR = "movie_year"
ROLES_GRAPH = "roles_graph"
ACTORS_GRAPH = "actors_graph"
MAX_YEAR = 2018
dirname, filename = os.path.split(os.path.abspath(__file__))
THE_TVDB_URL = r"http://thetvdb.com/data/series/%s/all/en.xml"
IMDB_NAMES_URL = "https://datasets.imdbws.com/name.basics.tsv.gz"
IMDB_TITLES_URL = "https://datasets.imdbws.com/title.basics.tsv.gz"
IMDB_CREW_URL = "https://datasets.imdbws.com/title.crew.tsv.gz"
IMDB_RATING_URL = "https://datasets.imdbws.com/title.ratings.tsv.gz"
IMDB_PRINCIPALS_URL = "https://datasets.imdbws.com/title.principals.tsv.gz"
BASE_DIR_NAME = ".subs2net"
BASEPATH = os.path.expanduser(os.path.join('~', BASE_DIR_NAME))
if not os.path.exists(BASEPATH):
os.mkdir(BASEPATH)
os.mkdir(f"{BASEPATH}/data")
os.mkdir(f"{BASEPATH}/subtitles")
os.mkdir(f"{BASEPATH}/output")
os.mkdir(f"{BASEPATH}/ner")
OUTPUT_PATH = f"{BASEPATH}/output"
DATA_PATH = f"{BASEPATH}/data"
STANFORD_NLP_MODEL = f"{BASEPATH}/ner/english.all.3class.distsim.crf.ser.gz"
STANFORD_NLP_JAR = f"{BASEPATH}/ner/stanford-ner.jar"
STANFORD_NLP_JAR_URL = "https://github.com/data4goodlab/subs2network/raw/master/ner/stanford-ner.jar"
STANFORD_NLP_MODEL_URL = "https://github.com/data4goodlab/subs2network/raw/master/ner/classifiers/english.all.3class.distsim.crf.ser.gz"
DEBUG = True
def set_output_path(output_path):
global OUTPUT_PATH
OUTPUT_PATH = output_path
|
the-stack_106_25205 | import os
import base64
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import (Mail, Attachment, FileContent, FileName, FileType, Disposition)
change_url = f"https://github.com/{os.environ.get('GITHUB_REPOSITORY')}/commit/{os.environ.get('COMMIT_HASH')}"
message = Mail(
from_email=os.environ.get('FROM_EMAIL'),
to_emails=os.environ.get('TO_EMAIL'),
subject='Site updated',
html_content=f"""
Content attached, changes can be found at <a href="{change_url}">{change_url}</a>
""")
# https://www.twilio.com/blog/sending-email-attachments-with-twilio-sendgrid-python
with open('bgd_scraped.csv', 'rb') as f:
data = f.read()
f.close()
encoded_file = base64.b64encode(data).decode()
attachedFile = Attachment(
FileContent(encoded_file),
FileName('bgd_scraped.csv'),
FileType('text/csv'),
Disposition('attachment')
)
message.attachment = attachedFile
try:
sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
response = sg.send(message)
print(response.status_code)
print(response.body)
print(response.headers)
except Exception as e:
    print(e)
the-stack_106_25208 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally_openstack.scenarios.ceilometer import stats
from tests.unit import test
class CeilometerStatsTestCase(test.ScenarioTestCase):
def test_get_stats(self):
scenario = stats.GetStats(self.context)
scenario._get_stats = mock.MagicMock()
context = {"user": {"tenant_id": "fake", "id": "fake_id"},
"tenant": {"id": "fake_id",
"resources": ["fake_resource"]}}
metadata_query = {"a": "test"}
period = 10
groupby = "user_id"
aggregates = "sum"
scenario.context = context
scenario.run("fake_meter", True, True, True, metadata_query,
period, groupby, aggregates)
scenario._get_stats.assert_called_once_with(
"fake_meter",
[{"field": "user_id", "value": "fake_id", "op": "eq"},
{"field": "project_id", "value": "fake_id", "op": "eq"},
{"field": "resource_id", "value": "fake_resource", "op": "eq"},
{"field": "metadata.a", "value": "test", "op": "eq"}],
10,
"user_id",
"sum"
)
|
the-stack_106_25209 | # vim: et:ts=4:sw=4:fenc=utf-8
from abc import ABC, abstractmethod
import random
from typing import *
import json
from pmevo_eval.utils.architecture import Architecture
import pmevo_eval.utils.jsonable as jsonable
class Mapping(jsonable.JSONable):
"""Abstract base class for port mappings."""
def __init__(self):
super().__init__()
@staticmethod
def read_from_json_dict(jsondict, arch: Architecture = None):
assert jsondict["kind"] in ["Mapping2", "Mapping3"]
if arch is None:
arch = Architecture()
arch.from_json_dict(jsondict["arch"])
else:
arch.verify_json_dict(jsondict["arch"])
if jsondict["kind"] == "Mapping3":
res = Mapping3(arch)
res.from_json_dict(jsondict)
return res
if jsondict["kind"] == "Mapping2":
res = Mapping2(arch)
res.from_json_dict(jsondict)
return res
raise NotImplementedError("read_from_json")
@staticmethod
def read_from_json(infile, arch: Architecture = None):
jsondict = json.load(infile)
return Mapping.read_from_json_dict(jsondict, arch)
@staticmethod
def read_from_json_str(instr, arch: Architecture = None):
jsondict = json.loads(instr)
return Mapping.read_from_json_dict(jsondict, arch)
class Mapping3(Mapping):
"""Class representing port mappings where instructions are decomposed into
uops that can be executed on ports.
"""
def __init__(self, arch: Architecture):
super().__init__()
self.arch = arch
# an assignment from instructions to lists of lists of ports
self.assignment = {i: [] for i in self.arch.insn_list()}
def __getitem__(self, key):
assert key in self.assignment
return self.assignment[key]
def __repr__(self):
res = "Mapping3(arch={}, assignment={})".format(
repr(self.arch), repr(self.assignment)
)
return res
def to_json_dict(self):
res = dict()
res["kind"] = "Mapping3"
arch_dict = self.arch.to_json_dict()
res_dict = dict()
for k, v in arch_dict.items():
res_dict[k] = jsonable.mark_noindent(v)
res["arch"] = res_dict
assignment_dict = dict()
for i, us in self.assignment.items():
curr_uops = []
for ps in us:
curr_uops.append([p.name for p in ps])
assignment_dict[i.name] = jsonable.mark_noindent(curr_uops)
res["assignment"] = assignment_dict
return res
def from_json_dict(self, jsondict):
assert jsondict["kind"] == "Mapping3"
arch = self.arch
assignment_dict = jsondict["assignment"]
for i, us in assignment_dict.items():
insn = arch.insns[i]
curr_uops = []
for ps in us:
curr_uops.append([arch.ports[p] for p in ps])
self.assignment[insn] = curr_uops
@classmethod
def from_random(cls, arch: Architecture, num_uops_per_insn: int):
"""Generate a new random Mapping for the given architecture with at
most num_uops_per_insn uops per instruction (not necessarily
distinct).
"""
return cls.from_random_with_core(
arch, num_uops_per_insn=num_uops_per_insn, core_ratio=1.0
)
@classmethod
def from_random_with_core(
cls, arch: Architecture, num_uops_per_insn: int, core_ratio
):
"""Generate a new random Mapping for the given architecture with at
most num_uops_per_insn uops per instruction (not necessarily
distinct).
Only core_ratio * number of instructions many instructions are
generated randomly, the others are composed of core instructions.
"""
assert 0.0 <= core_ratio and core_ratio <= 1.0
res = cls(arch)
I = arch.insn_list()
P = arch.port_list()
assert len(I) > 0
core_size = max(1, int(len(I) * core_ratio))
random.shuffle(I)
core = I[:core_size]
remainder = I[core_size:]
for i in core:
num_uops = random.randrange(1, num_uops_per_insn + 1)
for x in range(num_uops):
sz = random.randrange(1, len(P) + 1)
p = list(random.sample(P, sz))
res.assignment[i].append(p)
for i in remainder:
idx = random.randrange(0, len(core))
core_element = core[idx]
res.assignment[i] = res.assignment[core_element][:]
# TODO this is not a full deep copy
return res
@classmethod
def from_model(cls, arch: Architecture, model):
"""Create a Mapping3 from a model, i.e. a tuple (i2u, u2p) of
dictionaries.
i2u maps pairs of instructions i from arch and some objects u
representing uops to a True value iff i should be decomposed into u
according to the mapping.
u2p does the same for tuples of uop representations u and ports p to
indicate that u can be executed on p.
"""
(i2u, u2p) = model
P = arch.port_list()
res = cls(arch)
for (i, u), v in i2u.items():
uop = []
for p in P:
if u2p.get((u, p), False):
uop.append(p)
if len(uop) > 0:
res.assignment[i].append(uop)
return res
class Mapping2(Mapping):
"""Class representing port mappings where instructions are directly
executed on ports.
"""
def __init__(self, arch: Architecture):
super().__init__()
self.arch = arch
# an assignment from instructions to lists of ports
self.assignment = dict()
def __getitem__(self, key):
assert key in self.assignment
return self.assignment[key]
def __repr__(self):
res = "Mapping2(arch={}, assignment={})".format(
repr(self.arch), repr(self.assignment)
)
return res
def to_json_dict(self):
res = dict()
res["kind"] = "Mapping2"
arch_dict = self.arch.to_json_dict()
res_dict = dict()
for k, v in arch_dict.items():
res_dict[k] = jsonable.mark_noindent(v)
res["arch"] = res_dict
assignment_dict = dict()
for i, ps in self.assignment.items():
assignment_dict[i.name] = jsonable.mark_noindent([p.name for p in ps])
res["assignment"] = assignment_dict
return res
def from_json_dict(self, jsondict):
assert jsondict["kind"] == "Mapping2"
arch = self.arch
assignment_dict = jsondict["assignment"]
for i, ps in assignment_dict.items():
insn = arch.insns[i]
self.assignment[insn] = sorted([arch.ports[p] for p in ps])
@classmethod
def from_random(cls, arch: Architecture):
"""Generate a new random Mapping for the given architecture."""
return cls.from_random_with_core(arch, 1.0)
@classmethod
def from_random_with_core(cls, arch: Architecture, core_ratio):
"""Generate a new random Mapping for the given architecture.
Only core_ratio * number of instructions many instructions are
generated randomly, the others are composed of core instructions.
"""
assert 0.0 <= core_ratio and core_ratio <= 1.0
res = cls(arch)
I = arch.insn_list()
P = arch.port_list()
assert len(I) > 0
core_size = max(1, int(len(I) * core_ratio))
random.shuffle(I)
core = I[:core_size]
remainder = I[core_size:]
for i in core:
sz = random.randrange(1, len(P) + 1)
p = list(random.sample(P, sz))
res.assignment[i] = p
for i in remainder:
idx = random.randrange(0, len(core))
core_element = core[idx]
res.assignment[i] = res.assignment[core_element][:]
return res
@classmethod
def from_model(cls, arch: Architecture, model):
"""Create a Mapping2 from a model, i.e. a dictionary i2p.
i2p maps pairs of instructions i from arch and ports to a True value
iff i can be executed on p according to the mapping.
"""
i2p = model
I = arch.insn_list()
P = arch.port_list()
res = cls(arch)
for i in I:
res.assignment[i] = []
for (i, p), v in i2p.items():
if v:
res.assignment[i].append(p)
return res
|
the-stack_106_25212 | """
Salts RD Lite shared module
Copyright (C) 2016 creits -2- tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import xbmcaddon
import xbmcplugin
import xbmcgui
import xbmc
import xbmcvfs
import urllib
import urlparse
import sys
import os
import re
import json
import time
import CustomProgressDialog
addon = xbmcaddon.Addon()
get_setting = addon.getSetting
show_settings = addon.openSettings
sleep = xbmc.sleep
_log = xbmc.log
def execute_jsonrpc(command):
if not isinstance(command, basestring):
command = json.dumps(command)
response = xbmc.executeJSONRPC(command)
return json.loads(response)
def get_path():
return addon.getAddonInfo('path').decode('utf-8')
def get_profile():
return addon.getAddonInfo('profile').decode('utf-8')
def translate_path(path):
return xbmc.translatePath(path).decode('utf-8')
def set_setting(id, value):
if not isinstance(value, basestring): value = str(value)
addon.setSetting(id, value)
def accumulate_setting(setting, addend=1):
cur_value = get_setting(setting)
cur_value = int(cur_value) if cur_value else 0
set_setting(setting, cur_value + addend)
def get_version():
return addon.getAddonInfo('version')
def get_id():
return addon.getAddonInfo('id')
def get_name():
return addon.getAddonInfo('name')
def has_addon(addon_id):
return xbmc.getCondVisibility('System.HasAddon(%s)' % (addon_id)) == 1
def get_kodi_version():
class MetaClass(type):
def __str__(self):
return '|%s| -> |%s|%s|%s|%s|%s|' % (self.version, self.major, self.minor, self.tag, self.tag_version, self.revision)
class KodiVersion(object):
__metaclass__ = MetaClass
version = xbmc.getInfoLabel('System.BuildVersion').decode('utf-8')
match = re.search('([0-9]+)\.([0-9]+)', version)
if match: major, minor = match.groups()
match = re.search('-([a-zA-Z]+)([0-9]*)', version)
if match: tag, tag_version = match.groups()
match = re.search('\w+:(\w+-\w+)', version)
if match: revision = match.group(1)
try: major = int(major)
except: major = 0
try: minor = int(minor)
except: minor = 0
try: revision = revision.decode('utf-8')
except: revision = u''
try: tag = tag.decode('utf-8')
except: tag = u''
try: tag_version = int(tag_version)
except: tag_version = 0
return KodiVersion
def get_plugin_url(queries):
try:
query = urllib.urlencode(queries)
except UnicodeEncodeError:
for k in queries:
if isinstance(queries[k], unicode):
queries[k] = queries[k].encode('utf-8')
query = urllib.urlencode(queries)
return sys.argv[0] + '?' + query
def end_of_directory(cache_to_disc=True):
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=cache_to_disc)
def set_content(content):
xbmcplugin.setContent(int(sys.argv[1]), content)
def create_item(queries, label, thumb='', fanart='', is_folder=None, is_playable=None, total_items=0, menu_items=None, replace_menu=False):
if not thumb: thumb = os.path.join(get_path(), 'icon.png')
list_item = xbmcgui.ListItem(label, iconImage=thumb, thumbnailImage=thumb)
add_item(queries, list_item, fanart, is_folder, is_playable, total_items, menu_items, replace_menu)
def add_item(queries, list_item, fanart='', is_folder=None, is_playable=None, total_items=0, menu_items=None, replace_menu=False):
if not fanart: fanart = os.path.join(get_path(), 'fanart.jpg')
if menu_items is None: menu_items = []
if is_folder is None:
is_folder = False if is_playable else True
if is_playable is None:
playable = 'false' if is_folder else 'true'
else:
playable = 'true' if is_playable else 'false'
liz_url = queries if isinstance(queries, basestring) else get_plugin_url(queries)
if not list_item.getProperty('fanart_image'): list_item.setProperty('fanart_image', fanart)
list_item.setInfo('video', {'title': list_item.getLabel()})
list_item.setProperty('isPlayable', playable)
list_item.addContextMenuItems(menu_items, replaceItems=replace_menu)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, list_item, isFolder=is_folder, totalItems=total_items)
def parse_query(query):
q = {'mode': 'main'}
if query.startswith('?'): query = query[1:]
queries = urlparse.parse_qs(query)
for key in queries:
if len(queries[key]) == 1:
q[key] = queries[key][0]
else:
q[key] = queries[key]
return q
def notify(header=None, msg='', duration=2000, sound=None, icon_path=None):
if header is None: header = get_name()
if sound is None: sound = get_setting('mute_notifications') == 'false'
if icon_path is None: icon_path = os.path.join(get_path(), 'icon.png')
try:
xbmcgui.Dialog().notification(header, msg, icon_path, duration, sound)
except:
builtin = "XBMC.Notification(%s,%s, %s, %s)" % (header, msg, duration, icon_path)
xbmc.executebuiltin(builtin)
def close_all():
xbmc.executebuiltin('Dialog.Close(all)')
def get_current_view():
window = xbmcgui.Window(xbmcgui.getCurrentWindowId())
return str(window.getFocusId())
def set_view(content, set_view=False, set_sort=False):
# set content type so library shows more views and info
if content:
set_content(content)
if set_view:
view = get_setting('%s_view' % (content))
if view and view != '0':
_log('Setting View to %s (%s)' % (view, content), xbmc.LOGDEBUG)
xbmc.executebuiltin('Container.SetViewMode(%s)' % (view))
# set sort methods - probably we don't need all of them
if set_sort:
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_VIDEO_YEAR)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_MPAA_RATING)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_DATE)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_VIDEO_RUNTIME)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_GENRE)
def refresh_container():
xbmc.executebuiltin("XBMC.Container.Refresh")
def update_container(url):
xbmc.executebuiltin('Container.Update(%s)' % (url))
def get_keyboard(heading, default=''):
keyboard = xbmc.Keyboard()
keyboard.setHeading(heading)
if default: keyboard.setDefault(default)
keyboard.doModal()
if keyboard.isConfirmed():
return keyboard.getText()
else:
return None
class Translations(object):
def __init__(self, strings):
self.strings = strings
def i18n(self, string_id):
try:
return addon.getLocalizedString(self.strings[string_id]).encode('utf-8', 'ignore')
except Exception as e:
xbmc.log('%s: Failed String Lookup: %s (%s)' % (get_name(), string_id, e), xbmc.LOGWARNING)
return string_id
class WorkingDialog(object):
wd = None
def __init__(self):
try:
self.wd = xbmcgui.DialogBusy()
self.wd.create()
self.update(0)
except:
xbmc.executebuiltin('ActivateWindow(busydialog)')
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.wd is not None:
self.wd.close()
else:
xbmc.executebuiltin('Dialog.Close(busydialog)')
def is_canceled(self):
if self.wd is not None:
return self.wd.iscanceled()
else:
return False
def update(self, percent):
if self.wd is not None:
self.wd.update(percent)
class ProgressDialog(object):
pd = None
def __init__(self, heading, line1='', line2='', line3='', background=False, active=True, timer=0):
self.begin = time.time()
self.timer = timer
self.background = background
self.heading = heading
if active and not timer:
self.pd = self.__create_dialog(line1, line2, line3)
self.pd.update(0)
def __create_dialog(self, line1, line2, line3):
if self.background:
pd = xbmcgui.DialogProgressBG()
msg = line1 + line2 + line3
pd.create(self.heading, msg)
else:
if xbmc.getCondVisibility('Window.IsVisible(progressdialog)'):
pd = CustomProgressDialog.ProgressDialog()
else:
pd = xbmcgui.DialogProgress()
pd.create(self.heading, line1, line2, line3)
return pd
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.pd is not None:
self.pd.close()
def is_canceled(self):
if self.pd is not None and not self.background:
return self.pd.iscanceled()
else:
return False
def update(self, percent, line1='', line2='', line3=''):
if self.pd is None and self.timer and (time.time() - self.begin) >= self.timer:
self.pd = self.__create_dialog(line1, line2, line3)
if self.pd is not None:
if self.background:
msg = line1 + line2 + line3
self.pd.update(percent, self.heading, msg)
else:
self.pd.update(percent, line1, line2, line3)
class CountdownDialog(object):
__INTERVALS = 5
pd = None
def __init__(self, heading, line1='', line2='', line3='', active=True, countdown=60, interval=5):
self.heading = heading
self.countdown = countdown
self.interval = interval
self.line3 = line3
if active:
if xbmc.getCondVisibility('Window.IsVisible(progressdialog)'):
pd = CustomProgressDialog.ProgressDialog()
else:
pd = xbmcgui.DialogProgress()
if not self.line3: line3 = 'Expires in: %s seconds' % (countdown)
pd.create(self.heading, line1, line2, line3)
pd.update(100)
self.pd = pd
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.pd is not None:
self.pd.close()
def start(self, func, args=None, kwargs=None):
if args is None: args = []
if kwargs is None: kwargs = {}
result = func(*args, **kwargs)
if result:
return result
start = time.time()
expires = time_left = int(self.countdown)
interval = self.interval
while time_left > 0:
for _ in range(CountdownDialog.__INTERVALS):
sleep(interval * 1000 / CountdownDialog.__INTERVALS)
if self.is_canceled(): return
time_left = expires - int(time.time() - start)
if time_left < 0: time_left = 0
progress = time_left * 100 / expires
line3 = 'Expires in: %s seconds' % (time_left) if not self.line3 else ''
self.update(progress, line3=line3)
result = func(*args, **kwargs)
if result:
return result
def is_canceled(self):
if self.pd is None:
return False
else:
return self.pd.iscanceled()
def update(self, percent, line1='', line2='', line3=''):
if self.pd is not None:
self.pd.update(percent, line1, line2, line3)
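
# Hedged usage sketch (assumption): the heading and item list below are illustrative.
# Both dialog helpers are written to be used as context managers:
#     with ProgressDialog('Working', line1='Resolving sources') as pd:
#         for i, source in enumerate(sources):
#             if pd.is_canceled(): break
#             pd.update(i * 100 / len(sources), line1=source)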
|
the-stack_106_25213 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@date: December 2018, 7th
@author: [email protected] - Greg Sainton @IPGP on Behalf InSight/SEIS collaboration
@purpose:
This module is a class "Mars Converter" designed mainly to convert UTC Time to LMST
Time and LMST Time to UTC Time. With time, we added several useful functions.
LMST Time is depending on the landing time and the longitude of the lander.
Those informations are provided in a configuration file named "landerconfig.xml"
All the calculation are based of the Mars 24 algorithm itself based on
Alison, McEwen, Planetary ans Space Science 48 (2000) 215-235
https://www.giss.nasa.gov/tools/mars24/help/algorithm.html
Beware that numerical values and leap seconds were updated in this algo
since the publication of the article.
I followed this: https://www.giss.nasa.gov/tools/mars24/help/algorithm.html
In comments, AM2000 refers to the article and C? refers to the above web
page.
@VERSIONS:
V.1.6: 19 Jan 2021 - Fix bug on UTC to LMST conversions
- Add get_utc_2_eot and get_utc_2_ls functions
V.1.5: Dec 2019 - Add local solar elevation
+ latitude added in lander configfile
V.1.4: Dec 2019 - Add Solar declination and LTST getter
V1.3: Oct 2019 - Update in the output format of the utc2lmst functions:
no more colons between seconds and milliseconds
V1.0: April 2019 - LMST -> UTC added.
It was necessary to change some other functions to bypass the effect
of the modulo operations.
You can either give a SSSSThh:mm:ss:millis string (ie 0129T02:45:56:675678)
or just a sol number
# Content of landerconfigfile.xml for INSIGHT
<configlanding>
<landingdate>2018-330T19:44:52.444</landingdate>
<longitude>224.03</longitude>
<latitude>4.502384</latitude>
<solorigin>2018-330T05:10:50.3356</solorigin>
</configlanding>
"""
import os
import math
from math import floor
import time
import numpy as np
from obspy import UTCDateTime
global configfile
configfile = './landerconfig.xml'
path2marsconverter = os.environ['MARSCONVERTER']
configfile = path2marsconverter+'/landerconfig.xml'
class MarsConverter:
"""
    Class which contains all the functions to convert UTC time to Martian time.
    All the calculations are based on the Mars24 algorithm, itself based on
    Allison & McEwen, Planetary and Space Science 48 (2000) 215-235
https://www.giss.nasa.gov/tools/mars24/help/algorithm.html
"""
#JULIAN UNIX EPOCH (01/01/1970-00:00 UTC)
JULIAN_UNIX_EPOCH = 2440587.5
#MILLISECONDS IN A DAY
MILLISECONDS_IN_A_DAY = 86400000.0
#SECOND IN A DAY
SECOND_IN_A_DAY = 86400.0
# SHIFT IN DAY PER DEGREE
SHIFT_IN_DAY_PER_DEGREE = 1. / 360.0
# Julian day of reference (january 2010, 6th 00:00 UTC) At that time,
# the martian meridian is also at midnight.
CONSISTENT_JULIAN_DAY = 2451545.0
#Delta between IAT and UTC
TAI_UTC = 0.0003725
#time division
TIME_COEFFICIENT = 60.0
#Millisecond multiplier
MMULTIPLIER = 1000.0
# Allison's coefficient
K = 0.0009626
# Allison's normalisation factor to make sure that we get positive values
#for date after 1873
KNORM = 44796.0
#Ratio Betwwen Martian SOL and terrestrial day
SOL_RATIO = 1.0274912517
LONGITUDE = None
# Sol-001 and Sol-002 start times to compute one Martian day in seconds.
# Cannot use landing time because Sol-000 lasted shorter.
SOL01_START_TIME = UTCDateTime("2018-11-27T05:50:25.580014Z")
SOL02_START_TIME = UTCDateTime("2018-11-28T06:30:00.823990Z")
SECONDS_PER_MARS_DAY = SOL02_START_TIME - SOL01_START_TIME - 0.000005
#def __init__(self, landingdate, origindate, longitude):
# self.__landingdate = landingdate
# self.__longitude = float(longitude)
# self.__origindate = origindate
# self.LONGITUDE = float(self.__longitude)
def __init__(self, landerconfigfile=None):
global configfile
from lxml import etree
        if landerconfigfile is not None:
            configfile = landerconfigfile
tree = etree.parse(configfile)
root = tree.getroot()
for el in root:
if el.tag == 'landingdate':
LANDING_DATE_STR = el.text
if el.tag == 'longitude':
LANDING_LONGITUDE = float(el.text)
if el.tag == 'solorigin':
SOL_ORIGIN_STR = el.text
if el.tag == 'latitude':
LANDING_LATITUDE = float(el.text)
utc_origin_sol_date = UTCDateTime(SOL_ORIGIN_STR)
utc_landing_date = UTCDateTime(LANDING_DATE_STR)
self.__landingdate = utc_landing_date
self.__longitude = LANDING_LONGITUDE
self.__origindate = utc_origin_sol_date
self.LONGITUDE = float(self.__longitude)
self.LATITUDE = LANDING_LATITUDE
def get_landing_date(self):
"""
Returns the landing date of the lander in UTCDateTime format
"""
return self.__landingdate
def get_longitude(self):
"""
Returns the lander longitude
"""
return self.__longitude
def get_origindate(self):
return self.__origindate
def j2000_epoch(self):
"""
Returns the j2000 epoch as a float
"""
return self.CONSISTENT_JULIAN_DAY
def mills(self):
"""
Returns the current time in milliseconds since Jan 1 1970
"""
return time.time()*self.MMULTIPLIER
def julian(self, date=None):
"""
Returns the julian day number given milliseconds since Jan 1 1970
"""
if date is None:
dateUTC = UTCDateTime.now()
else:
dateUTC = UTCDateTime(date)
millis = dateUTC.timestamp * 1000.0
return self.JULIAN_UNIX_EPOCH + (millis/self.MILLISECONDS_IN_A_DAY)
def utc_to_tt_offset(self, jday=None):
"""
Returns the offset in seconds from a julian date in Terrestrial Time (TT)
to a Julian day in Coordinated Universal Time (UTC)
"""
return self.utc_to_tt_offset_math(jday)
def utc_to_tt_offset_math(self, jday=None):
"""
Returns the offset in seconds from a julian date in Terrestrial Time (TT)
to a Julian day in Coordinated Universal Time (UTC)
"""
if jday is None:
jday_np=self.julian()
else:
jday_np = jday
jday_min = 2441317.5
jday_vals = [ -2441317.5, 0., 182., 366.,
731., 1096., 1461., 1827.,
2192., 2557., 2922., 3469.,
3834., 4199., 4930., 5844.,
6575., 6940., 7487., 7852.,
8217., 8766., 9313., 9862.,
12419., 13515., 14792., 15887., 16437.]
offset_min = 32.184
offset_vals = [-32.184,10., 11.0, 12.0, 13.0,
14.0, 15.0, 16.0, 17.0, 18.0,
19.0, 20.0, 21.0, 22.0, 23.0,
24.0, 25.0, 26.0, 27.0, 28.0,
29.0, 30.0, 31.0, 32.0, 33.0,
34.0, 35.0, 36.0, 37.0]
if jday_np <= jday_min+jday_vals[0]:
return offset_min+offset_vals[0]
elif jday_np >= jday_min+jday_vals[-1]:
return offset_min+offset_vals[-1]
else:
for i in range(0, len(offset_vals)):
if (jday_min+jday_vals[i] <= jday_np) and \
(jday_min+jday_vals[i+1] > jday_np) :
break
return offset_min+offset_vals[i]
def julian_tt(self, jday_utc=None):
"""
Returns the TT Julian Day given a UTC Julian day
"""
if jday_utc is None:
jday_utc = self.julian()
jdtt = jday_utc + self.utc_to_tt_offset(jday_utc)/86400.
#print("jdtt= ", jdtt)
return jdtt
def j2000_offset_tt(self, jday_tt=None):
"""
Returns the julian day offset since the J2000 epoch
(AM2000, eq. 15)
"""
if jday_tt is None:
jday_tt = self.julian_tt()
return (jday_tt - self.j2000_epoch())
def Mars_Mean_Anomaly(self, j2000_ott=None):
"""
        Calculates the Mars Mean Anomaly for a given J2000 Julian day offset
(AM2000, eq. 16)
"""
if j2000_ott is None:
j2000_ott = self.j2000_offset_tt()
M = 19.3871 + 0.52402073 * j2000_ott
return M % 360.
def Alpha_FMS(self, j2000_ott=None):
"""
Returns the Fictional Mean Sun angle
(AM2000, eq. 17)
"""
if j2000_ott is None:
j2000_ott = self.j2000_offset_tt()
alpha_fms = 270.3871 + 0.524038496 * j2000_ott
return alpha_fms % 360.
def alpha_perturbs(self, j2000_ott=None):
"""
        Returns the perturbations to apply to the FMS angle due to planetary
        orbital perturbations.
(AM2000, eq. 18)
"""
if j2000_ott is None:
j2000_ott = self.j2000_offset_tt()
array_A = [0.0071, 0.0057, 0.0039, 0.0037, 0.0021, 0.0020, 0.0018]
array_tau = [2.2353, 2.7543, 1.1177, 15.7866, 2.1354, 2.4694, 32.8493]
array_phi = [49.409, 168.173, 191.837, 21.736, 15.704, 95.528, 49.095]
pbs = 0
for (A, tau, phi) in zip(array_A, array_tau, array_phi):
pbs+=A*np.cos(((0.985626 * j2000_ott/tau) + phi)*np.pi/180.)
return pbs
def equation_of_center(self, j2000_ott=None):
"""
The true anomaly (v) - the Mean anomaly (M)
(Bracketed term in AM2000, eqs. 19 and 20)
----
INPUT
@j2000_ott: float - offseted terrestrial time relative to j2000
----
OUTPUT
@return: EOC
"""
if j2000_ott is None:
j2000_ott = self.j2000_offset_tt()
M = self.Mars_Mean_Anomaly(j2000_ott)*np.pi/180.
pbs = self.alpha_perturbs(j2000_ott)
EOC = (10.691 + 3.0e-7 * j2000_ott)*np.sin(M)\
+ 0.6230 * np.sin(2*M)\
+ 0.0500 * np.sin(3*M)\
+ 0.0050 * np.sin(4*M)\
+ 0.0005 * np.sin(5*M) \
+ pbs
return EOC
def L_s(self, j2000_ott=None):
"""
Returns the Areocentric solar longitude (aka Ls)
(AM2000, eq. 19)
"""
if j2000_ott is None:
j2000_ott = self.j2000_offset_tt()
alpha = self.Alpha_FMS(j2000_ott)
v_m = self.equation_of_center(j2000_ott)
ls = (alpha + v_m)
ls = ls % 360
return ls
def get_utc_2_ls(self, utc_date=None):
"""
Convert UTC date to aerocentric solar longitude (Ls).
----
INPUT:
@utc_date: UTCDateTime
----
OUTPUT:
@ls : float
"""
if utc_date==None:
utc_date = UTCDateTime().now()
if isinstance(utc_date, UTCDateTime):
jd_utc = self.utcDateTime_to_jdutc(utc_date)
elif isinstance(utc_date, str):
try:
utc_date_in_utc = UTCDateTime(utc_date)
utc_date = utc_date_in_utc
except TypeError:
return None
else:
jd_utc = self.utcDateTime_to_jdutc(utc_date_in_utc)
jd_tt = self.julian_tt(jday_utc=jd_utc)
jd_ott = self.j2000_offset_tt(jd_tt)
ls = self.L_s(j2000_ott=jd_ott)
return ls
def equation_of_time(self, j2000_ott=None):
"""
Equation of Time, to convert between Local Mean Solar Time
and Local True Solar Time, and make pretty analemma plots
(AM2000, eq. 20)
"""
if j2000_ott is None:
j2000_ott = self.j2000_offset_tt()
ls = self.L_s(j2000_ott)*np.pi/180.
EOT = 2.861*np.sin(2*ls)\
- 0.071 * np.sin(4*ls)\
+ 0.002 * np.sin(6*ls) - self.equation_of_center(j2000_ott)
return EOT
def get_utc_2_eot(self, utc_date=None):
if utc_date==None:
utc_date = UTCDateTime().now()
if isinstance(utc_date, UTCDateTime):
jd_utc = self.utcDateTime_to_jdutc(utc_date)
elif isinstance(utc_date, str):
try:
utc_date_in_utc = UTCDateTime(utc_date)
utc_date = utc_date_in_utc
except TypeError:
return None
else:
jd_utc = self.utcDateTime_to_jdutc(utc_date_in_utc)
jd_tt = self.julian_tt(jday_utc=jd_utc)
jd_ott = self.j2000_offset_tt(jd_tt)
eot = self.equation_of_time(j2000_ott=jd_ott)
return eot
def j2000_from_Mars_Solar_Date(self, msd=0):
"""
Returns j2000 based on MSD
"""
j2000_ott = ((msd + 0.00096 - 44796.0) * 1.027491252) + 4.5
return j2000_ott
def j2000_ott_from_Mars_Solar_Date(self, msd=0):
"""
Returns j2000 offset based on MSD
"""
j2000 = self.j2000_from_Mars_Solar_Date(msd)
j2000_ott = self.julian_tt(j2000+self.j2000_epoch())
return j2000_ott-self.j2000_epoch()
def Mars_Solar_Date(self, j2000_ott = None):
"""Return the Mars Solar date"""
if j2000_ott is None:
jday_tt = self.julian_tt()
j2000_ott = self.j2000_offset_tt(jday_tt)
const = 4.5
MSD = (((j2000_ott - const)/self.SOL_RATIO) + self.KNORM - self.K)
return MSD
def Coordinated_Mars_Time(self, j2000_ott = None):
"""
The Mean Solar Time at the Prime Meridian
(AM2000, eq. 22, modified)
Be aware that the correct version of MTC should be
MTC%24 but since we need to reverse the equations to go from lmst to
utc, we decided to apply the modulo, later.
"""
if j2000_ott is None:
jday_tt = self.julian_tt()
j2000_ott = self.j2000_offset_tt(jday_tt)
#print("j2000_ott: ", j2000_ott)
MTC = 24 * (((j2000_ott - 4.5)/self.SOL_RATIO) + self.KNORM - self.K)
return MTC
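    # Note (derivable from Mars_Solar_Date and Coordinated_Mars_Time above): MTC == 24 * MSD
    # before any 24-hour wrap, so the fractional part of the Mars Solar Date is the mean
    # solar time of day at Mars's prime meridian.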
def j2000_tt_from_CMT(self, MTC=None):
"""
Estimate j2000_ott from Coordinated Mars Time
from (AM2000, eq. 22, modified)
"""
j2000_ott = (((MTC / 24.) + self.K - self.KNORM) * self.SOL_RATIO) + 4.5
return j2000_ott
def _LMST(self, longitude=0, j2000_ott=None):
"""
The Local Mean Solar Time given a planetographic longitude
19-03-12 : modif: the modulo 24 of MTC is estimated here
(C-3)
"""
if j2000_ott is None:
jday_tt = self.julian_tt()
j2000_ott = self.j2000_offset_tt(jday_tt)
MTC = self.Coordinated_Mars_Time(j2000_ott)
#print("In function _LMST -> MTC: {}".format(MTC))
MTCmod = MTC % 24
LMST = (MTCmod - longitude * (24./360.)) % 24
#print("In function _LMST -> LMST: {}".format(LMST))
return LMST
def LMST_to_j2000_ott(self, longitude=0, LMST=None):
MTC = LMST + 24/360*longitude
j2000_ott = self.j2000_ott_from_CMT(MTC)
return j2000_ott
def _LTST(self, longitude=0, j2000_ott=None):
"""
Local true solar time is the Mean solar time + equation of time perturbation
from (AM2000, Eq. 23 & Eq. 24)
"""
if j2000_ott is None:
jday_tt = self.julian_tt()
j2000_ott = self.j2000_offset_tt(jday_tt)
eot = self.equation_of_time(j2000_ott)
lmst = self._LMST(longitude, j2000_ott)
ltst = (lmst + eot*(1./15.))%24
return ltst
#--------------------------------------------------------------------------
# LTST : Local True Solar Time
def get_utc_2_ltst(self, utc_date=None):
"""
Convert UTC date to LTST date.
----
INPUT:
@utc_date: UTCDateTime
----
OUTPUT:
@lmst_date : str
"""
if utc_date==None:
utc_date = UTCDateTime().now()
if isinstance(utc_date, UTCDateTime):
jd_utc = self.utcDateTime_to_jdutc(utc_date)
#print("jd_utc: ", jd_utc)
elif isinstance(utc_date, str):
try:
utc_date_in_utc = UTCDateTime(utc_date)
utc_date = utc_date_in_utc
except TypeError:
return None
else:
jd_utc = self.utcDateTime_to_jdutc(utc_date_in_utc)
jd_tt = self.julian_tt(jday_utc=jd_utc)
jd_ott = self.j2000_offset_tt(jd_tt)
origin_in_sec = self.__origindate.timestamp
any_date_in_sec = utc_date.timestamp
delta_sec = any_date_in_sec - origin_in_sec
martianSol = int(delta_sec / (self.SECOND_IN_A_DAY*self.SOL_RATIO))
        ltst_date = self._LTST(longitude=self.__longitude, j2000_ott=jd_ott)
ihour = floor(ltst_date)
minutes = (ltst_date - ihour)*60.
iminutes = floor(minutes)
seconds = (minutes - iminutes)*60.
iseconds = floor(seconds)
        milliseconds = int(math.modf(seconds - iseconds)[0]*1000000)
ltst_str = "{:04}T{:02}:{:02}:{:02}.{:06}".format(martianSol,ihour,\
iminutes, iseconds, milliseconds)
return ltst_str
def utcDateTime_to_jdutc(self, date=None):
"""
Function to convert UTCDateTime to Julian date
"""
if date == None:
date = UTCDateTime().now()
millis = date.timestamp * 1000.0
#print("millis: ", millis)
jd_utc = self.JULIAN_UNIX_EPOCH + (float(millis) / self.MILLISECONDS_IN_A_DAY)
#print("In utcDateTime_to_jdutc :", jd_utc )
return jd_utc
def jdutc_to_UTCDateTime(self, jd_utc=None):
"""
Function to convert Julien date to UTCDateTime
"""
millis = (jd_utc - self.JULIAN_UNIX_EPOCH) * self.MILLISECONDS_IN_A_DAY
utc_tstamp = millis/1000.
return UTCDateTime(utc_tstamp)
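    # Illustrative round trip (assumption: JULIAN_UNIX_EPOCH is the Julian date of the Unix
    # epoch, 2440587.5, as in the Mars24 algorithm):
    #   jd = mc.utcDateTime_to_jdutc(UTCDateTime("2019-06-12T06:28:00.0"))
    #   mc.jdutc_to_UTCDateTime(jd)   # -> the same UTCDateTime back, to sub-millisecond precision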
def get_utc_2_lmst(self, utc_date=None, output="date"):
"""
Convert UTC date to LMST date.
        Output is formatted as SSSSTHH:MM:ss.mmmmmm if output is 'date'.
        Otherwise, the output is a float (decimal sols) if output is 'decimal'.
----
INPUT:
@utc_date: UTCDateTime
@output= output format which can takes those values : "date" or "decimal"
----
OUTPUT:
@return: str - Local Mean Solar Time
"""
        if utc_date is None:
utc_date = UTCDateTime().now()
if isinstance(utc_date, UTCDateTime):
jd_utc = self.utcDateTime_to_jdutc(utc_date)
#print("jd_utc: ", jd_utc)
elif isinstance(utc_date, str):
try:
utc_date_in_utc = UTCDateTime(utc_date)
utc_date = utc_date_in_utc
except TypeError:
return None
else:
jd_utc = self.utcDateTime_to_jdutc(utc_date_in_utc)
jd_tt = self.julian_tt(jday_utc=jd_utc)
jd_ott = self.j2000_offset_tt(jd_tt)
origin_in_sec = self.__origindate.timestamp
#origin_in_sec = self.SOL01_START_TIME.timestamp
any_date_in_sec = utc_date.timestamp
delta_sec = any_date_in_sec - origin_in_sec
martianSol = floor(delta_sec / (self.SECOND_IN_A_DAY*self.SOL_RATIO))
raw_martian_sol = delta_sec / (self.SECONDS_PER_MARS_DAY)
nb_sol = int(math.modf(raw_martian_sol)[1])
hour_dec = 24 * math.modf(raw_martian_sol)[0]
ihour = floor(hour_dec)
#MINUTES
min_dec = 60*(hour_dec-ihour)
iminutes = floor(min_dec)
seconds = (min_dec - iminutes)*60.
iseconds = int(math.modf((seconds))[1])
milliseconds = int(math.modf((seconds-iseconds))[0]*1000000)
if output == "decimal":
marsDate = raw_martian_sol
else:
marsDate = "{:04}T{:02}:{:02}:{:02}.{:06}".format(nb_sol,ihour,\
iminutes, iseconds, milliseconds)
return marsDate
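    # Example usage (illustrative; `mc` stands for a configured MarsConverter instance):
    #   mc.get_utc_2_lmst("2019-06-12T06:28:00.0")             # 'SSSSTHH:MM:ss.mmmmmm' string
    #   mc.get_utc_2_lmst("2019-06-12T06:28:00.0", "decimal")  # same instant as a decimal sol count
    # The SOL number is counted from the mission origin date configured for the converter
    # (the landerconfig.xml file referenced in the __main__ block below).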
def get_utc_2_lmst_2tab(self, utc_date = None):
"""
Convert UTC date to LMST date into a list
----
INPUT:
@utc_date: UTCDateTime
----
OUTPUT:
@return: list - Local Mean Solar Time
            [SOL, Hours, Minutes, Seconds]
"""
marsdate = self.get_utc_2_lmst(utc_date=utc_date, output="date")
        whereTpos = marsdate.find("T")
        marsDate = []
        if whereTpos > 0:
            # extract the number of SOLS
            nbsol = int(marsdate[:whereTpos])
            marsDate.append(nbsol)
            # extract hour time in a list
            timepart = marsdate[whereTpos+1:].split(":")
if len(timepart) == 2: #only hh:mm
marsDate.append(int(timepart[0]))
marsDate.append(int(timepart[1]))
elif len(timepart) == 3: # hh:mm:ss.sssssss
marsDate.append(int(timepart[0]))
marsDate.append(int(timepart[1]))
marsDate.append(float(timepart[2]))
else:
marsDate.append(int(timepart[0]))
return marsDate
else:
return None
def get_lmst_to_utc(self, lmst_date = None):
"""
Function to estimate the UTC time giving a LMST time.
        LMST time must have the following format: SSSSTHH:MM:ss.mmm
with :
SSSS : number of sols
HH: Hours
MM: Minutes
ss: Seconds
mmm: miliseconds
----
INPUT
@lmst_date: string
----
        OUTPUT
@return: Time with UTCDateTime format
"""
from obspy import UTCDateTime
        if lmst_date is None:
return UTCDateTime.now()
else:
date2split = str(lmst_date)
whereTpos = date2split.find("T")
#print("whereTpos", whereTpos)
            # mars date format: SSSSTHH:MM:ss.mmm
if whereTpos > 0:
# extract the number of SOLS
nbsol = float(date2split[:whereTpos])
# extract hour time in a list
timepart = date2split[whereTpos+1:].split(":")
# result in martian hours
#hours_in_dec = float(timepart[0]) + float(timepart[1])/60 + \
# float(timepart[2])/(60*60) + \
# float(timepart[3])/(1000*60*60)
if len(timepart) == 2: #only hh:mm
hours_in_dec = float(timepart[0]) + float(timepart[1])/60
elif len(timepart) == 3: # hh:mm:ss.sssssss
hours_in_dec = float(timepart[0]) + float(timepart[1])/60 + \
float(timepart[2])/(60*60)
#elif len(timepart) == 3: # hh:mm:ss:sssssss (previous format with colons as separator of decimal)
# hours_in_dec = float(timepart[0]) + float(timepart[1])/60 + \
# float(timepart[2])/(60*60) + \
# float(timepart[3])/(1000*60*60)
else:
hours_in_dec = None
jd_utc_orig = self.utcDateTime_to_jdutc(self.get_origindate())
jd_tt_orig = self.julian_tt(jday_utc=jd_utc_orig)
jd_ott_orig = self.j2000_offset_tt(jd_tt_orig)
MTC = self.Coordinated_Mars_Time(jd_ott_orig)
# Add the number of SOL to the MTC of the origin date
MTC+=nbsol*24
# Add the number of hours to the MTC of the origin date
if hours_in_dec is not None:
MTC+=hours_in_dec
# Get back to Delta J2000 (Eq 15)
JD_OTT = (MTC/24 - self.KNORM + self.K)*self.SOL_RATIO + 4.5
# Equation A6 from MARS 24 (https://www.giss.nasa.gov/tools/mars24/help/algorithm.html)
JD_TT = JD_OTT + self.CONSISTENT_JULIAN_DAY
# Equation A2 from MARS 24
JD_UT = JD_TT - 69.184/86400
# Equation A1 from MARS 24
UTC = (JD_UT - self.JULIAN_UNIX_EPOCH)*self.MILLISECONDS_IN_A_DAY/1000.
return UTCDateTime(UTC)
else:
return None
            # Case where you just give a number of SOL
            elif whereTpos < 0:
# Extract the MTC time of the "origin time" (time where SOL 0 starts)
#orig = self.get_origindate().timestamp
jd_utc_orig = self.utcDateTime_to_jdutc(self.get_origindate())
jd_tt_orig = self.julian_tt(jday_utc=jd_utc_orig)
jd_ott_orig = self.j2000_offset_tt(jd_tt_orig)
MTC = self.Coordinated_Mars_Time(jd_ott_orig)
MTC+=float(date2split)*24
# Get back to Delta J2000 (Eq 15)
JD_OTT = (MTC/24 - self.KNORM + self.K)*self.SOL_RATIO + 4.5
# Equation A6 from MARS 24 (https://www.giss.nasa.gov/tools/mars24/help/algorithm.html)
JD_TT = JD_OTT + self.CONSISTENT_JULIAN_DAY
# Equation A2 from MARS 24
JD_UT = JD_TT - 69.184/86400
# Equation A1 from MARS 24
UTC = (JD_UT - self.JULIAN_UNIX_EPOCH)*self.MILLISECONDS_IN_A_DAY/1000.
#checkSOL = self.get_utc_2_lmst(UTCDateTime(UTC))
#print("checkSOL", checkSOL)
#print("date2split", date2split)
correction_factor = .466
return UTCDateTime(UTC)+correction_factor
else:
return None
#==========================================================================
    # Additional Calculations
# added in 19', Nov 26th
#==========================================================================
def solar_declination(self,utc_date = None):
"""
Determine solar declination (planetographic). (AM1997, eq. D5)
----
INPUT:
@utc_date:
"""
if isinstance(utc_date, UTCDateTime):
jd_utc = self.utcDateTime_to_jdutc(utc_date)
elif isinstance(utc_date, str):
try:
utc_date_in_utc = UTCDateTime(utc_date)
utc_date = utc_date_in_utc
except TypeError:
return None
else:
jd_utc = self.utcDateTime_to_jdutc(utc_date_in_utc)
jd_tt = self.julian_tt(jday_utc=jd_utc)
jd_ott = self.j2000_offset_tt(jd_tt)
ls = self.L_s(jd_ott)
delta_s = (180/math.pi)*math.asin(0.42565*math.sin(math.pi*ls/180)) \
+ 0.25* math.sin(math.pi*ls/180) # (-> AM1997, eq. D5)
return delta_s
def local_solar_elevation(self, utc_date = None):
"""
For any given point on Mars's surface,
        we want to determine the elevation angle of the Sun.
From section D-5 on Mars24 algo page
added in dec 19, 19th
"""
if isinstance(utc_date, UTCDateTime):
jd_utc = self.utcDateTime_to_jdutc(utc_date)
elif isinstance(utc_date, str):
try:
utc_date_in_utc = UTCDateTime(utc_date)
utc_date = utc_date_in_utc
except TypeError:
return None
else:
jd_utc = self.utcDateTime_to_jdutc(utc_date_in_utc)
jd_tt = self.julian_tt(jday_utc=jd_utc)
jd_ott = self.j2000_offset_tt(jd_tt)
MTC = self.Coordinated_Mars_Time(j2000_ott = jd_ott)
MTC= MTC%24
#print("\t -MTC= ", MTC)
delta_s = self.solar_declination(utc_date = utc_date)
#print("\t -deltas= ", delta_s)
lbda = self.LONGITUDE
lbda = lbda%360
#print("\t -lbda= ", lbda)
phi = self.LATITUDE
EOT = self.equation_of_time(j2000_ott=jd_ott)
#print("\t -Equation of time:", EOT)
lbda_s = MTC*(360/24) + EOT + 180
lbda_s = lbda_s%360
#print("\t -lbda_s=", lbda_s)
d2r = math.pi/180.
H = lbda -lbda_s
#print("\t -H=", H)
Z = (180/math.pi)*math.acos(math.sin(delta_s*d2r)*math.sin(phi*d2r)+\
math.cos(delta_s*d2r)*math.cos(phi*d2r)*math.cos(H*d2r))
#print("\t -Z=", Z)
solar_elevation = 90 - Z
return solar_elevation
def local_solar_azimuth(self, utc_date = None):
"""
For any given point on Mars's surface,
        we want to determine the azimuth angle of the Sun.
From section D-6 on Mars24 algo page
added in dec 19, 19th
"""
if isinstance(utc_date, UTCDateTime):
jd_utc = self.utcDateTime_to_jdutc(utc_date)
elif isinstance(utc_date, str):
try:
utc_date_in_utc = UTCDateTime(utc_date)
utc_date = utc_date_in_utc
except TypeError:
return None
else:
jd_utc = self.utcDateTime_to_jdutc(utc_date_in_utc)
jd_tt = self.julian_tt(jday_utc=jd_utc)
jd_ott = self.j2000_offset_tt(jd_tt)
MTC = self.Coordinated_Mars_Time(j2000_ott = jd_ott)
MTC= MTC%24
#print("\t -MTC= ", MTC)
delta_s = self.solar_declination(utc_date = utc_date)
#print("\t -deltas= ", delta_s)
lbda = self.LONGITUDE
lbda = lbda%360
#print("\t -lbda= ", lbda)
phi = self.LATITUDE
EOT = self.equation_of_time(j2000_ott=jd_ott)
#print("\t -Equation of time:", EOT)
lbda_s = MTC*(360/24) + EOT + 180
lbda_s = lbda_s%360
#print("\t -lbda_s=", lbda_s)
d2r = math.pi/180
H = lbda -lbda_s
#print("\t -H=", H)
A = (180/math.pi)*math.atan(math.sin(H*d2r)/\
(math.cos(phi*d2r)*math.tan(delta_s*d2r)-math.sin(phi*d2r)*math.cos(H*d2r)))
A = A%360
#print("\t -A=", A)
return A
if __name__ == '__main__':
import sys
from pathlib import Path
print("Welcome in MarsConverter module.")
print("This main is just a test...")
landerconfigfile = './landerconfig.xml'
my_file = Path(landerconfigfile)
if my_file.is_file():
print("Config file found")
UTCDate = UTCDateTime.now()
print("Now is ", UTCDate)
mDate = MarsConverter()
marsDateNow = mDate.get_utc_2_lmst()
posT = marsDateNow.find('T')
print("in LMST, now, it is ", marsDateNow)
print("SOL ",marsDateNow[:posT] ,"from ", \
str(mDate.get_lmst_to_utc(lmst_date=int(marsDateNow[:posT]))), \
" UTC to ", str(mDate.get_lmst_to_utc(lmst_date=(int(marsDateNow[:posT])+1))))
print("UTC Date Time: {}".format(UTCDate))
print("From utc to lmst (formated):", mDate.get_utc_2_lmst(utc_date=UTCDate))
print("From utc to lmst (decimal):", mDate.get_utc_2_lmst(utc_date=UTCDate, output="decimal"))
print("From utc to ltst : ", mDate.get_utc_2_ltst(utc_date=UTCDate))
print("From utc to Ls", mDate.get_utc_2_ls(utc_date=UTCDate))
print("Solar declination: {}".format(mDate.solar_declination(utc_date=UTCDate)))
print("Solar elevation : {}".format(mDate.local_solar_elevation(utc_date=UTCDate)))
print("Solar azimuth : {}".format(mDate.local_solar_azimuth(utc_date=UTCDate)))
print("--------------------------")
print("Another example...")
#Example with a given UTCDateTime
#UTCDate = "2019-10-15T11:05:34.123456Z"
#2019-06-30T05:58
UTCDate = "2019-06-12T06:28:0.0"
print("UTC Date Time: {}".format(UTCDate))
print("From utc to lmst (formated):", mDate.get_utc_2_lmst(utc_date=UTCDate))
print("From utc to lmst (decimal):", mDate.get_utc_2_lmst(utc_date=UTCDate, output="decimal"))
print("From utc to ltst : ", mDate.get_utc_2_ltst(utc_date=UTCDate))
print("From utc to Ls", mDate.get_utc_2_ls(utc_date=UTCDate))
print("Solar declination: {}".format(mDate.solar_declination(utc_date=UTCDate)))
print("Solar elevation : {}".format(mDate.local_solar_elevation(utc_date=UTCDate)))
print("Solar azimuth : {}".format(mDate.local_solar_azimuth(utc_date=UTCDate)))
else:
sys.exit("No config file found")
|
the-stack_106_25214 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Provider(Model):
"""Resource provider information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The provider ID.
:vartype id: str
:param namespace: The namespace of the resource provider.
:type namespace: str
:ivar registration_state: The registration state of the provider.
:vartype registration_state: str
:ivar resource_types: The collection of provider resource types.
:vartype resource_types:
list[~azure.mgmt.resource.resources.v2016_09_01.models.ProviderResourceType]
"""
_validation = {
'id': {'readonly': True},
'registration_state': {'readonly': True},
'resource_types': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'namespace': {'key': 'namespace', 'type': 'str'},
'registration_state': {'key': 'registrationState', 'type': 'str'},
'resource_types': {'key': 'resourceTypes', 'type': '[ProviderResourceType]'},
}
def __init__(self, **kwargs):
super(Provider, self).__init__(**kwargs)
self.id = None
self.namespace = kwargs.get('namespace', None)
self.registration_state = None
self.resource_types = None
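# Minimal usage sketch (not from the original file): only writable fields such as
# `namespace` are accepted by the constructor; read-only fields stay None until the
# service populates them during deserialization.
#   provider = Provider(namespace='Microsoft.Compute')
#   provider.id                  # None (server-populated)
#   provider.registration_state  # None (server-populated)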
|
the-stack_106_25218 | # Python script for rewriting a docker env file.
# usage: patch_env dockerenv_input.env dockerenv_output.env
# Copies each input line to the output, except when it is of the form
# VAR_X=value
# and an environment variable PATCH_VAR_X exists: then its value is used.
# Performs no error handling.
import os
import re
import sys
PATCHVAR_PREFIX = "PATCH_"
def patch_file(infile: str, outfile: str) -> None:
    with open(infile, 'rt', encoding='utf-8') as source, \
            open(outfile, 'wb') as target:
        for line in source:
            target.write(patched(line).encode('utf-8'))
def patched(line: str) -> str:
var_match = re.match(r"^(\w+)\s*=", line)
if var_match:
varname = var_match.group(1)
patchvarname = PATCHVAR_PREFIX + varname
patchvalue = os.environ.get(patchvarname, None)
if patchvalue is None:
return line # unpatched assignment line
else:
return "%s=%s\n" % (varname, patchvalue) # patched assignment line
else:
return line # non-assignment line
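# Illustrative behaviour of patched() (assuming the hypothetical variable PATCH_DB_HOST
# is set to "db.example" in the environment and PATCHVAR_PREFIX is left at its default):
#   patched("DB_HOST=localhost\n")  -> "DB_HOST=db.example\n"   (patched assignment line)
#   patched("# a comment\n")        -> "# a comment\n"          (passed through unchanged)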
if __name__ == '__main__':
first = 2 if sys.argv[1].endswith('.py') else 1 # call: python patch_env.py in out
patch_file(sys.argv[first], sys.argv[first + 1])
|
the-stack_106_25219 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework import serializers
from irekua_database.models import CollectionType
from irekua_database.models import LicenceType
from irekua_rest_api.serializers.base import IrekuaModelSerializer
from irekua_rest_api.serializers.base import IrekuaHyperlinkedModelSerializer
from irekua_rest_api.serializers.object_types import licences
from . import types
MODEL = CollectionType.licence_types.through # pylint: disable=E1101
class SelectSerializer(IrekuaModelSerializer):
class Meta:
model = MODEL
fields = (
'url',
'id',
)
class ListSerializer(IrekuaModelSerializer):
licence_type = serializers.PrimaryKeyRelatedField(
many=False,
read_only=True,
source='licencetype')
class Meta:
model = MODEL
fields = (
'url',
'id',
'licence_type',
)
class DetailSerializer(IrekuaHyperlinkedModelSerializer):
licence_type = licences.SelectSerializer(
many=False,
read_only=True,
source='licencetype')
collection_type = types.SelectSerializer(
many=False,
read_only=True,
source='collectiontype')
class Meta:
model = MODEL
fields = (
'url',
'id',
'collection_type',
'licence_type',
)
class CreateSerializer(IrekuaModelSerializer):
licence_type = serializers.PrimaryKeyRelatedField(
many=False,
read_only=False,
queryset=LicenceType.objects.all(), # pylint: disable=E1101
source='licencetype')
class Meta:
model = MODEL
fields = (
'licence_type',
)
def create(self, validated_data):
collection_type = self.context['collection_type']
validated_data['collectiontype'] = collection_type
return super().create(validated_data)
|
the-stack_106_25220 | """Contains pipelines."""
from functools import partial
import numpy as np
import tensorflow as tf
from hmmlearn import hmm
import cardio.dataset as ds
from batchflow import F, V, B
from ..models.hmm import HMModel, prepare_hmm_input
def hmm_preprocessing_pipeline(batch_size=20, features="hmm_features"):
"""Preprocessing pipeline for Hidden Markov Model.
This pipeline prepares data for ``hmm_train_pipeline``.
    It works with a dataset that generates batches of class ``EcgBatch``.
Parameters
----------
batch_size : int
Number of samples in batch.
Default value is 20.
features : str
Batch attribute to store calculated features.
Returns
-------
pipeline : Pipeline
Output pipeline.
"""
def get_annsamples(batch):
"""Get annsamples from annotation
"""
return [ann["annsamp"] for ann in batch.annotation]
def get_anntypes(batch):
"""Get anntypes from annotation
"""
return [ann["anntype"] for ann in batch.annotation]
return (ds.Pipeline()
.init_variable("annsamps", list)
.init_variable("anntypes", list)
.init_variable("hmm_features", list)
.load(fmt='wfdb', components=["signal", "annotation", "meta"], ann_ext='pu1')
.cwt(src="signal", dst="hmm_features", scales=[4,8,16], wavelet="mexh")
.standardize(axis=-1, src="hmm_features", dst="hmm_features")
.update(V("annsamps"), F(get_annsamples, mode='e')(batch=B()))
.update(V("anntypes"), F(get_anntypes, mode='e')(batch=B()))
.update(V("hmm_features"), ds.B("hmm_features"))
            .run(batch_size=batch_size, shuffle=False, drop_last=False, n_epochs=1, lazy=True, bar=True))
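# Note (added for clarity): hmm_train_pipeline() below does not recompute the annotations.
# It reads the "annsamps", "anntypes" and "hmm_features" variables back from this
# preprocessing pipeline via get_variable() to derive the HMM's initial means, covariances,
# transition matrix and start probabilities.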
def hmm_train_pipeline(hmm_preprocessed, batch_size=20):
"""Train pipeline for Hidden Markov Model.
This pipeline trains hmm model to isolate QRS, PQ and QT segments.
    It works with a dataset that generates batches of class EcgBatch.
Parameters
----------
hmm_preprocessed : Pipeline
Pipeline with precomputed hmm features through hmm_preprocessing_pipeline
batch_size : int
Number of samples in batch.
Default value is 20.
Returns
-------
pipeline : Pipeline
Output pipeline.
"""
def get_annsamples(batch):
return [ann["annsamp"] for ann in batch.annotation]
def get_anntypes(batch):
return [ann["anntype"] for ann in batch.annotation]
def expand_annotation(annsamp, anntype, length):
"""Unravel annotation
"""
begin = -1
end = -1
s = 'none'
states = {'N':0, 'st':1, 't':2, 'iso':3, 'p':4, 'pq':5}
annot_expand = -1 * np.ones(length)
for j, samp in enumerate(annsamp):
if anntype[j] == '(':
begin = samp
if (end > 0) & (s != 'none'):
if s == 'N':
annot_expand[end:begin] = states['st']
elif s == 't':
annot_expand[end:begin] = states['iso']
elif s == 'p':
annot_expand[end:begin] = states['pq']
elif anntype[j] == ')':
end = samp
if (begin > 0) & (s != 'none'):
annot_expand[begin:end] = states[s]
else:
s = anntype[j]
return annot_expand
def prepare_batchx(batch, model):
"""Prepare data for training
"""
_ = model
x = np.concatenate([hmm_features[0,:,:].T for hmm_features in batch.hmm_features])
return x
def prepare_batchy(batch, model):
"""Prepare data for training
"""
_ = model
lengths = [hmm_features.shape[2] for hmm_features in batch.hmm_features]
return lengths
def prepare_means_covars(hmm_features, clustering, states=[3, 5, 11, 14, 17, 19], num_states=19, num_features=3):#pylint: disable=dangerous-default-value
"""This function is specific to the task and the model configuration, thus contains hardcode.
"""
means = np.zeros((num_states, num_features))
covariances = np.zeros((num_states, num_features, num_features))
        # Preparing means and variances
last_state = 0
        unique_clusters = len(np.unique(clustering)) - 1  # Excluding value -1, which represents the undefined state
for state, cluster in zip(states, np.arange(unique_clusters)):
value = hmm_features[clustering == cluster, :]
means[last_state:state, :] = np.mean(value, axis=0)
covariances[last_state:state, :, :] = value.T.dot(value) / np.sum(clustering == cluster)
last_state = state
return means, covariances
def prepare_transmat_startprob():
""" This function is specific to the task and the model configuration, thus contains hardcode.
"""
        # Transition matrix - each row should add up to 1
transition_matrix = np.diag(19 * [14/15.0]) + np.diagflat(18 * [1/15.0], 1) + np.diagflat([1/15.0], -18)
# We suppose that absence of P-peaks is possible
transition_matrix[13, 14] = 0.9*1/15.0
transition_matrix[13, 17] = 0.1*1/15.0
# Initial distribution - should add up to 1
        start_probabilities = np.array(19 * [1 / 19.0])
return transition_matrix, start_probabilities
lengths = [hmm_features.shape[2] for hmm_features in hmm_preprocessed.get_variable("hmm_features")]
hmm_features = np.concatenate([hmm_features[0,:,:].T for hmm_features in hmm_preprocessed.get_variable("hmm_features")])
anntype = hmm_preprocessed.get_variable("anntypes")
annsamp = hmm_preprocessed.get_variable("annsamps")
expanded = np.concatenate([expand_annotation(samp, types, length) for samp, types, length in zip(annsamp, anntype, lengths)])
means, covariances = prepare_means_covars(hmm_features, expanded, states = [3, 5, 11, 14, 17, 19], num_features = 3)
transition_matrix, start_probabilities = prepare_transmat_startprob()
config_train = {
'build': True,
'estimator': hmm.GaussianHMM(n_components=19, n_iter=25, covariance_type="full", random_state=42,
init_params='', verbose=False),
'init_params': {'means_': means, 'covars_': covariances, 'transmat_': transition_matrix,
'startprob_': start_probabilities}
}
return (ds.Pipeline()
.init_variable("hmm_features", list)
.init_model("HMM", HMModel, "dynamic", config=config_train)
.load(fmt='wfdb', components=["signal"], ann_ext='pu')
.cwt(src="signal", dst="hmm_features", scales=[4,8,16], wavelet="mexh")
.standardize(axis=-1, src="hmm_features", dst="hmm_features")
.train_model("HMM", X=F(prepare_batchx)(B(), "HMM"), lengths=F(prepare_batchy)(B(), "HMM"))
.run(batch_size=batch_size, shuffle=False, drop_last=False, n_epochs=1, lazy=True, bar=True))
def hmm_predict_pipeline(model_path, batch_size=20, features="hmm_features",
channel_ix=0, annot="hmm_annotation", model_name='HMM'):
"""Prediction pipeline for Hidden Markov Model.
This pipeline isolates QRS, PQ and QT segments.
    It works with a dataset that generates batches of class ``EcgBatch``.
Parameters
----------
model_path : str
Path to pretrained ``HMModel``.
batch_size : int
Number of samples in batch.
Default value is 20.
features : str
Batch attribute to store calculated features.
channel_ix : int
Index of channel, which data should be used in training and predicting.
annot: str
Specifies attribute of batch in which annotation will be stored.
Returns
-------
pipeline : Pipeline
Output pipeline.
"""
config_predict = {
'build': False,
'load': {'path': model_path}
}
def prepare_batchx(batch, model):
"""Prepare data for training
"""
_ = model
x = np.concatenate([hmm_features[0,:,:].T for hmm_features in batch.hmm_features])
return x
def prepare_batchy(batch, model):
"""Prepare data for training
"""
_ = model
lengths = [hmm_features.shape[2] for hmm_features in batch.hmm_features]
return lengths
return (ds.Pipeline()
.init_model(model_name, HMModel,"static", config=config_predict)
.load(fmt="wfdb", components=["signal", "meta"])
.cwt(src="signal", dst=features, scales=[4, 8, 16], wavelet="mexh")
.standardize(axis=-1, src=features, dst=features)
.predict_model(model_name,
X=F(prepare_batchx)(B(), "HMM"),
lengths=F(prepare_batchy)(B(), "HMM"),
channel_ix=channel_ix,
save_to=B(annot), mode='w')
.calc_ecg_parameters(src=annot)
.run(batch_size=batch_size, shuffle=False, drop_last=False, n_epochs=1, lazy=True, bar=False))
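# End-to-end usage sketch (illustrative; assumes `ecg_dataset` is a cardio Dataset that
# yields EcgBatch objects and that, as in the cardio tutorials, a template pipeline is
# bound to a dataset with `>>` before calling .run(); the model path is a placeholder):
#   preprocessed = (ecg_dataset >> hmm_preprocessing_pipeline()).run()
#   trained = (ecg_dataset >> hmm_train_pipeline(preprocessed)).run()
#   trained.save_model("HMM", path="hmm_model.dill")
#   predictions = (ecg_dataset >> hmm_predict_pipeline("hmm_model.dill")).run()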
|
the-stack_106_25222 | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test emitter with rules."""
import os
import unittest
import tests.utils as tu
from geneve.events_emitter import SourceEvents, ast_from_rule
from . import jupyter
class TestRules(tu.QueryTestCase, tu.SeededTestCase, unittest.TestCase):
maxDiff = None
nb = jupyter.Notebook()
nb.cells.append(jupyter.Markdown("""
# Documents generation from detection rules
This report captures the error reported while generating documents from detection rules. Here you
can learn what rules are still problematic and for which no documents can be generated at the moment.
Curious about the inner workings? Read [here](signals_generation.md).
"""))
def parse_from_collection(self, collection):
asts = []
rules = []
errors = {}
for rule in collection:
try:
asts.append(ast_from_rule(rule))
rules.append(rule)
except Exception as e:
errors.setdefault(str(e), []).append(rule)
continue
with self.nb.chapter("## Skipped rules") as cells:
cells.append(None)
for err in sorted(sorted(errors), key=lambda e: len(errors[e]), reverse=True):
heading = [f"{len(errors[err])} rules:", ""]
bullets = []
for rule in sorted(errors[err], key=lambda r: r.name):
bullets.append(f"* {rule.name} ({rule.path})")
with self.nb.chapter(f"### {err} ({len(errors[err])})") as cells:
cells.append(jupyter.Markdown(heading + sorted(bullets)))
return rules, asts
def generate_docs(self, rules, asts):
errors = {}
for rule, ast in zip(rules, asts):
try:
se = SourceEvents(self.schema)
se.add_ast(ast)
_ = se.emit(timestamp=False, complete=True)
except Exception as e:
errors.setdefault(str(e), []).append(rule)
continue
with self.nb.chapter("## Generation errors") as cells:
cells.append(None)
for err in sorted(sorted(errors), key=lambda e: len(errors[e]), reverse=True):
heading = [f"{len(errors[err])} rules:"]
bullets = []
for rule in sorted(errors[err], key=lambda r: r.name):
bullets.append(f"* {rule.name} ({rule.path})")
with self.nb.chapter(f"### {err} ({len(errors[err])})") as cells:
cells.append(jupyter.Markdown(heading + sorted(bullets)))
def test_rules_collection(self):
collection = sorted(tu.load_test_rules(), key=lambda x: x.name)
rules, asts = self.parse_from_collection(collection)
self.generate_docs(rules, asts)
def test_unchanged(self):
tu.assertReportUnchanged(self, self.nb, "documents_from_rules.md")
@unittest.skipIf(os.getenv("TEST_SIGNALS_RULES", "0").lower() in ("0", "false", "no", ""), "Slow online test")
class TestSignalsRules(tu.SignalsTestCase, tu.OnlineTestCase, tu.SeededTestCase, unittest.TestCase):
maxDiff = None
nb = jupyter.Notebook()
nb.cells.append(jupyter.Markdown("""
# Alerts generation from detection rules
This report captures the detection rules signals generation coverage. Here you can
    learn which rules are supported, which are not, and why.
Curious about the inner workings? Read [here](signals_generation.md).
"""))
def parse_from_collection(self, collection):
rules = []
asts = []
for i, rule in enumerate(collection):
try:
asts.append(ast_from_rule(rule))
except Exception:
continue
index_name = "{:s}-{:03d}".format(self.index_template, i)
rules.append({
"rule_id": rule.rule_id,
"risk_score": rule.risk_score,
"description": rule.description,
"name": rule.name,
"index": [index_name],
"interval": "3s",
"from": "now-2h",
"severity": rule.severity,
"type": rule.type,
"query": rule.query,
"language": rule.language,
"max_signals": 200,
"enabled": True,
".test_private": {}, # private test data, not sent to Kibana
})
return rules, asts
ack_no_signals = 25
ack_too_few_signals = 1
def test_rules(self):
mf_ext = f"_{self.multiplying_factor}x" if self.multiplying_factor > 1 else ""
collection = sorted(tu.load_test_rules(), key=lambda x: x.name)
rules, asts = self.parse_from_collection(collection)
pending = self.load_rules_and_docs(rules, asts)
self.check_signals(rules, pending)
tu.assertReportUnchanged(self, self.nb, f"alerts_from_rules{mf_ext}.md")
|
the-stack_106_25225 | '''
A script that converts blog posts to Markdown.
Currently supports Jianshu, Zhihu, CSDN, SegmentFault and Juejin. Usage: python html2md.py -u <url>
Because blog sites keep changing how their pages are rendered and which anti-scraping techniques they use, this script is no longer maintained.
The basic idea is to locate the article body in the page, extract its HTML with BeautifulSoup, and then convert it to markdown via tomd.py.
'''
import os
import sys
import getopt
import requests
import random
import re
import html2text
from bs4 import BeautifulSoup
useragents = [
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
]
def jinashu(url):
    ## browser request headers
headers = {
'Host': 'www.jianshu.com',
'Referer': 'https://www.jianshu.com/',
'User-Agent': random.choice(useragents)
}
    ## fetch the page body
html = requests.get(url,headers=headers).text
## bs4
soup = BeautifulSoup(html,"html5lib")
title = soup.find_all("title")[0].get_text()
article = str(soup.find_all("div",class_="show-content")[0])
    ## prefix the image src with https: so the images can be loaded
article = re.sub('(src=")|(data-original-src=")','src="https:',article)
    ## write to file
    pwd = os.getcwd() # get the current working directory
dirpath = pwd + '/jianshu/'
write2md(dirpath,title,article)
def csdn(url):
headers = {
'Host': 'blog.csdn.net',
'Referer': 'http://blog.csdn.net/',
'User-Agent': random.choice(useragents)
}
    ## fetch the page body
html = requests.get(url,headers=headers).text
## bs4
soup = BeautifulSoup(html,'html5lib')
title = soup.find_all('title')[0].get_text()
article = str(soup.find_all('article')[0])
    ## write to file
    pwd = os.getcwd() # get the current working directory
dirpath = pwd + '/CSDN/'
write2md(dirpath,title,article)
def zhihu(url):
headers = {
'Host': 'zhuanlan.zhihu.com',
'Referer': 'https://www.zhihu.com/',
'User-Agent': random.choice(useragents)
}
html = requests.get(url,headers=headers).text
## bs4
soup = BeautifulSoup(html,'html5lib')
title = soup.find_all('title')[0].get_text()
article = str(soup.find_all('div',class_='Post-RichText')[0])
    ## write to file
    pwd = os.getcwd() # get the current working directory
dirpath = pwd + '/ZhiHu/'
write2md(dirpath,title,article)
def segmentfault(url):
headers = {
# 'Host': 'https://segmentfault.com',
'Referer': 'https://segmentfault.com/',
'User-Agent': random.choice(useragents)
}
html = requests.get(url,headers=headers).text
## bs4
soup = BeautifulSoup(html,'html5lib')
    title = soup.find('title').text # get the title
article = str(soup.find(class_='article__content'))
    ## make the images loadable
# article = re.sub('<p><span class="img-wrap">','',article)
# article = re.sub('</span></p>','',article)
article = re.sub('data-src="','src="https://segmentfault.com',article)
print(article)
    # write to file
    pwd = os.getcwd() # get the current working directory
dirpath = pwd + '/segmentfault/'
write2md(dirpath,title,article)
def juejin(url):
headers = {
'Host': 'juejin.im',
'Referer': 'https://juejin.im/',
'User-Agent': random.choice(useragents)
}
res = requests.get(url=url,headers=headers).text # 获取整个html
soup = BeautifulSoup(res,'html5lib')
title = soup.find('title').text
article = str(soup.find(class_='post-content-container'))
    ## write to file
    pwd = os.getcwd() # get the current working directory
dirpath = pwd + '/segmentfault/'
write2md(dirpath,title,article)
def doelse(url):
headers = {
'User-Agent': random.choice(useragents)
}
res = requests.get(url=url ,headers=headers) # 获取整个html页面
h = html2text.HTML2Text()
h.ignore_links = False
soup = BeautifulSoup(res.text,'html5lib')
    title = soup.title.text # get the title
html = str(soup.body)
article = h.handle(html)
    pwd = os.getcwd() # get the current working directory
    dirpath = pwd + '/Else/'
    if not os.path.exists(dirpath): # create the directory if it does not exist
os.makedirs(dirpath)
    ## write to file
write2md(dirpath,title,article)
"""
Takes a directory path, a title and the article content, and writes the converted Markdown to a file.
"""
def write2md(dirpath,title,article):
    ## create the html-to-markdown converter
h2md = html2text.HTML2Text()
h2md.ignore_links = False
    ## convert the document
article = h2md.handle(article)
    ## write to file
    if not os.path.exists(dirpath): # create the directory if it does not exist
os.makedirs(dirpath)
    # create the md file
with open(dirpath+title+'.md','w',encoding="utf8") as f:
lines = article.splitlines()
for line in lines:
if line.endswith('-'):
f.write(line)
else:
f.write(line+"\n")
    print(title + " download complete....")
def main(argv):
try:
        opts, args = getopt.getopt(argv, "hu:", ["url="])
    except getopt.GetoptError:
        print("python html2md.py -u <url>")
        sys.exit(2)
for opt,arg in opts:
if opt == "-h":
print("python html2md.py -u <url>")
sys.exit(2)
        elif opt in ("-u", "--url"):
print()
checkSite(arg)
else:
print("python html2md.py -u <url>")
## check the site and decide which downloader to use
def checkSite(url):
if url.find('csdn') != -1:
csdn(url)
elif url.find('jianshu') != -1:
jinashu(url)
elif url.find('zhihu') != -1:
zhihu(url)
elif url.find('segmentfault') != -1:
segmentfault(url)
else:
doelse(url)
if __name__ == "__main__":
main(sys.argv[1:])
|
the-stack_106_25227 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export a trained model for serving."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensor2tensor.bin import t2t_trainer
from tensor2tensor.utils import decoding
from tensor2tensor.utils import t2t_model
from tensor2tensor.utils import trainer_lib
from tensor2tensor.utils import usr_dir
import tensorflow as tf
import tensorflow_hub as hub
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_bool("export_as_tfhub", False,
"If True, the model will be exported as tfHub module.")
tf.flags.DEFINE_string(
"export_dir", None, "Directory, where export model should be stored."
"If None, the model will be stored in subdirectory "
"where checkpoints are: --output_dir")
def create_estimator(run_config, hparams):
return trainer_lib.create_estimator(
FLAGS.model,
hparams,
run_config,
decode_hparams=decoding.decode_hparams(FLAGS.decode_hparams))
def create_hparams():
return trainer_lib.create_hparams(
FLAGS.hparams_set,
FLAGS.hparams,
data_dir=os.path.expanduser(FLAGS.data_dir),
problem_name=FLAGS.problem)
# TODO(michalski): Move this method into tfhub utils.
def export_module_spec_with_checkpoint(module_spec,
checkpoint_path,
export_path,
scope_prefix=""):
"""Exports given checkpoint as tfhub module with given spec."""
# The main requirement is that it is possible to know how to map from
# module variable name to checkpoint variable name.
# This is trivial if the original code used variable scopes,
  # but can be messy if the variables to export are intertwined
  # with variables that are not exported.
with tf.Graph().as_default():
m = hub.Module(module_spec)
assign_map = {
scope_prefix + name: value for name, value in m.variable_map.items()
}
tf.train.init_from_checkpoint(checkpoint_path, assign_map)
init_op = tf.initializers.global_variables()
with tf.Session() as session:
session.run(init_op)
m.export(export_path, session)
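# Illustrative call (not in the original file): given a module spec built elsewhere,
#   export_module_spec_with_checkpoint(spec,
#                                      checkpoint_path="/tmp/run/model.ckpt-1000",
#                                      export_path="/tmp/run/export/tfhub",
#                                      scope_prefix="")
# writes a self-contained TF-Hub module directory with the checkpoint's weights.
# The checkpoint and export paths above are placeholders.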
def export_as_tfhub_module(model_name,
hparams,
decode_hparams,
problem,
checkpoint_path,
export_dir):
"""Exports the last checkpoint from the directory as tfhub module.
It creates the Module spec and signature (based on T2T problem information),
which is later used to create and export the hub module.
Module will be saved inside the ckpt_dir.
Args:
model_name: name of the model to be exported.
hparams: T2T parameters, model graph will be based on them.
decode_hparams: T2T parameters for decoding.
problem: the name of the problem
checkpoint_path: path to the checkpoint to be exported.
export_dir: Directory to write the exported model to.
"""
def hub_module_fn():
"""Creates the TF graph for the hub module."""
model_fn = t2t_model.T2TModel.make_estimator_model_fn(
model_name,
hparams,
decode_hparams=decode_hparams)
features = problem.serving_input_fn(hparams).features
# we must do a copy of the features, as the model_fn can add additional
# entries there (like hyperparameter settings etc).
original_features = features.copy()
spec = model_fn(features, labels=None, mode=tf.estimator.ModeKeys.PREDICT)
hub.add_signature(
inputs=original_features,
outputs=spec.export_outputs["serving_default"].outputs)
# TFHub doesn't support LOSSES collections.
module_spec = hub.create_module_spec(
hub_module_fn, drop_collections=[tf.GraphKeys.LOSSES])
# Loads the weights from the checkpoint using the model above
# and saves it in the export_path.
export_module_spec_with_checkpoint(
module_spec,
checkpoint_path=checkpoint_path,
export_path=export_dir,
scope_prefix="")
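# Consuming the exported module (sketch; assumes TF1-style hub.Module loading and that the
# problem's serving signature takes an "inputs" tensor, which is typical for T2T text
# problems but not guaranteed for every problem type):
#   module = hub.Module("/path/to/export_dir")
#   outputs = module({"inputs": encoded_inputs})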
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
trainer_lib.set_random_seed(FLAGS.random_seed)
usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
ckpt_dir = os.path.expanduser(FLAGS.output_dir)
hparams = create_hparams()
hparams.no_data_parallelism = True # To clear the devices
problem = hparams.problem
export_dir = FLAGS.export_dir or os.path.join(ckpt_dir, "export")
if FLAGS.export_as_tfhub:
checkpoint_path = tf.train.latest_checkpoint(ckpt_dir)
decode_hparams = decoding.decode_hparams(FLAGS.decode_hparams)
export_as_tfhub_module(FLAGS.model, hparams, decode_hparams, problem,
checkpoint_path, export_dir)
return
run_config = t2t_trainer.create_run_config(hparams)
estimator = create_estimator(run_config, hparams)
exporter = tf.estimator.FinalExporter(
"exporter", lambda: problem.serving_input_fn(hparams), as_text=True)
exporter.export(
estimator,
export_dir,
checkpoint_path=tf.train.latest_checkpoint(ckpt_dir),
eval_result=None,
is_the_final_export=True)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
|
the-stack_106_25232 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="NatPy",
version="0.1.1",
author="Tomas Howson and Andre Scaffidi",
author_email="[email protected], [email protected]",
description="Convert the units of particle physics quantities.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/AndreScaffidi/NatPy",
packages=setuptools.find_packages(),
install_requires=['numpy', 'astropy'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
)
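# Install sketch (illustrative):
#   pip install NatPy
# The import name and API are not shown in this file; see the project README at
# https://github.com/AndreScaffidi/NatPy for usage examples.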
|
the-stack_106_25233 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
from spack import *
class Fluxbox(AutotoolsPackage):
"""Fluxbox is a windowmanager for X that was based on the Blackbox 0.61.1 code.
It is very light on resources and easy to handle but yet full of features
to make an easy, and extremely fast, desktop experience.
"""
homepage = "http://fluxbox.org/"
url = "http://sourceforge.net/projects/fluxbox/files/fluxbox/1.3.7/fluxbox-1.3.7.tar.gz"
version('1.3.7', sha256='c99e2baa06fff1e96342b20415059d12ff1fa2917ade0173c75b2fa570295b9f')
# Referenced:https://sourceforge.net/p/fluxbox/bugs/1171/
patch('fix_zero_comparison.patch')
depends_on('pkgconfig', type='build')
depends_on('freetype')
depends_on('libxrender')
depends_on('libxext')
depends_on('expat')
depends_on('libx11')
|
the-stack_106_25235 | # Multiple Machine Learning Models to Predict Tetrahydrofolate from Whole-Genome Methylation Data in Placenta Tissue
# Load Packages
import pandas as pd
import numpy as np
import random
import sklearn
from sklearn.model_selection import LeaveOneOut
from sklearn import preprocessing
from matplotlib import pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# load window methylation data
A = pd.read_csv("Window_Meth.csv")
# load window methylation data for chromosome 1
# A = pd.read_csv("Window_chr_Meth.csv")
# load folate data
B = pd.read_csv("Folate_placenta.csv")
# remove row without enough coverage
A = A[~(A == 0).any(axis=1)]
# Matrix and modify data structure
n_tr = 70
n_te = 17
m = len(A)
X_tr = np.zeros((n_tr,m))
y_tr = np.zeros(n_tr)
A1 = A.values
A2 = A1[:,1:]
A3 = A2.astype(float)
A4 = A3.transpose()
B1 = B.values
B2 = B1[:,1]
B3 = B2.astype(float)
# Random shuffle samples to seperate training and testing set
ind = list(range(87))
random.seed(20)
random.shuffle(ind)
ind_tr = ind[:70]
ind_te = ind[70:]
X_tr = A4[ind_tr,:]
X_te = A4[ind_te,:]
y_tr = B3[ind_tr]
y_te = B3[ind_te]
# scale data
from sklearn.preprocessing import scale
X_tr = scale(X_tr)
X_te = scale(X_te)
y_tr = scale(y_tr)
y_te = scale(y_te)
## Linear Regression
from sklearn.linear_model import LinearRegression
reg = LinearRegression().fit(X_tr, y_tr)
Regre_pred = reg.predict(X_te)
# R2 score
from sklearn.metrics import r2_score
r2_score(y_te, Regre_pred, multioutput='variance_weighted')
plt.plot(range(17),Regre_pred)
plt.plot(range(17),y_te)
plt.show()
## Nearest neighbors
from sklearn.neighbors import KNeighborsRegressor
neigh = KNeighborsRegressor(n_neighbors=2, p = 7, weights = 'distance')
neigh.fit(X_tr, y_tr)
neigh_pred = neigh.predict(X_te)
# R2 score
from sklearn.metrics import r2_score
r2_score(y_te, neigh_pred, multioutput='variance_weighted')
plt.plot(range(17),neigh_pred)
plt.plot(range(17),y_te)
plt.show()
## SVM
from sklearn import svm
from sklearn.svm import SVR
svc = svm.SVR(kernel = 'linear', degree = 2)
svc.fit(X_tr, y_tr)
svm_pred = svc.predict(X_te)
# R2 score
from sklearn.metrics import r2_score
r2_score(y_te, svm_pred, multioutput='variance_weighted')
plt.plot(range(17),svm_pred)
plt.plot(range(17),y_te)
plt.show()
## Random Forrest
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(max_depth=2, random_state=1)
rf.fit(X_tr, y_tr)
rf_pred = rf.predict(X_te)
# R2 score
from sklearn.metrics import r2_score
r2_score(y_te, rf_pred, multioutput='variance_weighted')
plt.plot(range(17),rf_pred)
plt.plot(range(17),y_te)
plt.show()
## Adaboost
from sklearn.ensemble import AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
adb = AdaBoostRegressor(learning_rate=2.0, loss='linear')
adb.fit(X_tr,y_tr)
adb_pred = adb.predict(X_te)
# R2 score
from sklearn.metrics import r2_score
r2_score(y_te, adb_pred, multioutput='variance_weighted')
plt.plot(range(17),rf_pred)
plt.plot(range(17),y_te)
plt.show()
## Gaussian Process
from sklearn.datasets import make_friedman2
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
# Gaussian Process
kernel = DotProduct() + WhiteKernel()
gpr = GaussianProcessRegressor(kernel=kernel,random_state=10).fit(X_tr, y_tr)
gpr_pred = gpr.predict(X_te)
# R2 score
from sklearn.metrics import r2_score
r2_score(y_te, gpr_pred, multioutput='variance_weighted')
plt.plot(range(17),gpr_pred)
plt.plot(range(17),y_te)
plt.show()
## Decision Tree
from sklearn import tree
clf = tree.DecisionTreeRegressor(max_leaf_nodes=15)
clf = clf.fit(X_tr, y_tr)
clf_pred = clf.predict(X_te)
# R2 score
from sklearn.metrics import r2_score
r2_score(y_te, clf_pred, multioutput='variance_weighted')
plt.plot(range(17),clf_pred)
plt.plot(range(17),y_te)
plt.show()
## Feature Selection and Feature Importance
# Feature Extraction with PCA
import numpy
from pandas import read_csv
from sklearn.decomposition import PCA
# feature extraction for the 0.95 variance
pca = PCA(0.95)
fit = pca.fit(X_tr)
# feature extraction for the top 30
pca = PCA(n_components=30)
fit = pca.fit(X_tr)
train_img = pca.transform(X_tr)
test_img = pca.transform(X_te)
X_tr = train_img
X_te = test_img
## Neural Network
### Adapted from Tensorflow neural network tutorial
import tensorflow as tf
import math
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
# Set mini batches
def random_mini_batches(X, Y, mini_batch_size = 20, seed = 1):
m = X.shape[1]
mini_batches = []
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation].reshape((Y.shape[0],m))
num_complete_minibatches = math.floor(m/mini_batch_size)
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]
mini_batch_Y = shuffled_Y[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]
mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : m]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
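# Shape check for the helper above (illustrative): with X of shape (30, 70) and Y of shape
# (1, 70), random_mini_batches(X, Y, mini_batch_size=20) returns 4 (X, Y) pairs -- three with
# 20 columns each and a final partial one with the remaining 10 columns.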
# create placeholders
def create_placeholders(n_x, n_y):
X = tf.placeholder(tf.float32, [n_x, None], name="X")
Y = tf.placeholder(tf.float32, [n_y, None], name="Y")
return X, Y
X, Y = create_placeholders(30, 1)
print("X = " + str(X))
print("Y = " + str(Y))
# one layer
def initialize_parameters():
W1 = tf.get_variable("W1", [1, 30], initializer = tf.contrib.layers.xavier_initializer())
b1 = tf.get_variable("b1", [1, 1], initializer = tf.zeros_initializer())
parameters = {"W1": W1,
"b1": b1}
return parameters
tf.reset_default_graph()
with tf.Session() as sess:
parameters = initialize_parameters()
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
# forward propagation
def forward_propagation(X, parameters):
W1 = parameters['W1']
b1 = parameters['b1']
Z1 = tf.add(tf.matmul(W1, X), b1)
return Z1
tf.reset_default_graph()
with tf.Session() as sess:
X, Y = create_placeholders(30, 1)
parameters = initialize_parameters()
Z1 = forward_propagation(X, parameters)
print("Z1 = " + str(Z1))
# compute cost
def compute_cost(Z1, Y):
logits = tf.transpose(Z1)
labels = tf.transpose(Y)
cost = tf.reduce_mean(tf.nn.l2_loss(logits - labels))
return cost
tf.reset_default_graph()
with tf.Session() as sess:
X, Y = create_placeholders(30, 1)
parameters = initialize_parameters()
Z1 = forward_propagation(X, parameters)
cost = compute_cost(Z1, Y)
print("cost = " + str(cost))
# R2 score calculator
def R_squared(y, y_pred):
residual = tf.reduce_sum(tf.square(tf.subtract(y, y_pred)))
total = tf.reduce_sum(tf.square(tf.subtract(y, tf.reduce_mean(y))))
r2 = tf.subtract(1.0, tf.div(residual, total))
return r2
# np.std(y_tr)*3  # leftover notebook cell output; the bare expression has no effect in a script (relates to the std=3 default below)
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,
num_epochs = 2000, minibatch_size = 10, print_cost = True, std = 3):
ops.reset_default_graph()
tf.set_random_seed(1)
seed = 3
(n_x, m) = X_train.shape
n_y = Y_train.shape[0]
costs = []
X, Y = create_placeholders(n_x, n_y)
parameters = initialize_parameters()
Z1 = forward_propagation(X, parameters)
cost = compute_cost(Z1, Y)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for epoch in range(num_epochs):
epoch_cost = 0.
num_minibatches = int(m / minibatch_size)
seed = seed + 1
minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
for minibatch in minibatches:
(minibatch_X, minibatch_Y) = minibatch
_ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
epoch_cost += minibatch_cost / num_minibatches
if print_cost == True and epoch % 100 == 0:
print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
if print_cost == True and epoch % 5 == 0:
costs.append(epoch_cost)
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
parameters = sess.run(parameters)
print("Parameters have been trained!")
r2 = R_squared(Y, Z1)
r2avg = tf.reduce_mean(tf.cast(r2, "float"))
print("Train R2:", r2avg.eval({X: X_train, Y: Y_train}))
print("Test R2:", r2avg.eval({X: X_test, Y: Y_test}))
correct_prediction = tf.math.less(tf.abs(Y - Z1), std)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print("Train accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
print("Test accuracy:", accuracy.eval({X: X_test, Y: Y_test}))
b = tf.add(Z1, Z1)/2
plt.plot(range(17), b.eval({X: X_test, Y: Y_test}).T)
plt.plot(range(17), Y_test.T)
return parameters
# origize data format for neural network
X_tr = X_tr.T
X_te = X_te.T
y_tr = np.reshape(y_tr,[1,70])
y_te = np.reshape(y_te,[1,17])
parameters = model(X_tr, y_tr, X_te, y_te)
## Reference
# scikit-learn: https://github.com/scikit-learn/scikit-learn
#
# pandas: https://github.com/pandas-dev/pandas
#
# deep learning example: https://github.com/enggen/Deep-Learning-Coursera
|
the-stack_106_25236 | from __future__ import unicode_literals, division, absolute_import
import logging
import smtplib
import socket
import sys
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from smtplib import SMTPException
from email.utils import formatdate
from flexget import config_schema, manager, plugin
from flexget.event import event
from flexget.utils.template import render_from_task, get_template, RenderError
from flexget.utils.tools import merge_dict_from_to, MergeException
from flexget import validator
log = logging.getLogger('email')
# A dict which stores the email content from each task when plugin is configured globally
task_content = {}
def options_validator():
email = validator.factory('dict')
email.accept('boolean', key='active')
email.accept('text', key='to', required=True)
email.accept('list', key='to', required=True).accept('text')
email.accept('text', key='from', required=True)
email.accept('text', key='smtp_host')
email.accept('integer', key='smtp_port')
email.accept('boolean', key='smtp_login')
email.accept('text', key='smtp_username')
email.accept('text', key='smtp_password')
email.accept('boolean', key='smtp_tls')
email.accept('boolean', key='smtp_ssl')
email.accept('text', key='template')
email.accept('text', key='subject')
return email
def prepare_config(config):
config.setdefault('active', True)
config.setdefault('smtp_host', 'localhost')
config.setdefault('smtp_port', 25)
config.setdefault('smtp_login', False)
config.setdefault('smtp_username', '')
config.setdefault('smtp_password', '')
config.setdefault('smtp_tls', False)
config.setdefault('smtp_ssl', False)
config.setdefault('template', 'default.template')
if not isinstance(config['to'], list):
config['to'] = [config['to']]
return config
@event('manager.execute.started')
def setup(manager):
    if 'email' not in manager.config:
return
config = prepare_config(manager.config['email'])
config['global'] = True
global task_content
task_content = {}
for task_name, task_config in manager.config['tasks'].iteritems():
task_config.setdefault('email', {})
try:
merge_dict_from_to(config, task_config['email'])
except MergeException as exc:
raise plugin.PluginError('Failed to merge email config to task %s due to %s' % (task_name, exc))
task_config.setdefault('email', config)
@event('manager.execute.completed')
def global_send(manager):
    if 'email' not in manager.config:
return
config = prepare_config(manager.config['email'])
content = ''
for task, text in task_content.iteritems():
content += '_' * 30 + ' Task: %s ' % task + '_' * 30 + '\n'
content += text + '\n'
if not content:
log.verbose('No tasks generated any email notifications. Not sending.')
return
if config.get('subject'):
# If subject is specified, use it from the config
subject = config['subject']
elif config['template'].startswith('failed'):
subject = '[FlexGet] Failures on task(s): %s' % ', '.join(task_content)
else:
subject = '[FlexGet] Notifications for task(s): %s' % ', '.join(task_content)
send_email(subject, content, config)
def send_email(subject, content, config):
"""Send email at exit."""
# prepare email message
message = MIMEMultipart('alternative')
message['To'] = ','.join(config['to'])
message['From'] = config['from']
message['Subject'] = subject
message['Date'] = formatdate(localtime=True)
content_type = 'html' if '<html>' in content else 'plain'
message.attach(MIMEText(content.encode('utf-8'), content_type, _charset='utf-8'))
# send email message
if manager.manager.options.test:
log.info('Would send email : %s' % message.as_string())
log.info(content)
else:
log.verbose('Sending email.')
try:
if config['smtp_ssl']:
if sys.version_info < (2, 6, 3):
raise plugin.PluginError('SSL email support requires python >= 2.6.3 due to python bug #4066, '
'upgrade python or use TLS', log)
# Create a SSL connection to smtp server
mailServer = smtplib.SMTP_SSL(config['smtp_host'], config['smtp_port'])
else:
mailServer = smtplib.SMTP(config['smtp_host'], config['smtp_port'])
if config['smtp_tls']:
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
except socket.error as e:
log.warning('Socket error: %s' % e)
return
except SMTPException as e:
# Ticket #1133
log.warning('Unable to send email: %s' % e)
return
try:
if config.get('smtp_username') and config.get('smtp_password'):
mailServer.login(config['smtp_username'], config['smtp_password'])
mailServer.sendmail(message['From'], config['to'], message.as_string())
except IOError as e:
# Ticket #686
log.warning('Unable to send email! IOError: %s' % e)
return
except SMTPException as e:
log.warning('Unable to send email! SMTPException: %s' % e)
return
mailServer.quit()
class OutputEmail(object):
"""
Send an e-mail with the list of all succeeded (downloaded) entries.
Configuration options
=============== ===================================================================
Option Description
=============== ===================================================================
from The email address from which the email will be sent (required)
to The email address of the recipient (required)
smtp_host The host of the smtp server
smtp_port The port of the smtp server
smtp_username The username to use to connect to the smtp server
smtp_password The password to use to connect to the smtp server
smtp_tls Should we use TLS to connect to the smtp server
smtp_ssl Should we use SSL to connect to the smtp server
Due to a bug in python, this only works in python 2.6.3 and up
active Is this plugin active or not
=============== ===================================================================
Config basic example::
email:
from: [email protected]
to: [email protected]
smtp_host: smtp.host.com
Config example with smtp login::
email:
from: [email protected]
to: [email protected]
smtp_host: smtp.host.com
smtp_port: 25
smtp_login: true
smtp_username: my_smtp_login
smtp_password: my_smtp_password
smtp_tls: true
Config multi-task example::
global:
email:
from: [email protected]
to: [email protected]
smtp_host: smtp.host.com
tasks:
task1:
rss: http://xxx
task2:
rss: http://yyy
email:
active: False
task3:
rss: http://zzz
email:
to: [email protected]
GMAIL example::
from: [email protected]
to: [email protected]
smtp_host: smtp.gmail.com
smtp_port: 587
smtp_login: true
smtp_username: gmailUser
smtp_password: gmailPassword
smtp_tls: true
Default values for the config elements::
email:
active: True
smtp_host: localhost
smtp_port: 25
smtp_login: False
smtp_username:
smtp_password:
smtp_tls: False
smtp_ssl: False
"""
def validator(self):
v = options_validator()
v.accept('boolean', key='global')
return v
@plugin.priority(0)
def on_task_output(self, task, config):
config = prepare_config(config)
if not config['active']:
return
# don't send mail when learning
if task.options.learn:
return
# generate email content
if config.get('subject'):
subject = config['subject']
else:
subject = '[FlexGet] {{task.name}}: '
if task.aborted:
subject += 'Aborted'
elif task.failed:
subject += '{{task.failed|length}} failed entries'
else:
subject += '{{task.accepted|length}} new entries downloaded'
try:
subject = render_from_task(subject, task)
except RenderError as e:
log.error('Error rendering email subject: %s' % e)
return
try:
content = render_from_task(get_template(config['template'], 'email'), task)
except RenderError as e:
log.error('Error rendering email body: %s' % e)
return
if not content.strip():
log.verbose('No content generated from template, not sending email.')
return
if config.get('global'):
# Email plugin was configured at root, save the email output
log.debug('Saving email content for task %s' % task.name)
task_content[task.name] = content
else:
send_email(subject, content, config)
# Also send the email on abort
on_task_abort = on_task_output
@event('plugin.register')
def register_plugin():
plugin.register(OutputEmail, 'email', api_ver=2)
@event('config.register')
def register_config_key():
config_schema.register_config_key('email', options_validator().schema())
|
the-stack_106_25241 | # Copyright 2017-present Kensho Technologies, LLC.
"""Perform optimizations and lowering of the IR that allows the compiler to emit MATCH queries.
The compiler IR allows blocks and expressions that cannot be directly compiled to Gremlin or MATCH.
For example, ContextFieldExistence is an Expression that returns True iff its given vertex exists,
but the produced Gremlin and MATCH outputs for this purpose are entirely different and not easy
to generate directly from this Expression object. An output-language-aware IR lowering step allows
us to convert this Expression into other Expressions, using data already present in the IR,
to simplify the final code generation step.
"""
from typing import Dict, List, Optional, Set
import six
from ..blocks import Backtrack, CoerceType, Filter, MarkLocation, QueryRoot
from ..compiler_entities import BasicBlock, Expression
from ..expressions import BinaryComposition, FalseLiteral, Literal, TernaryConditional, TrueLiteral
from ..helpers import Location
from ..ir_lowering_common.location_renaming import (
make_location_rewriter_visitor_fn,
make_revisit_location_translations,
translate_potential_location,
)
from ..match_query import MatchQuery, MatchStep
from ..metadata import QueryMetadataTable
from .utils import CompoundMatchQuery, convert_coerce_type_to_instanceof_filter
##################################
# Optimization / lowering passes #
##################################
def rewrite_binary_composition_inside_ternary_conditional(
ir_blocks: List[BasicBlock],
) -> List[BasicBlock]:
"""Rewrite BinaryConditional expressions in the true/false values of TernaryConditionals."""
def visitor_fn(expression: Expression) -> Expression:
"""Expression visitor function."""
# MATCH queries do not allow BinaryComposition inside a TernaryConditional's true/false
# value blocks, since OrientDB cannot produce boolean values for comparisons inside them.
# We transform any structures that resemble the following:
# TernaryConditional(predicate, X, Y), with X or Y of type BinaryComposition
# into the following:
# - if X is of type BinaryComposition, and Y is not,
# BinaryComposition(
# '=',
# TernaryConditional(
# predicate,
# TernaryConditional(X, true, false),
# Y
# ),
# true
# )
# - if Y is of type BinaryComposition, and X is not,
# BinaryComposition(
# '=',
# TernaryConditional(
# predicate,
# X,
# TernaryConditional(Y, true, false),
# ),
# true
# )
# - if both X and Y are of type BinaryComposition,
# BinaryComposition(
# '=',
# TernaryConditional(
# predicate,
# TernaryConditional(X, true, false),
# TernaryConditional(Y, true, false)
# ),
# true
# )
if not isinstance(expression, TernaryConditional):
return expression
if_true = expression.if_true
if_false = expression.if_false
true_branch_rewriting_necessary = isinstance(if_true, BinaryComposition)
false_branch_rewriting_necessary = isinstance(if_false, BinaryComposition)
if not (true_branch_rewriting_necessary or false_branch_rewriting_necessary):
# No rewriting is necessary.
return expression
if true_branch_rewriting_necessary:
if_true = TernaryConditional(if_true, TrueLiteral, FalseLiteral)
if false_branch_rewriting_necessary:
if_false = TernaryConditional(if_false, TrueLiteral, FalseLiteral)
ternary = TernaryConditional(expression.predicate, if_true, if_false)
return BinaryComposition("=", ternary, TrueLiteral)
new_ir_blocks = [block.visit_and_update_expressions(visitor_fn) for block in ir_blocks]
return new_ir_blocks
def _prepend_wildcard(expression: Expression) -> BinaryComposition:
"""Prepend an SQL-MATCH wildcard to an expression."""
return BinaryComposition("+", Literal("%"), expression)
def _append_wildcard(expression: Expression) -> BinaryComposition:
"""Append an SQL-MATCH wildcard to an expression."""
return BinaryComposition("+", expression, Literal("%"))
def lower_string_operators(ir_blocks: List[BasicBlock]) -> List[BasicBlock]:
"""Lower Filters with "has_substring", "starts_with", or "ends_with" operation into MATCH."""
def visitor_fn(expression: Expression) -> Expression:
if not isinstance(expression, BinaryComposition):
return expression
elif expression.operator == "has_substring":
# The implementation of "has_substring" must use the LIKE operator in MATCH, and must
# prepend and append "%" (wildcard) symbols to the substring being matched.
# We transform any structures that resemble the following:
# BinaryComposition('has_substring', X, Y)
# into the following:
# BinaryComposition(
# 'LIKE',
# X,
# BinaryComposition(
# '+',
# Literal("%"),
# BinaryComposition(
# '+',
# Y,
# Literal("%")
# )
# )
# )
return BinaryComposition(
"LIKE", expression.left, _prepend_wildcard(_append_wildcard(expression.right))
)
elif expression.operator == "starts_with":
# Append a wildcard to the right of the argument string
return BinaryComposition("LIKE", expression.left, _append_wildcard(expression.right))
elif expression.operator == "ends_with":
# Prepend a wildcard to the left of the argument string
return BinaryComposition("LIKE", expression.left, _prepend_wildcard(expression.right))
else:
return expression
new_ir_blocks = [block.visit_and_update_expressions(visitor_fn) for block in ir_blocks]
return new_ir_blocks
def truncate_repeated_single_step_traversals(match_query: MatchQuery) -> MatchQuery:
"""Truncate one-step traversals that overlap a previous traversal location."""
# Such traversals frequently happen as side-effects of the lowering process
# of Backtrack blocks, and needlessly complicate the executed queries.
new_match_traversals: List[List[MatchStep]] = []
visited_locations: Set[Location] = set()
for current_match_traversal in match_query.match_traversals:
ignore_traversal = False
if len(current_match_traversal) == 1:
# Single-step traversal detected. If its location was visited already, ignore it.
single_step = current_match_traversal[0]
if single_step.as_block is None:
raise AssertionError(
"Unexpectedly found a single-step traversal with no as_block:"
" {} {}".format(current_match_traversal, match_query)
)
if single_step.as_block.location in visited_locations:
# This location was visited before, omit the traversal.
ignore_traversal = True
if not ignore_traversal:
# For each step in this traversal, mark its location as visited.
for step in current_match_traversal:
if step.as_block is not None:
visited_locations.add(step.as_block.location)
new_match_traversals.append(current_match_traversal)
return match_query._replace(match_traversals=new_match_traversals)
def lower_backtrack_blocks(
match_query: MatchQuery, query_metadata_table: QueryMetadataTable
) -> MatchQuery:
"""Lower Backtrack blocks into (QueryRoot, MarkLocation) pairs of blocks."""
# The lowering works as follows:
# 1. Upon seeing a Backtrack block, end the current traversal (if non-empty).
# 2. Start new traversal from the type and location to which the Backtrack pointed.
# 3. If the Backtrack block had an associated MarkLocation, ensure that location is marked
# as equivalent to the location where the Backtrack pointed.
# 4. Rewrite all expressions that reference such revisit locations, making them refer to
# the revisit origin location instead.
new_match_traversals: List[List[MatchStep]] = []
locations_needing_translation: Set[Location] = set()
for current_match_traversal in match_query.match_traversals:
new_traversal: List[MatchStep] = []
for step in current_match_traversal:
if not isinstance(step.root_block, Backtrack):
new_traversal.append(step)
else:
# 1. Upon seeing a Backtrack block, end the current traversal (if non-empty).
if new_traversal:
new_match_traversals.append(new_traversal)
new_traversal = []
backtrack_location = step.root_block.location
backtrack_location_info = query_metadata_table.get_location_info(backtrack_location)
# 2. Start new traversal from the type and location to which the Backtrack pointed.
new_root_block = QueryRoot({backtrack_location_info.type.name})
new_as_block = MarkLocation(backtrack_location)
# 3. If the Backtrack block had an associated MarkLocation, mark that location
# as equivalent to the location where the Backtrack pointed.
if step.as_block is not None:
locations_needing_translation.add(step.as_block.location)
if step.coerce_type_block is not None:
raise AssertionError(
"Encountered type coercion in a MatchStep with "
"a Backtrack root block, this is unexpected: {} {}".format(
step, match_query
)
)
new_step = step._replace(root_block=new_root_block, as_block=new_as_block)
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
new_match_query = match_query._replace(match_traversals=new_match_traversals)
location_translations = make_revisit_location_translations(query_metadata_table)
if locations_needing_translation != set(six.iterkeys(location_translations)):
raise AssertionError(
"Unexpectedly, the revisit location translations table computed from "
"the query metadata table did not match the locations needing "
"translation. This is a bug. {} {}".format(
location_translations, locations_needing_translation
)
)
return _translate_equivalent_locations(new_match_query, location_translations)
def _translate_equivalent_locations(
match_query: MatchQuery, location_translations: Dict[Location, Location]
) -> MatchQuery:
"""Translate Location objects into their equivalent locations, based on the given dict."""
new_match_traversals: List[List[MatchStep]] = []
visitor_fn = make_location_rewriter_visitor_fn(location_translations)
# Rewrite the Locations in the steps of each MATCH traversal.
for current_match_traversal in match_query.match_traversals:
new_traversal: List[MatchStep] = []
for step in current_match_traversal:
new_step = step
# If the root_block is a Backtrack, translate its Location if necessary.
if isinstance(new_step.root_block, Backtrack):
old_location = new_step.root_block.location
if not isinstance(old_location, Location):
raise AssertionError(
f"Expected old_location to be of Location type, but got {old_location} "
f"instead. This is a bug."
)
if old_location in location_translations:
new_location = location_translations[old_location]
new_step = new_step._replace(root_block=Backtrack(new_location))
# If the as_block exists, translate its Location if necessary.
if new_step.as_block is not None:
old_location = new_step.as_block.location
if not isinstance(old_location, Location):
raise AssertionError(
f"Expected old_location to be of Location type, but got {old_location} "
f"instead. This is a bug."
)
if old_location in location_translations:
new_location = location_translations[old_location]
new_step = new_step._replace(as_block=MarkLocation(new_location))
# If the where_block exists, update any Location objects in its predicate.
if new_step.where_block is not None:
new_where_block = new_step.where_block.visit_and_update_expressions(visitor_fn)
new_step = new_step._replace(where_block=new_where_block)
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
# Update the Location within each FoldScopeLocation
new_folds = {
translate_potential_location(location_translations, fold_scope_location): fold_ir_blocks
for fold_scope_location, fold_ir_blocks in six.iteritems(match_query.folds)
}
# Rewrite the Locations in the ConstructResult output block.
new_output_block = match_query.output_block.visit_and_update_expressions(visitor_fn)
# Rewrite the Locations in the global where block.
new_global_where_block: Optional[Filter] = None
if match_query.where_block is not None:
new_global_where_block = match_query.where_block.visit_and_update_expressions(visitor_fn)
return match_query._replace(
match_traversals=new_match_traversals,
folds=new_folds,
output_block=new_output_block,
where_block=new_global_where_block,
)
def lower_folded_coerce_types_into_filter_blocks(
folded_ir_blocks: List[BasicBlock],
) -> List[BasicBlock]:
"""Lower CoerceType blocks into "INSTANCEOF" Filter blocks. Indended for folded IR blocks."""
new_folded_ir_blocks: List[BasicBlock] = []
for block in folded_ir_blocks:
new_block: BasicBlock
if isinstance(block, CoerceType):
new_block = convert_coerce_type_to_instanceof_filter(block)
else:
new_block = block
new_folded_ir_blocks.append(new_block)
return new_folded_ir_blocks
def remove_backtrack_blocks_from_fold(folded_ir_blocks: List[BasicBlock]) -> List[BasicBlock]:
"""Return a list of IR blocks with all Backtrack blocks removed."""
new_folded_ir_blocks: List[BasicBlock] = []
for block in folded_ir_blocks:
if not isinstance(block, Backtrack):
new_folded_ir_blocks.append(block)
return new_folded_ir_blocks
def truncate_repeated_single_step_traversals_in_sub_queries(
compound_match_query: CompoundMatchQuery,
) -> CompoundMatchQuery:
"""For each sub-query, remove one-step traversals that overlap a previous traversal location."""
lowered_match_queries = []
for match_query in compound_match_query.match_queries:
new_match_query = truncate_repeated_single_step_traversals(match_query)
lowered_match_queries.append(new_match_query)
return compound_match_query._replace(match_queries=lowered_match_queries)
|
the-stack_106_25242 | import traceback
def dfs(adj, used, order, x):
used[x] = 1
for v in adj[x]:
if used[v] == 0:
dfs(adj, used, order, v)
used[x] = -1
order.append(x)
def topological_sort(adj):  # recursive DFS collecting nodes in post-order
used = [0] * len(adj)
order = []
for x in range(len(adj)):
if used[x] == 0:
dfs(adj, used, order, x)
order.reverse()
topological_order = [num + 1 for num in order]
return topological_order
def test_one():
num_nodes = 4
num_edges = 3
data = [1, 2, 4, 1, 3, 1]
adj = _generate_adj_matrix(data, num_nodes, num_edges)
topological_order = topological_sort(adj)
expected_order = [4, 3, 1, 2]
assert topological_order == expected_order
def test_two():
num_nodes = 4
num_edges = 1
data = [3, 1]
adj = _generate_adj_matrix(data, num_nodes, num_edges)
topological_order = topological_sort(adj)
expected_order = [4, 3, 2, 1]
assert topological_order == expected_order
def test_three():
num_nodes = 5
num_edges = 7
data = [2, 1, 3, 2, 3, 1, 4, 3, 4, 1, 5, 2, 5, 3]
adj = _generate_adj_matrix(data, num_nodes, num_edges)
topological_order = topological_sort(adj)
expected_order = [5, 4, 3, 2, 1]
assert topological_order == expected_order
def _generate_adj_matrix(data, num_nodes, num_edges):
edges = list(zip(data[0:(2 * num_edges):2], data[1:(2 * num_edges):2]))
adj = [[] for _ in range(num_nodes)]
for (a, b) in edges:
adj[a - 1].append(b - 1)
return adj
if __name__ == "__main__":
try:
print('begin tests')
test_one()
test_two()
test_three()
print('tests passed')
except AssertionError:
print('test assertions failed')
print(traceback.format_exc())
except Exception:
print('unknown failure')
print(traceback.format_exc())
|
the-stack_106_25243 | import pytest # noinspection PyPackageRequirements
import asyncio
from aionetworking.compatibility import (supports_task_name, get_task_name, get_current_task_name, set_task_name,
set_current_task_name, current_task)
class TestTaskNames:
@pytest.mark.asyncio
async def test_00_get_task_name(self, task):
if supports_task_name():
assert task.get_name() == get_task_name(task) == "Task-99"
else:
assert str(id(task)) == get_task_name(task)
@pytest.mark.asyncio
async def test_01_get_current_task_name(self):
this_task = current_task()
task_name = get_current_task_name()
if supports_task_name():
assert this_task.get_name() == task_name
else:
assert str(id(this_task)) == task_name
@staticmethod
def _prepare_current_task(name) -> asyncio.Task:
this_task = current_task()
if supports_task_name():
this_task.set_name(name)
assert this_task.get_name() == name
return this_task
@pytest.mark.asyncio
async def test_02_set_current_task_name(self):
this_task = self._prepare_current_task('Task-10')
set_current_task_name('TestTask')
current_task_name = get_current_task_name()
if supports_task_name():
assert this_task.get_name() == current_task_name == 'Task-10_TestTask'
else:
assert str(id(this_task)) == current_task_name
@pytest.mark.asyncio
async def test_03_set_task_name_without_hierarchy(self, task):
set_task_name(task, "HelloWorld", include_hierarchy=False)
if supports_task_name():
assert task.get_name() == get_task_name(task) == "Task-99_HelloWorld"
else:
assert str(id(task)) == get_task_name(task)
@pytest.mark.asyncio
async def test_04_set_task_name_include_hierarchy(self, task):
        # The following line is required to change the task name; PyCharm warns if the returned task is not assigned
_ = self._prepare_current_task('Task-10')
set_task_name(task, "HelloWorld")
if supports_task_name():
assert task.get_name() == get_task_name(task) == "Task-10:Task-99_HelloWorld"
else:
assert str(id(task)) == get_task_name(task)
await task
|
the-stack_106_25247 | # Copyright Niantic 2019. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the Monodepth2 licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
from __future__ import absolute_import, division, print_function
import os
import sys
import glob
import argparse
import numpy as np
import PIL.Image as pil
import matplotlib as mpl
import matplotlib.cm as cm
import torch
from torchvision import transforms, datasets
import networks
from layers import disp_to_depth
from utils import download_model_if_doesnt_exist
def parse_args():
parser = argparse.ArgumentParser(
        description='Simple testing function for Monodepthv2 models.')
parser.add_argument('--image_path', type=str, default='../Main/dataset/image/',
help='path to a test image or folder of images')
parser.add_argument('--model_name', type=str, default="mono+stereo_1024x320",
help='name of a pretrained model to use',
choices=[
"mono_640x192",
"stereo_640x192",
"mono+stereo_640x192",
"mono_no_pt_640x192",
"stereo_no_pt_640x192",
"mono+stereo_no_pt_640x192",
"mono_1024x320",
"stereo_1024x320",
"mono+stereo_1024x320"])
parser.add_argument('--ext', type=str,
help='image extension to search for in folder', default="png")
parser.add_argument("--no_cuda",
help='if set, disables CUDA',
action='store_true')
parser.add_argument("--save_path",
default='../Main/dataset/depth',type=str)
return parser.parse_args()
def test_simple(args):
"""Function to predict for a single image or folder of images
"""
assert args.model_name is not None, \
"You must specify the --model_name parameter; see README.md for an example"
if torch.cuda.is_available() and not args.no_cuda:
device = torch.device("cuda")
else:
device = torch.device("cpu")
download_model_if_doesnt_exist(args.model_name)
model_path = os.path.join("models", args.model_name)
print("-> Loading model from ", model_path)
encoder_path = os.path.join(model_path, "encoder.pth")
depth_decoder_path = os.path.join(model_path, "depth.pth")
# LOADING PRETRAINED MODEL
print(" Loading pretrained encoder")
encoder = networks.ResnetEncoder(18, False)
loaded_dict_enc = torch.load(encoder_path, map_location=device)
# extract the height and width of image that this model was trained with
feed_height = loaded_dict_enc['height']
feed_width = loaded_dict_enc['width']
filtered_dict_enc = {k: v for k, v in loaded_dict_enc.items() if k in encoder.state_dict()}
encoder.load_state_dict(filtered_dict_enc)
encoder.to(device)
encoder.eval()
print(" Loading pretrained decoder")
depth_decoder = networks.DepthDecoder(
num_ch_enc=encoder.num_ch_enc, scales=range(4))
loaded_dict = torch.load(depth_decoder_path, map_location=device)
depth_decoder.load_state_dict(loaded_dict)
depth_decoder.to(device)
depth_decoder.eval()
# FINDING INPUT IMAGES
if os.path.isfile(args.image_path):
# Only testing on a single image
paths = [args.image_path]
output_directory = os.path.dirname(args.image_path)
elif os.path.isdir(args.image_path):
# Searching folder for images
paths = glob.glob(os.path.join(args.image_path, '*.{}'.format(args.ext)))
output_directory = args.save_path
else:
raise Exception("Can not find args.image_path: {}".format(args.image_path))
print("-> Predicting on {:d} test images".format(len(paths)))
# PREDICTING ON EACH IMAGE IN TURN
with torch.no_grad():
for idx, image_path in enumerate(paths):
if image_path.endswith("_disp.jpg"):
# don't try to predict disparity for a disparity image!
continue
# Load image and preprocess
input_image = pil.open(image_path).convert('RGB')
original_width, original_height = input_image.size
input_image = input_image.resize((feed_width, feed_height), pil.LANCZOS)
input_image = transforms.ToTensor()(input_image).unsqueeze(0)
# PREDICTION
input_image = input_image.to(device)
features = encoder(input_image)
outputs = depth_decoder(features)
disp = outputs[("disp", 0)]
disp_resized = torch.nn.functional.interpolate(
disp, (original_height, original_width), mode="bilinear", align_corners=False)
# Saving numpy file
'''
name_dest_npy = os.path.join(output_directory, "{}_disp.npy".format(output_name))
scaled_disp, _ = disp_to_depth(disp, 0.1, 100)
np.save(name_dest_npy, scaled_disp.cpu().numpy())
'''
output_name = os.path.splitext(os.path.basename(image_path))[0]
# Saving colormapped depth image
disp_resized_np = disp_resized.squeeze().cpu().numpy()
vmax = np.percentile(disp_resized_np, 95)
normalizer = mpl.colors.Normalize(vmin=disp_resized_np.min(), vmax=vmax)
mapper = cm.ScalarMappable(norm=normalizer, cmap='magma')
colormapped_im = (mapper.to_rgba(disp_resized_np)[:, :, :3] * 255).astype(np.uint8)
im = pil.fromarray(colormapped_im)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
name_dest_im = os.path.join(output_directory, "{}_disp.png".format(output_name))
im.save(name_dest_im)
print(" Processed {:d} of {:d} images - saved prediction to {}".format(
idx + 1, len(paths), name_dest_im))
print('-> Done!')
if __name__ == '__main__':
args = parse_args()
test_simple(args) |
the-stack_106_25248 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import nn
class MultipleOutputLoss2(nn.Module):
def __init__(self, loss, weight_factors=None):
"""
        use this if you have several outputs and ground truths (both lists of the same length) and the loss should be
        computed between corresponding pairs (x[0] and y[0], x[1] and y[1], etc.)
:param loss:
:param weight_factors:
"""
super(MultipleOutputLoss2, self).__init__()
self.weight_factors = weight_factors
self.loss = loss
def forward(self, x, y):
assert isinstance(x, (tuple, list)), "x must be either tuple or list"
assert isinstance(y, (tuple, list)), "y must be either tuple or list"
if self.weight_factors is None:
weights = [1] * len(x)
else:
weights = self.weight_factors
l = weights[0] * self.loss(x[0], y[0])
for i in range(1, len(x)):
if weights[i] != 0:
l += weights[i] * self.loss(x[i], y[i])
return l
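# Illustrative usage sketch (not part of the original module): wrapping a base
# loss for three deep-supervision outputs. nn.MSELoss is only a stand-in chosen
# so the sketch runs standalone; real training code supplies its own loss.
if __name__ == "__main__":
    import torch
    ds_loss = MultipleOutputLoss2(nn.MSELoss(), weight_factors=[1.0, 0.5, 0.25])
    outputs = [torch.rand(2, 3, 16, 16), torch.rand(2, 3, 8, 8), torch.rand(2, 3, 4, 4)]
    targets = [torch.rand_like(o) for o in outputs]
    print(ds_loss(outputs, targets))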
|
the-stack_106_25249 | from enum import Enum
import tensorflow as tf
from sticker_graph.model import Model
from sticker_graph.weight_norm import WeightNorm
class Sharing(Enum):
none = 1
initial = 2
succeeding = 3
def mask_layer(layer, mask):
return tf.multiply(
tf.broadcast_to(
tf.expand_dims(
mask, -1), tf.shape(layer)), layer)
def dilated_convolution(
x,
n_outputs,
kernel_size,
n_levels,
is_training,
mask,
glu=True,
keep_prob=1.0):
layer = x
for i in range(n_levels):
        # Only use sharing for layers 1 and up. Layer 0 cannot use shared parameters:
#
# - It transforms word embeddings into the hidden representation,
# whereas subsequent layers transform hidden representations to
# hidden representations.
# - The input size may differ from the output size.
if i == 0:
sharing = Sharing.none
elif i == 1:
sharing = Sharing.initial
else:
sharing = Sharing.succeeding
dilation = 2 ** i
layer = residual_block(
layer,
n_outputs,
kernel_size,
dilation,
is_training=is_training,
mask=mask,
glu=glu,
keep_prob=keep_prob,
sharing=sharing)
# Mask after last convolution. This is only necessary for models that
# apply transformations across time steps after the diluted convolutions.
# But masking is cheap, so better safe than sorry.
layer = mask_layer(layer, mask)
return layer
def residual_block(
x,
n_outputs,
kernel_size,
dilation,
is_training,
mask,
glu=True,
keep_prob=1.0,
sharing=Sharing.none):
if sharing == Sharing.initial or sharing == Sharing.succeeding:
suffix = "shared"
else:
suffix = "unshared"
with tf.compat.v1.variable_scope("conv1-%s" % suffix, reuse=sharing == Sharing.succeeding):
conv1 = residual_unit(
x,
n_outputs,
kernel_size,
dilation,
is_training,
mask=mask,
glu=glu,
keep_prob=keep_prob)
with tf.compat.v1.variable_scope("conv2-%s" % suffix, reuse=sharing == Sharing.succeeding):
conv2 = residual_unit(
conv1,
n_outputs,
kernel_size,
dilation,
is_training,
mask=mask,
glu=glu,
keep_prob=keep_prob)
if x.get_shape()[2] != n_outputs:
# Note: biases could change padding timesteps, but the next layer will mask
# the resulting sequence.
x = tf.compat.v1.layers.Conv1D(n_outputs, 1)(x)
return x + conv2
def residual_unit(
x,
n_outputs,
kernel_size,
dilation,
is_training,
mask,
glu=True,
keep_prob=1.0):
if glu:
# For GLU we need the hidden representation, plus an equal number
# of parameters for weighting the hidden representation.
n_outputs *= 2
# Mask inactive time steps. This is necessary, because convolutions make
# the padding non-zero (through past timesteps). In later convolutions,
# these updated paddings would then influence time steps before the
# padding.
x = mask_layer(x, mask)
conv = WeightNorm(
tf.compat.v1.layers.Conv1D(
n_outputs,
kernel_size,
dilation_rate=dilation,
padding="same"))(x)
if glu:
left, right = tf.split(conv, num_or_size_splits=2, axis=2)
left = tf.sigmoid(left)
conv = tf.multiply(left, right)
else:
conv = tf.nn.relu(conv)
# Spatial dropout
conv = tf.contrib.layers.dropout(
conv,
keep_prob=keep_prob,
noise_shape=[
tf.shape(conv)[0],
tf.constant(1),
tf.shape(conv)[2]],
is_training=is_training)
return conv
class ConvModel(Model):
def __init__(
self,
args,
shapes):
super(ConvModel, self).__init__(args, shapes)
self.setup_placeholders()
hidden_states = dilated_convolution(
self.inputs,
args.hidden_size,
kernel_size=args.kernel_size,
n_levels=args.levels,
is_training=self.is_training,
glu=not args.relu,
keep_prob=args.keep_prob,
mask=self.mask)
# Normalize hidden layers, seems to speed up convergence.
hidden_states = tf.contrib.layers.layer_norm(
hidden_states, begin_norm_axis=-1)
logits = self.affine_transform(
"tag", hidden_states, shapes['n_labels'])
if args.crf:
loss, transitions = self.crf_loss(
"tag", logits, self.tags)
predictions, top_k_predictions = self.crf_predictions(
"tag", logits, transitions)
else:
loss = self.masked_softmax_loss(
"tag", logits, self.tags, self.mask)
predictions = self.predictions("tag", logits)
self.top_k_predictions("tag", logits, args.top_k)
acc = self.accuracy("tag", predictions, self.tags)
# Optimization with gradient clipping. Consider making the gradient
# norm a placeholder as well.
lr = tf.compat.v1.placeholder(tf.float32, [], "lr")
optimizer = tf.compat.v1.train.AdamOptimizer(lr)
if args.auto_mixed_precision:
optimizer = tf.compat.v1.train.experimental.enable_mixed_precision_graph_rewrite(optimizer)
gradients, variables = zip(*optimizer.compute_gradients(loss))
gradients, gradient_norm = tf.clip_by_global_norm(gradients, 1.0)
train_step = tf.compat.v1.train.get_or_create_global_step()
self._train_op = optimizer.apply_gradients(
zip(gradients, variables), name="train", global_step=train_step)
self.create_summary_ops(acc, gradient_norm, loss, lr)
|
the-stack_106_25250 | import configparser
from remove_errors import correct_ini_file
def open_config():
config_file = 'config.ini'
config = configparser.ConfigParser()
try:
config.read(config_file)
except configparser.MissingSectionHeaderError:
print('Found error in config file...')
print('Overcoming that error')
correct_ini_file(config_file)
print('Error Corrected!')
print('Continuing')
finally:
config.read(config_file)
# Storing the API ID and HASH
api_id = config['TELEGRAM']['api_id']
api_hash = config['TELEGRAM']['api_hash']
username = config['TELEGRAM']['username']
phone = config['TELEGRAM']['phone']
# Converting to string.
api_hash = str(api_hash)
return api_id, api_hash, username, phone |
the-stack_106_25254 | from django import forms
from contacts.models import Contact
from common.models import Comment, Attachments
from teams.models import Teams
class ContactForm(forms.ModelForm):
teams_queryset = []
teams = forms.MultipleChoiceField(choices=teams_queryset)
def __init__(self, *args, **kwargs):
assigned_users = kwargs.pop("assigned_to", [])
super(ContactForm, self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs = {"class": "form-control"}
self.fields["description"].widget.attrs.update({"rows": "6"})
if assigned_users:
self.fields["assigned_to"].queryset = assigned_users
self.fields["assigned_to"].required = False
for key, value in self.fields.items():
if key == "phone":
value.widget.attrs["placeholder"] = "+911234567890"
else:
value.widget.attrs["placeholder"] = value.label
self.fields["teams"].choices = [
(team.get("id"), team.get("name"))
for team in Teams.objects.all().values("id", "name")
]
self.fields["teams"].required = False
class Meta:
model = Contact
fields = (
"assigned_to",
"first_name",
"last_name",
"email",
"phone",
"address",
"description",
)
class ContactCommentForm(forms.ModelForm):
comment = forms.CharField(max_length=255, required=True)
class Meta:
model = Comment
fields = ("comment", "contact", "commented_by")
class ContactAttachmentForm(forms.ModelForm):
attachment = forms.FileField(max_length=1001, required=True)
class Meta:
model = Attachments
fields = ("attachment", "contact")
|
the-stack_106_25256 | import pandas as pd
from optparse import OptionParser
def get_options():
    parser = OptionParser(description=("Compare known transcripts detected by FLAIR and TALON"))
    parser.add_option("--f", dest="flair",
                      help="FLAIR mock abundance file")
    parser.add_option("--t", dest="talon",
                      help="TALON abundance file")
(options, args) = parser.parse_args()
return options
def main():
options = get_options()
flair = pd.read_csv(options.flair, sep='\t', header = 0)
talon = pd.read_csv(options.talon, sep='\t', header = 0)
flair_known = list(flair.loc[flair.transcript_novelty == "Known"].annot_transcript_id)
# Trim hyphenated part from FLAIR IDs
flair_known = set([x.split('-')[0] for x in flair_known ])
talon_known = set(talon.loc[talon.transcript_novelty == "Known"].annot_transcript_id)
detected_both = talon_known.intersection(flair_known)
flair_only = flair_known - detected_both
talon_only = talon_known - detected_both
print("N known transcripts detected in both: %s" % (len(detected_both)))
print("N known transcripts detected in FLAIR only: %s" % (len(flair_only)))
print("N known transcripts detected in TALON only: %s" % (len(talon_only)))
if __name__ == '__main__':
main()
|
the-stack_106_25257 | # coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.aes_encryption_drm import AesEncryptionDrm
from bitmovin_api_sdk.models.bitmovin_response import BitmovinResponse
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.encoding.encodings.muxings.ts.drm.aes.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.encodings.muxings.ts.drm.aes.aes_encryption_drm_list_query_params import AesEncryptionDrmListQueryParams
class AesApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(AesApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
self.customdata = CustomdataApi(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def create(self, encoding_id, muxing_id, aes_encryption_drm, **kwargs):
# type: (string_types, string_types, AesEncryptionDrm, dict) -> AesEncryptionDrm
"""Add AES encryption configuration to a TS muxing
:param encoding_id: Id of the encoding.
:type encoding_id: string_types, required
:param muxing_id: Id of the TS muxing.
:type muxing_id: string_types, required
:param aes_encryption_drm: The AES encryption configuration to be created
:type aes_encryption_drm: AesEncryptionDrm, required
:return: AESEncryption details
:rtype: AesEncryptionDrm
"""
return self.api_client.post(
'/encoding/encodings/{encoding_id}/muxings/ts/{muxing_id}/drm/aes',
aes_encryption_drm,
path_params={'encoding_id': encoding_id, 'muxing_id': muxing_id},
type=AesEncryptionDrm,
**kwargs
)
def delete(self, encoding_id, muxing_id, drm_id, **kwargs):
# type: (string_types, string_types, string_types, dict) -> BitmovinResponse
"""Delete AES encryption configuration from a TS muxing
:param encoding_id: Id of the encoding.
:type encoding_id: string_types, required
:param muxing_id: Id of the TS muxing.
:type muxing_id: string_types, required
        :param drm_id: Id of the AES encryption configuration.
:type drm_id: string_types, required
:return: Id of the AES encryption configuration.
:rtype: BitmovinResponse
"""
return self.api_client.delete(
'/encoding/encodings/{encoding_id}/muxings/ts/{muxing_id}/drm/aes/{drm_id}',
path_params={'encoding_id': encoding_id, 'muxing_id': muxing_id, 'drm_id': drm_id},
type=BitmovinResponse,
**kwargs
)
def get(self, encoding_id, muxing_id, drm_id, **kwargs):
# type: (string_types, string_types, string_types, dict) -> AesEncryptionDrm
"""AES encryption Details of a TS muxing
:param encoding_id: Id of the encoding.
:type encoding_id: string_types, required
:param muxing_id: Id of the TS muxing.
:type muxing_id: string_types, required
:param drm_id: Id of the AES encryption configuration.
:type drm_id: string_types, required
:return: AESEncryption details
:rtype: AesEncryptionDrm
"""
return self.api_client.get(
'/encoding/encodings/{encoding_id}/muxings/ts/{muxing_id}/drm/aes/{drm_id}',
path_params={'encoding_id': encoding_id, 'muxing_id': muxing_id, 'drm_id': drm_id},
type=AesEncryptionDrm,
**kwargs
)
def list(self, encoding_id, muxing_id, query_params=None, **kwargs):
# type: (string_types, string_types, AesEncryptionDrmListQueryParams, dict) -> AesEncryptionDrm
"""List AES encryption configurations of a TS muxing
:param encoding_id: Id of the encoding.
:type encoding_id: string_types, required
:param muxing_id: Id of the TS muxing.
:type muxing_id: string_types, required
:param query_params: Query parameters
:type query_params: AesEncryptionDrmListQueryParams
:return: List of AES encryption configurations
:rtype: AesEncryptionDrm
"""
return self.api_client.get(
'/encoding/encodings/{encoding_id}/muxings/ts/{muxing_id}/drm/aes',
path_params={'encoding_id': encoding_id, 'muxing_id': muxing_id},
query_params=query_params,
pagination_response=True,
type=AesEncryptionDrm,
**kwargs
)
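# Illustrative usage sketch (not part of the original module). The API key and
# resource ids below are placeholders; the commented call only works against a
# real Bitmovin account with an existing encoding and TS muxing.
if __name__ == '__main__':
    aes_api = AesApi(api_key='<API_KEY>')
    # aes_drms = aes_api.list(encoding_id='<ENCODING_ID>', muxing_id='<MUXING_ID>')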
|
the-stack_106_25264 | """
Module for dataloader
"""
from typing import List, Tuple
import numpy as np
import torch
from torch.utils.data import Dataset, Sampler
from dataset.database import SedDoaDatabase
class SedDoaChunkDataset(Dataset):
"""
Chunk dataset for SED or DOA task. For training and chunk evaluation.
"""
def __init__(self, db_data, chunk_len, transform=None, is_mixup: bool = False):
self.features = db_data['features']
self.sed_targets = db_data['sed_targets']
self.doa_targets = db_data['doa_targets']
self.chunk_idxes = db_data['chunk_idxes']
self.filename_list = db_data['filename_list']
self.chunk_len = chunk_len
self.transform = transform
self.is_mixup = is_mixup
self.n_samples = len(self.chunk_idxes)
def __len__(self):
"""
        Total number of training samples.
"""
return len(self.chunk_idxes)
def __getitem__(self, index):
"""
Generate one sample of data
"""
# Select sample
chunk_idx = self.chunk_idxes[index]
# get filename
filename = self.filename_list[index]
# Load data and get label
X = self.features[:, chunk_idx: chunk_idx + self.chunk_len, :] # (n_channels, n_timesteps, n_mels)
sed_labels = self.sed_targets[chunk_idx: chunk_idx + self.chunk_len] # (n_timesteps, n_classes)
doa_labels = self.doa_targets[chunk_idx: chunk_idx + self.chunk_len] # (n_timesteps, x*n_classes)
# Mixup mainly for SED
if self.is_mixup:
a1 = np.random.beta(0.5, 0.5)
if np.random.rand() < 0.8 and np.abs(a1 - 0.5) > 0.2:
random_index = np.random.randint(0, self.n_samples, 1)[0]
random_chunk_idx = self.chunk_idxes[random_index]
X_1 = self.features[:, random_chunk_idx: random_chunk_idx + self.chunk_len, :]
sed_labels_1 = self.sed_targets[random_chunk_idx: random_chunk_idx + self.chunk_len]
doa_labels_1 = self.doa_targets[random_chunk_idx: random_chunk_idx + self.chunk_len]
X = a1 * X + (1 - a1) * X_1
sed_labels = a1 * sed_labels + (1 - a1) * sed_labels_1
doa_labels = a1 * doa_labels + (1 - a1) * doa_labels_1
if self.transform is not None:
X = self.transform(X)
return X, sed_labels, doa_labels, filename
class SeldChunkDataset(Dataset):
"""
Chunk dataset for SELD task
"""
pass
if __name__ == '__main__':
# test dataloader
db = SedDoaDatabase()
data_db = db.get_split(split='val')
# create train dataset
dataset = SedDoaChunkDataset(db_data=data_db, chunk_len=db.chunk_len)
print('Number of training samples: {}'.format(len(dataset)))
# load one sample
index = np.random.randint(len(dataset))
sample = dataset[index]
for item in sample[:-1]:
print(item.shape)
print(sample[-1])
# test data generator
batch_size = 8
dataloader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=4)
print('Number of batches: {}'.format(len(dataloader))) # steps_per_epoch
for train_iter, (X, sed_labels, doa_labels, filenames) in enumerate(dataloader):
if train_iter == 0:
print(X.dtype)
print(X.shape)
print(sed_labels.dtype)
print(sed_labels.shape)
print(doa_labels.dtype)
print(doa_labels.shape)
print(type(filenames))
print(filenames)
break |
the-stack_106_25265 | from PIL import Image
import torch.utils.data as data
import os
from glob import glob
import torch
import torchvision.transforms.functional as F
from torchvision import transforms
import random
import numpy as np
class Crowd(data.Dataset):
def __init__(self, root_path, crop_size,
downsample_ratio, is_gray=False,
method='val'):
self.root_path = root_path
self.im_list = sorted(glob(os.path.join(self.root_path, '*.jpg')))
if method not in ['val']:
raise Exception("not implement")
self.method = method
self.c_size = crop_size
self.d_ratio = downsample_ratio
assert self.c_size % self.d_ratio == 0
self.dc_size = self.c_size // self.d_ratio
if is_gray:
self.trans = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
else:
self.trans = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def __len__(self):
return len(self.im_list)
def __getitem__(self, item):
img_path = self.im_list[item]
gd_path = img_path.replace('jpg', 'npy')
try:
img = Image.open(img_path).convert('RGB')
        except Exception:
            print(os.path.basename(img_path).split('.')[0])
            raise
if self.method == 'val':
keypoints = np.load(gd_path)
img = self.trans(img)
name = os.path.basename(img_path).split('.')[0]
return img, len(keypoints), name |
the-stack_106_25269 | # Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import confluent.core as core
import confluent.messages as msg
import pyghmi.exceptions as pygexc
import confluent.exceptions as exc
def update(nodes, element, configmanager, inputdata):
emebs = configmanager.get_node_attributes(
nodes, (u'enclosure.manager', u'enclosure.bay'))
for node in nodes:
try:
em = emebs[node]['enclosure.manager']['value']
eb = emebs[node]['enclosure.bay']['value']
except KeyError:
yield msg.ConfluentNodeError(
node,
'Reseat is only supported on servers in an enclosure, and '
'with enclosure.manager and enclosure.bay defined')
continue
try:
for rsp in core.handle_path(
'/nodes/{0}/_enclosure/reseat_bay'.format(em),
'update', configmanager,
inputdata={'reseat': int(eb)}):
yield rsp
except pygexc.UnsupportedFunctionality as uf:
yield msg.ConfluentNodeError(node, str(uf))
except exc.TargetEndpointUnreachable as uf:
yield msg.ConfluentNodeError(node, str(uf))
|
the-stack_106_25272 | # Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe
def test_inplace_on_requires_grad(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1), nn.ReLU(inplace=True))
model = Pipe(model, checkpoint="always")
x = torch.rand(1)
y = model(x).local_value()
message = r"a leaf Variable that requires grad .* used in an in-place operation."
with pytest.raises(RuntimeError, match=message):
y.backward()
@pytest.mark.xfail(strict=True)
def test_inplace_on_not_requires_grad(setup_rpc):
# In-place operation on a tensor not requiring grad doesn't cause a
# RuntimeError. Currently, we cannot detect this case.
model = nn.Sequential(nn.ReLU(inplace=True))
model = Pipe(model, [1], devices=["cpu"], checkpoint="always")
x = torch.rand(1)
y = model(x).local_value()
del model
message = r"a leaf Variable that requires grad .* used in an in-place operation."
with pytest.raises(RuntimeError, match=message):
y.backward()
@pytest.mark.xfail(strict=True)
def test_inplace_incorrect_grad(setup_rpc):
class M(nn.Module):
def forward(self, foo_bar):
# 'foo' requires grad but 'bar' does not. In-place operation on
# 'bar' won't cause a RuntimeError.
foo, bar = foo_bar
# add_(1) is not idempotent, in contrast to relu_(). If it is
            # executed multiple times, it will accumulate each difference onto
# 'bar'.
bar.add_(1)
# 'bar' is still captured by checkpointing. 'foo' will get
# incorrect grad.
return foo * bar
model = nn.Sequential(M())
model = Pipe(model, [1], devices=["cpu"], checkpoint="always")
foo = torch.tensor([1.0], requires_grad=True)
bar = torch.tensor([1.0])
output = model((foo, bar)).local_value()
del model
output.backward()
# The gradient of 'foo' should be 2, but it is 3 actually because
# bar.add_(1) was executed twice due to checkpointing.
assert foo.grad.item() == 2.0
|
the-stack_106_25273 | # Copyright (c) 2014-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2014-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
# Stdlib imports
import os
import ssl
import time
import yaml
import atexit
from copy import copy
from collections import namedtuple, MutableMapping
# Third party imports
from pyVmomi import vim, vmodl
from pyVim.connect import SmartConnect, SmartConnectNoSSL, Disconnect
# Cloudify imports
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError, OperationRetry
# This package imports
from ..constants import (
NETWORK_ID,
ASYNC_TASK_ID,
TASK_CHECK_SLEEP,
ASYNC_RESOURCE_ID,
DEFAULT_CONFIG_PATH
)
from .._compat import (
unquote,
text_type
)
from ..utils import (
logger,
)
class Config(object):
# Required during vsphere manager bootstrap
# Hard-coded to old path so old manager blueprints aren't broken
CONNECTION_CONFIG_PATH_DEFAULT = '/root/connection_config.yaml'
_path_options = [
{'source': '/root/connection_config.yaml', 'warn': True},
{'source': '~/connection_config.yaml', 'warn': True},
{'source': DEFAULT_CONFIG_PATH, 'warn': False},
{'env': True, 'source': 'CONNECTION_CONFIG_PATH', 'warn': True},
{'env': True, 'source': 'CFY_VSPHERE_CONFIG_PATH', 'warn': False},
]
def _find_config_file(self):
selected = DEFAULT_CONFIG_PATH
warnings = []
for path in self._path_options:
source = path['source']
if path.get('env'):
source = os.getenv(source)
if source:
source = os.path.expanduser(source)
if os.path.isfile(source):
selected = source
if path['warn']:
warnings.append(path['source'])
if warnings:
logger().warn(
"Deprecated configuration options were found: {0}".format(
"; ".join(warnings)),
)
return selected
def get(self):
cfg = {}
config_path = self._find_config_file()
try:
with open(config_path) as f:
cfg = yaml.safe_load(f.read())
except IOError:
logger().warn(
"Unable to read configuration file {config_path}.".format(
config_path=config_path))
return cfg
class _ContainerView(object):
def __init__(self, obj_type, service_instance):
self.si = service_instance
self.obj_type = obj_type
def __enter__(self):
container = self.si.content.rootFolder
self.view_ref = self.si.content.viewManager.CreateContainerView(
container=container,
type=self.obj_type,
recursive=True,
)
return self.view_ref
def __exit__(self, *args):
self.view_ref.Destroy()
class CustomValues(MutableMapping):
"""dict interface to ManagedObject customValue"""
def __init__(self, client, thing):
"""
client: a VsphereClient instance
thing: a NamedTuple containing a ManagedObject-derived class as its
`obj` attribute: as supplied by `client._get_obj_by_name`
"""
self.client = client
self.thing = thing
def __getitem__(self, key):
key_id = self._get_key_id(key)
for value in self.thing.obj.customValue:
if value.key == key_id:
return value.value
raise KeyError(key)
def __setitem__(self, key, value):
self._get_key_id(key, create=True)
return self.thing.obj.setCustomValue(key, value)
def __delitem__(self, key):
raise NonRecoverableError("Unable to unset custom values.")
def __iter__(self):
for value in self.thing.obj.customValue:
yield self._get_key_name(value.key)
def __len__(self):
return len(self.thing.obj.customValue)
def _get_key_id(self, k, create=False):
for key in self.client._get_custom_keys():
if key.name == k:
return key.key
if create:
try:
key = (
self.client.si.content.customFieldsManager.
AddCustomFieldDef)(name=k)
except vim.fault.DuplicateName:
self.client._get_custom_keys(use_cache=False)
return self._get_key_id(k, create=create)
return key.key
raise KeyError(k)
def _get_key_name(self, k):
for key in self.client._get_custom_keys():
if key.key == k:
return key.name
raise ValueError(k)
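# Illustrative usage sketch (not part of the original module). The connection
# settings, VM name, and the keyword argument names of _get_obj_by_name are
# assumptions; running this needs a live vCenter, so it is shown as comments:
#
#     client = VsphereClient().get(config={'host': 'vc.example.com',
#                                          'username': 'user',
#                                          'password': 'secret',
#                                          'port': 443,
#                                          'allow_insecure': True})
#     vm = client._get_obj_by_name(vimtype=vim.VirtualMachine, name='my-vm')
#     CustomValues(client, vm)['owner'] = 'team-a'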
class VsphereClient(object):
def __init__(self, ctx_logger=None):
self.cfg = {}
self._cache = {}
self._logger = ctx_logger or logger()
def get(self, config=None, *_, **__):
static_config = Config().get()
self.cfg.update(static_config)
if config:
self.cfg.update(config)
ret = self.connect(self.cfg)
ret.format = 'yaml'
return ret
def connect(self, cfg):
host = cfg['host']
username = cfg['username']
password = cfg['password']
port = cfg['port']
certificate_path = cfg.get('certificate_path')
# Until the next major release this will have limited effect, but is
# in place to allow a clear path to the next release for users
allow_insecure = cfg.get('allow_insecure', False)
ssl_context = None
if certificate_path and allow_insecure:
raise NonRecoverableError(
'Cannot connect when certificate_path and allow_insecure '
'are both set. Unable to determine whether connection should '
'be secure or insecure.'
)
elif certificate_path:
if not hasattr(ssl, '_create_default_https_context'):
raise NonRecoverableError(
'Cannot create secure connection with this version of '
'python. This functionality requires at least python '
'2.7.9 and has been confirmed to work on at least 2.7.12.'
)
if not os.path.exists(certificate_path):
raise NonRecoverableError(
'Certificate was not found in {path}.'.format(
path=certificate_path,
)
)
elif not os.path.isfile(certificate_path):
raise NonRecoverableError(
'Found directory at {path}, but the certificate_path '
'must be a file.'.format(
path=certificate_path,
)
)
try:
# We want to load the cert into the existing default context
# in case any other python modules have already defined their
# default https context.
ssl_context = ssl._create_default_https_context()
if ssl_context.verify_mode == 0:
raise NonRecoverableError(
'Default SSL context is not set to verify. '
'Cannot use a certificate while other imported '
'modules are disabling verification on the default '
'SSL context.'
)
ssl_context.load_verify_locations(certificate_path)
except ssl.SSLError as err:
if 'unknown error' in text_type(err).lower() or \
'no certificate or crl found' in \
text_type(err).lower():
raise NonRecoverableError(
'Could not create SSL context with provided '
'certificate {path}. This problem may be caused by '
'the certificate not being in the correct format '
'(PEM).'.format(path=certificate_path))
else:
raise
elif not allow_insecure:
self._logger.warn(
'DEPRECATED: certificate_path was not supplied. '
'A certificate will be required in the next major '
'release of the plugin if allow_insecure is not set '
'to true.'
)
try:
if allow_insecure:
self._logger.warn(
'SSL verification disabled for all legacy code. '
'Please note that this may result in other code '
'from the same blueprint running with reduced '
'security.'
)
self.si = SmartConnectNoSSL(host=host,
user=username,
pwd=password,
port=int(port))
else:
self.si = SmartConnect(host=host,
user=username,
pwd=password,
port=int(port),
sslContext=ssl_context)
atexit.register(Disconnect, self.si)
return self
except vim.fault.InvalidLogin:
raise NonRecoverableError(
'Could not login to vSphere on {host} with provided '
'credentials'.format(host=host)
)
except vim.fault.HostConnectFault as err:
if 'certificate verify failed' in err.msg:
raise NonRecoverableError(
'Could not connect to vSphere on {host} with provided '
'certificate {path}. Certificate was not valid.'.format(
host=host,
path=certificate_path,
)
)
else:
raise
def is_server_suspended(self, server):
return server.summary.runtime.powerState.lower() == "suspended"
def _convert_props_list_to_dict(self, props_list):
the_dict = {}
split_list = [
item.split('.', 1) for item in props_list
]
vals = [
item[0] for item in split_list
if len(item) == 1
]
keys = [
item for item in split_list
if len(item) > 1
]
the_dict['_values'] = set(vals)
for item in keys:
key_name = item[0]
sub_keys = item[1:]
dict_entry = the_dict.get(key_name, {'_values': set()})
update_dict = self._convert_props_list_to_dict(
sub_keys
)
the_dict[key_name] = self._merge_props_dicts(
dict_entry,
update_dict,
)
return the_dict
def _merge_props_dicts(self, dict1, dict2):
new_dict = {}
keys = set(list(dict1.keys()) + list(dict2.keys()))
keys.remove('_values')
new_dict['_values'] = dict1['_values'] | dict2['_values']
for key in keys:
new_dict[key] = self._merge_props_dicts(
dict1.get(key, {'_values': set()}),
dict2.get(key, {'_values': set()})
)
return new_dict
def _get_platform_sub_results(self, platform_results, target_key):
sub_results = {}
for key, value in platform_results.items():
key_components = key.split('.', 1)
if key_components[0] == target_key:
sub_results[key_components[1]] = value
return sub_results
def _get_normalised_name(self, name, tolower=True):
"""
Get the normalised form of a platform entity's name.
"""
name = unquote(name)
return name.lower() if tolower else name
def _make_cached_object(self, obj_name, props_dict, platform_results,
root_object=True, other_entity_mappings=None):
just_keys = list(props_dict.keys())
# Discard the _values key if it is present
if '_values' in just_keys:
just_keys.remove('_values')
object_keys = copy(just_keys)
object_keys.extend(props_dict.get('_values', []))
if root_object:
object_keys.extend(['id', 'obj'])
object_keys = set(object_keys)
obj = namedtuple(
obj_name,
object_keys,
)
args = {}
for key in props_dict.get('_values', []):
args[key] = platform_results[key]
if root_object:
args['id'] = platform_results['obj']._moId
args['obj'] = platform_results['obj']
if root_object and other_entity_mappings:
for map_type in ('static', 'dynamic', 'single'):
mappings = other_entity_mappings.get(map_type, {})
for mapping, other_entities in mappings.items():
if map_type == 'single':
mapped = None
map_id = args[mapping]._moId
for entity in other_entities:
if entity.id == map_id:
mapped = entity
break
else:
mapping_ids = [
map_obj._moId for map_obj in args[mapping]
]
mapped = [
other_entity for other_entity in other_entities
if other_entity.id in mapping_ids
]
if map_type == 'static' and \
len(mapped) != len(args[mapping]):
mapped = None
if mapped is None:
raise OperationRetry(
'Platform {entity} configuration changed '
'while building {obj_name} cache.'.format(
entity=mapping,
obj_name=obj_name,
)
)
args[mapping] = mapped
for key in just_keys:
sub_object_name = '{name}_{sub}'.format(
name=obj_name,
sub=key,
)
args[key] = self._make_cached_object(
obj_name=sub_object_name,
props_dict=props_dict[key],
platform_results=self._get_platform_sub_results(
platform_results=platform_results,
target_key=key,
),
root_object=False,
)
if 'name' in args:
args['name'] = self._get_normalised_name(args['name'], False)
result = obj(
**args
)
return result
def _get_entity(self,
entity_name,
props,
vimtype,
use_cache=True,
other_entity_mappings=None,
skip_broken_objects=False):
if entity_name in self._cache and use_cache:
return self._cache[entity_name]
platform_results = self._collect_properties(
vimtype,
path_set=props,
)
props_dict = self._convert_props_list_to_dict(props)
results = []
for result in platform_results:
try:
results.append(
self._make_cached_object(
obj_name=entity_name,
props_dict=props_dict,
platform_results=result,
other_entity_mappings=other_entity_mappings,
)
)
except KeyError as err:
message = (
'Could not retrieve all details for {type} object. '
'{err} was missing.'.format(
type=entity_name,
err=text_type(err)
)
)
if hasattr(result, 'name'):
message += (
' Object name was {name}.'.format(name=result.name)
)
if hasattr(result, '_moId'):
message += (
' Object ID was {id}.'.format(id=result._moId)
)
if skip_broken_objects:
self._logger.warn(message)
else:
raise NonRecoverableError(message)
self._cache[entity_name] = results
return results
def _build_resource_pool_object(self, base_pool_id, resource_pools):
rp_object = namedtuple(
'resource_pool',
['name', 'resourcePool', 'id', 'obj'],
)
this_pool = None
for pool in resource_pools:
if pool['obj']._moId == base_pool_id:
this_pool = pool
break
if this_pool is None:
raise OperationRetry(
'Resource pools changed while getting resource pool details.'
)
if 'name' in this_pool:
this_pool['name'] = self._get_normalised_name(this_pool['name'],
False)
base_object = rp_object(
name=this_pool['name'],
id=this_pool['obj']._moId,
resourcePool=[],
obj=this_pool['obj'],
)
for item in this_pool['resourcePool']:
base_object.resourcePool.append(self._build_resource_pool_object(
base_pool_id=item._moId,
resource_pools=resource_pools,
))
return base_object
def _get_resource_pools(self, use_cache=True):
if 'resource_pool' in self._cache and use_cache:
return self._cache['resource_pool']
properties = [
'name',
'resourcePool',
]
results = self._collect_properties(
vim.ResourcePool,
path_set=properties,
)
resource_pools = []
for item in results:
resource_pools.append(self._build_resource_pool_object(
base_pool_id=item['obj']._moId,
resource_pools=results
))
self._cache['resource_pool'] = resource_pools
return resource_pools
def _get_vm_folders(self, use_cache=True):
properties = [
'name'
]
return self._get_entity(
entity_name='vm_folder',
props=properties,
vimtype=vim.Folder,
use_cache=use_cache,
)
def _get_clusters(self, use_cache=True):
properties = [
'name',
'resourcePool',
]
return self._get_entity(
entity_name='cluster',
props=properties,
vimtype=vim.ClusterComputeResource,
use_cache=use_cache,
other_entity_mappings={
'single': {
'resourcePool': self._get_resource_pools(
use_cache=use_cache,
),
},
},
)
def _get_datacenters(self, use_cache=True):
properties = [
'name',
'vmFolder',
]
return self._get_entity(
entity_name='datacenter',
props=properties,
vimtype=vim.Datacenter,
use_cache=use_cache,
)
def _get_datastores(self, use_cache=True):
properties = [
'name',
'overallStatus',
'summary.accessible',
'summary.freeSpace',
]
return self._get_entity(
entity_name='datastore',
props=properties,
vimtype=vim.Datastore,
use_cache=use_cache
)
def _get_connected_network_name(self, network):
if network.get('from_relationship'):
net_id = None
found = False
for relationship in ctx.instance.relationships:
if relationship.target.node.name == network['name']:
props = relationship.target.instance.runtime_properties
net_id = props.get(NETWORK_ID)
found = True
break
if not found:
raise NonRecoverableError(
'Could not find any relationships to a node called '
'"{name}", so {prop} could not be retrieved.'.format(
name=network['name'],
prop=NETWORK_ID,
)
)
elif net_id is None:
raise NonRecoverableError(
'Could not get a {prop} runtime property from '
'relationship to a node called "{name}".'.format(
name=network['name'],
prop=NETWORK_ID,
)
)
if isinstance(net_id, list):
# We won't alert on switch_distributed mismatch here, as the
# validation logic handles that
# Standard port groups will have multiple IDs, but since we
# use the name, just using the first one will give the right
# name
net_id = net_id[0]
net = self._get_obj_by_id(
vimtype=vim.Network,
id=net_id,
)
if net is None:
raise NonRecoverableError(
'Could not get network given network ID: {id}'.format(
id=net_id,
)
)
return net.name
else:
return network['name']
def _get_networks(self, use_cache=True):
if 'network' in self._cache and use_cache:
return self._cache['network']
properties = [
'name',
'host',
]
net_object = namedtuple(
'network',
['name', 'id', 'host', 'obj'],
)
dvnet_object = namedtuple(
'distributed_network',
['name', 'id', 'host', 'obj', 'key', 'config'],
)
host_stub = namedtuple(
'host_stub',
['id'],
)
results = self._collect_properties(
vim.Network,
path_set=properties,
)
extra_dv_port_group_details = self._get_extra_dv_port_group_details(
use_cache
)
networks = []
for item in results:
if 'name' in item:
item['name'] = self._get_normalised_name(item['name'], False)
network = net_object(
name=item['name'],
id=item['obj']._moId,
host=[host_stub(id=h._moId) for h in item['host']],
obj=item['obj'],
)
if self._port_group_is_distributed(network):
extras = extra_dv_port_group_details[item['obj']._moId]
network = dvnet_object(
name=item['name'],
id=item['obj']._moId,
obj=item['obj'],
host=[host_stub(id=h._moId) for h in item['host']],
key=extras['key'],
config=extras['config'],
)
networks.append(network)
self._cache['network'] = networks
return networks
def _get_dv_networks(self, use_cache=True):
return [
network for network in self._get_networks(use_cache)
if self._port_group_is_distributed(network)
]
def _get_standard_networks(self, use_cache=True):
return [
network for network in self._get_networks(use_cache)
if not self._port_group_is_distributed(network)
]
def _get_extra_dv_port_group_details(self, use_cache=True):
if 'dv_pg_extra_detail' in self._cache and use_cache:
return self._cache['dv_pg_extra_detail']
properties = [
'key',
'config.distributedVirtualSwitch',
]
config_object = namedtuple(
'dv_port_group_config',
['distributedVirtualSwitch'],
)
results = self._collect_properties(
vim.dvs.DistributedVirtualPortgroup,
path_set=properties,
)
dvswitches = self._get_dvswitches(use_cache)
extra_details = {}
for item in results:
try:
dvswitch_id = item['config.distributedVirtualSwitch']._moId
except KeyError:
                ctx.logger.info(
                    'Get extra DV port group details. '
                    'Ignoring item {item}'.format(item=item))
                continue
dvswitch = None
for dvs in dvswitches:
if dvswitch_id == dvs.id:
dvswitch = dvs
break
if dvswitch is None:
raise OperationRetry(
'DVswitches on platform changed while getting port '
'group details.'
)
extra_details[item['obj']._moId] = {
'key': item['key'],
'config': config_object(distributedVirtualSwitch=dvswitch),
}
self._cache['dv_pg_extra_detail'] = extra_details
return extra_details
def _get_dvswitches(self, use_cache=True):
properties = [
'name',
'uuid',
]
return self._get_entity(
entity_name='dvswitch',
props=properties,
vimtype=vim.dvs.VmwareDistributedVirtualSwitch,
use_cache=use_cache,
)
def _get_vms(self, use_cache=True, skip_broken_vms=True):
properties = [
'name',
'summary',
'config.hardware.device',
'config.hardware.memoryMB',
'config.hardware.numCPU',
'datastore',
'guest.guestState',
'guest.net',
'network',
]
return self._get_entity(
entity_name='vm',
props=properties,
vimtype=vim.VirtualMachine,
use_cache=use_cache,
other_entity_mappings={
'static': {
'network': self._get_networks(use_cache=use_cache),
'datastore': self._get_datastores(use_cache=use_cache),
},
},
# VMs still being cloned won't return everything we need
skip_broken_objects=skip_broken_vms,
)
def _get_computes(self, use_cache=True):
properties = [
'name',
'resourcePool',
]
return self._get_entity(
entity_name='compute',
props=properties,
vimtype=vim.ComputeResource,
use_cache=use_cache,
other_entity_mappings={
'single': {
'resourcePool': self._get_resource_pools(
use_cache=use_cache,
),
},
},
)
def _get_hosts(self, use_cache=True):
properties = [
'name',
'parent',
'hardware.memorySize',
'hardware.cpuInfo.numCpuThreads',
'overallStatus',
'network',
'summary.runtime.connectionState',
'summary.runtime.inMaintenanceMode',
'vm',
'datastore',
'config.network.vswitch',
'configManager',
]
# A host's parent can be either a cluster or a compute, so we handle
# both here.
return self._get_entity(
entity_name='host',
props=properties,
vimtype=vim.HostSystem,
use_cache=use_cache,
other_entity_mappings={
'single': {
'parent': self._get_clusters(
use_cache=use_cache) + self._get_computes(
use_cache=use_cache),
},
'dynamic': {
'vm': self._get_vms(use_cache=use_cache),
'network': self._get_networks(use_cache=use_cache),
},
'static': {
'datastore': self._get_datastores(use_cache=use_cache),
},
},
skip_broken_objects=True,
)
def _get_hosts_in_tree(self, host_folder):
def get_vmware_hosts(tree_node):
# Traverse the tree to find any hosts.
hosts = []
if hasattr(tree_node, "host"):
# If we find hosts under this node we are done.
hosts.extend(list(tree_node.host))
elif hasattr(tree_node, "childEntity"):
# If there are no hosts look under its children
for entity in tree_node.childEntity:
hosts.extend(get_vmware_hosts(entity))
return hosts
# Get all of the hosts in this hosts folder, that includes looking
# in subfolders and clusters.
vmware_hosts = get_vmware_hosts(host_folder)
# Cloudify uses a slightly different style of object to the raw VMWare
# API. To convert one to the other look up object IDs and compare.
vmware_host_ids = [host._GetMoId() for host in vmware_hosts]
cloudify_host_dict = {cloudify_host.obj._GetMoId(): cloudify_host
for cloudify_host in self._get_hosts()}
cloudify_hosts = [cloudify_host_dict[id] for id in vmware_host_ids]
return cloudify_hosts
def _convert_vmware_port_group_to_cloudify(self, port_group):
port_group_id = port_group._moId
for cloudify_port_group in self._get_networks():
if cloudify_port_group.obj._moId == port_group_id:
break
else:
raise RuntimeError(
"Couldn't find cloudify representation of port group {name}"
.format(name=port_group.name))
return cloudify_port_group
def _get_tasks(self, *_, **__):
task_object = namedtuple(
'task',
['id', 'obj'],
)
return [task_object(id=task._moId, obj=task)
for task in self.si.content.taskManager.recentTask]
def _get_getter_method(self, vimtype):
getter_method = {
vim.VirtualMachine: self._get_vms,
vim.ResourcePool: self._get_resource_pools,
vim.ClusterComputeResource: self._get_clusters,
vim.Datastore: self._get_datastores,
vim.Datacenter: self._get_datacenters,
vim.Network: self._get_networks,
vim.dvs.VmwareDistributedVirtualSwitch: self._get_dvswitches,
vim.DistributedVirtualSwitch: self._get_dvswitches,
vim.HostSystem: self._get_hosts,
vim.dvs.DistributedVirtualPortgroup: self._get_dv_networks,
vim.Folder: self._get_vm_folders,
vim.Task: self._get_tasks,
vim.ComputeResource: self._get_computes}.get(vimtype)
if not getter_method:
raise NonRecoverableError(
'Cannot retrieve objects for {vimtype}'.format(
vimtype=vimtype))
return getter_method
def _collect_properties(self, obj_type, path_set=None):
"""
Collect properties for managed objects from a view ref
        Check the vSphere API documentation for examples of retrieving
        object properties:
            - http://goo.gl/erbFDz
        Args:
            obj_type (pyVmomi.vim.*): Type of managed object
            path_set (list): List of properties to retrieve
Returns:
A list of properties for the managed objects
"""
with _ContainerView([obj_type], self.si) as view_ref:
collector = self.si.content.propertyCollector
# Create object specification to define the starting point of
# inventory navigation
obj_spec = vmodl.query.PropertyCollector.ObjectSpec()
obj_spec.obj = view_ref
obj_spec.skip = True
# Create a traversal specification to identify the path for
# collection
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()
traversal_spec.name = 'traverseEntities'
traversal_spec.path = 'view'
traversal_spec.skip = False
traversal_spec.type = view_ref.__class__
obj_spec.selectSet = [traversal_spec]
            # Identify the properties to be retrieved
property_spec = vmodl.query.PropertyCollector.PropertySpec()
property_spec.type = obj_type
if not path_set:
property_spec.all = True
property_spec.pathSet = path_set
# Add the object and property specification to the
# property filter specification
filter_spec = vmodl.query.PropertyCollector.FilterSpec()
filter_spec.objectSet = [obj_spec]
filter_spec.propSet = [property_spec]
# Retrieve properties
props = collector.RetrieveContents([filter_spec])
data = []
for obj in props:
properties = {}
for prop in obj.propSet:
properties[prop.name] = prop.val
properties['obj'] = obj.obj
data.append(properties)
return data
def _get_obj_by_name(self, vimtype, name, use_cache=True):
entities = self._get_getter_method(vimtype)(use_cache)
name = self._get_normalised_name(name)
for entity in entities:
if name == entity.name.lower():
return entity
def _get_obj_by_id(self, vimtype, id, use_cache=True):
entities = self._get_getter_method(vimtype)(use_cache)
for entity in entities:
if entity.id == id:
return entity
def _wait_for_task(self,
task=None,
instance=None,
max_wait_time=None,
resource_id=None):
instance = instance or ctx.instance
if not isinstance(max_wait_time, int):
ctx.logger.warn(
'The provided max_wait_time {p} is not an integer. '
'Using default 300.'.format(p=max_wait_time))
max_wait_time = 300
if not task and instance:
task_id = instance.runtime_properties.get(ASYNC_TASK_ID)
resource_id = instance.runtime_properties.get(ASYNC_RESOURCE_ID)
self._logger.info('Check task_id {task_id}'.format(
task_id=task_id))
# no saved tasks
if not task_id:
return
else:
task_id = task._moId
if instance:
self._logger.info('Save task_id {task_id}'.format(
task_id=task_id))
instance.runtime_properties[ASYNC_TASK_ID] = task_id
instance.runtime_properties[ASYNC_RESOURCE_ID] = resource_id
# save flag as current state before external call
instance.update()
if not task:
task_obj = self._get_obj_by_id(vim.Task, task_id)
if not task_obj:
self._logger.info(
'No task_id? {task_id}'.format(task_id=task_id))
if instance:
# no such tasks
del instance.runtime_properties[ASYNC_TASK_ID]
# save flag as current state before external call
instance.update()
return
task = task_obj.obj
retry_count = max_wait_time // TASK_CHECK_SLEEP
while task.info.state in (vim.TaskInfo.State.queued,
vim.TaskInfo.State.running):
time.sleep(TASK_CHECK_SLEEP)
self._logger.debug(
'Task state {state} left {step} seconds'.format(
state=task.info.state,
step=(retry_count * TASK_CHECK_SLEEP)))
# check async
if instance and retry_count <= 0:
raise OperationRetry(
'Task {task_id} is not finished yet.'.format(
task_id=task._moId))
retry_count -= 1
# we correctly finished, and need to cleanup
if instance:
self._logger.info('Cleanup task_id {task_id}'.format(
task_id=task_id))
del instance.runtime_properties[ASYNC_TASK_ID]
del instance.runtime_properties[ASYNC_RESOURCE_ID]
# save flag as current state before external call
instance.update()
if task.info.state != vim.TaskInfo.State.success:
raise NonRecoverableError(
"Error during executing task on vSphere: '{0}'".format(
task.info.error))
elif instance and resource_id:
self._logger.info('Save resource_id {resource_id}'.format(
resource_id=task.info.result._moId))
instance.runtime_properties[resource_id] = task.info.result._moId
# save flag as current state before external call
instance.update()
def _port_group_is_distributed(self, port_group):
return port_group.id.startswith('dvportgroup')
def get_vm_networks(self, vm):
"""
Get details of every network interface on a VM.
A list of dicts with the following network interface information
will be returned:
{
'name': Name of the network,
'distributed': True if the network is distributed, otherwise
False,
'mac': The MAC address as provided by vsphere,
}
"""
nics = []
self._logger.debug('Getting NIC list.')
for dev in vm.config.hardware.device:
if hasattr(dev, 'macAddress'):
nics.append(dev)
self._logger.debug('Got NICs: {nics}'.format(nics=nics))
networks = []
for nic in nics:
self._logger.debug('Checking details for NIC {nic}'
.format(nic=nic))
distributed = hasattr(nic.backing, 'port') and isinstance(
nic.backing.port,
vim.dvs.PortConnection,
)
nsxt_switch = hasattr(nic.backing, 'opaqueNetworkId')
network_name = None
if nsxt_switch:
network_name = nic.backing.opaqueNetworkId
self._logger.debug(
'Found NIC was on port group {network}'.format(
network=network_name,
)
)
elif distributed:
mapping_id = nic.backing.port.portgroupKey
self._logger.debug(
'Found NIC was on distributed port group with port group '
'key {key}'.format(key=mapping_id)
)
for network in vm.network:
if hasattr(network, 'key'):
self._logger.debug(
'Checking for match on network with key: '
'{key}'.format(key=network.key)
)
if mapping_id == network.key:
network_name = network.name
self._logger.debug(
'Found NIC was distributed and was on '
'network {network}'.format(
network=network_name,
)
)
else:
# If not distributed, the port group name can be retrieved
# directly
network_name = nic.backing.deviceName
self._logger.debug(
'Found NIC was on port group {network}'.format(
network=network_name,
)
)
if network_name is None:
raise NonRecoverableError(
'Could not get network name for device with MAC address '
'{mac} on VM {vm}'.format(mac=nic.macAddress, vm=vm.name)
)
networks.append({
'name': network_name,
'distributed': distributed,
'mac': nic.macAddress,
'nsxt_switch': nsxt_switch
})
return networks
def _get_custom_keys(self, use_cache=True):
if not use_cache or 'custom_keys' not in self._cache:
self._cache['custom_keys'] = (
self.si.content.customFieldsManager.field
)
return self._cache['custom_keys']
def custom_values(self, thing):
return CustomValues(self, thing)
def add_custom_values(self, thing, attributes):
if attributes:
values = self.custom_values(thing)
values.update(attributes)
self._logger.debug('Added custom attributes')
|
the-stack_106_25274 |
import sys
import os
from ppci import api
from ppci.utils.reporting import HtmlReportGenerator
from ppci.lang.basic.c64 import BasicLine, write_basic_program
arch = api.get_arch('mcs6500')
print('Using arch', arch)
if len(sys.argv) > 1:
with open(sys.argv[1], 'r') as f:
text_message = f.read()
else:
text_message = 'you can provide a text file to customize this message'
with open('report.html', 'w') as f2, HtmlReportGenerator(f2) as reporter:
with open('add.c') as f:
oj = api.cc(f, arch, reporter=reporter)
print(oj)
with open('hello.s') as f:
oj = api.asm(f, arch)
oj = api.link([oj], layout='layout.mmp')
print(oj)
with open(os.path.join('c64disk', 'hello.prg'), 'wb') as f:
# Generate tokenized basic:
load_address = 0x801
basic_program = bytes([
0x01, 0x08, # Load address
0x09, 0x08, # start of the next line
0x0a, 0x00, # line number 10 word in little endianness
# 0x9e, 0x20, 0x34, 0x30, 0x39, 0x36, 0x00, # SYS 4096
0x99, # Print token!
0x20, 0x31, # Space and number 1
0x0, # End of line
0x1a, 0x00, # 0x1a line number in little endianness
0x99, # Print token!
0x20, 0x22 # Space and "
])
txt = text_message.upper().encode('ascii')
basic_program += txt
basic_program += bytes([
0x22, # "
0x0, # End of line
0x00, 0x00 # End of program
# 0x80, # END
])
# f.write(basic_program)
program = [
BasicLine(12, bytes([0x99, 0x20, 0x31])),
]
for nr, line in enumerate(text_message.split('\n')):
line = line.strip().upper().encode('ascii')
program.append(
BasicLine(
30 + nr,
bytes([0x99, 0x20, 0x22]) + line + bytes([0x22])))
sys_address = 0x890
address_text = str(sys_address).encode('ascii')
program.append(BasicLine(1000, bytes([0x9e]) + address_text))
write_basic_program(program, f)
pos = f.tell() - 2
max_basic_size = sys_address - load_address
if pos < max_basic_size:
f.seek(sys_address - load_address + 2)
print(pos)
f.write(oj.get_image('upperram').data)
else:
print('Basic program too large')
|
the-stack_106_25276 | from attrdict import (
AttrDict,
)
class AttributeDict(AttrDict):
'''
See `AttrDict docs <https://github.com/bcj/AttrDict#attrdict-1>`_
This class differs only in that it is made immutable. This immutability
is **not** a security guarantee. It is only a style-check convenience.
'''
def __setitem__(self, attr, val):
raise TypeError(
'This data is immutable -- create a copy instead of modifying. '
'For example, AttributeDict(old, replace_key=replace_val).'
)
def _repr_pretty_(self, builder, cycle):
"""
Custom pretty output for the IPython console
"""
builder.text(self.__class__.__name__ + "(")
if cycle:
builder.text("<cycle>")
else:
builder.pretty(self.__dict__)
builder.text(")")
|
the-stack_106_25277 | import urllib
import requests
from behance_python import ENDPOINTS, url_join
from project import Project
from user import User
from wip import WIP
from collection import Collection
from behance import Behance
import exceptions
from requests.exceptions import ConnectionError, HTTPError, Timeout, TooManyRedirects
class API:
"""Base wrapper for the Behance api.
Must be instantiated using your provided auth key."""
def __init__(self, auth_key):
self.auth_key = auth_key
def _do_api_search(self, url):
try:
#Get results from API
_results = requests.get(url)
#Parse results
if _results.status_code == 200:
return _results.json()
else:
n = _results.status_code
try:
raise getattr(exceptions, exceptions.EXCEPTIONMAPPING[n])(n)
except AttributeError:
raise exceptions.BehanceException(n)
except (ConnectionError, HTTPError, Timeout, TooManyRedirects) as e:
raise e
def get_project(self, project_id):
"""Query behance API and return Project instance"""
return Project(project_id, self.auth_key)
def project_search(self, *args, **kwargs):
"""Search for projects on Behance.
Takes any number of text search terms, as well as key/value filters.
Valid filters: [valid values]
sort: [featured_date, appreciations, views, comments, published_date]
time: [all, today, week, month]
field: [URL-encoded field name from Behance list of defined creative fields]
country: [2-letter FIPS country code]
state: [State or province name]
page: [page number of results, 1-indexed]
tags: [single tag name or pipe separated list of tags]
"""
if len(args) == 0:
#Make sure user provides search terms...
return None
else:
#Build the URL
_base_url = url_join(ENDPOINTS['api'], ENDPOINTS['project'])
_terms = "+".join(urllib.quote(arg) for arg in args)
_filters = urllib.urlencode(kwargs)
_url = '%s?api_key=%s&q=%s&%s' % (_base_url, self.auth_key, _terms, _filters)
#Get results from API
return [Behance(data=proj) for proj in self._do_api_search(_url)['projects']]
def user_search(self, *args, **kwargs):
"""Search for users on Behance.
Takes any number of text search terms, as well as key/value filters
as supported by Behance API."""
if len(args) == 0:
return None
else:
_base_url = url_join(ENDPOINTS['api'], ENDPOINTS['user'])
_terms = "+".join(urllib.quote(arg) for arg in args)
_filters = urllib.urlencode(kwargs)
_url = '%s?api_key=%s&q=%s&%s' % (_base_url, self.auth_key, _terms, _filters)
#Get results from API
return [Behance(data=user) for user in self._do_api_search(_url)['users']]
def get_user(self, user_id):
return User(user_id, self.auth_key)
def wip_search(self, *args, **kwargs):
if len(args) == 0:
return None
else:
_base_url = url_join(ENDPOINTS['api'], ENDPOINTS['wip'])
_terms = "+".join(urllib.quote(arg) for arg in args)
_filters = urllib.urlencode(kwargs)
_url = '%s?api_key=%s&q=%s&%s' % (_base_url, self.auth_key, _terms, _filters)
#Get results from API
return [Behance(data=wip) for wip in self._do_api_search(_url)['wips']]
def get_wip(self, wip_id):
return WIP(wip_id, self.auth_key)
def collection_search(self, *args, **kwargs):
if len(args) == 0:
return None
else:
_base_url = url_join(ENDPOINTS['api'], ENDPOINTS['collection'])
_terms = "+".join(urllib.quote(arg) for arg in args)
_filters = urllib.urlencode(kwargs)
_url = '%s?api_key=%s&q=%s&%s' % (_base_url, self.auth_key, _terms, _filters)
#Get results from API
return [Behance(data=collection) for collection in self._do_api_search(_url)['collections']]
def get_collection(self, collection_id):
return Collection(collection_id, self.auth_key)
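# A minimal usage sketch (the auth key and search terms are placeholders; the
# methods and filters are the ones defined and documented above):
#
#     api = API('your-behance-auth-key')
#     projects = api.project_search('typography', sort='appreciations')
#     user = api.get_user('some-user-id')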
|
the-stack_106_25278 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('statsy', '0004_auto_20151110_0935'),
]
operations = [
migrations.AlterField(
model_name='statsyevent',
name='name',
field=models.CharField(unique=True, max_length=100, verbose_name=b'name'),
),
migrations.AlterField(
model_name='statsygroup',
name='name',
field=models.CharField(unique=True, max_length=100, verbose_name=b'name'),
),
]
|
the-stack_106_25279 | """
Copyright: MAXON Computer GmbH
Author: Maxime Adam.
Description:
- Gets the material linked to the first texture tag of the active object.
Class/method highlighted:
- BaseObject.GetTag()
- TextureTag.GetMaterial()
Compatible:
- Win / Mac
- R13, R14, R15, R16, R17, R18, R19, R20, R21, S22
"""
import c4d
def main():
# Checks if selected object is valid
if op is None:
raise ValueError("op is none, please select one object.")
# Get the first texture tag
textureTag = op.GetTag(c4d.Ttexture)
if textureTag is None:
raise RuntimeError("Failed to retrieve the texture tag.")
# Retrieves the linked material
mat = textureTag.GetMaterial()
# If no material is linked we leave
if mat is None:
return
# Print the name of the material to the console.
print(mat.GetName())
if __name__ == '__main__':
main()
|
the-stack_106_25280 | from __future__ import unicode_literals
from parglare.parser import REDUCE, SHIFT, ACCEPT
import codecs
import sys
from parglare import termui as t
if sys.version < '3':
text = unicode # noqa
else:
text = str
HEADER = '''
digraph grammar {
rankdir=LR
fontname = "Bitstream Vera Sans"
fontsize = 8
node[
shape=record,
style=filled,
fillcolor=aliceblue
]
nodesep = 0.3
edge[dir=black,arrowtail=empty]
'''
def dot_escape(s):
colors = t.colors
t.colors = False
s = text(s)
out = s.replace('\n', r'\n')\
.replace('\\', '\\\\')\
.replace('"', r'\"')\
.replace('|', r'\|')\
.replace('{', r'\{')\
.replace('}', r'\}')\
.replace('>', r'\>')\
.replace('<', r'\<')\
.replace('?', r'\?')
t.colors = colors
return out
def grammar_pda_export(table, file_name):
with codecs.open(file_name, 'w', encoding="utf-8") as f:
f.write(HEADER)
for state in table.states:
kernel_items = ""
for item in state.kernel_items:
kernel_items += "{}\\l".format(dot_escape(text(item)))
nonkernel_items = "|" if state.nonkernel_items else ""
for item in state.nonkernel_items:
nonkernel_items += "{}\\l".format(dot_escape(text(item)))
# SHIFT actions and GOTOs will be encoded in links.
# REDUCE actions will be presented inside each node.
reduce_actions = []
for term, actions in state.actions.items():
r_actions = [a for a in actions if a.action is REDUCE]
if r_actions:
reduce_actions.append((term, r_actions))
reductions = ""
if reduce_actions:
reductions = "|Reductions:\\l{}".format(
", ".join(["{}:{}".format(
dot_escape(x[0].name), x[1][0].prod.prod_id
if len(x[1]) == 1 else "[{}]".format(
",".join([str(i.prod.prod_id) for i in x[1]])))
for x in reduce_actions]))
# States
f.write('{}[label="{}|{}{}{}"]\n'
.format(
state.state_id,
dot_escape("{}:{}"
.format(state.state_id, state.symbol)),
kernel_items, nonkernel_items, reductions))
f.write("\n")
# SHIFT and GOTOs as links
shacc = []
for term, actions in state.actions.items():
for a in [a for a in actions if a.action in [SHIFT, ACCEPT]]:
shacc.append((term, a))
for term, action in shacc:
f.write('{} -> {} [label="{}:{}"]'.format(
state.state_id, action.state.state_id,
"SHIFT" if action.action == SHIFT else "ACCEPT", term))
for symb, goto_state in ((symb, goto) for symb, goto
in state.gotos.items()):
f.write('{} -> {} [label="GOTO:{}"]'.format(
state.state_id, goto_state.state_id, symb))
f.write("\n}\n")
|
the-stack_106_25282 | """
Training a Convolutional Neural Network for Image Classification
================================================================
*Tutorial adapted from http://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html*
Generally, when you have to deal with image, text, audio or video data,
you can use standard python packages that load data into a numpy array.
Then you can convert this array into a ``torch.*Tensor``.
- For images, packages such as Pillow, OpenCV are useful.
- For audio, packages such as scipy and librosa
- For text, either raw Python or Cython based loading, or NLTK and
SpaCy are useful.
For this tutorial, we will use the CIFAR10 dataset. It has the classes:
``airplane``, ``automobile``, ``bird``, ``cat``, ``deer``, ``dog``,
``frog``, ``horse``, ``ship``, ``truck``. The images in CIFAR-10 are of
size 3x32x32, i.e. 3-channel color images of 32x32 pixels in size.
.. figure:: /_static/cifar10.png
:alt: cifar
cifar
Training an image classifier
----------------------------
We will do the following steps in order:
1. Import libraries and add model settings
2. Load and normalize the CIFAR10 training and test datasets using
``torchvision``
3. Define a Convolution Neural Network (forward)
4. Train the network on the training data
5. Test the network on the test data
"""
######################################################################
# 1. Importing and Declaring Global Variables
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We start be importing all libraries that will be required in this
# tutorial.
#
from __future__ import print_function
import torch
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from cogitare.data import DataSet, AsyncDataLoader
from cogitare import utils, Model
from cogitare.plugins import EarlyStopping
from cogitare.metrics.classification import accuracy
import cogitare
import argparse
import matplotlib.pyplot as plt
import numpy as np
parser = argparse.ArgumentParser()
pa = parser.add_argument # define a shortcut
pa('--batch-size', help='Size of the training batch', type=int, default=64)
pa('--cuda', help='enable cuda', action='store_true')
pa('--dropout', help='dropout rate in the input data', type=float, default=0.3)
pa('--learning-rate', help='learning rate', type=float, default=0.001)
pa('--max-epochs', help='limit the number of epochs in training', type=int, default=10)
# load the model arguments
try:
args = parser.parse_args()
except:
    # argparse raises SystemExit if the command-line arguments cannot be parsed
    # (e.g. when this script is executed by the docs builder); fall back to the
    # default argument values in that case.
    args = parser.parse_args([])
cogitare.utils.set_cuda(args.cuda)
CLASSES = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
######################################################################
# 2. Loading and normalizing CIFAR10
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Using ``torchvision``, it’s extremely easy to load CIFAR10.
#
# The output of torchvision datasets are PILImage images of range [0, 1].
# We transform them to Tensors of normalized range [-1, 1]
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# load the CIFAR 10 data
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
print(type(trainset.train_data))
######################################################################
# Torchvision loads the data as numpy arrays.
#
# We now create two datasets to hold the train and test sets.
#
print(type(trainset.train_data))
print(type(trainset.train_labels))
def batch2variable(batch):
data, label = batch
data = utils.to_tensor(data)
    # B x W x H x C to B x C x W x H
data = data.transpose(1, 2).transpose(1, 3)
return utils.to_variable(data, dtype=torch.FloatTensor), utils.to_variable(label)
# convert the trainset.train_labels to LongTensor, instead of python list
data_train = DataSet([trainset.train_data, torch.LongTensor(trainset.train_labels)],
batch_size=args.batch_size,
drop_last=True)
# use the async loader, to pre-load 8 batches ahead of the model
# each batch is then loaded and moved to a torch Variable
data_train = AsyncDataLoader(data_train, buffer_size=args.batch_size * 8,
on_batch_loaded=batch2variable)
data_test = DataSet([testset.test_data, torch.LongTensor(testset.test_labels)],
batch_size=args.batch_size,
drop_last=True)
data_test = AsyncDataLoader(data_test, buffer_size=args.batch_size * 8,
on_batch_loaded=batch2variable)
# fill the data buffer
data_train.cache()
data_test.cache()
######################################################################
# The train and test datasets are defined as a collection of tuples, each
# tuple contains ``(data, expected label)``.
#
print(next(data_train))
######################################################################
# 2.1 Data Visualization
# ----------------------
#
# Let us show some of the training images, for fun.
#
def imshow(img):
npimg = img.cpu().numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
def showlabels(labels, qtd):
for j in range(1, qtd + 1):
print('%10s' % CLASSES[int(labels[j - 1])], end='')
if j % 4 == 0:
print('\n')
images, labels = next(data_train)
print(images.shape)
imshow(torchvision.utils.make_grid(images.data[:16], nrow=4))
showlabels(labels, 16)
######################################################################
# 3. Define a Convolution Neural Network
# --------------------------------------
#
# In this section, we’ll define the forward method of the Cogitare Model.
# In Cogitare, you must implement two methods in the model: **forward**
# and **loss**.
#
# This is a Convolutional Neural Network (CNN) for Image Classification.
#
class CNN(Model):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, batch):
# in this sample, each batch will be a tuple
# containing (input_batch, expected_batch)
# in forward in are only interested in input so that we
# can ignore the second item of the tuple
x, _ = batch
x = F.dropout(x, args.dropout)
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.log_softmax(x, dim=1)
def loss(self, output, batch):
# in this sample, each batch will be a tuple
# containing (input_batch, expected_batch)
# in loss in are only interested in expected so that
# we can ignore the first item of the tuple
_, expected = batch
return F.nll_loss(output, expected)
######################################################################
# The model class is simple; it only requires the forward and loss methods.
# By default, Cogitare will backward the loss returned by the loss()
# method, and optimize the model parameters.
#
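#
# As a rough sketch (an assumption about Cogitare's internals, not its exact
# code), each training step conceptually combines the two methods defined
# above like this::
#
#     output = model.forward(batch)      # batch is (input, expected)
#     loss = model.loss(output, batch)   # scalar loss for the batch
#     loss.backward()                    # gradients computed automatically
#     optimizer.step()                   # parameters updated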
######################################################################
# 4. Training the Model
# ---------------------
#
# We first define the model optimizer for training the model.
#
cnn = CNN()
optimizer = optim.Adam(cnn.parameters(), lr=args.learning_rate)
######################################################################
# We now add the default plugins to watch the training status. The default
# plugins include:
#
# - Progress bar per batch and epoch
# - Plot training and validation losses (if validation_dataset is
# present)
# - Log training loss
#
# And some extra plugins.
#
cnn.register_default_plugins()
early = EarlyStopping(max_tries=5, path='/tmp/model.pt')
# After 5 epochs without the loss decreasing, stop the training; the best
# model is saved at /tmp/model.pt.
# The plugin will execute at the end of each epoch.
cnn.register_plugin(early, 'on_end_epoch')
######################################################################
# We can now run the training:
#
if args.cuda:
cnn = cnn.cuda()
cnn.learn(data_train, optimizer, data_test, max_epochs=args.max_epochs)
######################################################################
# 5. Model Evaluation
# -------------------
#
# We now check the model loss and accuracy on the test set:
#
def model_accuracy(output, data):
_, indices = torch.max(output, 1)
return accuracy(indices, data[1])
# evaluate the model loss and accuracy over the validation dataset
metrics = cnn.evaluate_with_metrics(data_test, {'loss': cnn.metric_loss, 'accuracy': model_accuracy})
# metrics is a dict mapping each metric name (loss or accuracy, in this sample) to a list of per-batch values.
# We have one measurement per batch, so to get a value for the full dataset we take the mean:
metrics_mean = {'loss': 0, 'accuracy': 0}
for loss, acc in zip(metrics['loss'], metrics['accuracy']):
metrics_mean['loss'] += loss
metrics_mean['accuracy'] += acc.data[0]
qtd = len(metrics['loss'])
print('Loss: {}'.format(metrics_mean['loss'] / qtd))
print('Accuracy: {}'.format(metrics_mean['accuracy'] / qtd))
######################################################################
# 5.1 Visualization
# ~~~~~~~~~~~~~~~~~
#
# To check how the model behaves, we can plot the images along with their
# expected and predicted labels.
#
images, labels = next(data_test)
imshow(torchvision.utils.make_grid(images.data[:16], nrow=4))
######################################################################
# We forward the data to get the model output for the batch above.
#
predicted = cnn.predict((images, None))
# remember that the forward method expects a tuple, where the
# first item contains the data to be passed to the net
predicted.shape
_, predicted_labels = torch.max(predicted, dim=1)
print('Predicted:\n')
showlabels(predicted_labels[:16], 16)
|
the-stack_106_25283 | """Installation utilities for Python ISAPI filters and extensions."""
# this code adapted from "Tomcat JK2 ISAPI redirector", part of Apache
# Created July 2004, Mark Hammond.
import sys, os, imp, shutil, stat
import operator
from win32com.client import GetObject, Dispatch
from win32com.client.gencache import EnsureModule, EnsureDispatch
import win32api
import pythoncom
import winerror
import traceback
_APP_INPROC = 0
_APP_OUTPROC = 1
_APP_POOLED = 2
_IIS_OBJECT = "IIS://10.0.0.7/W3SVC"
_IIS_SERVER = "IIsWebServer"
_IIS_WEBDIR = "IIsWebDirectory"
_IIS_WEBVIRTUALDIR = "IIsWebVirtualDir"
_IIS_FILTERS = "IIsFilters"
_IIS_FILTER = "IIsFilter"
_DEFAULT_SERVER_NAME = "Default Web Site"
_DEFAULT_HEADERS = "X-Powered-By: Python"
_DEFAULT_PROTECTION = _APP_POOLED
# Default is for 'execute' only access - ie, only the extension
# can be used. This can be overridden via your install script.
_DEFAULT_ACCESS_EXECUTE = True
_DEFAULT_ACCESS_READ = False
_DEFAULT_ACCESS_WRITE = False
_DEFAULT_ACCESS_SCRIPT = False
_DEFAULT_CONTENT_INDEXED = False
_DEFAULT_ENABLE_DIR_BROWSING = False
_DEFAULT_ENABLE_DEFAULT_DOC = False
_extensions = [ext for ext, _, _ in imp.get_suffixes()]
is_debug_build = '_d.pyd' in _extensions
this_dir = os.path.abspath(os.path.dirname(__file__))
class FilterParameters:
Name = None
Description = None
Path = None
Server = None
# Params that control if/how AddExtensionFile is called.
AddExtensionFile = True
AddExtensionFile_Enabled = True
AddExtensionFile_GroupID = None # defaults to Name
AddExtensionFile_CanDelete = True
AddExtensionFile_Description = None # defaults to Description.
def __init__(self, **kw):
self.__dict__.update(kw)
class VirtualDirParameters:
Name = None # Must be provided.
Description = None # defaults to Name
AppProtection = _DEFAULT_PROTECTION
Headers = _DEFAULT_HEADERS
Path = None # defaults to WWW root.
Type = _IIS_WEBVIRTUALDIR
AccessExecute = _DEFAULT_ACCESS_EXECUTE
AccessRead = _DEFAULT_ACCESS_READ
AccessWrite = _DEFAULT_ACCESS_WRITE
AccessScript = _DEFAULT_ACCESS_SCRIPT
ContentIndexed = _DEFAULT_CONTENT_INDEXED
EnableDirBrowsing = _DEFAULT_ENABLE_DIR_BROWSING
EnableDefaultDoc = _DEFAULT_ENABLE_DEFAULT_DOC
DefaultDoc = None # Only set in IIS if not None
ScriptMaps = []
ScriptMapUpdate = "end" # can be 'start', 'end', 'replace'
Server = None
def __init__(self, **kw):
self.__dict__.update(kw)
def is_root(self):
"This virtual directory is a root directory if parent and name are blank"
parent, name = self.split_path()
return not parent and not name
def split_path(self):
return split_path(self.Name)
class ScriptMapParams:
Extension = None
Module = None
Flags = 5
Verbs = ""
# Params that control if/how AddExtensionFile is called.
AddExtensionFile = True
AddExtensionFile_Enabled = True
AddExtensionFile_GroupID = None # defaults to Name
AddExtensionFile_CanDelete = True
AddExtensionFile_Description = None # defaults to Description.
def __init__(self, **kw):
self.__dict__.update(kw)
def __str__(self):
"Format this parameter suitable for IIS"
items = [self.Extension, self.Module, self.Flags]
# IIS gets upset if there is a trailing verb comma, but no verbs
if self.Verbs:
items.append(self.Verbs)
items = [str(item) for item in items]
return ','.join(items)
class ISAPIParameters:
ServerName = _DEFAULT_SERVER_NAME
# Description = None
Filters = []
VirtualDirs = []
def __init__(self, **kw):
self.__dict__.update(kw)
verbose = 1 # The level - 0 is quiet.
def log(level, what):
if verbose >= level:
print(what)
# Convert an ADSI COM exception to the Win32 error code embedded in it.
def _GetWin32ErrorCode(com_exc):
hr = com_exc.hresult
# If we have more details in the 'excepinfo' struct, use it.
if com_exc.excepinfo:
hr = com_exc.excepinfo[-1]
if winerror.HRESULT_FACILITY(hr) != winerror.FACILITY_WIN32:
raise
return winerror.SCODE_CODE(hr)
class InstallationError(Exception): pass
class ItemNotFound(InstallationError): pass
class ConfigurationError(InstallationError): pass
def FindPath(options, server, name):
if name.lower().startswith("iis://"):
return name
else:
if name and name[0] != "/":
name = "/"+name
return FindWebServer(options, server)+"/ROOT"+name
def LocateWebServerPath(description):
"""
Find an IIS web server whose name or comment matches the provided
description (case-insensitive).
>>> LocateWebServerPath('Default Web Site') # doctest: +SKIP
or
>>> LocateWebServerPath('1') #doctest: +SKIP
"""
assert len(description) >= 1, "Server name or comment is required"
iis = GetObject(_IIS_OBJECT)
description = description.lower().strip()
for site in iis:
# Name is generally a number, but no need to assume that.
site_attributes = [getattr(site, attr, "").lower().strip()
for attr in ("Name", "ServerComment")]
if description in site_attributes:
return site.AdsPath
msg = "No web sites match the description '%s'" % description
raise ItemNotFound(msg)
def GetWebServer(description = None):
"""
Load the web server instance (COM object) for a given instance
or description.
If None is specified, the default website is retrieved (indicated
by the identifier 1.
"""
description = description or "1"
path = LocateWebServerPath(description)
server = LoadWebServer(path)
return server
def LoadWebServer(path):
try:
server = GetObject(path)
except pythoncom.com_error as details:
msg = details.strerror
        if details.excepinfo and details.excepinfo[2]:
            msg = details.excepinfo[2]
msg = "WebServer %s: %s" % (path, msg)
raise ItemNotFound(msg)
return server
def FindWebServer(options, server_desc):
"""
Legacy function to allow options to define a .server property
to override the other parameter. Use GetWebServer instead.
"""
# options takes precedence
server_desc = options.server or server_desc
# make sure server_desc is unicode (could be mbcs if passed in
# sys.argv).
if server_desc and not isinstance(server_desc, str):
server_desc = server_desc.decode('mbcs')
# get the server (if server_desc is None, the default site is acquired)
server = GetWebServer(server_desc)
return server.adsPath
def split_path(path):
"""
Get the parent path and basename.
>>> split_path('/')
['', '']
>>> split_path('')
['', '']
>>> split_path('foo')
['', 'foo']
>>> split_path('/foo')
['', 'foo']
>>> split_path('/foo/bar')
['/foo', 'bar']
>>> split_path('foo/bar')
['/foo', 'bar']
"""
if not path.startswith('/'): path = '/' + path
return path.rsplit('/', 1)
def _CreateDirectory(iis_dir, name, params):
# We used to go to lengths to keep an existing virtual directory
# in place. However, in some cases the existing directories got
# into a bad state, and an update failed to get them working.
# So we nuke it first. If this is a problem, we could consider adding
# a --keep-existing option.
try:
# Also seen the Class change to a generic IISObject - so nuke
# *any* existing object, regardless of Class
assert name.strip("/"), "mustn't delete the root!"
iis_dir.Delete('', name)
log(2, "Deleted old directory '%s'" % (name,))
except pythoncom.com_error:
pass
newDir = iis_dir.Create(params.Type, name)
log(2, "Creating new directory '%s' in %s..." % (name,iis_dir.Name))
friendly = params.Description or params.Name
newDir.AppFriendlyName = friendly
# Note that the new directory won't be visible in the IIS UI
# unless the directory exists on the filesystem.
try:
path = params.Path or iis_dir.Path
newDir.Path = path
except AttributeError:
# If params.Type is IIS_WEBDIRECTORY, an exception is thrown
pass
newDir.AppCreate2(params.AppProtection)
# XXX - note that these Headers only work in IIS6 and earlier. IIS7
# only supports them on the w3svc node - not even on individial sites,
# let alone individual extensions in the site!
if params.Headers:
newDir.HttpCustomHeaders = params.Headers
log(2, "Setting directory options...")
newDir.AccessExecute = params.AccessExecute
newDir.AccessRead = params.AccessRead
newDir.AccessWrite = params.AccessWrite
newDir.AccessScript = params.AccessScript
newDir.ContentIndexed = params.ContentIndexed
newDir.EnableDirBrowsing = params.EnableDirBrowsing
newDir.EnableDefaultDoc = params.EnableDefaultDoc
if params.DefaultDoc is not None:
newDir.DefaultDoc = params.DefaultDoc
newDir.SetInfo()
return newDir
def CreateDirectory(params, options):
_CallHook(params, "PreInstall", options)
if not params.Name:
raise ConfigurationError("No Name param")
parent, name = params.split_path()
target_dir = GetObject(FindPath(options, params.Server, parent))
if not params.is_root():
target_dir = _CreateDirectory(target_dir, name, params)
AssignScriptMaps(params.ScriptMaps, target_dir, params.ScriptMapUpdate)
_CallHook(params, "PostInstall", options, target_dir)
log(1, "Configured Virtual Directory: %s" % (params.Name,))
return target_dir
def AssignScriptMaps(script_maps, target, update='replace'):
"""Updates IIS with the supplied script map information.
script_maps is a list of ScriptMapParameter objects
target is an IIS Virtual Directory to assign the script maps to
update is a string indicating how to update the maps, one of ('start',
'end', or 'replace')
"""
# determine which function to use to assign script maps
script_map_func = '_AssignScriptMaps' + update.capitalize()
try:
script_map_func = eval(script_map_func)
except NameError:
msg = "Unknown ScriptMapUpdate option '%s'" % update
raise ConfigurationError(msg)
# use the str method to format the script maps for IIS
script_maps = [str(s) for s in script_maps]
# call the correct function
script_map_func(target, script_maps)
target.SetInfo()
def get_unique_items(sequence, reference):
"Return items in sequence that can't be found in reference."
return tuple([item for item in sequence if item not in reference])
def _AssignScriptMapsReplace(target, script_maps):
target.ScriptMaps = script_maps
def _AssignScriptMapsEnd(target, script_maps):
unique_new_maps = get_unique_items(script_maps, target.ScriptMaps)
target.ScriptMaps = target.ScriptMaps + unique_new_maps
def _AssignScriptMapsStart(target, script_maps):
unique_new_maps = get_unique_items(script_maps, target.ScriptMaps)
target.ScriptMaps = unique_new_maps + target.ScriptMaps
def CreateISAPIFilter(filterParams, options):
server = FindWebServer(options, filterParams.Server)
_CallHook(filterParams, "PreInstall", options)
try:
filters = GetObject(server+"/Filters")
except pythoncom.com_error as exc:
# Brand new sites don't have the '/Filters' collection - create it.
# Any errors other than 'not found' we shouldn't ignore.
if winerror.HRESULT_FACILITY(exc.hresult) != winerror.FACILITY_WIN32 or \
winerror.HRESULT_CODE(exc.hresult) != winerror.ERROR_PATH_NOT_FOUND:
raise
server_ob = GetObject(server)
filters = server_ob.Create(_IIS_FILTERS, "Filters")
filters.FilterLoadOrder = ""
filters.SetInfo()
# As for VirtualDir, delete an existing one.
assert filterParams.Name.strip("/"), "mustn't delete the root!"
try:
filters.Delete(_IIS_FILTER, filterParams.Name)
log(2, "Deleted old filter '%s'" % (filterParams.Name,))
except pythoncom.com_error:
pass
newFilter = filters.Create(_IIS_FILTER, filterParams.Name)
log(2, "Created new ISAPI filter...")
assert os.path.isfile(filterParams.Path)
newFilter.FilterPath = filterParams.Path
newFilter.FilterDescription = filterParams.Description
newFilter.SetInfo()
load_order = [b.strip() for b in filters.FilterLoadOrder.split(",") if b]
if filterParams.Name not in load_order:
load_order.append(filterParams.Name)
filters.FilterLoadOrder = ",".join(load_order)
filters.SetInfo()
_CallHook(filterParams, "PostInstall", options, newFilter)
log (1, "Configured Filter: %s" % (filterParams.Name,))
return newFilter
def DeleteISAPIFilter(filterParams, options):
_CallHook(filterParams, "PreRemove", options)
server = FindWebServer(options, filterParams.Server)
ob_path = server+"/Filters"
try:
filters = GetObject(ob_path)
except pythoncom.com_error as details:
# failure to open the filters just means a totally clean IIS install
# (IIS5 at least has no 'Filters' key when freshly installed).
log(2, "ISAPI filter path '%s' did not exist." % (ob_path,))
return
try:
assert filterParams.Name.strip("/"), "mustn't delete the root!"
filters.Delete(_IIS_FILTER, filterParams.Name)
log(2, "Deleted ISAPI filter '%s'" % (filterParams.Name,))
except pythoncom.com_error as details:
rc = _GetWin32ErrorCode(details)
if rc != winerror.ERROR_PATH_NOT_FOUND:
raise
log(2, "ISAPI filter '%s' did not exist." % (filterParams.Name,))
# Remove from the load order
load_order = [b.strip() for b in filters.FilterLoadOrder.split(",") if b]
if filterParams.Name in load_order:
load_order.remove(filterParams.Name)
filters.FilterLoadOrder = ",".join(load_order)
filters.SetInfo()
_CallHook(filterParams, "PostRemove", options)
log (1, "Deleted Filter: %s" % (filterParams.Name,))
def _AddExtensionFile(module, def_groupid, def_desc, params, options):
group_id = params.AddExtensionFile_GroupID or def_groupid
desc = params.AddExtensionFile_Description or def_desc
try:
ob = GetObject(_IIS_OBJECT)
ob.AddExtensionFile(module,
params.AddExtensionFile_Enabled,
group_id,
params.AddExtensionFile_CanDelete,
desc)
log(2, "Added extension file '%s' (%s)" % (module, desc))
except (pythoncom.com_error, AttributeError) as details:
# IIS5 always fails. Probably should upgrade this to
# complain more loudly if IIS6 fails.
log(2, "Failed to add extension file '%s': %s" % (module, details))
def AddExtensionFiles(params, options):
"""Register the modules used by the filters/extensions as a trusted
'extension module' - required by the default IIS6 security settings."""
# Add each module only once.
added = {}
for vd in params.VirtualDirs:
for smp in vd.ScriptMaps:
if smp.Module not in added and smp.AddExtensionFile:
_AddExtensionFile(smp.Module, vd.Name, vd.Description, smp,
options)
added[smp.Module] = True
for fd in params.Filters:
if fd.Path not in added and fd.AddExtensionFile:
_AddExtensionFile(fd.Path, fd.Name, fd.Description, fd, options)
added[fd.Path] = True
def _DeleteExtensionFileRecord(module, options):
try:
ob = GetObject(_IIS_OBJECT)
ob.DeleteExtensionFileRecord(module)
log(2, "Deleted extension file record for '%s'" % module)
except (pythoncom.com_error, AttributeError) as details:
log(2, "Failed to remove extension file '%s': %s" % (module, details))
def DeleteExtensionFileRecords(params, options):
deleted = {} # only remove each .dll once.
for vd in params.VirtualDirs:
for smp in vd.ScriptMaps:
if smp.Module not in deleted and smp.AddExtensionFile:
_DeleteExtensionFileRecord(smp.Module, options)
deleted[smp.Module] = True
for filter_def in params.Filters:
if filter_def.Path not in deleted and filter_def.AddExtensionFile:
_DeleteExtensionFileRecord(filter_def.Path, options)
deleted[filter_def.Path] = True
def CheckLoaderModule(dll_name):
suffix = ""
if is_debug_build: suffix = "_d"
template = os.path.join(this_dir,
"PyISAPI_loader" + suffix + ".dll")
if not os.path.isfile(template):
raise ConfigurationError(
"Template loader '%s' does not exist" % (template,))
# We can't do a simple "is newer" check, as the DLL is specific to the
# Python version. So we check the date-time and size are identical,
# and skip the copy in that case.
src_stat = os.stat(template)
try:
dest_stat = os.stat(dll_name)
except os.error:
same = 0
else:
same = src_stat[stat.ST_SIZE]==dest_stat[stat.ST_SIZE] and \
src_stat[stat.ST_MTIME]==dest_stat[stat.ST_MTIME]
if not same:
log(2, "Updating %s->%s" % (template, dll_name))
shutil.copyfile(template, dll_name)
shutil.copystat(template, dll_name)
else:
log(2, "%s is up to date." % (dll_name,))
def _CallHook(ob, hook_name, options, *extra_args):
func = getattr(ob, hook_name, None)
if func is not None:
args = (ob,options) + extra_args
func(*args)
def Install(params, options):
_CallHook(params, "PreInstall", options)
for vd in params.VirtualDirs:
CreateDirectory(vd, options)
for filter_def in params.Filters:
CreateISAPIFilter(filter_def, options)
AddExtensionFiles(params, options)
_CallHook(params, "PostInstall", options)
def RemoveDirectory(params, options):
if params.is_root():
return
try:
directory = GetObject(FindPath(options, params.Server, params.Name))
except pythoncom.com_error as details:
rc = _GetWin32ErrorCode(details)
if rc != winerror.ERROR_PATH_NOT_FOUND:
raise
log(2, "VirtualDirectory '%s' did not exist" % params.Name)
directory = None
if directory is not None:
# Be robust should IIS get upset about unloading.
try:
directory.AppUnLoad()
except:
exc_val = sys.exc_info()[1]
log(2, "AppUnLoad() for %s failed: %s" % (params.Name, exc_val))
# Continue trying to delete it.
try:
parent = GetObject(directory.Parent)
parent.Delete(directory.Class, directory.Name)
log (1, "Deleted Virtual Directory: %s" % (params.Name,))
except:
exc_val = sys.exc_info()[1]
log(1, "Failed to remove directory %s: %s" % (params.Name, exc_val))
def RemoveScriptMaps(vd_params, options):
"Remove script maps from the already installed virtual directory"
parent, name = vd_params.split_path()
target_dir = GetObject(FindPath(options, vd_params.Server, parent))
installed_maps = list(target_dir.ScriptMaps)
for _map in map(str, vd_params.ScriptMaps):
if _map in installed_maps:
installed_maps.remove(_map)
target_dir.ScriptMaps = installed_maps
target_dir.SetInfo()
def Uninstall(params, options):
_CallHook(params, "PreRemove", options)
DeleteExtensionFileRecords(params, options)
for vd in params.VirtualDirs:
_CallHook(vd, "PreRemove", options)
RemoveDirectory(vd, options)
if vd.is_root():
# if this is installed to the root virtual directory, we can't delete it
# so remove the script maps.
RemoveScriptMaps(vd, options)
_CallHook(vd, "PostRemove", options)
for filter_def in params.Filters:
DeleteISAPIFilter(filter_def, options)
_CallHook(params, "PostRemove", options)
# Patch up any missing module names in the params, replacing them with
# the DLL name that hosts this extension/filter.
def _PatchParamsModule(params, dll_name, file_must_exist = True):
if file_must_exist:
if not os.path.isfile(dll_name):
raise ConfigurationError("%s does not exist" % (dll_name,))
# Patch up all references to the DLL.
for f in params.Filters:
if f.Path is None: f.Path = dll_name
for d in params.VirtualDirs:
for sm in d.ScriptMaps:
if sm.Module is None: sm.Module = dll_name
def GetLoaderModuleName(mod_name, check_module = None):
# find the name of the DLL hosting us.
# By default, this is "_{module_base_name}.dll"
if hasattr(sys, "frozen"):
# What to do? The .dll knows its name, but this is likely to be
# executed via a .exe, which does not know.
base, ext = os.path.splitext(mod_name)
path, base = os.path.split(base)
# handle the common case of 'foo.exe'/'foow.exe'
if base.endswith('w'):
base = base[:-1]
# For py2exe, we have '_foo.dll' as the standard pyisapi loader - but
# 'foo.dll' is what we use (it just delegates).
# So no leading '_' on the installed name.
dll_name = os.path.abspath(os.path.join(path, base + ".dll"))
else:
base, ext = os.path.splitext(mod_name)
path, base = os.path.split(base)
dll_name = os.path.abspath(os.path.join(path, "_" + base + ".dll"))
# Check we actually have it.
if check_module is None: check_module = not hasattr(sys, "frozen")
if check_module:
CheckLoaderModule(dll_name)
return dll_name
# Note the 'log' params to these 'builtin' args - old versions of pywin32
# didn't log at all in this function (by intent; anyone calling this was
# responsible). So existing code that calls this function with the old
# signature (ie, without a 'log' param) still gets the same behaviour as
# before...
def InstallModule(conf_module_name, params, options, log=lambda *args:None):
"Install the extension"
if not hasattr(sys, "frozen"):
conf_module_name = os.path.abspath(conf_module_name)
if not os.path.isfile(conf_module_name):
raise ConfigurationError("%s does not exist" % (conf_module_name,))
loader_dll = GetLoaderModuleName(conf_module_name)
_PatchParamsModule(params, loader_dll)
Install(params, options)
log(1, "Installation complete.")
def UninstallModule(conf_module_name, params, options, log=lambda *args:None):
"Remove the extension"
loader_dll = GetLoaderModuleName(conf_module_name, False)
_PatchParamsModule(params, loader_dll, False)
Uninstall(params, options)
log(1, "Uninstallation complete.")
standard_arguments = {
"install" : InstallModule,
"remove" : UninstallModule,
}
def build_usage(handler_map):
docstrings = [handler.__doc__ for handler in handler_map.values()]
all_args = dict(zip(iter(handler_map.keys()), docstrings))
arg_names = "|".join(iter(all_args.keys()))
usage_string = "%prog [options] [" + arg_names + "]\n"
usage_string += "commands:\n"
for arg, desc in all_args.items():
usage_string += " %-10s: %s" % (arg, desc) + "\n"
return usage_string[:-1]
def MergeStandardOptions(options, params):
"""
Take an options object generated by the command line and merge
the values into the IISParameters object.
"""
pass
# We support 2 ways of extending our command-line/install support.
# * Many of the installation items allow you to specify "PreInstall",
# "PostInstall", "PreRemove" and "PostRemove" hooks
# All hooks are called with the 'params' object being operated on, and
# the 'optparser' options for this session (ie, the command-line options)
# PostInstall for VirtualDirectories and Filters both have an additional
# param - the ADSI object just created.
# * You can pass your own option parser for us to use, and/or define a map
# with your own custom arg handlers. It is a map of 'arg'->function.
#   The function is called with (conf_module_name, params, options, log_fn).
#   The function's docstring is used in the usage output.
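# For example, a hypothetical custom handler could look like this (the name
# 'status' and its behaviour are illustrative only); it receives the same
# arguments as the standard 'install'/'remove' handlers:
#
#     def status(conf_module_name, params, options, log):
#         "Report the virtual directories this module would configure"
#         for vd in params.VirtualDirs:
#             log(1, "Virtual directory: %s" % vd.Name)
#
#     HandleCommandLine(params, custom_arg_handlers={'status': status})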
def HandleCommandLine(params, argv=None, conf_module_name = None,
default_arg = "install",
opt_parser = None, custom_arg_handlers = {}):
"""Perform installation or removal of an ISAPI filter or extension.
This module handles standard command-line options and configuration
information, and installs, removes or updates the configuration of an
ISAPI filter or extension.
You must pass your configuration information in params - all other
arguments are optional, and allow you to configure the installation
process.
"""
global verbose
from optparse import OptionParser
argv = argv or sys.argv
if not conf_module_name:
conf_module_name = sys.argv[0]
# convert to a long name so that if we were somehow registered with
# the "short" version but unregistered with the "long" version we
# still work (that will depend on exactly how the installer was
# started)
try:
conf_module_name = win32api.GetLongPathName(conf_module_name)
except win32api.error as exc:
log(2, "Couldn't determine the long name for %r: %s" %
(conf_module_name, exc))
if opt_parser is None:
# Build our own parser.
parser = OptionParser(usage='')
else:
# The caller is providing their own filter, presumably with their
# own options all setup.
parser = opt_parser
# build a usage string if we don't have one.
if not parser.get_usage():
all_handlers = standard_arguments.copy()
all_handlers.update(custom_arg_handlers)
parser.set_usage(build_usage(all_handlers))
# allow the user to use uninstall as a synonym for remove if it wasn't
# defined by the custom arg handlers.
all_handlers.setdefault('uninstall', all_handlers['remove'])
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
parser.add_option("-v", "--verbosity", action="count",
dest="verbose", default=1,
help="increase the verbosity of status messages")
parser.add_option("", "--server", action="store",
help="Specifies the IIS server to install/uninstall on." \
" Default is '%s/1'" % (_IIS_OBJECT,))
(options, args) = parser.parse_args(argv[1:])
MergeStandardOptions(options, params)
verbose = options.verbose
if not args:
args = [default_arg]
try:
for arg in args:
handler = all_handlers[arg]
handler(conf_module_name, params, options, log)
except (ItemNotFound, InstallationError) as details:
if options.verbose > 1:
traceback.print_exc()
print("%s: %s" % (details.__class__.__name__, details))
except KeyError:
parser.error("Invalid arg '%s'" % arg)
|
the-stack_106_25285 | import logging
from typing import Dict
import aiohttp
from redbot.core import Config, checks, commands
logger = logging.getLogger("snekeval")
class SnekEval(commands.Cog):
def __init__(self):
self.conf = Config.get_conf(
self, identifier=115110101107) # ord('snek')
default_global = {"snekbox_url": None}
self.conf.register_global(**default_global)
@staticmethod
async def _evaluate(url: str, payload: str) -> Dict:
data = {"input": payload}
async with aiohttp.ClientSession() as session: # type: aiohttp.ClientSession
async with session.post(url, json=data) as resp:
resp.raise_for_status()
return await resp.json()
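    # The snekbox eval endpoint is assumed to reply with JSON roughly shaped
    # like {"stdout": "...", "returncode": 0} (returncode 137 on timeout);
    # this is inferred from how the response is used below, not from docs.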
async def _test_snekurl(self, url: str):
ret_json = None
try:
            ret_json = await self._evaluate(url, "print('hello world')")
except aiohttp.client_exceptions.ClientError as exc:
logger.error("Request failed.", exc_info=exc)
else:
if ret_json.get("returncode") == 0:
return True
return False
@staticmethod
def _remove_escapes(text: str):
while text.startswith(('"', '\'', '`')) and text.endswith(('"', '\'', '`')):
text = text[1:-1]
return text
@staticmethod
def _parse_code_block(text: str):
        # slice off the leading ```python and trailing ``` fence markers
        return text[len('```python'):-len('```')]
@staticmethod
def _escape_backticks(text: str, escape_with='\u200b'):
"""
        Escapes backticks with zero-width spaces
        so they don't break the formatting of the returned Discord message.
"""
return text.replace('`', '`'+escape_with)
@commands.command(usage="<snekbox_url>")
@checks.is_owner()
async def snekurl(self, ctx: commands.Context, url=None):
"""Set URL to your snekbox-box.
Examples:
`http://[IP or site][:port]/eval`
`http://snek.box.com:8060/eval`"""
if not url:
current_url = await self.conf.snekbox_url()
await ctx.send_help()
return await ctx.send("`Current snekbox URL: {}`".format(current_url))
async with ctx.typing():
if await self._test_snekurl(url):
await self.conf.snekbox_url.set(url)
return await ctx.send(":white_check_mark: It's working! New url set.")
await ctx.send(":x: URL doesn't seem to work.")
@commands.command(usage="<payload>")
async def snek(self, ctx, *, payload: str = None):
"""Evaluate your python code right from Discord.```
- Execution time limited to 2 seconds.
- Only built-in modules.
- No filesystem.
        - No environment.```
_Everything after this command is considered code._
Code blocks supported."""
url = await self.conf.snekbox_url()
if not url:
return await ctx.send("Snekbox URL isn't set.")
if not payload:
return await ctx.send_help()
async with ctx.typing():
payload = payload.strip()
# detect code block
if payload.startswith("```python") and payload.endswith("```"):
payload = self._parse_code_block(payload)
else:
payload = self._remove_escapes(payload)
try:
data = await self._evaluate(url, payload)
except Exception as exc:
await ctx.send(f"Something went wrong when contacting Snekbox: `{exc}`")
return
if data.get('returncode') == 137:
# timeout
await ctx.send(":timer: Execution timeout. _Maximum running time is 2 seconds._")
return
stdout = self._escape_backticks(data.get("stdout", ""))
await ctx.send(
"\n".join(
(
"```",
stdout,
" ",
f"status code: {data.get('returncode')}",
"```",
)
)
)
|
the-stack_106_25286 | def jaccard_similarity(setA, setB):
intersection = set(setA).intersection(set(setB))
union = set(setA).union(set(setB))
return float(len(intersection))/float(len(union))
def makeWordcloudImages(model, path):
from wordcloud import WordCloud
import matplotlib.pyplot as plt
for i in range(model.num_topics):
fig, ax = plt.subplots()
ax.imshow(WordCloud().fit_words(dict(model.show_topic(i, 200))))
ax.axis("off")
ax.set_title("Topic #" + str(i))
plt.savefig(path + 'LDAtopic' + str(i) + '.jpg')
|
the-stack_106_25287 | import json
import sys,os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from request.RequestPostJson import RequestPostJson
from session.Session import Session
class RequestGetOtherPlayerTrades(RequestPostJson):
def __init__(self, session: Session) -> None:
super().__init__(session)
def get_body(self):
di_request = [{
'__clazz__': 'ServerRequestVO',
'requestClass': 'TradeService',
'requestData': [],
'requestId': self.session.get_post_request_id(),
'requestMethod': 'getOtherPlayersTrades'
}]
return self.build_body(di_request)
if __name__ == '__main__':
sess = Session(sys.argv[1])
if sess.load_from_file():
request = RequestGetOtherPlayerTrades(sess)
response = request.post()
li = json.loads(response.text)
print(json.dumps(li, indent=4))
|
the-stack_106_25288 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for xla.reduce_window."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class ReduceWindowTest(xla_test.XLATestCase):
"""Test cases for xla.reduce_window."""
def _reduce_window(self, operand, init, reducer, **kwargs):
with self.session():
placeholder = array_ops.placeholder(operand.dtype)
with self.test_scope():
output = xla.reduce_window(placeholder, init, reducer, **kwargs)
return output.eval(feed_dict={placeholder: operand})
def testReduceWindow(self):
# TODO(b/77644762): float16 and float64 ReduceWindow are unimplemented.
for dtype in set(self.numeric_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
@function.Defun(dtype, dtype)
def sum_reducer(x, y):
return x + y
@function.Defun(dtype, dtype)
def mul_reducer(x, y):
return x * y
self.assertAllClose(
np.array([3, 5, 7, 9, 11, 13], dtype=dtype),
self._reduce_window(
np.array([1, 2, 3, 4, 5, 6, 7], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[2]))
self.assertAllClose(
np.array([3, 7, 11], dtype=dtype),
self._reduce_window(
np.array([1, 2, 3, 4, 5, 6, 7], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[2],
window_strides=[2]))
self.assertAllClose(
np.array([1, 4, 7], dtype=dtype),
self._reduce_window(
np.array([1, 2, 3, 4, 5, 6, 7], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[1],
window_strides=[3]))
self.assertAllClose(
np.array([[24, 36, 24], [96, 0, 0]], dtype=dtype),
self._reduce_window(
np.array([[1, 2, 3, 4], [4, 3, 2, 1], [2, 4, 0, 1]], dtype=dtype),
1.0,
mul_reducer,
window_dimensions=[2, 2],
window_strides=[1, 1]))
self.assertAllClose(
np.array([[0, 0, 0], [5, 10, 5], [2, 4, 1], [0, 0, 0]], dtype=dtype),
self._reduce_window(
np.array([[1, 2, 3, 4], [4, 3, 2, 1], [2, 4, 0, 1]], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[2, 2],
window_strides=[2, 2],
padding=[[2, 3], [1, 2]]))
if __name__ == '__main__':
googletest.main()
|
the-stack_106_25289 | # -*- coding: UTF-8 -*-
import os
import pandas as pd
import shutil
import numpy as np
import cv2
from tqdm import tqdm
import pyfastcopy
import json
def main():
csv_file='D:/WWF_Det/WWF_Det/Raw_annoations/top14-part2.csv'
df=pd.read_csv(csv_file)
data_set='D:/top14-dataset-part1/'
box_num=0
cate_class=['baichunlu','chihu','gaoyuanshanchun','gaoyuantu','lanmaji','ma','malu','maoniu','mashe','person','xuebao','yang','yanyang','zanghu','chai','hanta','huangmomao','lang','lv','pao','sheli','shidiao','zongxiong']
#cate_class=['person','chihu','zanghu','yang','maoniu','lanmaji','xuebao','gaoyuantu','yanyang']
for index, row in tqdm(df.iterrows()):
timu_data=json.loads(row['题目数据'])
pic_id=row['题目ID']
file_path=data_set+timu_data['Path']
image_folder='D:/WWF_Det/WWF_Data/Pos_Data/top14-part2/allset/images/'
text_folder='D:/WWF_Det/WWF_Data/Pos_Data/top14-part2/allset/labels/'
if not os.path.exists(image_folder):
os.makedirs(image_folder, exist_ok = True)
if not os.path.exists(text_folder):
os.makedirs(text_folder, exist_ok = True)
assert os.path.exists(file_path),file_path
cate=timu_data['Path'].split('/')[0]
#image_name=cate+timu_data['Path'].split('/')[2]
image_name=str(pic_id)+'.jpg'
label_dict=json.loads(row['标注答案'])
shutil.copyfile(file_path,image_folder+image_name)
if len(label_dict.keys()):
bboxes=label_dict['objects']
img = cv2.imread(file_path)
imgy,imgx=img.shape[:2]
txt_path=text_folder+os.path.splitext(image_name)[0]+'.txt'
with open(txt_path, 'w') as f:
for bbox in bboxes:
box_num+=1
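                    # convert the corner-point annotation to a normalized YOLO label:
                    # "<class_id> <center_x> <center_y> <width> <height>", all relative to image size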
topleft=[max(bbox['data'][0]['x'],0),max(bbox['data'][0]['y'],0)]
bottomright=[min(bbox['data'][2]['x'],imgx),min(bbox['data'][2]['y'],imgy)]
center_x=((topleft[0]+bottomright[0])/2)/imgx
center_y=((topleft[1]+bottomright[1])/2)/imgy
w=abs(bottomright[0]-topleft[0])/imgx
h=abs(bottomright[1]-topleft[1])/imgy
cate_id=cate_class.index(cate)
f.write(str(cate_id)+' '+str(center_x)+' '+str(center_y)+' '+str(w)+' '+str(h)+'\n')
#return df_store
if __name__ == "__main__":
main() |
the-stack_106_25290 | from django.conf.urls import include, url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^$', views.home, name='forum'),
url(r'^post/new/$', views.post_new, name='post_new'),
url(r'^post/list/$', views.post_list, name='post_list'),
url(r'^post/(?P<pk>[0-9]+)/$', views.post_detail, name='post_detail'),
url(r'^post/search/$', views.post_search, name='post_search'),
url(r'^reply/new/(?P<pk>[0-9]+)/$', views.reply_new, name='reply_edit'),
url(r'^bulletin/new/$', views.bulletin_new, name='bulletin_new'),
url(r'^message/new/$', views.message_new, name='message_new'),
url(r'^message/send$', views.message_send, name='message_send'),
url(r'^message/receive/$', views.message_receive, name='message_receive'),
url(r'^upload/(?P<pk>[0-9]+)/$', views.upload, name='upload'),
url(r'^download/(?P<pk>[0-9]+)/$', views.file_down, name='download'),
] |
the-stack_106_25293 | # Imports here
import torch
from torch import nn
from torch import optim
from torchvision import datasets, transforms, models
import time
from collections import OrderedDict
import matplotlib.pyplot as plt
import torch.nn.functional as F
import numpy as np
from sklearn.metrics import accuracy_score
# TODO: Build the network
class Network(nn.Module):
def __init__(self, input_size, output_size, hidden_sizes, dropout_p=0.2):
super().__init__()
# Build the network
self.layers = nn.ModuleList([nn.Linear(input_size, hidden_sizes[0])])
self.layers.extend([nn.Linear(n1, n2)
for n1, n2 in zip(hidden_sizes, hidden_sizes[1:])])
self.layers.extend([nn.Linear(hidden_sizes[-1], output_size)])
# Build dropout
self.drop_out = nn.Dropout(dropout_p)
def forward(self, x):
# iterate each layer
for i, each in enumerate(self.layers):
if i != len(self.layers) - 1:
# get output of layer i
x = each(x)
                # apply relu activation
x = F.relu(x)
# make drop_out with p
x = self.drop_out(x)
else:
# last layer = output layer
x = each(x)
x = F.log_softmax(x, dim=1)
return x
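# Hypothetical shape check for the Network above (illustration only; the sizes
# are assumptions): Network(784, 10, [256, 128]) maps a batch of flattened
# 28x28 images to 10 log-probabilities, e.g.
#   net = Network(784, 10, [256, 128])
#   out = net(torch.randn(4, 784))   # out.shape == torch.Size([4, 10])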
class EarlyStopping:
"""Save the best model during the trainning and finish trainning
if there is decrease for valid accuracy in delay epochs"""
def __init__(self, delay, checkpoint_save="save.pth"):
# path save chekpoint for the best model during training
self.checkpoint_save = checkpoint_save
# delay in number of epochs
self.delay = delay
# count continuous decrease in accuracy
self.count_down = 0
# record prev valid accuracy
self.prev_valid_accuracy = None
# record the best accuracy to save the best model
self.best_accuracy = None
def track(self, valid_accuracy, model, train_loss, valid_loss):
self.model = model
self.train_loss = train_loss
self.valid_loss = valid_loss
self.valid_accuracy = valid_accuracy
if self.prev_valid_accuracy != None and valid_accuracy <= self.prev_valid_accuracy:
print("Warning: there is deacrease in valid accuracy")
self.count_down += 1
else:
self.count_down = 0
if self.best_accuracy == None or valid_accuracy > self.best_accuracy:
print("Winning: better model")
# save the best model
torch.save(model.state_dict(), self.checkpoint_save)
# update the best accuracy metric
self.best_accuracy = valid_accuracy
# update prev_valid_accuracy
self.prev_valid_accuracy = valid_accuracy
if self.count_down == self.delay:
# Finish training, there is continuous decreasing in accuracy
return True
else:
return False
def get_the_best_model(self):
state_dict = torch.load(self.checkpoint_save)
self.model.load_state_dict(state_dict)
return self.model
def measurements(self):
return self.train_loss, self.valid_loss, self.valid_accuracy
# Get classifier based on feature detectors (pre_trainned CNN model)
def get_classifier(pre_trained_model, hidden_sizes, output_size, dropout_p=0.2):
# freeze the pre_trained parameters
for param in pre_trained_model.parameters():
param.requires_grad = False
input_size = pre_trained_model.classifier[0].state_dict()[
'weight'].shape[1]
print(f"input_size of features to the classifier: {input_size}")
print(f'hidden_sizes in classifier: {hidden_sizes}')
print(f"output_size of classes from the classifier: {output_size}")
print()
# define The network
classifier = Network(input_size, output_size, hidden_sizes, dropout_p)
# transfer learning
pre_trained_model.classifier = classifier
return pre_trained_model
# TODO: Get preds
def get_predictions(log_ps):
with torch.no_grad():
# get exp of log to get probabilities
ps = torch.exp(log_ps)
# get top_p and top_class
top_p, top_class = ps.topk(1, dim=1)
return top_class
# TODO: Make validation/test inference function
def validation_test(model, validation_test_loader, criterion, gpu_choice):
"""make validation or test inference based on the data"""
with torch.no_grad():
# find what is the existed device
device = torch.device(
"cuda:0" if gpu_choice and torch.cuda.is_available() else "cpu")
model.to(device)
# intial helping variables
accum_accuracy = 0
running_loss = 0
# iterate over the data
for images, labels in validation_test_loader:
labels, images = labels.to(device), images.to(device)
# forward pass
log_ps = model(images)
# get predictions
preds = get_predictions(log_ps)
# get loss
loss = criterion(log_ps, labels)
running_loss += loss.item()
accum_accuracy += accuracy_score(labels.cpu(), preds.cpu())
# get running_loss, accuracy metrics
return running_loss / len(validation_test_loader), accum_accuracy / len(validation_test_loader)
# TODO: define Train function
def train(model, optimizer, criterion, early_stopping, trainloader, validloader, gpu, epochs=5, print_every=40):
# find what is the existed device
device = torch.device(
"cuda:0" if torch.cuda.is_available() and gpu else "cpu")
model.to(device)
# intial helping variables
train_loss_container = []
valid_loss_container = []
steps = 0
# loop over epochs
for e in range(epochs):
# intial helping variables
running_loss = 0
# loop over batchs of trainloader
for images, labels in trainloader:
model.train()
steps += 1
labels, images = labels.to(device), images.to(device)
# clear gradient
optimizer.zero_grad()
# forward pass
log_ps = model(images)
# get loss
loss = criterion(log_ps, labels)
# backward pass
loss.backward()
# update weights by making step for optimizer
optimizer.step()
running_loss += loss.item()
# valid condition every print_every
if steps % print_every == 0:
model.eval()
train_loss = running_loss / print_every
valid_loss, valid_accuracy = validation_test(
model, validloader, criterion, gpu)
train_loss_container.append(train_loss)
valid_loss_container.append(valid_loss)
running_loss = 0
# print_results
print(f"{e+1}/{epochs} .. train_loss: {(train_loss) :0.3f}\
.. valid_loss: {(valid_loss) :0.3f} .. valid_accuracy: {(valid_accuracy * 100) :0.3f}%")
if early_stopping.track(valid_accuracy, model, train_loss, valid_loss):
print("Early stopping")
print("Having the best model")
model = early_stopping.get_the_best_model()
train_loss, valid_loss, valid_accuracy = early_stopping.measurements()
print(
f".. train_loss: {(train_loss) :0.3f}.. valid_loss: {(valid_loss) :0.3f} .. valid_accuracy: {(valid_accuracy * 100) :0.3f}%")
break
# plot train_loss and valid_loss
plt.plot(train_loss_container, label="Train loss")
plt.plot(valid_loss_container, label="Valid loss")
plt.legend()
plt.show()
print("Having the best model")
model = early_stopping.get_the_best_model()
train_loss, valid_loss, valid_accuracy = early_stopping.measurements()
print(
f".. train_loss: {(train_loss) :0.3f}.. valid_loss: {(valid_loss) :0.3f} .. valid_accuracy: {(valid_accuracy * 100) :0.3f}%")
return train_loss, valid_loss, valid_accuracy
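# Hypothetical end-to-end usage sketch (illustration only; the vgg16 backbone,
# layer sizes, learning rate, checkpoint name and the trainloader/validloader
# objects are assumptions, not defined in this script):
#   pre_trained = models.vgg16(pretrained=True)
#   model = get_classifier(pre_trained, hidden_sizes=[4096, 1024], output_size=102)
#   criterion = nn.NLLLoss()  # forward() returns log-probabilities
#   optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
#   early_stopping = EarlyStopping(delay=3, checkpoint_save="best_model.pth")
#   train(model, optimizer, criterion, early_stopping, trainloader, validloader, gpu=True)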
|
the-stack_106_25294 | import os
import time
from compress import _compress, _decompress, encode_latents
from train import _train
from pathlib import Path
import sys
import shutil
##################
# Hyperparems #
##################
args = {
"model": "mbt2018",
"checkpoint_dir": "checkpoints",
"results_dir": "results",
"input_file": "images/0001.png",
"lmbda": 0.01,
"num_filters": 160,
"model_file": "my_model",
}
train_args = {
"patch_size": 256,
"batch_size": 8,
"n_steps": 8000,
}
# def run(mode, input_file, verbose=False):
# flags = "--num_filters {num_filters} {verbose} --checkpoint_dir {checkpoint_dir}".format(
# num_filters=args["num_filters"],
# checkpoint_dir=args["checkpoint_dir"],
# verbose="--verbose" if verbose else "",
# )
# results_flag = (
# "--results_dir {}".format(args["results_dir"]) if mode == "compress" else ""
# )
# command = "python {model}.py {flags} {mode} {model_file} {input_file} {results_flag}".format(
# model=args["model"],
# flags=flags,
# model_file=args["model_file"],
# mode=mode,
# input_file=input_file,
# results_flag=results_flag,
# )
# os.system(command)
def decompress(input_file, activation, log_folder):
"""
:param input_file: name.tfci file
"""
runname = args["model_file"]
checkpoint_dir = args["checkpoint_dir"]
num_filters = args["num_filters"]
output_file = input_file + ".png"
_decompress(runname, input_file, output_file, log_folder,
checkpoint_dir, num_filters, activation)
def compress(input_file, activation, results_dir):
"""
    :param input_file: single input image or np array of batch of images with shape (num_imgs, H, W, 3) and type(uint8)
    :return: paths of the compressed .tfci file and the results .npz file
"""
runname = args["model_file"]
checkpoint_dir = args["checkpoint_dir"]
num_filters = args["num_filters"]
output_file = os.path.join(results_dir, os.path.basename(input_file) + ".tfci")
_compress(
runname, input_file, output_file, checkpoint_dir, results_dir, num_filters, activation
)
compressed_file = os.path.join(results_dir, os.path.basename(input_file) + ".tfci")
results_file = "rd-{model_file}-file={input_file}.npz".format(
model_file=args["model_file"], input_file=input_file
)
results_file = os.path.join(results_dir, results_file)
return compressed_file, results_file
def train():
_train(
patch_size=train_args["patch_size"],
batch_size=train_args["batch_size"],
num_filters=args["num_filters"],
lmbda=args["lmbda"],
last_step=train_args["n_steps"],
)
def main(input_file, activation, log_folder):
# create result folders
Path(os.path.join(log_folder, "layer_outputs")).mkdir(parents=True, exist_ok=True)
Path(os.path.join(log_folder, "layer_outputs", "encoder")).mkdir(parents=True, exist_ok=True)
Path(os.path.join(log_folder, "layer_outputs", "decoder")).mkdir(parents=True, exist_ok=True)
Path(os.path.join(log_folder, "layer_outputs", "encoder", "npy_files")).mkdir(parents=True, exist_ok=True)
Path(os.path.join(log_folder, "layer_outputs", "decoder", "npy_files")).mkdir(parents=True, exist_ok=True)
Path(os.path.join(log_folder, "layer_outputs", "encoder", "json_files")).mkdir(parents=True, exist_ok=True)
Path(os.path.join(log_folder, "layer_outputs", "decoder", "json_files")).mkdir(parents=True, exist_ok=True)
# check for checkpoints
if not Path("checkpoints").exists():
raise ValueError(
"Checkpoint directory does not exist. Please download the trained weights and save them under 'checkpoints'.")
# save input image copy
copy_img = os.path.join(log_folder, os.path.basename(input_file))
shutil.copy(input_file, copy_img)
# S T A R T
start_time = time.time()
print(f">>> compressing {input_file} ...")
compressed_file, results_file = compress(input_file, activation, log_folder)
intermediate_time = time.time()
compress_time = intermediate_time - start_time
print(f">>> compressing {input_file} done in {compress_time} seconds")
print(f"<<< decompressing {compressed_file} ...")
decompress(compressed_file, activation, log_folder)
stop_time = time.time()
decompress_time = stop_time - intermediate_time
print(
f"<<< decompressing {compressed_file} done in {decompress_time} seconds")
total_time = stop_time - start_time
print(f"compressing and decompressing took {total_time} seconds")
print(
f"compressing took {(compress_time / total_time) * 100}% of the total time")
print(
f"decompressing took {(decompress_time / total_time) * 100}% of the total time"
)
if __name__ == "__main__":
if sys.argv[1] == 'True':
activation = True
name = "-w-activation"
else:
activation = False
name = "-wo-activation"
##################
# Compresssion #
##################
my_picture = sys.argv[3]
assert(os.path.exists(my_picture))
log_folder = os.path.join("results", sys.argv[2]+name)
main(my_picture, activation, log_folder)
##################
# Latents #
##################
# latent_loc = 'results/latents-my_model-input=chess.png.npz'
# encode_latents(latent_loc,
# args['num_filters'],
# args['checkpoint_dir'],
# args['model_file'],
# seperate=True)
##################
# Training #
##################
# train()
|
the-stack_106_25295 | """
Helper functions for toil-luigi interfacing
"""
import bio
import math
import argparse
from toil.fileStore import FileID
from bd2k.util.humanize import human2bytes
###
# Helper functions for luigi-toil pipelines
###
def load_fasta_from_filestore(job, fasta_file_ids, prefix='genome', upper=False):
"""
Convenience function that will load a fasta from the fileStore and return the local path to it. This works with
the pyfasta module to load all of the required files.
:param job: current job.
:param fasta_file_ids: list of fileStore file ID for the fasta, gdx, and flat file.
:param prefix: local file path prefix
:param upper: force all entries to upper case
:return: open pyfasta Fasta record pointing to the file.
"""
fasta_local_path = '{}.fasta'.format(prefix)
fasta_file_id, gdx_file_id, flat_file_id = fasta_file_ids
job.fileStore.readGlobalFile(fasta_file_id, fasta_local_path)
job.fileStore.readGlobalFile(gdx_file_id, '{}.fasta.gdx'.format(prefix))
job.fileStore.readGlobalFile(flat_file_id, '{}.fasta.flat'.format(prefix))
return bio.get_sequence_dict(fasta_local_path, upper=upper)
def write_fasta_to_filestore(toil, fasta_local_path):
"""
Convenience function that loads a fasta and its associated gdx/flat file into the fileStore.
Assumes that the paths are consistent with the requirements (i.e. $path.gdx and $path.flat)
:param toil: Toil context manager
:param fasta_local_path: Path to local fasta to load.
:return: List of fileStore IDs for fasta, fasta_gdx, fasta_flat
"""
fasta_file_id = FileID.forPath(toil.importFile('file:///' + fasta_local_path), fasta_local_path)
gdx_file_id = FileID.forPath(toil.importFile('file:///' + fasta_local_path + '.gdx'), fasta_local_path + '.gdx')
flat_file_id = FileID.forPath(toil.importFile('file:///' + fasta_local_path + '.flat'), fasta_local_path + '.flat')
return fasta_file_id, gdx_file_id, flat_file_id
def find_total_disk_usage(input_file_ids, buffer='2G', round='2G'):
"""
    Takes an input_file_ids namespace, dict, or list, finds all members that are FileID objects,
    and sums their sizes.
    Based on buffer and round, returns an integer value of disk usage in bytes to pass to a toil job.
:param input_file_ids: A namespace object with an arbitrary nesting of possible file ID values
:param buffer: Additional space buffer requested. Human readable parsed by human2bytes
:param round: amount to round up. Human readable parsed by human2bytes
:return: integer
"""
def roundup(x, base):
return int(math.ceil(x / float(base))) * base
def descend_object(obj):
if isinstance(obj, dict):
for item in obj.values():
for v in descend_object(item):
yield v
elif isinstance(obj, list):
for item in obj:
for v in descend_object(item):
yield v
elif isinstance(obj, argparse.Namespace):
for item in obj.__dict__.values():
for v in descend_object(item):
yield v
elif isinstance(obj, FileID):
yield obj
tot = sum([x.size for x in descend_object(input_file_ids)])
return roundup(tot, human2bytes(round)) + human2bytes(buffer)
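# A hypothetical worked example of the behaviour above (illustration only):
# with FileIDs totalling ~3 GiB and the defaults buffer='2G', round='2G',
# the total is rounded up to 4 GiB and the 2 GiB buffer is added, so the
# job requests 6 GiB of disk.
if __name__ == '__main__':
    _three_gib = 3 * 1024 ** 3
    _rounded = int(math.ceil(_three_gib / float(human2bytes('2G')))) * human2bytes('2G')
    print(_rounded + human2bytes('2G'))  # 6442450944 bytes == 6 GiB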
|
the-stack_106_25296 | # -*- coding: utf-8 -*-
"""
Module for Firing Events via PagerDuty
.. versionadded:: 2014.1.0
:configuration: This module can be used by specifying the name of a
configuration profile in the minion config, minion pillar, or master
config.
For example:
.. code-block:: yaml
my-pagerduty-account:
pagerduty.api_key: F3Rbyjbve43rfFWf2214
pagerduty.subdomain: mysubdomain
"""
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
import salt.utils.functools
import salt.utils.json
import salt.utils.pagerduty
# Import 3rd-party libs
from salt.ext import six
def __virtual__():
"""
No dependencies outside of what Salt itself requires
"""
return True
def list_services(profile=None, api_key=None):
"""
List services belonging to this account
CLI Example:
salt myminion pagerduty.list_services my-pagerduty-account
"""
return salt.utils.pagerduty.list_items(
"services", "name", __salt__["config.option"](profile), api_key, opts=__opts__
)
def list_incidents(profile=None, api_key=None):
"""
List incidents belonging to this account
CLI Example:
salt myminion pagerduty.list_incidents my-pagerduty-account
"""
return salt.utils.pagerduty.list_items(
"incidents", "id", __salt__["config.option"](profile), api_key, opts=__opts__
)
def list_users(profile=None, api_key=None):
"""
List users belonging to this account
CLI Example:
salt myminion pagerduty.list_users my-pagerduty-account
"""
return salt.utils.pagerduty.list_items(
"users", "id", __salt__["config.option"](profile), api_key, opts=__opts__
)
def list_schedules(profile=None, api_key=None):
"""
List schedules belonging to this account
CLI Example:
salt myminion pagerduty.list_schedules my-pagerduty-account
"""
return salt.utils.pagerduty.list_items(
"schedules", "id", __salt__["config.option"](profile), api_key, opts=__opts__
)
def list_windows(profile=None, api_key=None):
"""
List maintenance windows belonging to this account
CLI Example:
salt myminion pagerduty.list_windows my-pagerduty-account
salt myminion pagerduty.list_maintenance_windows my-pagerduty-account
"""
return salt.utils.pagerduty.list_items(
"maintenance_windows",
"id",
__salt__["config.option"](profile),
api_key,
opts=__opts__,
)
# The long version, added for consistency
list_maintenance_windows = salt.utils.functools.alias_function(
list_windows, "list_maintenance_windows"
)
def list_policies(profile=None, api_key=None):
"""
List escalation policies belonging to this account
CLI Example:
salt myminion pagerduty.list_policies my-pagerduty-account
salt myminion pagerduty.list_escalation_policies my-pagerduty-account
"""
return salt.utils.pagerduty.list_items(
"escalation_policies",
"id",
__salt__["config.option"](profile),
api_key,
opts=__opts__,
)
# The long version, added for consistency
list_escalation_policies = salt.utils.functools.alias_function(
list_policies, "list_escalation_policies"
)
def create_event(
service_key=None, description=None, details=None, incident_key=None, profile=None
):
"""
Create an event in PagerDuty. Designed for use in states.
CLI Example:
.. code-block:: yaml
salt myminion pagerduty.create_event <service_key> <description> <details> \
profile=my-pagerduty-account
The following parameters are required:
service_key
This key can be found by using pagerduty.list_services.
description
This is a short description of the event.
details
This can be a more detailed description of the event.
profile
This refers to the configuration profile to use to connect to the
PagerDuty service.
"""
trigger_url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
if isinstance(details, six.string_types):
details = salt.utils.yaml.safe_load(details)
if isinstance(details, six.string_types):
details = {"details": details}
ret = salt.utils.json.loads(
salt.utils.pagerduty.query(
method="POST",
profile_dict=__salt__["config.option"](profile),
api_key=service_key,
data={
"service_key": service_key,
"incident_key": incident_key,
"event_type": "trigger",
"description": description,
"details": details,
},
url=trigger_url,
opts=__opts__,
)
)
return ret
|
the-stack_106_25297 | """distutils.command.check
Implements the Distutils 'check' command.
"""
__revision__ = "$Id: check.py 85197 2010-10-03 14:18:09Z tarek.ziade $"
from distutils.core import Command
from distutils.errors import DistutilsSetupError
try:
# docutils is installed
from docutils.utils import Reporter
from docutils.parsers.rst import Parser
from docutils import frontend
from docutils import nodes
from io import StringIO
class SilentReporter(Reporter):
def __init__(self, source, report_level, halt_level, stream=None,
debug=0, encoding='ascii', error_handler='replace'):
self.messages = []
Reporter.__init__(self, source, report_level, halt_level, stream,
debug, encoding, error_handler)
def system_message(self, level, message, *children, **kwargs):
self.messages.append((level, message, children, kwargs))
HAS_DOCUTILS = True
except Exception:
# Catch all exceptions because exceptions besides ImportError probably
# indicate that docutils is not ported to Py3k.
HAS_DOCUTILS = False
class check(Command):
"""This command checks the meta-data of the package.
"""
description = ("perform some checks on the package")
user_options = [('metadata', 'm', 'Verify meta-data'),
('restructuredtext', 'r',
('Checks if long string meta-data syntax '
'are reStructuredText-compliant')),
('strict', 's',
'Will exit with an error if a check fails')]
boolean_options = ['metadata', 'restructuredtext', 'strict']
def initialize_options(self):
"""Sets default values for options."""
self.restructuredtext = 0
self.metadata = 1
self.strict = 0
self._warnings = 0
def finalize_options(self):
pass
def warn(self, msg):
"""Counts the number of warnings that occurs."""
self._warnings += 1
return Command.warn(self, msg)
def run(self):
"""Runs the command."""
# perform the various tests
if self.metadata:
self.check_metadata()
if self.restructuredtext:
if HAS_DOCUTILS:
self.check_restructuredtext()
elif self.strict:
raise DistutilsSetupError('The docutils package is needed.')
# let's raise an error in strict mode, if we have at least
# one warning
if self.strict and self._warnings > 0:
raise DistutilsSetupError('Please correct your package.')
def check_metadata(self):
"""Ensures that all required elements of meta-data are supplied.
name, version, URL, (author and author_email) or
(maintainer and maintainer_email)).
Warns if any are missing.
"""
metadata = self.distribution.metadata
missing = []
for attr in ('name', 'version', 'url'):
if not (hasattr(metadata, attr) and getattr(metadata, attr)):
missing.append(attr)
if missing:
self.warn("missing required meta-data: %s" % ', '.join(missing))
if metadata.author:
if not metadata.author_email:
self.warn("missing meta-data: if 'author' supplied, " +
"'author_email' must be supplied too")
elif metadata.maintainer:
if not metadata.maintainer_email:
self.warn("missing meta-data: if 'maintainer' supplied, " +
"'maintainer_email' must be supplied too")
else:
self.warn("missing meta-data: either (author and author_email) " +
"or (maintainer and maintainer_email) " +
"must be supplied")
def check_restructuredtext(self):
"""Checks if the long string fields are reST-compliant."""
data = self.distribution.get_long_description()
for warning in self._check_rst_data(data):
line = warning[-1].get('line')
if line is None:
warning = warning[1]
else:
warning = '%s (line %s)' % (warning[1], line)
self.warn(warning)
def _check_rst_data(self, data):
"""Returns warnings when the provided data doesn't compile."""
source_path = StringIO()
parser = Parser()
settings = frontend.OptionParser().get_default_values()
settings.tab_width = 4
settings.pep_references = None
settings.rfc_references = None
reporter = SilentReporter(source_path,
settings.report_level,
settings.halt_level,
stream=settings.warning_stream,
debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
try:
parser.parse(data, document)
except AttributeError:
reporter.messages.append((-1, 'Could not finish the parsing.',
'', {}))
return reporter.messages
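# Hypothetical command-line usage of this command (illustration only):
#   python setup.py check --metadata --restructuredtext --strict
# exits with an error if required meta-data is missing or the long
# description is not valid reStructuredText.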
|
the-stack_106_25300 | # Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import glob
import hashlib
import json
import logging
import os
import random
import re
import shutil
import sys
import time
import unittest
from functools import wraps
if __name__ == '__main__':
raise Exception('do not run this file directly; do something like: tests/runner')
from tools.shared import try_delete, PIPE
from tools.shared import PYTHON, EMCC, EMAR
from tools.utils import WINDOWS, MACOS
from tools import shared, building, config
from runner import RunnerCore, path_from_root, requires_native_clang, test_file
from runner import skip_if, needs_dylink, no_windows, is_slow_test, create_file, parameterized
from runner import env_modify, with_env_modify, disabled, node_pthreads
from runner import NON_ZERO, WEBIDL_BINDER
import clang_native
# decorators for limiting which modes a test can run in
logger = logging.getLogger("test_core")
def wasm_simd(f):
def decorated(self):
if not config.V8_ENGINE or config.V8_ENGINE not in config.JS_ENGINES:
self.skipTest('wasm simd only supported in d8 for now')
if not self.is_wasm():
self.skipTest('wasm2js only supports MVP for now')
if '-O3' in self.emcc_args:
self.skipTest('SIMD tests are too slow with -O3 in the new LLVM pass manager, https://github.com/emscripten-core/emscripten/issues/13427')
self.emcc_args.append('-msimd128')
self.emcc_args.append('-fno-lax-vector-conversions')
self.v8_args.append('--experimental-wasm-simd')
self.js_engines = [config.V8_ENGINE]
f(self)
return decorated
def bleeding_edge_wasm_backend(f):
def decorated(self):
if not config.V8_ENGINE or config.V8_ENGINE not in config.JS_ENGINES:
self.skipTest('only works in d8 for now')
if not self.is_wasm():
self.skipTest('wasm2js only supports MVP for now')
self.js_engines = [config.V8_ENGINE]
f(self)
return decorated
def also_with_wasm_bigint(f):
def decorated(self):
self.set_setting('WASM_BIGINT', 0)
f(self)
if self.is_wasm():
self.set_setting('WASM_BIGINT')
self.node_args.append('--experimental-wasm-bigint')
self.js_engines = [config.NODE_JS]
f(self)
return decorated
# without EMTEST_ALL_ENGINES set we only run tests in a single VM by
# default. in some tests we know that cross-VM differences may happen and
# so are worth testing, and they should be marked with this decorator
def all_engines(f):
def decorated(self):
old = self.use_all_engines
self.use_all_engines = True
try:
f(self)
finally:
self.use_all_engines = old
return decorated
# Tests exception handling in emscripten exception handling mode, and if
# possible, new wasm EH mode.
def with_both_exception_handling(f):
assert callable(f)
def metafunc(self, native_exceptions):
if native_exceptions:
# Wasm EH is currently supported only in wasm backend and V8
if not self.is_wasm():
self.skipTest('wasm2js does not support wasm exceptions')
if not config.V8_ENGINE or config.V8_ENGINE not in config.JS_ENGINES:
self.skipTest('d8 required to run wasm eh tests')
self.emcc_args.append('-fwasm-exceptions')
self.v8_args.append('--experimental-wasm-eh')
self.js_engines = [config.V8_ENGINE]
f(self)
else:
self.set_setting('DISABLE_EXCEPTION_CATCHING', 0)
f(self)
metafunc._parameterize = {'': (False,),
'wasm_eh': (True,)}
return metafunc
def no_wasm2js(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm2js', note)
return decorated
# Async wasm compilation can't work in some tests, they are set up synchronously
def sync(f):
assert callable(f)
def decorated(self, *args, **kwargs):
self.set_setting('WASM_ASYNC_COMPILATION', 0) # test is set up synchronously
f(self, *args, **kwargs)
return decorated
def also_with_noderawfs(func):
def decorated(self):
orig_args = self.emcc_args.copy()
func(self)
print('noderawfs')
self.emcc_args = orig_args + ['-DNODERAWFS']
self.set_setting('NODERAWFS')
self.js_engines = [config.NODE_JS]
func(self)
return decorated
def can_do_standalone(self):
return self.is_wasm() and \
self.get_setting('STACK_OVERFLOW_CHECK', 0) < 2 and \
not self.get_setting('MINIMAL_RUNTIME') and \
not self.get_setting('SAFE_HEAP') and \
'-fsanitize=address' not in self.emcc_args
# Impure means a test that cannot run in a wasm VM yet, as it is not 100%
# standalone. We can still run them with the JS code though.
def also_with_standalone_wasm(wasm2c=False, impure=False):
def decorated(func):
def metafunc(self, standalone):
if not standalone:
func(self)
else:
if can_do_standalone(self):
self.set_setting('STANDALONE_WASM')
# we will not legalize the JS ffi interface, so we must use BigInt
# support in order for JS to have a chance to run this without trapping
# when it sees an i64 on the ffi.
self.set_setting('WASM_BIGINT')
# if we are impure, disallow all wasm engines
if impure:
self.wasm_engines = []
self.js_engines = [config.NODE_JS]
self.node_args.append('--experimental-wasm-bigint')
func(self)
if wasm2c:
print('wasm2c')
self.set_setting('WASM2C')
self.wasm_engines = []
func(self)
metafunc._parameterize = {'': (False,),
'standalone': (True,)}
return metafunc
return decorated
# A simple check whether the compiler arguments cause optimization.
def is_optimizing(args):
return '-O' in str(args) and '-O0' not in args
def no_optimize(note=''):
assert not callable(note)
def decorator(func):
assert callable(func)
def decorated(self):
if is_optimizing(self.emcc_args):
self.skipTest(note)
func(self)
return decorated
return decorator
def needs_make(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip('Tool not available on Windows bots (%s)' % note)
return lambda f: f
def no_asan(note):
assert not callable(note)
def decorator(f):
assert callable(f)
@wraps(f)
def decorated(self, *args, **kwargs):
if '-fsanitize=address' in self.emcc_args:
self.skipTest(note)
f(self, *args, **kwargs)
return decorated
return decorator
def no_lsan(note):
assert not callable(note)
def decorator(f):
assert callable(f)
@wraps(f)
def decorated(self, *args, **kwargs):
if '-fsanitize=leak' in self.emcc_args:
self.skipTest(note)
f(self, *args, **kwargs)
return decorated
return decorator
def make_no_decorator_for_setting(name):
def outer_decorator(note):
assert not callable(note)
def decorator(f):
assert callable(f)
@wraps(f)
def decorated(self, *args, **kwargs):
if (name + '=1') in self.emcc_args or self.get_setting(name):
self.skipTest(note)
f(self, *args, **kwargs)
return decorated
return decorator
return outer_decorator
no_minimal_runtime = make_no_decorator_for_setting('MINIMAL_RUNTIME')
no_safe_heap = make_no_decorator_for_setting('SAFE_HEAP')
class TestCoreBase(RunnerCore):
def is_wasm2js(self):
return self.get_setting('WASM') == 0
# Use closure in some tests for some additional coverage
def maybe_closure(self):
if '--closure=1' not in self.emcc_args:
if '-g' not in self.emcc_args and ('-O2' in self.emcc_args or '-Os' in self.emcc_args):
self.emcc_args += ['--closure=1']
logger.debug('using closure compiler..')
return True
return False
def assertStartswith(self, output, prefix):
self.assertEqual(prefix, output[:len(prefix)])
def verify_in_strict_mode(self, filename):
with open(filename) as infile:
js = infile.read()
filename += '.strict.js'
with open(filename, 'w') as outfile:
outfile.write('"use strict";\n' + js)
self.run_js(filename)
def do_core_test(self, testname, **kwargs):
self.do_run_in_out_file_test('core', testname, **kwargs)
def get_bullet_library(self, use_cmake):
if use_cmake:
configure_commands = ['cmake', '.']
configure_args = ['-DBUILD_DEMOS=OFF', '-DBUILD_EXTRAS=OFF', '-DUSE_GLUT=OFF']
# Depending on whether 'configure' or 'cmake' is used to build, Bullet
# places output files in different directory structures.
generated_libs = [os.path.join('src', 'BulletDynamics', 'libBulletDynamics.a'),
os.path.join('src', 'BulletCollision', 'libBulletCollision.a'),
os.path.join('src', 'LinearMath', 'libLinearMath.a')]
else:
configure_commands = ['sh', './configure']
# Force a nondefault --host= so that the configure script will interpret
# that we are doing cross-compilation
# and skip attempting to run the generated executable with './a.out',
# which would fail since we are building a .js file.
configure_args = ['--disable-shared', '--host=i686-pc-linux-gnu',
'--disable-demos', '--disable-dependency-tracking']
generated_libs = [os.path.join('src', '.libs', 'libBulletDynamics.a'),
os.path.join('src', '.libs', 'libBulletCollision.a'),
os.path.join('src', '.libs', 'libLinearMath.a')]
return self.get_library(os.path.join('third_party', 'bullet'), generated_libs,
configure=configure_commands,
configure_args=configure_args,
cache_name_extra=configure_commands[0])
@also_with_standalone_wasm()
def test_hello_world(self):
self.do_core_test('test_hello_world.c')
# must not emit this unneeded internal thing
self.assertNotContained('EMSCRIPTEN_GENERATED_FUNCTIONS', open('test_hello_world.js').read())
@sync
def test_wasm_synchronous_compilation(self):
self.set_setting('STRICT_JS')
self.do_core_test('test_hello_world.c')
@also_with_standalone_wasm()
def test_hello_argc(self):
self.do_core_test('test_hello_argc.c')
def test_intvars(self):
self.do_core_test('test_intvars.cpp')
def test_sintvars(self):
self.do_core_test('test_sintvars.c')
def test_int53(self):
self.emcc_args += ['-s', 'DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=[$convertI32PairToI53,$convertU32PairToI53,$readI53FromU64,$readI53FromI64,$writeI53ToI64,$writeI53ToI64Clamped,$writeI53ToU64Clamped,$writeI53ToI64Signaling,$writeI53ToU64Signaling]']
self.do_core_test('test_int53.c')
def test_i64(self):
self.do_core_test('test_i64.c')
def test_i64_2(self):
self.do_core_test('test_i64_2.cpp')
def test_i64_3(self):
self.do_core_test('test_i64_3.cpp')
def test_i64_4(self):
# stuff that also needs sign corrections
self.do_core_test('test_i64_4.c')
def test_i64_b(self):
self.do_core_test('test_i64_b.cpp')
def test_i64_cmp(self):
self.do_core_test('test_i64_cmp.cpp')
def test_i64_cmp2(self):
self.do_core_test('test_i64_cmp2.c')
def test_i64_double(self):
self.do_core_test('test_i64_double.cpp')
def test_i64_umul(self):
self.do_core_test('test_i64_umul.c')
@also_with_standalone_wasm()
def test_i64_precise(self):
self.do_core_test('test_i64_precise.c')
def test_i64_precise_needed(self):
self.do_core_test('test_i64_precise_needed.c')
def test_i64_llabs(self):
self.do_core_test('test_i64_llabs.c')
def test_i64_zextneg(self):
self.do_core_test('test_i64_zextneg.c')
def test_i64_7z(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_i64_7z.c', args=['hallo'])
def test_i64_i16(self):
self.do_core_test('test_i64_i16.c')
def test_i64_qdouble(self):
self.do_core_test('test_i64_qdouble.c')
def test_i64_varargs(self):
self.do_core_test('test_i64_varargs.c', args='waka fleefl asdfasdfasdfasdf'.split())
@no_wasm2js('wasm_bigint')
def test_i64_invoke_bigint(self):
self.set_setting('WASM_BIGINT')
self.emcc_args += ['-fexceptions']
self.node_args += ['--experimental-wasm-bigint']
self.do_core_test('test_i64_invoke_bigint.cpp', js_engines=[config.NODE_JS])
def test_vararg_copy(self):
self.do_run_in_out_file_test('va_arg', 'test_va_copy.c')
def test_llvm_fabs(self):
self.do_core_test('test_llvm_fabs.c')
def test_double_varargs(self):
self.do_core_test('test_double_varargs.c')
def test_trivial_struct_varargs(self):
self.do_core_test('test_trivial_struct_varargs.c')
def test_struct_varargs(self):
self.do_core_test('test_struct_varargs.c')
def test_zero_struct_varargs(self):
self.do_core_test('test_zero_struct_varargs.c')
def zzztest_nested_struct_varargs(self):
self.do_core_test('test_nested_struct_varargs.c')
def test_i32_mul_precise(self):
self.do_core_test('test_i32_mul_precise.c')
def test_i16_emcc_intrinsic(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_i16_emcc_intrinsic.c')
def test_double_i64_conversion(self):
self.do_core_test('test_double_i64_conversion.c')
def test_float32_precise(self):
self.do_core_test('test_float32_precise.c')
def test_negative_zero(self):
self.do_core_test('test_negative_zero.c')
def test_literal_negative_zero(self):
self.do_core_test('test_literal_negative_zero.c')
@also_with_standalone_wasm()
def test_bswap64(self):
self.do_core_test('test_bswap64.cpp')
def test_sha1(self):
self.do_runf(test_file('sha1.c'), 'SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6')
def test_wasm32_unknown_emscripten(self):
# No other configuration is supported, so always run this.
self.do_runf(test_file('wasm32-unknown-emscripten.c'), '')
def test_cube2md5(self):
self.emcc_args += ['--embed-file', 'cube2md5.txt']
shutil.copyfile(test_file('cube2md5.txt'), 'cube2md5.txt')
self.do_run_from_file(test_file('cube2md5.cpp'), test_file('cube2md5.ok'), assert_returncode=NON_ZERO)
@also_with_standalone_wasm(wasm2c=True)
@needs_make('make')
def test_cube2hash(self):
# A good test of i64 math
self.do_run('// empty file', 'Usage: hashstring <seed>',
libraries=self.get_library(os.path.join('third_party', 'cube2hash'), ['libcube2hash.a'], configure=None),
includes=[test_file('third_party', 'cube2hash')], assert_returncode=NON_ZERO)
for text, output in [('fleefl', '892BDB6FD3F62E863D63DA55851700FDE3ACF30204798CE9'),
('fleefl2', 'AA2CC5F96FC9D540CA24FDAF1F71E2942753DB83E8A81B61'),
('64bitisslow', '64D8470573635EC354FEE7B7F87C566FCAF1EFB491041670')]:
self.do_run('src.js', 'hash value: ' + output, args=[text], no_build=True)
def test_unaligned(self):
self.skipTest('LLVM marks the reads of s as fully aligned, making this test invalid')
src = r'''
#include <stdio.h>
struct S {
double x;
int y;
};
int main() {
// the 64-bit value here will not be 8-byte aligned
S s0[3] = { {0x12a751f430142, 22}, {0x17a5c85bad144, 98}, {1, 1}};
char buffer[10*sizeof(S)];
int b = int(buffer);
S *s = (S*)(b + 4-b%8);
s[0] = s0[0];
s[1] = s0[1];
s[2] = s0[2];
printf("*%d : %d : %d\n", sizeof(S), ((unsigned int)&s[0]) % 8 != ((unsigned int)&s[1]) % 8,
((unsigned int)&s[1]) - ((unsigned int)&s[0]));
s[0].x++;
s[0].y++;
s[1].x++;
s[1].y++;
printf("%.1f,%d,%.1f,%d\n", s[0].x, s[0].y, s[1].x, s[1].y);
return 0;
}
'''
# TODO: A version of this with int64s as well
self.do_run(src, '*12 : 1 : 12\n328157500735811.0,23,416012775903557.0,99\n')
return # TODO: continue to the next part here
# Test for undefined behavior in C. This is not legitimate code, but does exist
src = r'''
#include <stdio.h>
int main()
{
int x[10];
char *p = (char*)&x[0];
p++;
short *q = (short*)p;
*q = 300;
printf("*%d:%d*\n", *q, ((int)q)%2);
int *r = (int*)p;
*r = 515559;
printf("*%d*\n", *r);
long long *t = (long long*)p;
*t = 42949672960;
printf("*%lld*\n", *t);
return 0;
}
'''
try:
self.do_run(src, '*300:1*\n*515559*\n*42949672960*\n')
except Exception as e:
assert 'must be aligned' in str(e), e # expected to fail without emulation
def test_align64(self):
src = r'''
#include <stdio.h>
// inspired by poppler
enum Type {
A = 10,
B = 20
};
struct Object {
Type type;
union {
int intg;
double real;
char *name;
};
};
struct Principal {
double x;
Object a;
double y;
};
int main(int argc, char **argv)
{
int base = argc-1;
Object *o = NULL;
printf("%zu,%zu\n", sizeof(Object), sizeof(Principal));
printf("%d,%d,%d,%d\n", (int)&o[base].type, (int)&o[base].intg, (int)&o[base].real, (int)&o[base].name);
printf("%d,%d,%d,%d\n", (int)&o[base+1].type, (int)&o[base+1].intg, (int)&o[base+1].real, (int)&o[base+1].name);
Principal p, q;
p.x = p.y = q.x = q.y = 0;
p.a.type = A;
p.a.real = 123.456;
*(&q.a) = p.a;
printf("%.2f,%d,%.2f,%.2f : %.2f,%d,%.2f,%.2f\n", p.x, p.a.type, p.a.real, p.y, q.x, q.a.type, q.a.real, q.y);
return 0;
}
'''
self.do_run(src, '''16,32
0,8,8,8
16,24,24,24
0.00,10,123.46,0.00 : 0.00,10,123.46,0.00
''')
@no_asan('asan errors on corner cases we check')
def test_aligned_alloc(self):
self.do_runf(test_file('test_aligned_alloc.c'), '')
def test_unsigned(self):
src = '''
#include <stdio.h>
      const signed char cvals[2] = { -1, -2 }; // compiler can store this as a string, so -1 becomes \\FF, and needs re-signing
int main()
{
{
unsigned char x = 200;
printf("*%d*\\n", x);
unsigned char y = -22;
printf("*%d*\\n", y);
}
int varey = 100;
unsigned int MAXEY = -1, MAXEY2 = -77;
printf("*%u,%d,%u*\\n", MAXEY, varey >= MAXEY, MAXEY2); // 100 >= -1? not in unsigned!
int y = cvals[0];
printf("*%d,%d,%d,%d*\\n", cvals[0], cvals[0] < 0, y, y < 0);
y = cvals[1];
printf("*%d,%d,%d,%d*\\n", cvals[1], cvals[1] < 0, y, y < 0);
// zext issue - see mathop in jsifier
unsigned char x8 = -10;
unsigned long hold = 0;
hold += x8;
int y32 = hold+50;
printf("*%lu,%d*\\n", hold, y32);
// Comparisons
x8 = 0;
for (int i = 0; i < 254; i++) x8++; // make it an actual 254 in JS - not a -2
printf("*%d,%d*\\n", x8+1 == 0xff, x8+1 != 0xff); // 0xff may be '-1' in the bitcode
return 0;
}
'''
self.do_run(src, '*4294967295,0,4294967219*\n*-1,1,-1,1*\n*-2,1,-2,1*\n*246,296*\n*1,0*')
self.emcc_args.append('-Wno-constant-conversion')
src = '''
#include <stdio.h>
int main()
{
{
unsigned char x;
unsigned char *y = &x;
*y = -1;
printf("*%d*\\n", x);
}
{
unsigned short x;
unsigned short *y = &x;
*y = -1;
printf("*%d*\\n", x);
}
/*{ // This case is not checked. The hint for unsignedness is just the %u in printf, and we do not analyze that
unsigned int x;
unsigned int *y = &x;
*y = -1;
printf("*%u*\\n", x);
}*/
{
char x;
char *y = &x;
*y = 255;
printf("*%d*\\n", x);
}
{
char x;
char *y = &x;
*y = 65535;
printf("*%d*\\n", x);
}
{
char x;
char *y = &x;
*y = 0xffffffff;
printf("*%d*\\n", x);
}
return 0;
}
'''
self.do_run(src, '*255*\n*65535*\n*-1*\n*-1*\n*-1*')
def test_bitfields(self):
self.do_core_test('test_bitfields.c')
def test_floatvars(self):
self.do_core_test('test_floatvars.cpp')
def test_closebitcasts(self):
self.do_core_test('closebitcasts.c')
def test_fast_math(self):
self.emcc_args += ['-ffast-math']
self.do_core_test('test_fast_math.c', args=['5', '6', '8'])
def test_zerodiv(self):
self.do_core_test('test_zerodiv.c')
def test_zero_multiplication(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_zero_multiplication.c')
def test_isnan(self):
self.do_core_test('test_isnan.c')
def test_globaldoubles(self):
self.do_core_test('test_globaldoubles.c')
def test_math(self):
self.do_core_test('test_math.c')
def test_erf(self):
self.do_core_test('test_erf.c')
def test_math_hyperbolic(self):
self.do_core_test('test_math_hyperbolic.c')
def test_math_lgamma(self):
self.do_run_in_out_file_test('math', 'lgamma.c', assert_returncode=NON_ZERO)
def test_math_fmodf(self):
self.do_run_in_out_file_test('math', 'fmodf.c')
def test_frexp(self):
self.do_core_test('test_frexp.c')
def test_rounding(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_rounding.c')
def test_fcvt(self):
self.do_core_test('test_fcvt.cpp')
def test_llrint(self):
self.do_core_test('test_llrint.c')
def test_getgep(self):
# Generated code includes getelementptr (getelementptr, 0, 1), i.e., GEP as the first param to GEP
self.do_core_test('test_getgep.c')
def test_multiply_defined_symbols(self):
create_file('a1.c', 'int f() { return 1; }')
create_file('a2.c', 'void x() {}')
create_file('b1.c', 'int f() { return 2; }')
create_file('b2.c', 'void y() {}')
create_file('main.c', r'''
#include <stdio.h>
int f();
int main() {
printf("result: %d\n", f());
return 0;
}
''')
building.emcc('a1.c', ['-c'])
building.emcc('a2.c', ['-c'])
building.emcc('b1.c', ['-c'])
building.emcc('b2.c', ['-c'])
building.emcc('main.c', ['-c'])
building.emar('cr', 'liba.a', ['a1.c.o', 'a2.c.o'])
building.emar('cr', 'libb.a', ['b1.c.o', 'b2.c.o'])
building.link_to_object(['main.c.o', 'liba.a', 'libb.a'], 'all.o')
building.emcc('all.o', self.get_emcc_args(), 'all.js')
self.do_run('all.js', 'result: 1', no_build=True)
def test_if(self):
self.do_core_test('test_if.c')
def test_if_else(self):
self.do_core_test('test_if_else.c')
def test_loop(self):
self.do_core_test('test_loop.c')
def test_stack(self):
self.set_setting('INLINING_LIMIT')
# some extra coverage in all test suites for stack checks
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.do_core_test('test_stack.c')
def test_stack_align(self):
src = test_file('core', 'test_stack_align.cpp')
def test():
self.do_runf(src, ['''align 4: 0
align 8: 0
align 16: 0
align 32: 0
base align: 0, 0, 0, 0'''])
test()
@no_asan('stack size is too low for asan to work properly')
def test_stack_placement(self):
self.set_setting('TOTAL_STACK', 1024)
self.do_core_test('test_stack_placement.c')
self.set_setting('GLOBAL_BASE', 102400)
self.do_core_test('test_stack_placement.c')
@no_asan('asan does not support main modules')
@no_wasm2js('MAIN_MODULE support')
def test_stack_placement_pic(self):
self.set_setting('TOTAL_STACK', 1024)
self.set_setting('MAIN_MODULE')
self.do_core_test('test_stack_placement.c')
self.set_setting('GLOBAL_BASE', 102400)
self.do_core_test('test_stack_placement.c')
def test_strings(self):
self.do_core_test('test_strings.c', args=['wowie', 'too', '74'])
def test_strcmp_uni(self):
self.do_core_test('test_strcmp_uni.c')
def test_strndup(self):
self.do_core_test('test_strndup.c')
def test_errar(self):
self.do_core_test('test_errar.c')
def test_mainenv(self):
self.do_core_test('test_mainenv.c')
def test_funcs(self):
self.do_core_test('test_funcs.c')
def test_structs(self):
self.do_core_test('test_structs.c')
gen_struct_src = '''
#include <stdio.h>
#include <stdlib.h>
#include "emscripten.h"
struct S
{
int x, y;
};
int main()
{
S* a = {{gen_struct}};
a->x = 51; a->y = 62;
printf("*%d,%d*\\n", a->x, a->y);
{{del_struct}}(a);
return 0;
}
'''
def test_mallocstruct(self):
self.do_run(self.gen_struct_src.replace('{{gen_struct}}', '(S*)malloc(sizeof(S))').replace('{{del_struct}}', 'free'), '*51,62*')
@no_asan('ASan does not support custom memory allocators')
@no_lsan('LSan does not support custom memory allocators')
@parameterized({
'normal': [],
'memvalidate': ['-DEMMALLOC_MEMVALIDATE'],
'memvalidate_verbose': ['-DEMMALLOC_MEMVALIDATE', '-DEMMALLOC_VERBOSE', '-DRANDOM_ITERS=130'],
})
def test_emmalloc(self, *args):
# in newer clang+llvm, the internal calls to malloc in emmalloc may be optimized under
# the assumption that they are external, so like in system_libs.py where we build
# malloc, we need to disable builtin here too
self.set_setting('MALLOC', 'none')
self.emcc_args += ['-fno-builtin'] + list(args)
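    # Compile emmalloc.cpp, sbrk.c and the test as a single unit so the allocator being
    # exercised is emmalloc itself (the system malloc was disabled above via MALLOC=none).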
self.do_run(open(path_from_root('system', 'lib', 'emmalloc.cpp')).read() +
open(path_from_root('system', 'lib', 'sbrk.c')).read() +
open(test_file('core', 'test_emmalloc.cpp')).read(),
open(test_file('core', 'test_emmalloc.out')).read())
@no_asan('ASan does not support custom memory allocators')
@no_lsan('LSan does not support custom memory allocators')
def test_emmalloc_usable_size(self, *args):
self.set_setting('MALLOC', 'emmalloc')
self.emcc_args += list(args)
self.do_core_test('test_malloc_usable_size.c')
@no_optimize('output is sensitive to optimization flags, so only test unoptimized builds')
@no_asan('ASan does not support custom memory allocators')
@no_lsan('LSan does not support custom memory allocators')
def test_emmalloc_memory_statistics(self, *args):
self.set_setting('MALLOC', 'emmalloc')
self.emcc_args += ['-s', 'INITIAL_MEMORY=128MB', '-g'] + list(args)
self.do_core_test('test_emmalloc_memory_statistics.cpp')
@no_optimize('output is sensitive to optimization flags, so only test unoptimized builds')
@no_asan('ASan does not support custom memory allocators')
@no_lsan('LSan does not support custom memory allocators')
def test_emmalloc_trim(self, *args):
self.set_setting('MALLOC', 'emmalloc')
self.emcc_args += ['-s', 'INITIAL_MEMORY=128MB', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2147418112'] + list(args)
self.do_core_test('test_emmalloc_trim.cpp')
# Test case against https://github.com/emscripten-core/emscripten/issues/10363
def test_emmalloc_memalign_corruption(self, *args):
self.set_setting('MALLOC', 'emmalloc')
self.do_core_test('emmalloc_memalign_corruption.cpp')
def test_newstruct(self):
self.do_run(self.gen_struct_src.replace('{{gen_struct}}', 'new S').replace('{{del_struct}}', 'delete'), '*51,62*')
def test_addr_of_stacked(self):
self.do_core_test('test_addr_of_stacked.c')
def test_globals(self):
self.do_core_test('test_globals.c')
def test_linked_list(self):
self.do_core_test('test_linked_list.c')
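  # Checks struct member offsets and sizeof() for a range of layouts; the inline
  # comments in the C source spell out the expected sizes.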
def test_sup(self):
src = '''
#include <stdio.h>
struct S4 { int x; }; // size: 4
struct S4_2 { short x, y; }; // size: 4, but for alignment purposes, 2
struct S6 { short x, y, z; }; // size: 6
struct S6w { char x[6]; }; // size: 6 also
struct S6z { int x; short y; }; // size: 8, since we align to a multiple of the biggest - 4
struct C___ { S6 a, b, c; int later; };
struct Carr { S6 a[3]; int later; }; // essentially the same, but differently defined
struct C__w { S6 a; S6w b; S6 c; int later; }; // same size, different struct
struct Cp1_ { int pre; short a; S6 b, c; int later; }; // fillers for a
struct Cp2_ { int a; short pre; S6 b, c; int later; }; // fillers for a (get addr of the other filler)
struct Cint { S6 a; int b; S6 c; int later; }; // An int (different size) for b
struct C4__ { S6 a; S4 b; S6 c; int later; }; // Same size as int from before, but a struct
struct C4_2 { S6 a; S4_2 b; S6 c; int later; }; // Same size as int from before, but a struct with max element size 2
struct C__z { S6 a; S6z b; S6 c; int later; }; // different size, 8 instead of 6
int main()
{
#define TEST(struc) \\
{ \\
struc *s = 0; \\
printf("*%s: %d,%d,%d,%d<%zu*\\n", #struc, (int)&(s->a), (int)&(s->b), (int)&(s->c), (int)&(s->later), sizeof(struc)); \\
}
#define TEST_ARR(struc) \\
{ \\
struc *s = 0; \\
printf("*%s: %d,%d,%d,%d<%zu*\\n", #struc, (int)&(s->a[0]), (int)&(s->a[1]), (int)&(s->a[2]), (int)&(s->later), sizeof(struc)); \\
}
printf("sizeofs:%zu,%zu\\n", sizeof(S6), sizeof(S6z));
TEST(C___);
TEST_ARR(Carr);
TEST(C__w);
TEST(Cp1_);
TEST(Cp2_);
TEST(Cint);
TEST(C4__);
TEST(C4_2);
TEST(C__z);
return 0;
}
'''
self.do_run(src, 'sizeofs:6,8\n*C___: 0,6,12,20<24*\n*Carr: 0,6,12,20<24*\n*C__w: 0,6,12,20<24*\n*Cp1_: 4,6,12,20<24*\n*Cp2_: 0,6,12,20<24*\n*Cint: 0,8,12,20<24*\n*C4__: 0,8,12,20<24*\n*C4_2: 0,6,10,16<20*\n*C__z: 0,8,16,24<28*')
@also_with_standalone_wasm()
def test_assert(self):
self.do_core_test('test_assert.cpp', assert_returncode=NON_ZERO)
def test_wcslen(self):
self.do_core_test('test_wcslen.c')
def test_regex(self):
self.do_core_test('test_regex.c')
@also_with_standalone_wasm(wasm2c=True, impure=True)
def test_longjmp(self):
self.do_core_test('test_longjmp.c')
def test_longjmp2(self):
self.do_core_test('test_longjmp2.c')
@needs_dylink
def test_longjmp2_main_module(self):
# Test for binaryen regression:
# https://github.com/WebAssembly/binaryen/issues/2180
self.set_setting('MAIN_MODULE')
self.do_core_test('test_longjmp2.c')
def test_longjmp3(self):
self.do_core_test('test_longjmp3.c')
def test_longjmp4(self):
self.do_core_test('test_longjmp4.c')
def test_longjmp_funcptr(self):
self.do_core_test('test_longjmp_funcptr.c')
def test_longjmp_repeat(self):
self.do_core_test('test_longjmp_repeat.c')
def test_longjmp_stacked(self):
self.do_core_test('test_longjmp_stacked.c', assert_returncode=NON_ZERO)
def test_longjmp_exc(self):
self.do_core_test('test_longjmp_exc.c', assert_returncode=NON_ZERO)
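  # Run both with and without C++ exception catching, to check that longjmp keeps
  # working either way.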
def test_longjmp_throw(self):
for disable_throw in [0, 1]:
print(disable_throw)
self.set_setting('DISABLE_EXCEPTION_CATCHING', disable_throw)
self.do_core_test('test_longjmp_throw.cpp')
def test_longjmp_unwind(self):
self.do_core_test('test_longjmp_unwind.c', assert_returncode=NON_ZERO)
def test_longjmp_i64(self):
self.emcc_args += ['-g']
self.do_core_test('test_longjmp_i64.c', assert_returncode=NON_ZERO)
def test_siglongjmp(self):
self.do_core_test('test_siglongjmp.c')
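  # setjmp() returns 0 when called directly; argc can never reach 1131 here, so the
  # longjmp is never taken and every iteration is expected to print 0.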
def test_setjmp_many(self):
src = r'''
#include <stdio.h>
#include <setjmp.h>
int main(int argc, char** argv) {
jmp_buf buf;
for (int i = 0; i < NUM; i++) printf("%d\n", setjmp(buf));
if (argc-- == 1131) longjmp(buf, 11);
return 0;
}
'''
for num in [1, 5, 20, 1000]:
print('NUM=%d' % num)
self.do_run(src.replace('NUM', str(num)), '0\n' * num)
def test_setjmp_many_2(self):
src = r'''
#include <setjmp.h>
#include <stdio.h>
jmp_buf env;
void luaWork(int d){
int x;
printf("d is at %d\n", d);
longjmp(env, 1);
}
int main()
{
const int ITERATIONS=25;
for(int i = 0; i < ITERATIONS; i++){
if(!setjmp(env)){
luaWork(i);
}
}
return 0;
}
'''
self.do_run(src, r'''d is at 24''')
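  # Uses mallinfo() before and after a chain of setjmp/longjmp calls to verify that
  # the jumps do not leak heap memory.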
def test_setjmp_noleak(self):
src = r'''
#include <setjmp.h>
#include <stdio.h>
#include <assert.h>
jmp_buf env;
void luaWork(int d){
int x;
printf("d is at %d\n", d);
longjmp(env, 1);
}
#include <malloc.h>
#include <stdlib.h>
void dump() {
struct mallinfo m = mallinfo();
printf("dump: %d , %d\n", m.arena, m.uordblks);
}
void work(int n)
{
printf("work %d\n", n);
dump();
if(!setjmp(env)){
luaWork(n);
}
if (n > 0) work(n-1);
}
int main() {
struct mallinfo m1 = mallinfo();
dump();
work(10);
dump();
struct mallinfo m2 = mallinfo();
assert(m1.uordblks == m2.uordblks);
printf("ok.\n");
}
'''
self.do_run(src, r'''ok.''')
@with_both_exception_handling
def test_exceptions(self):
self.set_setting('EXCEPTION_DEBUG')
self.maybe_closure()
for support_longjmp in [0, 1]:
self.set_setting('SUPPORT_LONGJMP', support_longjmp)
self.do_run_from_file(test_file('core', 'test_exceptions.cpp'), test_file('core', 'test_exceptions_caught.out'))
def test_exceptions_off(self):
for support_longjmp in [0, 1]:
self.set_setting('DISABLE_EXCEPTION_CATCHING')
self.do_run_from_file(test_file('core', 'test_exceptions.cpp'), test_file('core', 'test_exceptions_uncaught.out'), assert_returncode=NON_ZERO)
@no_asan('TODO: ASan support in minimal runtime')
def test_exceptions_minimal_runtime(self):
self.set_setting('EXCEPTION_DEBUG')
self.set_setting('EXIT_RUNTIME')
self.maybe_closure()
self.set_setting('MINIMAL_RUNTIME')
for support_longjmp in [0, 1]:
self.set_setting('SUPPORT_LONGJMP', support_longjmp)
self.set_setting('DISABLE_EXCEPTION_CATCHING', 0)
self.do_run_from_file(test_file('core', 'test_exceptions.cpp'), test_file('core', 'test_exceptions_caught.out'))
self.set_setting('DISABLE_EXCEPTION_CATCHING')
# TODO: Node currently returns 0 for unhandled promise rejections.
# Switch this to True when they change their default
expect_fail = False
if not self.is_wasm():
expect_fail = True
self.do_run_from_file(test_file('core', 'test_exceptions.cpp'), test_file('core', 'test_exceptions_uncaught.out'), assert_returncode=NON_ZERO if expect_fail else 0)
@with_both_exception_handling
def test_exceptions_custom(self):
self.set_setting('EXCEPTION_DEBUG')
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.maybe_closure()
src = '''
#include <iostream>
class MyException
{
public:
MyException(){ std::cout << "Construct..."; }
MyException( const MyException & ) { std::cout << "Copy..."; }
~MyException(){ std::cout << "Destruct..."; }
};
int function()
{
std::cout << "Throw...";
throw MyException();
}
int function2()
{
return function();
}
int main()
{
try
{
function2();
}
catch (MyException & e)
{
std::cout << "Caught...";
}
try
{
function2();
}
catch (MyException e)
{
std::cout << "Caught...";
}
return 0;
}
'''
self.do_run(src, 'Throw...Construct...Caught...Destruct...Throw...Construct...Copy...Caught...Destruct...Destruct...')
@with_both_exception_handling
def test_exceptions_2(self):
for safe in [0, 1]:
print(safe)
if safe and '-fsanitize=address' in self.emcc_args:
# Can't use safe heap with ASan
continue
self.set_setting('SAFE_HEAP', safe)
self.do_core_test('test_exceptions_2.cpp')
@with_both_exception_handling
def test_exceptions_3(self):
src = r'''
#include <iostream>
#include <stdexcept>
int main(int argc, char **argv)
{
if (argc != 2) {
std::cout << "need an arg" << std::endl;
return 1;
}
int arg = argv[1][0] - '0';
try {
if (arg == 0) throw "a c string";
if (arg == 1) throw std::exception();
if (arg == 2) throw std::runtime_error("Hello");
} catch(const char * ex) {
std::cout << "Caught C string: " << ex << std::endl;
} catch(const std::exception &ex) {
std::cout << "Caught exception: " << ex.what() << std::endl;
} catch(...) {
std::cout << "Caught something else" << std::endl;
}
std::cout << "Done.\n";
}
'''
print('0')
self.do_run(src, 'Caught C string: a c string\nDone.', args=['0'])
print('1')
self.do_run('src.js', 'Caught exception: std::exception\nDone.', args=['1'], no_build=True)
print('2')
self.do_run('src.js', 'Caught exception: Hello\nDone.', args=['2'], no_build=True)
def test_exceptions_allowed(self):
self.set_setting('EXCEPTION_CATCHING_ALLOWED', ["_Z12somefunctionv"])
# otherwise it is inlined and not identified
self.set_setting('INLINING_LIMIT')
self.do_core_test('test_exceptions_allowed.cpp')
size = os.path.getsize('test_exceptions_allowed.js')
if self.is_wasm():
size += os.path.getsize('test_exceptions_allowed.wasm')
shutil.copyfile('test_exceptions_allowed.js', 'orig.js')
# check that an empty allow list works properly (as in, same as exceptions disabled)
src = test_file('core', 'test_exceptions_allowed.cpp')
empty_output = test_file('core', 'test_exceptions_allowed_empty.out')
self.set_setting('EXCEPTION_CATCHING_ALLOWED', [])
self.do_run_from_file(src, empty_output, assert_returncode=NON_ZERO)
empty_size = os.path.getsize('test_exceptions_allowed.js')
if self.is_wasm():
empty_size += os.path.getsize('test_exceptions_allowed.wasm')
shutil.copyfile('test_exceptions_allowed.js', 'empty.js')
self.set_setting('EXCEPTION_CATCHING_ALLOWED', ['fake'])
self.do_run_from_file(src, empty_output, assert_returncode=NON_ZERO)
fake_size = os.path.getsize('test_exceptions_allowed.js')
if self.is_wasm():
fake_size += os.path.getsize('test_exceptions_allowed.wasm')
shutil.copyfile('test_exceptions_allowed.js', 'fake.js')
self.clear_setting('EXCEPTION_CATCHING_ALLOWED')
self.do_run_from_file(src, empty_output, assert_returncode=NON_ZERO)
disabled_size = os.path.getsize('test_exceptions_allowed.js')
if self.is_wasm():
disabled_size += os.path.getsize('test_exceptions_allowed.wasm')
shutil.copyfile('test_exceptions_allowed.js', 'disabled.js')
print('size: %d' % size)
print('empty_size: %d' % empty_size)
print('fake_size: %d' % fake_size)
print('disabled_size: %d' % disabled_size)
# empty list acts the same as fully disabled
self.assertEqual(empty_size, disabled_size)
# big change when we disable exception catching of the function
self.assertGreater(size - empty_size, 0.01 * size)
# full disable can remove a little bit more
self.assertLess(disabled_size, fake_size)
def test_exceptions_allowed_2(self):
self.set_setting('EXCEPTION_CATCHING_ALLOWED', ["main"])
# otherwise it is inlined and not identified
self.set_setting('INLINING_LIMIT')
self.do_core_test('test_exceptions_allowed_2.cpp')
# When 'main' function does not have a signature, its contents will be
# outlined to '__original_main'. Check if we can handle that case.
self.emcc_args += ['-DMAIN_NO_SIGNATURE']
self.do_core_test('test_exceptions_allowed_2.cpp')
def test_exceptions_allowed_uncaught(self):
self.emcc_args += ['-std=c++11']
self.set_setting('EXCEPTION_CATCHING_ALLOWED', ["_Z4testv"])
# otherwise it is inlined and not identified
self.set_setting('INLINING_LIMIT')
self.do_core_test('test_exceptions_allowed_uncaught.cpp')
def test_exceptions_allowed_misuse(self):
self.set_setting('EXCEPTION_CATCHING_ALLOWED', ['foo'])
# Test old =2 setting for DISABLE_EXCEPTION_CATCHING
self.set_setting('DISABLE_EXCEPTION_CATCHING', 2)
err = self.expect_fail([EMCC, test_file('hello_world.c')] + self.get_emcc_args())
self.assertContained('error: DISABLE_EXCEPTION_CATCHING=X is no longer needed when specifying EXCEPTION_CATCHING_ALLOWED [-Wdeprecated] [-Werror]', err)
# =0 should also be a warning
self.set_setting('DISABLE_EXCEPTION_CATCHING', 0)
err = self.expect_fail([EMCC, test_file('hello_world.c')] + self.get_emcc_args())
self.assertContained('error: DISABLE_EXCEPTION_CATCHING=X is no longer needed when specifying EXCEPTION_CATCHING_ALLOWED [-Wdeprecated] [-Werror]', err)
# =1 should be a hard error
self.set_setting('DISABLE_EXCEPTION_CATCHING', 1)
err = self.expect_fail([EMCC, test_file('hello_world.c')] + self.get_emcc_args())
self.assertContained('error: DISABLE_EXCEPTION_CATCHING and EXCEPTION_CATCHING_ALLOWED are mutually exclusive', err)
    # even setting an empty list should trigger the error
self.set_setting('EXCEPTION_CATCHING_ALLOWED', [])
err = self.expect_fail([EMCC, test_file('hello_world.c')] + self.get_emcc_args())
self.assertContained('error: DISABLE_EXCEPTION_CATCHING and EXCEPTION_CATCHING_ALLOWED are mutually exclusive', err)
@with_both_exception_handling
def test_exceptions_uncaught(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
src = r'''
#include <stdio.h>
#include <exception>
struct X {
~X() {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
}
};
int main() {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
try {
X x;
throw 1;
} catch(...) {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
}
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
return 0;
}
'''
self.do_run(src, 'exception? no\nexception? yes\nexception? no\nexception? no\n')
src = r'''
#include <fstream>
#include <iostream>
int main() {
std::ofstream os("test");
os << std::unitbuf << "foo"; // trigger a call to std::uncaught_exception from
// std::basic_ostream::sentry::~sentry
std::cout << "success";
}
'''
self.do_run(src, 'success')
@with_both_exception_handling
def test_exceptions_uncaught_2(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
src = r'''
#include <iostream>
#include <exception>
int main() {
try {
throw std::exception();
} catch(std::exception) {
try {
throw;
} catch(std::exception) {}
}
if (std::uncaught_exception())
std::cout << "ERROR: uncaught_exception still set.";
else
std::cout << "OK";
}
'''
self.do_run(src, 'OK\n')
@with_both_exception_handling
def test_exceptions_typed(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.clear_setting('SAFE_HEAP') # Throwing null will cause an ignorable null pointer access.
self.do_core_test('test_exceptions_typed.cpp')
@with_both_exception_handling
def test_exceptions_virtual_inheritance(self):
self.do_core_test('test_exceptions_virtual_inheritance.cpp')
@with_both_exception_handling
def test_exceptions_convert(self):
self.do_core_test('test_exceptions_convert.cpp')
# TODO Make setjmp-longjmp also use Wasm exception handling
@with_both_exception_handling
def test_exceptions_multi(self):
self.do_core_test('test_exceptions_multi.cpp')
@with_both_exception_handling
def test_exceptions_std(self):
self.clear_setting('SAFE_HEAP')
self.do_core_test('test_exceptions_std.cpp')
@with_both_exception_handling
def test_exceptions_alias(self):
self.do_core_test('test_exceptions_alias.cpp')
@with_both_exception_handling
def test_exceptions_rethrow(self):
self.do_core_test('test_exceptions_rethrow.cpp')
@with_both_exception_handling
def test_exceptions_uncaught_count(self):
self.do_core_test('test_exceptions_uncaught_count.cpp')
@with_both_exception_handling
def test_exceptions_resume(self):
self.set_setting('EXCEPTION_DEBUG')
self.do_core_test('test_exceptions_resume.cpp')
@with_both_exception_handling
def test_exceptions_destroy_virtual(self):
self.do_core_test('test_exceptions_destroy_virtual.cpp')
@with_both_exception_handling
def test_exceptions_refcount(self):
self.do_core_test('test_exceptions_refcount.cpp')
@with_both_exception_handling
def test_exceptions_primary(self):
self.do_core_test('test_exceptions_primary.cpp')
@with_both_exception_handling
def test_exceptions_simplify_cfg(self):
self.do_core_test('test_exceptions_simplify_cfg.cpp')
@with_both_exception_handling
def test_exceptions_libcxx(self):
self.do_core_test('test_exceptions_libcxx.cpp')
@with_both_exception_handling
def test_exceptions_multiple_inherit(self):
self.do_core_test('test_exceptions_multiple_inherit.cpp')
@with_both_exception_handling
def test_exceptions_multiple_inherit_rethrow(self):
self.do_core_test('test_exceptions_multiple_inherit_rethrow.cpp')
@with_both_exception_handling
def test_exceptions_rethrow_missing(self):
create_file('main.cpp', 'int main() { throw; }')
self.do_runf('main.cpp', None, assert_returncode=NON_ZERO)
@with_both_exception_handling
def test_bad_typeid(self):
self.do_run(r'''
// exception example
#include <iostream> // std::cerr
#include <typeinfo> // operator typeid
#include <exception> // std::exception
class Polymorphic {virtual void member(){}};
int main () {
try
{
Polymorphic * pb = 0;
const std::type_info& ti = typeid(*pb); // throws a bad_typeid exception
}
catch (std::exception& e)
{
std::cerr << "exception caught: " << e.what() << '\n';
}
return 0;
}
''', 'exception caught: std::bad_typeid')
def test_iostream_ctors(self):
# iostream stuff must be globally constructed before user global
# constructors, so iostream works in global constructors
self.do_run(r'''
#include <iostream>
struct A {
A() { std::cout << "bug"; }
};
A a;
int main() {
std::cout << "free code" << std::endl;
return 0;
}
''', 'bugfree code')
# Marked as impure since the WASI reactor modules (modules without main)
  # are not yet supported by the wasm engines we test against.
@also_with_standalone_wasm(impure=True)
def test_ctors_no_main(self):
self.emcc_args.append('--no-entry')
self.do_core_test('test_ctors_no_main.cpp')
def test_class(self):
self.do_core_test('test_class.cpp')
def test_inherit(self):
self.do_core_test('test_inherit.cpp')
def test_isdigit_l(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_isdigit_l.cpp')
def test_iswdigit(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_iswdigit.cpp')
def test_polymorph(self):
self.do_core_test('test_polymorph.cpp')
def test_complex(self):
self.do_core_test('test_complex.c')
def test_float_builtins(self):
# tests wasm_libc_rt
self.do_core_test('test_float_builtins.c')
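  # With SAFE_HEAP, calling a virtual method through the null pointer returned by
  # get_null() should be reported as a segmentation fault, while a real 'new D2()'
  # object keeps working and prints normally.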
@no_asan('SAFE_HEAP cannot be used with ASan')
def test_segfault(self):
self.set_setting('SAFE_HEAP')
for addr in ['get_null()', 'new D2()']:
print(addr)
src = r'''
#include <stdio.h>
#include <emscripten.h>
struct Classey {
virtual void doIt() = 0;
};
struct D1 : Classey {
virtual void doIt() { printf("fleefl\n"); }
};
struct D2 : Classey {
virtual void doIt() { printf("marfoosh\n"); }
};
EM_JS(Classey*, get_null, (), {
return 0;
});
int main(int argc, char **argv)
{
Classey *p = argc == 100 ? new D1() : (Classey*)%s;
p->doIt();
return 0;
}
''' % addr
if 'get_null' in addr:
self.do_run(src, 'segmentation fault', assert_returncode=NON_ZERO)
else:
self.do_run(src, 'marfoosh')
def test_dynamic_cast(self):
self.do_core_test('test_dynamic_cast.cpp')
def test_dynamic_cast_b(self):
self.do_core_test('test_dynamic_cast_b.cpp')
def test_dynamic_cast_2(self):
self.do_core_test('test_dynamic_cast_2.cpp')
def test_funcptr(self):
self.do_core_test('test_funcptr.c')
def test_mathfuncptr(self):
self.do_core_test('test_mathfuncptr.c')
def test_funcptrfunc(self):
self.do_core_test('test_funcptrfunc.c')
def test_funcptr_namecollide(self):
self.do_core_test('test_funcptr_namecollide.c')
def test_emptyclass(self):
self.do_core_test('test_emptyclass.cpp')
def test_alloca(self):
self.do_core_test('test_alloca.c')
def test_rename(self):
self.do_run_in_out_file_test('stdio', 'test_rename.c')
def test_remove(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_run_in_out_file_test('cstdio', 'test_remove.cpp')
def test_alloca_stack(self):
self.do_core_test('test_alloca_stack.c')
def test_stack_byval(self):
self.do_core_test('test_stack_byval.cpp')
def test_stack_varargs(self):
# in node.js we allocate argv[0] on the stack, which means the length
# of the program directory influences how much stack we need, and so
# long random temp dir names can lead to random failures. The stack
# size was increased here to avoid that.
self.set_setting('INLINING_LIMIT')
self.set_setting('TOTAL_STACK', 8 * 1024)
self.do_core_test('test_stack_varargs.c')
def test_stack_varargs2(self):
# in node.js we allocate argv[0] on the stack, which means the length
# of the program directory influences how much stack we need, and so
# long random temp dir names can lead to random failures. The stack
# size was increased here to avoid that.
self.set_setting('TOTAL_STACK', 8 * 1024)
src = r'''
#include <stdio.h>
#include <stdlib.h>
void func(int i) {
}
int main() {
for (int i = 0; i < 7000; i++) {
printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i);
}
printf("ok!\n");
return 0;
}
'''
self.do_run(src, 'ok!')
print('with return')
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
for (int i = 0; i < 7000; i++) {
int j = printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d",
i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i);
printf(" (%d)\n", j);
}
printf("ok!\n");
return 0;
}
'''
self.do_run(src, 'ok!')
print('with definitely no return')
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
void vary(const char *s, ...)
{
va_list v;
va_start(v, s);
char d[20];
vsnprintf(d, 20, s, v);
puts(d);
// Try it with copying
va_list tempva;
va_copy(tempva, v);
vsnprintf(d, 20, s, tempva);
puts(d);
va_end(v);
}
int main() {
for (int i = 0; i < 7000; i++) {
int j = printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d",
i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i);
printf(" (%d)\n", j);
vary("*cheez: %d+%d*", 99, 24);
vary("*albeit*");
}
printf("ok!\n");
return 0;
}
'''
self.do_run(src, 'ok!')
def test_stack_void(self):
self.emcc_args.append('-Wno-format-extra-args')
self.set_setting('INLINING_LIMIT')
self.do_core_test('test_stack_void.c')
def test_life(self):
self.emcc_args += ['-std=c99']
self.do_run_in_out_file_test('life.c', args=['2'])
def test_array2(self):
self.do_core_test('test_array2.c')
def test_array2b(self):
self.do_core_test('test_array2b.c')
def test_constglobalstructs(self):
self.do_core_test('test_constglobalstructs.c')
def test_conststructs(self):
self.do_core_test('test_conststructs.c')
def test_bigarray(self):
self.do_core_test('test_bigarray.c')
def test_mod_globalstruct(self):
self.do_core_test('test_mod_globalstruct.c')
def test_sizeof(self):
self.do_core_test('test_sizeof.cpp')
def test_llvm_used(self):
self.do_core_test('test_llvm_used.c')
@no_asan('SAFE_HEAP cannot be used with ASan')
def test_set_align(self):
self.set_setting('SAFE_HEAP')
self.do_core_test('test_set_align.c')
def test_emscripten_api(self):
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_save_me_aimee'])
self.do_core_test('test_emscripten_api.cpp')
if '-fsanitize=address' not in self.emcc_args:
# test EXPORT_ALL (this is not compatible with asan, which doesn't
      # support dynamic linking at all or the LINKABLE flag)
self.set_setting('EXPORTED_FUNCTIONS', [])
self.set_setting('EXPORT_ALL')
self.set_setting('LINKABLE')
self.do_core_test('test_emscripten_api.cpp')
def test_emscripten_run_script_string_int(self):
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
const char *str = emscripten_run_script_string("1+1");
printf("got string: %s\n", str);
return 0;
}
'''
self.do_run(src, '''got string: 2''')
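  # Each U+2603 snowman encodes to 3 bytes in UTF-8, so the returned string is
  # 3*3 + 3 spaces + strlen("Hello!") = 18 bytes, with 'Hello' starting at byte 12.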
def test_emscripten_run_script_string_utf8(self):
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
int main() {
const char *str = emscripten_run_script_string("'\\u2603 \\u2603 \\u2603 Hello!'");
printf("length of returned string: %zu. Position of substring 'Hello': %zu\n", strlen(str), strstr(str, "Hello")-str);
return 0;
}
'''
self.do_run(src, '''length of returned string: 18. Position of substring 'Hello': 12''')
def test_emscripten_run_script_string_null(self):
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
const char *str = emscripten_run_script_string("void(0)");
if (str) {
printf("got string: %s\n", str);
} else {
puts("got null");
}
return 0;
}
'''
self.do_run(src, 'got null')
def test_emscripten_get_now(self):
self.banned_js_engines = [config.V8_ENGINE] # timer limitations in v8 shell
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.maybe_closure()
self.do_runf(test_file('emscripten_get_now.cpp'), 'Timer resolution is good')
def test_emscripten_get_compiler_setting(self):
src = test_file('core', 'emscripten_get_compiler_setting.c')
output = shared.unsuffixed(src) + '.out'
old = self.get_setting('ASSERTIONS', 1)
# with assertions, a nice message is shown
self.set_setting('ASSERTIONS')
self.do_runf(src, 'You must build with -s RETAIN_COMPILER_SETTINGS=1', assert_returncode=NON_ZERO)
self.set_setting('ASSERTIONS', old)
self.set_setting('RETAIN_COMPILER_SETTINGS')
self.do_runf(src, open(output).read().replace('waka', shared.EMSCRIPTEN_VERSION))
def test_emscripten_has_asyncify(self):
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("%d\n", emscripten_has_asyncify());
return 0;
}
'''
self.set_setting('ASYNCIFY', 0)
self.do_run(src, '0')
self.set_setting('ASYNCIFY')
self.do_run(src, '1')
# TODO: test only worked in non-fastcomp
def test_inlinejs(self):
self.skipTest('non-fastcomp is deprecated and fails in 3.5') # only supports EM_ASM
self.do_core_test('test_inlinejs.c')
if self.emcc_args == []:
# opts will eliminate the comments
out = open('src.js').read()
for i in range(1, 5):
assert ('comment%d' % i) in out
# TODO: test only worked in non-fastcomp
def test_inlinejs2(self):
self.skipTest('non-fastcomp is deprecated and fails in 3.5') # only supports EM_ASM
self.do_core_test('test_inlinejs2.c')
def test_inlinejs3(self):
if self.is_wasm():
self.skipTest('wasm requires a proper asm module')
src = test_file('core', 'test_inlinejs3.c')
output = shared.unsuffixed(src) + '.out'
self.do_core_test('test_inlinejs3.c')
print('no debugger, check validation')
src = open(src).read().replace('emscripten_debugger();', '')
self.do_run(src, open(output).read())
def test_inlinejs4(self):
self.do_run(r'''
#include <emscripten.h>
#define TO_STRING_INNER(x) #x
#define TO_STRING(x) TO_STRING_INNER(x)
#define assert_msg(msg, file, line) EM_ASM( throw 'Assert (' + msg + ') failed in ' + file + ':' + line + '!'; )
#define assert(expr) { \
if (!(expr)) { \
assert_msg(#expr, TO_STRING(__FILE__), TO_STRING(__LINE__)); \
} \
}
int main(int argc, char **argv) {
assert(argc != 17);
assert(false);
return 0;
}
''', 'false', assert_returncode=NON_ZERO)
def test_em_asm(self):
self.do_core_test('test_em_asm.cpp')
self.emcc_args.append('-std=gnu89')
self.do_core_test('test_em_asm.cpp', force_c=True)
# Tests various different ways to invoke the EM_ASM(), EM_ASM_INT() and EM_ASM_DOUBLE() macros.
@no_asan('Cannot use ASan: test depends exactly on heap size')
def test_em_asm_2(self):
self.do_core_test('test_em_asm_2.cpp')
self.emcc_args.append('-std=gnu89')
self.do_core_test('test_em_asm_2.cpp', force_c=True)
# Tests various different ways to invoke the MAIN_THREAD_EM_ASM(), MAIN_THREAD_EM_ASM_INT() and MAIN_THREAD_EM_ASM_DOUBLE() macros.
# This test is identical to test_em_asm_2, just search-replaces EM_ASM to MAIN_THREAD_EM_ASM on the test file. That way if new
# test cases are added to test_em_asm_2.cpp for EM_ASM, they will also get tested in MAIN_THREAD_EM_ASM form.
@no_asan('Cannot use ASan: test depends exactly on heap size')
def test_main_thread_em_asm(self):
src = open(test_file('core', 'test_em_asm_2.cpp')).read()
create_file('src.cpp', src.replace('EM_ASM', 'MAIN_THREAD_EM_ASM'))
expected_result = open(test_file('core', 'test_em_asm_2.out')).read()
create_file('result.out', expected_result.replace('EM_ASM', 'MAIN_THREAD_EM_ASM'))
self.do_run_from_file('src.cpp', 'result.out')
self.do_run_from_file('src.cpp', 'result.out', force_c=True)
def test_main_thread_async_em_asm(self):
self.do_core_test('test_main_thread_async_em_asm.cpp')
self.do_core_test('test_main_thread_async_em_asm.cpp', force_c=True)
# Tests MAIN_THREAD_EM_ASM_INT() function call with different signatures.
def test_main_thread_em_asm_signatures(self):
self.do_core_test('test_em_asm_signatures.cpp', assert_returncode=NON_ZERO)
def test_em_asm_unicode(self):
self.do_core_test('test_em_asm_unicode.cpp')
self.do_core_test('test_em_asm_unicode.cpp', force_c=True)
def test_em_asm_types(self):
self.do_core_test('test_em_asm_types.cpp')
self.do_core_test('test_em_asm_types.cpp', force_c=True)
def test_em_asm_unused_arguments(self):
self.do_core_test('test_em_asm_unused_arguments.cpp')
# Verify that EM_ASM macros support getting called with multiple arities.
  # Even if tests are later joined into larger compilation units, this must still be
  # compiled separately from other code that uses EM_ASM macros with arities 1-3;
  # otherwise it may incorrectly report a success.
def test_em_asm_parameter_pack(self):
self.do_core_test('test_em_asm_parameter_pack.cpp')
def test_em_asm_arguments_side_effects(self):
self.do_core_test('test_em_asm_arguments_side_effects.cpp')
self.do_core_test('test_em_asm_arguments_side_effects.cpp', force_c=True)
def test_em_asm_direct(self):
self.do_core_test('test_em_asm_direct.c')
@parameterized({
'': ([], False),
'c': ([], True),
'linked': (['-s', 'MAIN_MODULE'], False),
'linked_c': (['-s', 'MAIN_MODULE'], True),
})
def test_em_js(self, args, force_c):
if 'MAIN_MODULE' in args and not self.is_wasm():
self.skipTest('main module support for non-wasm')
if '-fsanitize=address' in self.emcc_args:
self.skipTest('no dynamic library support in asan yet')
self.emcc_args += args + ['-s', 'EXPORTED_FUNCTIONS=_main,_malloc']
self.do_core_test('test_em_js.cpp', force_c=force_c)
self.assertContained("no args returning int", open('test_em_js.js').read())
def test_runtime_stacksave(self):
self.do_runf(test_file('core', 'test_runtime_stacksave.c'), 'success')
# Tests that -s MINIMAL_RUNTIME=1 builds can utilize -s ALLOW_MEMORY_GROWTH=1 option.
def test_minimal_runtime_memorygrowth(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
self.set_setting('MINIMAL_RUNTIME')
src = test_file('core', 'test_memorygrowth.c')
# Fail without memory growth
expect_fail = False
if not self.is_wasm():
expect_fail = True
self.do_runf(src, 'OOM', assert_returncode=NON_ZERO if expect_fail else 0)
# Win with it
self.set_setting('ALLOW_MEMORY_GROWTH')
self.do_runf(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*')
def test_memorygrowth(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
if self.maybe_closure():
# verify NO_DYNAMIC_EXECUTION is compatible with closure
self.set_setting('DYNAMIC_EXECUTION', 0)
# With typed arrays in particular, it is dangerous to use more memory than INITIAL_MEMORY,
# since we then need to enlarge the heap(s).
src = test_file('core', 'test_memorygrowth.c')
# Fail without memory growth
self.do_runf(src, 'OOM', assert_returncode=NON_ZERO)
fail = open('test_memorygrowth.js').read()
# Win with it
self.set_setting('ALLOW_MEMORY_GROWTH')
self.do_runf(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*')
win = open('test_memorygrowth.js').read()
if '-O2' in self.emcc_args and not self.is_wasm():
# Make sure ALLOW_MEMORY_GROWTH generates different code (should be less optimized)
possible_starts = ['// EMSCRIPTEN_START_FUNCS', 'var TOTAL_STACK']
code_start = None
for s in possible_starts:
if fail.find(s) >= 0:
code_start = s
break
assert code_start is not None, 'Generated code must contain one of ' + str(possible_starts)
fail = fail[fail.find(code_start):]
win = win[win.find(code_start):]
assert len(fail) < len(win), 'failing code - without memory growth on - is more optimized, and smaller' + str([len(fail), len(win)])
# Tracing of memory growths should work
# (SAFE_HEAP would instrument the tracing code itself, leading to recursion)
if not self.get_setting('SAFE_HEAP'):
self.set_setting('EMSCRIPTEN_TRACING')
self.emcc_args += ['--tracing']
self.do_runf(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*')
def test_memorygrowth_2(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
# With typed arrays in particular, it is dangerous to use more memory than INITIAL_MEMORY,
# since we then need to enlarge the heap(s).
src = test_file('core', 'test_memorygrowth_2.c')
# Fail without memory growth
self.do_runf(src, 'OOM', assert_returncode=NON_ZERO)
fail = open('test_memorygrowth_2.js').read()
# Win with it
self.set_setting('ALLOW_MEMORY_GROWTH')
self.do_runf(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*')
win = open('test_memorygrowth_2.js').read()
if '-O2' in self.emcc_args and not self.is_wasm():
# Make sure ALLOW_MEMORY_GROWTH generates different code (should be less optimized)
assert len(fail) < len(win), 'failing code - without memory growth on - is more optimized, and smaller' + str([len(fail), len(win)])
def test_memorygrowth_3(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
# checks handling of malloc failure properly
self.set_setting('ABORTING_MALLOC', 0)
self.set_setting('SAFE_HEAP')
self.do_core_test('test_memorygrowth_3.c')
@also_with_standalone_wasm(impure=True)
def test_memorygrowth_MAXIMUM_MEMORY(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
if not self.is_wasm():
self.skipTest('wasm memory specific test')
# check that memory growth does not exceed the wasm mem max limit
self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=64Mb', '-s', 'MAXIMUM_MEMORY=100Mb']
self.do_core_test('test_memorygrowth_wasm_mem_max.c')
def test_memorygrowth_linear_step(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
if not self.is_wasm():
self.skipTest('wasm memory specific test')
# check that memory growth does not exceed the wasm mem max limit and is exactly or one step below the wasm mem max
self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH', '-s', 'TOTAL_STACK=1Mb', '-s', 'INITIAL_MEMORY=64Mb', '-s', 'MAXIMUM_MEMORY=130Mb', '-s', 'MEMORY_GROWTH_LINEAR_STEP=1Mb']
self.do_core_test('test_memorygrowth_memory_growth_step.c')
def test_memorygrowth_geometric_step(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
if not self.is_wasm():
self.skipTest('wasm memory specific test')
self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MEMORY_GROWTH_GEOMETRIC_STEP=8.5', '-s', 'MEMORY_GROWTH_GEOMETRIC_CAP=32MB']
self.do_core_test('test_memorygrowth_geometric_step.c')
def test_memorygrowth_3_force_fail_reallocBuffer(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
self.set_setting('ALLOW_MEMORY_GROWTH')
self.set_setting('TEST_MEMORY_GROWTH_FAILS')
self.do_core_test('test_memorygrowth_3.c')
@parameterized({
'nogrow': (['-s', 'ALLOW_MEMORY_GROWTH=0'],),
'grow': (['-s', 'ALLOW_MEMORY_GROWTH'],)
})
@no_asan('requires more memory when growing')
def test_aborting_new(self, args):
# test that C++ new properly errors if we fail to malloc when growth is
# enabled, with or without growth
self.emcc_args += args
self.set_setting('MAXIMUM_MEMORY', '18MB')
self.do_core_test('test_aborting_new.cpp')
@no_wasm2js('no WebAssembly.Memory()')
@no_asan('ASan alters the memory size')
def test_module_wasm_memory(self):
self.emcc_args += ['--pre-js', test_file('core', 'test_module_wasm_memory.js')]
self.set_setting('IMPORTED_MEMORY')
self.do_runf(test_file('core', 'test_module_wasm_memory.c'), 'success')
def test_ssr(self): # struct self-ref
src = '''
#include <stdio.h>
// see related things in openjpeg
typedef struct opj_mqc_state {
unsigned int qeval;
int mps;
struct opj_mqc_state *nmps;
struct opj_mqc_state *nlps;
} opj_mqc_state_t;
static opj_mqc_state_t mqc_states[4] = {
{0x5600, 0, &mqc_states[2], &mqc_states[3]},
{0x5602, 1, &mqc_states[3], &mqc_states[2]},
};
int main() {
printf("*%d*\\n", (int)(mqc_states+1)-(int)mqc_states);
for (int i = 0; i < 2; i++)
printf("%d:%d,%d,%d,%d\\n", i, mqc_states[i].qeval, mqc_states[i].mps,
(int)mqc_states[i].nmps-(int)mqc_states, (int)mqc_states[i].nlps-(int)mqc_states);
return 0;
}
'''
self.do_run(src, '''*16*\n0:22016,0,32,48\n1:22018,1,48,32\n''')
def test_tinyfuncstr(self):
self.do_core_test('test_tinyfuncstr.cpp')
def test_llvmswitch(self):
self.do_core_test('test_llvmswitch.c')
def test_cxx_version(self):
self.do_core_test('test_cxx_version.cpp')
@no_wasm2js('massive switches can break js engines')
def test_bigswitch(self):
self.do_runf(test_file('bigswitch.cpp'), '''34962: GL_ARRAY_BUFFER (0x8892)
26214: what?
35040: GL_STREAM_DRAW (0x88E0)
3060: what?
''', args=['34962', '26214', '35040', str(0xbf4)])
@no_wasm2js('massive switches can break js engines')
@is_slow_test
def test_biggerswitch(self):
if not is_optimizing(self.emcc_args):
self.skipTest('nodejs takes >6GB to compile this if the wasm is not optimized, which OOMs, see https://github.com/emscripten-core/emscripten/issues/7928#issuecomment-458308453')
if '-Os' in self.emcc_args:
self.skipTest('hangs in recent upstream clang, see https://bugs.llvm.org/show_bug.cgi?id=43468')
num_cases = 20000
switch_case = self.run_process([PYTHON, test_file('gen_large_switchcase.py'), str(num_cases)], stdout=PIPE, stderr=PIPE).stdout
self.do_run(switch_case, '''58996: 589965899658996
59297: 592975929759297
59598: default
59899: 598995989959899
Success!''')
def test_indirectbr(self):
self.emcc_args = [x for x in self.emcc_args if x != '-g']
self.do_core_test('test_indirectbr.c')
@no_asan('local count too large for VMs')
@no_wasm2js('extremely deep nesting, hits stack limit on some VMs')
def test_indirectbr_many(self):
self.do_core_test('test_indirectbr_many.c')
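  # 'header' is byte-packed via #pragma pack(1) and so occupies 4 bytes, while
  # 'fatheader' keeps natural alignment and occupies 6, matching the expected
  # '*4,3,4*' / '*6,4,6*' output.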
def test_pack(self):
src = '''
#include <stdio.h>
#include <string.h>
#pragma pack(push,1)
typedef struct header
{
unsigned char id;
unsigned short colour;
unsigned char desc;
} header;
#pragma pack(pop)
typedef struct fatheader
{
unsigned char id;
unsigned short colour;
unsigned char desc;
} fatheader;
int main( int argc, const char *argv[] ) {
header h, *ph = 0;
fatheader fh, *pfh = 0;
printf("*%zu,%d,%d*\\n", sizeof(header), (int)((int)&h.desc - (int)&h.id), (int)(&ph[1])-(int)(&ph[0]));
printf("*%zu,%d,%d*\\n", sizeof(fatheader), (int)((int)&fh.desc - (int)&fh.id), (int)(&pfh[1])-(int)(&pfh[0]));
return 0;
}
'''
self.do_run(src, '*4,3,4*\n*6,4,6*')
def test_varargs(self):
self.do_core_test('test_varargs.c')
def test_varargs_multi(self):
self.do_core_test('test_varargs_multi.c')
@unittest.skip('clang cannot compile this code with that target yet')
def test_varargs_byval(self):
src = r'''
#include <stdio.h>
#include <stdarg.h>
typedef struct type_a {
union {
double f;
void *p;
int i;
short sym;
} value;
} type_a;
enum mrb_vtype {
MRB_TT_FALSE = 0, /* 0 */
MRB_TT_CLASS = 9 /* 9 */
};
typedef struct type_b {
enum mrb_vtype tt:8;
} type_b;
void print_type_a(int argc, ...);
void print_type_b(int argc, ...);
int main(int argc, char *argv[])
{
type_a a;
type_b b;
a.value.p = (void*) 0x12345678;
b.tt = MRB_TT_CLASS;
printf("The original address of a is: %p\n", a.value.p);
printf("The original type of b is: %d\n", b.tt);
print_type_a(1, a);
print_type_b(1, b);
return 0;
}
void print_type_a(int argc, ...) {
va_list ap;
type_a a;
va_start(ap, argc);
a = va_arg(ap, type_a);
va_end(ap);
printf("The current address of a is: %p\n", a.value.p);
}
void print_type_b(int argc, ...) {
va_list ap;
type_b b;
va_start(ap, argc);
b = va_arg(ap, type_b);
va_end(ap);
printf("The current type of b is: %d\n", b.tt);
}
'''
self.do_run(src, '''The original address of a is: 0x12345678
The original type of b is: 9
The current address of a is: 0x12345678
The current type of b is: 9
''')
def test_functionpointer_libfunc_varargs(self):
self.do_core_test('test_functionpointer_libfunc_varargs.c')
def test_structbyval(self):
self.set_setting('INLINING_LIMIT')
# part 1: make sure that normally, passing structs by value works
src = r'''
#include <stdio.h>
struct point
{
int x, y;
};
void dump(struct point p) {
p.x++; // should not modify
p.y++; // anything in the caller!
printf("dump: %d,%d\n", p.x, p.y);
}
void dumpmod(struct point *p) {
p->x++; // should not modify
p->y++; // anything in the caller!
printf("dump: %d,%d\n", p->x, p->y);
}
int main( int argc, const char *argv[] ) {
point p = { 54, 2 };
printf("pre: %d,%d\n", p.x, p.y);
dump(p);
void (*dp)(point p) = dump; // And, as a function pointer
dp(p);
printf("post: %d,%d\n", p.x, p.y);
dumpmod(&p);
dumpmod(&p);
printf("last: %d,%d\n", p.x, p.y);
return 0;
}
'''
self.do_run(src, 'pre: 54,2\ndump: 55,3\ndump: 55,3\npost: 54,2\ndump: 55,3\ndump: 56,4\nlast: 56,4')
def test_stdlibs(self):
# safe heap prints a warning that messes up our output.
self.set_setting('SAFE_HEAP', 0)
# needs atexit
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_stdlibs.c')
def test_stdbool(self):
create_file('test_stdbool.c', r'''
#include <stdio.h>
#include <stdbool.h>
int main() {
bool x = true;
bool y = false;
printf("*%d*\n", x != y);
return 0;
}
''')
self.do_runf('test_stdbool.c', '*1*')
def test_strtoll_hex(self):
# tests strtoll for hex strings (0x...)
self.do_core_test('test_strtoll_hex.c')
def test_strtoll_dec(self):
    # tests strtoll for decimal strings
self.do_core_test('test_strtoll_dec.c')
def test_strtoll_bin(self):
    # tests strtoll for binary strings
self.do_core_test('test_strtoll_bin.c')
def test_strtoll_oct(self):
    # tests strtoll for octal strings (0...)
self.do_core_test('test_strtoll_oct.c')
def test_strtol_hex(self):
    # tests strtol for hex strings (0x...)
self.do_core_test('test_strtol_hex.c')
def test_strtol_dec(self):
    # tests strtol for decimal strings
self.do_core_test('test_strtol_dec.c')
def test_strtol_bin(self):
    # tests strtol for binary strings
self.do_core_test('test_strtol_bin.c')
def test_strtol_oct(self):
    # tests strtol for octal strings (0...)
self.do_core_test('test_strtol_oct.c')
@also_with_standalone_wasm()
def test_atexit(self):
# Confirms they are called in the proper reverse order
if not self.get_setting('STANDALONE_WASM'):
# STANDALONE_WASM mode always sets EXIT_RUNTIME if main exists
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_atexit.c')
def test_atexit_threads(self):
# also tests thread exit (__cxa_thread_atexit)
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_atexit_threads.c')
@no_asan('test relies on null pointer reads')
def test_pthread_specific(self):
self.do_run_in_out_file_test('pthread', 'specific.c')
def test_pthread_equal(self):
self.do_run_in_out_file_test('pthread', 'test_pthread_equal.cpp')
@node_pthreads
def test_pthread_dispatch_after_exit(self):
self.do_run_in_out_file_test('pthread', 'test_pthread_dispatch_after_exit.c')
def test_tcgetattr(self):
self.do_runf(test_file('termios', 'test_tcgetattr.c'), 'success')
def test_time(self):
self.do_core_test('test_time.cpp')
for tz in ['EST+05EDT', 'UTC+0']:
print('extra tz test:', tz)
with env_modify({'TZ': tz}):
# Run the test with different time zone settings if
# possible. It seems that the TZ environment variable does not
# work all the time (at least it's not well respected by
# Node.js on Windows), but it does no harm either.
self.do_core_test('test_time.cpp')
def test_timeb(self):
    # tests ftime() from sys/timeb.h
self.do_core_test('test_timeb.c')
def test_time_c(self):
self.do_core_test('test_time_c.c')
def test_gmtime(self):
self.do_core_test('test_gmtime.c')
def test_strptime_tm(self):
self.do_core_test('test_strptime_tm.c')
def test_strptime_days(self):
self.do_core_test('test_strptime_days.c')
def test_strptime_reentrant(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_strptime_reentrant.c')
def test_strftime(self):
self.do_core_test('test_strftime.cpp')
def test_trickystring(self):
self.do_core_test('test_trickystring.c')
def test_statics(self):
self.do_core_test('test_statics.cpp')
def test_copyop(self):
# clang generated code is vulnerable to this, as it uses
# memcpy for assignments, with hardcoded numbers of bytes
# (llvm-gcc copies items one by one).
self.do_core_test('test_copyop.cpp')
def test_memcpy_memcmp(self):
self.banned_js_engines = [config.V8_ENGINE] # Currently broken under V8_ENGINE but not node
def check(result, err):
result = result.replace('\n \n', '\n') # remove extra node output
return hashlib.sha1(result.encode('utf-8')).hexdigest()
self.do_core_test('test_memcpy_memcmp.c', output_nicerizer=check)
def test_memcpy2(self):
self.do_core_test('test_memcpy2.c')
def test_memcpy3(self):
self.do_core_test('test_memcpy3.c')
@also_with_standalone_wasm()
def test_memcpy_alignment(self):
self.do_runf(test_file('test_memcpy_alignment.cpp'), 'OK.')
def test_memset_alignment(self):
self.do_runf(test_file('test_memset_alignment.cpp'), 'OK.')
def test_memset(self):
self.do_core_test('test_memset.c')
def test_getopt(self):
self.do_core_test('test_getopt.c', args=['-t', '12', '-n', 'foobar'])
def test_getopt_long(self):
self.do_core_test('test_getopt_long.c', args=['--file', 'foobar', '-b'])
def test_memmove(self):
self.do_core_test('test_memmove.c')
def test_memmove2(self):
self.do_core_test('test_memmove2.c')
def test_memmove3(self):
self.do_core_test('test_memmove3.c')
def test_flexarray_struct(self):
self.do_core_test('test_flexarray_struct.c')
def test_bsearch(self):
self.do_core_test('test_bsearch.c')
def test_stack_overflow(self):
self.set_setting('ASSERTIONS', 2)
self.do_runf(test_file('core', 'stack_overflow.cpp'), 'stack overflow', assert_returncode=NON_ZERO)
def test_stackAlloc(self):
self.do_core_test('stackAlloc.cpp')
def test_nestedstructs(self):
src = '''
#include <stdio.h>
#include "emscripten.h"
struct base {
int x;
float y;
union {
int a;
float b;
};
char c;
};
struct hashtableentry {
int key;
base data;
};
struct hashset {
typedef hashtableentry entry;
struct chain { entry elem; chain *next; };
// struct chainchunk { chain chains[100]; chainchunk *next; };
};
struct hashtable : hashset {
hashtable() {
base *b = NULL;
entry *e = NULL;
chain *c = NULL;
printf("*%zu,%d,%d,%d,%d,%d|%zu,%d,%d,%d,%d,%d,%d,%d|%zu,%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n",
sizeof(base),
int(&(b->x)), int(&(b->y)), int(&(b->a)), int(&(b->b)), int(&(b->c)),
sizeof(hashtableentry),
int(&(e->key)), int(&(e->data)), int(&(e->data.x)), int(&(e->data.y)), int(&(e->data.a)), int(&(e->data.b)), int(&(e->data.c)),
sizeof(hashset::chain),
int(&(c->elem)), int(&(c->next)), int(&(c->elem.key)), int(&(c->elem.data)), int(&(c->elem.data.x)), int(&(c->elem.data.y)), int(&(c->elem.data.a)), int(&(c->elem.data.b)), int(&(c->elem.data.c))
);
}
};
struct B { char buffer[62]; int last; char laster; char laster2; };
struct Bits {
unsigned short A : 1;
unsigned short B : 1;
unsigned short C : 1;
unsigned short D : 1;
unsigned short x1 : 1;
unsigned short x2 : 1;
unsigned short x3 : 1;
unsigned short x4 : 1;
};
int main() {
hashtable t;
// Part 2 - the char[] should be compressed, BUT have a padding space at the end so the next
// one is aligned properly. Also handle char; char; etc. properly.
B *b = NULL;
printf("*%d,%d,%d,%d,%d,%d,%d,%d,%zu*\\n", int(b), int(&(b->buffer)), int(&(b->buffer[0])), int(&(b->buffer[1])), int(&(b->buffer[2])),
int(&(b->last)), int(&(b->laster)), int(&(b->laster2)), sizeof(B));
// Part 3 - bitfields, and small structures
Bits *b2 = NULL;
printf("*%zu*\\n", sizeof(Bits));
return 0;
}
'''
# Bloated memory; same layout as C/C++
self.do_run(src, '*16,0,4,8,8,12|20,0,4,4,8,12,12,16|24,0,20,0,4,4,8,12,12,16*\n*0,0,0,1,2,64,68,69,72*\n*2*')
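  # Helpers for the dlfcn tests below: prep_dlfcn_lib()/prep_dlfcn_main() switch the
  # settings between SIDE_MODULE and MAIN_MODULE, and build_dlfcn_lib() builds the side
  # module and renames the output to liblib.so so the main module can dlopen() it.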
def prep_dlfcn_lib(self):
self.clear_setting('MAIN_MODULE')
self.set_setting('SIDE_MODULE')
def prep_dlfcn_main(self):
self.set_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
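    # Stage the bytes of liblib.so into the virtual filesystem via a preRun hook so
    # that dlopen('liblib.so') can find it at runtime.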
create_file('lib_so_pre.js', '''
if (!Module['preRun']) Module['preRun'] = [];
Module['preRun'].push(function() { FS.createDataFile('/', 'liblib.so', %s, true, false, false); });
''' % str(list(bytearray(open('liblib.so', 'rb').read()))))
self.emcc_args += ['--pre-js', 'lib_so_pre.js']
def build_dlfcn_lib(self, filename):
if self.is_wasm():
# emcc emits a wasm in this case
self.build(filename, js_outfile=False)
shutil.move(shared.unsuffixed(filename) + '.wasm', 'liblib.so')
else:
self.build(filename)
shutil.move(shared.unsuffixed(filename) + '.js', 'liblib.so')
@needs_dylink
def test_dlfcn_missing(self):
self.set_setting('MAIN_MODULE')
if self.has_changed_setting('ASSERTIONS'):
self.skipTest('test needs to customize ASSERTIONS')
self.set_setting('ASSERTIONS')
src = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <assert.h>
int main() {
void* lib_handle = dlopen("libfoo.so", RTLD_NOW);
assert(!lib_handle);
printf("error: %s\n", dlerror());
return 0;
}
'''
self.do_run(src, 'error: Could not load dynamic lib: libfoo.so\nError: No such file or directory')
print('without assertions, the error is less clear')
self.set_setting('ASSERTIONS', 0)
self.do_run(src, 'error: Could not load dynamic lib: libfoo.so\nError: FS error')
@needs_dylink
def test_dlfcn_basic(self):
self.prep_dlfcn_lib()
create_file('liblib.cpp', '''
#include <cstdio>
class Foo {
public:
Foo() {
puts("Constructing lib object.");
}
};
Foo global;
''')
self.build_dlfcn_lib('liblib.cpp')
self.prep_dlfcn_main()
src = '''
#include <cstdio>
#include <dlfcn.h>
class Bar {
public:
Bar() {
puts("Constructing main object.");
}
};
Bar global;
int main() {
dlopen("liblib.so", RTLD_NOW);
return 0;
}
'''
self.do_run(src, 'Constructing main object.\nConstructing lib object.\n')
@needs_dylink
def test_dlfcn_i64(self):
self.prep_dlfcn_lib()
create_file('liblib.c', '''
#include <inttypes.h>
int64_t foo(int x) {
return (long long)x / (long long)1234;
}
''')
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
src = r'''
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
typedef int64_t (*int64func)(int);
int main() {
void *lib_handle = dlopen("liblib.so", RTLD_NOW);
if (!lib_handle) {
puts(dlerror());
abort();
}
printf("dll handle: %p\n", lib_handle);
int64func x = (int64func)dlsym(lib_handle, "foo");
printf("foo func handle: %p\n", x);
if (!x) {
printf("dlsym failed: %s\n", dlerror());
return 1;
}
printf("|%lld|\n", x(81234567));
return 0;
}
'''
self.do_run(src, '|65830|')
@needs_dylink
@disabled('EM_ASM in not yet supported in SIDE_MODULE')
def test_dlfcn_em_asm(self):
self.prep_dlfcn_lib()
create_file('liblib.cpp', '''
#include <emscripten.h>
class Foo {
public:
Foo() {
EM_ASM( out("Constructing lib object.") );
}
};
Foo global;
''')
self.build_dlfcn_lib('liblib.cpp')
self.prep_dlfcn_main()
src = '''
#include <emscripten.h>
#include <dlfcn.h>
class Bar {
public:
Bar() {
EM_ASM( out("Constructing main object.") );
}
};
Bar global;
int main() {
dlopen("liblib.so", RTLD_NOW);
EM_ASM( out("All done.") );
return 0;
}
'''
self.do_run(src, 'Constructing main object.\nConstructing lib object.\nAll done.\n')
@needs_dylink
def test_dlfcn_qsort(self):
self.prep_dlfcn_lib()
self.set_setting('EXPORTED_FUNCTIONS', ['_get_cmp'])
create_file('liblib.cpp', '''
int lib_cmp(const void* left, const void* right) {
const int* a = (const int*) left;
const int* b = (const int*) right;
if(*a > *b) return 1;
else if(*a == *b) return 0;
else return -1;
}
typedef int (*CMP_TYPE)(const void*, const void*);
extern "C" CMP_TYPE get_cmp() {
return lib_cmp;
}
''')
self.build_dlfcn_lib('liblib.cpp')
self.prep_dlfcn_main()
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc'])
src = '''
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
typedef int (*CMP_TYPE)(const void*, const void*);
int main_cmp(const void* left, const void* right) {
const int* a = (const int*) left;
const int* b = (const int*) right;
if(*a < *b) return 1;
else if(*a == *b) return 0;
else return -1;
}
int main() {
void* lib_handle;
CMP_TYPE (*getter_ptr)();
CMP_TYPE lib_cmp_ptr;
int arr[5] = {4, 2, 5, 1, 3};
qsort((void*)arr, 5, sizeof(int), main_cmp);
printf("Sort with main comparison: ");
for (int i = 0; i < 5; i++) {
printf("%d ", arr[i]);
}
printf("\\n");
lib_handle = dlopen("liblib.so", RTLD_NOW);
if (lib_handle == NULL) {
printf("Could not load lib.\\n");
return 1;
}
getter_ptr = (CMP_TYPE (*)()) dlsym(lib_handle, "get_cmp");
if (getter_ptr == NULL) {
printf("Could not find func.\\n");
return 1;
}
lib_cmp_ptr = getter_ptr();
qsort((void*)arr, 5, sizeof(int), lib_cmp_ptr);
printf("Sort with lib comparison: ");
for (int i = 0; i < 5; i++) {
printf("%d ", arr[i]);
}
printf("\\n");
return 0;
}
'''
self.do_run(src, 'Sort with main comparison: 5 4 3 2 1 *Sort with lib comparison: 1 2 3 4 5 *',
output_nicerizer=lambda x, err: x.replace('\n', '*'))
@needs_dylink
def test_dlfcn_data_and_fptr(self):
# Failing under v8 since: https://chromium-review.googlesource.com/712595
if self.is_wasm():
self.banned_js_engines = [config.V8_ENGINE]
self.prep_dlfcn_lib()
create_file('liblib.cpp', r'''
#include <stdio.h>
int theglobal = 42;
extern void parent_func(); // a function that is defined in the parent
int* lib_get_global_addr() {
return &theglobal;
}
void lib_fptr() {
printf("Second calling lib_fptr from main.\n");
parent_func();
// call it also through a pointer, to check indexizing
void (*p_f)();
p_f = parent_func;
p_f();
}
extern "C" void (*func(int x, void(*fptr)()))() {
printf("In func: %d\n", x);
fptr();
return lib_fptr;
}
''')
self.set_setting('EXPORTED_FUNCTIONS', ['_func'])
self.build_dlfcn_lib('liblib.cpp')
self.prep_dlfcn_main()
src = r'''
#include <stdio.h>
#include <dlfcn.h>
#include <emscripten.h>
typedef void (*FUNCTYPE(int, void(*)()))();
FUNCTYPE func;
void EMSCRIPTEN_KEEPALIVE parent_func() {
printf("parent_func called from child\n");
}
void main_fptr() {
printf("First calling main_fptr from lib.\n");
}
int main() {
void* lib_handle;
FUNCTYPE* func_fptr;
// Test basic lib loading.
lib_handle = dlopen("liblib.so", RTLD_NOW);
if (lib_handle == NULL) {
printf("Could not load lib.\n");
return 1;
}
// Test looked up function.
func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func");
// Load twice to test cache.
func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func");
if (func_fptr == NULL) {
printf("Could not find func.\n");
return 1;
}
// Test passing function pointers across module bounds.
void (*fptr)() = func_fptr(13, main_fptr);
fptr();
// Test global data.
int* globaladdr = (int*) dlsym(lib_handle, "theglobal");
if (globaladdr == NULL) {
printf("Could not find global.\n");
return 1;
}
printf("Var: %d\n", *globaladdr);
return 0;
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_main'])
self.do_run(src, '''\
In func: 13
First calling main_fptr from lib.
Second calling lib_fptr from main.
parent_func called from child
parent_func called from child
Var: 42
''')
@needs_dylink
def test_dlfcn_varargs(self):
# this test is not actually valid - it fails natively. the child should fail
# to load, rather than load and successfully see the parent's print_ints func
self.prep_dlfcn_lib()
create_file('liblib.cpp', r'''
void print_ints(int n, ...);
extern "C" void func() {
print_ints(2, 13, 42);
}
''')
self.set_setting('EXPORTED_FUNCTIONS', ['_func'])
self.build_dlfcn_lib('liblib.cpp')
self.prep_dlfcn_main()
src = r'''
#include <stdarg.h>
#include <stdio.h>
#include <dlfcn.h>
#include <assert.h>
void print_ints(int n, ...) {
va_list args;
va_start(args, n);
for (int i = 0; i < n; i++) {
printf("%d\n", va_arg(args, int));
}
va_end(args);
}
int main() {
void* lib_handle;
void (*fptr)();
print_ints(2, 100, 200);
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle);
fptr = (void (*)())dlsym(lib_handle, "func");
fptr();
return 0;
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_main'])
self.do_run(src, '100\n200\n13\n42\n')
@needs_dylink
def test_dlfcn_alignment_and_zeroing(self):
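# Dirty most of the heap with non-zero bytes, then dlopen ten copies of a side
# module and check that an over-aligned global keeps its alignment and value,
# and that zero-initialized globals really read back as zero.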
self.prep_dlfcn_lib()
self.set_setting('INITIAL_MEMORY', '16mb')
create_file('liblib.c', r'''
int prezero = 0;
__attribute__((aligned(1024))) int superAligned = 12345;
int postzero = 0;
''')
self.build_dlfcn_lib('liblib.c')
for i in range(10):
curr = '%d.so' % i
shutil.copyfile('liblib.so', curr)
self.emcc_args += ['--embed-file', curr]
self.prep_dlfcn_main()
self.set_setting('INITIAL_MEMORY', '128mb')
create_file('src.c', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dlfcn.h>
#include <assert.h>
#include <emscripten.h>
int main() {
printf("'prepare' memory with non-zero inited stuff\n");
int num = 120 * 1024 * 1024; // total is 128; we'll use 5*5 = 25 at least, so allocate pretty much all of it
void* mem = malloc(num);
assert(mem);
printf("setting this range to non-zero: %d - %d\n", (int)mem, ((int)mem) + num);
memset(mem, 1, num);
EM_ASM({
var value = HEAP8[64*1024*1024];
out('verify middle of memory is non-zero: ' + value);
assert(value === 1);
});
free(mem);
for (int i = 0; i < 10; i++) {
char curr[] = "?.so";
curr[0] = '0' + i;
printf("loading %s\n", curr);
void* lib_handle = dlopen(curr, RTLD_NOW);
if (!lib_handle) {
puts(dlerror());
assert(0);
}
printf("getting superAligned\n");
int* superAligned = (int*)dlsym(lib_handle, "superAligned");
assert(superAligned);
assert(((int)superAligned) % 1024 == 0); // alignment
printf("checking value of superAligned, at %p\n", superAligned);
assert(*superAligned == 12345); // value
printf("getting prezero\n");
int* prezero = (int*)dlsym(lib_handle, "prezero");
assert(prezero);
printf("checking value of prezero, at %p\n", prezero);
assert(*prezero == 0);
*prezero = 1;
assert(*prezero != 0);
printf("getting postzero\n");
int* postzero = (int*)dlsym(lib_handle, "postzero");
printf("checking value of postzero, at %p\n", postzero);
assert(postzero);
printf("checking value of postzero\n");
assert(*postzero == 0);
*postzero = 1;
assert(*postzero != 0);
}
printf("success.\n");
return 0;
}
''')
self.do_runf('src.c', 'success.\n')
@needs_dylink
def test_dlfcn_self(self):
self.set_setting('MAIN_MODULE')
self.set_setting('EXPORT_ALL')
def get_data_export_count(wasm):
wat = self.get_wasm_text(wasm)
lines = wat.splitlines()
exports = [l for l in lines if l.strip().startswith('(export ')]
data_exports = [l for l in exports if '(global ' in l]
return len(data_exports)
self.do_core_test('test_dlfcn_self.c')
export_count = get_data_export_count('test_dlfcn_self.wasm')
# ensure there aren't too many globals; we don't want unnamed_addr
self.assertGreater(export_count, 20)
self.assertLess(export_count, 56)
@needs_dylink
def test_dlfcn_unique_sig(self):
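# The side module exports a function with an unusual 13-int signature; make
# sure it can still be found with dlsym() and called from the main module.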
self.prep_dlfcn_lib()
create_file('liblib.c', r'''
#include <stdio.h>
int myfunc(int a, int b, int c, int d, int e, int f, int g, int h, int i, int j, int k, int l, int m) {
return 13;
}
''')
self.set_setting('EXPORTED_FUNCTIONS', ['_myfunc'])
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <dlfcn.h>
typedef int (*FUNCTYPE)(int, int, int, int, int, int, int, int, int, int, int, int, int);
int main() {
void *lib_handle;
FUNCTYPE func_ptr;
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
func_ptr = (FUNCTYPE)dlsym(lib_handle, "myfunc");
assert(func_ptr != NULL);
assert(func_ptr(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == 13);
puts("success");
return 0;
}
''')
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc'])
self.do_runf('main.c', 'success')
@needs_dylink
def test_dlfcn_info(self):
self.prep_dlfcn_lib()
create_file('liblib.c', r'''
#include <stdio.h>
int myfunc(int a, int b, int c, int d, int e, int f, int g, int h, int i, int j, int k, int l, int m) {
return 13;
}
''')
self.set_setting('EXPORTED_FUNCTIONS', ['_myfunc'])
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
create_file('main.c', '''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <dlfcn.h>
typedef int (*FUNCTYPE)(int, int, int, int, int, int, int, int, int, int, int, int, int);
int main() {
void *lib_handle;
FUNCTYPE func_ptr;
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
func_ptr = (FUNCTYPE)dlsym(lib_handle, "myfunc");
assert(func_ptr != NULL);
assert(func_ptr(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == 13);
/* Verify that we don't corrupt func_ptr when calling dladdr. */
Dl_info info;
memset(&info, 0, sizeof(info));
dladdr(func_ptr, &info);
assert(func_ptr != NULL);
assert(func_ptr(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == 13);
/* Verify something useful lives in info. */
assert(info.dli_fname != NULL);
assert(info.dli_fbase == NULL);
assert(info.dli_sname == NULL);
assert(info.dli_saddr == NULL);
puts("success");
return 0;
}
''')
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc'])
self.do_runf('main.c', 'success')
@needs_dylink
def test_dlfcn_stacks(self):
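# The side module function fills a sizeable local buffer; verify that calling
# it through dlsym() does not trample the caller's stack data.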
self.prep_dlfcn_lib()
create_file('liblib.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
int myfunc(const char *input) {
char bigstack[1024] = { 0 };
// make sure we didn't just trample the stack!
assert(!strcmp(input, "foobar"));
snprintf(bigstack, sizeof(bigstack), "%s", input);
return strlen(bigstack);
}
''')
self.set_setting('EXPORTED_FUNCTIONS', ['_myfunc'])
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
create_file('main.c', '''
#include <assert.h>
#include <stdio.h>
#include <dlfcn.h>
#include <string.h>
typedef int (*FUNCTYPE)(const char *);
int main() {
void *lib_handle;
FUNCTYPE func_ptr;
char str[128];
snprintf(str, sizeof(str), "foobar");
// HACK: Use strcmp in the main executable so that it doesn't get optimized out and the dynamic library
// is able to use it.
assert(!strcmp(str, "foobar"));
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
func_ptr = (FUNCTYPE)dlsym(lib_handle, "myfunc");
assert(func_ptr != NULL);
assert(func_ptr(str) == 6);
puts("success");
return 0;
}
''')
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc', '_strcmp'])
self.do_runf('main.c', 'success')
@needs_dylink
def test_dlfcn_funcs(self):
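# Pass function pointers from the main module into the side module and get
# function pointers back from it, invoking each one to check both directions.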
self.prep_dlfcn_lib()
create_file('liblib.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
typedef void (*voidfunc)();
typedef void (*intfunc)(int);
void callvoid(voidfunc f) { f(); }
void callint(voidfunc f, int x) { f(x); }
void void_0() { printf("void 0\n"); }
void void_1() { printf("void 1\n"); }
voidfunc getvoid(int i) {
switch(i) {
case 0: return void_0;
case 1: return void_1;
default: return NULL;
}
}
void int_0(int x) { printf("int 0 %d\n", x); }
void int_1(int x) { printf("int 1 %d\n", x); }
intfunc getint(int i) {
switch(i) {
case 0: return int_0;
case 1: return int_1;
default: return NULL;
}
}
''')
self.set_setting('EXPORTED_FUNCTIONS', ['_callvoid', '_callint', '_getvoid', '_getint'])
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <dlfcn.h>
typedef void (*voidfunc)();
typedef void (*intfunc)(int);
typedef void (*voidcaller)(voidfunc);
typedef void (*intcaller)(intfunc, int);
typedef voidfunc (*voidgetter)(int);
typedef intfunc (*intgetter)(int);
void void_main() { printf("void_main.\n"); }
void int_main(int x) { printf("int_main %d\n", x); }
int main() {
printf("go\n");
void *lib_handle;
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
voidcaller callvoid = (voidcaller)dlsym(lib_handle, "callvoid");
assert(callvoid != NULL);
callvoid(void_main);
intcaller callint = (intcaller)dlsym(lib_handle, "callint");
assert(callint != NULL);
callint(int_main, 201);
voidgetter getvoid = (voidgetter)dlsym(lib_handle, "getvoid");
assert(getvoid != NULL);
callvoid(getvoid(0));
callvoid(getvoid(1));
intgetter getint = (intgetter)dlsym(lib_handle, "getint");
assert(getint != NULL);
callint(getint(0), 54);
callint(getint(1), 9000);
assert(getint(1000) == NULL);
puts("ok");
return 0;
}
''')
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc'])
self.do_runf('main.c', '''go
void_main.
int_main 201
void 0
void 1
int 0 54
int 1 9000
ok
''')
@needs_dylink
def test_dlfcn_mallocs(self):
# will be exhausted without functional malloc/free
self.set_setting('INITIAL_MEMORY', '64mb')
self.prep_dlfcn_lib()
create_file('liblib.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
void *mallocproxy(int n) { return malloc(n); }
void freeproxy(void *p) { free(p); }
''')
self.set_setting('EXPORTED_FUNCTIONS', ['_mallocproxy', '_freeproxy'])
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc', '_free'])
self.do_runf(test_file('dlmalloc_proxy.c'), '*294,153*')
@needs_dylink
def test_dlfcn_longjmp(self):
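# setjmp() in the main module, longjmp() from a function in the dlopen'd side
# module: the jump must unwind correctly back across the module boundary.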
self.prep_dlfcn_lib()
create_file('liblib.c', r'''
#include <setjmp.h>
#include <stdio.h>
void jumpy(jmp_buf buf) {
static int i = 0;
i++;
if (i == 10) longjmp(buf, i);
printf("pre %d\n", i);
}
''')
self.set_setting('EXPORTED_FUNCTIONS', ['_jumpy'])
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <dlfcn.h>
#include <setjmp.h>
typedef void (*jumpfunc)(jmp_buf);
int main() {
printf("go!\n");
void *lib_handle;
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
jumpfunc jumpy = (jumpfunc)dlsym(lib_handle, "jumpy");
assert(jumpy);
jmp_buf buf;
int jmpval = setjmp(buf);
if (jmpval == 0) {
while (1) jumpy(buf);
} else {
printf("out!\n");
}
return 0;
}
''')
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc', '_free'])
self.do_runf('main.c', '''go!
pre 1
pre 2
pre 3
pre 4
pre 5
pre 6
pre 7
pre 8
pre 9
out!
''', force_c=True)
# TODO: make this work. need to forward tempRet0 across modules
# TODO: Enable @with_both_exception_handling (the test is not currently working)
@needs_dylink
def zzztest_dlfcn_exceptions(self):
self.set_setting('DISABLE_EXCEPTION_CATCHING', 0)
self.prep_dlfcn_lib()
create_file('liblib.cpp', r'''
extern "C" {
int ok() {
return 65;
}
int fail() {
throw 123;
}
}
''')
self.set_setting('EXPORTED_FUNCTIONS', ['_ok', '_fail'])
self.build_dlfcn_lib('liblib.cpp')
self.prep_dlfcn_main()
src = r'''
#include <assert.h>
#include <stdio.h>
#include <dlfcn.h>
typedef int (*intfunc)();
int main() {
printf("go!\n");
void *lib_handle;
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
intfunc okk = (intfunc)dlsym(lib_handle, "ok");
intfunc faill = (intfunc)dlsym(lib_handle, "fail");
assert(okk && faill);
try {
printf("ok: %d\n", okk());
} catch(...) {
printf("wha\n");
}
try {
printf("fail: %d\n", faill());
} catch(int x) {
printf("int %d\n", x);
}
try {
printf("fail: %d\n", faill());
} catch(double x) {
printf("caught %f\n", x);
}
return 0;
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc', '_free'])
self.do_run(src, '''go!
ok: 65
int 123
ok
''')
@needs_dylink
def test_dlfcn_handle_alloc(self):
# verify that dlopen does not allocate already used handles
dirname = self.get_dir()
def indir(name):
return os.path.join(dirname, name)
create_file('a.cpp', r'''
#include <stdio.h>
static struct a {
a() {
puts("a: loaded");
}
} _;
''')
create_file('b.cpp', r'''
#include <stdio.h>
static struct b {
b() {
puts("b: loaded");
}
} _;
''')
self.prep_dlfcn_lib()
self.build_dlfcn_lib('a.cpp')
shutil.move(indir('liblib.so'), indir('liba.so'))
self.build_dlfcn_lib('b.cpp')
shutil.move(indir('liblib.so'), indir('libb.so'))
self.set_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
self.set_setting('EXPORT_ALL')
self.emcc_args += ['--embed-file', '.@/']
# XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
self.set_setting('INITIAL_MEMORY', '32mb')
src = r'''
#include <dlfcn.h>
#include <assert.h>
#include <stddef.h>
int main() {
void *liba, *libb, *liba2;
int err;
liba = dlopen("liba.so", RTLD_NOW);
assert(liba != NULL);
libb = dlopen("libb.so", RTLD_NOW);
assert(libb != NULL);
err = dlclose(liba);
assert(!err);
liba2 = dlopen("liba.so", RTLD_NOW);
assert(liba2 != libb);
return 0;
}
'''
self.do_run(src, 'a: loaded\nb: loaded\na: loaded\n')
@needs_dylink
@bleeding_edge_wasm_backend
def test_dlfcn_feature_in_lib(self):
self.emcc_args.append('-mnontrapping-fptoint')
self.prep_dlfcn_lib()
create_file('liblib.cpp', r'''
extern "C" int magic(float x) {
return __builtin_wasm_trunc_saturate_s_i32_f32(x);
}
''')
self.build_dlfcn_lib('liblib.cpp')
self.prep_dlfcn_main()
src = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>
typedef int (*fi)(float);
int main() {
void *lib_handle = dlopen("liblib.so", RTLD_NOW);
if (!lib_handle) {
puts(dlerror());
abort();
}
fi x = (fi)dlsym(lib_handle, "magic");
if (!x) {
puts(dlerror());
abort();
}
printf("float: %d.\n", x(42.99));
return 0;
}
'''
self.do_run(src, 'float: 42.\n')
def dylink_test(self, main, side, expected=None, header=None, force_c=False, **kwargs):
# Same as dylink_testf but takes source code in string form
if not isinstance(side, list):
side_file = 'liblib.cpp' if not force_c else 'liblib.c'
create_file(side_file, side)
side = side_file
if not isinstance(main, list):
main_file = 'main.cpp' if not force_c else 'main.c'
create_file(main_file, main)
main = main_file
if header:
create_file('header.h', header)
return self.dylink_testf(main, side, expected, force_c, **kwargs)
def dylink_testf(self, main, side, expected=None, force_c=False, main_emcc_args=[],
need_reverse=True, auto_load=True, main_module=2, **kwargs):
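# Build the side module first (SIDE_MODULE), rename its output to liblib.so,
# then build and run the main program as a MAIN_MODULE linked against it;
# optionally repeat with the roles of main and side flipped.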
self.maybe_closure()
# Same as dylink_test but takes source code as filenames on disc.
old_args = self.emcc_args.copy()
if not expected:
outfile = shared.unsuffixed(main) + '.out'
if os.path.exists(outfile):
expected = open(outfile).read()
# side settings
self.clear_setting('MAIN_MODULE')
self.clear_setting('ERROR_ON_UNDEFINED_SYMBOLS')
self.set_setting('SIDE_MODULE')
side_suffix = 'wasm' if self.is_wasm() else 'js'
if isinstance(side, list):
out_file = 'liblib.' + side_suffix
# side is just a library
self.run_process([EMCC] + side + self.get_emcc_args() + ['-o', out_file])
else:
out_file = self.build(side, js_outfile=(side_suffix == 'js'))
shutil.move(out_file, 'liblib.so')
# main settings
self.set_setting('MAIN_MODULE', main_module)
self.clear_setting('SIDE_MODULE')
if auto_load:
# Normally we don't report undefined symbols when linking main modules but
# in this case we know all the side modules are specified on the command line.
# TODO(sbc): Make this the default one day
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS')
self.emcc_args += main_emcc_args
self.emcc_args.append('liblib.so')
if force_c:
self.emcc_args.append('-nostdlib++')
if isinstance(main, list):
# main is just a library
try_delete('main.js')
self.run_process([EMCC] + main + self.get_emcc_args() + ['-o', 'main.js'])
self.do_run('main.js', expected, no_build=True, **kwargs)
else:
self.do_runf(main, expected, force_c=force_c, **kwargs)
self.emcc_args = old_args
if need_reverse:
print('flip')
# Test the reverse as well. There we flip the role of the side module and main module.
# - We add --no-entry since the side module doesn't have a `main`
self.dylink_testf(side, main, expected, force_c, main_emcc_args + ['--no-entry'],
need_reverse=False, main_module=main_module, **kwargs)
def do_basic_dylink_test(self, **kwargs):
self.dylink_test(r'''
#include <stdio.h>
#include "header.h"
int main() {
printf("other says %d.\n", sidey());
return 0;
}
''', '''
#include "header.h"
int sidey() {
return 11;
}
''', 'other says 11.', 'int sidey();', force_c=True, **kwargs)
@needs_dylink
def test_dylink_basics(self):
self.do_basic_dylink_test(need_reverse=False)
self.verify_in_strict_mode('main.js')
@needs_dylink
def test_dylink_basics_no_modify(self):
if is_optimizing(self.emcc_args):
self.skipTest('no modify mode only works with non-optimizing builds')
self.set_setting('WASM_BIGINT')
self.set_setting('ERROR_ON_WASM_CHANGES_AFTER_LINK')
self.do_basic_dylink_test()
@needs_dylink
def test_dylink_no_export(self):
self.set_setting('NO_DECLARE_ASM_MODULE_EXPORTS')
self.do_basic_dylink_test()
@needs_dylink
def test_dylink_memory_growth(self):
if not self.is_wasm():
self.skipTest('wasm only')
self.set_setting('ALLOW_MEMORY_GROWTH')
self.do_basic_dylink_test()
@needs_dylink
def test_dylink_safe_heap(self):
self.set_setting('SAFE_HEAP')
self.do_basic_dylink_test()
@needs_dylink
def test_dylink_function_pointer_equality(self):
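# The address of puts() taken in the main module must compare equal to the
# address of puts() returned by the side module.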
self.dylink_test(r'''
#include <stdio.h>
#include "header.h"
int main() {
void* puts_side = get_address();
printf("main module address %p.\n", &puts);
printf("side module address address %p.\n", puts_side);
if (&puts == puts_side)
printf("success\n");
else
printf("failure\n");
return 0;
}
''', '''
#include <stdio.h>
#include "header.h"
void* get_address() {
return (void*)&puts;
}
''', 'success', header='void* get_address();', force_c=True)
@needs_dylink
def test_dylink_floats(self):
self.dylink_test(r'''
#include <stdio.h>
extern float sidey();
int main() {
printf("other says %.2f.\n", sidey()+1);
return 0;
}
''', '''
float sidey() { return 11.5; }
''', 'other says 12.50', force_c=True)
@needs_dylink
def test_dylink_printf(self):
self.dylink_test(r'''
#include <stdio.h>
void sidey();
int main() {
printf("hello from main\n");
sidey();
return 0;
}
''', r'''
#include <stdio.h>
void sidey() {
printf("hello from side\n");
}
''', 'hello from main\nhello from side\n', force_c=True)
# Verify that a function pointer can be passed back and forth and invoked
# on both sides.
@needs_dylink
def test_dylink_funcpointer(self):
self.dylink_test(
main=r'''
#include <stdio.h>
#include <assert.h>
#include "header.h"
intfunc sidey(intfunc f);
void a(int arg) { printf("hello from funcptr: %d\n", arg); }
int main() {
intfunc b = sidey(a);
assert(a == b);
b(0);
return 0;
}
''',
side='''
#include "header.h"
intfunc sidey(intfunc f) { f(1); return f; }
''',
expected='hello from funcptr: 1\nhello from funcptr: 0\n',
header='typedef void (*intfunc)(int );', force_c=True)
@needs_dylink
# test dynamic linking of a module with multiple function pointers, stored
# statically
def test_dylink_static_funcpointers(self):
self.dylink_test(
main=r'''
#include <stdio.h>
#include "header.h"
void areturn0() { printf("hello 0\n"); }
void areturn1() { printf("hello 1\n"); }
void areturn2() { printf("hello 2\n"); }
voidfunc func_ptrs[3] = { areturn0, areturn1, areturn2 };
int main(int argc, char **argv) {
sidey(func_ptrs[0]);
sidey(func_ptrs[1]);
sidey(func_ptrs[2]);
return 0;
}
''',
side='''
#include "header.h"
void sidey(voidfunc f) { f(); }
''',
expected='hello 0\nhello 1\nhello 2\n',
header='typedef void (*voidfunc)(); void sidey(voidfunc f);', force_c=True)
@needs_dylink
def test_dylink_funcpointers_wrapper(self):
self.dylink_test(
main=r'''\
#include <stdio.h>
#include "header.h"
int main(int argc, char **argv) {
charfunc f1 = emscripten_run_script;
f1("console.log('one')");
charfunc f2 = get();
f2("console.log('two')");
return 0;
}
''',
side='''\
#include "header.h"
charfunc get() {
return emscripten_run_script;
}
''',
expected='one\ntwo\n',
header='''\
#include <emscripten.h>
typedef void (*charfunc)(const char*);
extern charfunc get();
''', force_c=True)
@needs_dylink
def test_dylink_static_funcpointer_float(self):
self.dylink_test(
main=r'''\
#include <stdio.h>
#include "header.h"
int sidey(floatfunc f);
float func1(float f) { printf("hello 1: %f\n", f); return 0; }
floatfunc f1 = &func1;
int main(int argc, char **argv) {
printf("got: %d\n", sidey(f1));
f1(12.34);
return 0;
}
''',
side='''\
#include "header.h"
int sidey(floatfunc f) { f(56.78); return 1; }
''',
expected='hello 1: 56.779999\ngot: 1\nhello 1: 12.340000\n',
header='typedef float (*floatfunc)(float);', force_c=True)
@needs_dylink
def test_missing_signatures(self):
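# Taking the addresses of JS library functions in a MAIN_MODULE build must give
# non-null pointers; main() returns non-zero only if the two addresses sum to zero.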
create_file('test_sig.c', r'''#include <emscripten.h>
int main() {
return 0 == ( (int)&emscripten_run_script_string +
(int)&emscripten_run_script );
}''')
self.set_setting('MAIN_MODULE', 1)
self.do_runf('test_sig.c', '')
@needs_dylink
def test_dylink_global_init(self):
self.dylink_test(r'''
#include <stdio.h>
struct Class {
Class() { printf("a new Class\n"); }
};
static Class c;
int main() {
return 0;
}
''', r'''
void nothing() {}
''', 'a new Class\n')
@needs_dylink
def test_dylink_global_inits(self):
def test():
self.dylink_test(header=r'''
#include <stdio.h>
struct Class {
Class(const char *name) { printf("new %s\n", name); }
};
''', main=r'''
#include "header.h"
static Class c("main");
int main() {
return 0;
}
''', side=r'''
#include "header.h"
static Class c("side");
''', expected=['new main\nnew side\n', 'new side\nnew main\n'])
test()
print('check warnings')
self.set_setting('ASSERTIONS', 2)
test()
# TODO: this in wasm
# full = self.run_js('src.js')
# self.assertNotContained('already exists', full)
@needs_dylink
def test_dylink_i64(self):
# Runs with main_module=1 due to undefined getTempRet0 otherwise
self.dylink_test(r'''
#include <stdio.h>
#include <stdint.h>
extern int64_t sidey();
int main() {
printf("other says %lld.\n", sidey());
return 0;
}
''', '''
#include <stdint.h>
int64_t sidey() {
return 42;
}
''', 'other says 42.', force_c=True, main_module=1)
@all_engines
@needs_dylink
def test_dylink_i64_b(self):
# Runs with main_module=1 due to undefined getTempRet0 otherwise
self.dylink_test(r'''
#include <stdio.h>
#include <stdint.h>
extern int64_t sidey();
int64_t testAdd(int64_t a) {
return a + 1;
}
int64_t testAddB(int a) {
return a + 1;
}
typedef int64_t (*testAddHandler)(int64_t);
testAddHandler h = &testAdd;
typedef int64_t (*testAddBHandler)(int);
testAddBHandler hb = &testAddB;
int main() {
printf("other says %lld.\n", sidey());
int64_t r = h(42);
printf("my fp says: %lld.\n", r);
int64_t rb = hb(42);
printf("my second fp says: %lld.\n", r);
}
''', '''
#include <stdint.h>
int64_t sidey() {
volatile int64_t x = 0x12345678abcdef12LL;
x += x % 17;
x = 18 - x;
return x;
}
''', 'other says -1311768467750121224.\nmy fp says: 43.\nmy second fp says: 43.', force_c=True, main_module=1)
@needs_dylink
@also_with_wasm_bigint
def test_dylink_i64_c(self):
# Runs with main_module=1 due to undefined getTempRet0 otherwise
self.dylink_test(r'''
#include <stdio.h>
#include <inttypes.h>
#include "header.h"
typedef int32_t (*fp_type_32)(int32_t, int32_t, int32_t);
typedef int64_t (*fp_type_64)(int32_t, int32_t, int32_t);
int32_t internal_function_ret_32(int32_t i, int32_t j, int32_t k) {
return 32;
}
int64_t internal_function_ret_64(int32_t i, int32_t j, int32_t k) {
return 64;
}
int main() {
fp_type_32 fp32_internal = &internal_function_ret_32;
fp_type_32 fp32_external = &function_ret_32;
fp_type_64 fp64_external = &function_ret_64;
fp_type_64 fp64_internal = &internal_function_ret_64;
int32_t ires32 = fp32_internal(0,0,0);
printf("res32 - internal %d\n",ires32);
int32_t eres32 = fp32_external(0,0,0);
printf("res32 - external %d\n",eres32);
int64_t ires64 = fp64_internal(0,0,0);
printf("res64 - internal %" PRId64 "\n",ires64);
int64_t eres64 = fp64_external(0,0,0);
printf("res64 - external %" PRId64 "\n",eres64);
return 0;
}
''', '''
#include "header.h"
int32_t function_ret_32(int32_t i, int32_t j, int32_t k) {
return 32;
}
int64_t function_ret_64(int32_t i, int32_t j, int32_t k) {
return 64;
}
''', '''res32 - internal 32
res32 - external 32
res64 - internal 64
res64 - external 64\n''', header='''
#include <emscripten.h>
#include <stdint.h>
EMSCRIPTEN_KEEPALIVE int32_t function_ret_32(int32_t i, int32_t j, int32_t k);
EMSCRIPTEN_KEEPALIVE int64_t function_ret_64(int32_t i, int32_t j, int32_t k);
''', force_c=True, main_module=1)
@needs_dylink
@also_with_wasm_bigint
def test_dylink_i64_invoke(self):
self.set_setting('DISABLE_EXCEPTION_CATCHING', 0)
self.dylink_test(r'''\
#include <stdio.h>
#include <stdint.h>
extern "C" int64_t sidey(int64_t arg);
int main(int argc, char *argv[]) {
int64_t temp = 42;
printf("got %lld\n", sidey(temp));
return 0;
}''', r'''\
#include <stdint.h>
#include <stdio.h>
#include <emscripten.h>
extern "C" {
EMSCRIPTEN_KEEPALIVE int64_t do_call(int64_t arg) {
if (arg == 0) {
throw;
}
return 2 * arg;
}
int64_t sidey(int64_t arg) {
try {
return do_call(arg);
} catch(...) {
return 0;
}
}
}''', 'got 84', need_reverse=False)
@needs_dylink
def test_dylink_class(self):
self.dylink_test(header=r'''
#include <stdio.h>
struct Class {
Class(const char *name);
};
''', main=r'''
#include "header.h"
int main() {
Class c("main");
return 0;
}
''', side=r'''
#include "header.h"
Class::Class(const char *name) { printf("new %s\n", name); }
''', expected=['new main\n'])
@needs_dylink
def test_dylink_global_var(self):
self.dylink_test(main=r'''
#include <stdio.h>
extern int x;
int main() {
printf("extern is %d.\n", x);
return 0;
}
''', side=r'''
int x = 123;
''', expected=['extern is 123.\n'], force_c=True)
@needs_dylink
def test_dylink_global_var_modded(self):
self.dylink_test(main=r'''
#include <stdio.h>
extern int x;
int main() {
printf("extern is %d.\n", x);
return 0;
}
''', side=r'''
int x = 123;
struct Initter {
Initter() { x = 456; }
};
Initter initter;
''', expected=['extern is 456.\n'])
@needs_dylink
def test_dylink_stdlib(self):
self.dylink_test(header=r'''
#include <math.h>
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
double pow_two(double x);
''', main=r'''
#include <stdio.h>
#include "header.h"
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
puts(ret);
printf("pow_two: %d.\n", (int)pow_two(5.9));
return 0;
}
''', side=r'''
#include "header.h"
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
double pow_two(double x) {
return pow(2, x);
}
''', expected=['hello through side\n\npow_two: 59.'], force_c=True)
@needs_dylink
def test_dylink_jslib(self):
create_file('lib.js', r'''
mergeInto(LibraryManager.library, {
test_lib_func: function(x) {
return x + 17.2;
}
});
''')
self.dylink_test(header=r'''
extern double test_lib_func(int input);
''', main=r'''
#include <stdio.h>
#include "header.h"
extern double sidey();
int main2() { return 11; }
int main() {
int input = sidey();
double temp = test_lib_func(input);
printf("other says %.2f\n", temp);
printf("more: %.5f, %d\n", temp, input);
return 0;
}
''', side=r'''
#include <stdio.h>
#include "header.h"
extern int main2();
double sidey() {
int temp = main2();
printf("main2 sed: %d\n", temp);
printf("main2 sed: %u, %c\n", temp, temp/2);
return test_lib_func(temp);
}
''', expected='other says 45.2', main_emcc_args=['--js-library', 'lib.js'], force_c=True)
@needs_dylink
def test_dylink_many_postsets(self):
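# Build a large (1234-entry) static table of function pointers shared through a
# header, stressing the function-pointer post-sets emitted for both modules.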
NUM = 1234
self.dylink_test(header=r'''
#include <stdio.h>
typedef void (*voidfunc)();
static void simple() {
printf("simple.\n");
}
static volatile voidfunc funcs[''' + str(NUM) + '] = { ' + ','.join(['simple'] * NUM) + r''' };
static void test() {
volatile int i = ''' + str(NUM - 1) + r''';
funcs[i]();
i = 0;
funcs[i]();
}
extern void more();
''', main=r'''
#include "header.h"
int main() {
test();
more();
return 0;
}
''', side=r'''
#include "header.h"
void more() {
test();
}
''', expected=['simple.\nsimple.\nsimple.\nsimple.\n'], force_c=True)
@needs_dylink
def test_dylink_postsets_chunking(self):
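# Mix many globals with local initializers and one global initialized from a
# symbol defined in the side module, exercising how pointer post-sets are chunked.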
self.dylink_test(header=r'''
extern int global_var;
''', main=r'''
#include <stdio.h>
#include "header.h"
// prepare 99 global variables with local initializers
static int p = 1;
#define P(x) __attribute__((used)) int *padding##x = &p;
P(01) P(02) P(03) P(04) P(05) P(06) P(07) P(08) P(09) P(10)
P(11) P(12) P(13) P(14) P(15) P(16) P(17) P(18) P(19) P(20)
P(21) P(22) P(23) P(24) P(25) P(26) P(27) P(28) P(29) P(30)
P(31) P(32) P(33) P(34) P(35) P(36) P(37) P(38) P(39) P(40)
P(41) P(42) P(43) P(44) P(45) P(46) P(47) P(48) P(49) P(50)
P(51) P(52) P(53) P(54) P(55) P(56) P(57) P(58) P(59) P(60)
P(61) P(62) P(63) P(64) P(65) P(66) P(67) P(68) P(69) P(70)
P(71) P(72) P(73) P(74) P(75) P(76) P(77) P(78) P(79) P(80)
P(81) P(82) P(83) P(84) P(85) P(86) P(87) P(88) P(89) P(90)
P(91) P(92) P(93) P(94) P(95) P(96) P(97) P(98) P(99)
// prepare a global variable with a global initializer
int *ptr = &global_var;
int main(int argc, char *argv[]) {
printf("%d\n", *ptr);
}
''', side=r'''
#include "header.h"
int global_var = 12345;
''', expected=['12345\n'], force_c=True)
@needs_dylink
def test_dylink_syslibs(self): # one module uses libcxx, need to force its inclusion when it isn't the main module
# https://github.com/emscripten-core/emscripten/issues/10571
return self.skipTest('Currently not working due to duplicate symbol errors in wasm-ld')
def test(syslibs, expect_pass=True, need_reverse=True):
print('syslibs', syslibs, self.get_setting('ASSERTIONS'))
passed = True
try:
with env_modify({'EMCC_FORCE_STDLIBS': syslibs}):
self.dylink_test(main=r'''
void side();
int main() {
side();
return 0;
}
''', side=r'''
#include <iostream>
void side() { std::cout << "cout hello from side\n"; }
''', expected=['cout hello from side\n'], need_reverse=need_reverse, assert_returncode=NON_ZERO)
except Exception as e:
if expect_pass:
raise
print('(seeing expected fail)')
passed = False
assertion = 'build the MAIN_MODULE with EMCC_FORCE_STDLIBS=1 in the environment'
if self.get_setting('ASSERTIONS'):
self.assertContained(assertion, str(e))
else:
self.assertNotContained(assertion, str(e))
assert passed == expect_pass, ['saw', passed, 'but expected', expect_pass]
test('libc++')
test('1')
if not self.has_changed_setting('ASSERTIONS'):
self.set_setting('ASSERTIONS', 0)
test('', expect_pass=False, need_reverse=False)
self.set_setting('ASSERTIONS')
test('', expect_pass=False, need_reverse=False)
@needs_dylink
@with_env_modify({'EMCC_FORCE_STDLIBS': 'libc++'})
def test_dylink_iostream(self):
self.dylink_test(header=r'''
#include <iostream>
#include <string>
std::string side();
''', main=r'''
#include "header.h"
int main() {
std::cout << "hello from main " << side() << std::endl;
return 0;
}
''', side=r'''
#include "header.h"
std::string side() { return "and hello from side"; }
''', expected=['hello from main and hello from side\n'])
@needs_dylink
def test_dylink_dynamic_cast(self): # issue 3465
self.dylink_test(header=r'''
class Base {
public:
virtual void printName();
};
class Derived : public Base {
public:
void printName();
};
''', main=r'''
#include "header.h"
#include <iostream>
using namespace std;
int main() {
cout << "starting main" << endl;
Base *base = new Base();
Base *derived = new Derived();
base->printName();
derived->printName();
if (dynamic_cast<Derived*>(derived)) {
cout << "OK" << endl;
} else {
cout << "KO" << endl;
}
return 0;
}
''', side=r'''
#include "header.h"
#include <iostream>
using namespace std;
void Base::printName() {
cout << "Base" << endl;
}
void Derived::printName() {
cout << "Derived" << endl;
}
''', expected=['starting main\nBase\nDerived\nOK'])
@with_both_exception_handling
@needs_dylink
def test_dylink_raii_exceptions(self):
# MAIN_MODULE=1 still needed in this test due to:
# https://github.com/emscripten-core/emscripten/issues/13786
self.dylink_test(main=r'''
#include <stdio.h>
extern int side();
int main() {
printf("from side: %d.\n", side());
}
''', side=r'''
#include <stdio.h>
typedef int (*ifdi)(float, double, int);
int func_with_special_sig(float a, double b, int c) {
printf("special %f %f %d\n", a, b, c);
return 1337;
}
struct DestructorCaller {
~DestructorCaller() { printf("destroy\n"); }
};
int side() {
// d has a destructor that must be called on function
// exit, which means an invoke will be used for the
// indirect call here - and the signature of that call
// is special and not present in the main module, so
// it must be generated for the side module.
DestructorCaller d;
volatile ifdi p = func_with_special_sig;
return p(2.18281, 3.14159, 42);
}
''', expected=['special 2.182810 3.141590 42\ndestroy\nfrom side: 1337.\n'], main_module=1)
@needs_dylink
@disabled('https://github.com/emscripten-core/emscripten/issues/12815')
def test_dylink_hyper_dupe(self):
self.set_setting('INITIAL_MEMORY', '64mb')
if not self.has_changed_setting('ASSERTIONS'):
self.set_setting('ASSERTIONS', 2)
# test hyper-dynamic linking, and test duplicate warnings
create_file('third.cpp', r'''
#include <stdio.h>
int sidef() { return 36; }
int sideg = 49;
int bsidef() { return 536; }
extern void only_in_second_1(int x);
extern int second_to_third;
int third_to_second = 1337;
void only_in_third_0() {
// note we access our own globals directly, so
// it doesn't matter that overriding failed
printf("only_in_third_0: %d, %d, %d\n", sidef(), sideg, second_to_third);
only_in_second_1(2112);
}
void only_in_third_1(int x) {
printf("only_in_third_1: %d, %d, %d, %d\n", sidef(), sideg, second_to_third, x);
}
''')
if self.is_wasm():
libname = 'third.wasm'
else:
libname = 'third.js'
self.run_process([EMCC, 'third.cpp', '-o', libname, '-s', 'SIDE_MODULE'] + self.get_emcc_args())
self.dylink_test(main=r'''
#include <stdio.h>
#include <emscripten.h>
extern int sidef();
extern int sideg;
extern int bsidef();
extern int bsideg;
extern void only_in_second_0();
extern void only_in_third_0();
int main() {
EM_ASM({
loadDynamicLibrary('%s'); // hyper-dynamic! works at least for functions (and consts not used in same block)
});
printf("sidef: %%d, sideg: %%d.\n", sidef(), sideg);
printf("bsidef: %%d.\n", bsidef());
only_in_second_0();
only_in_third_0();
}
''' % libname,
side=r'''
#include <stdio.h>
int sidef() { return 10; } // third will try to override these, but fail!
int sideg = 20;
extern void only_in_third_1(int x);
int second_to_third = 500;
extern int third_to_second;
void only_in_second_0() {
printf("only_in_second_0: %d, %d, %d\n", sidef(), sideg, third_to_second);
only_in_third_1(1221);
}
void only_in_second_1(int x) {
printf("only_in_second_1: %d, %d, %d, %d\n", sidef(), sideg, third_to_second, x);
}
''',
expected=['sidef: 10, sideg: 20.\nbsidef: 536.\nonly_in_second_0: 10, 20, 1337\nonly_in_third_1: 36, 49, 500, 1221\nonly_in_third_0: 36, 49, 500\nonly_in_second_1: 10, 20, 1337, 2112\n'],
# in wasm, we can't flip as the side would have an EM_ASM, which we don't support yet TODO
need_reverse=not self.is_wasm())
if not self.has_changed_setting('ASSERTIONS'):
print('check warnings')
full = self.run_js('src.js')
self.assertContained("warning: symbol '_sideg' from '%s' already exists" % libname, full)
@needs_dylink
def test_dylink_load_compiled_side_module(self):
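# Instead of relying on dlopen, read liblib.so via NODEFS, compile it with
# WebAssembly.Module and register it through loadWebAssemblyModule() and
# mergeLibSymbols() directly from EM_ASM.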
self.set_setting('FORCE_FILESYSTEM')
self.emcc_args.append('-lnodefs.js')
self.set_setting('INITIAL_MEMORY', '64mb')
self.dylink_test(main=r'''
#include <stdio.h>
#include <emscripten.h>
extern int sidef();
int main() {
EM_ASM({
FS.mkdir('/working');
FS.mount(NODEFS,{ root: '.' }, '/working');
var libData = FS.readFile('/working/liblib.so', {encoding: 'binary'});
if (!(libData instanceof Uint8Array)) {
libData = new Uint8Array(libData);
}
var compiledModule = new WebAssembly.Module(libData);
var sideExports = loadWebAssemblyModule(compiledModule, {loadAsync: false, nodelete: true});
mergeLibSymbols(sideExports, 'liblib.so');
});
printf("sidef: %d.\n", sidef());
}
''',
side=r'''
#include <stdio.h>
int sidef() { return 10; }
''',
expected=['sidef: 10'],
# in wasm, we can't flip as the side would have an EM_ASM, which we don't support yet TODO
need_reverse=not self.is_wasm(),
auto_load=False)
@needs_dylink
def test_dylink_dso_needed(self):
def do_run(src, expected_output):
self.do_run(src + 'int main() { return test_main(); }', expected_output)
self._test_dylink_dso_needed(do_run)
@needs_dylink
def test_dylink_dot_a(self):
# .a linking must force all .o files inside it, when in a shared module
create_file('third.c', 'int sidef() { return 36; }')
create_file('fourth.c', 'int sideg() { return 17; }')
self.run_process([EMCC, '-fPIC', '-c', 'third.c', '-o', 'third.o'] + self.get_emcc_args())
self.run_process([EMCC, '-fPIC', '-c', 'fourth.c', '-o', 'fourth.o'] + self.get_emcc_args())
self.run_process([EMAR, 'rc', 'libfourth.a', 'fourth.o'])
self.dylink_test(main=r'''
#include <stdio.h>
#include <emscripten.h>
int sidef();
int sideg();
int main() {
printf("sidef: %d, sideg: %d.\n", sidef(), sideg());
}
''',
# contents of libfourth.a must be included, even if they aren't referred to!
side=['libfourth.a', 'third.o'],
expected=['sidef: 36, sideg: 17.\n'], force_c=True)
@needs_dylink
def test_dylink_spaghetti(self):
self.dylink_test(main=r'''
#include <stdio.h>
int main_x = 72;
extern int side_x;
int adjust = side_x + 10;
int *ptr = &side_x;
struct Class {
Class() {
printf("main init sees %d, %d, %d.\n", adjust, *ptr, main_x);
}
};
Class cm;
int main() {
printf("main main sees %d, %d, %d.\n", adjust, *ptr, main_x);
return 0;
}
''', side=r'''
#include <stdio.h>
extern int main_x;
int side_x = -534;
int adjust2 = main_x + 10;
int *ptr2 = &main_x;
struct Class {
Class() {
printf("side init sees %d, %d, %d.\n", adjust2, *ptr2, side_x);
}
};
Class cs;
''', expected=['side init sees 82, 72, -534.\nmain init sees -524, -534, 72.\nmain main sees -524, -534, 72.',
'main init sees -524, -534, 72.\nside init sees 82, 72, -534.\nmain main sees -524, -534, 72.'])
@needs_make('mingw32-make')
@needs_dylink
def test_dylink_zlib(self):
self.emcc_args += ['-Wno-shift-negative-value', '-I' + test_file('third_party', 'zlib')]
self.set_setting('RELOCATABLE')
zlib_archive = self.get_zlib_library()
self.dylink_test(main=open(test_file('third_party', 'zlib', 'example.c')).read(),
side=zlib_archive,
expected=open(test_file('core', 'test_zlib.out')).read(),
force_c=True)
# @needs_dylink
# def test_dylink_bullet(self):
# self.emcc_args += ['-I' + test_file('bullet', 'src')]
# side = self.get_bullet_library(self, True)
# self.dylink_test(main=open(test_file('bullet', 'Demos', 'HelloWorld', 'HelloWorld.cpp')).read(),
# side=side,
# expected=[open(test_file('bullet', 'output.txt')).read(), # different roundings
# open(test_file('bullet', 'output2.txt')).read(),
# open(test_file('bullet', 'output3.txt')).read()])
@needs_dylink
def test_dylink_rtti(self):
# Verify that objects created in one module can be dynamic_cast<> correctly
# in another module.
# Each module will define its own copy of certain COMDAT symbols such as
# each class's typeinfo, but at runtime they should both use the same one.
header = '''
#include <cstddef>
class Foo {
public:
virtual ~Foo() {}
};
class Bar : public Foo {
public:
virtual ~Bar() {}
};
bool is_bar(Foo* foo);
'''
main = '''
#include <stdio.h>
#include "header.h"
int main() {
Bar bar;
if (!is_bar(&bar)) {
puts("failure");
return 1;
}
puts("success");
return 0;
}
'''
side = '''
#include "header.h"
bool is_bar(Foo* foo) {
return dynamic_cast<Bar*>(foo) != nullptr;
}
'''
# MAIN_MODULE=1 still needed in this test due to:
# https://github.com/emscripten-core/emscripten/issues/13786
self.dylink_test(main=main,
side=side,
header=header,
main_module=1,
expected='success')
@needs_dylink
def test_dylink_argv_argc(self):
# Verify that argc and argv can be sent to main when main is in a side module
self.emcc_args += ['--extern-pre-js', 'pre.js']
create_file('pre.js', '''
var Module = { arguments: ['hello', 'world!'] }
''')
self.dylink_test(
'', # main module is empty.
r'''
#include <stdio.h>
int main(int argc, char const *argv[]) {
printf("%d ", argc);
for (int i=1; i<argc; i++) printf("%s ", argv[i]);
printf("\n");
return 0;
}
''',
expected='3 hello world!',
need_reverse=False)
@disabled('https://github.com/emscripten-core/emscripten/issues/13773')
def test_dylink_weak(self):
# Verify that weak symbols can be defined in both the side module and the
# main module
main = test_file('core', 'test_dylink_weak_main.c')
side = test_file('core', 'test_dylink_weak_side.c')
self.dylink_testf(main, side, force_c=True, need_reverse=False)
def test_random(self):
src = r'''#include <stdlib.h>
#include <stdio.h>
int main()
{
srandom(0xdeadbeef);
printf("%ld\n", random());
}
'''
self.do_run(src, '956867869')
def test_rand(self):
src = r'''#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
int main()
{
// we need RAND_MAX to be a bitmask (power of 2 minus 1). this assertion guarantees
// that if RAND_MAX changes, the test failure will focus attention on that issue here.
assert(RAND_MAX == 0x7fffffff);
srand(0xdeadbeef);
for(int i = 0; i < 10; ++i)
printf("%d\n", rand());
unsigned int seed = 0xdeadbeef;
for(int i = 0; i < 10; ++i)
printf("%d\n", rand_r(&seed));
bool haveEvenAndOdd = true;
for(int i = 1; i <= 30; ++i)
{
int mask = 1 << i;
if (mask > RAND_MAX) break;
bool haveEven = false;
bool haveOdd = false;
for(int j = 0; j < 1000 && (!haveEven || !haveOdd); ++j)
{
if ((rand() & mask) == 0)
haveEven = true;
else
haveOdd = true;
}
haveEvenAndOdd = haveEvenAndOdd && haveEven && haveOdd;
}
if (haveEvenAndOdd)
printf("Have even and odd!\n");
return 0;
}
'''
expected = '''490242850
2074599277
1480056542
1912638067
931112055
2110392489
2053422194
1614832492
216117595
174823244
760368382
602359081
1121118963
1291018924
1608306807
352705809
958258461
1182561381
114276303
1481323674
Have even and odd!
'''
self.do_run(src, expected)
def test_strtod(self):
self.do_core_test('test_strtod.c')
def test_strtold(self):
self.do_core_test('test_strtold.c')
def test_strtok(self):
self.do_core_test('test_strtok.c')
def test_strtol(self):
self.do_core_test('test_strtol.c')
def test_transtrcase(self):
self.do_core_test('test_transtrcase.c')
@no_wasm2js('very slow to compile')
@is_slow_test
def test_printf(self):
# needs to flush stdio streams
self.emcc_args.append('-Wno-format')
self.set_setting('EXIT_RUNTIME')
self.do_run_in_out_file_test('printf', 'test.c')
def test_printf_2(self):
self.do_core_test('test_printf_2.c')
def test_printf_float(self):
self.do_run_in_out_file_test('printf', 'test_float.c')
def test_printf_octal(self):
self.do_run_in_out_file_test('printf', 'test_octal.c')
def test_vprintf(self):
self.do_core_test('test_vprintf.c')
def test_vsnprintf(self):
self.do_core_test('test_vsnprintf.c')
def test_printf_more(self):
self.do_core_test('test_printf_more.c')
def test_perrar(self):
self.do_core_test('test_perrar.c')
def test_atoX(self):
self.do_core_test('test_atoX.c')
def test_strstr(self):
self.do_core_test('test_strstr.c')
def test_fnmatch(self):
self.do_core_test('test_fnmatch.cpp')
def test_sscanf(self):
self.do_core_test('test_sscanf.c')
def test_sscanf_2(self):
# test parsing of floating point values, as both float and double
for ftype in ['float', 'double']:
src = r'''
#include <stdio.h>
int main(){
char strval1[] = "1.2345678901";
char strval2[] = "1.23456789e5";
char strval3[] = "1.23456789E5";
char strval4[] = "1.2345678e-5";
char strval5[] = "1.2345678E-5";
double dblval = 1.2345678901;
double tstval;
sscanf(strval1, "%lf", &tstval);
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval2, "%lf", &tstval);
dblval = 123456.789;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval3, "%lf", &tstval);
dblval = 123456.789;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval4, "%lf", &tstval);
dblval = 0.000012345678;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval5, "%lf", &tstval);
dblval = 0.000012345678;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
return 0;
}
'''
if ftype == 'float':
self.do_run(src.replace('%lf', '%f').replace('double', 'float'), '''Pass: 1.234568 1.234568
Pass: 123456.789062 123456.789062
Pass: 123456.789062 123456.789062
Pass: 0.000012 0.000012
Pass: 0.000012 0.000012''')
else:
self.do_run(src, '''Pass: 1.234568 1.234568
Pass: 123456.789000 123456.789000
Pass: 123456.789000 123456.789000
Pass: 0.000012 0.000012
Pass: 0.000012 0.000012''')
def test_sscanf_n(self):
self.do_core_test('test_sscanf_n.c')
def test_sscanf_whitespace(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_sscanf_whitespace.c')
def test_sscanf_other_whitespace(self):
# use i16s in printf
self.set_setting('SAFE_HEAP', 0)
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_sscanf_other_whitespace.c')
def test_sscanf_3(self):
self.do_core_test('test_sscanf_3.c')
def test_sscanf_4(self):
self.do_core_test('test_sscanf_4.c')
def test_sscanf_5(self):
self.do_core_test('test_sscanf_5.c')
def test_sscanf_6(self):
self.do_core_test('test_sscanf_6.c')
def test_sscanf_skip(self):
self.do_core_test('test_sscanf_skip.c')
def test_sscanf_caps(self):
self.do_core_test('test_sscanf_caps.c')
def test_sscanf_hex(self):
self.do_core_test('test_sscanf_hex.cpp')
def test_sscanf_float(self):
self.do_core_test('test_sscanf_float.c')
def test_langinfo(self):
self.do_core_test('test_langinfo.c')
def test_files(self):
self.banned_js_engines = [config.SPIDERMONKEY_ENGINE] # closure can generate variables called 'gc', which pick up js shell stuff
if self.maybe_closure(): # Use closure here, to test we don't break FS stuff
self.emcc_args = [x for x in self.emcc_args if x != '-g'] # ensure we test --closure 1 --memory-init-file 1 (-g would disable closure)
elif '-O3' in self.emcc_args and not self.is_wasm():
print('closure 2')
self.emcc_args += ['--closure', '2'] # Use closure 2 here for some additional coverage
return self.skipTest('TODO: currently skipped because CI runs out of memory running Closure in this test!')
self.emcc_args += ['--pre-js', 'pre.js']
self.set_setting('FORCE_FILESYSTEM')
print('base', self.emcc_args)
create_file('pre.js', '''
/** @suppress{checkTypes}*/
Module = {
'noFSInit': true,
'preRun': function() {
FS.createLazyFile('/', 'test.file', 'test.file', true, false);
// Test FS_* exporting
Module['FS_createDataFile']('/', 'somefile.binary', [100, 200, 50, 25, 10, 77, 123], true, false, false); // 200 becomes -56, since signed chars are used in memory
var test_files_input = 'hi there!';
var test_files_input_index = 0;
FS.init(function() {
return test_files_input.charCodeAt(test_files_input_index++) || null;
});
}
};
''')
create_file('test.file', 'some data')
mem_file = 'files.js.mem'
try_delete(mem_file)
def clean(out, err):
return '\n'.join([line for line in (out + err).split('\n') if 'binaryen' not in line and 'wasm' not in line and 'so not running' not in line])
self.do_runf(test_file('files.cpp'), ('size: 7\ndata: 100,-56,50,25,10,77,123\nloop: 100 -56 50 25 10 77 123 \ninput:hi there!\ntexto\n$\n5 : 10,30,20,11,88\nother=some data.\nseeked=me da.\nseeked=ata.\nseeked=ta.\nfscanfed: 10 - hello\n5 bytes to dev/null: 5\nok.\ntexte\n', 'size: 7\ndata: 100,-56,50,25,10,77,123\nloop: 100 -56 50 25 10 77 123 \ninput:hi there!\ntexto\ntexte\n$\n5 : 10,30,20,11,88\nother=some data.\nseeked=me da.\nseeked=ata.\nseeked=ta.\nfscanfed: 10 - hello\n5 bytes to dev/null: 5\nok.\n'),
output_nicerizer=clean)
if self.uses_memory_init_file():
self.assertExists(mem_file)
def test_files_m(self):
# Test for Module.stdin etc.
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
create_file('pre.js', '''
Module = {
data: [10, 20, 40, 30],
stdin: function() { return Module.data.pop() || null },
stdout: function(x) { out('got: ' + x) }
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
src = r'''
#include <stdio.h>
#include <unistd.h>
int main () {
int c;
fprintf(stderr, "isatty? %d,%d,%d\n", isatty(fileno(stdin)), isatty(fileno(stdout)), isatty(fileno(stderr)));
while ((c = fgetc(stdin)) != EOF) {
putc(c+5, stdout);
}
return 0;
}
'''
def clean(out, err):
return '\n'.join(l for l in (out + err).splitlines() if 'warning' not in l and 'binaryen' not in l)
self.do_run(src, ('got: 35\ngot: 45\ngot: 25\ngot: 15\nisatty? 0,0,1\n', 'got: 35\ngot: 45\ngot: 25\ngot: 15\nisatty? 0,0,1', 'isatty? 0,0,1\ngot: 35\ngot: 45\ngot: 25\ngot: 15'), output_nicerizer=clean)
def test_mount(self):
self.set_setting('FORCE_FILESYSTEM')
self.do_runf(test_file('fs', 'test_mount.c'), 'success')
def test_getdents64(self):
self.do_runf(test_file('fs', 'test_getdents64.cpp'), '..')
def test_getdents64_special_cases(self):
# https://bugs.chromium.org/p/v8/issues/detail?id=6881
self.banned_js_engines = [config.V8_ENGINE]
self.do_run_in_out_file_test('fs', 'test_getdents64_special_cases.cpp')
def test_getcwd_with_non_ascii_name(self):
# https://bugs.chromium.org/p/v8/issues/detail?id=6881
self.banned_js_engines = [config.V8_ENGINE]
self.do_run_in_out_file_test('fs', 'test_getcwd_with_non_ascii_name.cpp')
def test_proc_self_fd(self):
self.do_run_in_out_file_test('fs', 'test_proc_self_fd.c')
def test_fwrite_0(self):
self.do_core_test('test_fwrite_0.c')
def test_fgetc_ungetc(self):
print('TODO: update this test once the musl ungetc-on-EOF-stream bug is fixed upstream and reaches us')
orig_compiler_opts = self.emcc_args.copy()
for fs in ['MEMFS', 'NODEFS']:
print(fs)
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_runf(test_file('stdio', 'test_fgetc_ungetc.c'), 'success', js_engines=[config.NODE_JS])
def test_fgetc_unsigned(self):
src = r'''
#include <stdio.h>
int main() {
FILE *file = fopen("file_with_byte_234.txt", "rb");
int c = fgetc(file);
printf("*%d\n", c);
}
'''
create_file('file_with_byte_234.txt', b'\xea', binary=True)
self.emcc_args += ['--embed-file', 'file_with_byte_234.txt']
self.do_run(src, '*234\n')
def test_fgets_eol(self):
src = r'''
#include <stdio.h>
char buf[32];
int main()
{
const char *r = "SUCCESS";
FILE *f = fopen("eol.txt", "r");
while (fgets(buf, 32, f) != NULL) {
if (buf[0] == '\0') {
r = "FAIL";
break;
}
}
printf("%s\n", r);
fclose(f);
return 0;
}
'''
open('eol.txt', 'wb').write(b'\n')
self.emcc_args += ['--embed-file', 'eol.txt']
self.do_run(src, 'SUCCESS\n')
def test_fscanf(self):
create_file('three_numbers.txt', '-1 0.1 -.1')
src = r'''
#include <stdio.h>
#include <assert.h>
#include <float.h>
int main()
{
float x = FLT_MAX, y = FLT_MAX, z = FLT_MAX;
FILE* fp = fopen("three_numbers.txt", "r");
if (fp) {
int match = fscanf(fp, " %f %f %f ", &x, &y, &z);
printf("match = %d\n", match);
printf("x = %0.1f, y = %0.1f, z = %0.1f\n", x, y, z);
} else {
printf("failed to open three_numbers.txt\n");
}
return 0;
}
'''
self.emcc_args += ['--embed-file', 'three_numbers.txt']
self.do_run(src, 'match = 3\nx = -1.0, y = 0.1, z = -0.1\n')
def test_fscanf_2(self):
create_file('a.txt', '''1/2/3 4/5/6 7/8/9
''')
self.emcc_args += ['--embed-file', 'a.txt']
self.do_run(r'''#include <cstdio>
#include <iostream>
using namespace std;
int
main( int argc, char ** argv ) {
cout << "fscanf test" << endl;
FILE * file;
file = fopen("a.txt", "rb");
int vertexIndex[4];
int normalIndex[4];
int uvIndex[4];
int matches = fscanf(file, "%d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d\n", &vertexIndex[0], &uvIndex[0], &normalIndex[0], &vertexIndex [1], &uvIndex[1], &normalIndex[1], &vertexIndex[2], &uvIndex[2], &normalIndex[2], &vertexIndex[3], &uvIndex[3], &normalIndex[3]);
cout << matches << endl;
return 0;
}
''', 'fscanf test\n9\n')
def test_fileno(self):
create_file('empty.txt', '')
src = r'''
#include <stdio.h>
#include <unistd.h>
int main()
{
FILE* fp = fopen("empty.txt", "r");
if (fp) {
printf("%d\n", fileno(fp));
} else {
printf("failed to open empty.txt\n");
}
return 0;
}
'''
self.emcc_args += ['--embed-file', 'empty.txt']
self.do_run(src, '3\n')
def test_readdir(self):
self.do_run_in_out_file_test('dirent', 'test_readdir.c')
def test_readdir_empty(self):
self.do_run_in_out_file_test('dirent', 'test_readdir_empty.c')
def test_stat(self):
self.do_runf(test_file('stat', 'test_stat.c'), 'success')
self.verify_in_strict_mode('test_stat.js')
def test_fstatat(self):
self.do_runf(test_file('stat', 'test_fstatat.c'), 'success')
def test_stat_chmod(self):
self.do_runf(test_file('stat', 'test_chmod.c'), 'success')
def test_stat_mknod(self):
self.do_runf(test_file('stat', 'test_mknod.c'), 'success')
@no_safe_heap('https://github.com/emscripten-core/emscripten/issues/12433')
def test_fcntl(self):
self.add_pre_run("FS.createDataFile('/', 'test', 'abcdef', true, true, false);")
self.do_run_in_out_file_test('fcntl', 'test_fcntl.c')
def test_fcntl_open(self):
self.do_run_in_out_file_test('fcntl', 'test_fcntl_open.c')
@also_with_wasm_bigint
def test_fcntl_misc(self):
self.add_pre_run("FS.createDataFile('/', 'test', 'abcdef', true, true, false);")
self.do_run_in_out_file_test('fcntl', 'test_fcntl_misc.c')
def test_poll(self):
self.add_pre_run('''
var dummy_device = FS.makedev(64, 0);
FS.registerDevice(dummy_device, {});
FS.createDataFile('/', 'file', 'abcdef', true, true, false);
FS.mkdev('/device', dummy_device);
''')
self.do_core_test('test_poll.c')
def test_statvfs(self):
self.do_core_test('test_statvfs.c')
def test_libgen(self):
self.do_core_test('test_libgen.c')
def test_utime(self):
self.do_runf(test_file('utime', 'test_utime.c'), 'success')
def test_futimens(self):
self.do_runf(path_from_root('tests', 'utime', 'test_futimens.c'), 'success')
@no_minimal_runtime('MINIMAL_RUNTIME does not have getValue() and setValue() (TODO add it to a JS library function to get it in)')
def test_utf(self):
self.banned_js_engines = [config.SPIDERMONKEY_ENGINE] # only node handles utf well
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc'])
self.set_setting('EXPORTED_RUNTIME_METHODS', ['getValue', 'setValue', 'UTF8ToString', 'stringToUTF8'])
self.do_core_test('test_utf.c')
def test_utf32(self):
if self.get_setting('MINIMAL_RUNTIME'):
self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$UTF32ToString', '$stringToUTF32', '$lengthBytesUTF32'])
else:
self.set_setting('EXPORTED_RUNTIME_METHODS', ['UTF32ToString', 'stringToUTF32', 'lengthBytesUTF32'])
self.do_runf(test_file('utf32.cpp'), 'OK.')
self.do_runf(test_file('utf32.cpp'), 'OK.', args=['-fshort-wchar'])
def test_utf16(self):
self.do_runf(test_file('core', 'test_utf16.cpp'), 'OK.')
def test_utf8(self):
if self.get_setting('MINIMAL_RUNTIME'):
self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$AsciiToString', '$stringToAscii', '$writeAsciiToMemory'])
else:
self.set_setting('EXPORTED_RUNTIME_METHODS',
['UTF8ToString', 'stringToUTF8', 'AsciiToString', 'stringToAscii'])
self.do_runf(test_file('utf8.cpp'), 'OK.')
@also_with_wasm_bigint
def test_utf8_textdecoder(self):
self.set_setting('EXPORTED_RUNTIME_METHODS', ['UTF8ToString', 'stringToUTF8'])
self.emcc_args += ['--embed-file', test_file('utf8_corpus.txt') + '@/utf8_corpus.txt']
self.do_runf(test_file('benchmark_utf8.cpp'), 'OK.')
# Test that invalid character in UTF8 does not cause decoding to crash.
def test_utf8_invalid(self):
self.set_setting('EXPORTED_RUNTIME_METHODS', ['UTF8ToString', 'stringToUTF8'])
for decoder_mode in [[], ['-s', 'TEXTDECODER']]:
self.emcc_args += decoder_mode
print(str(decoder_mode))
self.do_runf(test_file('utf8_invalid.cpp'), 'OK.')
# Test that invalid character in UTF8 does not cause decoding to crash.
@no_asan('TODO: ASan support in minimal runtime')
def test_minimal_runtime_utf8_invalid(self):
self.set_setting('EXPORTED_RUNTIME_METHODS', ['UTF8ToString', 'stringToUTF8'])
self.set_setting('MINIMAL_RUNTIME')
for decoder_mode in [False, True]:
self.set_setting('TEXTDECODER', decoder_mode)
print(str(decoder_mode))
self.do_runf(test_file('utf8_invalid.cpp'), 'OK.')
def test_utf16_textdecoder(self):
self.set_setting('EXPORTED_RUNTIME_METHODS', ['UTF16ToString', 'stringToUTF16', 'lengthBytesUTF16'])
self.emcc_args += ['--embed-file', test_file('utf16_corpus.txt') + '@/utf16_corpus.txt']
self.do_runf(test_file('benchmark_utf16.cpp'), 'OK.')
def test_wprintf(self):
self.do_core_test('test_wprintf.cpp')
def test_write_stdout_fileno(self):
self.do_core_test('test_write_stdout_fileno.c')
self.do_core_test('test_write_stdout_fileno.c', args=['-s', 'FILESYSTEM=0'])
def test_direct_string_constant_usage(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_direct_string_constant_usage.cpp')
def test_std_cout_new(self):
self.do_core_test('test_std_cout_new.cpp')
def test_std_function_incomplete_return(self):
self.do_core_test('test_std_function_incomplete_return.cpp')
def test_istream(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
for linkable in [0]: # , 1]:
print(linkable)
# regression check for issue #273
self.set_setting('LINKABLE', linkable)
self.do_core_test('test_istream.cpp')
def test_fs_base(self):
self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$FS'])
self.uses_es6 = True
self.add_pre_run(open(test_file('filesystem', 'src.js')).read())
src = 'int main() {return 0;}\n'
expected = open(test_file('filesystem', 'output.txt')).read()
self.do_run(src, expected)
@also_with_noderawfs
@is_slow_test
def test_fs_nodefs_rw(self):
# TODO(sbc): This test exposes an issue in the way we run the closure compiler,
# which causes it to generate non-ES5 output.
# Remove this line once we fix: https://github.com/emscripten-core/emscripten/issues/12628
self.uses_es6 = True
self.emcc_args += ['-lnodefs.js']
self.set_setting('SYSCALL_DEBUG')
self.do_runf(test_file('fs', 'test_nodefs_rw.c'), 'success')
if '-g' not in self.emcc_args:
print('closure')
self.emcc_args += ['--closure=1']
self.do_runf(test_file('fs', 'test_nodefs_rw.c'), 'success')
@also_with_noderawfs
def test_fs_nodefs_cloexec(self):
self.emcc_args += ['-lnodefs.js']
self.do_runf(test_file('fs', 'test_nodefs_cloexec.c'), 'success')
def test_fs_nodefs_home(self):
self.set_setting('FORCE_FILESYSTEM')
self.emcc_args += ['-lnodefs.js']
self.do_runf(test_file('fs', 'test_nodefs_home.c'), 'success', js_engines=[config.NODE_JS])
def test_fs_nodefs_nofollow(self):
self.emcc_args += ['-lnodefs.js']
self.do_runf(test_file('fs', 'test_nodefs_nofollow.c'), 'success', js_engines=[config.NODE_JS])
def test_fs_trackingdelegate(self):
self.do_run_in_out_file_test('fs', 'test_trackingdelegate.c')
@also_with_noderawfs
def test_fs_writeFile(self):
self.set_setting('DISABLE_EXCEPTION_CATCHING') # see issue 2334
self.do_run_in_out_file_test('fs', 'test_writeFile.cpp')
def test_fs_write(self):
self.do_run_in_out_file_test('fs', 'test_write.cpp')
@also_with_noderawfs
def test_fs_emptyPath(self):
self.do_run_in_out_file_test('fs', 'test_emptyPath.c')
@also_with_noderawfs
def test_fs_append(self):
self.do_runf(test_file('fs', 'test_append.c'), 'success')
def test_fs_mmap(self):
self.uses_es6 = True
orig_compiler_opts = self.emcc_args.copy()
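# Run the test once per FS backend; the backend is chosen at compile time via the -D define
# below, and NODEFS/NODERAWFS additionally need their JS libraries linked in.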
for fs in ['MEMFS', 'NODEFS', 'NODERAWFS']:
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
if fs == 'NODERAWFS':
self.emcc_args += ['-lnodefs.js', '-lnoderawfs.js']
self.do_run_in_out_file_test('fs', 'test_mmap.c')
@also_with_noderawfs
def test_fs_errorstack(self):
# Enable strict mode, so we catch any strict-mode-only errors and users can
# safely use the output with strict JavaScript enabled.
create_file('pre.js', '"use strict";')
self.emcc_args += ['--pre-js', 'pre.js']
self.set_setting('FORCE_FILESYSTEM')
self.set_setting('ASSERTIONS')
self.do_run(r'''
#include <emscripten.h>
#include <iostream>
int main(void) {
std::cout << "hello world\n"; // should work with strict mode
EM_ASM(
try {
FS.readFile('/dummy.txt');
} catch (err) {
err.stack = err.stack; // should be writable
throw err;
}
);
return 0;
}
''', 'at Object.readFile', assert_returncode=NON_ZERO) # engines have different error stack formats
@also_with_noderawfs
def test_fs_llseek(self):
self.set_setting('FORCE_FILESYSTEM')
self.do_runf(test_file('fs', 'test_llseek.c'), 'success')
def test_fs_64bit(self):
self.do_runf(test_file('fs', 'test_64bit.c'), 'success')
def test_sigalrm(self):
self.do_runf(test_file('sigalrm.cpp'), '')
@no_windows('https://github.com/emscripten-core/emscripten/issues/8882')
def test_unistd_access(self):
self.uses_es6 = True
orig_compiler_opts = self.emcc_args.copy()
for fs in ['MEMFS', 'NODEFS']:
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run_in_out_file_test('unistd', 'access.c', js_engines=[config.NODE_JS])
# Node.js fs.chmod is nearly a no-op on Windows
if not WINDOWS:
self.emcc_args = orig_compiler_opts
self.set_setting('NODERAWFS')
self.do_run_in_out_file_test('unistd', 'access.c', js_engines=[config.NODE_JS])
def test_unistd_curdir(self):
self.uses_es6 = True
self.do_run_in_out_file_test('unistd', 'curdir.c')
@also_with_noderawfs
def test_unistd_close(self):
self.do_run_in_out_file_test('unistd', 'close.c')
def test_unistd_confstr(self):
self.do_run_in_out_file_test('unistd', 'confstr.c')
def test_unistd_ttyname(self):
self.do_runf(test_file('unistd', 'ttyname.c'), 'success')
@also_with_noderawfs
def test_unistd_pipe(self):
self.do_runf(test_file('unistd', 'pipe.c'), 'success')
@also_with_noderawfs
def test_unistd_dup(self):
self.do_run_in_out_file_test('unistd', 'dup.c')
def test_unistd_pathconf(self):
self.do_run_in_out_file_test('unistd', 'pathconf.c')
def test_unistd_truncate(self):
self.uses_es6 = True
orig_compiler_opts = self.emcc_args.copy()
for fs in ['MEMFS', 'NODEFS']:
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run_in_out_file_test('unistd', 'truncate.c', js_engines=[config.NODE_JS])
@no_windows("Windows throws EPERM rather than EACCES or EINVAL")
@unittest.skipIf(WINDOWS or os.geteuid() == 0, "Root access invalidates this test because root can write to read-only files")
def test_unistd_truncate_noderawfs(self):
self.uses_es6 = True
self.set_setting('NODERAWFS')
self.do_run_in_out_file_test('unistd', 'truncate.c', js_engines=[config.NODE_JS])
def test_unistd_swab(self):
self.do_run_in_out_file_test('unistd', 'swab.c')
def test_unistd_isatty(self):
self.do_runf(test_file('unistd', 'isatty.c'), 'success')
@also_with_standalone_wasm()
def test_unistd_sysconf(self):
self.do_run_in_out_file_test('unistd', 'sysconf.c')
@no_asan('ASan alters memory layout')
def test_unistd_sysconf_phys_pages(self):
filename = test_file('unistd', 'sysconf_phys_pages.c')
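# Expected _SC_PHYS_PAGES value: total memory divided by the 16384-byte page size this test
# assumes — 2 GB (presumably the default growth ceiling) when ALLOW_MEMORY_GROWTH is set,
# otherwise the 16 MB default initial memory.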
if self.get_setting('ALLOW_MEMORY_GROWTH'):
expected = (2 * 1024 * 1024 * 1024) // 16384
else:
expected = 16 * 1024 * 1024 // 16384
self.do_runf(filename, str(expected) + ', errno: 0')
def test_unistd_login(self):
self.do_run_in_out_file_test('unistd', 'login.c')
@no_windows('https://github.com/emscripten-core/emscripten/issues/8882')
def test_unistd_unlink(self):
self.clear()
orig_compiler_opts = self.emcc_args.copy()
for fs in ['MEMFS', 'NODEFS']:
self.emcc_args = orig_compiler_opts + ['-D' + fs]
# symlinks on node.js on non-linux behave differently (e.g. on Windows they require administrative privileges)
# so skip testing those bits on that combination.
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
if WINDOWS:
self.emcc_args += ['-DNO_SYMLINK=1']
if MACOS:
continue
self.do_runf(test_file('unistd', 'unlink.c'), 'success', js_engines=[config.NODE_JS])
# Several differences/bugs on non-linux including https://github.com/nodejs/node/issues/18014
if not WINDOWS and not MACOS:
self.emcc_args = orig_compiler_opts + ['-DNODERAWFS']
# 0 if root user
if os.geteuid() == 0:
self.emcc_args += ['-DSKIP_ACCESS_TESTS']
self.set_setting('NODERAWFS')
self.do_runf(test_file('unistd', 'unlink.c'), 'success', js_engines=[config.NODE_JS])
def test_unistd_links(self):
self.clear()
orig_compiler_opts = self.emcc_args.copy()
for fs in ['MEMFS', 'NODEFS']:
if WINDOWS and fs == 'NODEFS':
print('Skipping NODEFS part of this test for test_unistd_links on Windows, since it would require administrative privileges.', file=sys.stderr)
# Also, other detected discrepancies if you do end up running this test on NODEFS:
# test expects /, but Windows gives \ as path slashes.
# Calling readlink() on a non-link gives error 22 EINVAL on Unix, but simply error 0 OK on Windows.
continue
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run_in_out_file_test('unistd', 'links.c', js_engines=[config.NODE_JS])
@no_windows('Skipping NODEFS test, since it would require administrative privileges.')
def test_unistd_symlink_on_nodefs(self):
# Also, other detected discrepancies if you do end up running this test on NODEFS:
# test expects /, but Windows gives \ as path slashes.
# Calling readlink() on a non-link gives error 22 EINVAL on Unix, but simply error 0 OK on Windows.
self.emcc_args += ['-lnodefs.js']
self.do_run_in_out_file_test('unistd', 'symlink_on_nodefs.c', js_engines=[config.NODE_JS])
def test_unistd_sleep(self):
self.do_run_in_out_file_test('unistd', 'sleep.c')
@also_with_wasm_bigint
def test_unistd_io(self):
self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$ERRNO_CODES'])
orig_compiler_opts = self.emcc_args.copy()
for fs in ['MEMFS', 'NODEFS']:
self.clear()
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run_in_out_file_test('unistd', 'io.c')
@no_windows('https://github.com/emscripten-core/emscripten/issues/8882')
def test_unistd_misc(self):
orig_compiler_opts = self.emcc_args.copy()
for fs in ['MEMFS', 'NODEFS']:
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run_in_out_file_test('unistd', 'misc.c', js_engines=[config.NODE_JS])
# i64s in the API, which we'd need to legalize for JS, so in standalone mode
# all we can test is wasm VMs
@also_with_standalone_wasm(wasm2c=True)
def test_posixtime(self):
self.banned_js_engines = [config.V8_ENGINE] # v8 lacks monotonic time
self.do_core_test('test_posixtime.c')
def test_uname(self):
self.do_core_test('test_uname.c')
def test_unary_literal(self):
self.do_core_test('test_unary_literal.cpp')
def test_env(self):
expected = open(test_file('env', 'output.txt')).read()
self.do_runf(test_file('env', 'src.c'), [
expected.replace('{{{ THIS_PROGRAM }}}', self.in_dir('src.js')).replace('\\', '/'), # node, can find itself properly
expected.replace('{{{ THIS_PROGRAM }}}', './this.program') # spidermonkey, v8
])
def test_environ(self):
expected = open(test_file('env', 'output-mini.txt')).read()
self.do_runf(test_file('env', 'src-mini.c'), [
expected.replace('{{{ THIS_PROGRAM }}}', self.in_dir('src-mini.js')).replace('\\', '/'), # node, can find itself properly
expected.replace('{{{ THIS_PROGRAM }}}', './this.program') # spidermonkey, v8
])
def test_systypes(self):
self.do_core_test('test_systypes.c')
def test_stddef(self):
self.do_core_test('test_stddef.cpp')
self.do_core_test('test_stddef.cpp', force_c=True)
def test_getloadavg(self):
self.do_core_test('test_getloadavg.c')
def test_nl_types(self):
self.do_core_test('test_nl_types.c')
def test_799(self):
src = test_file('799.cpp')
self.do_runf(src, '''Set PORT family: 0, port: 3979
Get PORT family: 0
PORT: 3979
''')
def test_ctype(self):
self.do_core_test('test_ctype.c')
def test_strcasecmp(self):
self.do_core_test('test_strcasecmp.c')
def test_atomic(self):
self.do_core_test('test_atomic.c')
def test_atomic_cxx(self):
# the wasm backend has lock-free atomics, but not asm.js or asm2wasm
self.emcc_args += ['-DIS_64BIT_LOCK_FREE=1']
self.do_core_test('test_atomic_cxx.cpp')
# TODO: test with USE_PTHREADS in wasm backend as well
def test_phiundef(self):
self.do_core_test('test_phiundef.c')
def test_netinet_in(self):
self.do_run_in_out_file_test('netinet', 'in.cpp')
@needs_dylink
def test_main_module_static_align(self):
if self.get_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('no shared modules with memory growth')
self.set_setting('MAIN_MODULE')
self.do_core_test('test_main_module_static_align.cpp')
# libc++ tests
def test_iostream_and_determinism(self):
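# Build the same program several times and check that the emitted JS (and wasm, when
# applicable) is byte-for-byte identical across builds.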
create_file('src.cpp', '''
#include <iostream>
int main()
{
std::cout << "hello world" << std::endl << 77 << "." << std::endl;
return 0;
}
''')
num = 5
for i in range(num):
print('(iteration %d)' % i)
# add some timing nondeterminism here, not that we need it, but whatever
time.sleep(random.random() / (10 * num))
self.do_runf('src.cpp', 'hello world\n77.\n')
# Verify that this build is identical to the previous one
if os.path.exists('src.js.previous'):
self.assertBinaryEqual('src.js', 'src.js.previous')
shutil.copy2('src.js', 'src.js.previous')
# Same but for the wasm file.
if self.is_wasm() and not self.get_setting('WASM2JS'):
if os.path.exists('src.wasm.previous'):
self.assertBinaryEqual('src.wasm', 'src.wasm.previous')
shutil.copy2('src.wasm', 'src.wasm.previous')
def test_stdvec(self):
self.do_core_test('test_stdvec.cpp')
def test_random_device(self):
self.maybe_closure()
self.do_core_test('test_random_device.cpp')
def test_reinterpreted_ptrs(self):
self.do_core_test('test_reinterpreted_ptrs.cpp')
def test_js_libraries(self):
create_file('main.cpp', '''
#include <stdio.h>
extern "C" {
extern void printey();
extern int calcey(int x, int y);
}
int main() {
printey();
printf("*%d*\\n", calcey(10, 22));
return 0;
}
''')
create_file('mylib1.js', '''
mergeInto(LibraryManager.library, {
printey: function() {
out('hello from lib!');
}
});
''')
create_file('mylib2.js', '''
mergeInto(LibraryManager.library, {
calcey: function(x, y) {
return x + y;
}
});
''')
self.emcc_args += ['--js-library', 'mylib1.js', '--js-library', 'mylib2.js']
self.do_runf('main.cpp', 'hello from lib!\n*32*\n')
def test_unicode_js_library(self):
create_file('main.cpp', '''
#include <stdio.h>
extern "C" {
extern void printey();
}
int main() {
printey();
return 0;
}
''')
self.emcc_args += ['--js-library', test_file('unicode_library.js')]
self.do_runf('main.cpp', u'Unicode snowman \u2603 says hello!')
def test_funcptr_import_type(self):
self.emcc_args += ['--js-library', test_file('core', 'test_funcptr_import_type.js')]
self.do_core_test('test_funcptr_import_type.cpp')
@no_asan('ASan does not work with EXPORT_ALL')
def test_constglobalunion(self):
self.set_setting('EXPORT_ALL')
self.do_run(r'''
#include <stdio.h>
struct one_const {
long a;
};
struct two_consts {
long a;
long b;
};
union some_consts {
struct one_const one;
struct two_consts two;
};
union some_consts my_consts = {{
1
}};
struct one_const addr_of_my_consts = {
(long)(&my_consts)
};
int main(void) {
printf("%li\n", (long)!!addr_of_my_consts.a);
return 0;
}
''', '1')
### 'Medium' tests
def test_fannkuch(self):
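# Each pair is (n, expected Pfannkuchen(n)) — the maximum flip counts from the classic
# fannkuch benchmark.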
results = [(1, 0), (2, 1), (3, 2), (4, 4), (5, 7), (6, 10), (7, 16), (8, 22)]
self.build(test_file('fannkuch.cpp'))
for i, j in results:
print(i, j)
self.do_run('fannkuch.js', 'Pfannkuchen(%d) = %d.' % (i, j), args=[str(i)], no_build=True)
def test_raytrace(self):
# TODO: Should we remove this test?
self.skipTest('Relies on double value rounding, extremely sensitive')
src = open(test_file('raytrace.cpp')).read().replace('double', 'float')
output = open(test_file('raytrace.ppm')).read()
self.do_run(src, output, args=['3', '16'])
def test_fasta(self):
results = [(1, '''GG*ctt**tgagc*'''),
(20, '''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tacgtgtagcctagtgtttgtgttgcgttatagtctatttgtggacacagtatggtcaaa**tgacgtcttttgatctgacggcgttaacaaagatactctg*'''),
(50, '''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA*TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACAT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tactDtDagcctatttSVHtHttKtgtHMaSattgWaHKHttttagacatWatgtRgaaa**NtactMcSMtYtcMgRtacttctWBacgaa**agatactctgggcaacacacatacttctctcatgttgtttcttcggacctttcataacct**ttcctggcacatggttagctgcacatcacaggattgtaagggtctagtggttcagtgagc**ggaatatcattcgtcggtggtgttaatctatctcggtgtagcttataaatgcatccgtaa**gaatattatgtttatttgtcggtacgttcatggtagtggtgtcgccgatttagacgtaaa**ggcatgtatg*''')]
old = self.emcc_args
orig_src = open(test_file('fasta.cpp')).read()
def test(extra_args):
self.emcc_args = old + extra_args
for t in ['float', 'double']:
print(t)
src = orig_src.replace('double', t)
with open('fasta.cpp', 'w') as f:
f.write(src)
self.build('fasta.cpp')
for arg, output in results:
self.do_run('fasta.js', output, args=[str(arg)], output_nicerizer=lambda x, err: x.replace('\n', '*'), no_build=True)
shutil.copyfile('fasta.js', '%s.js' % t)
test([])
@bleeding_edge_wasm_backend
def test_fasta_nontrapping(self):
self.emcc_args += ['-mnontrapping-fptoint']
self.test_fasta()
def test_whets(self):
self.do_runf(test_file('whets.cpp'), 'Single Precision C Whetstone Benchmark')
@no_asan('depends on the specifics of memory size, which for asan we are forced to increase')
def test_dlmalloc_inline(self):
self.banned_js_engines = [config.NODE_JS] # slower, and fails on 64-bit
# needed with typed arrays
self.set_setting('INITIAL_MEMORY', '128mb')
src = open(path_from_root('system', 'lib', 'dlmalloc.c')).read() + '\n\n\n' + open(test_file('dlmalloc_test.c')).read()
self.do_run(src, '*1,0*', args=['200', '1'], force_c=True)
self.do_run('src.js', '*400,0*', args=['400', '400'], force_c=True, no_build=True)
@no_asan('depends on the specifics of memory size, which for asan we are forced to increase')
def test_dlmalloc(self):
self.banned_js_engines = [config.NODE_JS] # slower, and fails on 64-bit
# needed with typed arrays
self.set_setting('INITIAL_MEMORY', '128mb')
# Linked version
self.do_runf(test_file('dlmalloc_test.c'), '*1,0*', args=['200', '1'])
self.do_run('dlmalloc_test.js', '*400,0*', args=['400', '400'], no_build=True)
# TODO: do this in other passes too, passing their opts into emcc
if self.emcc_args == []:
# emcc should build in dlmalloc automatically, and do all the sign correction etc. for it
try_delete('src.js')
self.run_process([EMCC, test_file('dlmalloc_test.c'), '-s', 'INITIAL_MEMORY=128MB', '-o', 'src.js'], stdout=PIPE, stderr=self.stderr_redirect)
self.do_run(None, '*1,0*', ['200', '1'], no_build=True)
self.do_run(None, '*400,0*', ['400', '400'], no_build=True)
# The same for new and all its variants
src = open(test_file('new.cpp')).read()
for new, delete in [
('malloc(100)', 'free'),
('new char[100]', 'delete[]'),
('new Structy', 'delete'),
('new int', 'delete'),
('new Structy[10]', 'delete[]'),
]:
self.do_run(src.replace('{{{ NEW }}}', new).replace('{{{ DELETE }}}', delete), '*1,0*')
# Tests that a large allocation should gracefully fail
@no_asan('the memory size limit here is too small for asan')
def test_dlmalloc_large(self):
self.emcc_args += ['-s', 'ABORTING_MALLOC=0', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MAXIMUM_MEMORY=128MB']
self.do_runf(path_from_root('tests', 'dlmalloc_test_large.c'), '0 0 0 1')
@no_asan('asan also changes malloc, and that ends up linking in new twice')
def test_dlmalloc_partial(self):
# present part of the symbols of dlmalloc, not all
src = open(test_file('new.cpp')).read().replace('{{{ NEW }}}', 'new int').replace('{{{ DELETE }}}', 'delete') + '''
#include <new>
void *
operator new(size_t size) throw(std::bad_alloc)
{
printf("new %zu!\\n", size);
return malloc(size);
}
'''
self.do_run(src, 'new 4!\n*1,0*')
@no_asan('asan also changes malloc, and that ends up linking in new twice')
def test_dlmalloc_partial_2(self):
if 'SAFE_HEAP' in str(self.emcc_args):
self.skipTest('we do unsafe stuff here')
# present part of the symbols of dlmalloc, not all. malloc is harder to link than new which is weak.
self.do_core_test('test_dlmalloc_partial_2.c', assert_returncode=NON_ZERO)
def test_libcxx(self):
self.do_runf(test_file('hashtest.cpp'),
'june -> 30\nPrevious (in alphabetical order) is july\nNext (in alphabetical order) is march')
self.do_run('''
#include <set>
#include <stdio.h>
int main() {
std::set<int> *fetchOriginatorNums = new std::set<int>();
fetchOriginatorNums->insert(171);
printf("hello world\\n");
return 0;
}
''', 'hello world')
def test_typeid(self):
self.do_core_test('test_typeid.cpp')
def test_static_variable(self):
# needs atexit
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_static_variable.cpp')
def test_fakestat(self):
self.do_core_test('test_fakestat.c')
def test_mmap(self):
# ASan needs more memory, but that is set up separately
if '-fsanitize=address' not in self.emcc_args:
self.set_setting('INITIAL_MEMORY', '128mb')
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_mmap.c')
def test_mmap_file(self):
for extra_args in [[]]:
self.emcc_args += ['--embed-file', 'data.dat'] + extra_args
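# Build a 9000-byte data file (a bit over two 4 KB pages) so the program can check mapped
# contents both at offset 0 and at offset 4096.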
x = 'data from the file........'
s = ''
while len(s) < 9000:
if len(s) + len(x) < 9000:
s += x
continue
s += '.'
assert len(s) == 9000
create_file('data.dat', s)
self.do_runf(test_file('mmap_file.c'), '*\n' + s[0:20] + '\n' + s[4096:4096 + 20] + '\n*\n')
def test_cubescript(self):
# uses register keyword
self.emcc_args += ['-std=c++03', '-Wno-dynamic-class-memaccess']
self.maybe_closure()
self.emcc_args += ['-I', test_file('third_party', 'cubescript')]
def test():
src = test_file('third_party', 'cubescript', 'command.cpp')
self.do_runf(src, '*\nTemp is 33\n9\n5\nhello, everyone\n*')
test()
print('asyncify') # extra coverage
self.set_setting('ASYNCIFY')
test()
@needs_dylink
def test_relocatable_void_function(self):
self.set_setting('RELOCATABLE')
self.do_core_test('test_relocatable_void_function.c')
@wasm_simd
@is_slow_test
def test_wasm_builtin_simd(self):
# Improves test readability
self.emcc_args += ['-Wno-c++11-narrowing', '-Wno-format']
self.do_runf(test_file('test_wasm_builtin_simd.cpp'), 'Success!')
self.build(test_file('test_wasm_builtin_simd.cpp'))
@wasm_simd
@is_slow_test
def test_wasm_intrinsics_simd(self):
def run():
self.do_runf(test_file('test_wasm_intrinsics_simd.c'), 'Success!')
# Improves test readability
self.emcc_args.append('-Wno-c++11-narrowing')
self.emcc_args.extend(['-Wpedantic', '-Werror', '-Wall', '-xc++'])
run()
self.emcc_args.append('-funsigned-char')
run()
self.build(test_file('test_wasm_intrinsics_simd.c'))
# Tests invoking the NEON SIMD API via arm_neon.h header
@wasm_simd
def test_neon_wasm_simd(self):
self.emcc_args.append('-Wno-c++11-narrowing')
self.emcc_args.append('-mfpu=neon')
self.emcc_args.append('-msimd128')
self.do_runf(test_file('neon', 'test_neon_wasm_simd.cpp'), 'Success!')
# Tests invoking the SIMD API via x86 SSE1 xmmintrin.h header (_mm_x() functions)
@wasm_simd
@requires_native_clang
@no_safe_heap('has unaligned 64-bit operations in wasm')
def test_sse1(self):
src = test_file('sse', 'test_sse1.cpp')
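# Compile and run the same source natively with clang first, then require the Emscripten
# -msse build (lowered to wasm SIMD) to produce identical output.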
self.run_process([shared.CLANG_CXX, src, '-msse', '-o', 'test_sse1', '-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE)
native_result = self.run_process('./test_sse1', stdout=PIPE).stdout
orig_args = self.emcc_args
self.emcc_args = orig_args + ['-I' + test_file('sse'), '-msse']
self.maybe_closure()
self.do_runf(src, native_result)
# Tests invoking the SIMD API via x86 SSE2 emmintrin.h header (_mm_x() functions)
@wasm_simd
@requires_native_clang
@no_safe_heap('has unaligned 64-bit operations in wasm')
@is_slow_test
def test_sse2(self):
src = test_file('sse', 'test_sse2.cpp')
self.run_process([shared.CLANG_CXX, src, '-msse2', '-Wno-argument-outside-range', '-o', 'test_sse2', '-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE)
native_result = self.run_process('./test_sse2', stdout=PIPE).stdout
orig_args = self.emcc_args
self.emcc_args = orig_args + ['-I' + test_file('sse'), '-msse2', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_runf(src, native_result)
# Tests invoking the SIMD API via x86 SSE3 pmmintrin.h header (_mm_x() functions)
@wasm_simd
@requires_native_clang
def test_sse3(self):
src = test_file('sse', 'test_sse3.cpp')
self.run_process([shared.CLANG_CXX, src, '-msse3', '-Wno-argument-outside-range', '-o', 'test_sse3', '-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE)
native_result = self.run_process('./test_sse3', stdout=PIPE).stdout
orig_args = self.emcc_args
self.emcc_args = orig_args + ['-I' + test_file('sse'), '-msse3', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_runf(src, native_result)
# Tests invoking the SIMD API via x86 SSSE3 tmmintrin.h header (_mm_x() functions)
@wasm_simd
@requires_native_clang
def test_ssse3(self):
src = test_file('sse', 'test_ssse3.cpp')
self.run_process([shared.CLANG_CXX, src, '-mssse3', '-Wno-argument-outside-range', '-o', 'test_ssse3', '-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE)
native_result = self.run_process('./test_ssse3', stdout=PIPE).stdout
orig_args = self.emcc_args
self.emcc_args = orig_args + ['-I' + test_file('sse'), '-mssse3', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_runf(src, native_result)
# Tests invoking the SIMD API via x86 SSE4.1 smmintrin.h header (_mm_x() functions)
@wasm_simd
@requires_native_clang
@is_slow_test
def test_sse4_1(self):
src = test_file('sse', 'test_sse4_1.cpp')
self.run_process([shared.CLANG_CXX, src, '-msse4.1', '-Wno-argument-outside-range', '-o', 'test_sse4_1', '-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE)
native_result = self.run_process('./test_sse4_1', stdout=PIPE).stdout
orig_args = self.emcc_args
self.emcc_args = orig_args + ['-I' + test_file('sse'), '-msse4.1', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_runf(src, native_result)
# Tests invoking the SIMD API via x86 SSE4.2 nmmintrin.h header (_mm_x() functions)
@wasm_simd
@requires_native_clang
def test_sse4_2(self):
src = test_file('sse', 'test_sse4_2.cpp')
self.run_process([shared.CLANG_CXX, src, '-msse4.2', '-Wno-argument-outside-range', '-o', 'test_sse4_2', '-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE)
native_result = self.run_process('./test_sse4_2', stdout=PIPE).stdout
orig_args = self.emcc_args
self.emcc_args = orig_args + ['-I' + test_file('sse'), '-msse4.2', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_runf(src, native_result)
# Tests invoking the SIMD API via x86 AVX avxintrin.h header (_mm_x() functions)
@wasm_simd
@requires_native_clang
def test_avx(self):
src = test_file('sse', 'test_avx.cpp')
self.run_process([shared.CLANG_CXX, src, '-mavx', '-Wno-argument-outside-range', '-o', 'test_avx', '-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE)
native_result = self.run_process('./test_avx', stdout=PIPE).stdout
orig_args = self.emcc_args
self.emcc_args = orig_args + ['-I' + test_file('sse'), '-mavx', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_runf(src, native_result)
@no_asan('call stack exceeded on some versions of node')
def test_gcc_unmangler(self):
self.emcc_args += ['-I' + test_file('third_party', 'libiberty')]
self.do_runf(test_file('third_party', 'libiberty', 'cp-demangle.c'), '*d_demangle(char const*, int, unsigned int*)*', args=['_ZL10d_demanglePKciPj'])
@needs_make('make')
def test_lua(self):
self.emcc_args.remove('-Werror')
self.do_run('',
'hello lua world!\n17\n1\n2\n3\n4\n7',
args=['-e', '''print("hello lua world!");print(17);for x = 1,4 do print(x) end;print(10-3)'''],
libraries=self.get_library(os.path.join('third_party', 'lua'), [os.path.join('src', 'lua.o'), os.path.join('src', 'liblua.a')], make=['make', 'generic'], configure=None),
includes=[test_file('lua')],
output_nicerizer=lambda string, err: (string + err).replace('\n\n', '\n').replace('\n\n', '\n'))
@no_asan('issues with freetype itself')
@needs_make('configure script')
@is_slow_test
def test_freetype(self):
self.add_pre_run("FS.createDataFile('/', 'font.ttf', %s, true, false, false);" % str(
list(bytearray(open(test_file('freetype', 'LiberationSansBold.ttf'), 'rb').read()))
))
# Not needed for js, but useful for debugging
shutil.copyfile(test_file('freetype', 'LiberationSansBold.ttf'), 'font.ttf')
# Main
self.do_run_from_file(test_file('freetype', 'main.c'),
test_file('freetype', 'ref.txt'),
args=['font.ttf', 'test!', '150', '120', '25'],
libraries=self.get_freetype_library(),
includes=[test_file('third_party', 'freetype', 'include')])
# github issue 324
print('[issue 324]')
self.do_run_from_file(test_file('freetype', 'main_2.c'),
test_file('freetype', 'ref_2.txt'),
args=['font.ttf', 'w', '32', '32', '25'],
libraries=self.get_freetype_library(),
includes=[test_file('third_party', 'freetype', 'include')])
print('[issue 324 case 2]')
self.do_run_from_file(test_file('freetype', 'main_3.c'),
test_file('freetype', 'ref_3.txt'),
args=['font.ttf', 'W', '32', '32', '0'],
libraries=self.get_freetype_library(),
includes=[test_file('third_party', 'freetype', 'include')])
print('[issue 324 case 3]')
self.do_run('main_3.js',
open(test_file('freetype', 'ref_4.txt')).read(),
args=['font.ttf', 'ea', '40', '32', '0'],
no_build=True)
@no_asan('local count too large for VMs')
@is_slow_test
def test_sqlite(self):
self.set_setting('DISABLE_EXCEPTION_CATCHING')
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_sqlite3_open', '_sqlite3_close', '_sqlite3_exec', '_sqlite3_free'])
if '-g' in self.emcc_args:
print("disabling inlining") # without registerize (which -g disables), we generate huge amounts of code
self.set_setting('INLINING_LIMIT')
# newer clang has a warning for implicit conversions that lose information,
# which happens in sqlite (see #9138)
self.emcc_args += ['-Wno-implicit-int-float-conversion']
# newer clang warns about "suspicious concatenation of string literals in an
# array initialization; did you mean to separate the elements with a comma?"
self.emcc_args += ['-Wno-string-concatenation']
# ignore unknown flags, which lets the above flags be used on github CI
# before the LLVM change rolls in (the same LLVM change that adds the
# warning also starts to warn on it)
self.emcc_args += ['-Wno-unknown-warning-option']
self.emcc_args += ['-Wno-pointer-bool-conversion']
self.emcc_args += ['-I' + test_file('third_party', 'sqlite')]
src = '''
#define SQLITE_DISABLE_LFS
#define LONGDOUBLE_TYPE double
#define SQLITE_INT64_TYPE long long int
#define SQLITE_THREADSAFE 0
'''
src += open(test_file('third_party', 'sqlite', 'sqlite3.c')).read()
src += open(test_file('sqlite', 'benchmark.c')).read()
self.do_run(src,
open(test_file('sqlite', 'benchmark.txt')).read(),
includes=[test_file('sqlite')],
force_c=True)
@needs_make('mingw32-make')
@is_slow_test
@parameterized({
'cmake': (True,),
'configure': (False,)
})
def test_zlib(self, use_cmake):
if WINDOWS and not use_cmake:
self.skipTest("Windows cannot run configure sh scripts")
self.maybe_closure()
self.emcc_args.append('-Wno-shift-negative-value')
if '-g' in self.emcc_args:
self.emcc_args.append('-gsource-map') # more source maps coverage
if use_cmake:
make_args = []
configure = ['cmake', '.']
else:
make_args = ['libz.a']
configure = ['sh', './configure']
self.do_run_from_file(
test_file('third_party', 'zlib', 'example.c'),
test_file('core', 'test_zlib.out'),
libraries=self.get_library(os.path.join('third_party', 'zlib'), 'libz.a', make_args=make_args, configure=configure),
includes=[test_file('third_party', 'zlib'), 'building', 'zlib'])
@needs_make('make')
@is_slow_test
@parameterized({
'cmake': (True,),
'autoconf': (False,)
})
# Called thus so it runs late in the alphabetical cycle... it is long
def test_bullet(self, use_cmake):
if WINDOWS and not use_cmake:
self.skipTest("Windows cannot run configure sh scripts")
self.emcc_args += [
'-Wno-c++11-narrowing',
'-Wno-deprecated-register',
'-Wno-writable-strings',
'-Wno-shift-negative-value',
'-Wno-format'
]
asserts = self.get_setting('ASSERTIONS', 0)
# extra testing for ASSERTIONS == 2
self.set_setting('ASSERTIONS', 2 if use_cmake else asserts)
self.do_runf(test_file('third_party', 'bullet', 'Demos', 'HelloWorld', 'HelloWorld.cpp'),
[open(test_file('bullet', 'output.txt')).read(), # different roundings
open(test_file('bullet', 'output2.txt')).read(),
open(test_file('bullet', 'output3.txt')).read(),
open(test_file('bullet', 'output4.txt')).read()],
libraries=self.get_bullet_library(use_cmake),
includes=[test_file('third_party', 'bullet', 'src')])
@no_asan('issues with freetype itself')
@needs_make('depends on freetype')
@is_slow_test
def test_poppler(self):
pdf_data = open(test_file('poppler', 'paper.pdf'), 'rb').read()
create_file('paper.pdf.js', str(list(bytearray(pdf_data))))
create_file('pre.js', '''
Module.preRun = function() {
FS.createDataFile('/', 'paper.pdf', eval(read_('paper.pdf.js')), true, false, false);
};
Module.postRun = function() {
var FileData = Array.from(MEMFS.getFileDataAsTypedArray(FS.root.contents['filename-1.ppm']));
out("Data: " + JSON.stringify(FileData.map(function(x) { return unSign(x, 8) })));
};
''')
self.emcc_args += ['--pre-js', 'pre.js', '-s', 'DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=$unSign']
ppm_data = str(list(bytearray(open(test_file('poppler', 'ref.ppm'), 'rb').read())))
self.do_run('', ppm_data.replace(' ', ''),
libraries=self.get_poppler_library(),
args=['-scale-to', '512', 'paper.pdf', 'filename'])
@needs_make('make')
@is_slow_test
def test_openjpeg(self):
def do_test_openjpeg():
def line_splitter(data):
out = ''
counter = 0
for ch in data:
out += ch
if ch == ' ' and counter > 60:
out += '\n'
counter = 0
else:
counter += 1
return out
# remove -g, so we have one test without it by default
self.emcc_args = [x for x in self.emcc_args if x != '-g']
original_j2k = test_file('openjpeg', 'syntensity_lobby_s.j2k')
image_bytes = list(bytearray(open(original_j2k, 'rb').read()))
create_file('pre.js', """
Module.preRun = function() { FS.createDataFile('/', 'image.j2k', %s, true, false, false); };
Module.postRun = function() {
out('Data: ' + JSON.stringify(Array.from(MEMFS.getFileDataAsTypedArray(FS.analyzePath('image.raw').object))));
};
""" % line_splitter(str(image_bytes)))
shutil.copy(test_file('third_party', 'openjpeg', 'opj_config.h'), self.get_dir())
lib = self.get_library(os.path.join('third_party', 'openjpeg'),
[os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/index.c.o'.split('/')),
os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/convert.c.o'.split('/')),
os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/__/common/color.c.o'.split('/')),
os.path.join('bin', 'libopenjpeg.a')],
configure=['cmake', '.'],
# configure_args=['--enable-tiff=no', '--enable-jp3d=no', '--enable-png=no'],
make_args=[]) # no -j 2, since parallel builds can fail
# JS uses doubles, so our values differ slightly from native code; instead of an exact
# match we check our output by comparing the average pixel difference
def image_compare(output, err):
# Get the image generated by JS, from the JSON.stringify'd array
m = re.search(r'\[[\d, -]*\]', output)
self.assertIsNotNone(m, 'Failed to find proper image output in: ' + output)
# Evaluate the output as a python array
js_data = eval(m.group(0))
js_data = [x if x >= 0 else 256 + x for x in js_data] # Our output may be signed, so unsign it
# Get the correct output
true_data = bytearray(open(test_file('openjpeg', 'syntensity_lobby_s.raw'), 'rb').read())
# Compare them
assert(len(js_data) == len(true_data))
num = len(js_data)
diff_total = js_total = true_total = 0
for i in range(num):
js_total += js_data[i]
true_total += true_data[i]
diff_total += abs(js_data[i] - true_data[i])
js_mean = js_total / float(num)
true_mean = true_total / float(num)
diff_mean = diff_total / float(num)
image_mean = 83.265
# print '[image stats:', js_mean, image_mean, true_mean, diff_mean, num, ']'
assert abs(js_mean - image_mean) < 0.01, [js_mean, image_mean]
assert abs(true_mean - image_mean) < 0.01, [true_mean, image_mean]
assert diff_mean < 0.01, diff_mean
return output
self.emcc_args += ['--minify=0'] # to compare the versions
self.emcc_args += ['--pre-js', 'pre.js']
def do_test():
self.do_runf(test_file('third_party', 'openjpeg', 'codec', 'j2k_to_image.c'),
'Successfully generated', # The real test for valid output is in image_compare
args='-i image.j2k -o image.raw'.split(),
libraries=lib,
includes=[test_file('third_party', 'openjpeg', 'libopenjpeg'),
test_file('third_party', 'openjpeg', 'codec'),
test_file('third_party', 'openjpeg', 'common'),
os.path.join(self.get_build_dir(), 'openjpeg')],
output_nicerizer=image_compare)
do_test()
# extra testing
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1:
print('no memory growth', file=sys.stderr)
self.set_setting('ALLOW_MEMORY_GROWTH', 0)
do_test()
if '-fsanitize=address' in self.emcc_args:
# In ASan mode we need a large initial memory (or else wasm-ld fails).
# The OpenJPEG CMake will build several executables (which we need parts
# of in our testing, see above), so we must enable the flag for them all.
with env_modify({'EMMAKEN_CFLAGS': '-sINITIAL_MEMORY=300MB'}):
do_test_openjpeg()
else:
do_test_openjpeg()
@no_asan('call stack exceeded on some versions of node')
@is_slow_test
def test_fuzz(self):
self.emcc_args += ['-I' + test_file('fuzz', 'include'), '-w']
def run_all(x):
print(x)
for name in sorted(glob.glob(test_file('fuzz', '*.c')) + glob.glob(test_file('fuzz', '*.cpp'))):
if 'newfail' in name:
continue
if os.path.basename(name).startswith('temp_fuzzcode'):
continue
print(name)
if name.endswith('.cpp'):
self.emcc_args.append('-std=c++03')
self.do_runf(test_file('fuzz', name),
open(test_file('fuzz', name + '.txt')).read())
if name.endswith('.cpp'):
self.emcc_args.remove('-std=c++03')
run_all('normal')
self.emcc_args += ['-flto']
run_all('lto')
@also_with_standalone_wasm(wasm2c=True, impure=True)
@no_asan('autodebug logging interferes with asan')
@with_env_modify({'EMCC_AUTODEBUG': '1'})
def test_autodebug_wasm(self):
# test that the program both works and also emits some of the logging
# (but without the specific output, as it is logging the actual locals
# used and so forth, which will change between opt modes and updates of
# llvm etc.)
def check(out, err):
for msg in ['log_execution', 'get_i32', 'set_i32', 'load_ptr', 'load_val', 'store_ptr', 'store_val']:
self.assertIn(msg, out)
return out + err
self.do_runf(test_file('core', 'test_autodebug.c'),
'success', output_nicerizer=check)
@parameterized({
'full': ('full',),
'mask': ('mask',),
'none': ('none',),
})
def test_wasm2c_sandboxing(self, mode):
if not can_do_standalone(self):
return self.skipTest('standalone mode not supported')
self.set_setting('STANDALONE_WASM')
self.set_setting('WASM2C')
self.set_setting('WASM2C_SANDBOXING', mode)
self.wasm_engines = []
self.do_core_test('test_hello_world.c')
### Integration tests
@sync
def test_ccall(self):
self.emcc_args.append('-Wno-return-stack-address')
self.set_setting('EXPORTED_RUNTIME_METHODS', ['ccall', 'cwrap'])
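# ccall(name, returnType, argTypes, args) invokes an exported C function once;
# cwrap(name, returnType, argTypes) returns a reusable JS wrapper for it.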
create_file('post.js', '''
out('*');
var ret;
ret = Module['ccall']('get_int', 'number'); out([typeof ret, ret].join(','));
ret = ccall('get_float', 'number'); out([typeof ret, ret.toFixed(2)].join(','));
ret = ccall('get_bool', 'boolean'); out([typeof ret, ret].join(','));
ret = ccall('get_string', 'string'); out([typeof ret, ret].join(','));
ret = ccall('print_int', null, ['number'], [12]); out(typeof ret);
ret = ccall('print_float', null, ['number'], [14.56]); out(typeof ret);
ret = ccall('print_bool', null, ['boolean'], [true]); out(typeof ret);
ret = ccall('print_string', null, ['string'], ["cheez"]); out(typeof ret);
ret = ccall('print_string', null, ['array'], [[97, 114, 114, 45, 97, 121, 0]]); out(typeof ret); // JS array
ret = ccall('print_string', null, ['array'], [new Uint8Array([97, 114, 114, 45, 97, 121, 0])]); out(typeof ret); // typed array
ret = ccall('multi', 'number', ['number', 'number', 'number', 'string'], [2, 1.4, 3, 'more']); out([typeof ret, ret].join(','));
var p = ccall('malloc', 'pointer', ['number'], [4]);
setValue(p, 650, 'i32');
ret = ccall('pointer', 'pointer', ['pointer'], [p]); out([typeof ret, getValue(ret, 'i32')].join(','));
out('*');
// part 2: cwrap
var noThirdParam = Module['cwrap']('get_int', 'number');
out(noThirdParam());
var multi = Module['cwrap']('multi', 'number', ['number', 'number', 'number', 'string']);
out(multi(2, 1.4, 3, 'atr'));
out(multi(8, 5.4, 4, 'bret'));
out('*');
// part 3: avoid stack explosion and check it's restored correctly
for (var i = 0; i < TOTAL_STACK/60; i++) {
ccall('multi', 'number', ['number', 'number', 'number', 'string'], [0, 0, 0, '123456789012345678901234567890123456789012345678901234567890']);
}
out('stack is ok.');
ccall('call_ccall_again', null);
''')
self.emcc_args += ['--post-js', 'post.js']
self.set_setting('EXPORTED_FUNCTIONS', ['_get_int', '_get_float', '_get_bool', '_get_string', '_print_int', '_print_float', '_print_bool', '_print_string', '_multi', '_pointer', '_call_ccall_again', '_malloc'])
self.do_core_test('test_ccall.cpp')
if '-O2' in self.emcc_args and '-g' not in self.emcc_args:
print('with closure')
self.emcc_args += ['--closure=1']
self.do_core_test('test_ccall.cpp')
def test_EXPORTED_RUNTIME_METHODS(self):
self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$dynCall'])
self.do_core_test('EXPORTED_RUNTIME_METHODS.c')
# test dyncall (and other runtime methods in support.js) can be exported
self.emcc_args += ['-DEXPORTED']
self.set_setting('EXPORTED_RUNTIME_METHODS', ['dynCall', 'addFunction', 'lengthBytesUTF8', 'getTempRet0', 'setTempRet0'])
self.do_core_test('EXPORTED_RUNTIME_METHODS.c')
@parameterized({
'': [],
'minimal_runtime': ['-s', 'MINIMAL_RUNTIME=1']
})
def test_dyncall_specific(self, *args):
emcc_args = self.emcc_args.copy()
cases = [
('DIRECT', []),
('DYNAMIC_SIG', ['-s', 'DYNCALLS=1', '-s', 'DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=$dynCall']),
]
if 'MINIMAL_RUNTIME=1' not in args:
cases += [
('EXPORTED', []),
('EXPORTED_DYNAMIC_SIG', ['-s', 'DYNCALLS=1', '-s', 'DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=$dynCall', '-s', 'EXPORTED_RUNTIME_METHODS=dynCall']),
('FROM_OUTSIDE', ['-s', 'EXPORTED_RUNTIME_METHODS=dynCall_iiji'])
]
for which, extra_args in cases:
print(str(args) + ' ' + which)
self.emcc_args = emcc_args + ['-D' + which] + list(args) + extra_args
self.do_core_test('dyncall_specific.c')
def test_getValue_setValue(self):
# these used to be exported, but no longer are by default
def test(output_prefix='', args=[], assert_returncode=0):
old = self.emcc_args.copy()
self.emcc_args += args
src = test_file('core', 'getValue_setValue.cpp')
expected = test_file('core', 'getValue_setValue' + output_prefix + '.out')
self.do_run_from_file(src, expected, assert_returncode=assert_returncode)
self.emcc_args = old
# see that direct usage (not on module) works. we don't export, but the use
# keeps it alive through JSDCE
test(args=['-DDIRECT'])
# see that with assertions, we get a nice error message
self.set_setting('EXPORTED_RUNTIME_METHODS', [])
self.set_setting('ASSERTIONS')
test('_assert', assert_returncode=NON_ZERO)
self.set_setting('ASSERTIONS', 0)
# see that when we export them, things work on the module
self.set_setting('EXPORTED_RUNTIME_METHODS', ['getValue', 'setValue'])
test()
def test_FS_exports(self):
# these used to be exported, but no longer are by default
for use_files in (0, 1):
print(use_files)
def test(output_prefix='', args=[], assert_returncode=0):
if use_files:
args += ['-DUSE_FILES']
print(args)
old = self.emcc_args.copy()
self.emcc_args += args
self.do_runf(test_file('core', 'FS_exports.cpp'),
(open(test_file('core', 'FS_exports' + output_prefix + '.out')).read(),
open(test_file('core', 'FS_exports' + output_prefix + '_2.out')).read()),
assert_returncode=assert_returncode)
self.emcc_args = old
# see that direct usage (not on module) works. we don't export, but the use
# keeps it alive through JSDCE
test(args=['-DDIRECT', '-s', 'FORCE_FILESYSTEM'])
# see that with assertions, we get a nice error message
self.set_setting('EXPORTED_RUNTIME_METHODS', [])
self.set_setting('ASSERTIONS')
test('_assert', assert_returncode=NON_ZERO)
self.set_setting('ASSERTIONS', 0)
# see that when we export them, things work on the module
self.set_setting('EXPORTED_RUNTIME_METHODS', ['FS_createDataFile'])
test(args=['-s', 'FORCE_FILESYSTEM'])
def test_legacy_exported_runtime_numbers(self):
# these used to be exported, but no longer are by default
def test(output_prefix='', args=[], assert_returncode=0):
old = self.emcc_args.copy()
self.emcc_args += args
src = test_file('core', 'legacy_exported_runtime_numbers.cpp')
expected = test_file('core', 'legacy_exported_runtime_numbers%s.out' % output_prefix)
self.do_run_from_file(src, expected, assert_returncode=assert_returncode)
self.emcc_args = old
# see that direct usage (not on module) works. we don't export, but the use
# keeps it alive through JSDCE
test(args=['-DDIRECT'])
# see that with assertions, we get a nice error message
self.set_setting('EXPORTED_RUNTIME_METHODS', [])
self.set_setting('ASSERTIONS')
test('_assert', assert_returncode=NON_ZERO)
self.set_setting('ASSERTIONS', 0)
# see that when we export them, things work on the module
self.set_setting('EXPORTED_RUNTIME_METHODS', ['ALLOC_STACK'])
test()
def test_response_file(self):
response_data = '-o %s/response_file.js %s' % (self.get_dir(), test_file('hello_world.cpp'))
create_file('rsp_file', response_data.replace('\\', '\\\\'))
self.run_process([EMCC, "@rsp_file"] + self.get_emcc_args())
self.do_run('response_file.js', 'hello, world', no_build=True)
self.assertContained('response file not found: foo.txt', self.expect_fail([EMCC, '@foo.txt']))
def test_linker_response_file(self):
objfile = 'response_file.o'
self.run_process([EMCC, '-c', test_file('hello_world.cpp'), '-o', objfile] + self.get_emcc_args())
# This should expand into -Wl,--start-group <objfile> -Wl,--end-group
response_data = '--start-group ' + objfile + ' --end-group'
create_file('rsp_file', response_data.replace('\\', '\\\\'))
self.run_process([EMCC, "-Wl,@rsp_file", '-o', 'response_file.o.js'] + self.get_emcc_args())
self.do_run('response_file.o.js', 'hello, world', no_build=True)
def test_exported_response(self):
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <emscripten.h>
extern "C" {
int other_function() { return 5; }
}
int main() {
int x = EM_ASM_INT({ return Module._other_function() });
emscripten_run_script_string(""); // Add a reference to a symbol that exists in src/deps_info.json to uncover issue #2836 in the test suite.
printf("waka %d!\n", x);
return 0;
}
'''
create_file('exps', '["_main","_other_function"]')
self.set_setting('EXPORTED_FUNCTIONS', '@exps')
self.do_run(src, '''waka 5!''')
assert 'other_function' in open('src.js').read()
def test_large_exported_response(self):
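# Generate 5000 exported functions so that EXPORTED_FUNCTIONS is far too long for the
# command line and must be read from an @-response file.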
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <emscripten.h>
extern "C" {
'''
js_funcs = []
num_exports = 5000
count = 0
while count < num_exports:
src += 'int exported_func_from_response_file_%d () { return %d;}\n' % (count, count)
js_funcs.append('_exported_func_from_response_file_%d' % count)
count += 1
src += r'''
}
int main() {
int x = EM_ASM_INT({ return Module._exported_func_from_response_file_4999() });
emscripten_run_script_string(""); // Add a reference to a symbol that exists in src/deps_info.json to uncover issue #2836 in the test suite.
printf("waka %d!\n", x);
return 0;
}
'''
js_funcs.append('_main')
create_file('large_exported_response.json', json.dumps(js_funcs))
self.set_setting('EXPORTED_FUNCTIONS', '@large_exported_response.json')
self.do_run(src, 'waka 4999!')
self.assertContained('_exported_func_from_response_file_1', open('src.js').read())
@sync
def test_add_function(self):
self.set_setting('INVOKE_RUN', 0)
self.set_setting('RESERVED_FUNCTION_POINTERS')
self.set_setting('EXPORTED_RUNTIME_METHODS', ['callMain'])
src = test_file('interop', 'test_add_function.cpp')
post_js = test_file('interop', 'test_add_function_post.js')
self.emcc_args += ['--post-js', post_js]
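# addFunction() needs spare wasm table slots: either reserved up front via
# RESERVED_FUNCTION_POINTERS or added at runtime via ALLOW_TABLE_GROWTH. The runs below
# cover both, plus the failure mode when neither is available.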
print('basics')
self.do_run_in_out_file_test('interop', 'test_add_function.cpp')
print('with RESERVED_FUNCTION_POINTERS=0')
self.set_setting('RESERVED_FUNCTION_POINTERS', 0)
expected = 'Unable to grow wasm table'
if self.is_wasm2js():
# in wasm2js the error message doesn't come from the VM, but from our
# emulation code. when ASSERTIONS are enabled we show a clear message, but
# in optimized builds we don't waste code size on that, and the JS engine
# shows a generic error.
expected = 'wasmTable.grow is not a function'
self.do_runf(src, expected, assert_returncode=NON_ZERO)
print('- with table growth')
self.set_setting('ALLOW_TABLE_GROWTH')
self.emcc_args += ['-DGROWTH']
# enable costly assertions to verify correct table behavior
self.set_setting('ASSERTIONS', 2)
self.do_run_in_out_file_test('interop', 'test_add_function.cpp')
def test_getFuncWrapper_sig_alias(self):
self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$getFuncWrapper'])
src = r'''
#include <stdio.h>
#include <emscripten.h>
void func1(int a) {
printf("func1\n");
}
void func2(int a, int b) {
printf("func2\n");
}
int main() {
EM_ASM({
getFuncWrapper($0, 'vi')(0);
getFuncWrapper($1, 'vii')(0, 0);
}, func1, func2);
return 0;
}
'''
self.do_run(src, 'func1\nfunc2\n')
def test_emulate_function_pointer_casts(self):
self.set_setting('EMULATE_FUNCTION_POINTER_CASTS')
self.do_core_test('test_emulate_function_pointer_casts.cpp')
@no_wasm2js('TODO: nicely printed names in wasm2js')
@parameterized({
'normal': ([],),
'noexcept': (['-fno-exceptions'],)
})
def test_demangle_stacks(self, extra_args):
self.emcc_args += extra_args
self.set_setting('DEMANGLE_SUPPORT')
self.set_setting('ASSERTIONS')
# disable aggressive inlining in binaryen
self.set_setting('BINARYEN_EXTRA_PASSES', '--one-caller-inline-max-function-size=1')
# ensure function names are preserved
self.emcc_args += ['--profiling-funcs']
self.do_core_test('test_demangle_stacks.cpp', assert_returncode=NON_ZERO)
if not self.has_changed_setting('ASSERTIONS'):
print('without assertions, the stack is not printed, but a message suggesting assertions is')
self.set_setting('ASSERTIONS', 0)
self.do_core_test('test_demangle_stacks_noassert.cpp', assert_returncode=NON_ZERO)
def test_demangle_stacks_symbol_map(self):
# disable aggressive inlining in binaryen
self.set_setting('BINARYEN_EXTRA_PASSES', '--one-caller-inline-max-function-size=1')
self.set_setting('DEMANGLE_SUPPORT')
if '-O' not in str(self.emcc_args) or '-O0' in self.emcc_args or '-O1' in self.emcc_args or '-g' in self.emcc_args:
self.skipTest("without opts, we don't emit a symbol map")
self.emcc_args += ['--emit-symbol-map']
self.do_runf(test_file('core', 'test_demangle_stacks.cpp'), 'abort', assert_returncode=NON_ZERO)
# make sure the shortened name is the right one
full_aborter = None
short_aborter = None
for line in open('test_demangle_stacks.js.symbols').readlines():
if ':' not in line:
continue
# split by the first ':' (wasm backend demangling may include more :'s later on)
short, full = line.split(':', 1)
if 'Aborter' in full:
short_aborter = short
full_aborter = full
self.assertIsNotNone(full_aborter)
self.assertIsNotNone(short_aborter)
print('full:', full_aborter, 'short:', short_aborter)
if config.SPIDERMONKEY_ENGINE and os.path.exists(config.SPIDERMONKEY_ENGINE[0]):
output = self.run_js('test_demangle_stacks.js', engine=config.SPIDERMONKEY_ENGINE, assert_returncode=NON_ZERO)
# we may see the full one, if -g, or the short one if not
if ' ' + short_aborter + ' ' not in output and ' ' + full_aborter + ' ' not in output:
# stack traces may also be ' name ' or 'name@' etc
if '\n' + short_aborter + ' ' not in output and '\n' + full_aborter + ' ' not in output and 'wasm-function[' + short_aborter + ']' not in output:
if '\n' + short_aborter + '@' not in output and '\n' + full_aborter + '@' not in output:
self.assertContained(' ' + short_aborter + ' ' + '\n' + ' ' + full_aborter + ' ', output)
@no_safe_heap('tracing from sbrk into JS leads to an infinite loop')
def test_tracing(self):
self.emcc_args += ['--tracing']
self.do_core_test('test_tracing.c')
@disabled('https://github.com/emscripten-core/emscripten/issues/9527')
def test_eval_ctors(self):
if '-O2' not in str(self.emcc_args) or '-O1' in str(self.emcc_args):
self.skipTest('need js optimizations')
if not self.is_wasm():
self.skipTest('this test uses wasm binaries')
print('leave printf in ctor')
self.set_setting('EVAL_CTORS')
self.do_run(r'''
#include <stdio.h>
struct C {
C() { printf("constructing!\n"); } // don't remove this!
};
C c;
int main() {}
''', "constructing!\n")
def get_code_size():
if self.is_wasm():
# Use the number of functions as a proxy for code size
return self.count_wasm_contents('hello_libcxx.wasm', 'funcs')
else:
return os.path.getsize('hello_libcxx.js')
def get_mem_size():
if self.is_wasm():
# Use the size of the static memory data as a proxy for memory size
return self.count_wasm_contents('hello_libcxx.wasm', 'memory-data')
if self.uses_memory_init_file():
return os.path.getsize('hello_libcxx.js.mem')
# otherwise we ignore memory size
return 0
def do_test(test):
self.set_setting('EVAL_CTORS')
test()
ec_code_size = get_code_size()
ec_mem_size = get_mem_size()
self.clear_setting('EVAL_CTORS')
test()
code_size = get_code_size()
mem_size = get_mem_size()
if mem_size:
print('mem: ', mem_size, '=>', ec_mem_size)
self.assertGreater(ec_mem_size, mem_size)
print('code:', code_size, '=>', ec_code_size)
self.assertLess(ec_code_size, code_size)
print('remove ctor that just assigns to memory')
def test1():
self.do_run(r'''
#include <stdio.h>
struct C {
int x;
C() {
volatile int y = 10;
y++;
x = y;
}
};
C c;
int main() {
printf("x: %d\n", c.x);
}
''', "x: 11\n")
do_test(test1)
# The wasm backend currently exports a single initializer, so the ctor
# evaluation is all or nothing. In addition, it doesn't currently
# do DCE of libcxx symbols (because they are marked as visibility(default)),
# and because of that we end up not being able to eval ctors unless all
# libcxx constructors can be eval'd
print('libcxx - remove 2 ctors from iostream code')
output = 'hello, world!'
def test2():
self.do_runf(test_file('hello_libcxx.cpp'), output)
do_test(test2)
print('assertions too')
self.set_setting('ASSERTIONS')
self.do_runf(test_file('hello_libcxx.cpp'), output)
self.set_setting('ASSERTIONS', 0)
print('remove just some, leave others')
def test3():
self.do_run(r'''
#include <iostream>
#include <string>
class std_string {
public:
std_string(): ptr(nullptr) { std::cout << "std_string()\n"; }
std_string(const char* s): ptr(s) { std::cout << "std_string(const char* s)" << std::endl; }
std_string(const std_string& s): ptr(s.ptr) { std::cout << "std_string(const std_string& s) " << std::endl; }
const char* data() const { return ptr; }
private:
const char* ptr;
};
const std_string txtTestString("212121\0");
const std::string s2text("someweirdtext");
int main() {
std::cout << s2text << std::endl;
std::cout << txtTestString.data() << std::endl;
std::cout << txtTestString.data() << std::endl;
return 0;
}
''', '''std_string(const char* s)
someweirdtext
212121
212121
''') # noqa
do_test(test3)
def test_embind(self):
self.emcc_args += ['--bind']
create_file('test_embind.cpp', r'''
#include <stdio.h>
#include <emscripten/val.h>
using namespace emscripten;
int main() {
val Math = val::global("Math");
// two ways to call Math.abs
printf("abs(-10): %d\n", Math.call<int>("abs", -10));
printf("abs(-11): %d\n", Math["abs"](-11).as<int>());
return 0;
}
''')
self.do_runf('test_embind.cpp', 'abs(-10): 10\nabs(-11): 11')
def test_embind_2(self):
self.emcc_args += ['--bind', '--post-js', 'post.js']
create_file('post.js', '''
function printLerp() {
out('lerp ' + Module.lerp(100, 200, 66) + '.');
}
''')
create_file('test_embind_2.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
#include <emscripten/bind.h>
using namespace emscripten;
int lerp(int a, int b, int t) {
return (100 - t) * a + t * b;
}
EMSCRIPTEN_BINDINGS(my_module) {
function("lerp", &lerp);
}
int main(int argc, char **argv) {
EM_ASM(printLerp());
return 0;
}
''')
self.do_runf('test_embind_2.cpp', 'lerp 166')
def test_embind_3(self):
self.emcc_args += ['--bind', '--post-js', 'post.js']
create_file('post.js', '''
function ready() {
try {
Module.compute(new Uint8Array([1,2,3]));
} catch(e) {
out(e);
}
}
''')
create_file('test_embind_3.cpp', r'''
#include <emscripten.h>
#include <emscripten/bind.h>
using namespace emscripten;
int compute(int array[]) {
return 0;
}
EMSCRIPTEN_BINDINGS(my_module) {
function("compute", &compute, allow_raw_pointers());
}
int main(int argc, char **argv) {
EM_ASM(ready());
return 0;
}
''')
self.do_runf('test_embind_3.cpp', 'UnboundTypeError: Cannot call compute due to unbound types: Pi')
def test_embind_4(self):
self.emcc_args += ['--bind', '--post-js', 'post.js']
create_file('post.js', '''
function printFirstElement() {
out(Module.getBufferView()[0]);
}
''')
create_file('test_embind_4.cpp', r'''
#include <emscripten.h>
#include <emscripten/bind.h>
#include <emscripten/val.h>
#include <stdio.h>
using namespace emscripten;
const size_t kBufferSize = 1024;
double buffer[kBufferSize];
val getBufferView(void) {
val v = val(typed_memory_view(kBufferSize, buffer));
return v;
}
EMSCRIPTEN_BINDINGS(my_module) {
function("getBufferView", &getBufferView);
}
int main(int argc, char **argv) {
buffer[0] = 107;
EM_ASM(printFirstElement());
return 0;
}
''')
self.do_runf('test_embind_4.cpp', '107')
def test_embind_5(self):
self.emcc_args += ['--bind']
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_embind_5.cpp')
def test_embind_custom_marshal(self):
self.emcc_args += ['--bind', '--pre-js', test_file('embind', 'test_custom_marshal.js')]
self.do_run_in_out_file_test('embind', 'test_custom_marshal.cpp', assert_identical=True)
def test_embind_float_constants(self):
self.emcc_args += ['--bind']
self.do_run_in_out_file_test('embind', 'test_float_constants.cpp')
def test_embind_negative_constants(self):
self.emcc_args += ['--bind']
self.do_run_in_out_file_test('embind', 'test_negative_constants.cpp')
@also_with_wasm_bigint
def test_embind_unsigned(self):
self.emcc_args += ['--bind']
self.do_run_in_out_file_test('embind', 'test_unsigned.cpp')
def test_embind_val(self):
self.emcc_args += ['--bind']
self.do_run_in_out_file_test('embind', 'test_val.cpp')
def test_embind_no_rtti(self):
create_file('main.cpp', r'''
#include <emscripten.h>
#include <emscripten/bind.h>
#include <emscripten/val.h>
#include <stdio.h>
EM_JS(void, calltest, (), {
console.log("dotest returned: " + Module.dotest());
});
int main(int argc, char** argv){
printf("418\n");
calltest();
return 0;
}
int test() {
return 42;
}
EMSCRIPTEN_BINDINGS(my_module) {
emscripten::function("dotest", &test);
}
''')
self.emcc_args += ['--bind', '-fno-rtti', '-DEMSCRIPTEN_HAS_UNBOUND_TYPE_NAMES=0']
self.do_runf('main.cpp', '418\ndotest returned: 42\n')
def test_embind_polymorphic_class_no_rtti(self):
self.emcc_args += ['--bind', '-fno-rtti', '-DEMSCRIPTEN_HAS_UNBOUND_TYPE_NAMES=0']
self.do_core_test('test_embind_polymorphic_class_no_rtti.cpp')
def test_embind_no_rtti_followed_by_rtti(self):
src = r'''
#include <emscripten.h>
#include <emscripten/bind.h>
#include <emscripten/val.h>
#include <stdio.h>
EM_JS(void, calltest, (), {
console.log("dotest returned: " + Module.dotest());
});
int main(int argc, char** argv){
printf("418\n");
calltest();
return 0;
}
int test() {
return 42;
}
EMSCRIPTEN_BINDINGS(my_module) {
emscripten::function("dotest", &test);
}
'''
self.emcc_args += ['--bind', '-fno-rtti', '-frtti']
self.do_run(src, '418\ndotest returned: 42\n')
@parameterized({
'all': ('ALL', False),
'fast': ('FAST', False),
'default': ('DEFAULT', False),
'all_growth': ('ALL', True),
})
@sync
def test_webidl(self, mode, allow_memory_growth):
if self.maybe_closure():
# avoid closure-minified names competing with our test code in the global namespace
self.set_setting('MODULARIZE')
# Force IDL checks mode
with env_modify({'IDL_CHECKS': mode}):
self.run_process([WEBIDL_BINDER, test_file('webidl', 'test.idl'), 'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
# Export things on "TheModule". This matches the typical use pattern of the bound library
# being used as Box2D.* or Ammo.*, and we cannot rely on "Module" always being present (closure may remove it).
self.emcc_args += ['-s', 'EXPORTED_FUNCTIONS=_malloc,_free', '--post-js', 'glue.js']
if allow_memory_growth:
self.set_setting('ALLOW_MEMORY_GROWTH')
def post(filename):
with open(filename, 'a') as f:
f.write('\n\n')
if self.get_setting('MODULARIZE'):
f.write('var TheModule = Module();\n')
else:
f.write('var TheModule = Module;\n')
f.write('\n\n')
if allow_memory_growth:
f.write("var isMemoryGrowthAllowed = true;")
else:
f.write("var isMemoryGrowthAllowed = false;")
f.write(open(test_file('webidl', 'post.js')).read())
f.write('\n\n')
output = test_file('webidl', "output_%s.txt" % mode)
self.do_run_from_file(test_file('webidl', 'test.cpp'), output, post_build=post)
### Tests for tools
@no_wasm2js('TODO: source maps in wasm2js')
@parameterized({
'': ([],),
'minimal_runtime': (['-s', 'MINIMAL_RUNTIME'],),
})
def test_source_map(self, args):
if '-g' not in self.emcc_args:
self.emcc_args.append('-g')
self.emcc_args += args
src = '''
#include <stdio.h>
#include <assert.h>
__attribute__((noinline)) int foo() {
printf("hi"); // line 6
return 1; // line 7
}
int main() {
printf("%d", foo()); // line 11
return 0; // line 12
}
'''
create_file('src.cpp', src)
out_filename = 'a.out.js'
wasm_filename = 'a.out.wasm'
no_maps_filename = 'no-maps.out.js'
assert '-gsource-map' not in self.emcc_args
building.emcc('src.cpp', self.get_emcc_args(), out_filename)
# the file name may find its way into the generated code, so make sure we
# can do an apples-to-apples comparison by compiling with the same file name
shutil.move(out_filename, no_maps_filename)
with open(no_maps_filename) as f:
no_maps_file = f.read()
no_maps_file = re.sub(' *//[@#].*$', '', no_maps_file, flags=re.MULTILINE)
self.emcc_args.append('-gsource-map')
building.emcc(os.path.abspath('src.cpp'),
self.get_emcc_args(),
out_filename,
stderr=PIPE)
map_referent = out_filename if not self.is_wasm() else wasm_filename
# after removing the @line and @sourceMappingURL comments, the build
# result should be identical to the non-source-mapped debug version.
# this is worth checking because the parser AST swaps strings for token
# objects when generating source maps, so we want to make sure the
# optimizer can deal with both types.
map_filename = map_referent + '.map'
def encode_utf8(data):
if isinstance(data, dict):
for key in data:
data[key] = encode_utf8(data[key])
return data
elif isinstance(data, list):
for i in range(len(data)):
data[i] = encode_utf8(data[i])
return data
elif isinstance(data, type(u'')):
return data.encode('utf8')
else:
return data
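# Note added for clarity (behaviour unchanged): on Python 2 this helper recursively turns
# unicode strings into UTF-8 byte strings, e.g. {'sources': [u'src.cpp']} becomes
# {'sources': ['src.cpp']} with byte-string elements; dict keys and non-string values are
# left untouched.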
data = json.load(open(map_filename))
if str is bytes:
# Python 2 compatibility
data = encode_utf8(data)
if 'file' in data:
# the file attribute is optional, but if it is present it needs to refer to
# the output file.
self.assertPathsIdentical(map_referent, data['file'])
self.assertGreater(len(data['sources']), 1)
self.assertPathsIdentical('src.cpp', data['sources'][0])
if 'sourcesContent' in data:
# the sourcesContent attribute is optional, but if it is present it
# needs to contain valid source text.
self.assertTextDataIdentical(src, data['sourcesContent'][0])
mappings = json.loads(self.run_js(
path_from_root('tools', 'source-maps', 'sourcemap2json.js'),
args=[map_filename]))
if str is bytes:
# Python 2 compatibility
mappings = encode_utf8(mappings)
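# For reference (note added here): each mapping entry produced by sourcemap2json.js is
# expected to expose at least 'source' and 'originalLine', which is all the check below
# relies on.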
seen_lines = set()
for m in mappings:
if m['source'] == 'src.cpp':
seen_lines.add(m['originalLine'])
# ensure that all the 'meaningful' lines in the original code get mapped
# when optimizing, the binaryen optimizer may remove some of them (by inlining, etc.)
if is_optimizing(self.emcc_args):
self.assertTrue(seen_lines.issuperset([11, 12]), seen_lines)
else:
self.assertTrue(seen_lines.issuperset([6, 7, 11, 12]), seen_lines)
@no_wasm2js('TODO: source maps in wasm2js')
def test_dwarf(self):
self.emcc_args.append('-g')
create_file('src.cpp', '''
#include <emscripten.h>
EM_JS(int, out_to_js, (int x), {})
void foo() {
out_to_js(0); // line 5
out_to_js(1); // line 6
out_to_js(2); // line 7
// A silly possible recursion to avoid binaryen doing any inlining.
if (out_to_js(3)) foo();
}
int main() {
foo();
}
''')
js_filename = 'a.out.js'
wasm_filename = 'a.out.wasm'
building.emcc('src.cpp', self.get_emcc_args(), js_filename)
out = self.run_process([shared.LLVM_DWARFDUMP, wasm_filename, '-all'], stdout=PIPE).stdout
# parse the sections
sections = {}
curr_section_name = ''
curr_section_body = ''
def add_section():
if curr_section_name:
sections[curr_section_name] = curr_section_body
for line in out.splitlines():
if ' contents:' in line:
# a new section, a line like ".debug_str contents:"
add_section()
curr_section_name = line.split(' ')[0]
curr_section_body = ''
else:
# possibly a line in a section
if curr_section_name:
curr_section_body += line + '\n'
add_section()
# make sure the right sections exist
self.assertIn('.debug_abbrev', sections)
self.assertIn('.debug_info', sections)
self.assertIn('.debug_line', sections)
self.assertIn('.debug_str', sections)
self.assertIn('.debug_ranges', sections)
# verify some content in the sections
self.assertIn('"src.cpp"', sections['.debug_info'])
# the line section looks like this:
# Address Line Column File ISA Discriminator Flags
# ------------------ ------ ------ ------ --- ------------- -------------
# 0x000000000000000b 5 0 3 0 0 is_stmt
src_to_addr = {}
found_src_cpp = False
for line in sections['.debug_line'].splitlines():
if 'name: "src.cpp"' in line:
found_src_cpp = True
if not found_src_cpp:
continue
if 'debug_line' in line:
break
if line.startswith('0x'):
while ' ' in line:
line = line.replace(' ', ' ')
addr, line, col = line.split(' ')[:3]
key = (int(line), int(col))
src_to_addr.setdefault(key, []).append(addr)
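# Illustrative example (values assumed, not from a real dump): a row such as
# "0x000000000000000b 5 9 3 0 0 is_stmt" ends up recorded as
# src_to_addr[(5, 9)] == ['0x000000000000000b'].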
# each of the calls must remain in the binary, and be mapped
self.assertIn((5, 9), src_to_addr)
self.assertIn((6, 9), src_to_addr)
self.assertIn((7, 9), src_to_addr)
def get_dwarf_addr(line, col):
addrs = src_to_addr[(line, col)]
# we assume the simple calls have one address
self.assertEqual(len(addrs), 1)
return int(addrs[0], 0)
# the lines must appear in sequence (as calls to JS, the optimizer cannot
# reorder them)
self.assertLess(get_dwarf_addr(5, 9), get_dwarf_addr(6, 9))
self.assertLess(get_dwarf_addr(6, 9), get_dwarf_addr(7, 9))
# Get the wat, printing with -g which has binary offsets
wat = self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'),
wasm_filename, '-g', '--print'], stdout=PIPE).stdout
# We expect to see a pattern like this in optimized builds (there isn't
# much that can change with such calls to JS; they can't be reordered or
# anything else):
#
# ;; code offset: 0x?
# (drop
# ;; code offset: 0x?
# (call $out_to_js
# ;; code offset: 0x?
# (local.get ?) or (i32.const ?)
# )
# )
#
# In the stacky stream of instructions form, it is
#
# local.get or i32.const
# call $out_to_js
# drop
#
# However, in an unoptimized build the constant may be assigned earlier in
# some other manner, so stop here.
if not is_optimizing(self.emcc_args):
return
# get_wat_addr gets the address of one of the 3 interesting calls, by its
# index (0,1,2).
def get_wat_addr(call_index):
# find the call_index-th call
call_loc = -1
for i in range(call_index + 1):
call_loc = wat.find('call $out_to_js', call_loc + 1)
assert call_loc > 0
# the call begins with the local.get/i32.const printed below it, which is
# the first instruction in the stream, so it has the lowest address
start_addr_loc = wat.find('0x', call_loc)
assert start_addr_loc > 0
start_addr_loc_end = wat.find('\n', start_addr_loc)
start_addr = int(wat[start_addr_loc:start_addr_loc_end], 0)
# the call ends with the drop, which is the last in the stream, at the
# highest address
end_addr_loc = wat.rfind('drop', 0, call_loc)
assert end_addr_loc > 0
end_addr_loc = wat.rfind('0x', 0, end_addr_loc)
assert end_addr_loc > 0
end_addr_loc_end = wat.find('\n', end_addr_loc)
assert end_addr_loc_end > 0
end_addr = int(wat[end_addr_loc:end_addr_loc_end], 0)
return (start_addr, end_addr)
# match up the DWARF and the wat
for i in range(3):
dwarf_addr = get_dwarf_addr(5 + i, 9)
start_wat_addr, end_wat_addr = get_wat_addr(i)
# the dwarf may match any of the 3 instructions that form the stream
# of instructions implementing the call in the source code, in theory
self.assertLessEqual(start_wat_addr, dwarf_addr)
self.assertLessEqual(dwarf_addr, end_wat_addr)
def test_modularize_closure_pre(self):
# test that the combination of modularize + closure + pre-js works. in that mode,
# closure should not minify the Module object in a way that prevents the pre-js from using it.
self.emcc_args += [
'--pre-js', test_file('core', 'modularize_closure_pre.js'),
'--closure=1',
'-g1',
'-s',
'MODULARIZE=1',
]
def post(filename):
with open(filename, 'a') as f:
f.write('\n\n')
f.write('var TheModule = Module();\n')
self.do_core_test('modularize_closure_pre.c', post_build=post)
@no_wasm2js('symbol names look different wasm2js backtraces')
def test_emscripten_log(self):
self.banned_js_engines = [config.V8_ENGINE] # v8 doesn't support console.log
self.set_setting('DEMANGLE_SUPPORT')
if '-g' not in self.emcc_args:
self.emcc_args.append('-g')
self.emcc_args += ['-DRUN_FROM_JS_SHELL']
self.do_run_in_out_file_test('emscripten_log', 'emscripten_log.cpp')
# test closure compiler as well
print('closure')
self.emcc_args += ['--closure=1', '-g1'] # extra testing
self.do_run_in_out_file_test('emscripten_log', 'emscripten_log_with_closure.cpp')
def test_float_literals(self):
self.do_run_in_out_file_test('test_float_literals.cpp')
def test_exit_status(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
src = r'''
#include <stdio.h>
#include <stdlib.h>
static void cleanup() {
printf("cleanup\n");
}
int main() {
atexit(cleanup); // this atexit should still be called
printf("hello, world!\n");
// Unusual exit status to make sure it's working!
if (CAPITAL_EXIT) {
_Exit(118);
} else {
exit(118);
}
}
'''
create_file('pre.js', '''
Module.preInit = function() {
addOnExit(function () {
out('I see exit status: ' + EXITSTATUS);
});
}
''')
self.emcc_args += ['--pre-js', 'pre.js']
self.do_run(src.replace('CAPITAL_EXIT', '0'), 'hello, world!\ncleanup\nI see exit status: 118', assert_returncode=118)
self.do_run(src.replace('CAPITAL_EXIT', '1'), 'hello, world!\ncleanup\nI see exit status: 118', assert_returncode=118)
def test_noexitruntime(self):
src = r'''
#include <emscripten.h>
#include <stdio.h>
static int testPre = TEST_PRE;
struct Global {
Global() {
printf("in Global()\n");
if (testPre) { EM_ASM(noExitRuntime = true;); }
}
~Global() { printf("ERROR: in ~Global()\n"); }
} global;
int main() {
if (!testPre) { EM_ASM(noExitRuntime = true;); }
printf("in main()\n");
}
'''
self.do_run(src.replace('TEST_PRE', '0'), 'in Global()\nin main()')
self.do_run(src.replace('TEST_PRE', '1'), 'in Global()\nin main()')
def test_minmax(self):
self.do_runf(test_file('test_minmax.c'), 'NAN != NAN\nSuccess!')
def test_localeconv(self):
self.do_run_in_out_file_test('core', 'test_localeconv.c')
def test_newlocale(self):
self.do_run_in_out_file_test('core', 'test_newlocale.c')
def test_setlocale(self):
self.do_run_in_out_file_test('core', 'test_setlocale.c')
def test_vswprintf_utf8(self):
self.do_run_in_out_file_test('vswprintf_utf8.c')
@no_asan('asan is not compatible with asyncify stack operations; may also need to not instrument asan_c_load_4, TODO')
def test_async(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.set_setting('ASYNCIFY')
self.banned_js_engines = [config.SPIDERMONKEY_ENGINE, config.V8_ENGINE] # needs setTimeout which only node has
src = r'''
#include <stdio.h>
#include <emscripten.h>
void f(void *p) {
*(int*)p = 99;
printf("!");
}
int main() {
int i = 0;
printf("Hello");
emscripten_async_call(f, &i, 1);
printf("World");
emscripten_sleep(100);
printf("%d\n", i);
}
'''
self.do_run(src, 'HelloWorld!99')
print('check bad ccall use')
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("Hello");
emscripten_sleep(100);
printf("World\n");
}
'''
self.set_setting('ASSERTIONS')
self.set_setting('INVOKE_RUN', 0)
create_file('pre.js', '''
Module['onRuntimeInitialized'] = function() {
try {
ccall('main', 'number', ['number', 'string'], [2, 'waka']);
var never = true;
} catch(e) {
out(e);
assert(!never);
}
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
self.do_run(src, 'The call to main is running asynchronously.')
print('check reasonable ccall use')
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("Hello");
emscripten_sleep(100);
printf("World\n");
}
'''
create_file('pre.js', '''
Module['onRuntimeInitialized'] = function() {
ccall('main', null, ['number', 'string'], [2, 'waka'], { async: true });
};
''')
self.do_run(src, 'HelloWorld')
print('check ccall promise')
self.set_setting('EXPORTED_FUNCTIONS', ['_stringf', '_floatf'])
src = r'''
#include <stdio.h>
#include <emscripten.h>
extern "C" {
const char* stringf(char* param) {
emscripten_sleep(20);
printf("%s", param);
return "second";
}
double floatf() {
emscripten_sleep(20);
emscripten_sleep(20);
return 6.4;
}
}
'''
create_file('pre.js', r'''
Module['onRuntimeInitialized'] = function() {
ccall('stringf', 'string', ['string'], ['first\n'], { async: true })
.then(function(val) {
console.log(val);
ccall('floatf', 'number', null, null, { async: true }).then(console.log);
});
};
''')
self.do_run(src, 'first\nsecond\n6.4')
@no_asan('asyncify stack operations confuse asan')
def test_fibers_asyncify(self):
self.set_setting('ASYNCIFY')
self.do_runf(test_file('test_fibers.cpp'), '*leaf-0-100-1-101-1-102-2-103-3-104-5-105-8-106-13-107-21-108-34-109-*')
def test_asyncify_unused(self):
# test a program not using asyncify, but the pref is set
self.set_setting('ASYNCIFY')
self.do_core_test('test_hello_world.c')
@parameterized({
'normal': ([], True),
'removelist_a': (['-s', 'ASYNCIFY_REMOVE=["foo(int, double)"]'], False),
'removelist_b': (['-s', 'ASYNCIFY_REMOVE=["bar()"]'], True),
'removelist_c': (['-s', 'ASYNCIFY_REMOVE=["baz()"]'], False),
'onlylist_a': (['-s', 'ASYNCIFY_ONLY=["main","__original_main","foo(int, double)","baz()","c_baz","Structy::funcy()","bar()"]'], True),
'onlylist_b': (['-s', 'ASYNCIFY_ONLY=["main","__original_main","foo(int, double)","baz()","c_baz","Structy::funcy()"]'], True),
'onlylist_c': (['-s', 'ASYNCIFY_ONLY=["main","__original_main","foo(int, double)","baz()","c_baz"]'], False),
'onlylist_d': (['-s', 'ASYNCIFY_ONLY=["foo(int, double)","baz()","c_baz","Structy::funcy()"]'], False),
'onlylist_b_response': ([], True, '["main","__original_main","foo(int, double)","baz()","c_baz","Structy::funcy()"]'),
'onlylist_c_response': ([], False, '["main","__original_main","foo(int, double)","baz()","c_baz"]'),
})
@no_asan('asan is not compatible with asyncify stack operations; may also need to not instrument asan_c_load_4, TODO')
def test_asyncify_lists(self, args, should_pass, response=None):
if response is not None:
create_file('response.file', response)
self.set_setting('ASYNCIFY_ONLY', '@response.file')
self.set_setting('ASYNCIFY')
self.emcc_args += args
try:
self.do_core_test('test_asyncify_lists.cpp', assert_identical=True)
if not should_pass:
should_pass = True
raise Exception('should not have passed')
except Exception:
if should_pass:
raise
@parameterized({
'normal': ([], True),
'ignoreindirect': (['-s', 'ASYNCIFY_IGNORE_INDIRECT'], False),
'add': (['-s', 'ASYNCIFY_IGNORE_INDIRECT', '-s', 'ASYNCIFY_ADD=["__original_main","main","virt()"]'], True),
})
@no_asan('asan is not compatible with asyncify stack operations; may also need to not instrument asan_c_load_4, TODO')
def test_asyncify_indirect_lists(self, args, should_pass):
self.set_setting('ASYNCIFY')
self.emcc_args += args
try:
self.do_core_test('test_asyncify_indirect_lists.cpp', assert_identical=True)
if not should_pass:
should_pass = True
raise Exception('should not have passed')
except Exception:
if should_pass:
raise
@no_asan('asyncify stack operations confuse asan')
def test_emscripten_scan_registers(self):
self.set_setting('ASYNCIFY')
self.do_core_test('emscripten_scan_registers.cpp')
def test_asyncify_assertions(self):
self.set_setting('ASYNCIFY')
self.set_setting('ASYNCIFY_IMPORTS', ['suspend'])
self.set_setting('ASSERTIONS')
self.do_core_test('asyncify_assertions.cpp')
@no_asan('asyncify stack operations confuse asan')
@no_wasm2js('TODO: lazy loading in wasm2js')
@parameterized({
'conditional': (True,),
'unconditional': (False,),
})
def test_emscripten_lazy_load_code(self, conditional):
self.set_setting('ASYNCIFY_LAZY_LOAD_CODE')
self.set_setting('ASYNCIFY_IGNORE_INDIRECT')
self.set_setting('MALLOC', 'emmalloc')
self.emcc_args += ['--profiling-funcs'] # so that we can find the functions for the changes below
if conditional:
self.emcc_args += ['-DCONDITIONAL']
self.do_core_test('emscripten_lazy_load_code.cpp', args=['0'])
first_size = os.path.getsize('emscripten_lazy_load_code.wasm')
second_size = os.path.getsize('emscripten_lazy_load_code.wasm.lazy.wasm')
print('first wasm size', first_size)
print('second wasm size', second_size)
if not conditional and is_optimizing(self.emcc_args) and '-g' not in self.emcc_args:
# If the call to lazy-load is unconditional, then the optimizer can dce
# out more than half
self.assertLess(first_size, 0.6 * second_size)
with open('emscripten_lazy_load_code.wasm', 'rb') as f:
with open('emscripten_lazy_load_code.wasm.lazy.wasm', 'rb') as g:
self.assertNotEqual(f.read(), g.read())
# attempts to "break" the wasm by adding an unreachable in $foo_end. returns whether we found it.
def break_wasm(name):
wat = self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), name], stdout=PIPE).stdout
lines = wat.splitlines()
wat = None
for i in range(len(lines)):
if '(func $foo_end ' in lines[i]:
j = i + 1
while '(local ' in lines[j]:
j += 1
# we found the first line after the local defs
lines[j] = '(unreachable)' + lines[j]
wat = '\n'.join(lines)
break
if wat is None:
# $foo_end is not present in the wasm, nothing to break
shutil.copyfile(name, name + '.orig')
return False
with open('wat.wat', 'w') as f:
f.write(wat)
shutil.move(name, name + '.orig')
self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-as'), 'wat.wat', '-o', name, '-g'])
return True
def verify_working(args=['0']):
self.assertContained('foo_end\n', self.run_js('emscripten_lazy_load_code.js', args=args))
def verify_broken(args=['0']):
self.assertNotContained('foo_end\n', self.run_js('emscripten_lazy_load_code.js', args=args, assert_returncode=NON_ZERO))
# the first-loaded wasm will not reach the second call, since we call it after lazy-loading.
# verify that by changing the first wasm to throw in that function
found_foo_end = break_wasm('emscripten_lazy_load_code.wasm')
if not conditional and is_optimizing(self.emcc_args):
self.assertFalse(found_foo_end, 'should have optimized out $foo_end')
verify_working()
# but breaking the second wasm actually breaks us
break_wasm('emscripten_lazy_load_code.wasm.lazy.wasm')
verify_broken()
# restore
shutil.copyfile('emscripten_lazy_load_code.wasm.orig', 'emscripten_lazy_load_code.wasm')
shutil.copyfile('emscripten_lazy_load_code.wasm.lazy.wasm.orig', 'emscripten_lazy_load_code.wasm.lazy.wasm')
verify_working()
if conditional:
# if we do not call the lazy load function, then we do not need the lazy wasm,
# and we do the second call in the first wasm
os.remove('emscripten_lazy_load_code.wasm.lazy.wasm')
verify_broken()
verify_working(['42'])
break_wasm('emscripten_lazy_load_code.wasm')
verify_broken()
# Test basic wasm2js functionality in all core compilation modes.
@no_asan('no wasm2js support yet in asan')
def test_wasm2js(self):
if not self.is_wasm():
self.skipTest('redundant to test wasm2js in wasm2js* mode')
self.set_setting('WASM', 0)
self.do_core_test('test_hello_world.c')
# a mem init file is emitted just like with JS
expect_memory_init_file = self.uses_memory_init_file()
if expect_memory_init_file:
self.assertExists('test_hello_world.js.mem')
with open('test_hello_world.js.mem', 'rb') as f:
self.assertNotEqual(f.read()[-1:], b'\0')
else:
self.assertNotExists('test_hello_world.js.mem')
@no_asan('no wasm2js support yet in asan')
def test_maybe_wasm2js(self):
if not self.is_wasm():
self.skipTest('redundant to test wasm2js in wasm2js* mode')
self.set_setting('MAYBE_WASM2JS')
# see that running as wasm works
self.do_core_test('test_hello_world.c')
# run wasm2js, bundle the code, and use the wasm2js path
cmd = [PYTHON, path_from_root('tools', 'maybe_wasm2js.py'), 'test_hello_world.js', 'test_hello_world.wasm']
if is_optimizing(self.emcc_args):
cmd += ['-O2']
self.run_process(cmd, stdout=open('do_wasm2js.js', 'w')).stdout
# remove the wasm to make sure we never use it again
os.remove('test_hello_world.wasm')
# verify that it runs
self.assertContained('hello, world!', self.run_js('do_wasm2js.js'))
@no_asan('no wasm2js support yet in asan')
@parameterized({
'': ([],),
'minimal_runtime': (['-s', 'MINIMAL_RUNTIME'],),
})
def test_wasm2js_fallback(self, args):
if not self.is_wasm():
self.skipTest('redundant to test wasm2js in wasm2js* mode')
cmd = [EMCC, test_file('small_hello_world.c'), '-s', 'WASM=2'] + args
self.run_process(cmd)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('a.out.wasm.js', 'a.out.wasm.js.unused')
self.assertContained('hello!', self.run_js('a.out.js'))
os.rename('a.out.wasm.js.unused', 'a.out.wasm.js')
# Then disable WebAssembly support in the VM, and try again. Should still work with the Wasm2js fallback.
open('b.out.js', 'w').write('WebAssembly = undefined;\n' + open('a.out.js', 'r').read())
os.remove('a.out.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.assertContained('hello!', self.run_js('b.out.js'))
def test_cxx_self_assign(self):
# See https://github.com/emscripten-core/emscripten/pull/2688 and http://llvm.org/bugs/show_bug.cgi?id=18735
self.do_run(r'''
#include <map>
#include <stdio.h>
int main() {
std::map<int, int> m;
m[0] = 1;
m = m;
// size should still be one after self assignment
if (m.size() == 1) {
printf("ok.\n");
}
}
''', 'ok.')
def test_memprof_requirements(self):
# This test checks for the global variables required to run the memory
# profiler. It would fail if these variables were no longer global
# or if their identifiers were changed.
create_file('main.cpp', '''
extern "C" {
void check_memprof_requirements();
}
int main() {
check_memprof_requirements();
return 0;
}
''')
create_file('lib.js', '''
mergeInto(LibraryManager.library, {
check_memprof_requirements: function() {
if (typeof _emscripten_stack_get_base === 'function' &&
typeof _emscripten_stack_get_end === 'function' &&
typeof _emscripten_stack_get_current === 'function' &&
typeof Module['___heap_base'] === 'number') {
out('able to run memprof');
} else {
out('missing the required variables to run memprof');
}
}
});
''')
self.emcc_args += ['--memoryprofiler', '--js-library', 'lib.js']
self.do_runf('main.cpp', 'able to run memprof')
def test_fs_dict(self):
self.set_setting('FORCE_FILESYSTEM')
self.emcc_args += ['-lidbfs.js']
self.emcc_args += ['-lnodefs.js']
create_file('pre.js', '''
Module = {};
Module['preRun'] = function() {
out(typeof FS.filesystems['MEMFS']);
out(typeof FS.filesystems['IDBFS']);
out(typeof FS.filesystems['NODEFS']);
// Globals
console.log(typeof MEMFS);
console.log(typeof IDBFS);
console.log(typeof NODEFS);
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
self.do_run('int main() { return 0; }', 'object\nobject\nobject\nobject\nobject\nobject')
def test_fs_dict_none(self):
# if IDBFS and NODEFS are not enabled, they are not present.
self.set_setting('FORCE_FILESYSTEM')
self.set_setting('ASSERTIONS')
create_file('pre.js', '''
Module = {};
Module['preRun'] = function() {
out(typeof FS.filesystems['MEMFS']);
out(typeof FS.filesystems['IDBFS']);
out(typeof FS.filesystems['NODEFS']);
// Globals
if (ASSERTIONS) {
console.log(typeof MEMFS);
console.log(IDBFS);
console.log(NODEFS);
FS.mkdir('/working1');
try {
FS.mount(IDBFS, {}, '/working1');
} catch (e) {
console.log('|' + e + '|');
}
}
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
expected = '''\
object
undefined
undefined
object
IDBFS is no longer included by default; build with -lidbfs.js
NODEFS is no longer included by default; build with -lnodefs.js
|IDBFS is no longer included by default; build with -lidbfs.js|'''
self.do_run('int main() { return 0; }', expected)
@sync
def test_stack_overflow_check(self):
self.set_setting('TOTAL_STACK', 1048576)
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.do_runf(test_file('stack_overflow.cpp'), 'stack overflow', assert_returncode=NON_ZERO)
self.emcc_args += ['-DONE_BIG_STRING']
self.do_runf(test_file('stack_overflow.cpp'), 'stack overflow', assert_returncode=NON_ZERO)
# ASSERTIONS=2 implies STACK_OVERFLOW_CHECK=2
self.clear_setting('STACK_OVERFLOW_CHECK')
self.set_setting('ASSERTIONS', 2)
self.do_runf(test_file('stack_overflow.cpp'), 'stack overflow', assert_returncode=NON_ZERO)
@node_pthreads
def test_binaryen_2170_emscripten_atomic_cas_u8(self):
self.set_setting('USE_PTHREADS')
self.do_run_in_out_file_test('binaryen_2170_emscripten_atomic_cas_u8.cpp')
@also_with_standalone_wasm()
def test_sbrk(self):
self.do_runf(test_file('sbrk_brk.cpp'), 'OK.')
def test_brk(self):
self.emcc_args += ['-DTEST_BRK=1']
self.do_runf(test_file('sbrk_brk.cpp'), 'OK.')
# Tests that we can use the dlmalloc mallinfo() function to obtain information
# about malloc()ed blocks and compute how much memory is used/freed.
@no_asan('mallinfo is not part of ASan malloc')
def test_mallinfo(self):
self.do_runf(test_file('mallinfo.cpp'), 'OK.')
@no_asan('cannot replace malloc/free with ASan')
def test_wrap_malloc(self):
self.do_runf(test_file('wrap_malloc.cpp'), 'OK.')
def test_environment(self):
self.set_setting('ASSERTIONS')
def test(assert_returncode=0):
self.do_core_test('test_hello_world.c', assert_returncode=assert_returncode)
js = open('test_hello_world.js').read()
assert ('require(' in js) == ('node' in self.get_setting('ENVIRONMENT')), 'we should have require() calls only if node js specified'
for engine in config.JS_ENGINES:
print(engine)
# set us to test in just this engine
self.banned_js_engines = [e for e in config.JS_ENGINES if e != engine]
# tell the compiler to build with just that engine
if engine == config.NODE_JS:
right = 'node'
wrong = 'shell'
else:
right = 'shell'
wrong = 'node'
# test with the right env
self.set_setting('ENVIRONMENT', right)
print('ENVIRONMENT =', self.get_setting('ENVIRONMENT'))
test()
# test with the wrong env
self.set_setting('ENVIRONMENT', wrong)
print('ENVIRONMENT =', self.get_setting('ENVIRONMENT'))
try:
test(assert_returncode=NON_ZERO)
raise Exception('unexpected success')
except Exception as e:
self.assertContained('not compiled for this environment', str(e))
# test with a combined env
self.set_setting('ENVIRONMENT', right + ',' + wrong)
print('ENVIRONMENT =', self.get_setting('ENVIRONMENT'))
test()
def test_postrun_exception(self):
# verify that an exception thrown in postRun() will not trigger the
# compilation failed handler, and will be printed to stderr.
self.add_post_run('ThisFunctionDoesNotExist()')
self.build(test_file('core', 'test_hello_world.c'))
output = self.run_js('test_hello_world.js', assert_returncode=NON_ZERO)
self.assertStartswith(output, 'hello, world!')
self.assertContained('ThisFunctionDoesNotExist is not defined', output)
# Tests that building with -s DECLARE_ASM_MODULE_EXPORTS=0 works
def test_no_declare_asm_module_exports(self):
self.set_setting('DECLARE_ASM_MODULE_EXPORTS', 0)
self.set_setting('WASM_ASYNC_COMPILATION', 0)
self.maybe_closure()
self.do_runf(test_file('declare_asm_module_exports.cpp'), 'jsFunction: 1')
js = open('declare_asm_module_exports.js').read()
occurances = js.count('cFunction')
if is_optimizing(self.emcc_args) and '-g' not in self.emcc_args:
# In optimized builds only the single reference to cFunction inside the EM_ASM should remain
if self.is_wasm():
self.assertEqual(occurances, 1)
else:
# With js the asm module itself also contains a reference for the cFunction name
self.assertEqual(occurances, 2)
else:
print(occurances)
# Tests that building with -s DECLARE_ASM_MODULE_EXPORTS=0 works
@no_asan('TODO: ASan support in minimal runtime')
def test_minimal_runtime_no_declare_asm_module_exports(self):
self.set_setting('DECLARE_ASM_MODULE_EXPORTS', 0)
self.set_setting('WASM_ASYNC_COMPILATION', 0)
self.maybe_closure()
self.set_setting('MINIMAL_RUNTIME')
self.do_runf(test_file('declare_asm_module_exports.cpp'), 'jsFunction: 1')
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
@parameterized({
'default': ([],),
'streaming': (['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION'],),
'streaming_inst': (['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION'],),
'no_export': (['-s', 'DECLARE_ASM_MODULE_EXPORTS=0'],)
})
def test_minimal_runtime_hello_world(self, args):
# TODO: Support for non-Node.js shells has not yet been added to MINIMAL_RUNTIME
self.banned_js_engines = [config.V8_ENGINE, config.SPIDERMONKEY_ENGINE]
self.emcc_args = args
self.set_setting('MINIMAL_RUNTIME')
self.maybe_closure()
self.do_runf(test_file('small_hello_world.c'), 'hello')
# Test that printf() works in MINIMAL_RUNTIME=1
@parameterized({
'fs': ('FORCE_FILESYSTEM',),
'nofs': ('NO_FILESYSTEM',),
})
@no_asan('TODO: ASan support in minimal runtime')
def test_minimal_runtime_hello_printf(self, extra_setting):
self.set_setting('MINIMAL_RUNTIME')
self.set_setting(extra_setting)
# $FS is not fully compatible with MINIMAL_RUNTIME so fails with closure
# compiler. lsan also pulls in $FS
if '-fsanitize=leak' not in self.emcc_args and extra_setting != 'FORCE_FILESYSTEM':
self.maybe_closure()
self.do_runf(test_file('hello_world.c'), 'hello, world!')
# Tests that -s MINIMAL_RUNTIME=1 works well with SAFE_HEAP
@no_asan('TODO: ASan support in minimal runtime')
def test_minimal_runtime_safe_heap(self):
self.set_setting('MINIMAL_RUNTIME')
self.set_setting('SAFE_HEAP')
# $FS is not fully compatible with MINIMAL_RUNTIME so fails with closure
# compiler.
# lsan pulls in $FS
if '-fsanitize=leak' not in self.emcc_args:
self.maybe_closure()
self.do_runf(test_file('small_hello_world.c'), 'hello')
# Tests global initializer with -s MINIMAL_RUNTIME=1
@no_asan('TODO: ASan support in minimal runtime')
def test_minimal_runtime_global_initializer(self):
self.set_setting('MINIMAL_RUNTIME')
self.maybe_closure()
self.do_runf(test_file('test_global_initializer.cpp'), 't1 > t0: 1')
@no_wasm2js('wasm2js does not support PROXY_TO_PTHREAD (custom section support)')
def test_return_address(self):
self.set_setting('USE_OFFSET_CONVERTER')
self.do_runf(test_file('core', 'test_return_address.c'), 'passed')
@no_wasm2js('TODO: sanitizers in wasm2js')
@no_asan('-fsanitize-minimal-runtime cannot be used with ASan')
def test_ubsan_minimal_too_many_errors(self):
self.emcc_args += ['-fsanitize=undefined', '-fsanitize-minimal-runtime']
if not self.is_wasm():
if is_optimizing(self.emcc_args):
self.skipTest('test can only be run without optimizations on asm.js')
# Need to use `-g` to get proper line numbers in asm.js
self.emcc_args += ['-g']
self.do_runf(test_file('core', 'test_ubsan_minimal_too_many_errors.c'),
expected_output='ubsan: add-overflow\n' * 20 + 'ubsan: too many errors\n')
@no_wasm2js('TODO: sanitizers in wasm2js')
@no_asan('-fsanitize-minimal-runtime cannot be used with ASan')
def test_ubsan_minimal_errors_same_place(self):
self.emcc_args += ['-fsanitize=undefined', '-fsanitize-minimal-runtime']
if not self.is_wasm():
if is_optimizing(self.emcc_args):
self.skipTest('test can only be run without optimizations on asm.js')
# Need to use `-g` to get proper line numbers in asm.js
self.emcc_args += ['-g']
self.do_runf(test_file('core', 'test_ubsan_minimal_errors_same_place.c'),
expected_output='ubsan: add-overflow\n' * 5)
@parameterized({
'fsanitize_undefined': (['-fsanitize=undefined'],),
'fsanitize_integer': (['-fsanitize=integer'],),
'fsanitize_overflow': (['-fsanitize=signed-integer-overflow'],),
})
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_overflow(self, args):
self.emcc_args += args
self.do_runf(test_file('core', 'test_ubsan_full_overflow.c'),
assert_all=True, expected_output=[
".c:3:5: runtime error: signed integer overflow: 2147483647 + 1 cannot be represented in type 'int'",
".c:7:7: runtime error: signed integer overflow: 2147483647 + 1 cannot be represented in type 'int'",
])
@parameterized({
'fsanitize_undefined': (['-fsanitize=undefined'],),
'fsanitize_return': (['-fsanitize=return'],),
})
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_no_return(self, args):
self.emcc_args += ['-Wno-return-type'] + args
self.do_runf(test_file('core', 'test_ubsan_full_no_return.cpp'),
expected_output='.cpp:1:5: runtime error: execution reached the end of a value-returning function without returning a value', assert_returncode=NON_ZERO)
@parameterized({
'fsanitize_undefined': (['-fsanitize=undefined'],),
'fsanitize_integer': (['-fsanitize=integer'],),
'fsanitize_shift': (['-fsanitize=shift'],),
})
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_left_shift(self, args):
self.emcc_args += args
self.do_runf(test_file('core', 'test_ubsan_full_left_shift.c'),
assert_all=True, expected_output=[
'.c:3:5: runtime error: left shift of negative value -1',
".c:7:5: runtime error: left shift of 16 by 29 places cannot be represented in type 'int'"
])
@parameterized({
'fsanitize_undefined': (['-fsanitize=undefined'],),
'fsanitize_null': (['-fsanitize=null'],),
})
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_null_ref(self, args):
self.emcc_args += args
self.do_runf(test_file('core', 'test_ubsan_full_null_ref.cpp'),
assert_all=True, expected_output=[
".cpp:3:12: runtime error: reference binding to null pointer of type 'int'",
".cpp:4:13: runtime error: reference binding to null pointer of type 'int'",
".cpp:5:14: runtime error: reference binding to null pointer of type 'int'",
])
@parameterized({
'fsanitize_undefined': (['-fsanitize=undefined'],),
'fsanitize_vptr': (['-fsanitize=vptr'],),
})
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_static_cast(self, args):
self.emcc_args += args
self.do_runf(test_file('core', 'test_ubsan_full_static_cast.cpp'),
assert_all=True, expected_output=[
".cpp:18:10: runtime error: downcast of address",
"which does not point to an object of type 'R'",
])
@parameterized({
'g': ('-g', [
".cpp:3:12: runtime error: reference binding to null pointer of type 'int'",
'in main',
]),
'g4': ('-gsource-map', [
".cpp:3:12: runtime error: reference binding to null pointer of type 'int'",
'in main ',
'.cpp:3:8'
]),
})
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_stack_trace(self, g_flag, expected_output):
self.emcc_args += ['-fsanitize=null', g_flag]
self.set_setting('ALLOW_MEMORY_GROWTH')
if g_flag == '-gsource-map':
if not self.is_wasm():
self.skipTest('wasm2js has no source map support')
elif '-Oz' in self.emcc_args:
self.skipTest('-Oz breaks stack traces')
def modify_env(filename):
with open(filename) as f:
contents = f.read()
contents = 'Module = {UBSAN_OPTIONS: "print_stacktrace=1"};' + contents
with open(filename, 'w') as f:
f.write(contents)
self.do_runf(test_file('core', 'test_ubsan_full_null_ref.cpp'),
post_build=modify_env, assert_all=True, expected_output=expected_output)
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_typeinfo_eq(self):
# https://github.com/emscripten-core/emscripten/issues/13330
src = r'''
#include <typeinfo>
#include <stdio.h>
int main() {
int mismatch = typeid(int) != typeid(int);
printf("ok\n");
return mismatch;
}
'''
self.emcc_args.append('-fsanitize=undefined')
self.do_run(src, 'ok\n')
def test_template_class_deduction(self):
self.emcc_args += ['-std=c++17']
self.do_core_test('test_template_class_deduction.cpp')
@no_safe_heap('asan does not work with SAFE_HEAP')
@parameterized({
'c': ['test_asan_no_error.c'],
'cpp': ['test_asan_no_error.cpp'],
})
def test_asan_no_error(self, name):
self.emcc_args.append('-fsanitize=address')
self.set_setting('ALLOW_MEMORY_GROWTH')
self.set_setting('INITIAL_MEMORY', '300mb')
self.do_runf(test_file('core', name), '', assert_returncode=NON_ZERO)
# note: these tests have things like -fno-builtin-memset in order to avoid
# clang optimizing things away. for example, a memset might be optimized into
# stores, and then the stores identified as dead, which leaves nothing for
# asan to test. here we want to test asan itself, so we work around that.
@no_safe_heap('asan does not work with SAFE_HEAP')
@parameterized({
'use_after_free_c': ('test_asan_use_after_free.c', [
'AddressSanitizer: heap-use-after-free on address',
]),
'use_after_free_cpp': ('test_asan_use_after_free.cpp', [
'AddressSanitizer: heap-use-after-free on address',
]),
'use_after_return': ('test_asan_use_after_return.c', [
'AddressSanitizer: stack-use-after-return on address',
], ['-Wno-return-stack-address']),
'static_buffer_overflow': ('test_asan_static_buffer_overflow.c', [
'AddressSanitizer: global-buffer-overflow on address',
], ['-fno-builtin-memset']),
'heap_buffer_overflow_c': ('test_asan_heap_buffer_overflow.c', [
'AddressSanitizer: heap-buffer-overflow on address',
], ['-fno-builtin-memset']),
'heap_buffer_overflow_cpp': ('test_asan_heap_buffer_overflow.cpp', [
'AddressSanitizer: heap-buffer-overflow on address',
], ['-fno-builtin-memset']),
'stack_buffer_overflow': ('test_asan_stack_buffer_overflow.c', [
'AddressSanitizer: stack-buffer-overflow'
], ['-fno-builtin-memset']),
'stack_buffer_overflow_js': ('test_asan_stack_buffer_overflow_js.c', [
'AddressSanitizer: stack-buffer-overflow'
], ['-fno-builtin-memset']),
'bitfield_unround_size': ('test_asan_bitfield_unround_size.c', [
'AddressSanitizer: stack-buffer-overflow'
], ['-fno-builtin-memset']),
'bitfield_unround_offset': ('test_asan_bitfield_unround_offset.c', [
'AddressSanitizer: stack-buffer-overflow'
], ['-fno-builtin-memset']),
'bitfield_round': ('test_asan_bitfield_round.c', [
'AddressSanitizer: stack-buffer-overflow'
], ['-fno-builtin-memset']),
'memset_null': ('test_asan_memset_null.c', [
'AddressSanitizer: null-pointer-dereference on address 0x00000001'
], ['-fno-builtin-memset']),
'memset_freed': ('test_asan_memset_freed.c', [
'AddressSanitizer: heap-use-after-free on address'
], ['-fno-builtin-memset']),
'strcpy': ('test_asan_strcpy.c', [
'AddressSanitizer: heap-buffer-overflow on address'
], ['-fno-builtin-strcpy']),
'memcpy': ('test_asan_memcpy.c', [
'AddressSanitizer: heap-buffer-overflow on address'
], ['-fno-builtin-memcpy']),
'memchr': ('test_asan_memchr.c', [
'AddressSanitizer: global-buffer-overflow on address'
], ['-fno-builtin-memchr']),
'vector': ('test_asan_vector.cpp', [
'AddressSanitizer: container-overflow on address'
]),
})
def test_asan(self, name, expected_output, cflags=None):
if '-Oz' in self.emcc_args:
self.skipTest('-Oz breaks source maps')
if not self.is_wasm():
self.skipTest('wasm2js has no ASan support')
self.emcc_args.append('-fsanitize=address')
self.set_setting('ALLOW_MEMORY_GROWTH')
self.set_setting('INITIAL_MEMORY', '300mb')
if cflags:
self.emcc_args += cflags
self.do_runf(test_file('core', name),
expected_output=expected_output, assert_all=True,
check_for_error=False, assert_returncode=NON_ZERO)
@no_safe_heap('asan does not work with SAFE_HEAP')
@no_wasm2js('TODO: ASAN in wasm2js')
def test_asan_js_stack_op(self):
self.emcc_args.append('-fsanitize=address')
self.set_setting('ALLOW_MEMORY_GROWTH')
self.set_setting('INITIAL_MEMORY', '300mb')
self.do_runf(test_file('core', 'test_asan_js_stack_op.c'),
expected_output='Hello, World!')
@no_safe_heap('asan does not work with SAFE_HEAP')
@no_wasm2js('TODO: ASAN in wasm2js')
def test_asan_api(self):
self.emcc_args.append('-fsanitize=address')
self.set_setting('INITIAL_MEMORY', '300mb')
self.do_core_test('test_asan_api.c')
@no_safe_heap('asan does not work with SAFE_HEAP')
@no_wasm2js('TODO: ASAN in wasm2js')
def test_asan_modularized_with_closure(self):
self.emcc_args.append('-fsanitize=address')
self.set_setting('MODULARIZE')
self.set_setting('EXPORT_NAME', 'createModule')
self.set_setting('USE_CLOSURE_COMPILER')
self.set_setting('ALLOW_MEMORY_GROWTH')
self.set_setting('INITIAL_MEMORY', '300mb')
def post(filename):
with open(filename, 'a') as f:
f.write('\n\n')
# the bug is that createModule() returns undefined, instead of the
# proper Promise object.
f.write('if (!(createModule() instanceof Promise)) throw "Promise was not returned :(";\n')
self.do_runf(test_file('hello_world.c'),
post_build=post,
expected_output='hello, world!')
@no_asan('SAFE_HEAP cannot be used with ASan')
def test_safe_heap_user_js(self):
self.set_setting('SAFE_HEAP')
self.do_runf(test_file('core', 'test_safe_heap_user_js.c'),
expected_output=['abort(segmentation fault storing 1 bytes to address 0)'], assert_returncode=NON_ZERO)
def test_safe_stack(self):
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.set_setting('TOTAL_STACK', 65536)
self.do_runf(test_file('core', 'test_safe_stack.c'),
expected_output=['abort(stack overflow)', '__handle_stack_overflow'], assert_returncode=NON_ZERO)
def test_safe_stack_alloca(self):
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.set_setting('TOTAL_STACK', 65536)
self.do_runf(test_file('core', 'test_safe_stack_alloca.c'),
expected_output=['abort(stack overflow)', '__handle_stack_overflow'], assert_returncode=NON_ZERO)
@needs_dylink
def test_safe_stack_dylink(self):
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.set_setting('TOTAL_STACK', 65536)
self.dylink_test(r'''
#include <stdio.h>
extern void sidey();
int main() {
sidey();
}
''', '''
#include <string.h>
static int accumulator = 0;
int f(int *b) {
// Infinite recursion while recording stack pointer locations
// so that compiler can't eliminate the stack allocs.
accumulator += (int)b;
int a[1024];
return f(a);
}
void sidey() {
f(NULL);
}
''', ['abort(stack overflow)', '__handle_stack_overflow'], assert_returncode=NON_ZERO, force_c=True)
def test_fpic_static(self):
self.emcc_args.append('-fPIC')
self.do_core_test('test_hello_world.c')
@node_pthreads
def test_pthread_create(self):
self.set_setting('EXIT_RUNTIME')
self.do_run_in_out_file_test('core', 'pthread', 'create.cpp')
@node_pthreads
def test_pthread_c11_threads(self):
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('EXIT_RUNTIME')
if not self.has_changed_setting('INITIAL_MEMORY'):
self.set_setting('INITIAL_MEMORY', '64mb')
self.do_run_in_out_file_test('pthread', 'test_pthread_c11_threads.c')
@node_pthreads
def test_pthread_cxx_threads(self):
self.set_setting('PROXY_TO_PTHREAD')
self.clear_setting('ALLOW_MEMORY_GROWTH')
self.set_setting('EXIT_RUNTIME')
self.do_run_in_out_file_test('pthread', 'test_pthread_cxx_threads.cpp')
@node_pthreads
def test_pthread_create_pool(self):
# with a pool, we can synchronously depend on workers being available
self.set_setting('PTHREAD_POOL_SIZE', '2')
self.emcc_args += ['-DALLOW_SYNC']
self.do_run_in_out_file_test('core', 'pthread', 'create.cpp')
@node_pthreads
def test_pthread_create_proxy(self):
# with PROXY_TO_PTHREAD, we can synchronously depend on workers being available
self.set_setting('PROXY_TO_PTHREAD')
self.emcc_args += ['-DALLOW_SYNC']
self.do_run_in_out_file_test('core', 'pthread', 'create.cpp')
@node_pthreads
def test_pthread_create_embind_stack_check(self):
# embind should work with stack overflow checks (see #12356)
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.emcc_args += ['--bind']
self.do_run_in_out_file_test('core', 'pthread', 'create.cpp')
@node_pthreads
def test_pthread_exceptions(self):
self.set_setting('PTHREAD_POOL_SIZE', '2')
self.emcc_args += ['-fexceptions']
self.do_run_in_out_file_test('core', 'pthread', 'exceptions.cpp')
@node_pthreads
def test_pthread_exit_process(self):
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('EXIT_RUNTIME')
self.emcc_args += ['-DEXIT_RUNTIME', '--pre-js', test_file('core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.do_run_in_out_file_test('core', 'pthread', 'test_pthread_exit_runtime.c', assert_returncode=42)
@node_pthreads
@disabled('https://github.com/emscripten-core/emscripten/issues/12945')
def test_pthread_no_exit_process(self):
# Same as above but without EXIT_RUNTIME
self.set_setting('PROXY_TO_PTHREAD')
self.emcc_args += ['--pre-js', test_file('core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.do_run_in_out_file_test('core', 'pthread', 'test_pthread_exit_runtime.c', assert_returncode=43)
@node_pthreads
@no_wasm2js('wasm2js does not support PROXY_TO_PTHREAD (custom section support)')
def test_pthread_offset_converter(self):
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('EXIT_RUNTIME')
self.set_setting('USE_OFFSET_CONVERTER')
self.do_runf(test_file('core', 'test_return_address.c'), 'passed')
@node_pthreads
@no_wasm2js('wasm2js does not support PROXY_TO_PTHREAD (custom section support)')
def test_pthread_offset_converter_modularize(self):
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('EXIT_RUNTIME')
self.set_setting('USE_OFFSET_CONVERTER')
self.set_setting('MODULARIZE')
create_file('post.js', 'var m = require("./test_return_address.js"); m();')
self.emcc_args += ['--extern-post-js', 'post.js', '-s', 'EXPORT_NAME=foo']
self.do_runf(test_file('core', 'test_return_address.c'), 'passed')
def test_emscripten_atomics_stub(self):
self.do_run_in_out_file_test('core', 'pthread', 'emscripten_atomics.c')
@no_asan('incompatibility with atomics')
@node_pthreads
def test_emscripten_atomics(self):
self.set_setting('USE_PTHREADS')
self.do_run_in_out_file_test('core', 'pthread', 'emscripten_atomics.c')
@no_asan('incompatibility with atomics')
@node_pthreads
def test_emscripten_futexes(self):
self.set_setting('USE_PTHREADS')
self.do_run_in_out_file_test('core', 'pthread', 'emscripten_futexes.c')
@needs_dylink
@node_pthreads
def test_pthread_dylink_basics(self):
self.emcc_args.append('-Wno-experimental')
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('EXIT_RUNTIME')
self.do_basic_dylink_test()
@needs_dylink
@node_pthreads
def test_pthread_dylink(self):
self.emcc_args.append('-Wno-experimental')
self.set_setting('EXIT_RUNTIME')
self.set_setting('USE_PTHREADS')
self.set_setting('PTHREAD_POOL_SIZE', '2')
main = test_file('core', 'pthread', 'test_pthread_dylink.c')
side = test_file('core', 'pthread', 'test_pthread_dylink_side.c')
self.dylink_testf(main, side, "success", need_reverse=False)
@needs_dylink
@node_pthreads
def test_pthread_dylink_tls(self):
self.emcc_args.append('-Wno-experimental')
self.set_setting('EXIT_RUNTIME')
self.set_setting('USE_PTHREADS')
self.set_setting('PTHREAD_POOL_SIZE', '1')
main = test_file('core', 'pthread', 'test_pthread_dylink_tls.c')
side = test_file('core', 'pthread', 'test_pthread_dylink_tls_side.c')
self.dylink_testf(main, side, need_reverse=False)
@needs_dylink
@node_pthreads
def test_Module_dynamicLibraries_pthreads(self):
# test that Module.dynamicLibraries works with pthreads
self.emcc_args += ['-pthread', '-Wno-experimental']
self.emcc_args += ['--extern-pre-js', 'pre.js']
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('EXIT_RUNTIME')
create_file('pre.js', '''
if ( !global.Module ) {
// This is the initial load (not a worker)
// Define the initial state of Module as we would
// in the html shell file.
// Use var to escape the scope of the if statement
var Module = {
dynamicLibraries: ['liblib.so']
};
}
''')
self.dylink_test(
r'''
#include <stdio.h>
int side();
int main() {
printf("result is %d", side());
return 0;
}
''',
r'''
int side() { return 42; }
''',
'result is 42',
auto_load=False)
# Tests the emscripten_get_exported_function() API.
def test_emscripten_get_exported_function(self):
# Could also test with -s ALLOW_TABLE_GROWTH=1
self.set_setting('RESERVED_FUNCTION_POINTERS', 2)
self.emcc_args += ['-lexports.js']
self.do_core_test('test_get_exported_function.cpp')
# Tests the emscripten_get_exported_function() API.
@no_asan('TODO: ASan support in minimal runtime')
def test_minimal_runtime_emscripten_get_exported_function(self):
# Could also test with -s ALLOW_TABLE_GROWTH=1
self.set_setting('RESERVED_FUNCTION_POINTERS', 2)
self.set_setting('MINIMAL_RUNTIME')
self.emcc_args += ['-lexports.js']
self.do_core_test('test_get_exported_function.cpp')
# Marked as impure since the WASI reactor modules (modules without main)
# are not yet supported by the wasm engines we test against.
@also_with_standalone_wasm(impure=True)
def test_undefined_main(self):
if self.get_setting('STANDALONE_WASM'):
# In standalone we don't support implicitly building without main. The user has to explicitly
# opt out (see below).
err = self.expect_fail([EMCC, test_file('core', 'test_ctors_no_main.cpp')] + self.get_emcc_args())
self.assertContained('error: undefined symbol: main (referenced by top-level compiled C/C++ code)', err)
self.assertContained('warning: To build in STANDALONE_WASM mode without a main(), use emcc --no-entry', err)
elif not self.get_setting('LLD_REPORT_UNDEFINED') and not self.get_setting('STRICT'):
# Traditionally in emscripten we allow main to be implicitly undefined. This allows programs
# with a main and libraries without a main to be compiled identically.
# However we are trying to move away from that model to a more explicit opt-out model. See:
# https://github.com/emscripten-core/emscripten/issues/9640
self.do_core_test('test_ctors_no_main.cpp')
# Disabling IGNORE_MISSING_MAIN should cause link to fail due to missing main
self.set_setting('IGNORE_MISSING_MAIN', 0)
err = self.expect_fail([EMCC, test_file('core', 'test_ctors_no_main.cpp')] + self.get_emcc_args())
self.assertContained('error: entry symbol not defined (pass --no-entry to suppress): main', err)
# In non-standalone mode exporting an empty list of functions signals that we
# don't have a main and so should not generate an error.
self.set_setting('EXPORTED_FUNCTIONS', [])
self.do_core_test('test_ctors_no_main.cpp')
self.clear_setting('EXPORTED_FUNCTIONS')
def test_undefined_main_explict(self):
# If we pass --no-entry this test should compile without issue
self.emcc_args.append('--no-entry')
self.do_core_test('test_ctors_no_main.cpp')
def test_undefined_main_wasm_output(self):
if not can_do_standalone(self):
self.skipTest('standalone mode only')
err = self.expect_fail([EMCC, '-o', 'out.wasm', test_file('core', 'test_ctors_no_main.cpp')] + self.get_emcc_args())
self.assertContained('undefined symbol: main', err)
def test_export_start(self):
if not can_do_standalone(self):
self.skipTest('standalone mode only')
self.set_setting('STANDALONE_WASM')
self.set_setting('EXPORTED_FUNCTIONS', ['__start'])
self.do_core_test('test_hello_world.c')
@unittest.skip("memory64 functionality only partially working")
def test_memory64_hello_world(self):
self.set_setting('MEMORY64', 2)
self.do_core_test('test_hello_world.c')
# Tests the operation of API found in #include <emscripten/math.h>
def test_emscripten_math(self):
self.do_core_test('test_emscripten_math.c')
# Tests that users can pass custom JS options from command line using
# the -jsDfoo=val syntax:
# See https://github.com/emscripten-core/emscripten/issues/10580.
def test_custom_js_options(self):
self.emcc_args += ['--js-library', test_file('core', 'test_custom_js_settings.js'), '-jsDCUSTOM_JS_OPTION=1']
self.do_core_test('test_custom_js_settings.c')
self.assertContained('cannot change built-in settings values with a -jsD directive', self.expect_fail([EMCC, '-jsDWASM=0']))
# Tests <emscripten/stack.h> API
@no_asan('stack allocation sizes are no longer predictable')
def test_emscripten_stack(self):
self.set_setting('TOTAL_STACK', 4 * 1024 * 1024)
self.do_core_test('test_stack_get_free.c')
# Tests settings.ABORT_ON_WASM_EXCEPTIONS
def test_abort_on_exceptions(self):
self.set_setting('ABORT_ON_WASM_EXCEPTIONS')
self.set_setting('EXPORTED_RUNTIME_METHODS', ['ccall', 'cwrap'])
self.emcc_args += ['--bind', '--post-js', test_file('core', 'test_abort_on_exception_post.js')]
self.do_core_test('test_abort_on_exception.cpp')
@needs_dylink
def test_gl_main_module(self):
self.set_setting('MAIN_MODULE')
self.do_runf(test_file('core', 'test_gl_get_proc_address.c'))
def test_REVERSE_DEPS(self):
create_file('connect.c', '#include <sys/socket.h>\nint main() { return (int)&connect; }')
self.run_process([EMCC, 'connect.c'])
base_size = os.path.getsize('a.out.wasm')
# 'auto' should work (it's the default)
self.run_process([EMCC, 'connect.c', '-sREVERSE_DEPS=auto'])
# 'all' should work too although it should produce a larger binary
self.run_process([EMCC, 'connect.c', '-sREVERSE_DEPS=all'])
self.assertGreater(os.path.getsize('a.out.wasm'), base_size)
# 'none' should fail to link because the dependency on ntohs was not added.
err = self.expect_fail([EMCC, 'connect.c', '-sREVERSE_DEPS=none'])
self.assertContained('undefined symbol: ntohs', err)
# Generate tests for everything
def make_run(name, emcc_args, settings=None, env=None):
if env is None:
env = {}
if settings is None:
settings = {}
TT = type(name, (TestCoreBase,), dict(run_name=name, env=env, __module__=__name__)) # noqa
def tearDown(self):
try:
super(TT, self).tearDown()
finally:
for k, v in self.env.items():
del os.environ[k]
TT.tearDown = tearDown
def setUp(self):
super(TT, self).setUp()
for k, v in self.env.items():
assert k not in os.environ, k + ' should not be in environment'
os.environ[k] = v
os.chdir(self.get_dir()) # Ensure the directory exists and go there
for k, v in settings.items():
self.set_setting(k, v)
self.emcc_args += emcc_args
TT.setUp = setUp
return TT
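# For reference (note added here): each make_run() call below builds a TestCoreBase
# subclass via type(); e.g. make_run('wasm2', emcc_args=['-O2']) produces a test class
# named "wasm2" whose setUp() appends -O2 to emcc_args.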
# Main wasm test modes
wasm0 = make_run('wasm0', emcc_args=['-O0'])
wasm0g = make_run('wasm0g', emcc_args=['-O0', '-g'])
wasm1 = make_run('wasm1', emcc_args=['-O1'])
wasm2 = make_run('wasm2', emcc_args=['-O2'])
wasm2g = make_run('wasm2g', emcc_args=['-O2', '-g'])
wasm3 = make_run('wasm3', emcc_args=['-O3'])
wasms = make_run('wasms', emcc_args=['-Os'])
wasmz = make_run('wasmz', emcc_args=['-Oz'])
wasmlto0 = make_run('wasmlto0', emcc_args=['-flto', '-O0'])
wasmlto1 = make_run('wasmlto1', emcc_args=['-flto', '-O1'])
wasmlto2 = make_run('wasmlto2', emcc_args=['-flto', '-O2'])
wasmlto3 = make_run('wasmlto3', emcc_args=['-flto', '-O3'])
wasmltos = make_run('wasmltos', emcc_args=['-flto', '-Os'])
wasmltoz = make_run('wasmltoz', emcc_args=['-flto', '-Oz'])
wasm2js0 = make_run('wasm2js0', emcc_args=['-O0'], settings={'WASM': 0})
wasm2js1 = make_run('wasm2js1', emcc_args=['-O1'], settings={'WASM': 0})
wasm2js2 = make_run('wasm2js2', emcc_args=['-O2'], settings={'WASM': 0})
wasm2js3 = make_run('wasm2js3', emcc_args=['-O3'], settings={'WASM': 0})
wasm2jss = make_run('wasm2jss', emcc_args=['-Os'], settings={'WASM': 0})
wasm2jsz = make_run('wasm2jsz', emcc_args=['-Oz'], settings={'WASM': 0})
# Secondary test modes - run directly when there is a specific need
# features
simd2 = make_run('simd2', emcc_args=['-O2', '-msimd128'])
bulkmem2 = make_run('bulkmem2', emcc_args=['-O2', '-mbulk-memory'])
# wasm
wasm2s = make_run('wasm2s', emcc_args=['-O2'], settings={'SAFE_HEAP': 1})
wasm2ss = make_run('wasm2ss', emcc_args=['-O2'], settings={'STACK_OVERFLOW_CHECK': 2})
# Add DEFAULT_TO_CXX=0
strict = make_run('strict', emcc_args=[], settings={'STRICT': 1})
lsan = make_run('lsan', emcc_args=['-fsanitize=leak', '-O2'], settings={'ALLOW_MEMORY_GROWTH': 1})
asan = make_run('asan', emcc_args=['-fsanitize=address', '-O2'], settings={'ALLOW_MEMORY_GROWTH': 1, 'INITIAL_MEMORY': '500mb'})
asani = make_run('asani', emcc_args=['-fsanitize=address', '-O2', '--pre-js', os.path.join(os.path.dirname(__file__), 'asan-no-leak.js')],
settings={'ALLOW_MEMORY_GROWTH': 1, 'INITIAL_MEMORY': '500mb'})
# Experimental modes (not tested by CI)
lld = make_run('lld', emcc_args=[], settings={'LLD_REPORT_UNDEFINED': 1})
minimal0 = make_run('minimal', emcc_args=['-g'], settings={'MINIMAL_RUNTIME': 1})
# TestCoreBase is just a shape for the specific subclasses, we don't test it itself
del TestCoreBase # noqa
|
the-stack_106_25301 | import json
import os
import subprocess
import urllib.request
import urllib.error
from queue import Queue
from bottle import route, run, Bottle, request, static_file
from threading import Thread
import mutagen
from mutagen.id3 import ID3, APIC
from mutagen.easyid3 import EasyID3
import yt_dlp
from yt_dlp.postprocessor.common import PostProcessor
app = Bottle()
@app.route('/yt')
def index_static():
return static_file('index.html', root='./')
@app.route('/yt/static/:filename#.*#')
def server_static(filename):
return static_file(filename, root='./static')
@app.route('/yt/q', method='GET')
def q_size():
return { "success" : True, "size" : dl_q.qsize() }
@app.route('/yt/q', method='POST')
def q_put():
url = request.forms.get( "url" )
if "" != url:
dl_q.put( DownloadItem(url) )
print("Added url " + url + " to the download queue")
return { "success" : True, "url" : url }
else:
return { "success" : False, "error" : "yt called without a url" }
@app.route('/yt/search', method='GET')
def search():
artist = request.params.get( "artist" )
title = request.params.get( "title" )
album = request.params.get( "album" )
artwork = request.params.get( "artwork-url" )
ext = request.params.get( "ext" )
search = f'{artist} {title} Lyric Video'
if search is not None and "" != search:
print( "Searching for: ", search )
dl_q.put( DownloadItem(None, artist, title, album, artwork) )
return { "success" : True }
else:
return { "success" : False, "error" : "yt called without a search query" }
def dl_worker():
while not done:
item = dl_q.get()
download(item)
dl_q.task_done()
def download(item):
mp3_postprocessor = {
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '0',
}
opus_postprocessor = {
'key': 'FFmpegExtractAudio',
'preferredcodec': item.ext,
}
metadata_postprocessor = {
'key': 'FFmpegMetadata',
'add_metadata': True
}
ydl_opts = {
'format': 'bestaudio/best',
'paths': {
'home': '/downloads/'
},
'outtmpl': '%(artist)s-%(album)s-%(track)s-[%(id)s]-(%(title)s).%(ext)s'
}
if item.ext == 'mp3': ydl_opts['postprocessors'] = [mp3_postprocessor, metadata_postprocessor]
if item.ext in ['opus', 'ogg', 'webm']: ydl_opts['postprocessors'] = [opus_postprocessor, metadata_postprocessor]
if item.url is not None:
print("Starting download of " + item.url)
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
ydl.add_post_processor(AddID3ArtworkPP())
ydl.download([item.url])
print("Finished downloading " + item.url)
else:
print(f'Starting download {item.artist}-{item.title}')
if item.artist is not None and item.album is not None:
ydl_opts['paths']['home'] = f'/downloads/{item.artist}/{item.album}/'
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
ydl.add_post_processor(AddID3ArtworkPP())
ydl.extract_info(f'ytsearch:{item.artist} {item.title} Lyric Video', extra_info={'artwork': item.artwork})
print(f'Finished downloading {item.artist}-{item.title}')
class DownloadItem(object):
def __init__(self, url=None, artist=None, title=None, album=None, artwork=None, ext='opus'):
self.url = url
self.artist = artist
self.title = title
self.album = album
self.artwork = artwork
self.ext = ext
class AddID3ArtworkPP(PostProcessor):
def run(self, info):
if info['ext'] != 'mp3':
self.to_screen('Not MP3, skipping ID3 tag update')
return [], info
self.to_screen('Setting ID3 Tags')
filepath = info['filepath']
try:
song = EasyID3(filepath)
except mutagen.id3.ID3NoHeaderError:
song = mutagen.File(filepath, easy=True)
song.add_tags()
if info['artwork'] is not None:
try:
audio = ID3(filepath)
with urllib.request.urlopen(info['artwork']) as albumart:
audio['APIC'] = APIC(
encoding=3,
mime=albumart.info().get_content_type(),
type=3, desc=u'Cover',
data=albumart.read()
)
audio.save()
except urllib.error.HTTPError as err:
print(err.reason)
            finally:
                # nothing extra to close here: the `with` block above already
                # closes the artwork response
                print("Saved Artwork Image")
return [], info
# Start queue and app
dl_q = Queue()
done = False
dl_thread = Thread(target=dl_worker)
dl_thread.start()
print("Started download thread")
app.run(host='0.0.0.0', port=8080, debug=False)
done = True
dl_thread.join()
|
the-stack_106_25302 | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import binary_sensor
from esphome.const import CONF_DIRECTION, DEVICE_CLASS_MOVING
from . import APDS9960, CONF_APDS9960_ID
DEPENDENCIES = ["apds9960"]
DIRECTIONS = {
"UP": "set_up_direction",
"DOWN": "set_down_direction",
"LEFT": "set_left_direction",
"RIGHT": "set_right_direction",
}
CONFIG_SCHEMA = binary_sensor.binary_sensor_schema(
device_class=DEVICE_CLASS_MOVING
).extend(
{
cv.GenerateID(CONF_APDS9960_ID): cv.use_id(APDS9960),
cv.Required(CONF_DIRECTION): cv.one_of(*DIRECTIONS, upper=True),
}
)
async def to_code(config):
hub = await cg.get_variable(config[CONF_APDS9960_ID])
var = await binary_sensor.new_binary_sensor(config)
func = getattr(hub, DIRECTIONS[config[CONF_DIRECTION]])
cg.add(func(var))
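# A minimal sketch of the YAML this schema is meant to accept (hypothetical values;
# the hub entry and the sensor name are assumptions, not taken from this file):
#
#   apds9960:
#     id: my_apds9960
#
#   binary_sensor:
#     - platform: apds9960
#       direction: UP
#       name: "Gesture Up"
#
# DIRECTIONS maps the validated `direction` string to the hub setter that to_code()
# invokes, so each configured sensor registers itself with the shared APDS9960 hub.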
|
the-stack_106_25303 | # coding=utf-8
from random import randint
import os
import pygame, math
from character import *
class AICharacter(Character):
def __init__(self, x, y, Vx, Vy, properties=('slime', -1, -1)):
# Properties should be a tuple of the form (STRING mobName, INT leftLimit,
# INT rightLimit) where leftLimit and rightLimit can be -1 to remove the limit
self.mobType = properties[0]
self.limit = [properties[1], properties[2]]
# Call base class implementation
Character.__init__(self, x, y, Vx, Vy)
# Decide colour if slime
self.colour = 'Blue'
if self.mobType == 'slime' and randint(0, 1) == 0:
self.colour = 'Green'
# Load images
# slime
self.slimeDL = pygame.image.load('enemies'+os.sep+'slime'+os.sep+'slime' + self.colour +'_squashed.png').convert_alpha()
self.slimeDR = pygame.image.load('enemies'+os.sep+'slime'+os.sep+'slime' + self.colour + '_squashedR.png').convert_alpha()
self.slimeL = pygame.image.load('enemies'+os.sep+'slime'+os.sep+'slime' + self.colour + '_walk.png').convert_alpha()
self.slimeR = pygame.image.load('enemies'+os.sep+'slime'+os.sep+'slime' + self.colour + '_walkR.png').convert_alpha()
# fly
self.flyDL = pygame.image.load('enemies'+os.sep+'fly'+os.sep+'fly_dead.png').convert_alpha()
self.flyDR = pygame.image.load('enemies'+os.sep+'fly'+os.sep+'fly_dead_r.png').convert_alpha()
self.flyL = pygame.image.load('enemies'+os.sep+'fly'+os.sep+'fly_fly.png').convert_alpha()
self.flyR = pygame.image.load('enemies'+os.sep+'fly'+os.sep+'fly_fly_r.png').convert_alpha()
# fish
self.fishDL = pygame.image.load('enemies'+os.sep+'other'+os.sep+'fishGreen_dead.png').convert_alpha()
self.fishDR = pygame.image.load('enemies'+os.sep+'other'+os.sep+'fishGreen_dead_r.png').convert_alpha()
self.fishL = pygame.image.load('enemies'+os.sep+'other'+os.sep+'fishGreen_swim.png').convert_alpha()
self.fishR = pygame.image.load('enemies'+os.sep+'other'+os.sep+'fishGreen_swim_r.png').convert_alpha()
# snail
self.snailL1 = pygame.image.load('enemies'+os.sep+'other'+os.sep+'snailWalk1.png').convert_alpha()
self.snailL2 = pygame.image.load('enemies'+os.sep+'other'+os.sep+'snailWalk2.png').convert_alpha()
self.snailR1 = pygame.image.load('enemies'+os.sep+'other'+os.sep+'snailWalk1R.png').convert_alpha()
self.snailR2 = pygame.image.load('enemies'+os.sep+'other'+os.sep+'snailWalk2R.png').convert_alpha()
self.snailDL = pygame.image.load('enemies'+os.sep+'other'+os.sep+'snailShell.png').convert_alpha()
self.snailDR = pygame.image.load('enemies'+os.sep+'other'+os.sep+'snailShellR.png').convert_alpha()
# general image properties
self.imageL1, self.imageL2, self.imageR1, self.imageR2, self.imageDL, self.imageDR = [None] * 6
self.deadWidth, self.deadHeight = [None] * 2
# Other control variables
self.originalHeight = y
self.alive = True
self.health = 1
self.gravity = 1
self.runSpeed = abs(self.Vx)
self.currentStep = 0
self.takenAction = False
self.updateFrequency = 2
# -----------------------------------------------------------------------------------------------------------------
@staticmethod
def distance(p0, p1):
return math.sqrt((p0[0] - p1[0]) ** 2 + (p0[1] - p1[1]) ** 2)
# -----------------------------------------------------------------------------------------------------------------
def updateAI(self, platforms, mainChar, blocks):
# Increment position by velocity
self.x += self.Vx
self.y += self.Vy
# Determine direction for draw() method
if self.Vx > 0:
self.direction = 1
elif self.Vx < 0:
self.direction = 0
# Check if character is still alive
if self.health <= 0:
self.alive = False
# Set a terminal velocity
if self.Vy >= platforms[0].height:
self.Vy = platforms[0].height - 5
if not self.onGround and self.Vy >= platforms[0].height - 15 and self.y > platforms[self.lowestPlatform][1]:
self.dispose()
# Apply gravity if necessary
if self.onGround:
self.Vy = 0
elif ((self.mobType == 'fly' and not self.alive) or self.mobType != 'fly') and (self.mobType != 'fish' or
(self.mobType == 'fish' and not self.alive)):
self.Vy += self.gravity
# Keep character within bounds
if self.limit[0] != -1 and self.x <= self.limit[0]:
self.x += self.runSpeed
self.Vx = abs(self.Vx)
if self.limit[1] != -1 and self.x >= self.limit[1]:
self.x -= self.runSpeed
self.Vx = -abs(self.Vx)
# Switch to a dead state if close to explosion
explosionRadius = 400
for block in blocks:
distanceFromBlock = self.distance((self.x + 0.5 * self.width, self.y + 0.5 * self.height),
(block.x + 0.5 * block.width, block.y + 0.5 * block.height))
if block.disabled and block.willExplode and block.explosionStep == 1 and \
distanceFromBlock < explosionRadius:
self.health = 0
# Prevent AI from falling off the lowest platform
if self.mobType == 'slime' or self.mobType == 'snail':
testXLeft = self.x - 25
testXRight = self.x + 25 + self.width
lowestPlatLeft = platforms[self.lowestPlatform][0]
lowestPlatRight = platforms[self.lowestPlatform][2]
onLowestPlatform = self.currentPlatform == self.lowestPlatform
if onLowestPlatform and testXLeft <= lowestPlatLeft and self.Vx < 0:
self.x += self.runSpeed
self.Vx *= -1
elif onLowestPlatform and testXRight >= lowestPlatRight and self.Vx > 0:
self.x -= self.runSpeed
self.Vx *= -1
# Implement simple AI
        if (self.mobType == 'slime' or self.mobType == 'snail') and randint(0, 10 - self.updateFrequency) == 0:
platformsBelowSelf = []
currentPlatformHeight = platforms[self.currentPlatform][1]
limitBackup = [self.limit[0], self.limit[1]]
self.limit[0] = platforms[self.currentPlatform][0] + 5
self.limit[1] = platforms[self.currentPlatform][2] - 40
safePlatformDropLeft, safePlatformDropRight = False, False
for i in range(0, len(platforms)):
if platforms[i][1] > currentPlatformHeight:
platformsBelowSelf.append(platforms[i])
for platform in platformsBelowSelf:
if platform[0] < platforms[self.currentPlatform][0] < platform[2]:
safePlatformDropLeft = True
if platform[0] < platforms[self.currentPlatform][2] and platform[2] > platforms[self.currentPlatform][
2]:
safePlatformDropRight = True
if safePlatformDropLeft:
self.limit[0] = limitBackup[0]
if safePlatformDropRight:
self.limit[1] = limitBackup[1]
elif self.mobType == 'fly' and self.alive and randint(0, 10 - self.updateFrequency) == 0:
self.limit[0] = platforms[0][0]
for i in range(0, len(platforms)):
if self.x + self.width + 5 >= platforms[i][0] and self.x <= platforms[i][2] and \
platforms[i][1] <= self.y <= platforms[i][3]:
self.limit[1] = platforms[i][0]
self.Vx *= -1
self.x -= self.runSpeed
# -----------------------------------------------------------------------------------------------------------------
def update(self, platforms, ev, movableObjects, blocks, aiCharacters, mainChar, pool, surface, FPS, torches=None):
# Collide with other objects
Character.collide(self, platforms, blocks, aiCharacters, pool, torches)
# Update motion and AI actions
self.updateAI(platforms, mainChar, blocks)
# Draw correct character
self.draw(surface, FPS)
# -----------------------------------------------------------------------------------------------------------------
def draw(self, surface, fps=60):
        # Return immediately if mob is invisible
if not self.visible:
return
# Determine the correct image to use
if self.mobType == 'slime' and not self.imageL1:
self.imageL1 = self.imageL2 = self.slimeL
self.imageR1 = self.imageR2 = self.slimeR
self.imageDL = self.slimeDL
self.imageDR = self.slimeDR
elif self.mobType == 'fly' and not self.imageL1:
self.imageL1 = self.imageL2 = self.flyL
self.imageR1 = self.imageR2 = self.flyR
self.imageDL = self.flyDL
self.imageDR = self.flyDR
elif self.mobType == 'fish' and not self.imageL1:
self.imageL1 = self.fishL
self.imageL2 = self.fishL
self.imageR1 = self.fishR
self.imageR2 = self.fishR
self.imageDL = self.fishDL
self.imageDR = self.fishDR
elif self.mobType == 'snail' and not self.imageL1:
self.imageL1 = self.snailL1
self.imageL2 = self.snailL2
self.imageR1 = self.snailR1
self.imageR2 = self.snailR2
self.imageDL = self.snailDL
self.imageDR = self.snailDR
# Get image widths and heights
self.width = pygame.Surface.get_width(self.imageL1)
self.height = pygame.Surface.get_height(self.imageL1)
self.deadWidth = pygame.Surface.get_width(self.imageDL)
self.deadHeight = pygame.Surface.get_height(self.imageDL)
# Increment the walking/moving frame
footstepRarity = 1
if pygame.time.get_ticks() % footstepRarity == 0:
self.walkFrame += 1
if self.walkFrame > 1:
self.walkFrame = 0
if self.direction == 1 and self.alive and self.walkFrame == 0:
surface.blit(self.imageR1, (self.x, self.y))
elif self.direction == 0 and self.alive and self.walkFrame == 0:
surface.blit(self.imageL1, (self.x, self.y))
elif self.direction == 1 and self.alive and self.walkFrame == 1:
surface.blit(self.imageR2, (self.x, self.y))
elif self.direction == 0 and self.alive and self.walkFrame == 1:
surface.blit(self.imageL2, (self.x, self.y))
elif self.direction == 1 and not self.alive:
surface.blit(self.imageDR, (self.x, self.y))
elif self.direction == 0 and not self.alive:
surface.blit(self.imageDL, (self.x, self.y))
# Recalculate the image width and height, and stop horizontal motion if the AI char is dead
if not self.alive:
self.width = self.deadWidth
self.height = self.deadHeight
self.Vx = 0
# -----------------------------------------------------------------------------------------------------------------
|
the-stack_106_25306 | #!/usr/bin/env python3
import sys
import argparse
def parse_arguments():
""" parse the arguments """
p = argparse.ArgumentParser()
p.add_argument(
"--words-file",
help="file with list of words [/usr/share/dict/words]",
default="words.txt",
)
p.add_argument(
"--tlds-file", help="file with list of tlds [tlds.txt]", default="tlds.txt"
)
p.add_argument(
"--tlds", help="manually specify tlds as comma-separated list", default=False
)
p.add_argument(
"--leet",
help="generate domains that replace letters with numbers",
action="store_true",
)
p.add_argument("--min-size", default=0, type=int, help="minimum word length")
p.add_argument("--max-size", default=100000, type=int, help="maximum word length")
return p.parse_args()
def iter_words(handle):
""" iterate over list of words in text file """
return (word.strip().lower() for word in handle)
def get_tlds(tlds_file):
""" iterate over list of tlds in text file """
with open(tlds_file) as handle:
return [line.split()[0].strip().lower() for line in handle]
def iter_domains(words, tlds):
""" list domains made from words and tlds """
    return (
        "{}.{}".format(word[:-len(tld)], tld)  # slice off the TLD suffix; rstrip() would strip characters, not the suffix
for word in words
for tld in tlds
if word.endswith(tld)
)
def l33tify(domain):
""" Produce 1337 versions of words """
replacements = {
"a": "4",
"b": "8",
"e": "3",
"g": "6",
"i": "1",
"o": "0",
"s": "5",
"t": "7",
"z": "2",
}
word, tld = domain.split(".")
return "".join([replacements.get(char, char) for char in word]) + "." + tld
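# Quick sanity check of l33tify (a hedged example; the domain itself is made up):
#   l33tify("goo.gl")  ->  "600.gl"   # 'g' -> '6', 'o' -> '0'; the TLD part is left untouched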
if __name__ == "__main__":
args = parse_arguments()
if not args.tlds:
tlds = get_tlds(args.tlds_file)
else:
tlds = args.tlds.split(",")
with open(args.words_file) as handle:
processed_domains = (
l33tify(domain) if args.leet else domain
for domain in iter_domains(iter_words(handle), tlds)
if len(domain) in range(args.min_size, args.max_size)
)
for domain in processed_domains:
print(domain)
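# Example invocation (hypothetical script name and word list; the flags match the
# parser defined above):
#   python generate_domains.py --words-file /usr/share/dict/words --tlds io,ly --leet --max-size 12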
|
the-stack_106_25307 | import os
import csv
import pickle
import argparse
import numpy as np
from tqdm import tqdm
from scipy.io import wavfile
import tensorflow as tf
import librosa
from network_model import *
from helper import *
def argument_parser():
"""
Get an argument parser for a training script.
"""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--saved_name', help='name of the file saved as a pickle file',default='m_example',type=str)
parser.add_argument('--modfolder', help='path of the saved model to be used for inference', default='../pre-model/scratch_loss',type=str)
parser.add_argument('--type', help='pretrained/linear/finetune/scratch', default='scratch',type=str)
return parser
args = argument_parser().parse_args()
#sample loading the audio files in a dictionary. '4.wav is perceptually farther than 2.ref from reference.'
def load_full_data_list(datafolder='dataset'): # check change path names
dataset={}
print("Loading Files....")
dataset['all']={}
dataset['all']['inname']=[]
dataset['all']['outname']=[]
dataset['all']['inname'].append('../sample_audio/ref.wav')
dataset['all']['outname'].append('../sample_audio/4.wav')
dataset['all']['inname'].append('../sample_audio/ref.wav')
dataset['all']['outname'].append('../sample_audio/2.wav')
return dataset
def load_full_data(dataset):
dataset['all']['inaudio'] = [None]*len(dataset['all']['inname'])
dataset['all']['outaudio'] = [None]*len(dataset['all']['outname'])
for id in tqdm(range(len(dataset['all']['inname']))):
if dataset['all']['inaudio'][id] is None:
inputData, sr = librosa.load(dataset['all']['inname'][id],sr=22050)
outputData, sr = librosa.load(dataset['all']['outname'][id],sr=22050)
            ## scale samples to the 16-bit integer range (values remain floating point)
            inputData = np.round(inputData.astype(float) * 32768)
            outputData = np.round(outputData.astype(float) * 32768)
inputData_wav = np.reshape(inputData, [-1, 1])
outputData_wav = np.reshape(outputData, [-1, 1])
shape_wav = np.shape(inputData_wav)
shape_wav1 = np.shape(outputData_wav)
inputData_wav = np.reshape(inputData_wav, [1, 1,shape_wav[0], shape_wav[1]])
outputData_wav = np.reshape(outputData_wav, [1, 1,shape_wav1[0], shape_wav1[1]])
inputData_wav = np.float32(inputData_wav)
outputData_wav = np.float32(outputData_wav)
dataset['all']['inaudio'][id] = inputData_wav
dataset['all']['outaudio'][id] = outputData_wav
return dataset
######### Data loading
dataset=load_full_data_list()
dataset=load_full_data(dataset)
######### Parameters of the model
#MAKE SURE THAT YOU UPDATE THESE PARAMETERS IF YOU MAKE ANY CHANGES TO THE MODEL.
#################################
SE_LAYERS = 13 # NUMBER OF INTERNAL LAYERS
SE_CHANNELS = 64 # NUMBER OF FEATURE CHANNELS PER LAYER
SE_LOSS_LAYERS = 14 # NUMBER OF FEATURE LOSS LAYERS
SE_NORM = "NM" # TYPE OF LAYER NORMALIZATION (NM, SBN or None)
SE_LOSS_TYPE = "FL" # TYPE OF TRAINING LOSS (L1, L2 or FL)
# FEATURE LOSS NETWORK
LOSS_LAYERS = 14 # NUMBER OF INTERNAL LAYERS
LOSS_BASE_CHANNELS = 32 # NUMBER OF FEATURE CHANNELS PER LAYER IN FIRST LAYER
LOSS_BLK_CHANNELS = 5 # NUMBER OF LAYERS BETWEEN CHANNEL NUMBER UPDATES
LOSS_NORM = 'SBN' # TYPE OF LAYER NORMALIZATION (NM, SBN or None)
SET_WEIGHT_EPOCH = 10 # NUMBER OF EPOCHS BEFORE FEATURE LOSS BALANCE
SAVE_EPOCHS = 10 # NUMBER OF EPOCHS BETWEEN MODEL SAVES
FILTER_SIZE=3
# NOTE: this intentionally re-binds load_full_data to the per-item loader used during
# inference; the bulk loader defined above has already been called at this point.
def load_full_data(dataset, sets, id_value):
noisy=dataset[sets]['inaudio'][id_value]
clean=dataset['all']['outaudio'][id_value]
clean=np.reshape(clean,[clean.shape[2]])
noisy=np.reshape(noisy,[noisy.shape[2]])
shape1=clean.shape[0]
shape2=noisy.shape[0]
if shape1>shape2:
difference=shape1-shape2
a=(np.zeros(difference))
noisy=np.append(a,noisy,axis=0)
elif shape1<shape2:
difference=shape2-shape1
a=(np.zeros(difference))
clean=np.append(a,clean,axis=0)
clean=np.reshape(clean,[1,1,clean.shape[0],1])
noisy=np.reshape(noisy,[1,1,noisy.shape[0],1])
return [clean,noisy]
def model_run():
modfolder= args.modfolder
tf.reset_default_graph()
with tf.variable_scope(tf.get_variable_scope()):
input1_wav=tf.placeholder(tf.float32,shape=[None, None, None,1])
clean1_wav=tf.placeholder(tf.float32,shape=[None, None, None,1])
keep_prob = tf.placeholder_with_default(1.0, shape=())
if args.type!='pretrained':
others,loss_sum = featureloss(input1_wav,clean1_wav,keep_prob,loss_layers=SE_LOSS_LAYERS,n_layers=LOSS_LAYERS, norm_type=LOSS_NORM, base_channels=LOSS_BASE_CHANNELS,blk_channels=LOSS_BLK_CHANNELS,ksz=FILTER_SIZE)
else:
others,loss_sum = featureloss_pretrained(input1_wav,clean1_wav,keep_prob,loss_layers=SE_LOSS_LAYERS,n_layers=LOSS_LAYERS, norm_type=LOSS_NORM, base_channels=LOSS_BASE_CHANNELS,blk_channels=LOSS_BLK_CHANNELS,ksz=FILTER_SIZE)
distance = loss_sum
distance_overall=[]
with tf.Session() as sess:
#sess.run(tf.global_variables_initializer())
loss_saver = tf.train.Saver([var for var in tf.trainable_variables()])
if args.type=='pretrained':
loss_saver.restore(sess, "%s/loss_model.ckpt" % modfolder)
else:
loss_saver.restore(sess, "%s/my_test_model" % modfolder)
for j in tqdm(range(len(dataset['all']['inname']))):
wav_in,wav_out=load_full_data(dataset,'all',j)
dist= sess.run([distance],feed_dict={input1_wav:wav_out, clean1_wav:wav_in})
distance_overall.append(dist)
return [distance_overall]
distance=[]
distance_overall=model_run()
distance.append(distance_overall)
with open('saved_distances/'+str(args.saved_name)+'.p', 'wb') as f:
pickle.dump(distance, f) |
the-stack_106_25313 | # Databricks notebook source
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, lit, udf
spark = SparkSession.builder.appName("Spark DataFrames").getOrCreate()
# COMMAND ----------
df = spark.read.options(header='True', inferSchema='True').csv('/FileStore/tables/StudentData.csv')
df.show()
# COMMAND ----------
type(df)
# COMMAND ----------
rdd = df.rdd
# COMMAND ----------
type(rdd)
# COMMAND ----------
rdd.collect()
# COMMAND ----------
rdd.filter(lambda x: x[0] == 28 ).collect()
# COMMAND ----------
rdd.filter(lambda x: x["gender"] == "Male" ).collect()
|
the-stack_106_25315 | #!/usr/bin/env python3
'''
Factutil: helper scripts for source code entities
Copyright 2012-2021 Codinuum Software Lab <https://codinuum.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from functools import reduce
import logging
from .const import SUB_SEP
logger = logging.getLogger()
def compo_to_int(s):
i = -1
try:
i = int(s)
except Exception as e:
if s != 'U':
logger.warning(str(e))
return i
class Range(object):
def __init__(self):
self._valid = False
self._encoded = None
self._enc = None
def is_valid(self):
return self._valid
def encode(self):
return self._encoded
def get_enc(self):
return self._enc
def meet(self, other):
logger.warning('not implemented')
return None
def overlaps(self, other):
return self.meet(other) != None
def contains(self, other):
logger.warning('not implemented')
return None
class LCRange(Range):
@classmethod
def from_encoded(cls, encoded):
compos = encoded.split(SUB_SEP)
obj = None
try:
sl = compo_to_int(compos[0])
sc = compo_to_int(compos[1])
el = compo_to_int(compos[2])
ec = compo_to_int(compos[3])
obj = LCRange(sl, sc, el, ec)
except Exception as e:
logger.warning(str(e))
return obj
def __init__(self, sl, sc, el, ec):
Range.__init__(self)
if sl == el:
self._valid = sc <= ec
else:
self._valid = sl < el
self._start_line = sl
self._start_col = sc
self._end_line = el
self._end_col = ec
self._enc = 'LC'
self._encoded = SUB_SEP.join([str(x) for x in [self._start_line,
self._start_col,
self._end_line,
self._end_col]])
def __eq__(self, other):
res = False
if isinstance(other, LCRange):
res = reduce(lambda x,y: x and y, [self._start_line == other._start_line,
self._start_col == other._start_col,
self._end_line == other._end_line,
self._end_col == other._end_col])
return res
@classmethod
def _meet(cls, s0, s1):
sl0, sc0, el0, ec0 = s0
sl1, sc1, el1, ec1 = s1
sl = max(sl0, sl1)
el = min(el0, el1)
sc = 0
if el0 == el1:
sc = max(sc0, sc1)
else:
if sl0 > sl1:
sc = sc0
else:
sc = sc1
ec = 0
if el0 == el1:
ec = min(ec0, ec1)
else:
if el0 > el1:
ec = ec1
else:
ec = ec0
return (sl, sc, el, ec)
def __str__(self):
s = '%dL,%dC-%dL,%dC' % (self._start_line,
self._start_col,
self._end_line,
self._end_col)
return s
def meet(self, other):
m = None
if isinstance(other, LCRange):
(sl, sc, el, ec) = LCRange._meet((self._start_line,
self._start_col,
self._end_line,
self._end_col),
(other._start_line,
other._start_col,
other._end_line,
other._end_col))
m = LCRange(sl, sc, el, ec)
if not m.is_valid():
m = None
return m
def contains(self, other):
b = False
if isinstance(other, LCRange):
b = self._start_line <= other._start_line and other._end_line <= self._end_line and self._start_col <= other._start_col and other._end_col <= self._end_col
return b
def get_start_line(self):
return self._start_line
def get_start_col(self):
return self._start_col
def get_end_line(self):
return self._end_line
def get_end_col(self):
return self._end_col
class ORange(Range):
@classmethod
def from_encoded(cls, encoded):
compos = encoded.split(SUB_SEP)
obj = None
try:
so = compo_to_int(compos[0])
eo = compo_to_int(compos[1])
obj = ORange(so, eo)
except Exception as e:
logger.warning(str(e))
return obj
def __init__(self, so, eo):
Range.__init__(self)
self._valid = so <= eo
self._start_offset = so
self._end_offset = eo
self._enc = 'O'
self._encoded = SUB_SEP.join([str(x) for x in [self._start_offset,
self._end_offset]])
def __eq__(self, other):
res = False
if isinstance(other, ORange):
res = reduce(lambda x,y: x and y, [self._start_offset == other._start_offset,
self._end_offset == other._end_offset])
return res
def __str__(self):
s = '%d-%d' % (self._start_offset,
self._end_offset)
return s
def meet(self, other):
m = None
if isinstance(other, ORange):
so = max(self._start_offset, other._start_offset)
eo = min(self._end_offset, other._end_offset)
m = ORange(so, eo)
if not m.is_valid():
m = None
return m
def contains(self, other):
b = False
if isinstance(other, ORange) or isinstance(other, LCORange):
b = self._start_offset <= other._start_offset and other._end_offset <= self._end_offset
return b
def get_start_offset(self):
return self._start_offset
def get_end_offset(self):
return self._end_offset
class LORange(ORange):
@classmethod
def from_encoded(cls, encoded):
compos = encoded.split(SUB_SEP)
obj = None
try:
sl = compo_to_int(compos[0])
so = compo_to_int(compos[2])
el = compo_to_int(compos[3])
eo = compo_to_int(compos[5])
obj = LORange(sl, so, el, eo)
except Exception as e:
logger.warning(str(e))
return obj
def __init__(self, sl, so, el, eo):
Range.__init__(self)
valid0 = sl <= el and so <= eo
self._start_line = sl
self._end_line = el
ORange.__init__(self, so, eo)
self._valid = self._valid and valid0
self._enc = 'LO'
self._encoded = SUB_SEP.join([str(x) for x in [self._start_line,
self._start_offset,
self._end_line,
self._end_offset]])
def __eq__(self, other):
res = False
if isinstance(other, LORange):
res = reduce(lambda x,y: x and y, [self._start_line == other._start_line,
self._start_offset == other._start_offset,
self._end_line == other._end_line,
                                           self._end_offset == other._end_offset])
        return res
def __str__(self):
s = '%dL(%d)-%dL(%d)' % (self._start_line,
self._start_offset,
self._end_line,
self._end_offset)
return s
def meet(self, other):
m = None
if isinstance(other, LORange):
sl = max(self._start_line, other._start_line)
el = min(self._end_line, other._end_line)
so = max(self._start_offset, other._start_offset)
eo = min(self._end_offset, other._end_offset)
m = LORange(sl, so, el, eo)
if not m.is_valid():
m = None
return m
def contains(self, other):
b = False
if isinstance(other, ORange) or isinstance(other, LORange):
b = self._start_offset <= other._start_offset and other._end_offset <= self._end_offset
return b
class LCORange(LCRange, ORange):
@classmethod
def from_encoded(cls, encoded):
compos = encoded.split(SUB_SEP)
obj = None
try:
sl = compo_to_int(compos[0])
sc = compo_to_int(compos[1])
so = compo_to_int(compos[2])
el = compo_to_int(compos[3])
ec = compo_to_int(compos[4])
eo = compo_to_int(compos[5])
obj = LCORange(sl, sc, so, el, ec, eo)
except Exception as e:
logger.warning(str(e))
return obj
def __init__(self, sl, sc, so, el, ec, eo):
LCRange.__init__(self, sl, sc, el, ec)
valid0 = self._valid
ORange.__init__(self, so, eo)
self._valid = self._valid and valid0
self._enc = 'LCO'
self._encoded = SUB_SEP.join([str(x) for x in [self._start_line,
self._start_col,
self._start_offset,
self._end_line,
self._end_col,
self._end_offset]])
def __eq__(self, other):
res = False
if isinstance(other, LCORange):
res = reduce(lambda x,y: x and y, [self._start_line == other._start_line,
self._start_col == other._start_col,
self._start_offset == other._start_offset,
self._end_line == other._end_line,
self._end_col == other._end_col,
                                           self._end_offset == other._end_offset])
        return res
def __str__(self):
s = '%dL,%dC(%d)-%dL,%dC(%d)' % (self._start_line,
self._start_col,
self._start_offset,
self._end_line,
self._end_col,
self._end_offset)
return s
def meet(self, other):
m = None
if isinstance(other, LCORange):
slc = (self._start_line,
self._start_col,
self._end_line,
self._end_col)
olc = (other._start_line,
other._start_col,
other._end_line,
other._end_col)
(sl, sc, el, ec) = LCRange._meet(slc, olc)
so = max(self._start_offset, other._start_offset)
eo = min(self._end_offset, other._end_offset)
m = LCORange(sl, sc, so, el, ec, eo)
if not m.is_valid():
m = None
return m
def contains(self, other):
b = False
if isinstance(other, ORange) or isinstance(other, LCORange):
b = self._start_offset <= other._start_offset and other._end_offset <= self._end_offset
return b
class MaxRange(Range):
def __init__(self):
Range.__init__(self)
self._valid = True
self._enc = 'MAX'
self._encoded = ''
def __eq__(self, other):
return isinstance(other, MaxRange)
def __str__(self):
return '<max range>'
def contains(self, other):
return True
def get_start_line(self):
return 1
def get_start_col(self):
return 0
def get_end_line(self):
return -1
def get_end_col(self):
return 0
def get_start_offset(self):
return 0
def get_end_offset(self):
return -1
MAX_RANGE = MaxRange()
def from_encoded(encoded):
range = None
compos = encoded.split(SUB_SEP)
n = len(compos)
if n == 6:
range = LCORange.from_encoded(encoded)
elif n == 4:
range = LCRange.from_encoded(encoded)
elif n == 2:
range = ORange.from_encoded(encoded)
return range
class Key(object):
def __init__(self, obj, *args):
self.obj = obj
(self.L, self.O) = self.chk(obj)
def chk(self, obj):
L = None
if isinstance(obj, LCRange) or isinstance(obj, LCORange):
L = obj.get_start_line()
O = None
if isinstance(obj, ORange) or isinstance(obj, LCORange):
O = obj.get_start_offset()
return (L, O)
def __lt__(self, other):
(L, O) = self.chk(other.obj)
b = False
if self.L and L:
b = self.L < L
elif self.O and O:
b = self.O < O
return b
def __gt__(self, other):
(L, O) = self.chk(other.obj)
b = False
if self.L and L:
b = self.L > L
elif self.O and O:
b = self.O > O
return b
def __eq__(self, other):
(L, O) = self.chk(other.obj)
b = False
if self.L and L:
b = self.L == L
elif self.O and O:
b = self.O == O
return b
def __le__(self, other):
(L, O) = self.chk(other.obj)
b = False
if self.L and L:
b = self.L <= L
elif self.O and O:
b = self.O <= O
return b
def __ge__(self, other):
(L, O) = self.chk(other.obj)
b = False
if self.L and L:
b = self.L >= L
elif self.O and O:
b = self.O >= O
return b
def __ne__(self, other):
(L, O) = self.chk(other.obj)
b = False
if self.L and L:
b = self.L != L
elif self.O and O:
b = self.O != O
else:
b = True
return b
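# Minimal usage sketch (an assumption about intended use, not part of the original module):
# Key is a sort key for ordering mixed Range objects by their start position.
#
#   ranges = [ORange(30, 40), ORange(10, 20), LCORange(1, 0, 5, 2, 0, 25)]
#   ranges.sort(key=Key)
#
# Ranges carrying offsets are compared through their start offsets; line/column-only
# ranges fall back to comparing start lines.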
|
the-stack_106_25318 | """
1101. The Earliest Moment When Everyone Become Friends
Medium
In a social group, there are N people, with unique integer ids from 0 to N-1.
We have a list of logs, where each logs[i] = [timestamp, id_A, id_B] contains a non-negative integer timestamp, and the ids of two different people.
Each log represents the time in which two different people became friends. Friendship is symmetric: if A is friends with B, then B is friends with A.
Let's say that person A is acquainted with person B if A is friends with B, or A is a friend of someone acquainted with B.
Return the earliest time for which every person became acquainted with every other person. Return -1 if there is no such earliest time.
Example 1:
Input: logs = [[20190101,0,1],[20190104,3,4],[20190107,2,3],[20190211,1,5],[20190224,2,4],[20190301,0,3],[20190312,1,2],[20190322,4,5]], N = 6
Output: 20190301
Explanation:
The first event occurs at timestamp = 20190101 and after 0 and 1 become friends we have the following friendship groups [0,1], [2], [3], [4], [5].
The second event occurs at timestamp = 20190104 and after 3 and 4 become friends we have the following friendship groups [0,1], [2], [3,4], [5].
The third event occurs at timestamp = 20190107 and after 2 and 3 become friends we have the following friendship groups [0,1], [2,3,4], [5].
The fourth event occurs at timestamp = 20190211 and after 1 and 5 become friends we have the following friendship groups [0,1,5], [2,3,4].
The fifth event occurs at timestamp = 20190224 and as 2 and 4 are already friends nothing happens.
The sixth event occurs at timestamp = 20190301 and after 0 and 3 become friends we have that all become friends.
Note:
2 <= N <= 100
1 <= logs.length <= 10^4
0 <= logs[i][0] <= 10^9
0 <= logs[i][1], logs[i][2] <= N - 1
It's guaranteed that all timestamps in logs[i][0] are different.
logs are not necessarily ordered by some criteria.
logs[i][1] != logs[i][2]
"""
from typing import List


class Solution:
def earliestAcq(self, logs: List[List[int]], N: int) -> int:
uf = {x: x for x in range(N)}
self.groups = N
def merge(x, y):
x, y = find(x), find(y)
if x != y:
self.groups -= 1
uf[x] = y
def find(x):
if uf[x] != x:
uf[x] = find(uf[x])
return uf[x]
for t, x, y in sorted(logs):
merge(x, y)
if self.groups == 1:
return t
return -1 |
the-stack_106_25319 | # IMPORTATION STANDARD
from datetime import datetime
# IMPORTATION THIRDPARTY
import pandas as pd
import pytest
# IMPORTATION INTERNAL
from gamestonk_terminal.stocks.options import alphaquery_view
@pytest.mark.vcr
def test_display_put_call_ratio(mocker):
# MOCK CHARTS
mocker.patch.object(target=alphaquery_view.gtff, attribute="USE_ION", new=True)
mocker.patch(target="gamestonk_terminal.stocks.backtesting.bt_view.plt.ion")
mocker.patch(target="gamestonk_terminal.stocks.backtesting.bt_view.plt.show")
alphaquery_view.display_put_call_ratio(
ticker="PM",
window=10,
start_date=datetime.strptime("2021-12-01", "%Y-%m-%d"),
export="",
)
@pytest.mark.vcr(record_mode="none")
@pytest.mark.record_stdout
def test_display_put_call_ratio_empty(mocker):
mocker.patch(
target="gamestonk_terminal.stocks.options.alphaquery_view.alphaquery_model.get_put_call_ratio",
return_value=pd.DataFrame(),
)
alphaquery_view.display_put_call_ratio(
ticker="PM",
window=10,
start_date=datetime.strptime("2021-12-01", "%Y-%m-%d"),
export="",
)
|
the-stack_106_25321 | from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework import viewsets, mixins, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from core.models import Tag, Ingredient, Recipe
from recipe import serializers
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
"""Base viewset for user owned recipe attributes"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
"""Return objects for the current authenticated user only"""
assigned_only = bool(
int(self.request.query_params.get('assigned_only', 0))
)
queryset = self.queryset
if assigned_only:
queryset = queryset.filter(recipe__isnull=False)
return queryset.filter(
user=self.request.user
).order_by('-name').distinct()
def perform_create(self, serializer):
"""Create a new object"""
serializer.save(user=self.request.user)
class TagViewSet(BaseRecipeAttrViewSet):
"""Manage tags in the database"""
queryset = Tag.objects.all()
serializer_class = serializers.TagSerializer
class IngredientViewSet(BaseRecipeAttrViewSet):
"""Manage ingredients in the database"""
queryset = Ingredient.objects.all()
serializer_class = serializers.IngredientSerializer
class RecipeViewSet(viewsets.ModelViewSet):
"""Manage recipes in the database"""
serializer_class = serializers.RecipeSerializer
queryset = Recipe.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def _params_to_ints(self, qs):
"""Convert a list of string IDs to a list of integers"""
return [int(str_id) for str_id in qs.split(',')]
def get_queryset(self):
"""Retrieve the recipes for the auth user"""
tags = self.request.query_params.get('tags')
ingredients = self.request.query_params.get('ingredients')
queryset = self.queryset
if tags:
tag_ids = self._params_to_ints(tags)
queryset = queryset.filter(tags__id__in=tag_ids)
if ingredients:
ingredient_ids = self._params_to_ints(ingredients)
queryset = queryset.filter(ingredients__id__in=ingredient_ids)
return queryset.filter(user=self.request.user)
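        # Example of the filtering implemented above (hypothetical ids and URL path,
        # shown only to illustrate the comma-separated query parameters):
        #   GET /api/recipe/recipes/?tags=1,2&ingredients=3
        # returns only the requesting user's recipes tagged 1 or 2 that also use
        # ingredient 3.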
def get_serializer_class(self):
"""Return appropriate serialzer class"""
if self.action == 'retrieve':
return serializers.RecipeDetailSerializer
elif self.action == 'upload_image':
return serializers.RecipeImageSerializer
return self.serializer_class
def perform_create(self, serializer):
"""Create a new recipe"""
serializer.save(user=self.request.user)
@action(methods=['POST'], detail=True, url_path='upload-image')
def upload_image(self, request, pk=None):
"""Upload an image to a recipe"""
recipe = self.get_object()
serializer = self.get_serializer(
recipe,
data=request.data
)
if serializer.is_valid():
serializer.save()
return Response(
serializer.data,
status=status.HTTP_200_OK
)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
|
the-stack_106_25322 | #Надо подправить!!
import codecs
import operator
import os
import re
freq = {}
pages = os.listdir('./corpora')
for page in pages:
    # read from the same ./corpora directory that was listed above
    f = codecs.open('./corpora/' + page, 'r', 'utf-8')
text = f.read()
f.close()
words = text.split(u'|')
for word in words:
if word.startswith(u'<'):
word = word[4:-6]
        if word in freq:
            freq[word] += 1
        else:
            freq[word] = 1
f = codecs.open(u'Word frequency.txt', 'w', 'utf-8')
for word in reversed(sorted(freq, key=freq.get)):
line = word + u' - ' + str(freq[word])
    if line == u'':
        break
f.write(line)
f.write(os.linesep)
|
the-stack_106_25323 |
from cmsfix.views import *
from cmsfix.views.node import get_node, get_add_menu
from cmsfix.views.node.node import ( nav, node_submit_bar,
NodeViewer,
)
from cmsfix.models.pagenode import PageNode
from cmsfix.lib.workflow import get_workflow
from cmsfix.lib import macro
from rhombus.lib.utils import get_dbhandler, cerr, cout
from rhombus.lib.tags import *
import docutils.core
import os
class PageNodeViewer(NodeViewer):
template_edit = 'cmsfix:templates/pagenode/edit.mako'
template_view = 'cmsfix:templates/pagenode/node.mako'
mimetype_filter = lambda c, x: x.startswith('text/')
def render(self, request):
node = self.node
# set the formatter
if node.mimetype == 'text/x-rst':
content = literal(render_rst(node.content))
content = literal(macro.postrender(content, node, request))
elif node.mimetype == 'text/html':
content = literal(node.content)
elif node.mimetype == 'text/plain':
content = pre(node.content)
else:
content = node.content
# check if we have a custom template
path = node.path
if path == '/':
path = 'home.mako'
else:
path = path[1:] + '.mako'
path = request.registry.settings.get('cmsfix.templatedir', '') + path
cerr('checking for template %s' % path)
if os.path.exists(path):
template_view = path
else:
template_view = self.template_view
return render_to_response(template_view,
{ 'node': node,
'breadcrumb': self.breadcrumb(request),
'infobar': self.infobar(request),
'html': content,
'stickybar': self.statusbar(request),
'macro': macro,
}, request = request )
def parse_form(self, f, d=None):
d = super().parse_form(f, d)
if 'cmsfix-title' in f:
d['title'] = f['cmsfix-title']
if 'cmsfix-content' in f:
d['content'] = macro.postedit(f['cmsfix-content'], self.node)
if 'cmsfix-summary' in f:
d['summary'] = f['cmsfix-summary']
# some of our inherited class might not use keywords
if 'cmsfix-keywords' in f:
d['keywords'] = f['cmsfix-keywords']
if 'cmsfix-options' in f:
d['view'] = True if 'cmsfix-view' in f else False
d['flags-on'] = d['flags-off'] = 0
if 'cmsfix-inmenu' in f:
d['flags-on'] = d['flags-on'] | self.node.f_inmenu
else:
d['flags-off'] = d['flags-off'] | self.node.f_inmenu
return d
def edit_form(self, request, create=False):
dbh = get_dbhandler()
n = self.node
eform, jscode = super().edit_form(request, create)
eform.get('cmsfix.node-main').add(
input_text('cmsfix-title', 'Title', value=n.title, offset=1),
input_textarea('cmsfix-content', 'Content', value=n.content, offset=1, size="18x8",
info = 'Docs on <a href="/dashboard/docs/reST.rst" target="_blank">reStructuredText</a>'
' and <a href="/dashboard/docs/@macro" target="_blank">Macros</a>' ),
#div(literal(node.content) if node.mimetype == 'text/html' else node.content,
# id='cmsfix-content', name='cmsfix-content'),
input_textarea('cmsfix-summary', 'Summary', value=n.summary, offset=1, size='5x8'),
input_textarea('cmsfix-keywords', 'Keywords', value=n.keywords, offset=1, size='2x8'),
)
#eform.get('cmsfix-option-group').add(
# checkbox_item('cmsfix-view', 'View as index', n.view ),
# checkbox_item('cmsfix-inmenu', 'In Menu', n.check_flags(n.f_inmenu)),
#)
eform.get('cmsfix-mimetype_id').attrs['onChange'] = 'set_editor(this.value);'
jscode += 'var html_mimetype=%d;\n' % dbh.EK.getid('text/html', dbh.session(), '@MIMETYPE')
return eform, jscode
def properties_form(self, request, create=False):
dbh = get_dbhandler()
n = self.node
pform, jscode = super().properties_form(request, create)
pform.get('cmsfix-option-group').add(
checkbox_item('cmsfix-view', 'View as index', n.view ),
checkbox_item('cmsfix-inmenu', 'In Menu', n.check_flags(n.f_inmenu)),
)
return pform, jscode
def editingbar(self, request):
bar = super().editingbar(request)
bar.get('cmsfix.editingbar.left').add(
li(a(span('Preview', class_='btn btn-primary navbar-btn'),
onclick=literal(r"alert('Not implemented yet');"))),
)
return bar
def new_node(self):
n = super().new_node()
n.mimetype_id = get_dbhandler().get_ekey('text/x-rst').id
return n
def index_xxx(request, node):
return render_pagenode(node, request)
def view_xxx(request, node):
return render_pagenode(node, request)
def info_xxx(request, node):
raise NotImplementedError()
def content_xxx(request, node):
return render_pagenode_content(node, request)
def edit_xxx(request, node):
if request.method == 'POST':
# update data
d = parse_form(request.params)
node.update(d)
if request.params['_method'] == 'save_edit':
return HTTPFound(location = request.route_url('node-edit', path=node.url))
print(node.url)
return HTTPFound(location = request.route_url('node-index', path=node.url))
eform, jscode = edit_form(node, request)
return render_to_response('cmsfix:templates/pagenode/edit.mako',
{ 'parent_url': ('/' + node.parent.url) if node.parent else 'None',
'node': node,
'toolbar': toolbar(request, node),
'eform': eform,
'code': jscode,
}, request = request )
def add_xxx(request, node):
if request.method == 'POST':
# sanity check
d = parse_form(request.params)
new_node = PageNode()
get_workflow(new_node).set_defaults(new_node, request.user, node)
new_node.update(d)
if not new_node.slug:
new_node.generate_slug()
node.add(new_node)
get_dbhandler().session().flush()
new_node.ordering = 19 * new_node.id
if request.params['_method'].endswith('_edit'):
return HTTPFound(location = request.route_url('node-edit', path=new_node.url))
return HTTPFound(location = new_node.path)
# show the edit form
# create a dummy node
dbh = get_dbhandler()
with dbh.session().no_autoflush:
new_node = PageNode()
new_node.parent_id = node.id
new_node.site = node.site
new_node.group_id = node.group_id
new_node.user_id = request.user.id
new_node.mimetype_id = dbh.get_ekey('text/x-rst').id
eform, jscode = edit_form(new_node, request, create=True)
return render_to_response('cmsfix:templates/pagenode/edit.mako',
{ 'parent_url': node.path,
'node': new_node,
'toolbar': '', # new node does not have toolbar yet!
'eform': eform,
'code': jscode,
}, request = request )
def action_xxx(request, node):
raise NotImplementedError()
def render_pagenode_xxx(node, request):
# set the formatter
if node.mimetype == 'text/x-rst':
content = literal(render_rst(node.content))
content = literal(postrender(content, node))
elif node.mimetype == 'text/html':
content = literal(node.content)
else:
content = node.content
return render_to_response('cmsfix:templates/pagenode/node.mako',
{ 'node': node,
'toolbar': toolbar(request, node),
'html': content,
}, request = request )
def render_pagenode_content_xxx(node, request):
return render_node_content(node, request)
def edit_form_xxx(node, request, create=False):
dbh = get_dbhandler()
eform, jscode = node_edit_form(node, request, create)
eform.get('cmsfix.node-main').add(
input_text('cmsfix-title', 'Title', value=node.title, offset=1),
node_submit_bar(create),
input_textarea('cmsfix-content', 'Content', value=node.content, offset=1, size="18x8"),
#div(literal(node.content) if node.mimetype == 'text/html' else node.content,
# id='cmsfix-content', name='cmsfix-content'),
input_textarea('cmsfix-summary', 'Summary', value=node.summary, offset=1, size='5x8')
)
eform.get('cmsfix-mimetype_id').attrs['onChange'] = 'set_editor(this.value);'
jscode += 'var html_mimetype=%d;\n' % dbh.EK.getid('text/html', dbh.session(), '@MIMETYPE')
return eform, jscode
def parse_form_xxx(f, d=None):
d = node_parse_form(f, d)
d['title'] = f['cmsfix-title']
d['content'] = f['cmsfix-content']
d['summary'] = f['cmsfix-summary']
return d
def toolbar_xxx(request, n):
wf = get_workflow()
if not wf.is_manageable(n, request.user):
return ''
bar = nav(class_='navbar navbar-default')[
div(class_='container-fluid')[
div(class_='collapse navbar-collapse')[
ul(class_='nav navbar-nav')[
li(a('View', href=request.route_url('node-view', path=n.url))),
li(a('Edit', href=request.route_url('node-edit', path=n.url))),
li(a('Content', href=request.route_url('node-content', path=n.url))),
li(a('Info', href=request.route_url('node-info', path=n.url))),
get_add_menu(n, request),
],
ul(class_='nav navbar-nav navbar-right')[
li(a('Delete')),
wf.show_menu(n, request)
]
]
]
]
return bar
def render_rst(text, format='html'):
parts = docutils.core.publish_parts( text, writer_name=format,
settings_overrides={'initial_header_level': 2} )
if format == 'html':
return parts['html_body']
return None
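# Rough usage sketch (the input string is made up; the exact markup of the returned
# fragment depends on the installed docutils version):
#   html = render_rst("Title\n=====\n\nSome *emphasised* text.")
#   # html is the docutils "html_body" fragment, e.g. a <div class="document"> block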
|
the-stack_106_25324 | from collections import defaultdict
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, List
from .time_util import datetime_to_ms_timestamp, round_single_commit_by_time
# files we don't want to count towards lines of code
EXCLUSION_LIST = [
'.lock',
'package.json'
]
@dataclass
class Commit:
"""
This is just a dataclass that holds data, see class below
You can then do something like
    c = Commit(hash='abc123', msg='hello ', author='nini', ...)
    print(c.msg + c.author)
    >>> 'hello nini'
"""
hash: str
msg: str
author: str
committer: str
author_date: datetime
author_timezone: int
committer_date: datetime
ms_timestamp: int
committer_timezone: int
in_main_branch: bool
merge: bool
modified_files: List[Any]
project_name: str
project_path: str
deletions: int
insertions: int
lines: int
files: int
dmm_unit_size: float
dmm_unit_complexity: float
dmm_unit_interfacing: float
# non pydriller.Commit custom attributes below
rounded_commit_time_5min: str
file_extensions: List[str]
loc_changed_by_file_extension: Dict[str, int]
methods_modified: List[str]
class CommitHandler:
def create_commit(self, commit):
'''
Takes in a raw pydriller commit object and extracts the relevant fields into a new
Commit dataclass object
        :param commit: the raw pydriller commit object to convert
'''
print(" loading commit from {}".format(commit.project_name))
# lines is insertions AND deletions
deletions, insertions, lines = commit.deletions, commit.insertions, commit.lines
# if file is in exclusion list by extension, ignore for line count
for modified_file in commit.modified_files:
if modified_file.filename.endswith(tuple(EXCLUSION_LIST)):
deletions -= modified_file.deleted_lines
insertions -= modified_file.added_lines
lines -= (modified_file.deleted_lines + modified_file.added_lines)
return Commit(
hash = commit.hash,
msg = commit.msg,
author = commit.author ,
committer = commit.committer,
author_date = commit.author_date,
author_timezone = commit.author_timezone,
committer_date = commit.committer_date,
ms_timestamp = datetime_to_ms_timestamp(commit.committer_date),
committer_timezone = commit.committer_timezone,
in_main_branch = commit.in_main_branch,
merge = commit.merge,
modified_files = commit.modified_files,
project_name = commit.project_name,
project_path = commit.project_path,
deletions = deletions,
insertions = insertions,
lines = lines,
files = commit.files,
dmm_unit_size = commit.dmm_unit_size,
dmm_unit_complexity = commit.dmm_unit_complexity,
dmm_unit_interfacing = commit.dmm_unit_interfacing,
rounded_commit_time_5min = round_single_commit_by_time(commit.committer_date, granularity_min = 5),
file_extensions = self.get_commit_file_extensions(commit),
loc_changed_by_file_extension = self.get_loc_changed_by_file_extension(commit),
methods_modified = self.get_methods_modified(commit)
)
# Note: because of how Git works, modified_files will be empty for merge commits
# - file_extensions will also be empty
def get_commit_file_extensions(self, commit) -> List[str]:
"""
Returns a list of the file extensions of files changed in a single commit
"""
return [f.filename.split('.')[-1] for f in commit.modified_files]
def get_loc_changed_by_file_extension(self, commit) -> Dict[str, int]:
"""
Returns a dictionary which counts the lines of code changed by all files with the same file extension in a single commit
# example of single commit
a.json +5
aa.json +5
aaa.json +5
b.py +4
c.js +90
returns:
{json: 15, py: 4, js: 90}
"""
tally_dictionary = defaultdict(int)
for f in commit.modified_files:
extension_name = f.filename.split('.')[-1]
tally_dictionary[extension_name] += (f.added_lines - f.deleted_lines)
return tally_dictionary
def get_methods_modified(self, commit) -> List[Any]: #List[Method]
changed_methods_nested = [f.changed_methods for f in commit.modified_files]
#unpack list of lists into single list
return [item for sublist in changed_methods_nested for item in sublist]
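# Rough usage sketch (assumes pydriller 2.x, where Repository(...).traverse_commits()
# yields raw commit objects; the repository path is a placeholder):
#
#   from pydriller import Repository
#   handler = CommitHandler()
#   commits = [handler.create_commit(c) for c in Repository('path/to/repo').traverse_commits()]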
|
the-stack_106_25325 | import os
import sys
import textwrap
import pytest
from tests.lib import assert_all_changes, pyversion
from tests.lib.local_repos import local_checkout
def test_no_upgrade_unless_requested(script):
"""
No upgrade if not specifically requested.
"""
script.pip('install', 'INITools==0.1', expect_error=True)
result = script.pip('install', 'INITools', expect_error=True)
assert not result.files_created, (
'pip install INITools upgraded when it should not have'
)
def test_invalid_upgrade_strategy_causes_error(script):
"""
It errors out when the upgrade-strategy is an invalid/unrecognised one
"""
result = script.pip_install_local(
'--upgrade', '--upgrade-strategy=bazinga', 'simple',
expect_error=True
)
assert result.returncode
assert "invalid choice" in result.stderr
def test_only_if_needed_does_not_upgrade_deps_when_satisfied(script):
"""
It doesn't upgrade a dependency if it already satisfies the requirements.
"""
script.pip_install_local('simple==2.0', expect_error=True)
result = script.pip_install_local(
'--upgrade', '--upgrade-strategy=only-if-needed', 'require_simple',
expect_error=True
)
assert (
(script.site_packages / 'require_simple-1.0-py%s.egg-info' % pyversion)
not in result.files_deleted
), "should have installed require_simple==1.0"
assert (
(script.site_packages / 'simple-2.0-py%s.egg-info' % pyversion)
not in result.files_deleted
), "should not have uninstalled simple==2.0"
assert (
"Requirement already satisfied, skipping upgrade: simple"
in result.stdout
), "did not print correct message for not-upgraded requirement"
def test_only_if_needed_does_upgrade_deps_when_no_longer_satisfied(script):
"""
It does upgrade a dependency if it no longer satisfies the requirements.
"""
script.pip_install_local('simple==1.0', expect_error=True)
result = script.pip_install_local(
'--upgrade', '--upgrade-strategy=only-if-needed', 'require_simple',
expect_error=True
)
assert (
(script.site_packages / 'require_simple-1.0-py%s.egg-info' % pyversion)
not in result.files_deleted
), "should have installed require_simple==1.0"
assert (
script.site_packages / 'simple-3.0-py%s.egg-info' %
pyversion in result.files_created
), "should have installed simple==3.0"
assert (
script.site_packages / 'simple-1.0-py%s.egg-info' %
pyversion in result.files_deleted
), "should have uninstalled simple==1.0"
def test_eager_does_upgrade_dependencies_when_currently_satisfied(script):
"""
It does upgrade a dependency even if it already satisfies the requirements.
"""
script.pip_install_local('simple==2.0', expect_error=True)
result = script.pip_install_local(
'--upgrade', '--upgrade-strategy=eager', 'require_simple',
expect_error=True
)
assert (
(script.site_packages / 'require_simple-1.0-py%s.egg-info' % pyversion)
not in result.files_deleted
), "should have installed require_simple==1.0"
assert (
(script.site_packages / 'simple-2.0-py%s.egg-info' % pyversion)
in result.files_deleted
), "should have uninstalled simple==2.0"
def test_eager_does_upgrade_dependencies_when_no_longer_satisfied(script):
"""
It does upgrade a dependency if it no longer satisfies the requirements.
"""
script.pip_install_local('simple==1.0', expect_error=True)
result = script.pip_install_local(
'--upgrade', '--upgrade-strategy=eager', 'require_simple',
expect_error=True
)
assert (
(script.site_packages / 'require_simple-1.0-py%s.egg-info' % pyversion)
not in result.files_deleted
), "should have installed require_simple==1.0"
assert (
script.site_packages / 'simple-3.0-py%s.egg-info' %
pyversion in result.files_created
), "should have installed simple==3.0"
assert (
script.site_packages / 'simple-1.0-py%s.egg-info' %
pyversion in result.files_deleted
), "should have uninstalled simple==1.0"
@pytest.mark.network
def test_upgrade_to_specific_version(script):
"""
It does upgrade to specific version requested.
"""
script.pip('install', 'INITools==0.1', expect_error=True)
result = script.pip('install', 'INITools==0.2', expect_error=True)
assert result.files_created, (
'pip install with specific version did not upgrade'
)
assert (
script.site_packages / 'INITools-0.1-py%s.egg-info' %
pyversion in result.files_deleted
)
assert (
script.site_packages / 'INITools-0.2-py%s.egg-info' %
pyversion in result.files_created
)
@pytest.mark.network
def test_upgrade_if_requested(script):
"""
And it does upgrade if requested.
"""
script.pip('install', 'INITools==0.1', expect_error=True)
result = script.pip('install', '--upgrade', 'INITools', expect_error=True)
assert result.files_created, 'pip install --upgrade did not upgrade'
assert (
script.site_packages / 'INITools-0.1-py%s.egg-info' %
pyversion not in result.files_created
)
def test_upgrade_with_newest_already_installed(script, data):
"""
If the newest version of a package is already installed, the package should
not be reinstalled and the user should be informed.
"""
script.pip('install', '-f', data.find_links, '--no-index', 'simple')
result = script.pip(
'install', '--upgrade', '-f', data.find_links, '--no-index', 'simple'
)
assert not result.files_created, 'simple upgraded when it should not have'
assert 'already up-to-date' in result.stdout, result.stdout
@pytest.mark.network
def test_upgrade_force_reinstall_newest(script):
"""
Force reinstallation of a package even if it is already at its newest
version if --force-reinstall is supplied.
"""
result = script.pip('install', 'INITools')
assert script.site_packages / 'initools' in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip(
'install', '--upgrade', '--force-reinstall', 'INITools'
)
assert result2.files_updated, 'upgrade to INITools 0.3 failed'
result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [script.venv / 'build', 'cache'])
@pytest.mark.network
def test_uninstall_before_upgrade(script):
"""
Automatic uninstall-before-upgrade.
"""
result = script.pip('install', 'INITools==0.2', expect_error=True)
assert script.site_packages / 'initools' in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip('install', 'INITools==0.3', expect_error=True)
assert result2.files_created, 'upgrade to INITools 0.3 failed'
result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [script.venv / 'build', 'cache'])
@pytest.mark.network
def test_uninstall_before_upgrade_from_url(script):
"""
Automatic uninstall-before-upgrade from URL.
"""
result = script.pip('install', 'INITools==0.2', expect_error=True)
assert script.site_packages / 'initools' in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip(
'install',
'https://files.pythonhosted.org/packages/source/I/INITools/INITools-'
'0.3.tar.gz',
expect_error=True,
)
assert result2.files_created, 'upgrade to INITools 0.3 failed'
result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [script.venv / 'build', 'cache'])
@pytest.mark.network
def test_upgrade_to_same_version_from_url(script):
"""
    When installing the same version from a URL as is already installed, there
    is no need to uninstall and reinstall if --upgrade is not specified.
"""
result = script.pip('install', 'INITools==0.3', expect_error=True)
assert script.site_packages / 'initools' in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip(
'install',
'https://files.pythonhosted.org/packages/source/I/INITools/INITools-'
'0.3.tar.gz',
expect_error=True,
)
assert not result2.files_updated, 'INITools 0.3 reinstalled same version'
result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [script.venv / 'build', 'cache'])
@pytest.mark.network
def test_upgrade_from_reqs_file(script):
"""
Upgrade from a requirements file.
"""
script.scratch_path.join("test-req.txt").write(textwrap.dedent("""\
PyLogo<0.4
# and something else to test out:
INITools==0.3
"""))
install_result = script.pip(
'install', '-r', script.scratch_path / 'test-req.txt'
)
script.scratch_path.join("test-req.txt").write(textwrap.dedent("""\
PyLogo
# and something else to test out:
INITools
"""))
script.pip(
'install', '--upgrade', '-r', script.scratch_path / 'test-req.txt'
)
uninstall_result = script.pip(
'uninstall', '-r', script.scratch_path / 'test-req.txt', '-y'
)
assert_all_changes(
install_result,
uninstall_result,
[script.venv / 'build', 'cache', script.scratch / 'test-req.txt'],
)
def test_uninstall_rollback(script, data):
"""
    Test uninstall-rollback (using a test package with a setup.py
    crafted to fail on install).
"""
result = script.pip(
'install', '-f', data.find_links, '--no-index', 'broken==0.1'
)
assert script.site_packages / 'broken.py' in result.files_created, list(
result.files_created.keys()
)
result2 = script.pip(
'install', '-f', data.find_links, '--no-index', 'broken===0.2broken',
expect_error=True,
)
assert result2.returncode == 1, str(result2)
assert script.run(
'python', '-c', "import broken; print(broken.VERSION)"
).stdout == '0.1\n'
assert_all_changes(
result.files_after,
result2,
[script.venv / 'build'],
)
@pytest.mark.network
def test_should_not_install_always_from_cache(script):
"""
    If there is an old cached package, pip should download the newer version.
    Related to issue #175.
"""
script.pip('install', 'INITools==0.2', expect_error=True)
script.pip('uninstall', '-y', 'INITools')
result = script.pip('install', 'INITools==0.1', expect_error=True)
assert (
script.site_packages / 'INITools-0.2-py%s.egg-info' %
pyversion not in result.files_created
)
assert (
script.site_packages / 'INITools-0.1-py%s.egg-info' %
pyversion in result.files_created
)
@pytest.mark.network
def test_install_with_ignoreinstalled_requested(script):
"""
    Test that the old conflicting package is completely ignored.
"""
script.pip('install', 'INITools==0.1', expect_error=True)
result = script.pip('install', '-I', 'INITools==0.3', expect_error=True)
assert result.files_created, 'pip install -I did not install'
# both the old and new metadata should be present.
assert os.path.exists(
script.site_packages_path / 'INITools-0.1-py%s.egg-info' % pyversion
)
assert os.path.exists(
script.site_packages_path / 'INITools-0.3-py%s.egg-info' % pyversion
)
@pytest.mark.network
def test_upgrade_vcs_req_with_no_dists_found(script, tmpdir):
"""It can upgrade a VCS requirement that has no distributions otherwise."""
req = "%s#egg=pip-test-package" % local_checkout(
"git+https://github.com/pypa/pip-test-package.git",
tmpdir.join("cache"),
)
script.pip("install", req)
result = script.pip("install", "-U", req)
assert not result.returncode
@pytest.mark.network
def test_upgrade_vcs_req_with_dist_found(script):
"""It can upgrade a VCS requirement that has distributions on the index."""
# TODO(pnasrat) Using local_checkout fails on windows - oddness with the
# test path urls/git.
req = (
"%s#egg=pretend" %
(
"git+git://github.com/alex/pretend@e7f26ad7dbcb4a02a4995aade4"
"743aad47656b27"
)
)
script.pip("install", req, expect_stderr=True)
result = script.pip("install", "-U", req, expect_stderr=True)
assert "pypi.org" not in result.stdout, result.stdout
class TestUpgradeDistributeToSetuptools(object):
"""
From pip1.4 to pip6, pip supported a set of "hacks" (see Issue #1122) to
allow distribute to conflict with setuptools, so that the following would
work to upgrade distribute:
``pip install -U setuptools``
In pip7, the hacks were removed. This test remains to at least confirm pip
can upgrade distribute to setuptools using:
``pip install -U distribute``
The reason this works is that a final version of distribute (v0.7.3) was
    released that is a simple wrapper with:
install_requires=['setuptools>=0.7']
    The test uses a fixed set of packages from our test packages dir. Note that
virtualenv-1.9.1 contains distribute-0.6.34 and virtualenv-1.10 contains
setuptools-0.9.7
"""
def prep_ve(self, script, version, pip_src, distribute=False):
self.script = script
self.script.pip_install_local('virtualenv==%s' % version)
args = ['virtualenv', self.script.scratch_path / 'VE']
if distribute:
args.insert(1, '--distribute')
if version == "1.9.1" and not distribute:
# setuptools 0.6 didn't support PYTHONDONTWRITEBYTECODE
del self.script.environ["PYTHONDONTWRITEBYTECODE"]
self.script.run(*args)
if sys.platform == 'win32':
bindir = "Scripts"
else:
bindir = "bin"
self.ve_bin = self.script.scratch_path / 'VE' / bindir
self.script.run(self.ve_bin / 'pip', 'uninstall', '-y', 'pip')
self.script.run(
self.ve_bin / 'python', 'setup.py', 'install',
cwd=pip_src,
expect_stderr=True,
)
|
the-stack_106_25328 | #!/usr/bin/env vpython3
# Copyright 2012 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import hashlib
import io
import json
import logging
import os
import re
import stat
import subprocess
import sys
import tempfile
import unittest
import six
# Mutates sys.path.
import test_env
import isolate
import isolated_format
from utils import file_path
ALGO = hashlib.sha1
HASH_NULL = ALGO().hexdigest()
# These are per test case, not per mode.
RELATIVE_CWD = {
'all_items_invalid': '.',
'fail': '.',
'missing_trailing_slash': '.',
'no_run': '.',
'non_existent': '.',
'split': '.',
'symlink_full': '.',
'symlink_partial': '.',
'symlink_outside_build_root': '.',
'touch_only': '.',
'touch_root': os.path.join('tests', 'isolate'),
'with_flag': '.',
}
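# Descriptive note (inferred from make_tree() and _gen_files() below): each
# entry maps a test case to a tuple of (files to create in the source tree,
# expected dependency paths relative to the case's root directory).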
DEPENDENCIES = {
'all_items_invalid': (
{
'tests/isolate/all_items_invalid.isolate':
"""{
'variables': {
'command': ['python', 'empty.py'],
'files': [
# A single valid file so the command is valid and exits without
# an error.
'empty.py',
# File doesn't exist.
'A_file_that_does_not_exist',
# Directory missing trailing slash.
'files1',
# File doesn't exist.
'A_file_that_does_not_exist_either',
],
},
}""",
'tests/isolate/empty.py':
'import sys; sys.exit(0)',
},
['empty.py'],
),
'fail': (
{
'tests/isolate/fail.isolate':
"""{
'conditions': [
['(OS=="linux" and chromeos==1) or '
'((OS=="mac" or OS=="win") and chromeos==0)', {
'variables': {
'command': ['python', 'fail.py'],
'files': ['fail.py'],
},
}],
],
}""",
'tests/isolate/fail.py':
'import sys\nprint(\'Failing\')\nsys.exit(1)',
},
['fail.py'],
),
'missing_trailing_slash': (
{
# Directory missing trailing slash.
'tests/isolate/missing_trailing_slash.isolate':
"{'variables': {'files': ['files1']}}",
'tests/isolate/files1/foo':
'bar',
},
[],
),
'no_run': (
{
'tests/isolate/files1/subdir/42.txt':
'the answer to life the universe and everything\n',
'tests/isolate/files1/test_file1.txt':
'Foo\n',
'tests/isolate/files1/test_file2.txt':
'Bar\n',
'tests/isolate/no_run.isolate':
"""{
# Includes itself.
'variables': {'files': ['no_run.isolate', 'files1/']},
}""",
},
[
'no_run.isolate',
os.path.join('files1', 'subdir', '42.txt'),
os.path.join('files1', 'test_file1.txt'),
os.path.join('files1', 'test_file2.txt'),
],
),
'non_existent': (
{
'tests/isolate/non_existent.isolate':
"{'variables': {'files': ['A_file_that_do_not_exist']}}",
},
[],
),
'split': (
{
'tests/isolate/files1/subdir/42.txt':
'the answer to life the universe and everything',
'tests/isolate/split.isolate':
"""{
'variables': {
'command': ['python', 'split.py'],
'files': [
'<(DEPTH)/split.py',
'<(PRODUCT_DIR)/subdir/42.txt',
'test/data/foo.txt',
],
},
}""",
'tests/isolate/split.py':
"import sys; sys.exit(1)",
'tests/isolate/test/data/foo.txt':
'Split',
},
[
os.path.join('files1', 'subdir', '42.txt'),
os.path.join('test', 'data', 'foo.txt'),
'split.py',
],
),
'symlink_full': (
{
'tests/isolate/files1/subdir/42.txt':
'the answer to life the universe and everything\n',
'tests/isolate/files1/test_file1.txt':
'Foo\n',
'tests/isolate/files1/test_file2.txt':
'Bar\n',
'tests/isolate/files2':
test_env.SymLink('files1'),
'tests/isolate/symlink_full.isolate':
"""{
'conditions': [
['(OS=="linux" and chromeos==1) or ((OS=="mac" or OS=="win") and '
'chromeos==0)', {
'variables': {
'command': ['python', 'symlink_full.py'],
'files': ['files2/', 'symlink_full.py'],
},
}],
],
}""",
'tests/isolate/symlink_full.py':
"""if __name__ == '__main__':
import os, sys
print('symlink: touches files2/')
assert len(sys.argv) == 1
expected = {
os.path.join('subdir', '42.txt'):
b'the answer to life the universe and everything\\n',
'test_file1.txt': b'Foo\\n',
'test_file2.txt': b'Bar\\n',
}
root = 'files2'
actual = {}
for relroot, dirnames, filenames in os.walk(root):
for filename in filenames:
fullpath = os.path.join(relroot, filename)
actual[fullpath[len(root)+1:]] = open(fullpath, 'rb').read()
if '.svn' in dirnames:
dirnames.remove('.svn')
if actual != expected:
print('Failure')
print(actual)
print(expected)
sys.exit(1)
""",
},
[
os.path.join('files1', 'subdir', '42.txt'),
os.path.join('files1', 'test_file1.txt'),
os.path.join('files1', 'test_file2.txt'),
# files2 is a symlink to files1.
'files2',
'symlink_full.py',
],
),
'symlink_partial': (
{
'tests/isolate/files1/subdir/42.txt':
'the answer to life the universe and everything\n',
'tests/isolate/files1/test_file1.txt':
'Foo\n',
'tests/isolate/files1/test_file2.txt':
'Bar\n',
'tests/isolate/files2':
test_env.SymLink('files1'),
'tests/isolate/symlink_partial.isolate':
"""{
'conditions': [
['(OS=="linux" and chromeos==1) or ((OS=="mac" or OS=="win") and '
'chromeos==0)', {
'variables': {
'command': ['python', 'symlink_partial.py'],
'files': ['files2/test_file2.txt', 'symlink_partial.py'],
},
}],
],
}""",
'tests/isolate/symlink_partial.py':
"""if __name__ == '__main__':
import os, sys
print('symlink: touches files2/test_file2.txt')
assert len(sys.argv) == 1
with open(os.path.join('files2', 'test_file2.txt'), 'rb') as f:
if b'Bar\\n' != f.read():
print('Failed')
sys.exit(1)
""",
},
[
os.path.join('files1', 'test_file2.txt'),
# files2 is a symlink to files1.
'files2',
'symlink_partial.py',
],
),
'symlink_outside_build_root': (
{
'tests/directory_outside_build_root/test_file3.txt':
'asdf\n',
'tests/isolate/link_outside_build_root':
test_env.SymLink('../directory_outside_build_root'),
'tests/isolate/symlink_outside_build_root.isolate':
"""{
'conditions': [
['(OS=="linux" and chromeos==1) or ((OS=="mac" or OS=="win") and '
'chromeos==0)', {
'variables': {
'command': ['python', 'symlink_outside_build_root.py'],
'files': [
'link_outside_build_root/',
'symlink_outside_build_root.py',
],
},
}],
],
}""",
'tests/isolate/symlink_outside_build_root.py':
"""if __name__ == '__main__':
import os, sys
print('symlink: touches link_outside_build_root/')
assert len(sys.argv) == 1
p = os.path.join('link_outside_build_root', 'test_file3.txt')
with open(p, 'rb') as f:
if b'asdf\\n' != f.read():
print('Failed')
sys.exit(1)
""",
},
[
os.path.join('link_outside_build_root', 'test_file3.txt'),
'symlink_outside_build_root.py',
],
),
'touch_only': (
{},
[
'touch_only.py',
os.path.join('files1', 'test_file1.txt'),
],
),
'touch_root': (
{
'tests/isolate/touch_root.isolate':
"""{
'conditions': [
['(OS=="linux" and chromeos==1) or ((OS=="mac" or OS=="win") and '
'chromeos==0)', {
'variables': {
'command': ['python', 'touch_root.py'],
'files': ['../../at_root', 'touch_root.py'],
},
}],
],
}""",
'tests/isolate/touch_root.py':
"""if __name__ == '__main__':
import os, sys
print('child_touch_root: Verify the relative directories')
root_dir = os.path.dirname(os.path.abspath(
__file__))
parent_dir, base = os.path.split(root_dir)
parent_dir, base2 = os.path.split(parent_dir)
if base != 'isolate' or base2 != 'tests':
print('Invalid root dir %s' % root_dir)
sys.exit(4)
content = open(os.path.join(parent_dir, 'at_root'), 'r').read()
sys.exit(int(content != 'foo'))""",
'at_root':
'foo',
},
[
os.path.join('tests', 'isolate', 'touch_root.py'),
'at_root',
],
),
'with_flag': (
{
'tests/isolate/files1/subdir/42.txt':
'the answer to life the universe and everything\n',
'tests/isolate/files1/test_file1.txt':
'Foo\n',
'tests/isolate/files1/test_file2.txt':
'Bar\n',
'tests/isolate/with_flag.isolate':
"""{
'conditions': [
['(OS=="linux" and chromeos==1) or ((OS=="mac" or OS=="win") and '
'chromeos==0)', {
'variables': {
'command': ['python', 'with_flag.py', '<(FLAG)'],
'files': ['files1/', 'with_flag.py'],
},
}],
],
}""",
'tests/isolate/with_flag.py':
"""if __name__ == '__main__':
import os, sys
print('with_flag: Verify the test data files were mapped properly')
assert len(sys.argv) == 2
mode = sys.argv[1]
assert mode in ('run', 'trace')
expected = {
os.path.join('subdir', '42.txt'):
'the answer to life the universe and everything\\n',
'test_file1.txt': 'Foo\\n',
'test_file2.txt': 'Bar\\n',
}
root = 'files1'
actual = {}
for relroot, dirnames, filenames in os.walk(root):
for filename in filenames:
fullpath = os.path.join(relroot, filename)
actual[fullpath[len(root)+1:]] = open(fullpath, 'r').read()
if mode == 'trace' and '.svn' in dirnames:
dirnames.remove('.svn')
if actual != expected:
print('Failure')
print(actual)
print(expected)
sys.exit(1)
root_dir = os.path.dirname(os.path.abspath(
__file__))
parent_dir, base = os.path.split(root_dir)
if mode == 'trace':
# Verify the parent directory.
parent_dir, base2 = os.path.split(parent_dir)
if base != 'isolate' or base2 != 'tests':
print('mode trace: Invalid root dir %s' % root_dir)
sys.exit(4)
else:
# Verify that we are not inside a checkout.
if base == 'tests':
print('mode run: Invalid root dir %s' % root_dir)
sys.exit(5)
""",
},
[
'with_flag.py',
os.path.join('files1', 'subdir', '42.txt'),
os.path.join('files1', 'test_file1.txt'),
os.path.join('files1', 'test_file2.txt'),
],
),
}
SIMPLE_ISOLATE = {
'simple.isolate':
"""{
'variables': {
'command': ['python', 'simple.py'],
'files': ['simple.py'],
},
}""",
'simple.py':
"""if __name__ == '__main__':
import os, sys
actual = set(os.listdir('.'))
expected = set(['simple.py'])
if expected != actual:
print('Unexpected files: %s' % ', '.join(sorted(actual- expected)))
sys.exit(1)
print('Simply works.')
""",
}
class CalledProcessError(subprocess.CalledProcessError):
"""Adds stderr data."""
def __init__(self, returncode, cmd, output, stderr, cwd):
super(CalledProcessError, self).__init__(returncode, cmd, output)
self.stderr = stderr
self.cwd = cwd
def __str__(self):
return super(CalledProcessError, self).__str__() + (
'\n'
'cwd=%s\n%s\n%s\n%s') % (self.cwd, self.output, self.stderr, ' '.join(
self.cmd))
def list_files_tree(directory):
"""Returns the list of all the files in a tree."""
actual = []
for root, dirnames, filenames in os.walk(directory):
actual.extend(os.path.join(root, f)[len(directory) + 1:] for f in filenames)
for dirname in dirnames:
full = os.path.join(root, dirname)
# Manually include symlinks.
if os.path.islink(full):
actual.append(full[len(directory) + 1:])
return sorted(actual)
def _isolate_dict_to_string(values):
buf = io.StringIO()
isolate.isolate_format.pretty_print(values, buf)
return buf.getvalue()
def _wrap_in_condition(variables):
"""Wraps a variables dict inside the current OS condition.
Returns the equivalent string.
"""
return _isolate_dict_to_string({
'conditions': [['OS=="mac" and chromeos==0', {
'variables': variables
}],],
})
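# Illustration only: _wrap_in_condition({'command': ['python', 'x.py']})
# returns text equivalent to
#   {
#     'conditions': [
#       ['OS=="mac" and chromeos==0', {
#         'variables': {'command': ['python', 'x.py']},
#       }],
#     ],
#   }
# with the exact layout decided by isolate_format.pretty_print.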
def _fix_file_mode(filename):
"""4 modes are supported, 0700 (rwx), 0600 (rw)."""
min_mode = 0o600
return (min_mode | 0o100) if filename.endswith('.py') else min_mode
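# For example, _fix_file_mode('simple.py') returns 0o700 while
# _fix_file_mode('data.txt') returns 0o600 (illustrative file names only).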
class Isolate(unittest.TestCase):
def test_help_modes(self):
# Check coherency in the help and implemented modes.
cmd = [
sys.executable,
os.path.join(test_env.CLIENT_DIR, 'isolate.py'),
'--help',
]
p = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=test_env.CLIENT_DIR)
out = p.communicate()[0].decode().splitlines()
self.assertEqual(0, p.returncode)
out = out[out.index('Commands are:') + 1:]
out = out[:out.index('')]
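    # Descriptive note: the pattern below matches help lines of the form
    # "  <command>  <description>", optionally wrapped in ANSI colour escape
    # codes, and captures the command name in group 1.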
regexp = '^ (?:\x1b' + r'\[\d\dm|)(\w+)\s*(:?' + '\x1b' + r'\[\d\dm|) .+'
modes = [re.match(regexp, l) for l in out]
modes = [m.group(1) for m in modes if m]
EXPECTED_MODES = (
'archive',
'batcharchive',
'check',
'help',
'remap',
'run',
)
    # If a new command is added it should at least have a bare test.
self.assertEqual(sorted(EXPECTED_MODES), sorted(modes))
class IsolateTempdirBase(unittest.TestCase):
def setUp(self):
super(IsolateTempdirBase, self).setUp()
self.tempdir = file_path.get_native_path_case(
six.text_type(tempfile.mkdtemp(prefix=u'isolate_smoke_')))
self.isolated = os.path.join(self.tempdir, 'isolate_smoke_test.isolated')
self.isolate_dir = os.path.join(self.tempdir, 'isolate')
def tearDown(self):
try:
logging.debug(self.tempdir)
file_path.rmtree(self.tempdir)
finally:
super(IsolateTempdirBase, self).tearDown()
def make_tree(self, case=None):
case = case or self.case()
if not case:
return
test_env.make_tree(self.isolate_dir, DEPENDENCIES[case][0])
def _gen_files(self, empty_file):
"""Returns a dict of files like calling isolate.files_to_metadata() on each
file.
Arguments:
- empty_file: Add a specific empty file (size 0).
"""
root_dir = self.isolate_dir
if RELATIVE_CWD[self.case()] == '.':
root_dir = os.path.join(root_dir, 'tests', 'isolate')
files = {six.ensure_text(f): {} for f in DEPENDENCIES[self.case()][1]}
for relfile, v in files.items():
filepath = os.path.join(root_dir, relfile)
filestats = os.lstat(filepath)
is_link = stat.S_ISLNK(filestats.st_mode)
if not is_link:
v[u's'] = int(filestats.st_size)
if sys.platform != 'win32':
v[u'm'] = _fix_file_mode(relfile)
if is_link:
v[u'l'] = os.readlink(filepath)
else:
# Upgrade the value to unicode so diffing the structure in case of
# test failure is easier, since the basestring type must match,
# str!=unicode.
v[u'h'] = six.text_type(isolated_format.hash_file(filepath, ALGO))
if empty_file:
item = files[empty_file]
item['h'] = six.text_type(HASH_NULL)
if sys.platform != 'win32':
item['m'] = 0o400
item['s'] = 0
return files
def _expected_isolated(self, args, empty_file):
"""Verifies self.isolated contains the expected data."""
expected = {
u'algo': u'sha-1',
u'files': self._gen_files(empty_file),
u'version': six.text_type(isolated_format.ISOLATED_FILE_VERSION),
}
if args:
expected[u'command'] = [u'python'] + [six.ensure_text(x) for x in args]
expected[u'relative_cwd'] = six.text_type(RELATIVE_CWD[self.case()])
with open(self.isolated, 'r') as f:
self.assertEqual(expected, json.load(f))
def _expected_saved_state(self, args, empty_file, root_dir):
expected = {
u'OS':
six.text_type(sys.platform),
u'algo':
u'sha-1',
u'child_isolated_files': [],
u'command': [],
u'config_variables': {
u'OS': u'mac',
u'chromeos': 0,
},
u'files':
self._gen_files(empty_file),
u'isolate_file':
file_path.safe_relpath(
file_path.get_native_path_case(
six.ensure_text(self.filename())),
six.text_type(os.path.dirname(self.isolated))),
u'path_variables': {},
u'relative_cwd':
six.text_type(RELATIVE_CWD[self.case()]),
u'root_dir':
six.text_type(root_dir or os.path.dirname(self.filename())),
u'version':
six.text_type(isolate.SavedState.EXPECTED_VERSION),
}
if args:
expected[u'command'] = [u'python'] + [six.ensure_text(x) for x in args]
with open(self.saved_state(), 'r') as f:
self.assertEqual(expected, json.load(f))
def _expect_results(self, args, empty_file, root_dir=None):
self._expected_isolated(args, empty_file)
self._expected_saved_state(args, empty_file, root_dir)
# Also verifies run_isolated.py will be able to read it.
with open(self.isolated, 'rb') as f:
isolated_format.load_isolated(f.read(), ALGO)
def _expect_no_result(self):
self.assertFalse(os.path.exists(self.isolated))
def _get_cmd(self, mode):
return [
sys.executable,
os.path.join(test_env.CLIENT_DIR, 'isolate.py'),
mode,
'--isolated',
self.isolated,
'--isolate',
self.filename(),
'--config-variable',
'OS',
'mac',
'--config-variable',
'chromeos',
'0',
]
def _execute(self, mode, case, args, cwd=test_env.CLIENT_DIR):
"""Executes isolate.py."""
self.assertEqual(case,
self.case() + '.isolate',
'Rename the test case to test_%s()' % case)
cmd = self._get_cmd(mode)
cmd.extend(args)
env = os.environ.copy()
if 'ISOLATE_DEBUG' in env:
del env['ISOLATE_DEBUG']
logging.debug(cmd)
p = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd,
env=env,
universal_newlines=True)
out, err = p.communicate()
if p.returncode:
raise CalledProcessError(p.returncode, cmd, out, err, cwd)
# Do not check on Windows since a lot of spew is generated there.
if sys.platform != 'win32':
self.assertTrue(err in (None, '', isolate._VARIABLE_WARNING), err)
return out
def case(self):
"""Returns the filename corresponding to this test case."""
test_id = self.id().split('.')
return re.match('^test_([a-z_]+)$', test_id[2]).group(1)
def filename(self):
"""Returns the filename corresponding to this test case."""
filename = os.path.join(self.isolate_dir, 'tests', 'isolate',
self.case() + '.isolate')
self.assertTrue(os.path.isfile(filename), filename)
return filename
def saved_state(self):
return isolate.isolatedfile_to_state(self.isolated)
def _test_all_items_invalid(self, mode):
out = self._execute(mode, 'all_items_invalid.isolate',
['--ignore_broken_item'])
self._expect_results(['empty.py'], None, None)
return out or ''
def _test_missing_trailing_slash(self, mode):
try:
self._execute(mode, 'missing_trailing_slash.isolate', [])
self.fail()
except subprocess.CalledProcessError as e:
self.assertEqual('', e.output)
out = getattr(e, 'stderr', None)
self._expect_no_result()
root = file_path.get_native_path_case(six.ensure_text(self.isolate_dir))
expected = ('Input directory %s must have a trailing slash' %
os.path.join(root, 'tests', 'isolate', 'files1'))
self.assertIn(expected, out)
def _test_non_existent(self, mode):
try:
self._execute(mode, 'non_existent.isolate', [])
self.fail()
except subprocess.CalledProcessError as e:
self.assertEqual('', e.output)
out = getattr(e, 'stderr', None)
self._expect_no_result()
root = file_path.get_native_path_case(six.ensure_text(self.isolate_dir))
expected = (
'Input file %s doesn\'t exist' %
os.path.join(root, 'tests', 'isolate', 'A_file_that_do_not_exist'))
self.assertIn(expected, out)
class IsolateOutdir(IsolateTempdirBase):
def setUp(self):
super(IsolateOutdir, self).setUp()
# The tests assume the current directory is the file's directory.
os.mkdir(self.isolate_dir, 0o700)
self.old_cwd = os.getcwd()
os.chdir(self.isolate_dir)
self.outdir = os.path.join(self.tempdir, 'isolated')
def tearDown(self):
os.chdir(self.old_cwd)
super(IsolateOutdir, self).tearDown()
def _expect_no_tree(self):
# No outdir was created.
self.assertFalse(os.path.exists(self.outdir))
def _result_tree(self):
return list_files_tree(self.outdir)
def _expected_tree(self):
"""Verifies the files written in the temporary directory."""
self.assertEqual(
sorted(f for f in DEPENDENCIES[self.case()][1]), self._result_tree())
def _get_cmd(self, mode):
"""Adds --outdir for the commands supporting it."""
cmd = super(IsolateOutdir, self)._get_cmd(mode)
cmd.extend(('--outdir', self.outdir))
return cmd
def _test_missing_trailing_slash(self, mode):
super(IsolateOutdir, self)._test_missing_trailing_slash(mode)
self._expect_no_tree()
def _test_non_existent(self, mode):
super(IsolateOutdir, self)._test_non_existent(mode)
self._expect_no_tree()
class Isolate_check(IsolateTempdirBase):
def setUp(self):
super(Isolate_check, self).setUp()
self.make_tree()
def test_fail(self):
self._execute('check', 'fail.isolate', [])
self._expect_results(['fail.py'], None, None)
def test_missing_trailing_slash(self):
self._test_missing_trailing_slash('check')
def test_non_existent(self):
self._test_non_existent('check')
def test_all_items_invalid(self):
out = self._test_all_items_invalid('check')
self.assertEqual('', out)
def test_no_run(self):
self._execute('check', 'no_run.isolate', [])
self._expect_results([], None, None)
def test_touch_root(self):
self._execute('check', 'touch_root.isolate', [])
self._expect_results(['touch_root.py'], None, self.isolate_dir)
if sys.platform != 'win32':
def test_symlink_full(self):
self._execute('check', 'symlink_full.isolate', [])
self._expect_results(['symlink_full.py'], None, None)
def test_symlink_partial(self):
self._execute('check', 'symlink_partial.isolate', [])
self._expect_results(['symlink_partial.py'], None, None)
def test_symlink_outside_build_root(self):
self._execute('check', 'symlink_outside_build_root.isolate', [])
self._expect_results(['symlink_outside_build_root.py'], None, None)
class Isolate_remap(IsolateOutdir):
def setUp(self):
super(Isolate_remap, self).setUp()
self.make_tree()
def test_fail(self):
self._execute('remap', 'fail.isolate', [])
self._expected_tree()
self._expect_results(['fail.py'], None, None)
def test_missing_trailing_slash(self):
self._test_missing_trailing_slash('remap')
def test_non_existent(self):
self._test_non_existent('remap')
def test_all_items_invalid(self):
out = self._test_all_items_invalid('remap')
self.assertTrue(out.startswith('Remapping'))
self._expected_tree()
def test_no_run(self):
self._execute('remap', 'no_run.isolate', [])
self._expected_tree()
self._expect_results([], None, None)
def test_touch_root(self):
self._execute('remap', 'touch_root.isolate', [])
self._expected_tree()
self._expect_results(['touch_root.py'], None, self.isolate_dir)
if sys.platform != 'win32':
def test_symlink_full(self):
self._execute('remap', 'symlink_full.isolate', [])
self._expected_tree()
self._expect_results(['symlink_full.py'], None, None)
def test_symlink_partial(self):
self._execute('remap', 'symlink_partial.isolate', [])
self._expected_tree()
self._expect_results(['symlink_partial.py'], None, None)
def test_symlink_outside_build_root(self):
self._execute('remap', 'symlink_outside_build_root.isolate', [])
self._expected_tree()
self._expect_results(['symlink_outside_build_root.py'], None, None)
class Isolate_run(IsolateTempdirBase):
def setUp(self):
super(Isolate_run, self).setUp()
self.make_tree()
def test_fail(self):
try:
self._execute('run', 'fail.isolate', [])
self.fail()
except subprocess.CalledProcessError:
pass
self._expect_results(['fail.py'], None, None)
def test_missing_trailing_slash(self):
self._test_missing_trailing_slash('run')
def test_non_existent(self):
self._test_non_existent('run')
def test_all_items_invalid(self):
out = self._test_all_items_invalid('run')
self.assertEqual('', out)
def test_no_run(self):
try:
self._execute('run', 'no_run.isolate', [])
self.fail()
except subprocess.CalledProcessError:
pass
self._expect_no_result()
def test_touch_root(self):
self._execute('run', 'touch_root.isolate', [])
self._expect_results(['touch_root.py'], None, self.isolate_dir)
if sys.platform != 'win32':
def test_symlink_full(self):
self._execute('run', 'symlink_full.isolate', [])
self._expect_results(['symlink_full.py'], None, None)
def test_symlink_partial(self):
self._execute('run', 'symlink_partial.isolate', [])
self._expect_results(['symlink_partial.py'], None, None)
def test_symlink_outside_build_root(self):
self._execute('run', 'symlink_outside_build_root.isolate', [])
self._expect_results(['symlink_outside_build_root.py'], None, None)
class IsolateNoOutdir(IsolateTempdirBase):
# Test without the --outdir flag.
# So all the files are first copied in the tempdir and the test is run from
# there.
def setUp(self):
super(IsolateNoOutdir, self).setUp()
self.make_tree('touch_root')
def _execute_short(self, mode, args):
"""Executes isolate.py."""
cmd = [
sys.executable,
os.path.join(test_env.CLIENT_DIR, 'isolate.py'),
mode,
'--isolated',
self.isolated,
'--config-variable',
'OS',
'mac',
'--config-variable',
'chromeos',
'0',
]
cmd.extend(args)
env = os.environ.copy()
if 'ISOLATE_DEBUG' in env:
del env['ISOLATE_DEBUG']
logging.debug(cmd)
cwd = self.tempdir
p = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=cwd,
env=env,
universal_newlines=True)
out, err = p.communicate()
if p.returncode:
raise CalledProcessError(p.returncode, cmd, out, err, cwd)
return out
def mode(self):
"""Returns the execution mode corresponding to this test case."""
test_id = self.id().split('.')
self.assertEqual(3, len(test_id))
self.assertEqual('__main__', test_id[0])
return re.match('^test_([a-z]+)$', test_id[2]).group(1)
def filename(self):
"""Returns the filename corresponding to this test case."""
filename = os.path.join(self.isolate_dir, 'tests', 'isolate',
'touch_root.isolate')
self.assertTrue(os.path.isfile(filename), filename)
return filename
def test_check(self):
self._execute_short('check', ['--isolate', self.filename()])
files = sorted([
'isolate_smoke_test.isolated',
'isolate_smoke_test.isolated.state',
os.path.join('isolate', 'tests', 'isolate', 'touch_root.isolate'),
os.path.join('isolate', 'tests', 'isolate', 'touch_root.py'),
os.path.join('isolate', 'at_root'),
])
self.assertEqual(files, list_files_tree(self.tempdir))
def test_remap(self):
with self.assertRaises(CalledProcessError):
self._execute_short('remap', ['--isolate', self.filename()])
def test_run(self):
self._execute_short('run', ['--isolate', self.filename()])
files = sorted([
'isolate_smoke_test.isolated',
'isolate_smoke_test.isolated.state',
os.path.join('isolate', 'tests', 'isolate', 'touch_root.isolate'),
os.path.join('isolate', 'tests', 'isolate', 'touch_root.py'),
os.path.join('isolate', 'at_root'),
])
self.assertEqual(files, list_files_tree(self.tempdir))
class IsolateOther(IsolateTempdirBase):
def test_run_mixed(self):
    # Test when a user maps from one directory and then replays from another
    # directory. This is a very rare corner case.
indir = os.path.join(self.tempdir, 'input')
test_env.make_tree(indir, SIMPLE_ISOLATE)
proc = subprocess.Popen([
sys.executable,
'isolate.py',
'check',
'-i',
os.path.join(indir, 'simple.isolate'),
'-s',
os.path.join(indir, 'simple.isolated'),
'--config-variable',
'OS',
'mac',
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=test_env.CLIENT_DIR)
stdout = proc.communicate()[0].decode()
if sys.platform == 'win32':
variable_warning = isolate._VARIABLE_WARNING.replace('\n', '\r\n')
else:
variable_warning = isolate._VARIABLE_WARNING
self.assertEqual(variable_warning, stdout)
self.assertEqual(0, proc.returncode)
expected = [
'simple.isolate',
'simple.isolated',
'simple.isolated.state',
'simple.py',
]
self.assertEqual(expected, sorted(os.listdir(indir)))
# Remove the original directory.
indir2 = indir + '2'
os.rename(indir, indir2)
# simple.isolated.state is required; it contains the variables.
proc = subprocess.Popen([
sys.executable,
'isolate.py',
'run',
'-s',
os.path.join(indir2, 'simple.isolated'),
'--skip-refresh',
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=test_env.CLIENT_DIR,
universal_newlines=True)
stdout = proc.communicate()[0]
self.assertEqual(1, proc.returncode)
self.assertTrue('simple.py is missing' in stdout)
def test_empty_and_renamed(self):
a_isolate = os.path.join(self.tempdir, 'a.isolate')
with open(a_isolate, 'wb') as f:
f.write(b'{}')
cmd = [
sys.executable,
'isolate.py',
'check',
'-s',
os.path.join(self.tempdir, 'out.isolated'),
]
subprocess.check_call(cmd + ['-i', a_isolate], cwd=test_env.CLIENT_DIR)
# Move the .isolate file aside and rerun the command with the new source but
# same destination.
b_isolate = os.path.join(self.tempdir, 'b.isolate')
os.rename(a_isolate, b_isolate)
subprocess.check_call(cmd + ['-i', b_isolate], cwd=test_env.CLIENT_DIR)
if __name__ == '__main__':
test_env.main()
|