id | text | dataset_id
(stringlengths, 1-7) | (stringlengths, 6-1.03M) | (stringclasses, 1 value)
---|---|---|
96075
|
<gh_stars>1-10
import torch
import torch.nn as nn
from pytorch_lightning import LightningModule
from pytorch_lightning.metrics import Accuracy
def lin_relu(in_features, out_features):
return nn.Sequential(
nn.Linear(in_features, out_features),
nn.ReLU(inplace=True),
)
class MLP(nn.Module):
def __init__(self, input_dim=25*(8+1+1),
hidden_dim=256, num_layers=6,
num_classes=101):
super().__init__()
self.layers = nn.ModuleList([lin_relu(input_dim, hidden_dim)])
for _ in range(num_layers-2):
self.layers.append(lin_relu(hidden_dim, hidden_dim))
self.layers.append(nn.Linear(hidden_dim, num_classes))
def forward(self, x):
for i in range(len(self.layers)):
x = self.layers[i](x)
return x
class MLPModule(LightningModule):
def __init__(self, input_dim=25*(8+1+1),
hidden_dim=256,
num_layers=6,
num_classes=101):
super().__init__()
self.save_hyperparameters()
self.model = MLP(input_dim, hidden_dim, num_layers, num_classes)
self.loss = nn.CrossEntropyLoss()
self.val_accuracy = Accuracy()
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
out = self(x)
loss = self.loss(out, y)
self.log('train_loss', loss, on_step=False, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
out = self(x)
loss = self.loss(out, y)
acc = self.val_accuracy(out, y)
self.log('val_loss', loss, prog_bar=True)
self.log('acc', acc, prog_bar=True)
def configure_optimizers(self):
optimizer = torch.optim.AdamW(self.model.parameters(), lr=1e-3)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
step_size=50,
gamma=0.5)
return {
'optimizer': optimizer,
'lr_scheduler': lr_scheduler,
}
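# --- Editor's illustrative usage sketch; not part of the original file. ---
# Trains the MLPModule above on random tensors so the wiring can be sanity-checked.
# The synthetic data shapes and the Trainer settings are assumptions, not taken from
# the original repository.
if __name__ == "__main__":
    from torch.utils.data import DataLoader, TensorDataset
    from pytorch_lightning import Trainer
    # 64 fake samples with the default input_dim = 25 * (8 + 1 + 1) = 250 features
    x = torch.randn(64, 25 * (8 + 1 + 1))
    y = torch.randint(0, 101, (64,))  # 101 classes, matching num_classes
    loader = DataLoader(TensorDataset(x, y), batch_size=16)
    model = MLPModule()
    Trainer(max_epochs=1).fit(model, loader)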
|
StarcoderdataPython
|
4816268
|
# composition (kompozisyon): Arac is composed with a drive type (TekerlekliArac = wheeled vehicle, PaletliArac = tracked vehicle) and delegates yon_degistir ("change direction") to it
import time
class TekerlekliArac():
def yon_degistir(self, yon, durum):
print("Tekerlekli araç ", yon, durum)
class PaletliArac():
def yon_degistir(self, yon, durum):
print("Paletli araç ", yon, durum)
class Arac():
def __init__(self, sinif_kontrol):
self.sinif_kontrol = sinif_kontrol
def dondur(self, yon):
self.sinif_kontrol.yon_degistir(yon, "Başla")
time.sleep(0.25)
self.sinif_kontrol.yon_degistir(yon, "Dur")
bmw_m = Arac(TekerlekliArac())
bmw_m.dondur("sol")
p_arac = Arac(PaletliArac())
p_arac.dondur("sağ")
|
StarcoderdataPython
|
79104
|
# import dash related libraries
import dash
import dash_html_components as html
import dash_bootstrap_components as dbc
import warnings
warnings.filterwarnings('ignore')
# import local libraries
from callbacks import register_callbacks
from lib import tabs
from lib import title
# create dash App server
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
server = app.server
app.config['suppress_callback_exceptions'] = True
# layout
app.layout = html.Div(className="body-bk", children=
[
html.Div(className="title_wrap", children=
[
dbc.Row([
dbc.Col([
html.Div([title.title])
])
])
]),
html.Div(className="ds4a-body", children=
[
html.Div([
tabs.build_tabs()
])
])
]
)
register_callbacks(app)
if __name__ == "__main__":
app.run_server(debug=False, port=8181)
|
StarcoderdataPython
|
1620196
|
import os
def ATWFileList(HtmlUrl,n2):
global ATWFileList0
# ATWFileList
# find all .py files in the current directory
L1 = []
L1.clear()
ss = os.listdir(HtmlUrl)
for item in ss:
if(item.endswith(n2)):
L1.append(item)
i = 0
while i < len(L1):
print(' ',i,'=====',L1[i],'\n')
i+=1
while True:
ImportPyUrl = input('\n Please enter the file number\n or enter the file name and path directly, e.g. ./path'+n2+'\n')
# must not be empty
if ImportPyUrl == '':
continue
# check whether ImportPyUrl is an int (an index into L1)
try:
ATWFileList0 = L1[int(ImportPyUrl)]
break
except:
break
# if it does not exist, please retry; return ImportPyUrl
#ATWFileList0 = ImportPyUrl
###########################################
######################################################################################################
######################################################################## use exception handling to check whether the file exists
def IfHaveFile(target_file):
try:
file = open(target_file, 'r')
except FileNotFoundError:
gfdg = (target_file + ' does not exist')
OK = 0
except PermissionError:
gfdg = (target_file + ' is not a file')
OK = 0
else:
gfdg = (target_file + ' exists')
OK = 1
file.close()
print ('gfdg,',gfdg)
return OK
|
StarcoderdataPython
|
3366971
|
from time import sleep
while True:
print('Hello world')
sleep(1)
|
StarcoderdataPython
|
3232067
|
<reponame>BradleyBrown19/ModernArchitecturesFromScratch
# AUTOGENERATED! DO NOT EDIT! File to edit: FullyConnectedNetwork.ipynb (unless otherwise specified).
__all__ = ['get_weight', 'linear', 'relu', 'lin_rel', 'softmax', 'mse_loss', 'Module', 'Linear', 'ReLU', 'CrossSoft',
'eps', 'Model']
# Cell
from .basic_operations_01 import *
from nbdev.showdoc import *
# Cell
#Kaiming initialization with fan_in
def get_weight(in_d, out_d, relu_after):
"Returns weight matrix of size `in_d` x `out_d` initialized using Kaiming initialization"
if relu_after: return torch.randn(in_d, out_d) * math.sqrt(2. / in_d)
else: return torch.randn(in_d, out_d) / math.sqrt(in_d)
# Cell
#hide
def linear(x, w, b):
"Basic linear layer"
return x @ w + b
# Cell
#hide
def relu(x):
"ReLU activation function"
return x.clamp_min(0.) - 0.
# Cell
#hide
def lin_rel(x, w, b):
"Linear layer followed by ReLU activation on `x` with weight `w` and bias `b`"
return relu(linear(x, w, b))
# Cell
#hide
def softmax(x):
"Softmax activation function"
return torch.exp(x) / torch.sum(torch.exp(x.unsqueeze(-1)), dim=1)
# Cell
#hide
def mse_loss(xb, yb):
"Mean Square Error loss"
return (xb.squeeze(-1) - yb).pow(2).mean()
# Cell
class Module():
"Base class for every layer operation in a sequential network"
def __call__(self, *args):
"Executes forward pass of module and stores result in `self.out` for backwards pass"
self.args = args
self.out = self.forward(*args)
return self.out
def forward(self):
"Executes desired operation of module"
raise Exception("Not Implemented")
def backward(self):
"Calls backwards method to find gradient with stored output of layer"
self.bwd(self.out, *self.args)
# Cell
class Linear(Module):
def __init__(self, in_d, out_d, final):
"Initialize weight using 'get_weight' and bias to 0 for linear operation"
self.w, self.b = get_weight(in_d, out_d, final), torch.zeros(out_d)
def forward(self, xb):
"Perform forward linear pass"
return xb @ self.w + self.b
def bwd(self, out, inp):
"Gradient with respect to the forward linear layer"
inp.g = out.g @ self.w.t()
self.w.g = inp.t() @ out.g
self.b.g = out.g.sum(0)
# Cell
class ReLU(Module):
def forward(self, x):
"Set all activations to have a minimum of zero, subtract 0.5 to maintain mean of 0"
return x.clamp_min_(0.)-0.5
def bwd(self, out, inp):
"Backward with respect to the ReLU layer"
inp.g = (inp>0).float() * out.g
# Cell
eps = 1e-9
class CrossSoft(Module):
def forward(self, inp, targ):
"Calls `soft_forward` and `cross_loss` on inp compared with `targ`"
softed = self.soft_forward(inp)
return self.cross_loss(softed, targ)
def soft_forward(self, x):
"Implements softmax activation function on `x`"
return torch.exp(x) / torch.sum(torch.exp(x.unsqueeze(-1)), dim=1)
def cross_loss(self, xb, targ):
"Cross entropy loss of `xb` compared to `targ`"
return -( (xb + eps).log()[range(targ.shape[0]), targ.long()].mean() )
def bwd(self, loss, inp, targ):
"Gradient with respect to both softmax and cross entropy loss"
targ = torch.nn.functional.one_hot(targ.to(torch.int64), 10)
inp_s = softmax(inp)
inp.g = ( inp_s - targ ) / targ.shape[0]
# Cell
#hide
class Model():
def __init__(self, layers):
self.layers = layers
self.learner = None
for lay in self.layers:
lay.learner = None
def set_learner(self, learner):
for lay in self.layers:
lay.learner = learner
def __call__(self, x):
for l in self.layers: x = l(x)
return x
def backward(self):
for l in reversed(self.layers): l.backward()
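# Editor's note: the following commented sketch (not part of the notebook export) shows
# how the pieces above compose into a tiny classifier. The layer sizes and random data
# are assumptions chosen only for illustration.
#
#   xb, yb = torch.randn(32, 784), torch.randint(0, 10, (32,))
#   model = Model([Linear(784, 50, True), ReLU(), Linear(50, 10, False)])
#   loss_fn = CrossSoft()
#   loss = loss_fn(model(xb), yb)   # forward pass + softmax cross-entropy
#   loss_fn.backward()              # stores the gradient of the logits in the output's .g
#   model.backward()                # backpropagates through the Linear and ReLU layers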
|
StarcoderdataPython
|
3363364
|
<reponame>pibico/frappe
# Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
# Tree (Hierarchical) Nested Set Model (nsm)
#
# To use the nested set model,
# use the following pattern
# 1. name your parent field "parent_<doctype>" (for example "parent_item_group"), or set an nsm_parent_field property on the document class holding your field name
# 2. have a field called "old_parent" in your fields list - this identifies whether the parent has been changed
# 3. call update_nsm(doc_obj) in the on_update method (see the illustrative sketch below)
# ------------------------------------------
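# Editor's note: a minimal controller sketch (not part of this module) following the
# pattern above, assuming a hypothetical "Item Group" DocType whose fields include
# `parent_item_group`, `old_parent`, `lft` and `rgt`:
#
#   from frappe.utils.nested_set import NestedSet
#
#   class ItemGroup(NestedSet):
#       nsm_parent_field = "parent_item_group"
#       # NestedSet.on_update() already calls update_nsm(self); a plain Document
#       # controller would instead call update_nsm(self) from its own on_update().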
from typing import Iterator
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.query_builder import DocType, Order
from frappe.query_builder.functions import Coalesce, Max
from frappe.query_builder.utils import DocType
class NestedSetRecursionError(frappe.ValidationError): pass
class NestedSetMultipleRootsError(frappe.ValidationError): pass
class NestedSetChildExistsError(frappe.ValidationError): pass
class NestedSetInvalidMergeError(frappe.ValidationError): pass
# called in the on_update method
def update_nsm(doc):
# get fields, data from the DocType
old_parent_field = 'old_parent'
parent_field = "parent_" + frappe.scrub(doc.doctype)
if hasattr(doc,'nsm_parent_field'):
parent_field = doc.nsm_parent_field
if hasattr(doc,'nsm_oldparent_field'):
old_parent_field = doc.nsm_oldparent_field
parent, old_parent = doc.get(parent_field) or None, doc.get(old_parent_field) or None
# has parent changed (?) or parent is None (root)
if not doc.lft and not doc.rgt:
update_add_node(doc, parent or '', parent_field)
elif old_parent != parent:
update_move_node(doc, parent_field)
# set old parent
doc.set(old_parent_field, parent)
frappe.db.set_value(doc.doctype, doc.name, old_parent_field, parent or '', update_modified=False)
doc.reload()
def update_add_node(doc, parent, parent_field):
"""
insert a new node
"""
doctype = doc.doctype
name = doc.name
Table = DocType(doctype)
# get the last sibling of the parent
if parent:
left, right = frappe.db.get_value(doctype, {"name": parent}, ["lft", "rgt"], for_update=True)
validate_loop(doc.doctype, doc.name, left, right)
else: # root
right = frappe.qb.from_(Table).select(
Coalesce(Max(Table.rgt), 0) + 1
).where(Coalesce(Table[parent_field], "") == "").run(pluck=True)[0]
right = right or 1
# update all on the right
frappe.qb.update(Table).set(Table.rgt, Table.rgt + 2).where(Table.rgt >= right).run()
frappe.qb.update(Table).set(Table.lft, Table.lft + 2).where(Table.lft >= right).run()
if frappe.qb.from_(Table).select("*").where((Table.lft == right) | (Table.rgt == right + 1)).run():
frappe.throw(_("Nested set error. Please contact the Administrator."))
# update index of new node
frappe.qb.update(Table).set(Table.lft, right).set(Table.rgt, right + 1).where(Table.name == name).run()
return right
def update_move_node(doc: Document, parent_field: str):
parent: str = doc.get(parent_field)
Table = DocType(doc.doctype)
if parent:
new_parent = frappe.qb.from_(Table).select(
Table.lft, Table.rgt
).where(Table.name == parent).for_update().run(as_dict=True)[0]
validate_loop(doc.doctype, doc.name, new_parent.lft, new_parent.rgt)
# move to dark side
frappe.qb.update(Table).set(Table.lft, - Table.lft).set(Table.rgt, - Table.rgt).where(
(Table.lft >= doc.lft) & (Table.rgt <= doc.rgt)
).run()
# shift left
diff = doc.rgt - doc.lft + 1
frappe.qb.update(Table).set(Table.lft, Table.lft - diff).set(Table.rgt, Table.rgt - diff).where(
Table.lft > doc.rgt
).run()
# shift left rgts of ancestors whose only rgts must shift
frappe.qb.update(Table).set(Table.rgt, Table.rgt - diff).where(
(Table.lft < doc.lft) & (Table.rgt > doc.rgt)
).run()
if parent:
# re-query value due to computation above
new_parent = frappe.qb.from_(Table).select(
Table.lft, Table.rgt
).where(Table.name == parent).for_update().run(as_dict=True)[0]
# set parent lft, rgt
frappe.qb.update(Table).set(Table.rgt, Table.rgt + diff).where(Table.name == parent).run()
# shift right at new parent
frappe.qb.update(Table).set(Table.lft, Table.lft + diff).set(Table.rgt, Table.rgt + diff).where(
Table.lft > new_parent.rgt
).run()
# shift right rgts of ancestors whose only rgts must shift
frappe.qb.update(Table).set(Table.rgt, Table.rgt + diff).where(
(Table.lft < new_parent.lft) & (Table.rgt > new_parent.rgt)
).run()
new_diff = new_parent.rgt - doc.lft
else:
# new root
max_rgt = frappe.qb.from_(Table).select(Max(Table.rgt)).run(pluck=True)[0]
new_diff = max_rgt + 1 - doc.lft
# bring back from dark side
frappe.qb.update(Table).set(
Table.lft, -Table.lft + new_diff
).set(
Table.rgt, -Table.rgt + new_diff
).where(Table.lft < 0).run()
@frappe.whitelist()
def rebuild_tree(doctype, parent_field):
"""
call rebuild_node for all root nodes
"""
# Check for perm if called from client-side
if frappe.request and frappe.local.form_dict.cmd == 'rebuild_tree':
frappe.only_for('System Manager')
meta = frappe.get_meta(doctype)
if not meta.has_field("lft") or not meta.has_field("rgt"):
frappe.throw(_("Rebuilding of tree is not supported for {}").format(frappe.bold(doctype)),
title=_("Invalid Action"))
# get all roots
right = 1
table = DocType(doctype)
column = getattr(table, parent_field)
result = (
frappe.qb.from_(table)
.where(
(column == "") | (column.isnull())
)
.orderby(table.name, order=Order.asc)
.select(table.name)
).run()
frappe.db.auto_commit_on_many_writes = 1
for r in result:
right = rebuild_node(doctype, r[0], right, parent_field)
frappe.db.auto_commit_on_many_writes = 0
def rebuild_node(doctype, parent, left, parent_field):
"""
reset lft, rgt and recursive call for all children
"""
# the right value of this node is the left value + 1
right = left+1
# get all children of this node
table = DocType(doctype)
column = getattr(table, parent_field)
result = (
frappe.qb.from_(table).where(column == parent).select(table.name)
).run()
for r in result:
right = rebuild_node(doctype, r[0], right, parent_field)
# we've got the left value, and now that we've processed
# the children of this node we also know the right value
frappe.db.set_value(doctype, parent, {"lft": left, "rgt": right}, for_update=False, update_modified=False)
#return the right value of this node + 1
return right+1
def validate_loop(doctype, name, lft, rgt):
"""check if item not an ancestor (loop)"""
if name in frappe.get_all(doctype, filters={"lft": ["<=", lft], "rgt": [">=", rgt]}, pluck="name"):
frappe.throw(_("Item cannot be added to its own descendents"), NestedSetRecursionError)
class NestedSet(Document):
def __setup__(self):
if self.meta.get("nsm_parent_field"):
self.nsm_parent_field = self.meta.nsm_parent_field
def on_update(self):
update_nsm(self)
self.validate_ledger()
def on_trash(self, allow_root_deletion=False):
if not getattr(self, 'nsm_parent_field', None):
self.nsm_parent_field = frappe.scrub(self.doctype) + "_parent"
parent = self.get(self.nsm_parent_field)
if not parent and not allow_root_deletion:
frappe.throw(_("Root {0} cannot be deleted").format(_(self.doctype)))
# cannot delete non-empty group
self.validate_if_child_exists()
self.set(self.nsm_parent_field, "")
try:
update_nsm(self)
except frappe.DoesNotExistError:
if self.flags.on_rollback:
frappe.message_log.pop()
else:
raise
def validate_if_child_exists(self):
has_children = frappe.db.count(self.doctype, filters={self.nsm_parent_field: self.name})
if has_children:
frappe.throw(_("Cannot delete {0} as it has child nodes").format(self.name), NestedSetChildExistsError)
def before_rename(self, olddn, newdn, merge=False, group_fname="is_group"):
if merge and hasattr(self, group_fname):
is_group = frappe.db.get_value(self.doctype, newdn, group_fname)
if self.get(group_fname) != is_group:
frappe.throw(_("Merging is only possible between Group-to-Group or Leaf Node-to-Leaf Node"), NestedSetInvalidMergeError)
def after_rename(self, olddn, newdn, merge=False):
if not self.nsm_parent_field:
parent_field = "parent_" + self.doctype.replace(" ", "_").lower()
else:
parent_field = self.nsm_parent_field
# set old_parent for children
frappe.db.set_value(self.doctype, {"old_parent": newdn}, {parent_field: newdn}, update_modified=False, for_update=False)
if merge:
rebuild_tree(self.doctype, parent_field)
def validate_one_root(self):
if not self.get(self.nsm_parent_field):
if self.get_root_node_count() > 1:
frappe.throw(_("""Multiple root nodes not allowed."""), NestedSetMultipleRootsError)
def get_root_node_count(self):
return frappe.db.count(self.doctype, {
self.nsm_parent_field: ''
})
def validate_ledger(self, group_identifier="is_group"):
if hasattr(self, group_identifier) and not bool(self.get(group_identifier)):
if frappe.get_all(self.doctype, {self.nsm_parent_field: self.name, "docstatus": ("!=", 2)}):
frappe.throw(_("{0} {1} cannot be a leaf node as it has children").format(_(self.doctype), self.name))
def get_ancestors(self):
return get_ancestors_of(self.doctype, self.name)
def get_parent(self) -> "NestedSet":
"""Return the parent Document."""
parent_name = self.get(self.nsm_parent_field)
if parent_name:
return frappe.get_doc(self.doctype, parent_name)
def get_children(self) -> Iterator["NestedSet"]:
"""Return a generator that yields child Documents."""
child_names = frappe.get_list(self.doctype, filters={self.nsm_parent_field: self.name}, pluck="name")
for name in child_names:
yield frappe.get_doc(self.doctype, name)
def get_root_of(doctype):
"""Get root element of a DocType with a tree structure"""
from frappe.query_builder.functions import Count
from frappe.query_builder.terms import subqry
Table = DocType(doctype)
t1 = Table.as_("t1")
t2 = Table.as_("t2")
subq = frappe.qb.from_(t2).select(Count("*")).where(
(t2.lft < t1.lft) & (t2.rgt > t1.rgt)
)
result = frappe.qb.from_(t1).select(t1.name).where(
(subqry(subq) == 0) & (t1.rgt > t1.lft)
).run()
return result[0][0] if result else None
def get_ancestors_of(doctype, name, order_by="lft desc", limit=None):
"""Get ancestor elements of a DocType with a tree structure"""
lft, rgt = frappe.db.get_value(doctype, name, ["lft", "rgt"])
result = [d["name"] for d in frappe.db.get_all(doctype, {"lft": ["<", lft], "rgt": [">", rgt]},
"name", order_by=order_by, limit_page_length=limit)]
return result or []
def get_descendants_of(doctype, name, order_by="lft desc", limit=None,
ignore_permissions=False):
'''Return descendants of the current record'''
lft, rgt = frappe.db.get_value(doctype, name, ['lft', 'rgt'])
result = [d["name"] for d in frappe.db.get_list(doctype, {"lft": [">", lft], "rgt": ["<", rgt]},
"name", order_by=order_by, limit_page_length=limit, ignore_permissions=ignore_permissions)]
return result or []
|
StarcoderdataPython
|
1727445
|
<reponame>moeyensj/atm
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
from ..constants import Constants
from .hg import calcQ
__all__ = ["calcTss",
"calcT1"]
S = Constants.SOLAR_CONSTANT
sigma = Constants.STEFAN_BOLTZMANN
def calcTss(r, p_v, eps, G, eta):
"""
Calculate the subsolar temperature.
Parameters
----------
r : float or `~numpy.ndarray` (N)
Distance between asteroid and the Sun in AU.
p_v : float or `~numpy.ndarray` (N)
Geometric albedo.
eps : float or `~numpy.ndarray` (N)
Emissivity.
G : float or `~numpy.ndarray` (N)
HG slope parameter.
eta: float or `~numpy.ndarray` (N)
Beaming parameter.
Returns
-------
float or `~numpy.ndarray` (N)
Returns subsolar temperature in K.
"""
return ((1 - p_v * calcQ(G)) * S / (eps * eta * sigma * (r**2)))**0.25
def calcT1(r, p_v, eps, G, eta):
"""
Calculate the normalized subsolar temperature.
See Myhrvold 2017 (https://doi.org/10.1016/j.icarus.2017.12.024).
Parameters
----------
r : float or `~numpy.ndarray` (N)
Distance between asteroid and the Sun in AU.
p_v : float or `~numpy.ndarray` (N)
Geometric albedo.
eps : float or `~numpy.ndarray` (N)
Emissivity.
G : float or `~numpy.ndarray` (N)
HG slope parameter.
eta: float or `~numpy.ndarray` (N)
Beaming parameter.
Returns
-------
float or `~numpy.ndarray` (N)
Returns normalized subsolar temperature in K.
"""
return calcTss(r, p_v, eps, G, eta) * np.sqrt(r)
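# Editor's note: an illustrative call (not part of the original module); the parameter
# values below are arbitrary assumptions chosen only to show the expected argument types.
#
#   Tss = calcTss(r=2.5, p_v=0.15, eps=0.9, G=0.15, eta=1.0)   # subsolar temperature in K
#   T1 = calcT1(r=2.5, p_v=0.15, eps=0.9, G=0.15, eta=1.0)     # equals Tss * sqrt(r)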
|
StarcoderdataPython
|
125672
|
<reponame>bugengine/BugEngine
from be_typing import TYPE_CHECKING
class LR0Path(object):
def __init__(self, node, use_marker=True):
# type: (LR0DominanceNode, bool) -> None
self._node = node
self._use_marker = use_marker
self._hash_cache = (self._node._item, ) # type: Optional[Tuple[LR0Item,...]]
def _hash(self):
# type: () -> Tuple[LR0Item,...]
return (self._node._item, )
def __hash__(self):
# type: () -> int
if self._hash_cache is None:
self._hash_cache = self._hash()
return hash(self._hash_cache)
def __eq__(self, other):
# type: (Any) -> bool
return isinstance(other, LR0Path) and self._hash() == other._hash()
def extend(self, node, lookahead):
# type: (LR0DominanceNode, int) -> LR0Path
return _LR0Extension(node, self)
def derive_from(self, node):
# type: (LR0DominanceNode) -> LR0Path
return _LR0Derivation(node, self)
def expand_left(self):
# type: () -> LR0Path
return _LR0LeftExpansion(self._node, self)
def expand_next(self, path):
# type: (LR0Path) -> LR0Path
return _LR0Expansion(self._node, self, path)
def to_string(self, name_map):
# type: (List[str]) -> Tuple[List[Text], int]
return self._to_string([], name_map, True, True)
def _to_string(self, sequence, name_map, add_derivation, complete_right):
# type: (List[int], List[str], bool, bool) -> Tuple[List[Text], int]
expanded_symbol = name_map[self._node._item._symbol]
if self._use_marker:
sequence.append(1)
if complete_right:
sequence += self._node._item.rule.production[self._node._item._index:]
sequence_str = u' '.join(name_map[i] if i != 1 else '\u2666' for i in sequence)
if add_derivation:
extra_padding = u'\u2500' * (len(sequence_str) - 2 - len(expanded_symbol))
derivation_str = u'\u2570%s%s\u256f' % (expanded_symbol, extra_padding)
return [sequence_str, derivation_str], max(len(sequence_str), len(derivation_str))
else:
return [sequence_str], len(sequence_str)
class _LR0BaseConstruction(LR0Path):
def __init__(self, node, follow):
# type: (LR0DominanceNode, LR0Path) -> None
self._node = node
self._follow = follow
self._hash_cache = None
def _hash(self):
# type: () -> Tuple[LR0Item,...]
if self._hash_cache is None:
self._hash_cache = (self._node._item, ) + self._follow._hash()
return self._hash_cache
class _LR0Extension(_LR0BaseConstruction):
def _to_string(self, sequence, name_map, add_derivation, complete_right):
# type: (List[int], List[str], bool, bool) -> Tuple[List[Text], int]
sequence.append(self._node._item.rule.production[self._node._item._index])
return self._follow._to_string(sequence, name_map, add_derivation, complete_right)
class _LR0Derivation(_LR0BaseConstruction):
def _to_string(self, sequence, name_map, add_derivation, complete_right):
# type: (List[int], List[str], bool, bool) -> Tuple[List[Text], int]
expanded_symbol = name_map[self._node._item._symbol]
sequence_str = u' '.join(name_map[i] for i in sequence)
if complete_right:
post_sequence_str = u' '.join(
name_map[i] for i in self._node._item.rule.production[self._node._item._index + 1:]
)
else:
post_sequence_str = u''
result, length = self._follow.to_string(name_map)
if sequence_str:
padding = ' ' * (len(sequence_str) + 1)
result = [u'%s %s' % (sequence_str, result[0])] + [u'%s%s' % (padding, s) for s in result[1:]]
length += len(sequence_str) + 1
if post_sequence_str:
result[0] += ' ' * (length - len(result[0]) + 1) + post_sequence_str
length += len(post_sequence_str) + 1
if add_derivation:
extra_padding = u'\u2500' * (length - 2 - len(expanded_symbol))
derivation_str = u'\u2570%s%s\u256f' % (expanded_symbol, extra_padding)
return result + [derivation_str], max(length, len(derivation_str))
else:
return result, length
class _LR0LeftExpansion(_LR0BaseConstruction):
def _to_string(self, sequence, name_map, add_derivation, complete_right):
# type: (List[int], List[str], bool, bool) -> Tuple[List[Text], int]
sequence += self._node._item.rule.production[:self._node._item._index]
return self._follow._to_string(sequence, name_map, add_derivation, complete_right)
class _LR0Expansion(_LR0BaseConstruction):
def __init__(self, node, follow, expanded_path):
# type: (LR0DominanceNode, LR0Path, LR0Path) -> None
self._node = node
self._follow = follow
self._next = expanded_path
self._hash_cache = None
def _to_string(self, sequence, name_map, add_derivation, complete_right):
# type: (List[int], List[str], bool, bool) -> Tuple[List[Text], int]
expanded_symbol = name_map[self._node._item._symbol]
sequence_str = u' '.join(name_map[i] for i in sequence)
result, length = self._follow._to_string([], name_map, False, False)
next, next_len = self._next._to_string([], name_map, False, complete_right)
if sequence_str:
padding = ' ' * (len(sequence_str) + 1)
result = [u'%s %s' % (sequence_str, result[0])] + [u'%s%s' % (padding, s) for s in result[1:]]
length += len(sequence_str) + 1
max_length = length
for i, (n1, n2) in enumerate(zip(result, next)):
result[i] = '%s%s%s' % (n1, ' ' * (length - len(n1) + 1), n2)
max_length = max(max_length, len(result[i]))
for final in next[i + 1:]:
result.append(' ' * (length + 1) + final)
if add_derivation:
extra_padding = u'\u2500' * (max_length - 2 - len(expanded_symbol))
derivation_str = u'\u2570%s%s\u256f' % (expanded_symbol, extra_padding)
return result + [derivation_str], max(max_length, len(derivation_str))
else:
return result, max_length
if TYPE_CHECKING:
from be_typing import Any, List, Optional, Text, Tuple, Union
from .lr0dominancenode import LR0DominanceNode
from .lr0item import LR0Item
|
StarcoderdataPython
|
105037
|
import argparse, sys, json, yaml
import pandas as pd
import asyncio
from iotsim.utils import to_iterable
from iotsim.runtime.destinations import known_destinations
from iotsim.assembler import from_config
if __name__ != '__main__':
sys.exit("This program must be run as a standalone script")
parser = argparse.ArgumentParser(description='Assembly runner')
parser.add_argument('assembly_config_filename',
help="Name of YAML config file for the assembly.")
parser.add_argument('-c', '--config', metavar='runner_config_filename',
help='Name of YAML config file for the runner.')
parser.add_argument('-t', '--ticks', metavar='ticks', type=int,
help='Number of time ticks to go, int >=0. '
'Zero means infinite run.')
parser.add_argument('-p', '--pace', metavar='pace', type=float,
help="Factor to speed up (>1) or slow down (<1) the assembly.")
parser.add_argument('-b', '--start-time', metavar='start_time',
help="Start time for the run. The default is 'now'.")
parser.add_argument('-d', '--start-delta', metavar='start_delta',
help="Seconds added to the local machine's time to compensate "
"clock skew at destination. May be negative. The default is 0.")
defaults=dict(
ticks=0,
message_format='json',
start_time='now',
start_delta=2,
pace=1,
routing={'reading': ['stdout'], 'truth': ['stdout']},
destinations=dict(),
)
def get_param_value(param):
global defaults, args, config
if hasattr(args, param) and getattr(args, param) is not None:
return getattr(args, param)
else:
return config.get(param, defaults.get(param, None))
### Parse config file and command line arguments
args = parser.parse_args()
config = dict()
if args.config is not None:
with open(str(args.config), 'r') as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
# else:
# config=dict(timing=dict(),
# signals=[],
# destinations=dict(),
# routing=defaults['routing'])
assembly_name = get_param_value('name')
message_format = get_param_value('message_format')
ticks = get_param_value('ticks')
if ticks == 0:
ticks = None
tick_counter = ticks
pace = get_param_value('pace')
start_time = get_param_value('start_time')
start_delta = get_param_value('start_delta')
start_time = pd.Timestamp(start_time) + pd.Timedelta(start_delta, unit='s')
signal_label = 'signal'
value_label = 'value'
event_time_label = 'event_time'
arrival_time_label = 'arrival_time'
meta_label = 'meta'
#### Create an assembly
assembly = from_config(args.assembly_config_filename)
if assembly_name is None:
assembly_name = 'assembly' if assembly.name is None else assembly.name
tick_duration = pd.Timedelta(assembly.tick, unit='s')
assembly_runner = assembly.launch()
### Set up destinations
active_destinations = dict()
destination_routing = dict(reading=[], truth=[])
configured_routing = get_param_value('routing')
configured_destinations = get_param_value('destinations')
for dataview in destination_routing.keys():
routing = to_iterable(configured_routing[dataview])
for destination in routing:
if destination not in active_destinations:
if destination in configured_destinations:
cls, kwargs = known_destinations[
configured_destinations[destination]['type']]
else:
cls, kwargs = known_destinations[destination]
try:
additional_params = configured_destinations[destination]['parameters']
except KeyError:
pass
else:
kwargs.update(additional_params)
handler = cls(**kwargs)
active_destinations[destination] = handler
else:
handler = active_destinations[destination]
destination_routing[dataview].append(handler)
### Define the real-time flow of the messages
# assembly_snapshot.readings -> list [ named_tuple (signal_name, value, arrived=True, arrival_delay)]
# assembly_snapshot.all_readings -> include lost readings (arrived==False)
#
# example:
# assembly_snapshot.readings[0].value
#
# assembly_snapshot.truths -> list [ named_tuple (signal_name, value)]
#
# assembly_snapshot.signal(signal_name) -> named_tuple (truth, reading)
# truth: named_tuple (signal_name, value)
# reading: None or named_tuple (signal_name, value, arrived, arrival_delay)
#
# example:
# assembly_snapshot.signal('control').reading.value
async def deliver_datapoint(datapoint, dataview, delivery_time, event_time,
arrival_time=None):
message_data = {
meta_label: "{}:{}".format(assembly_name, dataview),
signal_label: datapoint.signal_name,
value_label: datapoint.value,
event_time_label: str(event_time),
}
if dataview == 'reading':
message_data[arrival_time_label] = str(arrival_time)
message = json.dumps(message_data)
wait_until_delivery = (delivery_time - pd.Timestamp('now')).total_seconds()
await asyncio.sleep(wait_until_delivery)
if wait_until_delivery < 0:
raise RuntimeError("System fell behind assembly's schedule")
for destination_handler in destination_routing[dataview]:
destination_handler.send(message)
async def main():
global tick_counter
latest_delivery_time = pd.Timestamp('now')
event_time=start_time
for asm_snapshot in assembly_runner:
next_tick_at = pd.Timestamp('now') + pd.Timedelta(tick_duration / pace, unit='s')
event_time = event_time + tick_duration
for reading in asm_snapshot.readings:
if reading.value is None or not reading.arrived:
continue
arrival_time = event_time + pd.Timedelta(reading.arrival_delay, unit='s')
delivery_time = next_tick_at + pd.Timedelta(reading.arrival_delay / pace, unit='s')
asyncio.ensure_future(deliver_datapoint(
reading, 'reading', delivery_time, event_time, arrival_time))
if delivery_time > latest_delivery_time:
latest_delivery_time = delivery_time
for truth in asm_snapshot.truths:
asyncio.ensure_future(deliver_datapoint(
truth, 'truth', next_tick_at, event_time))
if next_tick_at > latest_delivery_time:
latest_delivery_time = next_tick_at
if tick_counter is not None:
if tick_counter == 0:
break
tick_counter -= 1
wait_until_next_tick = (next_tick_at - pd.Timestamp('now')).total_seconds()
await asyncio.sleep(wait_until_next_tick)
if wait_until_next_tick < 0:
raise RuntimeError("Pace is too fast. System fell behind.")
await asyncio.sleep(
(latest_delivery_time - pd.Timestamp('now')).total_seconds() + 1
)
if tick_counter is not None and tick_counter > 0:
sys.exit("Assembly ran out after {} ticks, which is less than the required {}".
format(ticks - tick_counter, ticks))
### Run
loop = asyncio.get_event_loop()
task = loop.create_task(main())
loop.run_until_complete(task)
|
StarcoderdataPython
|
3325531
|
# coding: utf-8
"""
OANDA v20 REST API
The full OANDA v20 REST API Specification. This specification defines how to interact with v20 Accounts, Trades, Orders, Pricing and more. To authenticate use the string 'Bearer ' followed by the token which can be obtained at https://www.oanda.com/demo-account/tpa/personal_token # noqa: E501
OpenAPI spec version: 3.0.23
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CandlestickData(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'o': 'str',
'h': 'str',
'l': 'str',
'c': 'str'
}
attribute_map = {
'o': 'o',
'h': 'h',
'l': 'l',
'c': 'c'
}
def __init__(self, o=None, h=None, l=None, c=None): # noqa: E501
"""CandlestickData - a model defined in Swagger""" # noqa: E501
self._o = None
self._h = None
self._l = None
self._c = None
self.discriminator = None
if o is not None:
self.o = o
if h is not None:
self.h = h
if l is not None:
self.l = l
if c is not None:
self.c = c
@property
def o(self):
"""Gets the o of this CandlestickData. # noqa: E501
The first (open) price in the time-range represented by the candlestick. # noqa: E501
:return: The o of this CandlestickData. # noqa: E501
:rtype: str
"""
return self._o
@o.setter
def o(self, o):
"""Sets the o of this CandlestickData.
The first (open) price in the time-range represented by the candlestick. # noqa: E501
:param o: The o of this CandlestickData. # noqa: E501
:type: str
"""
self._o = o
@property
def h(self):
"""Gets the h of this CandlestickData. # noqa: E501
The highest price in the time-range represented by the candlestick. # noqa: E501
:return: The h of this CandlestickData. # noqa: E501
:rtype: str
"""
return self._h
@h.setter
def h(self, h):
"""Sets the h of this CandlestickData.
The highest price in the time-range represented by the candlestick. # noqa: E501
:param h: The h of this CandlestickData. # noqa: E501
:type: str
"""
self._h = h
@property
def l(self):
"""Gets the l of this CandlestickData. # noqa: E501
The lowest price in the time-range represented by the candlestick. # noqa: E501
:return: The l of this CandlestickData. # noqa: E501
:rtype: str
"""
return self._l
@l.setter
def l(self, l):
"""Sets the l of this CandlestickData.
The lowest price in the time-range represented by the candlestick. # noqa: E501
:param l: The l of this CandlestickData. # noqa: E501
:type: str
"""
self._l = l
@property
def c(self):
"""Gets the c of this CandlestickData. # noqa: E501
The last (closing) price in the time-range represented by the candlestick. # noqa: E501
:return: The c of this CandlestickData. # noqa: E501
:rtype: str
"""
return self._c
@c.setter
def c(self, c):
"""Sets the c of this CandlestickData.
The last (closing) price in the time-range represented by the candlestick. # noqa: E501
:param c: The c of this CandlestickData. # noqa: E501
:type: str
"""
self._c = c
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CandlestickData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
StarcoderdataPython
|
3337570
|
<reponame>onezens/python<filename>virtualenv/virtualenv.py
#!/usr/bin/python
#encoding=utf8
# create an isolated environment: virtualenv --no-site-packages venv
# the newly created Python environment is placed in the venv directory under the current directory; with this venv Python environment, you can enter it using source:
# enter the venv Python environment with source: source venv/bin/activate
# inside the venv environment, packages installed with pip are installed into this venv environment only,
# and the system Python environment is not affected at all; in other words, the venv environment is created specifically for the myproject application.
# to exit the current venv environment, use the deactivate command:
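# Editor's note: the shell workflow described above, collected in one place for
# convenience (commands unchanged from the comments):
#   virtualenv --no-site-packages venv   # create an isolated environment in ./venv
#   source venv/bin/activate             # enter the environment
#   pip install <package>                # installs only into ./venv
#   deactivate                           # leave the environment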
|
StarcoderdataPython
|
1621741
|
<gh_stars>0
#
# Copyright 2020 Nebulon, Inc.
# All Rights Reserved.
#
# DISCLAIMER: THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO
# EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
# OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
from .graphqlclient import GraphQLParam, NebMixin
from datetime import datetime
from .common import PageInput, read_value
from .filters import StringFilter
from .sorting import SortDirection
from .npods import NPodSpuInput, \
BondType, \
BondLACPTransmitRate, \
BondTransmitHashPolicy
from .updates import UpdateHistory
from .tokens import TokenResponse
__all__ = [
"SpuSort",
"SpuFilter",
"NTPServerInput",
"SecureEraseSPUInput",
"ReplaceSpuInput",
"SetNTPServersInput",
"NTPServer",
"IPInfoState",
"Spu",
"SpuList",
"SpuCustomDiagnostic",
"SpuMixin"
]
class SpuSort:
"""A sort object for services processing units (SPU)
Allows sorting SPUs on common properties. The sort object allows only one
property to be specified.
"""
def __init__(
self,
serial: SortDirection = None
):
"""Constructs a new sort object for SPUs
Allows sorting SPUs on common properties. The sort object allows
only one property to be specified.
:param serial: Sort direction for the ``serial`` property
:type serial: SortDirection, optional
"""
self.__serial = serial
@property
def serial(self) -> SortDirection:
"""Sort direction for the ``serial`` property"""
return self.__serial
@property
def as_dict(self):
result = dict()
result["serial"] = self.serial
return result
class SpuFilter:
"""A filter object to filter services processing units (SPU)
Allows filtering for specific SPUs registered in nebulon ON. The
filter allows only one property to be specified. If filtering on multiple
properties is needed, use the ``and_filter`` and ``or_filter`` options to
concatenate multiple filters.
"""
def __init__(
self,
serial: StringFilter = None,
not_in_npod: bool = None,
and_filter=None,
or_filter=None
):
"""Constructs a new filter object
The filter allows only one property to be specified. If filtering on
multiple properties is needed, use the ``and_filter`` and ``or_filter``
options to concatenate multiple filters.
:param serial: Filter based on SPU serial number
:type serial: StringFilter, optional
:param not_in_npod: Filter for SPUs that are not in a nPod
:type not_in_npod: bool, optional
:param and_filter: Concatenate another filter with a logical AND
:type and_filter: SpuFilter, optional
:param or_filter: Concatenate another filter with a logical OR
:type or_filter: SpuFilter, optional
"""
self.__serial = serial
self.__not_in_npod = not_in_npod
self.__and = and_filter
self.__or = or_filter
@property
def serial(self) -> StringFilter:
"""Filter based on SPU serial number"""
return self.__serial
@property
def not_in_npod(self) -> bool:
"""Filter for SPUs that are not in a nPod"""
return self.__not_in_npod
@property
def and_filter(self):
"""Allows concatenation of multiple filters via logical AND"""
return self.__and
@property
def or_filter(self):
"""Allows concatenation of multiple filters via logical OR"""
return self.__or
@property
def as_dict(self):
result = dict()
result["serial"] = self.serial
result["notInNPod"] = self.not_in_npod
result["and"] = self.and_filter
result["or"] = self.or_filter
return result
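# Editor's note: an illustrative sketch (not part of the original SDK module) composing
# two filters with a logical AND as described in the docstring above; `my_serial_filter`
# stands in for a pre-built StringFilter instance.
#
#   spu_filter = SpuFilter(
#       not_in_npod=True,
#       and_filter=SpuFilter(serial=my_serial_filter),
#   )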
class NTPServerInput:
"""An input object to configure a NTP server
NTP servers are used for automatic time configuration on the services
processing unit (SPU). The SPU has default network time servers (NTP)
configured. However, customers can customize them if the default NTP
servers are not accessible or different time settings are required.
"""
def __init__(
self,
server_hostname: str,
pool: bool = None,
prefer: bool = None
):
"""Constructs a new input object to configure NTP servers
NTP servers are used for automatic time configuration on the services
processing unit (SPU). The SPU has default network time servers (NTP)
configured. However, customers can customize them if the default NTP
servers are not accessible or different time settings are required.
:param server_hostname: The DNS hostname of the NTP server to use
:type server_hostname: str
:param pool: Indicates if the specified NTP server hostname is a NTP
pool. By default, this value is considered ``False``.
:type pool: bool, optional
:param prefer: Indicates if the specified NTP server is the preferred
NTP server. By default, this value is considered ``False``.
:type prefer: bool, optional
"""
self.__server_hostname = server_hostname
self.__pool = pool
self.__prefer = prefer
@property
def server_hostname(self) -> str:
"""The DNS hostname of the NTP server"""
return self.__server_hostname
@property
def pool(self) -> bool:
"""Indicates if the specified NTP server hostname is a NTP pool"""
return self.__pool
@property
def prefer(self) -> bool:
"""Indicates if the specified NTP server is the preferred NTP server"""
return self.__prefer
@property
def as_dict(self):
result = dict()
result["serverHostname"] = self.server_hostname
result["pool"] = self.pool
result["prefer"] = self.prefer
return result
class SecureEraseSPUInput:
"""An input object to secure-erase a services processing unit (SPU)
The secure erase functionality allows a deep-erase of data stored on the
physical drives attached to the SPU. Only SPUs that are not part of a
nPod can be secure-erased.
"""
def __init__(
self,
spu_serial: str
):
"""Constructs a new input object for secure-erase a SPU
The secure erase functionality allows a deep-erase of data stored on
the physical drives attached to the SPU. Only SPUs that are not part
of a nPod can be secure-erased.
:param spu_serial: The serial number of the SPU to secure-erase
:type spu_serial: str
"""
self.__spu_serial = spu_serial
@property
def spu_serial(self) -> str:
"""The serial number of the SPU"""
return self.__spu_serial
@property
def as_dict(self):
result = dict()
result["spuSerial"] = self.spu_serial
return result
class ReplaceSpuInput:
"""An input object to replace a services processing unit (SPU)
The replace services processing unit (SPU) operation is used to transition
the configuration of an old, likely failed, SPU to a new replacement unit
and allows modifying the configuration during the process.
"""
def __init__(
self,
npod_uuid: str,
previous_spu_serial: str,
new_spu_info: NPodSpuInput,
sset_uuid: str
):
"""Constructs a new input object to replace a SPU
The replace services processing unit (SPU) operation is used to
transition the configuration of an old, likely failed, SPU to a new
replacement unit and allows modifying the configuration during the
process.
:param npod_uuid: The unique identifier of the nPod of the old SPU
that is being replaced
:type npod_uuid: str
:param previous_spu_serial: The serial number of the old SPU that is
being replaced
:type previous_spu_serial: str
:param new_spu_info: Configuration information for the new SPU
:type new_spu_info: NPodSpuInput
:param sset_uuid: The storage set information for the existing SPU.
This information can be obtained from the active replacement
alert and only used to verify that the correct SPU is selected.
:type sset_uuid: str
"""
self.__npod_uuid = npod_uuid
self.__previous_spu_serial = previous_spu_serial
self.__new_spu_info = new_spu_info
self.__sset_uuid = sset_uuid
@property
def npod_uuid(self) -> str:
"""The UUID of the nPod of the old SPU that is being replaced"""
return self.__npod_uuid
@property
def previous_spu_serial(self) -> str:
"""The serial number of the old SPU that is being replaced"""
return self.__previous_spu_serial
@property
def new_spu_info(self) -> NPodSpuInput:
"""Configuration information for the new SPU"""
return self.__new_spu_info
@property
def sset_uuid(self) -> str:
"""The storage set information for the existing SPU"""
return self.__sset_uuid
@property
def as_dict(self):
result = dict()
result["nPodUUID"] = self.npod_uuid
result["previousSPUSerial"] = self.previous_spu_serial
result["newSPUInfo"] = self.new_spu_info
result["ssetUUID"] = self.sset_uuid
return result
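# Editor's note: an illustrative sketch (not part of the original SDK module) building a
# replacement request from the parameters documented above; all identifier values are
# placeholders and `new_spu` stands in for a pre-built NPodSpuInput.
#
#   replace_input = ReplaceSpuInput(
#       npod_uuid="<npod-uuid>",
#       previous_spu_serial="<old-spu-serial>",
#       new_spu_info=new_spu,
#       sset_uuid="<sset-uuid>",
#   )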
class SetNTPServersInput:
"""An input object to configure NTP servers
NTP servers are used for automatic time configuration on the services
processing unit (SPU). The SPU has default network time servers (NTP)
configured. However, customers can customize them if the default NTP
servers are not accessible or different time settings are required.
"""
def __init__(
self,
servers: [NTPServerInput],
spu_serial: str = None,
npod_uuid: str = None
):
"""Constructs a new input object to configure NTP servers
NTP servers are used for automatic time configuration on the services
processing unit (SPU). The SPU has default network time servers (NTP)
configured. However, customers can customize them if the default NTP
servers are not accessible or different time settings are required.
Either a SPU serial number or a nPod uuid must be specified.
:param servers: List of NTP server configurations that shall be applied
to an SPU
:type servers: [NTPServerInput]
:param spu_serial: The serial number of the services processing unit
:type spu_serial: str, optional
:param npod_uuid: The unique identifier of the nPod
:type npod_uuid: str, optional
"""
self.__spu_serial = spu_serial
self.__pod_uuid = npod_uuid
self.__servers = servers
@property
def spu_serial(self) -> str:
"""The serial number of the services processing unit"""
return self.__spu_serial
@property
def pod_uuid(self) -> str:
"""The unique identifier of the nPod"""
return self.__pod_uuid
@property
def servers(self) -> [NTPServerInput]:
"""List of NTP server configurations that shall be applied to an SPU"""
return self.__servers
@property
def as_dict(self):
result = dict()
result["spuSerial"] = self.spu_serial
result["podUUID"] = self.pod_uuid
result["servers"] = self.servers
return result
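# Editor's note: an illustrative sketch (not part of the original SDK module) staging two
# NTP servers for a single SPU, using only the constructor parameters documented above;
# the serial number is a placeholder.
#
#   ntp_input = SetNTPServersInput(
#       servers=[
#           NTPServerInput(server_hostname="0.pool.ntp.org", pool=True, prefer=True),
#           NTPServerInput(server_hostname="time.example.com"),
#       ],
#       spu_serial="<spu-serial>",
#   )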
class NTPServer:
"""A network time protocol server
NTP servers are used for automatic time configuration on the services
processing unit (SPU).
"""
def __init__(
self,
response: dict
):
"""Constructs a new network time protocol server
This constructor expects a dict() object from the nebulon ON API. It
will check the returned data against the currently implemented schema
of the SDK.
:param response: The JSON response from the server
:type response: dict
:raises ValueError: An error if illegal data is returned from the server
"""
self.__server_hostname = read_value(
"serverHostname", response, str, True)
self.__pool = read_value(
"pool", response, bool, True)
self.__prefer = read_value(
"prefer", response, bool, True)
@property
def server_hostname(self) -> str:
"""The DNS hostname of the NTP server"""
return self.__server_hostname
@property
def pool(self) -> bool:
"""Indicates if the specified NTP server hostname is a NTP pool"""
return self.__pool
@property
def prefer(self) -> bool:
"""Indicates if the specified NTP server is the preferred NTP server"""
return self.__prefer
@staticmethod
def fields():
return [
"serverHostname",
"pool",
"prefer",
]
class IPInfoState:
"""A state for IP configuration of a SPU logical network interface"""
def __init__(
self,
response: dict
):
"""Constructs a new IPInfoState object
This constructor expects a dict() object from the nebulon ON API. It
will check the returned data against the currently implemented schema
of the SDK.
:param response: The JSON response from the server
:type response: dict
:raises ValueError: An error if illegal data is returned from the server
"""
self.__dhcp = read_value(
"dhcp", response, bool, True)
self.__addresses = read_value(
"addresses", response, str, True)
self.__gateway = read_value(
"gateway", response, str, True)
self.__bond_mode = read_value(
"bondMode", response, BondType, True)
self.__bond_transmit_hash_policy = read_value(
"bondTransmitHashPolicy", response, BondTransmitHashPolicy, False)
self.__bond_mii_monitor_milli_seconds = read_value(
"bondMIIMonitorMilliSeconds", response, int, False)
self.__bond_lacp_transmit_rate = read_value(
"bondLACPTransmitRate", response, BondLACPTransmitRate, False)
self.__interface_names = read_value(
"interfaceNames", response, str, True)
self.__interface_mac = read_value(
"interfaceMAC", response, str, True)
self.__half_duplex = read_value(
"halfDuplex", response, bool, True)
self.__speed = read_value(
"speed", response, int, True)
self.__locked_speed = read_value(
"lockedSpeed", response, bool, True)
self.__mtu = read_value(
"mtu", response, int, True)
self.__switch_name = read_value(
"switchName", response, str, True)
self.__switch_mac = read_value(
"switchMAC", response, str, True)
self.__switch_port = read_value(
"switchPort", response, str, True)
@property
def dhcp(self) -> bool:
"""Indicates if DHCP is used for IP addressing"""
return self.__dhcp
@property
def addresses(self) -> [str]:
"""List of IPv4 or IPv6 addresses in CIDR format"""
return self.__addresses
@property
def gateway(self) -> str:
"""The gateway IP address specified for the interface"""
return self.__gateway
@property
def bond_mode(self) -> BondType:
"""The link aggregation mode for the interface"""
return self.__bond_mode
@property
def bond_transmit_hash_policy(self) -> BondTransmitHashPolicy:
"""The active transmit hash policy for the link aggregation"""
return self.__bond_transmit_hash_policy
@property
def bond_mii_monitor_milli_seconds(self) -> int:
"""The active MII monitoring interval in ms for the link aggregation"""
return self.__bond_mii_monitor_milli_seconds
@property
def bond_lacp_transmit_rate(self) -> BondLACPTransmitRate:
"""The active LACP transmit rate for the link aggregation"""
return self.__bond_lacp_transmit_rate
@property
def interface_names(self) -> list:
"""The names of the physical interfaces for the logical interface"""
return self.__interface_names
@property
def interface_mac(self) -> str:
"""The physical address of the interface"""
return self.__interface_mac
@property
def half_duplex(self) -> bool:
"""Indicates if the interface operates in half-duplex"""
return self.__half_duplex
@property
def speed(self) -> int:
"""Indicates the network interface speed"""
return self.__speed
@property
def locked_speed(self) -> bool:
"""Indicates if the network interface speed is locked"""
return self.__locked_speed
@property
def mtu(self) -> int:
"""maximum transfer unit"""
return self.__mtu
@property
def switch_name(self) -> str:
"""The name of the switch this interface connects to"""
return self.__switch_name
@property
def switch_mac(self) -> str:
"""The physical address of the switch port this interface connects to"""
return self.__switch_mac
@property
def switch_port(self) -> str:
"""The port identifier of the switch this interface connects to"""
return self.__switch_port
@staticmethod
def fields():
return [
"dhcp",
"addresses",
"gateway",
"bondMode",
"bondTransmitHashPolicy",
"bondMIIMonitorMilliSeconds",
"bondLACPTransmitRate",
"interfaceNames",
"interfaceMAC",
"halfDuplex",
"speed",
"lockedSpeed",
"mtu",
"switchName",
"switchMAC",
"switchPort",
]
class Spu:
"""A services processing unit"""
def __init__(
self,
response: dict
):
"""Constructs a new services processing unit
This constructor expects a dict() object from the nebulon ON API. It
will check the returned data against the currently implemented schema
of the SDK.
:param response: The JSON response from the server
:type response: dict
:raises ValueError: An error if illegal data is returned from the server
"""
self.__npod_uuid = read_value(
"nPod.uuid", response, str, False)
self.__host_uuid = read_value(
"host.uuid", response, str, False)
self.__serial = read_value(
"serial", response, str, True)
self.__version = read_value(
"version", response, str, True)
self.__spu_type = read_value(
"spuType", response, str, True)
self.__hw_revision = read_value(
"hwRevision", response, str, True)
self.__control_interface = read_value(
"controlInterface", response, IPInfoState, False)
self.__data_interfaces = read_value(
"dataInterfaces", response, IPInfoState, False)
self.__lun_uuids = read_value(
"luns.uuid", response, str, False)
self.__lun_count = read_value(
"lunCount", response, int, True)
self.__physical_drive_wwns = read_value(
"physicalDrives.wwn", response, str, False)
self.__physical_drive_count = read_value(
"physicalDriveCount", response, int, True)
self.__npod_member_can_talk_count = read_value(
"podMemberCanTalkCount", response, int, True)
self.__uptime_seconds = read_value(
"uptimeSeconds", response, int, True)
self.__update_history = read_value(
"updateHistory", response, UpdateHistory, True)
self.__last_reported = read_value(
"lastReported", response, datetime, True)
self.__reset_reason_int = read_value(
"resetReasonInt", response, int, True)
self.__reset_reason_string = read_value(
"resetReasonString", response, str, True)
self.__ntp_servers = read_value(
"ntpServers", response, NTPServer, True)
self.__ntp_status = read_value(
"ntpStatus", response, str, True)
self.__time_zone = read_value(
"timeZone", response, str, True)
self.__uefi_version = read_value(
"uefiVersion", response, str, True)
self.__wiping = read_value(
"wiping", response, bool, True)
@property
def npod_uuid(self) -> str:
"""The services processing unit's nPod identifier"""
return self.__npod_uuid
@property
def host_uuid(self) -> str:
"""The unique identifier of the host the SPU is installed in"""
return self.__host_uuid
@property
def serial(self) -> str:
"""The unique serial number of the SPU"""
return self.__serial
@property
def version(self) -> str:
"""The version of nebOS that is running on the SPU"""
return self.__version
@property
def spu_type(self) -> str:
"""The type of SPU"""
return self.__spu_type
@property
def hw_revision(self) -> str:
"""The hardware revision of the SPU"""
return self.__hw_revision
@property
def control_interface(self) -> IPInfoState:
"""Network information for the control interface"""
return self.__control_interface
@property
def data_interfaces(self) -> [IPInfoState]:
"""Network information for the data interfaces"""
return self.__data_interfaces
@property
def lun_uuids(self) -> [str]:
"""List of unique identifiers of LUNs provisioned on the SPU"""
return self.__lun_uuids
@property
def lun_count(self) -> int:
"""Number of provisioned LUNs on the SPU"""
return self.__lun_count
@property
def physical_drive_wwns(self) -> [str]:
"""List of WWNs for all physical drives attached to the SPU"""
return self.__physical_drive_wwns
@property
def physical_drive_count(self) -> int:
"""Number of physical drives attached to the SPU"""
return self.__physical_drive_count
@property
def npod_member_can_talk_count(self) -> int:
"""Number of SPUs that can successfully communicate with each other"""
return self.__npod_member_can_talk_count
@property
def uptime_seconds(self) -> int:
"""Uptime of the services processing unit in seconds"""
return self.__uptime_seconds
@property
def update_history(self) -> [UpdateHistory]:
"""List of historical updates that were applied to the SPU"""
return self.__update_history
@property
def last_reported(self) -> datetime:
"""Date and time when the SPU last reported state to nebulon ON"""
return self.__last_reported
@property
def reset_reason_int(self) -> int:
"""A int representation of the reason why a SPU was reset"""
return self.__reset_reason_int
@property
def reset_reason_string(self) -> str:
"""A string representation of the reason why a SPU was reset"""
return self.__reset_reason_string
@property
def ntp_servers(self) -> [NTPServer]:
"""List of configured NTP servers"""
return self.__ntp_servers
@property
def ntp_status(self) -> str:
"""Status message for NTP"""
return self.__ntp_status
@property
def time_zone(self) -> str:
"""The configured time zone"""
return self.__time_zone
@property
def uefi_version(self) -> str:
"""Version for UEFI"""
return self.__uefi_version
@property
def wiping(self) -> bool:
"""Indicates if the SPU is doing a secure wipe"""
return self.__wiping
@staticmethod
def fields():
return [
"nPod{uuid}",
"host{uuid}",
"serial",
"version",
"spuType",
"hwRevision",
"controlInterface{%s}" % ",".join(IPInfoState.fields()),
"dataInterfaces{%s}" % ",".join(IPInfoState.fields()),
"luns{uuid}",
"lunCount",
"physicalDrives{wwn}",
"physicalDriveCount",
"podMemberCanTalkCount",
"uptimeSeconds",
"updateHistory{%s}" % ",".join(UpdateHistory.fields()),
"lastReported",
"resetReasonInt",
"resetReasonString",
"ntpServers{%s}" % ",".join(NTPServer.fields()),
"ntpStatus",
"timeZone",
"uefiVersion",
"wiping"
]
class SpuList:
"""Paginated services processing unit (SPU) list
Contains a list of SPU objects and information for
pagination. By default a single page includes a maximum of `100` items
unless specified otherwise in the paginated query.
    Consumers should always check the property ``more``, because by default
    the server does not return the full list of SPUs but only one page.
"""
def __init__(
self,
response: dict
):
"""Constructs a new SPU list object
This constructor expects a dict() object from the nebulon ON API. It
will check the returned data against the currently implemented schema
of the SDK.
:param response: The JSON response from the server
:type response: dict
:raises ValueError: An error if illegal data is returned from the server
"""
self.__items = read_value(
"items", response, Spu, True)
self.__more = read_value(
"more", response, bool, True)
self.__total_count = read_value(
"totalCount", response, int, True)
self.__filtered_count = read_value(
"filteredCount", response, int, True)
@property
def items(self) -> [Spu]:
"""List of SPUs in the pagination list"""
return self.__items
@property
def more(self) -> bool:
"""Indicates if there are more items on the server"""
return self.__more
@property
def total_count(self) -> int:
"""The total number of items on the server"""
return self.__total_count
@property
def filtered_count(self) -> int:
"""The number of items on the server matching the provided filter"""
return self.__filtered_count
@staticmethod
def fields():
return [
"items{%s}" % ",".join(Spu.fields()),
"more",
"totalCount",
"filteredCount",
]
class SpuCustomDiagnostic:
"""A staged custom diagnostics request
    SPU custom diagnostic requests allow customers to run arbitrary
diagnostic commands on the services processing units as part of
troubleshooting issues during a support case.
"""
def __init__(
self,
response: dict
):
"""Constructs a new SpuCustomDiagnostic object
This constructor expects a dict() object from the nebulon ON API. It
will check the returned data against the currently implemented schema
of the SDK.
:param response: The JSON response from the server
:type response: dict
:raises ValueError: An error if illegal data is returned from the server
"""
self.__request_uuid = read_value(
"requestUID", response, str, True)
self.__diagnostic_name = read_value(
"diagnosticName", response, str, True)
self.__spu_serial = read_value(
"spuSerial", response, str, True)
self.__once_only = read_value(
"onceOnly", response, bool, True)
self.__note = read_value(
"note", response, str, True)
@property
def request_uuid(self) -> str:
"""The unique identifier or the custom diagnostic request"""
return self.__request_uuid
@property
def diagnostic_name(self) -> str:
"""The human readable name of the custom diagnostic request"""
return self.__diagnostic_name
@property
def spu_serial(self) -> str:
"""The serial number of the SPU on which to run diagnostic"""
return self.__spu_serial
@property
def once_only(self) -> bool:
"""Indicates if this request will disappear after execution"""
return self.__once_only
@property
def note(self) -> str:
"""An optional note for the diagnostic request"""
return self.__note
@staticmethod
def fields():
return [
"requestUID",
"diagnosticName",
"spuSerial",
"onceOnly",
"note",
]
class SpuMixin(NebMixin):
"""Mixin to add SPU related methods to the GraphQL client"""
def get_spus(
self,
page: PageInput = None,
spu_filter: SpuFilter = None,
sort: SpuSort = None
) -> SpuList:
"""Retrieves a list of SPUs
:param page: The requested page from the server. This is an optional
argument and if omitted the server will default to returning the
first page with a maximum of `100` items.
:type page: PageInput, optional
:param spu_filter: A filter object to filter the SPUs on the
server. If omitted, the server will return all objects as a
paginated response.
:type spu_filter: SpuFilter, optional
:param sort: A sort definition object to sort the SPU objects on
supported properties. If omitted objects are returned in the order
as they were created in.
:type sort: SpuSort, optional
:returns SpuList: A paginated list of SPUs
:raises GraphQLError: An error with the GraphQL endpoint.
"""
# setup query parameters
parameters = dict()
parameters["page"] = GraphQLParam(
page, "PageInput", False)
parameters["filter"] = GraphQLParam(
spu_filter, "SPUFilter", False)
parameters["sort"] = GraphQLParam(
sort, "SPUSort", False)
# make the request
response = self._query(
name="getSPUs",
params=parameters,
fields=SpuList.fields()
)
# convert to object
return SpuList(response)
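    # Pagination sketch (illustrative only, not part of the SDK): page through all SPUs
    # by following the ``more`` flag of the returned SpuList. The ``page`` keyword of
    # PageInput is an assumption here -- check the PageInput definition for the exact name.
    #
    #   page_number = 1
    #   while True:
    #       spu_list = client.get_spus(page=PageInput(page=page_number))
    #       for spu in spu_list.items:
    #           print(spu.serial, spu.version)
    #       if not spu_list.more:
    #           break
    #       page_number += 1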
def get_spu_custom_diagnostics(
self,
spu_serial: str
) -> [SpuCustomDiagnostic]:
"""Retrieves a list of custom diagnostic command requests
Custom diagnostic command requests are used by customer satisfaction
teams to run arbitrary troubleshooting commands on SPUs. These require
user confirmation.
:param spu_serial: The serial number for which to query for custom
diagnostic command requests
:type spu_serial: str
:returns [SpuCustomDiagnostic]: A list of custom diagnostic command
requests.
:raises GraphQLError: An error with the GraphQL endpoint.
"""
# setup query parameters
parameters = dict()
parameters["spuSerial"] = GraphQLParam(
spu_serial, "String", False)
# make the request
response = self._query(
name="spuCustomDiagnostics",
params=parameters,
fields=SpuCustomDiagnostic.fields()
)
# convert to object
return [SpuCustomDiagnostic(i) for i in response]
def claim_spu(
self,
spu_serial: str
):
"""Adds an unregistered SPU to the organization
SPUs need to be claimed by an organization before they can be used
for nPod creation. While the nPod creation command will perform an
implicit claim, this method allows registering SPUs with an
organization without creating an nPod.
        Once an SPU is claimed, it becomes visible in the ``get_spus``
query and in the nebulon ON web user interface.
:param spu_serial: The serial number of the SPU to register with an
organization.
:type spu_serial: str
:raises GraphQLError: An error with the GraphQL endpoint.
:raises Exception: An error when delivering a token to the SPU
"""
# setup query parameters
parameters = dict()
parameters["serial"] = GraphQLParam(
spu_serial, "String", True)
# make the request
response = self._mutation(
name="claimSPU",
params=parameters,
fields=TokenResponse.fields()
)
# convert to object
token_response = TokenResponse(response)
token_response.deliver_token()
def delete_spu_info(
self,
spu_serial: str
) -> bool:
"""Allows deletion of SPU information in nebulon ON
:param spu_serial: The serial number of the SPU
:type spu_serial: str
:raises GraphQLError: An error with the GraphQL endpoint.
:returns bool: If the delete operation was successful
"""
# setup query parameters
parameters = dict()
parameters["serial"] = GraphQLParam(
spu_serial, "String", True)
# make the request
response = self._mutation(
name="delSPUInfo",
params=parameters,
fields=None
)
# response is a boolean
return response
def ping_spu(
self,
spu_serial: str
):
"""Turns on the locate LED pattern of the SPU
Allows identification of an SPU in the servers by turning on the
locate LED pattern for the SPU. Please consult the Cloud-Defined
Storage manual for the LED blink patterns.
:param spu_serial: The serial number of the SPU
:type spu_serial: str
:raises GraphQLError: An error with the GraphQL endpoint.
:raises Exception: An error when delivering a token to the SPU
"""
# setup query parameters
parameters = dict()
parameters["serial"] = GraphQLParam(
spu_serial, "String", True)
# make the request
response = self._mutation(
name="claimSPU",
params=parameters,
fields=TokenResponse.fields()
)
# convert to object
token_response = TokenResponse(response)
token_response.deliver_token()
def send_spu_debug_info(
self,
spu_serial: str,
note: str = None
):
"""Allows submitting additional debugging information to nebulon ON
Used for customers to send additional debug information to nebulon ON
        for troubleshooting and resolving issues.
:param spu_serial: The serial number of the SPU
:type spu_serial: str
:param note: An optional note to attach to the debug information
:type note: str, optional
:raises GraphQLError: An error with the GraphQL endpoint.
:raises Exception: An error when delivering a token to the SPU
"""
# setup query parameters
parameters = dict()
parameters["spuSerial"] = GraphQLParam(
spu_serial, "String", False)
parameters["note"] = GraphQLParam(
note, "String", False)
# make the request
response = self._mutation(
name="sendDebugInfo",
params=parameters,
fields=TokenResponse.fields()
)
# convert to object
token_response = TokenResponse(response)
token_response.deliver_token()
def run_custom_diagnostics(
self,
spu_serial: str = None,
npod_uuid: str = None,
diagnostic_name: str = None,
request_uuid: str = None
):
"""Allows running custom diagnostic commands
        SPU custom diagnostic requests allow customers to run arbitrary
diagnostic commands on the services processing units as part of
troubleshooting issues during a support case.
:param spu_serial: The serial number of the SPU on which to run
diagnostic
:type spu_serial: str, optional
:param npod_uuid: The unique identifier of the nPod on which to run
diagnostic
:type npod_uuid: str, optional
:param diagnostic_name: The name of the diagnostic to run
:type diagnostic_name: str, optional
:param request_uuid: The unique identifier of the custom diagnostic
request to run
:type request_uuid: str, optional
:raises GraphQLError: An error with the GraphQL endpoint.
:raises Exception: An error when delivering a token to the SPU
"""
# setup query parameters
parameters = dict()
parameters["spuSerial"] = GraphQLParam(
spu_serial, "String", False)
parameters["podUID"] = GraphQLParam(
npod_uuid, "String", False)
parameters["diagnosticName"] = GraphQLParam(
diagnostic_name, "String", False)
parameters["requestUID"] = GraphQLParam(
request_uuid, "String", False)
# make the request
response = self._mutation(
name="runCustomDiagnostic",
params=parameters,
fields=TokenResponse.fields()
)
# convert to object
token_response = TokenResponse(response)
token_response.deliver_token()
def release_spu(
self,
spu_serial: str
):
"""Removes an SPU from an organization
:param spu_serial: The serial number of the SPU
:type spu_serial: str
:raises GraphQLError: An error with the GraphQL endpoint.
:raises Exception: An error when delivering a token to the SPU
"""
# setup query parameters
parameters = dict()
parameters["spuSerial"] = GraphQLParam(
spu_serial, "String", True)
# make the request
response = self._mutation(
name="releaseSPU",
params=parameters,
fields=TokenResponse.fields()
)
# convert to object
token_response = TokenResponse(response)
token_response.deliver_token()
def set_proxy(
self,
spu_serial: str,
proxy: str
):
"""Allows configuring a proxy server for an SPU
:param spu_serial: The serial number of the SPU
:type spu_serial: str
:param proxy: The proxy server IP address
:type proxy: str
:raises GraphQLError: An error with the GraphQL endpoint.
:raises Exception: An error when delivering a token to the SPU
"""
# setup query parameters
parameters = dict()
parameters["spuSerial"] = GraphQLParam(spu_serial, "String", True)
parameters["proxy"] = GraphQLParam(proxy, "String", True)
# make the request
response = self._mutation(
name="setProxy",
params=parameters,
fields=TokenResponse.fields()
)
# convert to object
token_response = TokenResponse(response)
token_response.deliver_token()
def replace_spu(
self,
npod_uuid: str,
previous_spu_serial: str,
new_spu_info: NPodSpuInput,
sset_uuid: str
):
"""Allows replacing an SPU
The replace services processing unit (SPU) operation is used to
transition the configuration of an old, likely failed, SPU to a new
replacement unit and allows modifying the configuration during the
process.
:param npod_uuid: The unique identifier of the nPod of the old SPU
that is being replaced
:type npod_uuid: str
:param previous_spu_serial: The serial number of the old SPU that is
being replaced
:type previous_spu_serial: str
:param new_spu_info: Configuration information for the new SPU
:type new_spu_info: NPodSpuInput
:param sset_uuid: The storage set information for the existing SPU.
This information can be obtained from the active replacement
            alert and is only used to verify that the correct SPU is selected.
:type sset_uuid: str
:raises GraphQLError: An error with the GraphQL endpoint.
:raises Exception: An error when delivering a token to the SPU
"""
# setup query parameters
parameters = dict()
parameters["input"] = GraphQLParam(
ReplaceSpuInput(
npod_uuid=npod_uuid,
previous_spu_serial=previous_spu_serial,
new_spu_info=new_spu_info,
sset_uuid=sset_uuid
),
"ReplaceSPUInput",
True
)
# make the request
response = self._mutation(
name="replaceSPU",
params=parameters,
fields=TokenResponse.fields()
)
# convert to object
token_response = TokenResponse(response)
token_response.deliver_token()
def secure_erase_spu(
self,
spu_serial: str
):
"""Allows to secure-erase data on a services processing unit (SPU)
The secure erase functionality allows a deep-erase of data stored on
the physical drives attached to the SPU. Only SPUs that are not part
of a nPod can be secure-erased.
:param spu_serial: The serial number of the SPU to secure-erase
:type spu_serial: str
:raises GraphQLError: An error with the GraphQL endpoint.
:raises Exception: An error when delivering a token to the SPU
"""
# setup query parameters
parameters = dict()
parameters["input"] = GraphQLParam(
SecureEraseSPUInput(
spu_serial=spu_serial
),
"SecureEraseSPUInput",
True
)
# make the request
response = self._mutation(
name="secureEraseSPU",
params=parameters,
fields=TokenResponse.fields()
)
# convert to object
token_response = TokenResponse(response)
token_response.deliver_token()
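# Hypothetical end-to-end sketch (illustrative only): the client class name, credentials
# and serial number below are assumptions; SpuMixin is mixed into the SDK's GraphQL client.
#
#   client = NebulonClient(username="...", password="...")
#   client.claim_spu("012345AABBCC")          # register the SPU with the organization
#   spus = client.get_spus()
#   for spu in spus.items:
#       print(spu.serial, spu.version, spu.lun_count)
#   client.ping_spu("012345AABBCC")           # blink the locate LED on that SPU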
|
StarcoderdataPython
|
3378255
|
from typing import (
List,
Tuple,
Dict,
Callable,
)
import os
import argparse
import logging
import torch
from torch import optim
from allennlp.models import Model
from allennlp.data.vocabulary import Vocabulary
from allennlp.data import DatasetReader
from allennlp.data.token_indexers.elmo_indexer import ELMoTokenCharactersIndexer
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.iterators import BucketIterator
from allennlp.training.trainer import Trainer
from allennlp.data.token_indexers import PretrainedBertIndexer
from dpd.dataset import (
ActiveBIODataset,
BIODataset,
BIODatasetReader,
UnlabeledBIODataset,
)
from dpd.utils import (
get_dataset_files,
Logger,
construct_f1_class_labels,
PickleSaveFile,
)
from dpd.constants import (
CADEC_SPACY,
)
from dpd.weak_supervision.feature_extractor import SpaCyFeatureExtractor
from dpd.models import build_model
from dpd.models.embedder import CachedTextFieldEmbedder
from dpd.oracles import Oracle, GoldOracle
from dpd.heuristics import RandomHeuristic, ClusteringHeuristic
from dpd.weak_supervision import build_weak_data
from dpd.utils import get_all_embedders, log_train_metrics
from dpd.args import get_active_args
ORACLE_SAMPLES = [10, 40, 50]
logger = logging.getLogger(name=__name__)
# type definitions
'''
EntryDataType:
int (id)
List[str] (input)
List[str] (output)
float (weight)
'''
EntryDataType = Dict[str, object]
DatasetType = List[EntryDataType]
MetricsType = Dict[str, object]
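# Illustrative example of a single entry under the schema above (values are made up):
#   {
#       'id': 17,
#       'input': ['I', 'felt', 'dizzy'],
#       'output': ['O', 'O', 'B-ADR'],
#       'weight': 1.0,
#   }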
def train(
model: Model,
binary_class: str,
train_data: DatasetType,
valid_reader: DatasetReader,
vocab: Vocabulary,
optimizer_type: str,
optimizer_learning_rate: float,
optimizer_weight_decay: float,
batch_size: int,
patience: int,
num_epochs: int,
device: str,
) -> Tuple[Model, MetricsType]:
train_reader = BIODatasetReader(
ActiveBIODataset(train_data, dataset_id=0, binary_class=binary_class),
token_indexers={
'tokens': ELMoTokenCharactersIndexer(),
},
)
train_dataset = train_reader.read('tmp.txt')
valid_dataset = valid_reader.read('tmp.txt')
cuda_device = -1
if device == 'cuda':
cuda_device = 0
model = model.cuda(cuda_device)
else:
cuda_device = -1
optimizer = optim.SGD(
model.parameters(),
lr=optimizer_learning_rate,
weight_decay=optimizer_weight_decay,
)
iterator = BucketIterator(
batch_size=batch_size,
sorting_keys=[("sentence", "num_tokens")],
)
iterator.index_with(vocab)
trainer = Trainer(
model=model,
optimizer=optimizer,
iterator=iterator,
train_dataset=train_dataset,
validation_dataset=valid_dataset,
patience=patience,
num_epochs=num_epochs,
cuda_device=cuda_device,
validation_metric='f1-measure-overall',
)
metrics = trainer.train()
return model, metrics
def active_train_fine_tune_iteration(
heuristic: RandomHeuristic,
unlabeled_dataset: UnlabeledBIODataset,
sample_size: int,
labeled_indexes: List[int],
oracle: Oracle,
train_data: DatasetType,
valid_reader: DatasetReader,
vocab: Vocabulary,
model: Model,
cached_text_field_embedders: List[CachedTextFieldEmbedder],
spacy_feature_extractor: SpaCyFeatureExtractor,
optimizer_type: str,
optimizer_learning_rate: float,
optimizer_weight_decay: float,
use_weak: bool,
weak_weight: float,
weak_function: List[str],
weak_collator: str,
sample_strategy: str,
batch_size: int,
patience: int,
num_epochs: int,
device: str,
) -> Tuple[Model, Dict[str, object]]:
# select new points from distribution
distribution = heuristic.evaluate(unlabeled_dataset, sample_size)
new_points = []
sample_size = min(sample_size, len(distribution) - 1)
if sample_strategy == 'sample':
new_points = torch.multinomial(distribution, sample_size)
elif sample_strategy == 'top_k':
new_points = sorted(
range(len(distribution)),
reverse=True,
key=lambda ind: distribution[ind]
)
else:
        raise Exception(f'Unknown sampling strategy: {sample_strategy}')
new_points = new_points[:sample_size]
# use new points to augment train_dataset
    # remove points from unlabeled corpus
query = [
(
unlabeled_dataset[ind]['id'],
unlabeled_dataset[ind]['input'],
) for ind in new_points
]
labeled_indexes.extend(
ind for (ind, _) in query
)
oracle_labels = [oracle.get_query(q) for q in query]
train_data.extend(oracle_labels)
# remove unlabeled data points from corpus
    for q in query:
        unlabeled_dataset.remove(q)
weak_data = []
if use_weak:
# builds a weak set to augment the training
# set
weak_data = build_weak_data(
train_data,
unlabeled_dataset,
model,
weight=weak_weight,
function_types=weak_function,
collator_type=weak_collator,
contextual_word_embeddings=cached_text_field_embedders,
spacy_feature_extractor=spacy_feature_extractor,
vocab=vocab,
)
model, _ = train(
model=model,
binary_class=unlabeled_dataset.binary_class,
train_data=weak_data,
valid_reader=valid_reader,
vocab=vocab,
optimizer_type=optimizer_type,
optimizer_learning_rate=optimizer_learning_rate,
optimizer_weight_decay=optimizer_weight_decay,
batch_size=batch_size,
patience=patience,
num_epochs=num_epochs,
device=device,
)
model, metrics = train(
model=model,
binary_class=unlabeled_dataset.binary_class,
train_data=train_data,
valid_reader=valid_reader,
vocab=vocab,
optimizer_type=optimizer_type,
optimizer_learning_rate=optimizer_learning_rate,
optimizer_weight_decay=optimizer_weight_decay,
batch_size=batch_size,
patience=patience,
num_epochs=num_epochs,
device=device,
)
return model, metrics
def active_train_iteration(
heuristic: RandomHeuristic,
unlabeled_dataset: UnlabeledBIODataset,
sample_size: int,
labeled_indexes: List[int],
oracle: Oracle,
train_data: DatasetType,
valid_reader: DatasetReader,
vocab: Vocabulary,
model: Model,
cached_text_field_embedders: List[CachedTextFieldEmbedder],
spacy_feature_extractor: SpaCyFeatureExtractor,
optimizer_type: str,
optimizer_learning_rate: float,
optimizer_weight_decay: float,
use_weak: bool,
weak_weight: float,
weak_function: List[str],
weak_collator: str,
sample_strategy: str,
batch_size: int,
patience: int,
num_epochs: int,
device: str,
) -> Tuple[Model, Dict[str, object]]:
# select new points from distribution
# distribution contains score for each index
distribution = heuristic.evaluate(unlabeled_dataset, sample_size)
new_points = []
# sample the sample size from the distribution
sample_size = min(sample_size, len(distribution) - 1)
if sample_strategy == 'sample':
new_points = torch.multinomial(distribution, sample_size)
elif sample_strategy == 'top_k':
new_points = sorted(
range(len(distribution)),
reverse=True,
key=lambda ind: distribution[ind]
)
else:
        raise Exception(f'Unknown sampling strategy: {sample_strategy}')
new_points = new_points[:sample_size]
# new points now contains list of indexes in the unlabeled
# corpus to annotate
# use new points to augment train_dataset
    # remove points from unlabeled corpus
query = [
(
unlabeled_dataset[ind]['id'],
unlabeled_dataset[ind]['input'],
) for ind in new_points
]
labeled_indexes.extend(
ind for (ind, _) in query
)
oracle_labels = [oracle.get_query(q) for q in query]
train_data.extend(oracle_labels)
# remove unlabeled data points from corpus
    for q in query:
        unlabeled_dataset.remove(q)
weak_data = []
if use_weak:
# builds a weak set to augment the training
# set
weak_data = build_weak_data(
train_data,
unlabeled_dataset,
model,
weight=weak_weight,
function_types=weak_function,
collator_type=weak_collator,
contextual_word_embeddings=cached_text_field_embedders,
spacy_feature_extractor=spacy_feature_extractor,
vocab=vocab,
)
model, metrics = train(
model=model,
binary_class=unlabeled_dataset.binary_class,
train_data=train_data + weak_data,
valid_reader=valid_reader,
vocab=vocab,
optimizer_type=optimizer_type,
optimizer_learning_rate=optimizer_learning_rate,
optimizer_weight_decay=optimizer_weight_decay,
batch_size=batch_size,
patience=patience,
num_epochs=num_epochs,
device=device,
)
return model, metrics
def active_train(
model: Model,
unlabeled_dataset: UnlabeledBIODataset,
valid_dataset: BIODataset,
vocab: Vocabulary,
oracle: Oracle,
optimizer_type: str,
optimizer_learning_rate: float,
optimizer_weight_decay: float,
use_weak: bool,
weak_fine_tune: bool,
weak_weight: float,
weak_function: List[str],
weak_collator: str,
sample_strategy: str,
batch_size: int,
patience: int,
num_epochs: int,
device: str,
log_dir: str,
model_name: str,
) -> Model:
heuristic = ClusteringHeuristic(model.word_embeddings, unlabeled_dataset) # RandomHeuristic()
log_dir = os.path.join(log_dir, model_name)
logger = Logger(logdir=log_dir)
# keep track of all the ids that have been
# labeled
labeled_indexes: List[int] = []
# the current training data that is being built up
train_data: DatasetType = []
valid_reader = BIODatasetReader(
bio_dataset=valid_dataset,
token_indexers={
'tokens': ELMoTokenCharactersIndexer(),
},
)
cached_text_field_embedders: List[CachedTextFieldEmbedder] = get_all_embedders()
spacy_feature_extractor: SpaCyFeatureExtractor = SpaCyFeatureExtractor.setup(dataset_ids=[0, 1])
spacy_feature_extractor.load(save_file=PickleSaveFile(CADEC_SPACY))
for i, sample_size in enumerate(ORACLE_SAMPLES):
active_iteration_kwargs = dict(
heuristic=heuristic,
unlabeled_dataset=unlabeled_dataset,
sample_size=sample_size,
labeled_indexes=labeled_indexes,
oracle=oracle,
train_data=train_data,
valid_reader=valid_reader,
model=model,
cached_text_field_embedders=cached_text_field_embedders,
spacy_feature_extractor=spacy_feature_extractor,
vocab=vocab,
optimizer_type=optimizer_type,
optimizer_learning_rate=optimizer_learning_rate,
optimizer_weight_decay=optimizer_weight_decay,
use_weak=use_weak,
weak_weight=weak_weight,
weak_function=weak_function,
weak_collator=weak_collator,
sample_strategy=sample_strategy,
batch_size=batch_size,
patience=patience,
num_epochs=num_epochs,
device=device,
)
if weak_fine_tune:
model, metrics = active_train_fine_tune_iteration(**active_iteration_kwargs)
else:
model, metrics = active_train_iteration(**active_iteration_kwargs)
log_train_metrics(logger, metrics, step=len(train_data))
print(f'Finished experiment on training set size: {len(train_data)}')
logger.flush()
def construct_vocab(datasets: List[BIODataset]) -> Vocabulary:
readers = [BIODatasetReader(
bio_dataset=bio_dataset,
token_indexers={
'tokens': ELMoTokenCharactersIndexer(),
'single_tokens': SingleIdTokenIndexer(), # including for future pipelines to use, one hot
},
) for bio_dataset in datasets]
allennlp_datasets = [r.read('tmp.txt') for r in readers]
result = allennlp_datasets[0]
for i in range(1, len(allennlp_datasets)):
result += allennlp_datasets[i]
vocab = Vocabulary.from_instances(result)
return vocab
def main():
args = get_active_args().parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
device = 'cuda' if torch.cuda.is_available() and args.cuda else 'cpu'
train_file, valid_file, test_file = get_dataset_files(dataset=args.dataset)
class_labels: List[str] = construct_f1_class_labels(args.binary_class)
train_bio = BIODataset(
dataset_id=0,
file_name=train_file,
binary_class=args.binary_class,
)
train_bio.parse_file()
if args.test:
print('using test set')
valid_bio = BIODataset(
dataset_id=1,
file_name=valid_file if not args.test else test_file,
binary_class=args.binary_class,
)
valid_bio.parse_file()
vocab = construct_vocab([train_bio, valid_bio])
unlabeled_corpus = UnlabeledBIODataset(
dataset_id=train_bio.dataset_id,
bio_data=train_bio,
)
model = build_model(
model_type=args.model_type,
vocab=vocab,
hidden_dim=args.hidden_dim,
class_labels=class_labels,
cached=args.cached,
)
oracle = GoldOracle(train_bio)
active_train(
model=model,
unlabeled_dataset=unlabeled_corpus,
valid_dataset=valid_bio,
vocab=vocab,
oracle=oracle,
optimizer_type=args.opt_type,
optimizer_learning_rate=args.opt_lr,
optimizer_weight_decay=args.opt_weight_decay,
use_weak=args.use_weak,
weak_fine_tune=args.use_weak_fine_tune,
weak_weight=args.weak_weight,
weak_function=args.weak_function,
weak_collator=args.weak_collator,
sample_strategy=args.sample_strategy,
batch_size=args.batch_size,
patience=args.patience,
num_epochs=args.num_epochs,
device=device,
log_dir=args.log_dir,
model_name=args.model_name,
)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3295459
|
<filename>acme/settings.py
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '<KEY>'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'channels',
'demo',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'acme.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'acme.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgiref.inmemory.ChannelLayer",
"ROUTING": "demo.channels.channel_routing",
},
}
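# Illustrative sketch of the routing module referenced above. demo/channels.py is not part
# of this settings file and the consumer names are assumptions; the channels 1.x ``route``
# API is assumed given the asgiref.inmemory backend.
#
#   from channels.routing import route
#   from demo.consumers import ws_connect, ws_receive, ws_disconnect
#
#   channel_routing = [
#       route("websocket.connect", ws_connect),
#       route("websocket.receive", ws_receive),
#       route("websocket.disconnect", ws_disconnect),
#   ]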
|
StarcoderdataPython
|
3207209
|
<filename>virt/lib/python3.7/site-packages/martor/fields.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from .settings import MARTOR_ENABLE_LABEL
from .widgets import (MartorWidget, AdminMartorWidget)
class MartorFormField(forms.CharField):
def __init__(self, *args, **kwargs):
# to setup the editor without label
if not MARTOR_ENABLE_LABEL:
kwargs['label'] = ''
super(MartorFormField, self).__init__(*args, **kwargs)
if not issubclass(self.widget.__class__, MartorWidget):
self.widget = MartorWidget()
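# Minimal usage sketch (hypothetical form and model, not part of martor itself):
#
#   class PostForm(forms.ModelForm):
#       content = MartorFormField()
#
#       class Meta:
#           model = Post          # assumed model
#           fields = ["content"]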
|
StarcoderdataPython
|
3342043
|
<gh_stars>100-1000
expected_output = {
"cdp": {
"index": {
0: {
"capability": "H P M",
"hold_time": 94,
"local_interface": "FastEthernet0/1",
"platform": "420HD_GBE",
"port_id": "Esw0"
},
1: {
"capability": "T B I",
"device_id": "xxx012",
"hold_time": 141,
"local_interface": "FastEthernet0/2",
"platform": "AIR-CAP27",
"port_id": "GigabitEthernet0.1"
},
2: {
"capability": "T B I",
"device_id": "xxx006.xx.xxx.xxx",
"hold_time": 159,
"local_interface": "FastEthernet0/3",
"platform": "AIR-CAP27",
"port_id": "GigabitEthernet0.1"
},
3: {
"capability": "S I",
"device_id": "xxx001.xx.xxx.xxx",
"hold_time": 125,
"local_interface": "GigabitEthernet0/1",
"platform": "WS-C2960X",
"port_id": "GigabitEthernet1/0/41"
},
4: {
"device_id": "xxx010"
}
}
}
}
|
StarcoderdataPython
|
4833903
|
'''
Created by auto_sdk on 2019.08.05
'''
from dingtalk.api.base import RestApi
class OapiCateringPersonalorderPushRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.fee_actually_pay = None
self.fee_after_discount = None
self.fee_original = None
self.fee_should_pay = None
self.order_details = None
self.order_id = None
self.payment_time = None
self.shop_id = None
self.shop_name = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.catering.personalorder.push'
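# Hypothetical usage sketch -- the client object, access token and field values below are
# assumptions, not part of this generated request class:
#
#   req = OapiCateringPersonalorderPushRequest()
#   req.order_id = "20190805-0001"
#   req.shop_id = "shop-42"
#   req.fee_original = 3500
#   # ... fill in the remaining fee/order fields as needed ...
#   resp = client.execute(req, access_token)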
|
StarcoderdataPython
|
137468
|
<filename>test_str_divide.py<gh_stars>1-10
import unittest
from str_divide import divide
class TestDivideStrings(unittest.TestCase):
#cases to test
# divide by 0 error
# divide by 1
# 0 divided by
#
def test_obvious_small(self):
self.assertEqual(divide('10','2'), '5')
self.assertEqual(divide('2','3'), '0')
self.assertEqual(divide('123','2'), '61')
self.assertEqual(divide('2','123'), '0')
self.assertEqual(divide('999','999'), '1')
self.assertEqual(divide('2','99'), '0')
self.assertEqual(divide('567','765'), '0')
self.assertEqual(divide('500','100'), '5')
self.assertEqual(divide('10000','1000'), '10')
# self.assertEqual(divide('',''), '')
def test_edge(self):
#should be if either is 0
self.assertEqual(divide('0','123'), '0')
self.assertEqual(divide('456','0'), '0')
#negative should find it based on either being negative
self.assertEqual(divide('-123','1234'), '-')
self.assertEqual(divide('123','-456'), '-')
#large divide
self.assertEqual(divide('987654321987654321987654321','123456789123456789123456789'), '8')
if __name__ == '__main__':
unittest.main()
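# A minimal reference sketch of a divide() consistent with the tests above. It is NOT the
# project's str_divide implementation (which presumably does digit-by-digit long division);
# it only illustrates the expected behaviour, including the '0' and '-' special cases.
def _reference_divide(numerator: str, denominator: str) -> str:
    if numerator == '0' or denominator == '0':
        return '0'
    if numerator.startswith('-') or denominator.startswith('-'):
        return '-'
    return str(int(numerator) // int(denominator))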
|
StarcoderdataPython
|
159176
|
from tkinter import *
from tkinter import ttk
root = Tk()
root.geometry("640x480+200+200")
ttk.Label(root, text="Orange", background="orange").place(
x=100, y=50, width=100, height=50
)
ttk.Label(root, text="Red", background="red").place(
relx=0.5, rely=0.5, anchor="center", relwidth=0.5, relheight=0.5
)
ttk.Label(root, text="Purple", background="purple").place(
relx=0.5, x=100, rely=0.5, y=50
)
ttk.Label(root, text="Green", background="green").place(relx=1, x=-5, y=5, anchor="ne")
root.mainloop()
|
StarcoderdataPython
|
1658279
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('achat', '0002_achat_association'),
]
operations = [
migrations.CreateModel(
name='Pending',
fields=[
('achat', models.OneToOneField(primary_key=True, default=0, serialize=False, to='achat.Achat')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterField(
model_name='achat',
name='association',
field=models.IntegerField(default=0),
preserve_default=True,
),
migrations.AlterField(
model_name='achat',
name='brand',
field=models.CharField(max_length=200, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='achat',
name='description',
field=models.CharField(max_length=200, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='achat',
name='equivalent',
field=models.CharField(max_length=200, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='achat',
name='equivalent_price',
field=models.DecimalField(default=0, verbose_name=b'price', max_digits=8, decimal_places=2),
preserve_default=True,
),
migrations.AlterField(
model_name='achat',
name='equivalent_unit',
field=models.DecimalField(default=0, verbose_name=b'unit', max_digits=8, decimal_places=2),
preserve_default=True,
),
]
|
StarcoderdataPython
|
1666181
|
<gh_stars>0
import json
import os,glob
import time
import numpy as np
from flask_socketio import emit
from flask_login import current_user
from app import socketio, app, session
from diagnostic_text import *
# from models import add_file_selected, user_files_selected, remove_file_selected, clear_user_file_selected, add_file_source, consolidate_sources
# from models import remove_source_group, clear_user_file_source, remove_file_source, measure_path_from_name, measure_path_response, get_associated_plots, remove_path_selected
import datetime
from Khound import make_spec_file, shared_dict, shared_dict_lock, meas_lock, file_write_lock, plot_files
import multiprocessing
from multiprocessing import Process, Lock, Manager, Process
def get_latest_plot_tag():
'''
get the last plotted timestamp
'''
try:
plot = sorted(glob.glob(os.path.join(app.config['PLOT_DIR'], "KHound_plot_*.png")))[-1]
# print(os.path.basename(plot))
except IndexError:
return 0
return os.path.basename(plot).split('.')[0].split('plot_')[1]
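# Example (illustrative name): "KHound_plot_1650000000.png" -> tag "1650000000";
# measurement files follow the matching "Khound_<tag>.txt" pattern used below.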
def plot_full_spec(args):
'''
    Plot the last ``plot_time`` seconds of data whenever new files appear in the data folder.
'''
shared_dict = args[0]
file_write_lock = args[1]
socketio = args[2]
print('Running plotter process now')
plot_time = shared_dict['plot_time'] # in seconds, from last file received
while shared_dict['plot_full_spec_enable']:
#get the file list
search_string = os.path.join(app.config['GLOBAL_MEASURES_PATH'], "Khound_*.txt")
# print(search_string)
with file_write_lock:
meas_list = sorted(glob.glob(search_string))
if len(meas_list) == 0:
print('no measures to plot')
time.sleep(2)
continue
# print(meas_list)
latest_meas_tag = int(os.path.basename(meas_list[-1]).split('.')[0].split('_')[1])
latest_plot_tag = int(get_latest_plot_tag())
if latest_meas_tag!=latest_plot_tag:
print(latest_meas_tag)
print(latest_plot_tag)
meas_list_int = [int(os.path.basename(m).split('.')[0].split('_')[1]) for m in meas_list]
meas_list_base = [os.path.basename(m) for m in meas_list]
file_list = []
for m in range(len(meas_list_int)):
if latest_meas_tag - meas_list_int[m] < plot_time:
file_list.append(meas_list_base[m])
file_list = sorted(file_list)
# with file_write_lock:
print('plotting...')
            # processes are not GIL-locked, so matplotlib runs much faster here
pp = Process(target = plot_files, args = [shared_dict,file_list,'matplotlib',False,app.config['GLOBAL_MEASURES_PATH'],app.config['PLOT_DIR']])
pp.start()
pp.join()
print("New plot available, reloading mainpage")
reload_page_command(socketio)
else:
print('Nothing to plot')
time.sleep(1)
# plot_full_spec_process = Process(target = plot_full_spec, args = [shared_dict,file_write_lock,socketio])
@socketio.on('init_plotter')
def init_plotter(msg, methods=['GET', 'POST']):
print('request to start plotter received')
# if not plot_full_spec_process.is_alive():
if not shared_dict['plot_full_spec_enable']:
print("starting plotter")
plot_full_spec_process = socketio.start_background_task(target = plot_full_spec, args = [shared_dict,file_write_lock,socketio])
shared_dict['plot_full_spec_enable'] = True
else:
print('plotter already initialized')
def run_full_spec(args):
'''
run full spec periodically
'''
shared_dict = args[0]
meas_lock = args[1]
socketio = args[2]
print('Running full spec process now.')
start_time = time.time()
while shared_dict['full_spec_enable']:
wait_time = np.abs(time.time()-start_time)
# print(wait_time)
if wait_time>shared_dict['full_spec_every']:
with meas_lock:
status_update('Starting scan loop', socketio)
daq_thread = multiprocessing.Process(target=make_spec_file, args=(shared_dict, None,
shared_dict['master_clock_rate'],
shared_dict['start'],
shared_dict['end'],
shared_dict['gain'],
shared_dict['resolution'],
shared_dict['iterations'])
)
# daq_thread.daemon = True
daq_thread.start()
while daq_thread.is_alive():
with shared_dict_lock:
status_update(shared_dict['progress'], socketio)
time.sleep(0.127)
status_update(shared_dict['progress'], socketio)
                daq_thread.join()
                # reset the timer so the next scan starts only after 'full_spec_every' seconds
                start_time = time.time()
else:
with shared_dict_lock:
shared_dict['progress'] = "Updating in %d s" % int(shared_dict['full_spec_every'] - wait_time)
status_update(shared_dict['progress'], socketio)
time.sleep(1)
# else:
# time.sleep(1)
full_spec_process = Process(target = run_full_spec, args = [shared_dict,meas_lock,socketio])
def status_update(message, socketio, UUID = None):
message = str(message)
if len(message) > 1024:
print_warning("transmitting very long status update")
msg = {'status':message}
if UUID is not None:
socketio.emit('status_update',json.dumps(msg),namespace="/"+UUID)
else:
socketio.emit('status_update',json.dumps(msg))
print("------------>sending status update")
def reload_page_command(socketio, UUID = None):
msg = {'command':'reload'}
if UUID is not None:
socketio.emit('command',json.dumps(msg),namespace="/"+UUID)
else:
socketio.emit('command',json.dumps(msg))
print("------------>sending reload command")
@socketio.on('start_full')
def start_full(msg, methods=['GET', 'POST']):
print("starting full from window UUID: " + msg['window_UUID'])
window_UUID = msg['window_UUID']
with shared_dict_lock:
shared_dict['full_spec_enable'] = True
# full_spec_process = Process(target = run_full_spec, args = [shared_dict,meas_lock,socketio])
full_spec_process = socketio.start_background_task(target = run_full_spec, args = [shared_dict,meas_lock,socketio])
# full_spec_process.start()
@socketio.on('stop_full')
def stop_full(msg, methods=['GET', 'POST']):
print("stopping full from window UUID: " + msg['window_UUID'])
window_UUID = msg['window_UUID']
with shared_dict_lock:
shared_dict['full_spec_enable'] = False
while full_spec_process.is_alive():
status_update("stopping full_spec_thread", socketio)
time.sleep(0.136)
try:
full_spec_process.join()
except AssertionError:
pass
with shared_dict_lock:
time.sleep(0.0954)
shared_dict['progress'] = 'full_spec thread stopped'
status_update(shared_dict['progress'], socketio)
@socketio.on('ping')
def ping(msg, methods=['GET', 'POST']):
print("received ping from window UUID: " + msg['window_UUID'])
window_UUID = msg['window_UUID']
print(window_UUID)
@socketio.on('scan_all')
def plot_all(msg, methods=['GET', 'POST']):
print("Sending data to UUID: " + msg['window_UUID'])
window_UUID = msg['window_UUID']
print("Scan request received")
master_clock_rate = 52e6
start = 10e6
end = 500e6
gain = 30
resolution = 10000
iterations = 200
# status_update('Starting scan loop', window_UUID)
# daq_thread = multiprocessing.Process(target=make_spec_file, args=(shared_dict, None, master_clock_rate, start,end, gain, resolution, iterations))
# # daq_thread.daemon = True
# daq_thread.start()
# while daq_thread.is_alive():
# with shared_dict_lock:
# status_update(shared_dict['progress'], window_UUID)
# time.sleep(0.127)
# status_update(shared_dict['progress'], window_UUID)
# daq_thread.join()
print('thread joined')
# def get_small_timestamp():
# return datetime.datetime.now().strftime("%H%M%S")
# @socketio.on('explore_clear_selection')
# def clear_all_selected_files(msg):
# '''
# Clear the selectef file list.
# '''
# msg_clear_warning = "Clearing all temporary files of user %s"%current_user
# clear_user_file_selected()
# print_warning(msg_clear_warning)
#
# @socketio.on('remove_from_selection')
# def remove_from_selection(msg):
# '''
# Remove file from selected list
# '''
# old_list = user_files_selected()
# filepath = measure_path_from_name(msg['file'])
# if filepath in old_list:
#
# ret = remove_file_selected(filepath)
# old_list = user_files_selected()
# socketio.emit('update_selection',json.dumps({'files':old_list,'err':int(ret)}))
# else:
# print_warning('cannot remove %s from selected list, not found'%filepath)
#
# @socketio.on('add_to_selection')
# def add_to_selection(msg):
# '''
# Add file from selected list
# '''
# filepath = measure_path_from_name(msg['file'])
# ret = add_file_selected(filepath)
# old_list = user_files_selected()
# socketio.emit('update_selection',json.dumps({'files':old_list,'err':int(ret)}))
#
# @socketio.on('request_selection')
# def send_selection_update(msg):
# socketio.emit('update_selection',json.dumps({'files':user_files_selected(),'err':int(1)}))
#
#
# @socketio.on('request_selection_file_list')
# def send_selection_update_file_list(msg):
# folders_req = msg['folders']
# dbs = []
# plot = []
# files = []
# sizes = []
# kinds = []
# parent= []
# for folder in folders_req:
# dbs_, plot_, files_, sizes_, kinds_, parent_ = measure_path_response(folder,msg['recursive'])
# parent += parent_
# dbs+=dbs_
# plot+=plot_
# files+=files_
# sizes+=sizes_
# kinds+=kinds_
# ret = list(zip(dbs, plot, files, sizes, kinds, parent))
# socketio.emit('update_selection_file_list',json.dumps({'items':ret}))
#
# @socketio.on('request_selection_plot_list')
# def send_selection_update_file_list(msg):
# file_req = msg['file']
# path = measure_path_from_name(file_req)
# plots = get_associated_plots([path])['plots'][0]
# ret = []
# for i in range(len(plots['path'])):
# ret.append([
# plots['path'][i],
# plots['kind'][i],
# plots['timestamp'][i],
# plots['comment'][i],
# ])
# # print(ret)
#
# socketio.emit('update_selection_plot_list',json.dumps({'items':ret}))
#
#
#
# @socketio.on('add_to_selection_from_folder')
# def select_from_folder(msg):
# '''
# Select all the files in a folder.
# '''
# #relative_path = os.path.join(msg['path'], msg['folder'])
# relative_path = msg['folder']
# ret = True
# for root, dirs, files in os.walk(os.path.join(app.config["GLOBAL_MEASURES_PATH"],relative_path), topdown=False):
# for name in files:
# if name.endswith('.h5'):
# ret = ret and add_file_selected(measure_path_from_name(name))
# socketio.emit('update_selection',json.dumps({'files':user_files_selected(),'err':int(ret)}))
#
# @socketio.on('remove_selection_from_folder')
# def remove_select_from_folder(msg):
# ret = remove_path_selected(msg['folder'])
# socketio.emit('update_selection',json.dumps({'files':user_files_selected(),'err':int(ret)}))
#
#
# @socketio.on('analysis_modal_config')
# def define_possible_analysis(msg):
# file_list = user_files_selected()
# config = Analysis_Config(file_list) # TODO: move it to user space.
# config.check_file_list() # Determine which analysis on which file
# session['analysis_config'] = config
# socketio.emit('analyze_config_modal',json.dumps(config.config))
#
#
# @socketio.on('explore_add_source')
# def add_source_file(msg):
# print("Adding %s to file source (permanent? %s) for user %s"%(msg['file'], msg['permanent'], current_user.username))
# if msg['group'] == '':
# gr = None
# else:
# gr = msg['group']
# try:
# file_path = measure_path_from_name(msg['file'])
# add_file_source(file_path, msg['permanent'], gr)
# result = 1
# except ValueError:
# print_warning("Database error, cannot add file %s to source"%msg['file'])
# result = 0
# socketio.emit('explore_add_source_done',json.dumps({'file':str(msg['file']),'result':result}))
#
# @socketio.on('explore_remove_source')
# def remove_source(msg):
# try:
# group = msg['group']
# print('Removing source group %s'%group)
# remove_source_group(group)
# except KeyError:
# measure = msg['file']
# print('Removing source file %s'%measure)
# remove_file_source(measure)
#
# @socketio.on('consolidate_source_files')
# def consolidate_source_files(msg):
# deleted_items = consolidate_sources()
# socketio.emit('consolidate_source_files',json.dumps(deleted_items))
#
# @socketio.on('remove_temporary_source_files')
# def remove_temporary_source_files(msg):
# clear_user_file_source()
# socketio.emit('remove_temporary_source_files',json.dumps({}))
#
# @socketio.on('init_test_run')
# def init_test_run_handler(msg):
# clean_tmp_folder()
# name = ""
# for file in msg['files']:
# arguments = {}
# arguments['file'] = file
# arguments['parameters'] = msg['params']
# name = "Fit_init_test_%s_%s"%(file,get_small_timestamp())
# job_manager.submit_job(init_dry_run, arguments = arguments, name = name, depends = None)
# if name != "":
# socketio.emit('init_test_run',json.dumps({'last':name}))
#
# @socketio.on('run_analysis')
# def run_analysis(msg):
# clear_all_selected_files({})
# print('updating configuration variable...')
# config = session.get('analysis_config')
# config.update_configuration(msg['params'])
# session['analysis_config'] = config
# print(config.pprint())
# print("building job queue...")
# config.build_job_queue()
# print("sorting job queue...")
# config.sort_job_queue()
# print("submitting jobs...")
# config.enqueue_jobs()
|
StarcoderdataPython
|
3341916
|
<reponame>tahirs95/pepys-import
def format_datatime(datetime):
microsecond_text = ""
if datetime.microsecond:
if datetime.microsecond > 9999:
microsecond_text = ".9999"
else:
microsecond_text = "." + str(datetime.microsecond).zfill(4)
return datetime.strftime("%Y%m%d %H%M%S") + microsecond_text
def break_point_dimention_to_sub_units(val):
degree = int(val)
minutesFloat = (val - degree) * 60
minutes = int(minutesFloat)
secondsFloat = (minutesFloat - minutes) * 60
return [degree, minutes, secondsFloat]
def format_point_dimention(val, hemisphere_pair):
[degree, minutes, seconds] = break_point_dimention_to_sub_units(abs(val))
[positive, negative] = hemisphere_pair
return " ".join(
[
str(degree),
str(minutes),
str(round(seconds, 3)),
negative if val < 0 else positive,
]
)
def format_point(x, y):
return (
format_point_dimention(x, ["N", "S"])
+ " "
+ format_point_dimention(y, ["E", "W"])
)
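# Quick usage sketch (illustrative coordinates only):
if __name__ == "__main__":
    # format_point puts N/S on the first argument and E/W on the second
    print(format_point(30.5, -45.25))  # -> "30 30 0.0 N 45 15 0.0 W"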
|
StarcoderdataPython
|
1604537
|
#!/usr/bin/env python
"""
This is the base class seisflows.workflow.Inversion
This is a main Seisflows class, it controls the main workflow.
"""
import os
import sys
import time
from glob import glob
import numpy as np
from seisflows.config import custom_import
from seisflows.tools import unix
from seisflows.tools.tools import exists
from seisflows.config import save
from seisflows.tools.err import ParameterError
PAR = sys.modules["seisflows_parameters"]
PATH = sys.modules["seisflows_paths"]
system = sys.modules["seisflows_system"]
solver = sys.modules["seisflows_solver"]
optimize = sys.modules["seisflows_optimize"]
preprocess = sys.modules["seisflows_preprocess"]
postprocess = sys.modules["seisflows_postprocess"]
class Inversion(custom_import("workflow", "base")):
"""
Waveform inversion base class
    Performs iterative nonlinear inversion and provides a base class on top
of which specialized strategies can be implemented.
To allow customization, the inversion workflow is divided into generic
methods such as "initialize", "finalize", "evaluate_function",
"evaluate_gradient", which can be easily overloaded.
Calls to forward and adjoint solvers are abstracted through the "solver"
interface so that various forward modeling packages can be used
interchangeably.
Commands for running in serial or parallel on a workstation or cluster
are abstracted through the "system" interface.
"""
def check(self):
"""
Checks parameters and paths
"""
# Starting and stopping iterations
if "BEGIN" not in PAR:
raise ParameterError(PAR, "BEGIN")
if "END" not in PAR:
raise ParameterError(PAR, "END")
# Scratch paths
if "SCRATCH" not in PATH:
raise ParameterError(PATH, "SCRATCH")
if "LOCAL" not in PATH:
setattr(PATH, "LOCAL", None)
if "FUNC" not in PATH:
setattr(PATH, "FUNC", os.path.join(PATH.SCRATCH, "evalfunc"))
if "GRAD" not in PATH:
setattr(PATH, "GRAD", os.path.join(PATH.SCRATCH, "evalgrad"))
if "HESS" not in PATH:
setattr(PATH, "HESS", os.path.join(PATH.SCRATCH, "evalhess"))
if "OPTIMIZE" not in PATH:
setattr(PATH, "OPTIMIZE", os.path.join(PATH.SCRATCH, "optimize"))
# Input paths
if "DATA" not in PATH:
setattr(PATH, "DATA", None)
if "MODEL_INIT" not in PATH:
raise ParameterError(PATH, "MODEL_INIT")
# Output paths
if "OUTPUT" not in PATH:
raise ParameterError(PATH, "OUTPUT")
# Outputs to disk
if "SAVEMODEL" not in PAR:
setattr(PAR, "SAVEMODEL", True)
if "SAVEGRADIENT" not in PAR:
setattr(PAR, "SAVEGRADIENT", False)
if "SAVEKERNELS" not in PAR:
setattr(PAR, "SAVEKERNELS", False)
if "SAVEAS" not in PAR:
setattr(PAR, "SAVEAS", "binary")
if "SAVETRACES" not in PAR:
setattr(PAR, "SAVETRACES", False)
if "SAVERESIDUALS" not in PAR:
setattr(PAR, "SAVERESIDUALS", False)
# Print statement outputs
if "VERBOSE" not in PAR:
setattr(PAR, "VERBOSE", True)
# Parameter assertions
assert 1 <= PAR.BEGIN <= PAR.END
# Path assertions
if PATH.DATA in PAR and not exists(PATH.DATA):
assert "MODEL_TRUE" in PATH, "MODEL_TRUE must be in PATH"
assert exists(PATH.MODEL_TRUE), "MODEL_TRUE does not exist"
# Check that there is a given starting model
if not exists(PATH.MODEL_INIT):
raise Exception("MODEL_INIT does not exist")
# Check if this is a synthetic-synthetic or data-synthetic inversion
if "CASE" not in PAR:
raise ParameterError(PAR, "CASE")
elif PAR.CASE.upper() == "SYNTHETIC" and not exists(PATH.MODEL_TRUE):
raise Exception("CASE == SYNTHETIC requires PATH.MODEL_TRUE")
if "RESUME_FROM" not in PAR:
setattr(PAR, "RESUME_FROM", None)
if "STOP_AFTER" not in PAR:
setattr(PAR, "STOP_AFTER", None)
def main(self):
"""
!!! This function controls the main workflow !!!
Carries out seismic inversion by running a series of functions in order
"""
# The workflow is a list of functions that can be called dynamically
flow = [self.initialize,
self.evaluate_gradient,
self.write_gradient,
self.compute_direction,
self.line_search,
self.finalize,
self.clean
]
print(f"BEGINNING WORKFLOW AT {time.asctime()}")
optimize.iter = PAR.BEGIN
print(f"{optimize.iter} <= {PAR.END}")
# Allow workflow resume from a given mid-workflow location
if PAR.RESUME_FROM:
self.resume_from(flow)
elif optimize.iter == 1:
# First-time intialization of the workflow
self.setup()
        # Run the workflow from the current iteration until PAR.END
while optimize.iter <= PAR.END:
print(f"ITERATION {optimize.iter}")
for func in flow:
func()
# Stop the workflow at STOP_AFTER if requested
if PAR.STOP_AFTER and func.__name__ == PAR.STOP_AFTER:
print(f"STOP ITERATION {optimize.iter} AT {PAR.STOP_AFTER}")
break
print(f"FINISHED ITERATION {optimize.iter} AT {time.asctime()}\n")
optimize.iter += 1
def resume_from(self, flow):
"""
Resume the workflow from a given function, proceed in the same fashion
as main until the end of the current iteration.
:type flow: list of functions
:param flow: the order of functions defined in main(), which should be
parsed to determine where workflow should be resumed from
"""
# Determine the index that corresponds to the resume function named
try:
resume_idx = [_.__name__ for _ in flow].index(PAR.RESUME_FROM)
except ValueError:
print(f"{PAR.RESUME_FROM} does not correspond to any workflow "
"functions. Exiting...")
sys.exit(-1)
print(f"RESUME ITERATION {optimize.iter} (from function "
f"{flow[resume_idx].__name__})")
for func in flow[resume_idx:]:
func()
print(f"FINISHED ITERATION {optimize.iter} AT {time.asctime()}\n")
optimize.iter += 1
def setup(self):
"""
Lays groundwork for inversion by running setup() functions for the
involved sub-modules, and generating synthetic true data if necessary,
and generating the pre-requisite database files. Should only be run once
        at iteration 1.
"""
# Set up all the requisite modules
print("SETUP")
preprocess.setup()
postprocess.setup()
optimize.setup()
system.run("solver", "setup")
def initialize(self):
"""
Generates synthetics via a forward simulation, calculates misfits
for the forward simulation. Writes misfit for use in optimization.
"""
print("INITIALIZE")
self.evaluate_function(path=PATH.GRAD, suffix="new")
def compute_direction(self):
"""
Computes search direction
"""
print("COMPUTE SEARCH DIRECTION")
optimize.compute_direction()
def line_search(self):
"""
Conducts line search in given search direction
Status codes:
status > 0 : finished
status == 0 : not finished
status < 0 : failed
"""
print("LINE SEARCH")
# This statement allows restarting a workflow mid line-search. If
# resuming from the start of a line search, the line search machinery
# should have been reset and step_count should be 0. If resuming line
# search due to a failed step, will not re-initialize search and instead
# pick up line search where it left off
if not PAR.RESUME_FROM == "line_search" and \
not optimize.line_search.step_count:
optimize.initialize_search()
while True:
optimize.line_search.step_count += 1
print(f"TRIAL STEP: {optimize.line_search.step_count}")
self.evaluate_function(path=PATH.FUNC, suffix="try")
status = optimize.update_search()
# Determine the outcome of the line search
if status > 0:
print("\tTrial step successful")
optimize.finalize_search()
break
elif status == 0:
print("\tRetrying with new trial step")
continue
elif status < 0:
if optimize.retry_status():
print("\tLine search failed\n\n Restarting optimization...")
optimize.restart()
self.line_search()
break
else:
print("\tLine search failed\n\n Aborting...")
sys.exit(-1)
def evaluate_function(self, path, suffix):
"""
Performs forward simulation, and evaluates the objective function
:type path: str
:param path: path in the scratch directory to use for I/O
:type suffix: str
:param suffix: suffix to use for I/O
"""
print("EVALUATE FUNCTION\n\tRunning forward simulation")
self.write_model(path=path, suffix=suffix)
system.run("solver", "eval_func", path=path)
self.write_misfit(path=path, suffix=suffix)
def evaluate_gradient(self):
"""
Performs adjoint simulation to retrieve the gradient of the objective
"""
print("EVALUATE GRADIENT\n\tRunning adjoint simulation")
system.run("solver", "eval_grad", path=PATH.GRAD,
export_traces=PAR.SAVETRACES)
def finalize(self):
"""
Saves results from current model update iteration
"""
self.checkpoint()
preprocess.finalize()
# Save files from scratch before discarding
if PAR.SAVEMODEL:
self.save_model()
if PAR.SAVEGRADIENT:
self.save_gradient()
if PAR.SAVEKERNELS:
self.save_kernels()
if PAR.SAVETRACES:
self.save_traces()
if PAR.SAVERESIDUALS:
self.save_residuals()
def clean(self):
"""
Cleans directories in which function and gradient evaluations were
carried out
"""
print("CLEAN")
unix.rm(PATH.GRAD)
unix.rm(PATH.FUNC)
unix.mkdir(PATH.GRAD)
unix.mkdir(PATH.FUNC)
def checkpoint(self):
"""
Writes information to disk so workflow can be resumed following a break
"""
save()
def write_model(self, path, suffix):
"""
Writes model in format expected by solver
:type path: str
:param path: path to write the model to
:type suffix: str
:param suffix: suffix to add to the model
"""
src = f"m_{suffix}"
dst = os.path.join(path, "model")
solver.save(solver.split(optimize.load(src)), dst)
def write_gradient(self):
"""
Writes gradient in format expected by non-linear optimization library.
Calls the postprocess module, which will smooth/precondition gradient.
"""
print("POSTPROCESSING")
src = os.path.join(PATH.GRAD, "gradient")
dst = f"g_new"
postprocess.write_gradient(PATH.GRAD)
parts = solver.load(src, suffix="_kernel")
optimize.save(dst, solver.merge(parts))
def write_misfit(self, path, suffix):
"""
Writes misfit in format expected by nonlinear optimization library.
Collects all misfit values within the given residuals directory and sums
them in a manner chosen by the preprocess class.
:type path: str
:param path: path to write the misfit to
:type suffix: str
:param suffix: suffix to add to the misfit
"""
src = glob(os.path.join(path, "residuals", "*"))
dst = f"f_{suffix}"
total_misfit = preprocess.sum_residuals(src)
optimize.savetxt(dst, total_misfit)
def save_gradient(self):
"""
Save the gradient vector. Allows saving numpy array or standard
Fortran .bin files
Saving as a vector saves on file count, but requires numpy and seisflows
functions to read
"""
dst = os.path.join(PATH.OUTPUT, f"gradient_{optimize.iter:04d}")
if PAR.SAVEAS in ["binary", "both"]:
src = os.path.join(PATH.GRAD, "gradient")
unix.mv(src, dst)
if PAR.SAVEAS in ["vector", "both"]:
src = os.path.join(PATH.OPTIMIZE, "g_old")
unix.cp(src, dst + ".npy")
def save_model(self):
"""
Save the model vector. Allows saving numpy array or standard
Fortran .bin files
Saving as a vector saves on file count, but requires numpy and seisflows
functions to read
"""
src = "m_new"
dst = os.path.join(PATH.OUTPUT, f"model_{optimize.iter:04d}")
if PAR.SAVEAS in ["binary", "both"]:
solver.save(solver.split(optimize.load(src)), dst)
if PAR.SAVEAS in ["vector", "both"]:
np.save(file=dst, arr=optimize.load(src))
def save_kernels(self):
"""
Save the kernel vector as a Fortran binary file on disk
"""
src = os.path.join(PATH.GRAD, "kernels")
dst = os.path.join(PATH.OUTPUT, f"kernels_{optimize.iter:04d}")
unix.mv(src, dst)
def save_traces(self):
"""
Save the waveform traces to disk
"""
src = os.path.join(PATH.GRAD, "traces")
dst = os.path.join(PATH.OUTPUT, f"traces_{optimize.iter:04d}")
unix.mv(src, dst)
def save_residuals(self):
"""
Save the residuals to disk
"""
src = os.path.join(PATH.GRAD, "residuals")
dst = os.path.join(PATH.OUTPUT, f"residuals_{optimize.iter:04d}")
unix.mv(src, dst)
|
StarcoderdataPython
|
119703
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright 2020 ICON Foundation Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, Any
from ...wallet.wallet import Wallet
BASE_TYPES = {bool, bytes, int, str, Wallet}
TYPE_NAME_TO_TYPE = {_type.__name__: _type for _type in BASE_TYPES}
def is_base_type(value: type) -> bool:
try:
return value in BASE_TYPES
except:
return False
def name_to_type(type_name: str) -> type:
return TYPE_NAME_TO_TYPE[type_name]
def isinstance_ex(value: Any, _type: type) -> bool:
if not isinstance(value, _type):
return False
if type(value) is bool and _type is not bool:
return False
return True
def base_object_to_str(value: Any) -> str:
if isinstance(value, Wallet):
return value.get_address()
elif isinstance(value, int):
return hex(value)
elif isinstance(value, bytes):
return bytes_to_hex(value)
elif isinstance(value, bool):
return hex(value)
elif isinstance(value, str):
return value
raise TypeError(f"Unsupported type: {type(value)}")
def object_to_str(value: Any) -> Union[Any]:
if is_base_type(type(value)):
return base_object_to_str(value)
if isinstance(value, list):
return [object_to_str(i) for i in value]
if isinstance(value, dict):
return {k: object_to_str(value[k]) for k in value}
if value is None:
return None
raise TypeError(f"Unsupported type: {type(value)}")
def bytes_to_hex(value: bytes, prefix: str = "0x") -> str:
return f"{prefix}{value.hex()}"
|
StarcoderdataPython
|
1684729
|
<reponame>bluesky0960/AlgorithmTest<filename>AlgorithmTest/BOJ_STEP_PYTHON/Step2/BOJ9498.py
#https://www.acmicpc.net/problem/9498
a = int(input())
if a >= 90:
print("A")
elif a >= 80:
print("B")
elif a >= 70:
print("C")
elif a >= 60:
print("D")
else:
print("F")
|
StarcoderdataPython
|
4800523
|
<filename>NewTests/testEncoderSimilarity.py
import torch
from torchvision import transforms
import matplotlib.pyplot as plt
import os
from PIL import Image
import glob
import numpy as np
from model_new import *
from model import *
from model_small import ImageCompressor_small
from models.temp import Cheng2020Attention
from utils.Conditional_Entropy import compute_conditional_entropy
#pretrained_model_path ='/home/access/dev/iclr_17_compression/checkpoints/iter_471527.pth.tar'
pretrained_model_path = '/home/access/dev/iclr_17_compression/checkpoints_new/new_net/rec/iter_1.pth.tar'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = Cheng2020Attention()
#model = ImageCompressor_new(out_channel_N=1024)
#model = ImageCompressor_new(out_channel_N=256)
#model = ImageCompressor_new(out_channel_N=512)
#model = ImageCompressor_new()
#model = ImageCompressor_small()
#model = ImageCompressor()
global_step_ignore = load_model(model, pretrained_model_path)
net = model.to(device)
net.eval()
#stereo1_dir = '/home/access/dev/data_sets/kitti/flow_2015/data_scene_flow/testing/image_2'
#stereo2_dir = '/home/access/dev/data_sets/kitti/flow_2015/data_scene_flow/testing/image_3'
# smaller dataset:
stereo1_dir = '/home/access/dev/data_sets/kitti/data_stereo_flow_multiview/train_small_set_32/image_02'
stereo2_dir = '/home/access/dev/data_sets/kitti/data_stereo_flow_multiview/train_small_set_32/image_03'
stereo1_path_list = glob.glob(os.path.join(stereo1_dir, '*png'))
avg_hamm_dist = 0
min = 1
max = 0
min_idx = 0
max_idx = 0
#transform = transforms.Compose([transforms.Resize((384, 1216),interpolation=3), transforms.ToTensor()])
#transform = transforms.Compose([transforms.Resize((384, 1248), interpolation=Image.BICUBIC), transforms.ToTensor()])
#transform = transforms.Compose([transforms.Resize((192, 624),interpolation=3), transforms.ToTensor()])
#transform = transforms.Compose([transforms.Resize((96, 320), interpolation=3), transforms.ToTensor()])
transform = transforms.Compose([transforms.ToTensor()])
##z1_avg = np.array([0])[None,:]
##z2_avg = np.array([0])[None,:]
c_max = 0
c_min = 0
uncertainty_coefficient_avg = 0
mean_conditional_entropy_avg = 0
for i in range(len(stereo1_path_list)):
img_stereo1 = Image.open(stereo1_path_list[i])
img_stereo2_name = os.path.join(stereo2_dir, os.path.basename(stereo1_path_list[i]))
img_stereo2 = Image.open(img_stereo2_name)
img_stereo1 = transform(img_stereo1)
img_stereo2 = transform(img_stereo2)
# cut image H*W to be a multiple of 16
shape = img_stereo1.size()
img_stereo1 = img_stereo1[:, :16 * (shape[1] // 16), :16 * (shape[2] // 16)]
img_stereo2 = img_stereo2[:, :16 * (shape[1] // 16), :16 * (shape[2] // 16)]
##
input1 = img_stereo1[None, ...].to(device)
input2 = img_stereo2[None, ...].to(device)
######### Temp patch:
'''
i, j, h, w = transforms.RandomCrop.get_params(img_stereo1, output_size=(128, 128))
if i == 128:
i = 127
if j == 128:
j = 127
input1 = img_stereo1[:, i:i + h, j:j + w]
input2 = img_stereo1[:, i:i + h, j+1:j + w +1]
input1 = input1[None, ...].to(device)
input2 = input2[None, ...].to(device)
'''
# Use center crop, shifted 33 pixel ~ vertical alignment
'''
input1 = input1[:, :, :, 33:]
input2 = input2[:, :, :, :-33]
# cut image H*W to be a multiple of 16
shape = input1.size()
input1 = input1[:, :, :16 * (shape[2] // 16), :16 * (shape[3] // 16)]
input2 = input2[:, :, :16 * (shape[2] // 16), :16 * (shape[3] // 16)]
input1 = input1.to(device)
input2 = input2.to(device)
'''
######### End Temp patch
# Encoded images:
use_new_net = True
if use_new_net:
outputs_cam1, encoded = net(input1)
outputs_cam2, encoded2 = net(input2)
else:
outputs_cam1, encoded, _ = net(input1)
outputs_cam2, encoded2, _ = net(input2)
save_channels_images = True
if save_channels_images:
save_path = '/home/access/dev/data_sets/image_z_crops/new_net/'
e1 = encoded[0, :, :, :]
e2 = encoded2[0, :, :, :]
for i_c in range(len(e1)):
im1 = e1[i_c, :, :]
im2 = e2[i_c, :, :]
cat = torch.cat((im1, im2), 0).cpu().detach().numpy()
cat = (cat - cat.min()) / (cat.max() - cat.min())
image = Image.fromarray((cat * 255).astype(np.uint8))
image.save(save_path + f"{i_c:04d}.png")
'''
c1 = torch.squeeze(encoded.cpu()).detach().numpy()
c2 = torch.squeeze(encoded2.cpu()).detach().numpy()
diff = c1 - c2
plt.hist(diff.flatten(), np.arange(diff.min(), diff.max()+1))
'''
e1 = torch.squeeze(encoded.cpu()).detach().numpy().flatten()
e2 = torch.squeeze(encoded2.cpu()).detach().numpy().flatten()
uncertainty_coefficient, mean_conditional_entropy = compute_conditional_entropy(e1, e2)
uncertainty_coefficient_avg += uncertainty_coefficient
mean_conditional_entropy_avg += mean_conditional_entropy
#print(uncertainty_coefficient)
max_curr = np.max((e1.max(), e2.max()))
if max_curr > c_max:
c_max = max_curr
min_curr = np.min((e1.min(), e2.min()))
if min_curr < c_min:
c_min = min_curr
##z1_avg = np.concatenate((z1_avg, e1[None,:]), axis=1)
##z2_avg = np.concatenate((z2_avg, e2[None,:]), axis=1)
hamm_dist = (e1 != e2).sum()
nbits = e1.size
hamm_dist_normalized = hamm_dist / nbits
if hamm_dist_normalized > max:
max = hamm_dist_normalized
max_idx = i
if hamm_dist_normalized < min:
min = hamm_dist_normalized
min_idx = i
print(hamm_dist_normalized)
avg_hamm_dist = avg_hamm_dist + hamm_dist_normalized
avg_hamm_dist = avg_hamm_dist/len(stereo1_path_list)
uncertainty_coefficient_avg = uncertainty_coefficient_avg/len(stereo1_path_list)
mean_conditional_entropy_avg = mean_conditional_entropy_avg/len(stereo1_path_list)
print('average uncertainty coefficient:', uncertainty_coefficient_avg)
print('average mean-conditional-entropy:', mean_conditional_entropy_avg)
print('average hamming distance: ', avg_hamm_dist)
print('min dist = ', min)
print('max dist = ', max)
plot_best_and_worst = True
if plot_best_and_worst:
img1_minDist = Image.open(stereo1_path_list[min_idx])
img2_minDist = os.path.join(stereo2_dir, os.path.basename(stereo1_path_list[min_idx]))
img2_minDist = Image.open(img2_minDist)
img1_maxDist = Image.open(stereo1_path_list[max_idx])
img2_maxDist = os.path.join(stereo2_dir, os.path.basename(stereo1_path_list[max_idx]))
img2_maxDist = Image.open(img2_maxDist)
print('the worst image is',stereo1_path_list[max_idx])
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.suptitle('Best Hamming distance, ' + str(format(min*100, ".4f")) + '%')
ax1.imshow(img1_minDist)
ax2.imshow(img2_minDist)
fig.tight_layout()
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.suptitle('Worst Hamming distance, ' + str(format(max*100, ".2f")) + '%')
ax1.imshow(img1_maxDist)
ax2.imshow(img2_maxDist)
fig.tight_layout()
plt.show()
|
StarcoderdataPython
|
3306695
|
<reponame>McMasterAI/RadiologyandAI-MedicalZooPytorch
import numpy as np
import scipy.ndimage as ndimage
def random_rotate3D(img_numpy, min_angle, max_angle):
"""
Returns a random rotated array in the same shape
:param img_numpy: 3D numpy array
:param min_angle: in degrees
:param max_angle: in degrees
:return: 3D rotated img
"""
assert img_numpy.ndim == 3, "provide a 3d numpy array"
assert min_angle < max_angle, "min should be less than max val"
assert min_angle > -360 and max_angle < 360
all_axes = [(1, 0), (1, 2), (0, 2)]
angle = np.random.randint(low=min_angle, high=max_angle + 1)
axes_random_id = np.random.randint(low=0, high=len(all_axes))
axes = all_axes[axes_random_id]
return ndimage.rotate(img_numpy, angle, axes=axes)
class RandomRotation(object):
def __init__(self, min_angle=-10, max_angle=10):
self.min_angle = min_angle
self.max_angle = max_angle
def __call__(self, img_numpy, label=None):
"""
Args:
img_numpy (numpy): Image to be rotated.
label (numpy): Label segmentation map to be rotated
Returns:
img_numpy (numpy): rotated img.
label (numpy): rotated Label segmentation map.
"""
img_numpy = random_rotate3D(img_numpy, self.min_angle, self.max_angle)
if label is not None:
label = random_rotate3D(label, self.min_angle, self.max_angle)
return img_numpy, label
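# Minimal usage sketch (illustrative only; the synthetic volume and segmentation map are stand-ins):
if __name__ == "__main__":
    volume = np.random.rand(32, 32, 32)
    seg_map = np.zeros((32, 32, 32))
    transform = RandomRotation(min_angle=-10, max_angle=10)
    rotated_volume, rotated_seg = transform(volume, seg_map)
    print(rotated_volume.shape, rotated_seg.shape)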
|
StarcoderdataPython
|
43276
|
<filename>src/calculator_app.py
from flask import Flask, render_template, request, redirect, url_for
from calculator_logic import find_take_home_OR
api = Flask(__name__)
tax_inputs = {}
@api.route('/welcome/')
def hello_world():
# if request.method == 'GET':
return render_template('tax_welcomepage.html')
@api.route('/tax_simulator/') #canonical url /link/ = /link
def tax_simulator():
return render_template('tax_inputpage.html')
@api.route('/tax_inputs/', methods = ['POST'])
def display_inputs():
# if request.method == 'POST':
user_formdata = request.form
global tax_inputs
tax_inputs = request.form
return render_template('tax_read.html', user_data = user_formdata)
@api.route('/tax_payable/', methods = ['POST'])
def final_action():
if request.form['submit'] == 'ok':
# user_formdata2 = request.form
# global tax_inputs
# print(tax_inputs)
# take_home, annual_tax_amt = find_take_home_OR(float(user_formdata2['gsalary']), user_formdata2['pfmode'])
take_home, annual_tax_amt = find_take_home_OR(float(tax_inputs['gsalary']), int(tax_inputs['pfmode']))
return render_template('tax_payable.html', take_home = take_home, annual_tax_amt = annual_tax_amt, name = tax_inputs['name'])
else:
return redirect(url_for('tax_simulator'))
if __name__ == '__main__':
api.run(debug=True, host='0.0.0.0')
|
StarcoderdataPython
|
12385
|
<filename>initialize_app_db.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The following steps are only needed to recreate an already existing DB
Backup and Delete the folder "migrations"
Backup and Delete the file "app.db"
Execute the next console commands
Linux
(venv) $ export FLASK_APP=microblog.py
MS Windows
(venv) $ set FLASK_APP=microblog.py
(venv) $ flask db init
(venv) $ flask db migrate -m "initialization"
(venv) $ python initialize_app_db.py
### (venv) $ flask shell
(venv) $ flask run
http://localhost:5000/
http://localhost:5000/index
Use the function "initialize_data_into_db()"
for data recreation.
Use the function "remove_data_from_db()"
for data deletion. Then you can simply
use again the function "initialize_data_into_db()"
for data recreation.
"""
from datetime import datetime, timedelta
from app import create_app, db
from app.models import User, Post
from config import Config
def initialize_data_into_db():
app = create_app(Config)
app_context = app.app_context()
app_context.push()
db.create_all()
u1 = User(username='john', email='<EMAIL>')
u2 = User(username='susan', email='<EMAIL>')
u3 = User(username='mary', email='<EMAIL>')
u4 = User(username='david', email='<EMAIL>')
u5 = User(username='daniel', email='<EMAIL>')
u5.set_password('<PASSWORD>')
db.session.add_all([u1, u2, u3, u4, u5])
now = datetime.utcnow()
p1 = Post(body="post from john", author=u1,
timestamp=now + timedelta(seconds=1))
p2 = Post(body="post from susan", author=u2,
timestamp=now + timedelta(seconds=4))
p3 = Post(body="post from mary", author=u3,
timestamp=now + timedelta(seconds=3))
p4 = Post(body="post from david", author=u4,
timestamp=now + timedelta(seconds=2))
p5 = Post(body="My post number one.", author=u5,
timestamp=now + timedelta(seconds=5))
p6 = Post(body="My post number two.", author=u5,
timestamp=now + timedelta(seconds=6))
p7 = Post(body="My post number three.", author=u5,
timestamp=now + timedelta(seconds=7))
p8 = Post(body="My post number four.", author=u5,
timestamp=now + timedelta(seconds=8))
p9 = Post(body="My post number five.", author=u5,
timestamp=now + timedelta(seconds=9))
db.session.add_all([p1, p2, p3, p4, p5, p6, p7, p8, p9])
db.session.commit()
u1.follow(u2)
u1.follow(u4)
u2.follow(u3)
u3.follow(u4)
db.session.commit()
users = User.query.all()
print(users)
"""
[<User john>, <User susan>, <User mary>, <User david>, <User daniel>]
"""
for u in users:
print(u.id, u.username)
def remove_data_from_db():
"""
In case of removing data...
"""
app = create_app(Config)
app_context = app.app_context()
app_context.push()
db.create_all()
db.session.remove()
db.drop_all()
app_context.pop()
if __name__ == '__main__':
initialize_data_into_db()
# remove_data_from_db()
|
StarcoderdataPython
|
3343935
|
<reponame>italo-batista/problems-solving<filename>models/queue.py
class queue:
def __init__(self, size):
self.array = [0] * size
self.tail = -1
self.head = 0
self.elements = 0
self.size = size
def push(self, element):
self.tail = (self.tail + 1) % self.size
self.array[self.tail] = element
self.elements += 1
def pop(self):
if not self.empty():
element = self.array[self.head]
self.elements -= 1
self.head = (self.head + 1) % self.size
return element
def empty(self):
return self.elements == 0
def top(self):
return self.array[self.head]
def my_array(self):
return self.array
|
StarcoderdataPython
|
3330169
|
from common.test_base.page_base import PageBase
class PersonalPage(PageBase):
def go_to_favorite_page(self):
self.log.info("go to favorite")
self.element("favorite_link").click()
sync = self.element("sync_complete_toast").wait_presence()
self.log.info(sync.text)
self.element("sync_complete_toast").wait_disappear()
def count_favorite_items(self):
self.element("favorite_item_address").wait_presence()
favorite_items = self.element("favorite_item_address").all()
self.log.info("there are %d favorite items" % len(favorite_items))
return len(favorite_items)
def back_to_main_page_from_favorite_page(self):
self.log.info("go back to main page from favorite page")
self.element("favorite_back").click()
self.element("personal_page_back").click()
# assertions below
def favorite_items_should_more_than_five(self):
count = self.count_favorite_items()
assert count == 5, "there are %d favorite items not 5" % count
|
StarcoderdataPython
|
1649216
|
# row selection using loc and iloc
import pandas as pd
import numpy as np
d={'one':pd.Series([1,2,3],index=['a','b','c']),
'two':pd.Series([1,2,3,4],index=['a','b','c','d'])
}
df=pd.DataFrame(d)
print(df.loc['b'])
print(df.iloc[0])
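# df.loc['b'] selects the row labelled 'b' (one=2.0, two=2), while
# df.iloc[0] selects the row at integer position 0, i.e. label 'a' (one=1.0, two=1).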
|
StarcoderdataPython
|
3285107
|
<gh_stars>0
import json
class Edge:
id = -1
node_1_id = -1
node_2_id = -1
weight = 1
class Node:
id = -1
edges = None # list of (edge_id, node_id)
class Instance:
def __init__(self, filepath=None):
self.nodes = []
self.edges = []
self.maximum_length = 1
if filepath is not None:
with open(filepath) as json_file:
data = json.load(json_file)
self.maximum_length = data["maximum_length"]
edges = zip(
data["edge_heads"],
data["edge_tails"],
data["edge_weights"])
for (node_1_id, node_2_id, weight) in edges:
self.add_edge(node_1_id, node_2_id, weight)
def add_node(self):
node = Node()
node.id = len(self.nodes)
node.edges = []
self.nodes.append(node)
def add_edge(self, node_id_1, node_id_2, weight):
edge = Edge()
edge.id = len(self.edges)
edge.node_1_id = node_id_1
edge.node_2_id = node_id_2
edge.weight = weight
self.edges.append(edge)
while max(node_id_1, node_id_2) >= len(self.nodes):
self.add_node()
self.nodes[node_id_1].edges.append((edge.id, node_id_2))
def write(self, filepath):
data = {"maximum_length": self.maximum_length,
"edge_heads": [edge.node_1_id for edge in self.edges],
"edge_tails": [edge.node_2_id for edge in self.edges],
"edge_weights": [edge.weight for edge in self.edges]}
with open(filepath, 'w') as json_file:
json.dump(data, json_file)
def check(self, filepath):
print("Checker")
print("-------")
with open(filepath) as json_file:
data = json.load(json_file)
# Compute number of duplicates.
nodes_in = [0] * len(self.nodes)
nodes_out = [0] * len(self.nodes)
for edge_id in data["edges"]:
edge = self.edges[edge_id]
nodes_in[edge.node_1_id] += 1
nodes_out[edge.node_2_id] += 1
number_of_duplicates = sum(v > 1 for v in nodes_in)
number_of_duplicates += sum(v > 1 for v in nodes_out)
# Compute is_connected and is_cycle.
is_connected = True
node_id_prec = None
for edge_id in data["edges"]:
edge = self.edges[edge_id]
if node_id_prec is not None:
if edge.node_1_id != node_id_prec:
is_connected = False
node_id_prec = edge.node_2_id
is_cycle = (node_id_prec == self.edges[data["edges"][0]].node_1_id)
# Compute length.
length = len(data["edges"])
# Compute weight.
weight = sum(self.edges[edge_id].weight
for edge_id in data["edges"])
is_feasible = (
(number_of_duplicates == 0)
and is_connected
and is_cycle
and length <= self.maximum_length)
print(f"Number of duplicates: {number_of_duplicates}")
print(f"Length: {length}")
print(f"Is cycle: {is_cycle}")
print(f"Is connected: {is_connected}")
print(f"Feasible: {is_feasible}")
print(f"Weight: {weight}")
return (is_feasible, weight)
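# Minimal construction sketch (illustrative only; the file name below is hypothetical):
# inst = Instance()
# inst.add_edge(0, 1, 1)
# inst.add_edge(1, 2, 1)
# inst.add_edge(2, 0, 1)
# inst.maximum_length = 3
# inst.write("triangle.json")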
def dynamic_programming(instance):
# TODO START
pass
# TODO END
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument(
"-a", "--algorithm",
type=str,
default="dynamic_programming",
help='')
parser.add_argument(
"-i", "--instance",
type=str,
help='')
parser.add_argument(
"-c", "--certificate",
type=str,
default=None,
help='')
args = parser.parse_args()
if args.algorithm == "dynamic_programming":
instance = Instance(args.instance)
solution = dynamic_programming(instance)
if args.certificate is not None:
data = {"edges": solution}
with open(args.certificate, 'w') as json_file:
json.dump(data, json_file)
print()
instance.check(args.certificate)
elif args.algorithm == "checker":
instance = Instance(args.instance)
instance.check(args.certificate)
|
StarcoderdataPython
|
1620780
|
<filename>bd_tools/utils.py
"""Defines a set of convenient utilities to use in scripts."""
import time
from typing import Callable
class DebugLoop:
"""Loops a function and prints statistics."""
def __init__(
self,
callback: Callable[[], bool],
num_iters: int,
iters_per_print: int = 1000,
):
"""
Initializes the loop variables.
Arguments:
callback: called for each iteration of the loop. This callable
takes no arguments and should return True if successful else,
False.
num_iters: number of iterations to run before exiting.
iters_per_print: number of iterations between prints.
"""
self._callback = callback
self._num_iters = num_iters
self._iters_per_print = iters_per_print
self._errors = 0
self._iters = 0
self._iter_start_time = None
def _loop_func(self):
self._errors += 0 if self._callback() else 1
self._iters += 1
if self._iters % self._iters_per_print == 0:
self._print_func()
self._iter_start_time = time.time()
def _print_func(self):
now = time.time()
diff = now - self._iter_start_time
freq = self._iters_per_print / diff
error_rate = self._errors / self._iters_per_print * 100
print(
f"Loop frequency is {round(freq, 2)}hz at "
f"{round(error_rate, 2)}% error rate."
)
# Reset statistics variables.
self._start_time = now
self._errors = 0
def loop(self):
"""Loops the callback. Can be escaped with ctrl^c."""
self._iter_start_time = time.time()
try:
while True:
self._loop_func()
if self._iters >= self._num_iters and self._num_iters != 0:
break
except KeyboardInterrupt:
print() # Clear line immediately after the ctrl-c
print(
f"Interrupted. Loop exiting. Completed {self._iters} "
f"iterations."
)
pass
|
StarcoderdataPython
|
3243100
|
# adjectives.json and nouns.json from: https://github.com/leonardr/olipy
# places.json from personal Moves data.
import json
nouns_file = open("nouns.json").read()
adjectives_file = open("adjectives.json").read()
places_file = open("places.json").read()
nouns = json.loads(nouns_file)
adjectives = json.loads(adjectives_file)
places = json.loads(places_file)
maximum_n = len(nouns)
maximum_a = len(adjectives)
punctuation = [".\n", ",", ".", "?", ".", ",\n", "?\n", "!\n"]
conjunction = ["and", "by", "with", "to", "for", "or", "not", "yet", "so", "nor", "caused", "is", "over"]
pronoun = ["my", "that", "thy", "his", "her", "our", "they", "their", "all", "this", "those"]
final = []
for place in places:
segments = place["segments"]
newPara = place.get("lastUpdate")
for segment in segments:
activities = segment.get("activities")
if activities != None:
for activity in activities:
keya = activity["distance"]
anum = int(keya)
while anum > maximum_a:
anum = anum - maximum_a
final.append(adjectives[anum])
place = segment.get("place")
if place != None:
keyp = place["id"]
pnum = int(keyp)
while pnum > maximum_n:
pnum = pnum - maximum_n
cnum = pnum
if cnum % 4 == 0:
while cnum > (len(conjunction) - 1):
cnum = cnum - len(conjunction)
final.append(conjunction[cnum])
elif cnum % 3 == 0:
while cnum > (len(pronoun) - 1):
cnum = cnum - len(pronoun)
final.append(pronoun[cnum])
final.append(nouns[pnum])
lastUpdate = segment.get("lastUpdate")
if lastUpdate != None:
lnum = int("".join(filter(str.isdigit, str(lastUpdate))))
lnum = lnum % len(punctuation)
final.append(punctuation[lnum]+"\n")
if newPara != None:
final.append("---\n\n")
joined = " ".join(final)
joined = joined.replace(" ?", "?").replace(" .", ".").replace(" !", "!").replace(" ,", ",").replace("\n ", "\n")
with open('locationpoetry.txt', 'a') as f:
f.write(joined)
|
StarcoderdataPython
|
139794
|
<filename>TargetDataLoaderProcess/data_loader.py
# Python 3 required. This is a seperate process.
import aiohttp
import asyncio
import async_timeout
import os
import numpy as np
import time
import cv2
# Settings:
Buffer = 100
BackBuffer = 20
Max_Query = 12
Timeout = 40
url_base = "https://test.yisual.com/images/media/download/picturethis/"
headers = {"api-key": "ccea03e0e3a08c428870393376e5cf7b7be7a55c", "api-secret": os.environ["SECRET"]}
cacheLoc = "/media/jerome/DATA/Study_d/ThesisD/TargetData/"
# dummy_im_id = "5461e5219da59bde29aed195"
# dummy_url = url_base + dummy_im_id
# counter txt's interface:
def update_fetched(fetched):
with open("fetched_temp.txt",'w') as f:
f.write(str(fetched))
f.flush()
os.fsync(f.fileno())
# atomic:
os.rename("fetched_temp.txt","fetched.txt")
def get_read():
global read
with open("read.txt",'r') as f:
numstr = f.read()
read = int(numstr)
return read
def to_filename(im_id):
return "target_{}.jpg".format(im_id)
def append_log(msg):
with open("log.txt",'a') as f:
f.write(str(time.time()) + ' :\t')
f.write(str(msg) + '\n')
async def download_coroutine(session, im_id, im_num):
# im_id = "5c59addcb71ee102f1e439ba"
cache = cacheLoc + im_id + '.jpg'
filename = to_filename(im_num)
if os.path.exists(cache):
# copy from cache:
os.system('cp {} ./{}'.format(cache,filename))
return
url = url_base + im_id
im = None
problematic = False
while type(im) == type(None):
try:
with async_timeout.timeout(Timeout):
async with session.get(url,headers=headers) as response:
with open(filename, 'wb') as f_handle:
while True:
chunk = await response.content.read(1024)
if not chunk:
# print('done')
break
f_handle.write(chunk)
f_handle.flush()
os.fsync(f_handle.fileno())
res = await response.release()
# Verify if download was succesfull:
im = cv2.imread(filename)
if type(im) == type(None):
problematic = True
append_log("{} {} Incorrect download.".format(im_num,im_id))
print("{} {} Incorrect download.".format(im_num,im_id))
except:
problematic = True
append_log("Downloading timed out, retrying {} {}".format(im_num,im_id))
print("Downloading timed out, retrying {} {}".format(im_num,im_id))
if problematic:
append_log("Succeeded! {} {}".format(im_num,im_id))
# Finally:
if os.path.exists(cacheLoc):
os.system('cp {} {}'.format(filename,cache))
return res
async def get_batch(loop,im_ids,im_nums):
async with aiohttp.ClientSession(loop=loop) as session:
tasks = [download_coroutine(session, im_id, im_num) for im_id,im_num in zip(im_ids,im_nums)]
await asyncio.gather(*tasks)
if __name__ == "__main__":
# init gobals
present = []
fetched = 0
read = 0
removed = 0
idle_count = 0
# init/reset protocol files:
update_fetched(0)
with open("read.txt",'w') as f:
f.write('0')
f.flush()
os.fsync(f.fileno())
ids = []
with open("ids.txt",'r') as f:
ids = [i.strip() for i in f.readlines()]
def shuffle(ids, epoch):
np.random.shuffle(ids)
filename = "ids_ep{}.txt".format(epoch)
with open(filename,'w') as f:
f.write('\n'.join(ids))
f.flush()
os.system('cp {} ids_current.txt'.format(filename))
def id_generator():
i = 0
epoch = 0
shuffle(ids,epoch)
while True:
yield ids[i]
i += 1
if i == len(ids):
i = 0
epoch += 1
shuffle(ids,epoch)
print("Loaded epoch {}".format(epoch))
id_gen = id_generator()
append_log("Starting")
while True:
# update read
read = get_read()
# print(fetched,read,removed)
# refill:
if (fetched - read) < Buffer:
# TODO: determine next imgs:
load_N = read + Buffer - fetched
load_N = min(load_N,Max_Query)
im_nums = [str(i) for i in range(fetched,fetched+load_N)]
im_ids = [next(id_gen) for _ in range(fetched,fetched+load_N)]
loop = asyncio.get_event_loop()
loop.run_until_complete(get_batch(loop,im_ids,im_nums))
# done fetching
fetched += load_N
present.extend(im_nums)
# broadcast
update_fetched(fetched)
idle_count = 0
else:
# we're all set
# Check for exitting:
stop = False
idle_count += 1
if idle_count > 1000: # about 3 mins idle
append_log("Idle time-out. Exiting.")
stop = True
if (fetched - read) > Buffer: # read.txt has decreased:
append_log("Read.txt has decreased. Exiting.")
stop = True
if stop:
for im_num in present:
os.remove(to_filename(im_num))
exit()
# sleep a bit to avoid spinning.
time.sleep(.2)
# remove
while removed < (read - BackBuffer):
try:
im_num = present[0]
except:
append_log("Non-existing file reported as read. Exiting.")
exit()
present = present[1:]
try:
os.remove(to_filename(im_num))
except:
append_log("While removing: File not found: {}".format(to_filename(im_num)))
print("While removing: File not found: {}".format(to_filename(im_num)))
removed += 1
|
StarcoderdataPython
|
4806573
|
from floodsystem import geo
from floodsystem.stationdata import build_station_list
def run():
"""Prints the number of rivers with a station and
prints the first 10 stations at the rivers
River Aire, River Cam, and River Thames
in alphabetical order.
"""
# Build station list
stations = build_station_list()
# List of rivers with at least one monitoring station
rivers = geo.rivers_with_station(stations)
# Number of rivers with at least one monitoring station
num = len(rivers)
first_ten = sorted(rivers)[:10]
print(f'{num} rivers. First 10 - {first_ten}')
river_map = geo.stations_by_river(stations)
print(sorted([station.name for station in river_map["River Aire"]]))
print(sorted([station.name for station in river_map["River Cam"]]))
print(sorted([station.name for station in river_map["River Thames"]]))
if __name__ == "__main__":
print("*** Task1D: CUED Part IA ***")
run()
|
StarcoderdataPython
|
3317668
|
<filename>__manifest__.py
# © 2021 <NAME> <<EMAIL>>, SOULinux
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{ # pylint: disable=C8101,C8103
'name': 'Plano de Contas SOULinux',
'summary': "SOULinux - Plano de Contas",
'description': """SOULinux - Plano de Contas para Microempresa""",
'version': '15.0.1.0.0',
'category': 'Localization',
'author': 'SOULinux',
'license': 'AGPL-3',
'website': 'http://www.soulinux.com',
'contributors': [
'<NAME> <<EMAIL>>',
],
'depends': [
'account',
],
'data': [
'data/account_group.xml',
'data/br_chart_data.xml',
'data/account.account.template.csv',
'data/account_tax_template_data.xml',
],
'active': True,
}
|
StarcoderdataPython
|
1751064
|
from concurrent.futures import ThreadPoolExecutor
from .mp import connection as _connection
from .executor import CoroBuilder
from .util import run_in_executor
__all__ = ["AioConnection"]
class AioConnection(metaclass=CoroBuilder):
coroutines = [
"recv",
"poll",
"send_bytes",
"recv_bytes",
"recv_bytes_into",
"send",
]
def __init__(self, obj):
""" Initialize the AioConnection.
obj - a multiprocessing.Connection object.
"""
super().__init__()
self._obj = obj
def __enter__(self):
self._obj.__enter__()
return self
def __exit__(self, *args, **kwargs):
self._obj.__exit__(*args, **kwargs)
def AioClient(*args, **kwargs):
""" Returns an AioConnection instance. """
conn = _connection.Client(*args, **kwargs)
return AioConnection(conn)
class AioListener(metaclass=CoroBuilder):
delegate = _connection.Listener
coroutines = ["accept"]
def accept(self):
conn = self._obj.accept()
return AioConnection(conn)
def __enter__(self):
self._obj.__enter__()
return self
def __exit__(self, *args, **kwargs):
self._obj.__exit__(*args, **kwargs)
def coro_deliver_challenge(*args, **kwargs):
executor = ThreadPoolExecutor(max_workers=1)
return run_in_executor(
executor, _connection.deliver_challenge, *args, **kwargs
)
def coro_answer_challenge(*args, **kwargs):
executor = ThreadPoolExecutor(max_workers=1)
return run_in_executor(
executor, _connection.answer_challenge, *args, **kwargs
)
def coro_wait(*args, **kwargs):
executor = ThreadPoolExecutor(max_workers=1)
return run_in_executor(
executor, _connection.wait, *args, **kwargs
)
|
StarcoderdataPython
|
1755392
|
<reponame>samkim91/CookItYourself
import requests
from bs4 import BeautifulSoup
content = ""  # TODO: fetch the page to parse, e.g. requests.get(url).text
|
StarcoderdataPython
|
3285727
|
#! /usr/bin/env python
import femtomes_ros_driver.femtomes as femtomes
import rospy
def main():
rospy.init_node("femtomes_rtk")
ip = rospy.get_param("~ip")
port = rospy.get_param("~port")
rtk = femtomes.Femtomes(ip, port)
rtk.capture()
rospy.spin()
if __name__ == "__main__":
try:
main()
except rospy.ROSInterruptException as e:
print(e)
|
StarcoderdataPython
|
4810824
|
<filename>static/bingfaceshi.py
# -*- coding: utf-8 -*-
# author: lituoheng
# Requires Python 3 or later
# Install the required third-party modules by running "pip install gevent grequests" from an administrator command prompt (cmd)
import grequests
import time
from collections import Counter
# Overrides the module's map() method, adding a callback for successful requests
def map(requests, stream=False, size=None, exception_handler=None, gtimeout=None, success_handler=None):
"""Concurrently converts a list of Requests to Responses.
:param requests: a collection of Request objects.
:param stream: If True, the content will not be downloaded immediately.
:param size: Specifies the number of requests to make at a time. If None, no throttling occurs.
:param exception_handler: Callback function, called when exception occured. Params: Request, Exception
:param gtimeout: Gevent joinall timeout in seconds. (Note: unrelated to requests timeout)
:param success_handler: Callback function, called when secceed. Params: Request
"""
requests = list(requests)
pool = grequests.Pool(size) if size else None
jobs = [grequests.send(r, pool, stream=stream) for r in requests]
grequests.gevent.joinall(jobs, timeout=gtimeout)
ret = []
for request in requests:
if request.response is not None:
ret.append(request.response)
success_handler and success_handler(request)
elif exception_handler and hasattr(request, 'exception'):
ret.append(exception_handler(request, request.exception))
else:
ret.append(None)
return ret
grequests.map = map
def main():
# success / failure counters
success = 0
fail = 0
# callback invoked when a request succeeds
def suc_handler(request):
nonlocal success
success += 1
# decode and parse the response
# request.response.encoding = request.response.apparent_encoding
# print(request.response.text)
pass
# callback invoked when a request fails (is lost)
def err_handler(request, exception):
nonlocal fail
fail += 1
pass
N = input('====================\n输入请求数量:')
if not N.isdigit() or int(N) < 0:
print('====================\n\n请输入正整数!\n\n====================')
raise TypeError
N = int(N)
# request headers
header = {"Content-type": "appliaction/json", "Accept":"application/json"}
# Build the requests; see the requests module docs for the available options (method, headers, proxies, timeout, etc.)
reqs = [
# grequests.get('http://appmid:8080/testDict/emr/WorkDesktop.jsp', timeout=10) for _ in range(N)
# grequests.post('http://192.168.1.111:9090/office/home/index', timeout=10) for _ in range(N)
# grequests.post('https://www.chudshop.com/office/home/index', timeout=10) for _ in range(N)
# grequests.post('http://www.imooc.com/search/hotwords', timeout=10) for _ in range(N)
grequests.get('http://www.baidu.com', headers=header) for _ in range(N)
]
print(f'====================')
print(f'请求地址:{reqs[0].url}')
print(f'请求数量:{N}')
# run M consecutive rounds of concurrent requests
M = 1
for i in range(M):
start = time.time()
# send the requests concurrently; gtimeout is the overall time limit in seconds
res_list = grequests.map(
reqs,
exception_handler=err_handler,
success_handler=suc_handler,
# gtimeout=100
)
if M > 1:
print(f'--------------------')
print(f'第{i + 1}轮结果')
print(f'====================')
print(f'请求时间:{round(time.time() - start, 4)}秒')
print(f'链接成功:{success}')
print(f'链接失败:{fail}')
print(f'成功率: {round(100 * success / N, 2)}%')
print(f'====================')
print(f'状态码统计:')
statusCounters = Counter(res.status_code if str(type(res)) == "<class 'requests.models.Response'>" else 'lost' for res in res_list)
for k, v in statusCounters.items():
print(f'{k}: {v}')
print(f'====================')
out = ''
# uncomment the next line to enable printing of the responses
# out = input('是否输出返回结果[Y/-]:')
if out in {'y', 'Y', '1', ' ', '是', '输出'}:
for res in res_list:
# decode and parse the response
res.encoding = res.apparent_encoding
print(f'====================')
# Responses can be parsed as JSON with the built-in json module, or as HTML with the third-party beautifulsoup / pyquery (a Python port of jQuery) modules
print([item.text for item in res_list])
print(f'====================')
# sleep time between rounds, in seconds
# time.sleep(1)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
33805
|
from flask import g, request
from flask_restful import reqparse
from werkzeug import datastructures
from ..exceptions.system_error import SystemError
from ..exceptions.system_exception import SystemException
from ..exceptions.service_error import ServiceError
from ..exceptions.service_exception import ServiceException
def _get_request():
if 'req' not in g:
g.req = reqparse.RequestParser()
return g.req
def get_argument(
key, *, default=None, type=str, location=None,
help=None, required=False, action='store'
):
'''
:param default: The value produced if the argument is absent from the
request.
:param type: The type to which the request argument should be
converted. If a type raises an exception, the message in the
error will be returned in the response. Defaults to :class:`unicode`
in python2 and :class:`str` in python3.
:param action: The basic type of action to be taken when this argument
is encountered in the request. Valid options are "store" and "append".
:param location: The attributes of the :class:`flask.Request` object
to source the arguments from (ex: headers, args, etc.), can be an
iterator. The last item listed takes precedence in the result set.
:param help: A brief description of the argument, returned in the
response when the argument is invalid. May optionally contain
an "{error_msg}" interpolation token, which will be replaced with
the text of the error raised by the type converter.
'''
cur_type = type # remember the type originally requested by the caller
type = str if type == int else cur_type # for int, fetch the value as str first and convert later
kwargs = dict(default=default, type=type, action=action)
if location:
kwargs['location'] = location
if type == 'file':
kwargs['type'] = datastructures.FileStorage
kwargs['location'] = location if location else 'files'
parser = _get_request()
parser.add_argument(key, **kwargs)
args = parser.parse_args()
if cur_type == int and args[key]: # convert the str result back to int
try:
args[key] = cur_type(args[key])
type = cur_type
except:
raise ServiceException(ServiceError.INVALID_VALUE, key)
if required and action == 'store' and \
(args[key] is None or type == str and args[key].strip() == '' and key != '_id'):
raise SystemException(SystemError.MISSING_REQUIRED_PARAMETER, help if help else key)
return args[key]
def get_request_ip():
if request.remote_addr == '127.0.0.1':
return '127.0.0.1'
ip_list = request.headers['X-Forwarded-For']
ip = ip_list.split(',')[0]
return ip
|
StarcoderdataPython
|
1634366
|
<filename>targets/power/isa/p-common/isa.py
# Copyright 2011-2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Power ISA target architecture definitions.
"""
# Futures
from __future__ import absolute_import, division, print_function
# Built-in modules
import os
# This party modules
import six
# Own modules
from microprobe.code.address import Address, InstructionAddress
from microprobe.code.ins import Instruction
from microprobe.code.var import Variable, VariableArray
from microprobe.exceptions import MicroprobeCodeGenerationError
from microprobe.target.isa import GenericISA
from microprobe.target.isa.register import Register
from microprobe.utils.logger import get_logger
__author__ = "<NAME>"
__copyright__ = "Copyright 2011-2021 IBM Corporation"
__credits__ = []
__license__ = "IBM (c) 2011-2021 All rights reserved"
__version__ = "0.5"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development" # "Prototype", "Development", or "Production"
# Constants
__all__ = ["PowerISA"]
LOG = get_logger(__name__)
_MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
# Functions
# Classes
class PowerISA(GenericISA):
def __init__(self, name, descr, path, ins, regs, comparators, generators):
super(PowerISA, self).__init__(name, descr, path,
ins, regs, comparators,
generators)
self._scratch_registers += [self.registers["GPR11"],
self.registers["GPR12"],
self.registers["FPR28"],
self.registers["FPR29"],
self.registers["VSR28"],
self.registers["VSR29"],
self.registers["CR4"],
# self.registers["VR28"],
# self.registers["VR29"],
# self.registers["VSR60"],
# self.registers["VSR61"],
]
self._control_registers += [reg for reg in self.registers.values()
if reg.type.name == "SPR"] + \
[reg for reg in self.registers.values()
if reg.type.name == "SPR32"] + \
[self.registers["FPSCR"],
self.registers["MSR"],
self.registers["VSCR"]]
# self._control_registers += [reg for reg in self.registers.values()
# if reg.type.name == "CR"]
def set_register(self, register, value, context, opt=True):
LOG.debug("Begin setting '%s' to value '%s'", register, value)
instrs = []
current_value = context.get_register_value(register)
force_reset = False
if isinstance(current_value, Address):
force_reset = True
closest_register = context.get_register_closest_value(register)
if closest_register is not None:
closest_value = context.get_register_value(closest_register)
else:
closest_value = None
if context.register_has_value(value):
present_reg = context.registers_get_value(value)[0]
if present_reg.type.name != register.type.name:
present_reg = None
else:
present_reg = None
if register.type.name == "FPR":
if present_reg is not None:
fmr_ins = self.new_instruction("FMR_V0")
fmr_ins.set_operands([register, present_reg])
instrs.append(fmr_ins)
else:
instrs += self.set_register(self.scratch_registers[0],
value,
context)
if not context.register_has_value(self.scratch_var.address):
instrs += self.set_register_to_address(
self.scratch_registers[1],
self.scratch_var.address,
context)
ldstore_reg = self._scratch_registers[1]
else:
ldstore_reg = context.registers_get_value(
self.scratch_var.address)[0]
std_ins = self.new_instruction("STD_V0")
std_ins.set_operands([self.scratch_registers[0],
ldstore_reg,
0])
instrs.append(std_ins)
ld_ins = self.new_instruction("LFD_V0")
ld_ins.set_operands([register, ldstore_reg, 0])
instrs.append(ld_ins)
elif register.type.name == "GPR":
value_highest = int((value & 0xFFFF000000000000) >> 48)
value_higher = int((value & 0x0000FFFF00000000) >> 32)
value_high = int((value & 0x00000000FFFF0000) >> 16)
value_low = int((value & 0x000000000000FFFF))
if present_reg is not None and present_reg == register:
# The value is already in the register
return []
elif value >= -32768 and value <= 32767 and opt:
li_ins = self.new_instruction("ADDI_V1")
li_ins.set_operands([register, 0, value])
instrs.append(li_ins)
elif present_reg is not None and opt:
or_ins = self.new_instruction("OR_V0")
or_ins.set_operands([present_reg, register, present_reg])
instrs.append(or_ins)
elif (not force_reset and current_value is not None and
abs(value - current_value) <= 32767 and opt):
addi_ins = self.new_instruction("ADDI_V0")
addi_ins.set_operands([register, register,
value - current_value])
instrs.append(addi_ins)
elif (closest_value is not None and
abs(value - closest_value) <= 32767 and opt):
addi_ins = self.new_instruction("ADDI_V0")
addi_ins.set_operands([register, closest_register,
value - closest_value])
instrs.append(addi_ins)
elif value >= -2147483648 and value <= 2147483647 and opt:
if value_high > 32767:
# Negative
value_high = (2**16 - value_high) * -1
lis_ins = self.new_instruction("ADDIS_V1")
lis_ins.set_operands([register, 0, value_high])
instrs.append(lis_ins)
ori_ins = self.new_instruction("ORI_V0")
ori_ins.set_operands([register, register, value_low])
instrs.append(ori_ins)
else:
if value_highest > 32767:
# Negative
value_highest = (2**16 - value_highest) * -1
lis_ins = self.new_instruction("ADDIS_V1")
lis_ins.set_operands([register, 0, value_highest])
instrs.append(lis_ins)
ori_ins = self.new_instruction("ORI_V0")
ori_ins.set_operands([register, register, value_higher])
instrs.append(ori_ins)
rldicr_ins = self.new_instruction("RLDICR_V0")
rldicr_ins.set_operands([register, register, 32, 31])
instrs.append(rldicr_ins)
oris_ins = self.new_instruction("ORIS_V0")
oris_ins.set_operands([register, register, value_high])
instrs.append(oris_ins)
ori_ins = self.new_instruction("ORI_V0")
ori_ins.set_operands([register, register, value_low])
instrs.append(ori_ins)
elif register.type.name == "CR":
if value > 15 or value < 0:
LOG.warning("User trying to set a CR register with an invalid "
"value (%d) ", value)
value_4bits = int((value & 0x000000000000000F))
instrs += self.set_register(self.scratch_registers[0],
value_4bits,
context)
fxm_mask = int("".join(
list(reversed("{0:08b}".format(2 ** int(
register.representation))))), 2)
mtocrf_ins = self.new_instruction("MTOCRF_V0")
mtocrf_ins.set_operands([self.scratch_registers[0], fxm_mask])
instrs.append(mtocrf_ins)
elif register.type.name == "VR" or register.type.name == "VSR":
if present_reg is not None:
if register.type.name == "VR":
vor_ins = self.new_instruction("VOR_V0")
vor_ins.set_operands([register, present_reg, present_reg])
instrs.append(vor_ins)
else:
xxlor_ins = self.new_instruction("XXLOR_V0")
xxlor_ins.set_operands([register, present_reg,
present_reg])
instrs.append(xxlor_ins)
else:
if not context.register_has_value(self.scratch_var.address):
if self.scratch_var.address is None and context.symbolic:
# Assume symbolic
instrs += self.set_register_to_address(
self.scratch_registers[1],
Address(base_address=self.scratch_var.name),
context)
else:
instrs += self.set_register_to_address(
self.scratch_registers[1],
self.scratch_var.address,
context)
if len(str(value).split("_")) == 2:
# Value format: <value>_<bit_size>
item_value = int(str(value).split("_")[0], base=0)
item_size = int(str(value).split("_")[1], base=10)
item_format_str = "%%0%dx" % (item_size // 4)
value = int((item_format_str %
item_value) * (128 // item_size), 16)
elif len(str(value).split("_")) == 1:
pass
else:
raise NotImplementedError("Unknown value format")
value_high = int((value & 0x0000000000000000FFFFFFFFFFFFFFFF))
value_low = int((value &
0xFFFFFFFFFFFFFFFF0000000000000000) >> 64)
instrs += self.set_register(self.scratch_registers[0],
value_high,
context)
std_ins = self.new_instruction("STD_V0")
std_ins.set_operands([self.scratch_registers[0],
self.scratch_registers[1],
8])
instrs.append(std_ins)
instrs += self.set_register(self.scratch_registers[0],
value_low,
context)
std_ins = self.new_instruction("STD_V0")
std_ins.set_operands([self.scratch_registers[0],
self.scratch_registers[1],
0])
instrs.append(std_ins)
if register.type.name == "VR":
lvx_ins = self.new_instruction("LVX_V1")
lvx_ins.set_operands([register,
self.registers["GPR0"],
self.scratch_registers[1]])
instrs.append(lvx_ins)
else:
# TODO: make sure we use the version where GPR0 is zero
lxvd2x_ins = self.new_instruction("LXVD2X_V1")
lxvd2x_ins.set_operands([register,
self.registers["GPR0"],
self.scratch_registers[1]])
instrs.append(lxvd2x_ins)
elif register.type.name in ["SPR", "SPR32"]:
instrs += self.set_register(self.scratch_registers[0],
value,
context)
# Skip code generation for register that are
# not architected
if register.representation == 'N/A':
return []
if register.type.name in ["SPR"]:
mtspr = self.new_instruction("MTSPR_V0")
else:
mtspr = self.new_instruction("MTSPR_V2")
mtspr.set_operands([self.scratch_registers[0], register])
instrs.append(mtspr)
if len(instrs) > 0:
return instrs
return super(PowerISA, self).set_register(register, value, context)
def negate_register(self, register, dummy_context):
instrs = []
if register.type.name == "FPR":
# FPRs map to VSRs
register_name = "VSR%s" % register.representation
register = self.registers[register_name]
if register.type.name == "GPR":
instr = self.new_instruction("NOR_V0")
instr.set_operands([register, register, register])
instrs.append(instr)
elif register.type.name == "CR":
raise NotImplementedError
elif register.type.name == "VSR":
instr = self.new_instruction("XXLNOR_V0")
instr.set_operands([register, register, register])
instrs.append(instr)
elif register.type.name == "VR":
instr = self.new_instruction("XXLNOR_V0")
reg = int(register.representation)
register = self.registers["VSR%d" % (reg + 32)]
instr.set_operands([register, register, register])
elif register.type.name == "SPR":
raise NotImplementedError
return instrs
def set_register_to_address(self, register, address, context,
force_absolute=False,
force_relative=False):
instrs = []
assert address is not None
LOG.debug("Begin setting '%s' to address '%s'", register, address)
if isinstance(address.base_address, Variable):
LOG.debug("Base address is a Variable: %s", address.base_address)
closest = context.get_closest_address_value(address)
if context.register_has_value(address):
present_reg = context.registers_get_value(address)[0]
displacement = 0
LOG.debug("Address already in register '%s'", present_reg)
elif closest is not None:
present_reg, taddress = closest
displacement = address.displacement - taddress.displacement
LOG.debug("Closest value '%s' found in '%s'",
taddress,
present_reg)
LOG.debug("Displacement needed: %s", displacement)
elif context.register_has_value(
Address(base_address=address.base_address)):
present_reg = context.registers_get_value(
Address(base_address=address.base_address))[0]
displacement = address.displacement
LOG.debug("Base address '%s' found in '%s'",
taddress,
present_reg)
LOG.debug("Displacement needed: %s", displacement)
else:
present_reg = None
displacement = None
LOG.debug("Present_reg: %s", present_reg)
LOG.debug("Displacement: %s", displacement)
if present_reg is not None:
if displacement != 0 and abs(displacement) < (2 ** 15):
addi_ins = self.new_instruction("ADDI_V0")
addi_ins.set_operands([register, present_reg,
displacement])
instrs.append(addi_ins)
LOG.debug("Computing directly from context (short)")
return instrs
if present_reg != register:
or_ins = self.new_instruction("OR_V0")
or_ins.set_operands([present_reg, register, present_reg])
instrs.append(or_ins)
if displacement != 0:
instrs += self.add_to_register(register, displacement)
LOG.debug("Computing directly from context (long)")
return instrs
if context.symbolic and not force_absolute and not force_relative:
# TODO: This should be a call to the environment object because
# the implementation depends on the environment
# Base address can be an instruction label (str) or
# a Variable instance
basename = address.base_address
if not isinstance(address.base_address, str):
basename = address.base_address.name
lis_ins = self.new_instruction("ADDIS_V1")
lis_ins.operands()[0].set_value(register)
lis_ins.operands()[1].set_value(0)
lis_ins.operands()[2].set_value(
"%s@highest" % basename,
check=False
)
instrs.append(lis_ins)
ori_ins = self.new_instruction("ORI_V0")
ori_ins.operands()[0].set_value(register)
ori_ins.operands()[1].set_value(register)
ori_ins.operands()[2].set_value(
"%s@higher" % basename,
check=False
)
instrs.append(ori_ins)
rldicr_ins = self.new_instruction("RLDICR_V0")
rldicr_ins.set_operands([register, register, 32, 31])
instrs.append(rldicr_ins)
oris_ins = self.new_instruction("ORIS_V0")
oris_ins.operands()[0].set_value(register)
oris_ins.operands()[1].set_value(register)
oris_ins.operands()[2].set_value(
"%s@h" % basename,
check=False
)
instrs.append(oris_ins)
ori_ins = self.new_instruction("ORI_V0")
ori_ins.operands()[0].set_value(register)
ori_ins.operands()[1].set_value(register)
ori_ins.operands()[2].set_value(
"%s@l" % basename,
check=False
)
instrs.append(ori_ins)
if address.displacement != 0:
instrs += self.add_to_register(register, address.displacement)
LOG.debug("End Loading symbolic reference")
return instrs
LOG.debug("Context not symbolic")
base_address = address.base_address
displacement = address.displacement
LOG.debug("Base_address: %s", base_address)
LOG.debug("Displacement: %s", displacement)
if isinstance(base_address, Variable):
LOG.debug("Get absolute address")
displacement += base_address.address.displacement
base_address = base_address.address.base_address
LOG.debug("Base_address 2: %s", base_address)
LOG.debug("Displacement 2: %s", displacement)
if isinstance(base_address, str):
if base_address == "data":
base_address = Address(base_address=base_address)
elif base_address == "code":
base_address = InstructionAddress(
base_address=base_address)
source_register = None
if context.register_has_value(base_address):
source_register = context.registers_get_value(base_address)[0]
else:
for reg, value in context.register_values.items():
if not isinstance(value, Address):
continue
if (Address(base_address=value.base_address) ==
base_address.base_address):
source_register = reg
displacement += base_address.displacement
base_address = Address(
base_address=base_address.base_address)
break
if value.base_address == base_address.base_address:
source_register = reg
displacement += base_address.displacement
base_address = Address(
base_address=base_address.base_address)
break
if source_register is None or displacement >= (2 ** 31):
# Not source register found
if base_address.base_address == "data":
value = context.data_segment
elif base_address.base_address == "code":
value = context.code_segment
else:
LOG.debug(context.dump())
raise MicroprobeCodeGenerationError(
"Unable to generate "
"the base address: '%s'"
" for target address: '%s'."
% (base_address, address)
)
if abs(displacement) >= (2 ** 31):
value = value + displacement
displacement = 0
assert(value is not None)
instrs += self.set_register(register, value, context)
if source_register is not None and source_register != register:
or_ins = self.new_instruction("OR_V0")
or_ins.set_operands([source_register, register, source_register])
instrs.append(or_ins)
if displacement != 0:
instrs += self.add_to_register(register, displacement)
LOG.debug("End address generation")
return instrs
# TODO: change API to support length parameter
def load(self, register, address, context):
# TODO: Ensure RA is not zero
ld_ins = self.new_instruction("LD_V0")
ld_ins.operands()[0].set_value(register)
ld_ins.memory_operands()[0].set_address(address, context)
return [ld_ins]
# TODO: change API to support single/double precision (length parameter)
def load_float(self, register, address, context):
# TODO: Ensure RA is not zero
lfd_ins = self.new_instruction("LFD_V0")
lfd_ins.operands()[0].set_value(register)
lfd_ins.memory_operands()[0].set_address(address, context)
return [lfd_ins]
# TODO: change API to support single/double precision (length parameter)
def store_float(self, register, address, context):
# TODO: Ensure RA is not zero
stfd_ins = self.new_instruction("STFD_V0")
stfd_ins.operands()[0].set_value(register)
stfd_ins.memory_operands()[0].set_address(address, context)
return [stfd_ins]
def store_integer(self, register, address, length, context):
if length == 64:
std_ins = self.new_instruction("STD_V0")
std_ins.operands()[0].set_value(register)
std_ins.memory_operands()[0].set_address(address, context)
return [std_ins]
elif length == 32:
stw_ins = self.new_instruction("STW_V0")
stw_ins.operands()[0].set_value(register)
stw_ins.memory_operands()[0].set_address(address, context)
return [stw_ins]
elif length == 16:
sth_ins = self.new_instruction("STH_V0")
sth_ins.operands()[0].set_value(register)
sth_ins.memory_operands()[0].set_address(address, context)
return [sth_ins]
elif length == 8:
stb_ins = self.new_instruction("STB_V0")
stb_ins.operands()[0].set_value(register)
stb_ins.memory_operands()[0].set_address(address, context)
return [stb_ins]
# elif length == 128:
# std_ins1 = self.new_instruction("STD_V0")
# std_ins1.operands()[0].set_value(register)
# std_ins1.memory_operands()[0].set_address(address, context)
# std_ins2 = self.new_instruction("STD_V0")
# std_ins2.operands()[0].set_value(register)
# std_ins2.memory_operands()[0].set_address(address + 8, context)
# return [std_ins1, std_ins2]
elif length == 256:
std_ins1 = self.new_instruction("STD_V0")
std_ins1.operands()[0].set_value(register)
std_ins1.memory_operands()[0].set_address(address, context)
std_ins2 = self.new_instruction("STD_V0")
std_ins2.operands()[0].set_value(register)
std_ins2.memory_operands()[0].set_address(address + 8, context)
std_ins3 = self.new_instruction("STD_V0")
std_ins3.operands()[0].set_value(register)
std_ins3.memory_operands()[0].set_address(address + 16, context)
std_ins4 = self.new_instruction("STD_V0")
std_ins4.operands()[0].set_value(register)
std_ins4.memory_operands()[0].set_address(address + 24, context)
return [std_ins1, std_ins2, std_ins3, std_ins4]
else:
raise NotImplementedError("Request store length: %d" % length)
def set_register_bits(self, register, value, mask, shift, context):
raise NotImplementedError
def store_decimal(self, address, length, value, context):
instrs = []
instrs += self.set_register(self.scratch_registers[3], value, context)
if length == 64:
dcffix_ins = self.new_instruction("DCFFIX_V0")
dcffix_ins.set_operands([self.scratch_registers[2],
self.scratch_registers[3]])
instrs.append(dcffix_ins)
instrs += self.store_float(self.scratch_registers[2],
address, context)
elif length == 128:
dcffixq_ins = self.new_instruction("DCFFIXQ_V0")
dcffixq_ins.set_operands([self.scratch_registers[2],
self.scratch_registers[3]])
instrs.append(dcffixq_ins)
# TODO: This assumes a 64-bit store
instrs += self.store_float(self.scratch_registers[2],
address, context)
instrs += self.store_float(self.scratch_registers[3],
address + 8, context)
else:
raise NotImplementedError
return instrs
@property
def program_counter(self):
raise NotImplementedError
def branch_unconditional_relative(self, source, target):
LOG.debug("Source: %s", source)
LOG.debug("Target: %s", target)
if isinstance(target, InstructionAddress):
target_address = target
elif isinstance(target, Instruction):
target_address = target.address
else:
raise NotImplementedError
if isinstance(source, InstructionAddress):
source_address = source
relative_offset = target_address - source_address
instruction = self.new_instruction("B_V0")
instruction.set_address(source_address)
LOG.debug("Source address: %s", source_address)
LOG.debug("Target address: %s", target_address)
LOG.debug("Relative offset: %s", relative_offset)
elif isinstance(source, Instruction):
source_address = source.address
relative_offset = target_address - source_address
instruction = source
else:
raise NotImplementedError
for operand in instruction.operands():
if not operand.type.address_relative:
continue
operand.set_value(relative_offset)
return instruction
def add_to_register(self, register, value):
instrs = []
if register.type.name == "GPR" and isinstance(value,
six.integer_types):
origreg = None
if register.name == "GPR0":
origreg = register
register = self._scratch_registers[0]
or_ins = self.new_instruction("OR_V0")
or_ins.set_operands([origreg, register, origreg])
instrs.append(or_ins)
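            # Immediate-splitting strategy: values with magnitude >= 2**16 are
            # added via ADDIS (upper 16 bits) plus a remainder; a remainder that
            # does not fit the signed 16-bit ADDI field is split in half and
            # added recursively.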
if abs(value) >= (2 ** 31):
raise NotImplementedError
if abs(value) >= (2 ** 16):
shift_value = value // (2 ** 16)
addis_ins = self.new_instruction("ADDIS_V0")
addis_ins.set_operands([register, register, shift_value])
instrs.append(addis_ins)
value = value - (shift_value * (2 ** 16))
if abs(value) < (2 ** 15):
if register.name != "GPR0":
addi_ins = self.new_instruction("ADDI_V0")
addi_ins.set_operands([register, register, value])
else:
addi_ins = self.new_instruction("ADDI_V1")
addi_ins.set_operands([register, 0, value])
instrs.append(addi_ins)
else:
assert abs(value) >= (2**15) and abs(value) < (2**16)
instrs = self.add_to_register(
register, value // 2) + \
self.add_to_register(register,
(value // 2) + (value % 2))
if origreg is not None:
or_ins = self.new_instruction("OR_V0")
or_ins.set_operands([register, origreg, register])
instrs.append(or_ins)
else:
raise NotImplementedError
return instrs
# TODO: change API to take into account operand length (now 64-bits)
def compare_and_branch(self, val1, val2, cond, target, context):
assert cond in ["<", ">", "!=", "=", ">=", "<="]
instrs = []
if isinstance(val1, Register) and isinstance(val2, six.integer_types):
cmpi_ins = self.new_instruction("CMPI_V0")
cmpi_ins.set_operands([self.scratch_registers[6], 1, val1, val2])
for reg in cmpi_ins.sets() + cmpi_ins.uses():
if reg not in cmpi_ins.allowed_regs:
cmpi_ins.add_allow_register(reg)
instrs.append(cmpi_ins)
elif isinstance(val1, Register) and isinstance(val2, Register):
assert val1.type.name == val2.type.name
if val1.type.name == "GPR":
cmp_ins = self.new_instruction("CMP_V0")
cmp_ins.set_operands([self.scratch_registers[6], 1,
val1, val2])
instrs.append(cmp_ins)
elif val1.type.name == "FPR":
fcmpu_ins = self.new_instruction("FCMPU_V0")
fcmpu_ins.set_operands([self.scratch_registers[6],
val1, val2])
instrs.append(fcmpu_ins)
else:
raise NotImplementedError
else:
raise NotImplementedError
        # Fix to use BF 4 of the CR register (it is set in the
        # reserved list of registers)
cr_field = 4
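        # BO/BI encoding note: BO=12 branches when the selected CR bit is set,
        # BO=4 when it is clear; BI selects the bit within CR field `cr_field`
        # (+0 = LT, +1 = GT, +2 = EQ), which is how the conditions below map.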
if cond == "<":
bo_value = 12
bi_value = (cr_field * 4) + 0
elif cond == ">=":
bo_value = 4
bi_value = (cr_field * 4) + 0
elif cond == ">":
bo_value = 12
bi_value = (cr_field * 4) + 1
elif cond == "<=":
bo_value = 4
bi_value = (cr_field * 4) + 1
elif cond == "=":
bo_value = 12
bi_value = (cr_field * 4) + 2
elif cond == "!=":
bo_value = 4
bi_value = (cr_field * 4) + 2
else:
raise NotImplementedError
bc_ins = self.new_instruction("BC_V0")
bc_ins.operands()[0].set_value(bo_value)
bc_ins.operands()[1].set_value(bi_value)
for operand in bc_ins.memory_operands():
if operand.is_branch_target:
taddress = InstructionAddress(base_address=target)
operand.set_address(taddress, context)
break
instrs.append(bc_ins)
return instrs
def nop(self):
instr = self.new_instruction("ORI_V1")
for operand in instr.operands():
value = list(operand.type.values())[0]
operand.set_value(value)
return instr
@property
def context_var(self):
if self._context_var is None:
self._context_var = VariableArray("%s_context_var" % self._name,
"uint8_t", 1600)
return self._context_var
def set_context(self, variable=None, tmpl_path=None):
""" """
if tmpl_path is None:
tmpl_path = _MODULE_DIR
return super(
PowerISA,
self).set_context(
variable=variable,
tmpl_path=tmpl_path)
def get_context(self, variable=None, tmpl_path=None):
""" """
if tmpl_path is None:
tmpl_path = _MODULE_DIR
return super(
PowerISA,
self).get_context(
variable=variable,
tmpl_path=tmpl_path)
|
StarcoderdataPython
|
196973
|
<filename>jsk_recognition/jsk_perception/node_scripts/deep_sort/vis_bboxes.py
import six
import cv2
def voc_colormap(nlabels):
colors = []
for i in six.moves.range(nlabels):
r, g, b = 0, 0, 0
for j in range(8):
if i & (1 << 0):
r |= 1 << (7 - j)
if i & (1 << 1):
g |= 1 << (7 - j)
if i & (1 << 2):
b |= 1 << (7 - j)
i >>= 3
colors.append([r, g, b])
return colors
def vis_bboxes(img, bboxes, labels,
font_scale=0.8,
thickness=1,
font_face=cv2.FONT_HERSHEY_SIMPLEX,
text_color=(255, 255, 255),
max_label_num=1024):
"""Visualize bounding boxes inside image.
"""
if len(bboxes) != len(labels):
        raise ValueError("len(bboxes) and len(labels) should be the same; "
                         "got len(bboxes):{}, len(labels):{}"
.format(len(bboxes), len(labels)))
colormap = voc_colormap(max_label_num)
CV_AA = 16 # for anti-alias
for bbox, label in zip(bboxes, labels):
color = colormap[label % max_label_num]
x1, y1, w, h = bbox
x2 = x1 + w
y2 = y1 + h
x1, y1, x2, y2 = map(int, [x1, y1, x2, y2])
cv2.rectangle(img, (x1, y1), (x2, y2), color, 2, CV_AA)
label_name = str(label)
img_bbox = img[y1:y2, x1:]
text = label_name
size, baseline = cv2.getTextSize(
text, font_face, font_scale, thickness)
cv2.rectangle(
img_bbox, (0, 0), (size[0], size[1] + baseline),
color=color, thickness=-1)
cv2.putText(img_bbox, text, (0, size[1]),
font_face, font_scale, text_color, thickness)
return img
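# Minimal usage sketch (hypothetical values): draws one labelled box on a blank
# canvas using only the functions defined above.
if __name__ == '__main__':
    import numpy as np
    canvas = np.zeros((200, 200, 3), dtype=np.uint8)
    out = vis_bboxes(canvas, bboxes=[(20, 30, 100, 60)], labels=[1])
    print(out.shape)  # (200, 200, 3)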
|
StarcoderdataPython
|
1731922
|
<reponame>plamen-nikolov/djangocms-flexslider
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class PropertiesConfig(AppConfig):
name = 'djangocms_flexslider'
verbose_name = _("djangocms flexslider app")
|
StarcoderdataPython
|
1619314
|
from importlib import metadata
# print(metadata.version('pip'))
metadados_pip = metadata.metadata('pip')
print(metadados_pip)
print(list(metadados_pip))
print(metadados_pip['Project-URL'])
print(len(metadata.files('django')))
print(metadata.requires('django'))
|
StarcoderdataPython
|
148040
|
"""
Evaluation Scripts
"""
from __future__ import absolute_import
from __future__ import division
from collections import namedtuple, OrderedDict
from network import mynn
import argparse
import logging
import os
import torch
import time
import numpy as np
from config import cfg, assert_and_infer_cfg
import network
import optimizer
from ood_metrics import fpr_at_95_tpr
from tqdm import tqdm
from PIL import Image
from sklearn.metrics import roc_auc_score, roc_curve, auc, precision_recall_curve, average_precision_score, plot_roc_curve
import torchvision.transforms as standard_transforms
import tensorflow as tf
import tensorflow_datasets as tfds
from torchvision.transforms.functional import to_pil_image
import matplotlib.pyplot as plt
dirname = os.path.dirname(__file__)
pretrained_model_path = os.path.join(dirname, 'pretrained/r101_os8_base_cty.pth')
# Argument Parser
parser = argparse.ArgumentParser(description='Semantic Segmentation')
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--arch', type=str, default='network.deepv3.DeepR101V3PlusD_OS8',
help='Network architecture. We have DeepSRNX50V3PlusD (backbone: ResNeXt50) \
and deepWV3Plus (backbone: WideResNet38).')
parser.add_argument('--dataset', type=str, default='cityscapes',
help='possible datasets for statistics; cityscapes')
parser.add_argument('--fp16', action='store_true', default=False,
help='Use Nvidia Apex AMP')
parser.add_argument('--local_rank', default=0, type=int,
help='parameter used by apex library')
parser.add_argument('--trunk', type=str, default='resnet101',
help='trunk model, can be: resnet101 (default), resnet50')
parser.add_argument('--bs_mult', type=int, default=2,
help='Batch size for training per gpu')
parser.add_argument('--bs_mult_val', type=int, default=1,
help='Batch size for Validation per gpu')
parser.add_argument('--class_uniform_pct', type=float, default=0,
help='What fraction of images is uniformly sampled')
parser.add_argument('--class_uniform_tile', type=int, default=1024,
help='tile size for class uniform sampling')
parser.add_argument('--batch_weighting', action='store_true', default=False,
help='Batch weighting for class (use nll class weighting using batch stats')
parser.add_argument('--jointwtborder', action='store_true', default=False,
help='Enable boundary label relaxation')
parser.add_argument('--snapshot', type=str, default=pretrained_model_path)
parser.add_argument('--restore_optimizer', action='store_true', default=False)
parser.add_argument('--date', type=str, default='default',
help='experiment directory date name')
parser.add_argument('--exp', type=str, default='default',
help='experiment directory name')
parser.add_argument('--tb_tag', type=str, default='',
help='add tag to tb dir')
parser.add_argument('--ckpt', type=str, default='logs/ckpt',
help='Save Checkpoint Point')
parser.add_argument('--tb_path', type=str, default='logs/tb',
help='Save Tensorboard Path')
parser.add_argument('--syncbn', action='store_true', default=True,
help='Use Synchronized BN')
parser.add_argument('--dist_url', default='tcp://127.0.0.1:', type=str,
help='url used to set up distributed training')
parser.add_argument('--backbone_lr', type=float, default=0.0,
help='different learning rate on backbone network')
parser.add_argument('--pooling', type=str, default='mean',
help='pooling methods, average is better than max')
parser.add_argument('--ood_dataset_path', type=str,
default='/home/nas1_userB/dataset/ood_segmentation/fishyscapes',
help='OoD dataset path')
# Anomaly score mode - msp, max_logit, standardized_max_logit
parser.add_argument('--score_mode', type=str, default='standardized_max_logit', #change to fssd!!!
help='score mode for anomaly [msp, max_logit, standardized_max_logit, fssd, standardized_fssd]')
# Boundary suppression configs
parser.add_argument('--enable_boundary_suppression', type=bool, default=False,
help='enable boundary suppression')
parser.add_argument('--boundary_width', type=int, default=4,
help='initial boundary suppression width')
parser.add_argument('--boundary_iteration', type=int, default=4,
help='the number of boundary iterations')
# Dilated smoothing configs
parser.add_argument('--enable_dilated_smoothing', type=bool, default=False,
help='enable dilated smoothing')
parser.add_argument('--smoothing_kernel_size', type=int, default=7,
help='kernel size of dilated smoothing')
parser.add_argument('--smoothing_kernel_dilation', type=int, default=6,
help='kernel dilation rate of dilated smoothing')
args = parser.parse_args()
# Enable CUDNN Benchmarking optimization
#torch.backends.cudnn.benchmark = True
random_seed = cfg.RANDOM_SEED
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.cuda.manual_seed_all(random_seed) # if use multi-GPU
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(random_seed)
args.world_size = 1
print(f'World Size: {args.world_size}')
if 'WORLD_SIZE' in os.environ:
# args.apex = int(os.environ['WORLD_SIZE']) > 1
args.world_size = int(os.environ['WORLD_SIZE'])
print("Total world size: ", int(os.environ['WORLD_SIZE']))
torch.cuda.set_device(args.local_rank)
print('My Rank:', args.local_rank)
# Initialize distributed communication
args.dist_url = args.dist_url + str(8000 + (int(time.time()%1000))//10)
torch.distributed.init_process_group(backend='nccl',
init_method=args.dist_url,
world_size=args.world_size,
rank=args.local_rank)
def get_net():
"""
Main Function
"""
# Set up the Arguments, Tensorboard Writer, Dataloader, Loss Fn, Optimizer
assert_and_infer_cfg(args)
net = network.get_net(args, criterion=None, criterion_aux=None)
net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)
net = network.warp_network_in_dataparallel(net, args.local_rank)
if args.snapshot:
epoch, mean_iu = optimizer.load_weights(net, None, None,
args.snapshot, args.restore_optimizer)
print(f"Loading completed. Epoch {epoch} and mIoU {mean_iu}")
else:
raise ValueError(f"snapshot argument is not set!")
class_mean = np.load(f'stats/{args.dataset}_mean.npy', allow_pickle=True)
class_var = np.load(f'stats/{args.dataset}_var.npy', allow_pickle=True)
fss = np.load(f'stats/fss_init_softmax.npy', allow_pickle=True)
fssd_mean = np.load(f'stats/{args.dataset}_fssd_mean.npy', allow_pickle=True)
fssd_var = np.load(f'stats/{args.dataset}_fssd_var.npy', allow_pickle=True)
net.module.set_statistics(mean=class_mean.item(),
var=class_var.item(),
fss = fss.tolist(),
fssd_mean = fssd_mean.item(),
fssd_var = fssd_var.item())
torch.cuda.empty_cache()
net.eval()
return net
def preprocess_image(x, mean_std):
x = Image.fromarray(x)
x = standard_transforms.ToTensor()(x)
x = standard_transforms.Normalize(*mean_std)(x)
x = x.cuda()
if len(x.shape) == 3:
x = x.unsqueeze(0)
return x
if __name__ == '__main__':
net = get_net()
mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
ood_data_root = args.ood_dataset_path
image_root_path = os.path.join(ood_data_root, 'leftImg8bit_trainvaltest/leftImg8bit/val')
mask_root_path = os.path.join(ood_data_root, 'gtFine_trainvaltest/gtFine/val')
if not os.path.exists(image_root_path):
raise ValueError(f"Dataset directory {image_root_path} doesn't exist!")
anomaly_score_list = []
ood_gts_list = []
for image_file in tqdm(os.listdir(image_root_path)):
image_path = os.path.join(image_root_path, image_file)
mask_path = os.path.join(mask_root_path, image_file)
if os.path.isfile(image_path):
# 3 x H x W
image = np.array(Image.open(image_path).convert('RGB')).astype('uint8')
mask = Image.open(mask_path)
ood_gts = np.array(mask)
ood_gts_list.append(np.expand_dims(ood_gts, 0))
with torch.no_grad():
image = preprocess_image(image, mean_std)
main_out, anomaly_score = net(image)
del main_out
### save output image ###
# image = torch.clamp(-anomaly_score.cpu(), 0, 255)
# plt.imshow(to_pil_image(image), cmap='gray')
# plt.imsave('img/sml'+str(image_file),to_pil_image(image))
# image = np.array(image, dtype=np.uint8)
anomaly_score_list.append(anomaly_score.cpu().numpy())
ood_gts = np.array(ood_gts_list)
anomaly_scores = np.array(anomaly_score_list)
# drop void pixels
ood_mask = (ood_gts == 1)
ind_mask = (ood_gts == 0)
ood_out = -anomaly_scores[ood_mask]
ind_out = -anomaly_scores[ind_mask]
ood_label = np.ones(len(ood_out))
ind_label = np.zeros(len(ind_out))
val_out = np.concatenate((ind_out, ood_out))
val_label = np.concatenate((ind_label, ood_label))
print('Measuring metrics...')
#AUROC
fpr, tpr, _ = roc_curve(val_label, val_out)
roc_auc = auc(fpr, tpr)
#AUPRC
precision, recall, _ = precision_recall_curve(val_label, val_out)
prc_auc = average_precision_score(val_label, val_out)
#FPR at 95 TPR
fpr = fpr_at_95_tpr(val_out, val_label)
print(f'AUROC score: {roc_auc}')
print(f'AUPRC score: {prc_auc}')
print(f'FPR@TPR95: {fpr}')
### plot curve ###
# plt.plot(fpr, tpr)
# plt.ylabel("True Positive Rate")
# plt.xlabel("False Positive Rate")
# plt.savefig("curve/sml_roc_curve.png")
# plt.cla()
# plt.plot(precision, recall)
# plt.ylabel("recall")
# plt.xlabel("precision")
# plt.savefig("curve/sml_precision_recall_curve.png")
|
StarcoderdataPython
|
1717061
|
from typing import Any
from FlaUILibrary.flaui.exception import FlaUiError
class TreeItemsParser:
"""
Helper class which handles the management of the given location string.
The location is used to locate the exact tree item in the tree control.
Examples:
location = N:Nameofitem1->N:Nameofitem2->N:Nameofitem3
location = I:indexofitem1->I:indexofitem2->I:indexofitem3
location = N:Nameofitem1->I:indexofitem2->I:indexofitem3
"""
def __init__(self, location):
self.location = location.split("->")
def get_treeitem(self, treeitems: Any, index: Any):
"""
        This function looks up the location entry at the given index (the entry
        can be either a name or an index specifier) and returns the
        corresponding tree item.
        If the given name or index is not found, a FlaUiError is raised.
"""
loc = self.location[index]
if loc.startswith("I:"):
loc = loc[2:]
try:
return treeitems[int(loc)]
except IndexError:
raise FlaUiError(FlaUiError.ArrayOutOfBoundException.format(int(loc))) from None
elif loc.startswith("N:"):
loc = loc[2:]
for item in treeitems:
if item.Name == loc:
return item
raise FlaUiError(FlaUiError.ElementNameNotFound.format(loc))
else:
raise FlaUiError(FlaUiError.FalseSyntax.format(loc)) from None
def is_last_element(self, index: Any):
"""
Returns true if the index corresponds the last element of given location series.
"""
if index == len(self.location) - 1:
return True
return False
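# Usage sketch with stand-in items; real callers pass FlaUI tree items that
# expose a `Name` attribute.
if __name__ == "__main__":
    from collections import namedtuple
    _Item = namedtuple("_Item", "Name")
    parser = TreeItemsParser("N:Root->I:1")
    first = parser.get_treeitem([_Item("Root"), _Item("Other")], 0)
    print(first.Name, parser.is_last_element(1))  # Root True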
|
StarcoderdataPython
|
3315974
|
<gh_stars>10-100
'''
Author: <NAME>
Dec 10, 2020
Data and paper from here: https://robjhyndman.com/publications/mint/
Frequency is monthly, total of 36 observations
Description:
'''
# Imports
import pandas as pd
import numpy as np
from collections import OrderedDict
from matplotlib import pyplot as plt
def import_tourism_large(filename, plotting=False):
# filename = 'hyndman_tourism_large.csv'
df = pd.read_csv(filename)
bottom_ts_labels = list(df.columns)[2:] # ignore data columns
num_bottom_ts = len(bottom_ts_labels)
    num_levels = len(bottom_ts_labels[0]) - 1 # levels: Total, State, Zone, Region, Type
# Get labels for all series including aggregate
level_labels = ['Total','State','Zone','Region','Type'] # order is important here! Taken from Table 6 in https://robjhyndman.com/papers/mint.pdf
level_dict = OrderedDict()
for idx,l in enumerate(level_labels):
if l == 'Total':
level_dict[l] = ('Total',)
else:
letter_list = set([e[idx-1] for e in bottom_ts_labels]) # use set to get unique
level_dict[l] = sorted(list(letter_list))
# Should be 555 series (yay!)
purpose = ['All', 'Hol', 'Vis', 'Bus', 'Oth']
all_labels = []
for i in range(num_levels-1):
level_list = []
if i == 0:
for p in purpose:
all_labels.append('Total' + p)
else:
for l in bottom_ts_labels:
level_list.append(l[:i])
# Get unique and sort
level_list = sorted(list(set(level_list)))
# Append type to each element of the list
all_labels.extend([e + p for e in level_list for p in purpose])
num_ts = len(all_labels)
S = np.zeros((num_ts, num_bottom_ts))
row = 0 #row counter
# Remaining rows loop through all labels
for l in all_labels:
#get 1st part of label (State/Region/Zone)
part1 = l[:-3]
#get 2nd part of label (purpose)
part2 = l[-3:]
# Totals
if part1 == 'Total':
if part2 == 'All':
mask = [1 for bl in bottom_ts_labels]
else:
mask = [1 if (bl.endswith(part2)) else 0 for bl in bottom_ts_labels]
# All others
else:
if part2 == 'All':
mask = [1 if bl.startswith(part1) else 0 for bl in bottom_ts_labels]
else:
mask = [1 if (bl.startswith(part1) and bl.endswith(part2)) else 0 for bl in bottom_ts_labels]
S[row,:] = mask
row += 1
# Get bottom series Y data
Y_bottom = df.transpose()[2:][:].to_numpy()
# Compute all Y data using S
Y = S @ Y_bottom
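    # Each row of S selects the bottom-level series belonging to one aggregate,
    # e.g. the 'TotalAll' row is all ones, so its entry in Y is the sum of every
    # bottom series, while zone/region rows sum only their children.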
# Plotting if True
if plotting:
# Plot S matrix
plt.figure(num=1, figsize=(8, 20), dpi=80, facecolor='w')
plt.spy(S)
plt.show()
# Plot Y data (only the first 60 of 555)
# Get shortened plot titles
acronyms = []
for i in all_labels:
if i == []:
acronyms.append(['Total'])
else:
ac = ["".join(e[0] for e in j.split()).upper() for j in i]
acronyms.append(ac)
fig, axs = plt.subplots(6,10, num = 2, figsize=(28, 14), facecolor='w')
fig.subplots_adjust(hspace = .5, wspace=.001)
# Set font sizes
small = 7
med = 10
big = 12
plt.rc('font', size=small) # controls default text sizes
plt.rc('axes', titlesize=med) # fontsize of the axes title
plt.rc('axes', labelsize=small) # fontsize of the x and y labels
plt.rc('xtick', labelsize=small) # fontsize of the tick labels
plt.rc('ytick', labelsize=small) # fontsize of the tick labels
plt.rc('legend', fontsize=small) # legend fontsize
plt.rc('figure', titlesize=big) # fontsize of the figure title
axs = axs.ravel()
for i in range(6*10):
axs[i].plot(np.arange(0,Y.shape[1],1),Y[i,:])
axs[i].set_title(all_labels[i])
plt.show()
# Save data to csv
# Indices and timestamps
index = pd.date_range(
start=pd.Timestamp('1998-01-01'), periods=Y.shape[1], freq="MS"
)
# Y data
dataY = {
str(column): Y[i, :]
for i, column in enumerate(all_labels)
}
df = pd.DataFrame(
index=index,
data=dataY,
)
df.to_csv('./data.csv')
# sanity check for Y
data = pd.read_csv('./data.csv', index_col=0)
values = data.values.transpose()
assert np.max(np.abs(Y - values)) <= 1e-6 # values in this dataset are large
# assert data.index.equals(index)
# S matrix
dataS={
str(column): S[:, i]
for i,column in enumerate(bottom_ts_labels)
}
agg_mat_df = pd.DataFrame(
index=[str(i) for i in all_labels],
data=dataS
)
agg_mat_df.to_csv('./agg_mat.csv')
# sanity check for S
agg_mat = pd.read_csv('./agg_mat.csv', index_col=0).values
assert (agg_mat == S).all()
return print('Importing ' + filename + ' successful!...')
if __name__ == "__main__":
import_tourism_large('hyndman_tourism_large.csv')
|
StarcoderdataPython
|
3277685
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-22 11:53
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('part', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Build',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('batch', models.CharField(blank=True, help_text='Batch code for this build output', max_length=100, null=True)),
('status', models.PositiveIntegerField(choices=[(40, 'Complete'), (10, 'Pending'), (20, 'Holding'), (30, 'Cancelled')], default=10, validators=[django.core.validators.MinValueValidator(0)])),
('creation_date', models.DateField(auto_now=True)),
('completion_date', models.DateField(blank=True, null=True)),
('title', models.CharField(help_text='Brief description of the build', max_length=100)),
('quantity', models.PositiveIntegerField(default=1, help_text='Number of parts to build', validators=[django.core.validators.MinValueValidator(1)])),
('notes', models.TextField(blank=True)),
('part', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='builds', to='part.Part')),
],
),
]
|
StarcoderdataPython
|
3348508
|
#!/usr/bin/env python
# encoding: utf-8
import heapq
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# base subroutine
    def mergeTwoLists(self, l1, l2):
curr = dummy = ListNode(-1)
while l1 and l2:
if l1.val < l2.val:
curr.next = l1
l1 = l1.next
else:
curr.next = l2
l2 = l2.next
curr = curr.next
curr.next = l1 or l2
return dummy.next
def mergeKLists(self, lists):
# dummy case
if not lists:
return None
left, right = 0, len(lists) - 1
while right > 0:
if left >= right:
left = 0
else:
lists[left] = self.mergeTwoLists(lists[left], lists[right])
left += 1
right -= 1
return lists[0]
# Time: O(nlogk)
# Space: O(logk)
# Divide and Conquer solution.
def mergeKListsV2(self, lists):
def mergeKListsHelper(lists, begin, end):
if begin > end:
return None
if begin == end:
return lists[begin]
            l = mergeKListsHelper(lists, begin, (begin+end)//2)
            r = mergeKListsHelper(lists, (begin+end)//2 + 1, end)
return self.mergeTwoLists(l, r)
# call
return mergeKListsHelper(lists, 0, len(lists) - 1)
# Time: O(nklogk)
# Space: O(k)
# Heap solution
def mergeKListsV3(self, lists):
curr = dummy = ListNode(-1)
heap = []
for idx, sorted_list in enumerate(lists):
if sorted_list:
heapq.heappush(heap, (sorted_list.val, idx))
while heap:
val, idx = heapq.heappop(heap)
curr.next = ListNode(val)
curr = curr.next
node = lists[idx].next
lists[idx] = node
if node:
heapq.heappush(heap, (node.val, idx))
        return dummy.next
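# Usage sketch: builds three sorted linked lists (via a helper added for this
# sketch) and merges them with the heap-based variant above.
def _from_list(values):
    dummy = curr = ListNode(-1)
    for v in values:
        curr.next = ListNode(v)
        curr = curr.next
    return dummy.next
if __name__ == "__main__":
    lists = [_from_list([1, 4, 7]), _from_list([2, 5, 8]), _from_list([3, 6, 9])]
    node = Solution().mergeKListsV3(lists)
    out = []
    while node:
        out.append(node.val)
        node = node.next
    print(out)  # [1, 2, 3, 4, 5, 6, 7, 8, 9]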
|
StarcoderdataPython
|
4812377
|
<filename>compiler/dialect_libraries/bq_library.py
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
library = """
->(left:, right:) = {arg: left, value: right};
# All ORDER BY arguments are wrapped, to avoid confusion with
# column index.
ArgMin(a) = SqlExpr("ARRAY_AGG({arg} order by [{value}][offset(0)] limit 1)[OFFSET(0)]",
{arg: a.arg, value: a.value});
ArgMax(a) = SqlExpr(
"ARRAY_AGG({arg} order by [{value}][offset(0)] desc limit 1)[OFFSET(0)]",
{arg: a.arg, value: a.value});
ArgMaxK(a, l) = SqlExpr(
"ARRAY_AGG({arg} order by [{value}][offset(0)] desc limit {lim})",
{arg: a.arg, value: a.value, lim: l});
ArgMinK(a, l) = SqlExpr(
"ARRAY_AGG({arg} order by [{value}][offset(0)] limit {lim})",
{arg: a.arg, value: a.value, lim: l});
Array(a) = SqlExpr(
"ARRAY_AGG({value} order by [{arg}][offset(0)])",
{arg: a.arg, value: a.value});
"""
|
StarcoderdataPython
|
3330179
|
import streamlit as st
import altair as alt
from clean_data_2 import *
import pandas as pd
st.image('logo.jpg', width=150, format='JPEG')
st.title('Top offers by demographic')
st.sidebar.title('Parameters')
income = st.sidebar.slider('Income level', 0, 140_000, 40_000)
gender = st.sidebar.selectbox(
"Select Gender",
("F", "M", "O"))
age = st.sidebar.slider('Age', 10, 100, 25)
@st.cache
def load_data():
df = pd.read_csv('df.csv')
df.drop(columns='Unnamed: 0', inplace=True)
profile = pd.read_csv('profile.csv')
profile.drop(columns='Unnamed: 0', inplace=True)
customers = per_customer_data(df, profile)
return customers
customers = load_data()
offers = get_most_popular_offers_filtered(
customers, n_top=10, income=income, gender=gender, age=age)
df_offer = pd.DataFrame(offers[0])
df_offer = df_offer[0].str.split(expand=True)
df_offer.columns = ('Offer Name', 'Reward', 'Difficulty', 'Duration')
st.write('Most effective offers for the selected demographic')
st.write('To view the most successful offers for the different demographics, select different sliders on the left side of the app')
st.table(df_offer)
offers_net_expense = offers[1]
df_expense = pd.DataFrame.from_records(offers_net_expense, index=[0])
df_expense = df_expense.melt()
df_expense.columns = ('Offer', 'Net Expense')
st.write('Net expense chart by offer type')
chart2 = alt.Chart(df_expense).mark_bar(size=40).encode(
y='Net Expense',
x='Offer',
).properties(width=700, height=600
).configure_axis(grid=False
).configure_view(strokeWidth=0
)
st.altair_chart(chart2)
|
StarcoderdataPython
|
3332310
|
from dataclasses import dataclass
@dataclass
class Video:
"""Represents an YouTube video."""
playlist_id: int = int()
video_id: int = int()
name: str = str()
search_query: str = str()
search_query_bg: str = str()
date: int = int()
def __iter__(self):
"""Creates an iterator of the object for the dict built-in."""
attributes = [
"playlist_id",
"video_id",
"name",
"search_query",
"search_query_bg",
"date",
]
for attr in attributes:
# yield a tuple for key and value pair
            yield (attr, getattr(self, attr))
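# Sketch: thanks to __iter__ above, dict() turns a Video straight into a plain
# mapping, e.g. for JSON serialization.
if __name__ == "__main__":
    video = Video(playlist_id=1, video_id=42, name="intro clip")
    print(dict(video))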
|
StarcoderdataPython
|
1770451
|
<reponame>SemanticPriming/word2manylanguages
import bz2
import html
import numpy as np
import os
import pandas as pd
import re
import requests
import simhash
import sklearn.linear_model
import sklearn.model_selection
import sklearn.preprocessing
import sklearn.utils
import zipfile
from lxml import etree
from gensim.models import Word2Vec
def download(source, language):
"""
Download data by source and language.
Source must be one of {'subtitles', 'wikipedia'}.
    Language must be a valid two-letter ISO 639-1 language code (lower case)
Output file will be named in the pattern 'source-language.extension'.
Subtitle files use the 'zip' extension, while Wikipedia dumps use 'bz2'.
For example, download('subtitles', 'fr') will result in a file called 'subtitles-fr.zip'
"""
sources = {
'subtitles': f'http://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/raw/{language}.zip',
'wikipedia': f'http://dumps.wikimedia.your.org/{language}wiki/latest/{language}wiki-latest-pages-meta-current.xml.bz2'
}
extensions = {
'subtitles': 'zip',
'wikipedia': 'bz2'
}
file_name = f'{source}-{language}.{extensions[source]}'
print(f'Remote file {sources[source]}, Local file {file_name}')
r = requests.get(sources[source], stream=True)
with open(file_name, "wb") as f:
for chunk in r.iter_content(chunk_size=1024):
f.write(chunk)
print("Download complete.")
class sentences(object):
"""
Return lines from a full corpus text file as a sequence
using the generator pattern (an iterable)
"""
def __init__(self, language):
self.myfile = open(f'data/corpus-{language}.txt', 'r')
def __iter__(self):
return self
# Python 3 compatibility
def __next__(self):
return self.next()
def next(self):
line = self.myfile.readline()
if line:
tok = [w for w in line.rstrip().split(' ') if len(w) > 0]
return tok
raise StopIteration()
class articles(object):
"""
Read a wikipedia dump file and return one article at a time
using the generator pattern (an iterable)
"""
def __init__(self, language):
self.myfile = bz2.open(f'data/wikipedia-{language}.bz2', 'rt', encoding='utf-8')
def __iter__(self):
return self
# Python 3 compatibility
def __next__(self):
return self.next()
def next(self):
article = ""
body = False
line = self.myfile.readline()
while line:
if "<page>" in line:
body = True
if "</page>" in line:
return html.unescape(html.unescape(article))
if body:
article = article + line
line = self.myfile.readline()
self.myfile.close()
raise StopIteration()
def clean(source, language):
"""
Start the cleaning process for a given source and language.
Routes to the appropriate file handing functions for the given source.
"""
if ('subtitles' == source):
clean_subtitles(language)
prune(source, language)
else:
clean_wikipedia(language)
prune(source, language)
def sub_xml_to_text(xml, parser):
"""
Extracts text from xml nodes in subtitle files, removes unused nodes.
"""
tree = etree.fromstring(xml, parser)
for node in tree.iter():
if node.tag == 'meta':
tree.remove(node)
return etree.tostring(tree, encoding=str, method='text')
def wiki_strip_circumflex(txt):
"""
Removes the (deeply nested) circumflex characters from wiki text.
"""
circumflex = 0
txt = list(txt)
for i in range(len(txt)):
if txt[i] == '{':
circumflex += 1
elif txt[i] == '}':
circumflex -= 1
txt[i] = ''
if circumflex > 0:
txt[i] = ''
elif circumflex < 0:
# discard unmatched
txt = []
break
return ''.join(txt)
# Regular expressions for cleaning subtitle text
subs_expressions = [
(r'<.*?>', ''), # xml tags
(r'http.*?(?:[\s\n\]]|$)', ''), # links
(r'\s\(.*?\)', ''), # parentheses
(r'([^\s]{2,})[\.\!\?\:\;]+?[\s\n]|$', '\\1\n'), # break sentences at periods
(r"[-–—/']", ' '), # hyphens, apostrophes and slashes
(r'\s*\n\s*', '\n'), # empty lines
(r'\s{2,}', ' '), # excessive spaces
]
subs_patterns = [(re.compile(expression[0], re.IGNORECASE), expression[1]) for expression in subs_expressions]
# Regular expressions for cleaning wikipedia text
wiki_expressions = [
(r'(?s)<ref.*?</ref>', ''), # strip reference links
(r'(?s)<references.*?</references>', ''), # strip references
(r'(?s)<table.*?</table>', ''), # strip tables
(r'(?s)<gallery.*?</gallery>', ''), # strip galleries
(r'(?s)<kml.*?</kml>', ''), # strip KML tags
(r'<.*?>', ''), # strip other xml tags
(r'http.*?(?:[\s\n\]]|$)', ''), # strip external http(s) links
(r'\[\[[^\]]*?:.*\|(.*?)\]\]', '\\1'), # strip links to files, etc. but keep labels
(r'\[\[[^\]]*?:(.*?)\]\]', ''), # strip category links
(r'\[\[[^\]]*?\|(.*?)\]\]', '\\1'), # convert labeled links to just labels
(r'(?m)^[\s]*[!?*;:=+\-|#_].*?$', ''), # strip lines that do not start with alphanumerics, quotes, or brackets
(r'(?m)^.*?\(UTC\).*?$', ''), # strip lines containing a time stamp
(r'\s\(.*?\)', ''), # remove everything in parentheses
(r'([^\s.!?:;]{2})[.!?:;]+?[\s\n]|$', '\\1\n'), # break sentences at periods
(r"[-–—/']", ' '), # replace hyphens, apostrophes and slashes with spaces
(r'\s*\n\s*', '\n'), # strip empty lines and lines containing whitespace
(r'\s{2,}', ' '), # strip excessive spaces
]
wiki_patterns = [(re.compile(expression[0], re.IGNORECASE), expression[1]) for expression in wiki_expressions]
def clean_text(text, patterns):
"""
Applies the given patterns to the input text. Ensures lower-casing of all text.
"""
txt = text
for pattern in patterns:
txt = pattern[0].sub(pattern[1], txt)
txt = ''.join([letter for letter in txt if (letter.isalnum() or letter.isspace())])
return txt.lower()
def clean_subtitles(language):
"""
Prepare subtitle files for processing.
"""
input_file = zipfile.ZipFile(f'subtitles-{language}.zip', 'r')
output_file = zipfile.ZipFile(f'subtitles-{language}-pre.zip', 'a', zipfile.ZIP_DEFLATED)
xmlparser = etree.XMLParser(recover=True, encoding='utf-8')
# Make list of files to process
files = []
for f in input_file.namelist():
if f.endswith('xml'):
if f.startswith(os.path.join('OpenSubtitles/raw', language)):
files.append(f)
print(f'Preprocessing {len(files)} {language} subtitle files.')
for f in sorted(files):
output_file.writestr(f.replace('xml', 'txt'),
clean_text(sub_xml_to_text(input_file.open(f).read(), xmlparser), subs_patterns))
print('Complete')
def token_frequency_check(tokens):
"""
Checking to see if the 30 most frequent tokens cover 30% of all tokens.
Probably not doing this.
"""
s = set(tokens)
freqs = []
for t in s:
freqs.append((t, tokens.count(t)))
    freqs.sort(key=lambda x: x[1], reverse=True)  # most frequent first, as the docstring describes
thresh = 30
if len(freqs) < 30:
thresh = len(freqs)
t30 = 0
for i in range(thresh):
t30 += freqs[i][1]
return t30 >= len(tokens) * 0.3
def get_hash(tokens):
"""
Creates the simhash for the given list of tokens
"""
shingles = [''.join(shingle) for shingle in
simhash.shingle(''.join(tokens), 4)]
hashes = [simhash.unsigned_hash(s.encode('utf8')) for s in shingles]
return simhash.compute(hashes)
def prune(source, language):
"""
Remove duplicate documents from archive file using simhash.
"""
input_file = zipfile.ZipFile(f'{source}-{language}-pre.zip', 'r')
to_remove = []
hash_list = []
hash_dict = dict()
print("Checking for duplicates.")
for f in input_file.namelist():
text = str(input_file.open(f).read())
tokens = re.split(r'\W+', text.lower(), flags=re.UNICODE)
hash = get_hash(tokens)
hash_list.append(hash)
hash_dict[hash] = f
input_file.close()
blocks = 4
distance = 2
matches = simhash.find_all(hash_list, blocks, distance)
print(f'Got {len(matches)} matches')
for match in matches:
print(f'({hash_dict[match[0]]}, {hash_dict[match[1]]})')
to_remove.append(hash_dict[match[1]])
print(f'Found {len(to_remove)} files to prune.')
input_file = zipfile.ZipFile(f'data/{source}-{language}-pre.zip', 'r')
output_file = zipfile.ZipFile(f'data/{source}-{language}-pruned.zip', 'a', zipfile.ZIP_DEFLATED)
for f in input_file.namelist():
if f not in to_remove:
output_file.writestr(f, input_file.open(f).read())
output_file.close()
def concatenate_corpus(language):
"""
Reads pre-processed subtitle and wikipedia text, and creates a single
text file containing all of the tokenized sentences.
"""
print(f"Concatenating {language} corpus.")
output_corpus = f'data/corpus-{language}.txt'
with open(output_corpus, mode="w") as out:
subs_input_file = zipfile.ZipFile(f'data/subtitles-{language}-pre.zip', 'r')
for f in subs_input_file.namelist():
out.write(subs_input_file.open(f).read().decode("utf-8"))
subs_input_file.close()
wiki_input_file = zipfile.ZipFile(f'data/wikipedia-{language}-pre.zip', 'r')
for f in wiki_input_file.namelist():
out.write(wiki_input_file.open(f).read().decode("utf-8"))
wiki_input_file.close()
def clean_wikipedia(language):
"""
Prepare wikipedia files for processing.
"""
with zipfile.ZipFile(f'wikipedia-{language}-pre.zip', 'a', zipfile.ZIP_DEFLATED) as output_archive:
i = 0
print(f'Preprocessing {language} Wikipedia dump.')
for article in articles(language):
filename = f'wiki-{language}-{str(i)}.txt'
txt = article.lower()
            txt = wiki_strip_circumflex(txt) if ((not txt.startswith('#'))
and ('<noinclude>' not in txt)
and ('__noindex__' not in txt)
) else ''
for pattern in wiki_patterns:
txt = pattern[0].sub(pattern[1], txt)
output_archive.writestr(filename, ''.join([letter for letter in txt if (letter.isalnum() or letter.isspace())]))
i += 1
print("Complete")
def vectorize_stream(language, min_freq=5, dim=50, win=3, alg=0):
"""
Creates the word2vec model using gensim.
"""
model = Word2Vec(sentences(language), min_count=min_freq, size=dim, workers=3, window=win, sg=alg)
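    # Note: the `size` keyword above follows the gensim < 4.0 API; gensim 4
    # renamed it to `vector_size`, so pin an older gensim or adjust this call.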
return model
def evaluate_norms(lang, wordsXdims, alpha=1.0):
# Using subs2vec norms data for now
path = os.path.join('/', 'home', 'pgrim', 'workspace', 'subs2vec', 'subs2vec')
norms_path = os.path.join(path, 'datasets', 'norms')
scores = []
for norms_fname in os.listdir(norms_path):
if norms_fname.startswith(lang):
print(f'predicting norms from {norms_fname}')
norms = pd.read_csv(os.path.join(norms_path, norms_fname), sep='\t', comment='#')
norms = norms.set_index('word')
score = predict_norms(wordsXdims, norms, alpha)
score['source'] = norms_fname
scores.append(score)
if len(scores) > 0:
scores = pd.concat(scores)
return scores
def predict_norms(vectors, norms, alpha=1.0):
cols = norms.columns.values
df = norms.join(vectors, how='inner')
# compensate for missing ys somehow
total = len(norms)
missing = len(norms) - len(df)
penalty = (total - missing) / total
print(f'missing vectors for {missing} out of {total} words')
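    # `penalty` is the fraction of norm words that actually have a vector; the
    # cross-validated scores below are scaled by it so that words without
    # vectors count against the model instead of being silently dropped.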
df = sklearn.utils.shuffle(df) # shuffle is important for unbiased results on ordered datasets!
model = sklearn.linear_model.Ridge(alpha=alpha) # use ridge regression models
cv = sklearn.model_selection.RepeatedKFold(n_splits=5, n_repeats=10)
# compute crossvalidated prediction scores
scores = []
for col in cols:
        # set dependent variable and calculate repeated 5-fold cross-validated fit/predict scores
df_subset = df.loc[:, vectors.columns.values] # use .loc[] so copy is created and no setting with copy warning is issued
df_subset[col] = df[col]
df_subset = df_subset.dropna() # drop NaNs for this specific y
x = df_subset[vectors.columns.values]
y = df_subset[col]
cv_scores = sklearn.model_selection.cross_val_score(model, x, y, cv=cv)
median_score = np.median(cv_scores)
penalized_score = median_score * penalty
scores.append({
'norm': col,
'adjusted r': np.sqrt(penalized_score), # take square root of explained variance to get Pearson r
'adjusted r-squared': penalized_score,
'r-squared': median_score,
'r': np.sqrt(median_score),
})
return pd.DataFrame(scores)
dimension_list = [50,100,200,300,500]
window_list = [3,4,5,6,7,8,9,10,11,12,13]
algo_list = [0,1]
def build_models(language):
for dim in dimension_list:
for win in window_list:
for alg in algo_list:
algo = 'cbow' if alg ==0 else 'sg'
base_file_name = f'{language}_{str(dim)}_{str(win)}_{algo}'
print("Building model " + base_file_name)
model = vectorize_stream(language, 5, dim, win, alg)
#Write down the model?
words=list(model.wv.vocab)
wordsxdims = pd.DataFrame(model[words],words)
wordsxdims.to_csv(f'{base_file_name}_wxd.csv',index_label='word')
|
StarcoderdataPython
|
3325602
|
from Camera import Camera
from abc import ABC
import numpy as np
import cv2
from skimage import io
from io import BytesIO
from IPython.display import clear_output, Image, display, update_display
import PIL
try:
from Cameras.PySpinCapture import PySpinCapture as psc
print('1')
except ImportError:
PySpinCapture = None
class Flir(Camera, ABC):
def __init__(self, exposure=0.01, white_balance=1, auto_focus=False, grayscale=False):
self._isMonochrome = True
self._is16bits = True
self.Cam = psc(0, self._isMonochrome, self._is16bits)
fps = self.getFPS()
resolution = self.getResolution()
super().__init__(exposure, white_balance, auto_focus, fps, resolution, grayscale)
self.hdr_exposures = None
def getImage(self, name='test', saveImage=True, saveNumpy=True, calibration=False, timeout=5000):
try:
# Take and return current camera frame
success, img = self.Cam.grabFrame()
# Save if desired
if saveImage:
if calibration:
filenamePNG = 'CalibrationImages/' + name + '.PNG'
cv2.imwrite(filenamePNG, cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
else:
filename = 'CapturedImages/' + name + '.PNG'
cv2.imwrite(filename, cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
if saveNumpy:
if calibration:
np.save('CalibrationNumpyData/' + name, img)
else:
np.save('CapturedNumpyData/' + name, img)
# self.Cam.release()
return img
except SystemError:
self.quit_and_open()
return None
def setExposure(self, exposure):
self.Cam.setExposure(exposure)
def getExposure(self):
return self.Cam.getExposure()
def getFPS(self):
return self.Cam.getFPS()
def setFPS(self, fps):
self.Cam.setFPS(fps)
def setAutoGain(self):
self.Cam.setCamAutoProperty()
def getGain(self):
return self.Cam.getGain()
def setGain(self, gain):
self.Cam.setGain(gain)
def getResolution(self):
size = self.Cam.getResolution()
return size
def setResolution(self, resolution):
self.Cam.setWidth(resolution[0])
self.Cam.setHeight(resolution[1])
def getHDRImage(self, name='test', saveImage=True, saveNumpy=True):
self.Cam.setupHDR()
imgs = self.Cam.captureHDR()
if saveNumpy:
np.save('CapturedNumpyData/' + name, imgs)
if saveImage:
png_frame = (imgs - np.min(imgs)) / (np.max(imgs) - np.min(imgs))
png_frame *= 255.0
io.imsave('CapturedImages/' + name + '.PNG', png_frame.astype(np.uint8))
def viewCameraStream(self):
img = self.getImage(saveImage=False, saveNumpy=False)
while True:
_,img = self.Cam.grabFrameCont()
cv2.imshow('FLIR camera image', img)
c = cv2.waitKey(1)
if c != -1:
# When everything done, release the capture
self.Cam._camera.EndAcquisition()
cv2.destroyAllWindows()
self.quit_and_open()
break
def viewCameraStreamJupyter(self):
# Live view in a Jupyter Notebook
try:
start = self.getImage(saveImage=False, saveNumpy=False)
g = BytesIO()
PIL.Image.fromarray(start).save(g, 'jpeg')
obj = Image(data=g.getvalue())
dis = display(obj, display_id=True)
while True:
img = self.getImage(saveImage=False, saveNumpy=False)
if img is None:
break
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
f = BytesIO()
PIL.Image.fromarray(img).save(f, 'jpeg')
obj = Image(data=f.getvalue())
update_display(obj, display_id=dis.display_id)
clear_output(wait=True)
except KeyboardInterrupt:
self.quit_and_open()
def viewCameraStreamJupyterWindows(self):
# Live view in a Jupyter Notebook
try:
start = self.getImage(saveImage=False, saveNumpy=False)
g = BytesIO()
PIL.Image.fromarray(start).save(g, 'jpeg')
obj = Image(data=g.getvalue())
dis = display(obj, display_id=True)
while True:
img = self.getImage(saveImage=False, saveNumpy=False)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
f = BytesIO()
PIL.Image.fromarray(img).save(f, 'jpeg')
obj = Image(data=f.getvalue())
update_display(obj, display_id=dis.display_id)
except KeyboardInterrupt:
self.quit_and_open()
def quit_and_close(self):
self.Cam.release()
def quit_and_open(self):
self.Cam.release()
self.Cam = psc(1, self._isMonochrome, self._is16bits)
def getStatus(self):
raise NotImplementedError
|
StarcoderdataPython
|
3334797
|
"""
This module defines functions and data structures relating to descriptors.
A descriptor is intended to be a generalized entity name; it's a short string
expression that can represent the input or output of a function in Bionic. For
example, instead of referring only to atomic entities like `raw_data` or
`model`, we will eventually be able to use complex expressions like `model,
report`, `list of model`, and `file_path of model`. However, for the time
being we still only support the simplest type of descriptor: a single entity
name.
A descriptor string can be parsed into a DescriptorNode object, which
represents the abstract syntax tree (AST) of the expression. Bionic's
internals use these "dnodes" to represent values to represent the inputs and
outputs of tasks. DescriptorNodes can also be converted back into descriptor
strings if they need to be serialized or presented to a human.
"""
from abc import ABC, abstractmethod
import re
import attr
ENTITY_NAME_PATTERN = re.compile("[a-zA-Z_][a-zA-Z0-9_]*")
class DescriptorNode(ABC):
"""
Abstract base class representing a parsed descriptor.
"""
@classmethod
def from_descriptor(self, descriptor):
"""
Parses a descriptor string and returns a DescriptorNode corresponding to
the descriptor's abstract syntax tree.
"""
if ENTITY_NAME_PATTERN.fullmatch(descriptor):
return EntityNode(descriptor)
else:
# For now we only support the simplest kind of descriptors.
raise ValueError(f"Invalid entity descriptor: {descriptor!r}")
@abstractmethod
def to_descriptor(self):
"""
Returns a descriptor string corresponding to this node.
"""
pass
def to_entity_name(self):
"""
If this descriptor is a simple entity name, returns that name; otherwise
throws a TypeError.
"""
raise TypeError(f"Descriptor {self.to_descriptor()!r} is not an entity name")
@attr.s(frozen=True)
class EntityNode(DescriptorNode):
"""
A descriptor node corresponding to a simple entity name.
"""
name = attr.ib()
def to_entity_name(self):
return self.name
def to_descriptor(self):
return self.name
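# Usage sketch (uses only the classes defined above):
if __name__ == "__main__":
    dnode = DescriptorNode.from_descriptor("raw_data")
    assert isinstance(dnode, EntityNode)
    assert dnode.to_entity_name() == "raw_data"
    assert dnode.to_descriptor() == "raw_data"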
|
StarcoderdataPython
|
1791136
|
<reponame>pwnfooj716/cube-program
import random
import copy
POP_SIZE = 50
INITIAL_LENGTH = 5
class PuzzleSolver:
def __init__(self, puzzle):
self.puzzle = puzzle
def generate(self, length):
chromosome = ""
temp_moves = self.puzzle.moves
prev_move = ""
for i in range(length):
move = random.choice(temp_moves)
if not self.puzzle.is_parallel(move, prev_move):
temp_moves = copy.copy(self.puzzle.moves)
temp_moves.remove(move)
prev_move = move
modifier = random.choice(self.puzzle.modifiers)
chromosome += move + modifier + " "
return chromosome[:-1]
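# Sketch with a minimal stand-in puzzle; the real project passes a cube model
# exposing `moves`, `modifiers` and `is_parallel`.
if __name__ == "__main__":
    class _DummyPuzzle:
        moves = ["R", "L", "U", "D", "F", "B"]
        modifiers = ["", "'", "2"]
        def is_parallel(self, a, b):
            return {a, b} in ({"R", "L"}, {"U", "D"}, {"F", "B"})
    print(PuzzleSolver(_DummyPuzzle()).generate(INITIAL_LENGTH))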
|
StarcoderdataPython
|
3365540
|
<filename>budget/util/fileloader.py
import json
import logging
import yaml
def load_json(filename):
fh = None
try:
with open(filename) as f:
fh = json.load(f)
except TypeError:
fh = json.load(filename)
except IOError:
fh = json.loads('{}')
except ValueError:
log = logging.getLogger(__name__)
log.error('Unable to read %s' % filename)
return fh
def save_json(filename, data):
try:
json.dump(data, open(filename, 'w+'))
except TypeError:
json.dump(data, filename)
except IOError:
raise
def load_yaml(filename):
try:
yamlfile = yaml.load(open(filename, 'r+'))
except TypeError:
yamlfile = yaml.load(filename)
except IOError:
raise
return yamlfile
def save_yaml(filename, data):
try:
yaml.dump(data, open(filename, 'w+'))
except TypeError:
yaml.dump(data, filename)
except IOError:
raise
|
StarcoderdataPython
|
4804733
|
<gh_stars>1-10
"""
Displays index.html. Leaves the routing to react.
"""
from flask import render_template
from . import app
@app.route('/')
@app.route('/gameDayLineups')
@app.route('/gameDateGames')
@app.route('/gameDayAnalysis')
def show_index():
return render_template('index.html')
|
StarcoderdataPython
|
153875
|
<filename>kinova_demo/nodes/kinova_demo/grab_object_in_tf.py
#!/usr/bin/env python
import rospy
import numpy as np
from pose_action_client import moveArm, currentCartesianCommand,Quaternion2EulerXYZ
from fingers_action_client import moveFingers, currentFingerPosition,unitParser
from math import pi
import tf
aux = [0.3, -0.3, 0.5, -3, 0, 0]
def getPoseObjectSeen(trans):
refe = list(aux)
x,y,z = trans[0:]
refe[0:2] = [x,y]
refe[2] = 0.6
moveArm(refe, is_relative=False)
refe[2] = 0.4
moveArm(refe, is_relative=False)
refe[2] = z + 0.2
moveArm(refe, is_relative=False)
if __name__ == '__main__':
rospy.init_node('grab_object_in_tf')
moveArm(aux, is_relative=False)
#number_object = rospy.get_param("index_object")
listener = tf.TransformListener()
try:
#'cosa_{}'.format(number_object)
listener.waitForTransform('/cosa_22','/j2n6s300_link_base',rospy.Time(), rospy.Duration(4.0))
trans,rot = listener.lookupTransform('cosa_22', 'j2n6s300_link_base' , rospy.Time(0))
getPoseObjectSeen(trans)
except (tf.LookupException) as e:
print("tf.LookupException: {}".format(e))
except tf.ConnectivityException as e:
        print("tf.ConnectivityException: {}".format(e))
except tf.ExtrapolationException as e:
        print("tf.ExtrapolationException: {}".format(e))
|
StarcoderdataPython
|
24321
|
<gh_stars>0
"""
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
custom_ioa - Falcon Custom Indicators of Attack API Interface Class
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
# pylint: disable=C0103 # Aligning method names to API operation IDs
from ._util import service_request, parse_id_list, force_default, args_to_params
from ._service_class import ServiceClass
from ._endpoint._custom_ioa import _custom_ioa_endpoints as Endpoints
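# Note: each method below resolves its target URL by matching its operation ID
# against the generated Endpoints list (operation ID at index 0, URL path at
# index 2) and strips the "?ids={}" placeholder when IDs travel as query params.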
class Custom_IOA(ServiceClass):
"""
The only requirement to instantiate an instance of this class
is a valid token provided by the Falcon API SDK OAuth2 class.
"""
@force_default(defaults=["parameters"], default_types=["dict"])
def get_patterns(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get pattern severities by ID
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-patterns
operation_id = "get_patterns"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def get_platformsMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get platforms by ID
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-platformsMixin0
operation_id = "get_platformsMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def get_rule_groupsMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get rule groups by ID
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-rule-groupsMixin0
operation_id = "get_rule_groupsMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
def create_rule_groupMixin0(self: object, body: dict, cs_username: str) -> dict:
"""
Create a rule group for a platform with a name and an optional description. Returns the rule group.
"""
# [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/create-rule-groupMixin0
operation_id = "create_rule_groupMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
header_payload["X-CS-USERNAME"] = cs_username
body_payload = body
returned = service_request(caller=self,
method="POST",
endpoint=target_url,
body=body_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def delete_rule_groupMixin0(self: object, *args, **kwargs) -> dict:
"""
Delete rule groups by ID. (Redirects to actual method. Typo fix.)
"""
returned = self.delete_rule_groupsMixin0(*args, **kwargs)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def delete_rule_groupsMixin0(self: object, cs_username: str, parameters: dict = None, **kwargs) -> dict:
"""
Delete rule groups by ID.
"""
# [DELETE] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/delete-rule-groupsMixin0
operation_id = "delete_rule_groupsMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
header_payload = self.headers
header_payload["X-CS-USERNAME"] = cs_username
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="DELETE",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
def update_rule_groupMixin0(self: object, body: dict, cs_username: str) -> dict:
"""
Update a rule group. The following properties can be modified: name, description, enabled.
"""
# [PATCH] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/update-rule-groupMixin0
operation_id = "update_rule_groupMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
header_payload["X-CS-USERNAME"] = cs_username
body_payload = body
returned = service_request(caller=self,
method="PATCH",
endpoint=target_url,
body=body_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def get_rule_types(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get rule types by ID
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-rule-types
operation_id = "get_rule_types"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
def get_rules_get(self: object, ids) -> dict:
"""
Get rules by ID and optionally version in the following format: ID[:version]
"""
# [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-rules-get
operation_id = "get_rules_get"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
body_payload = {}
body_payload["ids"] = parse_id_list(ids).split(",")
returned = service_request(caller=self,
method="POST",
endpoint=target_url,
body=body_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def get_rulesMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get rules by ID and optionally version in the following format: ID[:version].
The max number of IDs is constrained by URL size.
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-rulesMixin0
operation_id = "get_rulesMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
def create_rule(self: object, body: dict, cs_username: str) -> dict:
"""
Create a rule within a rule group. Returns the rule.
"""
# [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/create-rule
operation_id = "create_rule"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
header_payload["X-CS-USERNAME"] = cs_username
body_payload = body
returned = service_request(caller=self,
method="POST",
endpoint=target_url,
body=body_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def delete_rules(self: object, cs_username: str, parameters: dict = None, **kwargs) -> dict:
"""
Delete rules from a rule group by ID.
"""
# [DELETE] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/delete-rules
operation_id = "delete_rules"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
header_payload = self.headers
header_payload["X-CS-USERNAME"] = cs_username
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="DELETE",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
def update_rules(self: object, body: dict, cs_username: str) -> dict:
"""
Update rules within a rule group. Return the updated rules.
"""
# [PATCH] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/update-rules
operation_id = "update_rules"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
header_payload["X-CS-USERNAME"] = cs_username
body_payload = body
returned = service_request(caller=self,
method="PATCH",
endpoint=target_url,
body=body_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
def validate(self: object, body: dict) -> dict:
"""
Validates field values and checks for matches if a test string is provided.
"""
# [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/validate
operation_id = "validate"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
body_payload = body
returned = service_request(caller=self,
method="POST",
endpoint=target_url,
body=body_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def query_patterns(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get all pattern severity IDs
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-patterns
operation_id = "query_patterns"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def query_platformsMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get all platform IDs.
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-platformsMixin0
operation_id = "query_platformsMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def query_rule_groups_full(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Find all rule groups matching the query with optional filter.
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-rule-groups-full
operation_id = "query_rule_groups_full"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def query_rule_groupsMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Finds all rule group IDs matching the query with optional filter.
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-rule-groupsMixin0
operation_id = "query_rule_groupsMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def query_rule_types(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get all rule type IDs.
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-rule-types
operation_id = "query_rule_types"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def query_rulesMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Finds all rule IDs matching the query with optional filter.
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-rulesMixin0
operation_id = "query_rulesMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
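# --- Usage sketch (added; not part of the original module) ---
# A rough illustration of how these Custom IOA methods might be driven,
# assuming this mixin belongs to a FalconPy-style service class (called
# CustomIOA here purely as a placeholder) that supplies base_url, headers,
# ssl_verify and the authenticated service_request helper used above, and
# assuming the usual {"status_code", "body": {"resources": [...]}} response shape:
#
#     falcon = CustomIOA(creds={"client_id": CLIENT_ID, "client_secret": CLIENT_SECRET})
#     groups = falcon.query_rule_groupsMixin0(limit=10)
#     if groups["status_code"] == 200:
#         rule_ids = falcon.query_rulesMixin0(limit=10)["body"]["resources"]
#         rules = falcon.get_rulesMixin0(ids=",".join(rule_ids))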
|
StarcoderdataPython
|
3680
|
import tkinter as tk
import tkinter.messagebox
from Control import Control
class View:
def __init__(self, control : Control.Control):
self.control = control
# Init Window
self.root = tk.Tk()
self.root.title(u"Header File Generator")
self.root.geometry("700x800")
self.config_frame = tk.Frame(self.root)
# Config Table
lb_symbol = tk.Label(self.config_frame, width = 20)
lb_symbol["text"] = "Symbol"
lb_symbol.grid(row = 0, column = 0)
lb_description = tk.Label(self.config_frame, width = 40)
lb_description["text"] = "Detail"
lb_description.grid(row = 0, column = 1)
lb_enable = tk.Label(self.config_frame, width = 10)
lb_enable["text"] = "Enable"
lb_enable.grid(row = 0, column = 2)
for i, config in enumerate(self.control.getConfigs()):
symbol_entry = tk.Entry(self.config_frame, width=20)
symbol_entry.insert(tk.END, config.symbol)
symbol_entry.config(state = tk.DISABLED)
symbol_entry.config(disabledforeground = "black", disabledbackground = "white")
symbol_entry.grid(row= i + 1, column = 0)
detail_entry = tk.Entry(self.config_frame, width=40)
detail_entry.insert(tk.END, config.detail)
detail_entry.config(state = tk.DISABLED)
detail_entry.config(disabledforeground = "black", disabledbackground = "white")
detail_entry.grid(row= i + 1, column = 1)
bt_enable = tk.Button(self.config_frame, text="ON", width= 5)
bt_enable["text"] = "ON" if config.enable else "OFF"
color = "green" if config.enable else "red"
bt_enable.config(bg=color, activebackground = color)
bt_enable["command"] = lambda id = i, button = bt_enable : self.toggle_config_enable(id, button)
bt_enable.grid(row = i + 1, column = 2)
self.config_frame.pack(side=tk.TOP, anchor=tk.NW)
self.value_config_frame = tk.Frame(self.root)
# Config Table
lb_symbol = tk.Label(self.value_config_frame, width = 20)
lb_symbol["text"] = "Symbol"
lb_symbol.grid(row = 0, column = 0)
lb_description = tk.Label(self.value_config_frame, width = 40)
lb_description["text"] = "Detail"
lb_description.grid(row = 0, column = 1)
lb_value = tk.Label(self.value_config_frame, width = 10)
lb_value["text"] = "Value"
lb_value.grid(row = 0, column = 2)
lb_enable = tk.Label(self.value_config_frame, width = 10)
lb_enable["text"] = "Enable"
lb_enable.grid(row = 0, column = 3)
for i, val_config in enumerate(self.control.getValConfigs()):
symbol_entry = tk.Entry(self.value_config_frame, width=20)
symbol_entry.insert(tk.END, val_config.symbol)
symbol_entry.config(state = tk.DISABLED)
symbol_entry.config(disabledforeground = "black", disabledbackground = "white")
symbol_entry.grid(row= i + 1, column = 0)
detail_entry = tk.Entry(self.value_config_frame, width=40)
detail_entry.insert(tk.END, val_config.detail)
detail_entry.config(state = tk.DISABLED)
detail_entry.config(disabledforeground = "black", disabledbackground = "white")
detail_entry.grid(row= i + 1, column = 1)
value_entry = tk.Entry(self.value_config_frame, width=10)
value_entry.insert(tk.END, val_config.value)
value_entry.config(state = tk.DISABLED)
value_entry.config(disabledforeground = "black", disabledbackground = "white")
value_entry.grid(row= i + 1, column = 2)
bt_enable = tk.Button(self.value_config_frame, text="ON", width= 5)
bt_enable["text"] = "ON" if val_config.enable else "OFF"
color = "green" if val_config.enable else "red"
bt_enable.config(bg=color, activebackground = color)
bt_enable["command"] = lambda id = i, button = bt_enable : self.toggle_val_config_enable(id, button)
bt_enable.grid(row = i + 1, column = 3)
self.value_config_frame.pack(side=tk.TOP, anchor=tk.W)
# Generator Button
self.bt_generate = tk.Button(self.root)
self.bt_generate["text"] = "Generate Header"
self.bt_generate["command"] = self.generateHeader
self.bt_generate.pack(side=tk.BOTTOM, anchor=tk.SE)
def start(self):
self.root.mainloop()
def generateHeader(self):
self.control.generateHeader()
tk.messagebox.showinfo("Header Generator Info", "Generated:{0}".format(self.control.header_config.path))
def update(self):
pass
def toggle_config_enable(self, id, button : tk.Button):
config = self.control.getConfigs()[id]
config.enable = not config.enable
button["text"] = "ON" if config.enable else "OFF"
color = "green" if config.enable else "red"
button.config(bg=color, activebackground = color)
def toggle_val_config_enable(self, id, button : tk.Button):
val_config = self.control.getValConfigs()[id]
val_config.enable = not val_config.enable
button["text"] = "ON" if val_config.enable else "OFF"
color = "green" if val_config.enable else "red"
button.config(bg=color, activebackground = color)
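# --- Usage sketch (added; not part of the original module) ---
# Minimal wiring for this View, assuming Control.Control exposes getConfigs(),
# getValConfigs(), generateHeader() and a header_config object with a .path
# attribute (names taken from the calls above); the constructor argument is an
# assumption for illustration only:
#
#     control = Control.Control("header_config.json")
#     view = View(control)
#     view.start()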
|
StarcoderdataPython
|
180818
|
import unittest
from lmatch import film
class TestFilm(unittest.TestCase):
def setUp(self):
self.sample_film = film.Film(412, "path_that", "name_this", 5.21)
def test_ctor(self):
self.assertEqual(self.sample_film.id, 412)
self.assertEqual(self.sample_film.name, "name_this")
self.assertEqual(self.sample_film.url, "path_that")
self.assertEqual(self.sample_film.avg_rate, 5.21)
def test_hash(self):
self.assertEqual(hash(self.sample_film), 412)
def test_eq(self):
f1 = film.Film(412, "a", "b", 1.0)
f2 = film.Film(411, "a", "b", 1.0)
self.assertEqual(True, f1 == self.sample_film)
self.assertEqual(False, f2 == self.sample_film)
def test_repr(self):
self.assertEqual(repr(self.sample_film), "name_this")
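# Allow the test module to be run directly (standard unittest convention,
# added here as a convenience; assumes the lmatch package is importable).
if __name__ == "__main__":
    unittest.main()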
|
StarcoderdataPython
|
3256198
|
# Generated by Django 3.1.6 on 2021-08-08 07:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('jobs', '0014_auto_20210807_1625'),
]
operations = [
migrations.RemoveField(
model_name='templatetask',
name='parameters',
),
]
|
StarcoderdataPython
|
120162
|
<gh_stars>1-10
__author__ = 'eric'
from simulate.order_input import OrdersInput
from simulate.market_struct import MarketStructure
def simulate(df_market_struct, ls_symbols):
num_trading_days = len(df_market_struct)
#iterate over each trading day
for day in range(num_trading_days):
na_orders = df_market_struct['orders'][day]
na_positions = df_market_struct['positions'][day]
f_available_cash = df_market_struct['available_cash'][day]
#update holdings for each day based on orders
for order_dict in na_orders:
a_trade = order_dict['trade']
s_symbol = a_trade['symbol'][0]
f_close_price = df_market_struct[s_symbol][day]
i_current_position = na_positions[s_symbol]
i_trade_volume = a_trade['volume'][0]
f_trade_cost = i_trade_volume * f_close_price
if a_trade['type'] == 'BUY':
na_positions[s_symbol] = i_current_position + i_trade_volume
f_available_cash = f_available_cash - f_trade_cost
else:
na_positions[s_symbol] = i_current_position - i_trade_volume
f_available_cash = f_available_cash + f_trade_cost
#set the cash available and overall position for the next trading day
if day + 1 < num_trading_days:
df_market_struct['available_cash'][day+1] = f_available_cash
df_market_struct['positions'][day+1] = na_positions.copy()
#update end of day portfolio value based on new position sizes
f_daily_value = f_available_cash
for s_symbol in ls_symbols:
f_close_price = df_market_struct[s_symbol][day]
i_position_size = df_market_struct['positions'][day][s_symbol]
f_daily_value = f_daily_value + (f_close_price * i_position_size)
df_market_struct['closing_value'][day] = f_daily_value
pass
def output(values_file, df_market_struct):
f = open(values_file, 'w')
num_trading_days = len(df_market_struct)
for day in range(num_trading_days):
value = df_market_struct['closing_value'][day]
timestamp = df_market_struct['dates'][day]
f.write('{0:4d}, {1:2d}, {2:2d}, {3:7d}\n'
.format(timestamp.year, timestamp.month, timestamp.day, int(round(value, 0))))
pass
f.close()
def main(input_args):
import os
orders_csv = '{0:s}/{1:s}'.format(os.path.dirname(os.path.realpath(__file__)), input_args.orders_csv)
values_csv = '{0:s}/{1:s}'.format(os.path.dirname(os.path.realpath(__file__)), input_args.values_csv)
order_input = OrdersInput(orders_csv)
market_struct = MarketStructure(order_input, input_args.starting_cash)
simulate(market_struct.df_market_struct, order_input.get_symbol_list())
output(values_csv, market_struct.df_market_struct)
print("Done")
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Simulate the market over a given time period')
parser.add_argument('starting_cash', type=int, help='cash to start the trading period with')
parser.add_argument('orders_csv', help='(input) CSV file specifying dates or order execution')
parser.add_argument('values_csv', help='(output) CSV file specifying the daily value of the portfolio')
args = parser.parse_args()
main(args)
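# Example invocation (added note; the file name and figures are placeholders):
#
#     python market_sim.py 1000000 orders.csv values.csv
#
# where orders.csv sits next to this script, per the path handling in main().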
|
StarcoderdataPython
|
1726373
|
<reponame>agrandev/Openfdafinal<gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# <NAME>
import http.client
import http.server
import json
class OpenFDAClient():
OPENFDA_API_URL = "api.fda.gov"
OPENFDA_API_EVENT = "/drug/event.json"
OPENFDA_API_DRUG = '&search=patient.drug.medicinalproduct:'
OPENFDA_API_COMPANY='&search=companynumb:'
def get_event(self, limite):
conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)
#search = patient.reaction.reactionnedrapt:"fati"
conn.request('GET',self.OPENFDA_API_EVENT+'?limit=' + limite)
r1 = conn.getresponse()
data1 = r1.read()
data = data1.decode('utf8')
events = json.loads(data)
return events
def get_search_drug(self, drug_search):
conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)
conn.request('GET',self.OPENFDA_API_EVENT + '?limit=10' + self.OPENFDA_API_DRUG + drug_search )
r1 = conn.getresponse()
data1 = r1.read()
data = data1.decode('utf8')
events = json.loads(data)
return events
def get_search_company(self, company_search):
conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)
conn.request('GET',self.OPENFDA_API_EVENT+'?limit=10' + self.OPENFDA_API_COMPANY + company_search)
r1 = conn.getresponse()
data1 = r1.read()
data = data1.decode('utf8')
events = json.loads(data)
return events
class OpenFDAParser():
def get_medicamentos(self, events):
medicamentos = []
results = events["results"]
for event in results:
medicamentos += [event["patient"]["drug"][0]["medicinalproduct"]]
return medicamentos
def get_empresas(self, events):
empresas = []
results = events["results"]
for event in results:
empresas += [event["companynumb"]]
return empresas
def get_gender(self, events):
patientsex=[]
results=events['results']
for event in results:
patientsex+= [event['patient']['patientsex']]
return patientsex
class OpenFDAHTML():
def get_main_page(self):
html = """
<html>
<head>
</head>
<body>
<h1>OpenFDA Client</h1>
<form method="get" action="listDrugs">
<input type = "number" size="3" name="limit"></input>
<input type = "submit" value="Drug List"></input>
</form>
<form method="get" action="searchDrug">
<input type = "text" name="drug"></input>
<input type = "submit" value="Drug Search"></input>
</form>
<form method='get' action='listCompanies'>
<input type = "number" size="3" name="limit"></input>
<input type = "submit" value="Companies List"></input>
</form>
<form method="get" action="searchCompany">
<input type = "text" name="company"></input>
<input type = "submit" value="Company Search"></input>
</form>
<form method="get" action="listGender">
<input type = "number" size="3" name="limit"></input>
<input type = "submit" value="Gender List"></input>
</form>
</body>
</html>
"""
return html
def html_error(self):
html = """
<html>
<head>
</head>
<body>
<h1>Error 404</h1>
<br>Pagina no encontrada.</br>
<br>No hemos localizado la pagina que estabas buscando y el servidor devuelve error 404.</br>
<br>La pagina que buscas no existe o ha ocurrido un error inesperado.</br>
</body>
</html>
"""
return html
def list_html(self, items):
s=""
for item in items:
s += "<li>" +item+ "</li>"
html = """
<html>
<head></head>
<body>
<h1></h1>
<ol>
%s
</ol>
</body>
</html>
"""%(s)
return html
class testHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def get_parametro(self):
parametro = self.path.split("=")[1]
return parametro
def limite(self):
limite=str(self.path.split("=")[1])
if limite=='':
limite='10'
return limite
def do_GET(self):
fdaclient=OpenFDAClient()
fdaparser=OpenFDAParser()
fdahtml=OpenFDAHTML()
main_page= False
is_listdrugs=False
is_listcompanies=False
is_search_company=False
is_search_drug=False
is_patientsex=False
if self.path== "/":
main_page= True
elif '/listDrugs'in self.path:
is_listdrugs = True
elif '/searchDrug'in self.path:
is_search_drug=True
elif '/listCompanies'in self.path:
is_listcompanies=True
elif '/searchCompany'in self.path:
is_search_company=True
elif '/listGender' in self.path:
is_patientsex=True
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
if main_page:
html = fdahtml.get_main_page()
self.wfile.write(bytes(html, "utf8"))
elif is_listdrugs:
limite=self.limite()
event=fdaclient.get_event(limite)
medicamentos= fdaparser.get_medicamentos(event)
html= fdahtml.list_html(medicamentos)
self.wfile.write(bytes(html, "utf8"))
elif is_search_drug:
drug=self.get_parametro()
event=fdaclient.get_search_drug(drug)
companies=fdaparser.get_empresas(event)
html=fdahtml.list_html(companies)
self.wfile.write(bytes(html, "utf8"))#event
elif is_listcompanies:
limite=self.limite()
event=fdaclient.get_event(limite)
empresas= fdaparser.get_empresas(event)
html= fdahtml.list_html(empresas)
self.wfile.write(bytes(html, "utf8"))
elif is_search_company:
company=self.get_parametro()
event=fdaclient.get_search_company(company)
drugs=fdaparser.get_medicamentos(event)
html=fdahtml.list_html(drugs)
self.wfile.write(bytes(html, "utf8"))
elif is_patientsex:
limite=self.limite()
event=fdaclient.get_event(limite)
gender=fdaparser.get_gender(event)
html= fdahtml.list_html(gender)
self.wfile.write(bytes(html, "utf8"))
else:
error=fdahtml.html_error()
self.wfile.write(bytes(error, "utf8"))
return
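# Minimal local bootstrap for the handler above (added sketch; the bind
# address and port are assumptions, not part of the original module).
if __name__ == "__main__":
    httpd = http.server.HTTPServer(("localhost", 8000), testHTTPRequestHandler)
    print("Serving OpenFDA client on http://localhost:8000")
    httpd.serve_forever()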
|
StarcoderdataPython
|
3215553
|
<gh_stars>1-10
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
from functools import partial
import inspect
import itertools
import operator
from typing import cast, Iterator, Optional, List, Tuple
import unittest
from unittest import SkipTest
import warnings
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
try:
import numpy_dispatch
except ImportError:
numpy_dispatch = None
import jax
import jax.ops
from jax._src import api
from jax import lax
from jax import numpy as jnp
from jax import test_util as jtu
from jax._src import dtypes
from jax import tree_util
from jax.interpreters import xla
from jax.test_util import check_grads
from jax._src.util import prod
from jax._src.numpy.util import _parse_numpydoc, ParsedDoc
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
numpy_version = tuple(map(int, np.__version__.split('.')[:3]))
nonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]
nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes
one_dim_array_shapes = [(1,), (6,), (12,)]
empty_array_shapes = [(0,), (0, 4), (3, 0),]
scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE]
array_shapes = nonempty_array_shapes + empty_array_shapes
nonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes
nonempty_shapes = scalar_shapes + nonempty_array_shapes
all_shapes = scalar_shapes + array_shapes
float_dtypes = jtu.dtypes.all_floating
complex_dtypes = jtu.dtypes.complex
int_dtypes = jtu.dtypes.all_integer
unsigned_dtypes = jtu.dtypes.all_unsigned
bool_dtypes = jtu.dtypes.boolean
default_dtypes = float_dtypes + int_dtypes
inexact_dtypes = float_dtypes + complex_dtypes
number_dtypes = float_dtypes + complex_dtypes + int_dtypes
all_dtypes = number_dtypes + bool_dtypes
python_scalar_dtypes = [jnp.bool_, jnp.int_, jnp.float_, jnp.complex_]
# uint64 is problematic because with any uint type it promotes to float:
int_dtypes_no_uint64 = [d for d in int_dtypes + unsigned_dtypes if d != np.uint64]
def _valid_dtypes_for_shape(shape, dtypes):
# Not all (shape, dtype) pairs are valid. In particular, Python scalars only
# have one type in each category (float, bool, etc.)
if shape is jtu.PYTHON_SCALAR_SHAPE:
return [t for t in dtypes if t in python_scalar_dtypes]
return dtypes
def _shape_and_dtypes(shapes, dtypes):
for shape in shapes:
for dtype in _valid_dtypes_for_shape(shape, dtypes):
yield (shape, dtype)
def _compatible_shapes(shape):
if shape in scalar_shapes or np.ndim(shape) == 0:
return [shape]
return (shape[n:] for n in range(len(shape) + 1))
def _get_y_shapes(y_dtype, shape, rowvar):
# Helper function for testCov.
if y_dtype is None:
return [None]
if len(shape) == 1:
return [shape]
elif rowvar or shape[0] == 1:
return [(1, shape[-1]), (2, shape[-1]), (5, shape[-1])]
return [(shape[0], 1), (shape[0], 2), (shape[0], 5)]
OpRecord = collections.namedtuple(
"OpRecord",
["name", "nargs", "dtypes", "shapes", "rng_factory", "diff_modes",
"test_name", "check_dtypes", "tolerance", "inexact"])
def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes,
test_name=None, check_dtypes=True,
tolerance=None, inexact=False):
test_name = test_name or name
return OpRecord(name, nargs, dtypes, shapes, rng_factory, diff_modes,
test_name, check_dtypes, tolerance, inexact)
JAX_ONE_TO_ONE_OP_RECORDS = [
op_record("abs", 1, number_dtypes + unsigned_dtypes + bool_dtypes,
all_shapes, jtu.rand_default, ["rev"]),
op_record("add", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("ceil", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("ceil", 1, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_default, [], check_dtypes=False),
op_record("conj", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("exp", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("fabs", 1, float_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("float_power", 2, inexact_dtypes, all_shapes,
partial(jtu.rand_default, scale=1), ["rev"],
tolerance={jnp.bfloat16: 1e-2, np.float32: 1e-3,
np.float64: 1e-12, np.complex64: 2e-4,
np.complex128: 1e-12}, check_dtypes=False),
op_record("floor", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("floor", 1, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_default, [], check_dtypes=False),
op_record("greater", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("greater_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("i0", 1, float_dtypes, all_shapes, jtu.rand_default, [],
check_dtypes=False),
op_record("ldexp", 2, int_dtypes, all_shapes, jtu.rand_default, [], check_dtypes=False),
op_record("less", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("less_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("log", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("logical_and", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_not", 1, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_or", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_xor", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("maximum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("minimum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("multiply", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("negative", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("nextafter", 2, [f for f in float_dtypes if f != jnp.bfloat16],
all_shapes, jtu.rand_default, ["rev"], inexact=True, tolerance=0),
op_record("not_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
op_record("array_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
op_record("array_equiv", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
op_record("reciprocal", 1, inexact_dtypes, all_shapes, jtu.rand_default, []),
op_record("subtract", 2, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("signbit", 1, default_dtypes + bool_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"]),
op_record("trunc", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("trunc", 1, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, [], check_dtypes=False),
op_record("sin", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("cos", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("tan", 1, number_dtypes, all_shapes,
partial(jtu.rand_uniform, low=-1.5, high=1.5), ["rev"],
inexact=True),
op_record("sinh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("cosh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
# TODO(b/142975473): on CPU, tanh for complex128 is only accurate to
# ~float32 precision.
# TODO(b/143135720): on GPU, tanh has only ~float32 precision.
op_record("tanh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
tolerance={np.float64: 1e-7, np.complex128: 1e-7},
inexact=True),
op_record("arcsin", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arccos", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arctan", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arctan2", 2, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arcsinh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True, tolerance={np.complex64: 2E-4, np.complex128: 2E-14}),
op_record("arccosh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True, tolerance={np.complex64: 2E-2, np.complex128: 2E-12}),
op_record("arctanh", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True, tolerance={np.float64: 1e-9}),
]
JAX_COMPOUND_OP_RECORDS = [
# angle has inconsistent 32/64-bit return types across numpy versions.
op_record("angle", 1, number_dtypes, all_shapes, jtu.rand_default, [],
check_dtypes=False, inexact=True),
op_record("atleast_1d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("atleast_2d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("atleast_3d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("cbrt", 1, default_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("conjugate", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("deg2rad", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero, ["rev"],
inexact=True),
op_record("divmod", 2, int_dtypes + float_dtypes, all_shapes,
jtu.rand_nonzero, []),
op_record("exp2", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
tolerance={jnp.bfloat16: 4e-2, np.float16: 1e-2}, inexact=True),
# TODO(b/142975473): on CPU, expm1 for float64 is only accurate to ~float32
# precision.
op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
test_name="expm1_large", tolerance={np.float64: 1e-8}, inexact=True),
op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_small_positive,
[], tolerance={np.float64: 1e-8}, inexact=True),
op_record("fix", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("fix", 1, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_default, [], check_dtypes=False),
op_record("floor_divide", 2, number_dtypes, all_shapes,
jtu.rand_nonzero, ["rev"]),
op_record("floor_divide", 2, unsigned_dtypes, all_shapes,
jtu.rand_nonzero, ["rev"]),
op_record("fmin", 2, number_dtypes, all_shapes, jtu.rand_some_nan, []),
op_record("fmax", 2, number_dtypes, all_shapes, jtu.rand_some_nan, []),
op_record("fmod", 2, default_dtypes, all_shapes, jtu.rand_some_nan, []),
op_record("heaviside", 2, default_dtypes, all_shapes, jtu.rand_default, [],
inexact=True),
op_record("hypot", 2, default_dtypes, all_shapes, jtu.rand_default, [],
inexact=True),
op_record("kron", 2, number_dtypes, nonempty_shapes, jtu.rand_default, []),
op_record("outer", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("imag", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("iscomplex", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("isfinite", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isinf", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isnan", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isneginf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isposinf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isreal", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("isrealobj", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("log2", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("log10", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
test_name="log1p_large", tolerance={np.float64: 1e-12},
inexact=True),
op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_small_positive, [],
tolerance={np.float64: 1e-12}, inexact=True),
op_record("logaddexp", 2, float_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"],
tolerance={np.float64: 1e-12}, inexact=True),
op_record("logaddexp2", 2, float_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"],
tolerance={np.float16: 1e-2, np.float64: 2e-14}, inexact=True),
op_record("polyval", 2, number_dtypes, nonempty_nonscalar_array_shapes,
jtu.rand_default, [], check_dtypes=False,
tolerance={dtypes.bfloat16: 4e-2, np.float16: 1e-2,
np.float64: 1e-12}),
op_record("positive", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("power", 2, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
tolerance={np.complex128: 1e-14}, check_dtypes=False),
op_record("rad2deg", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("ravel", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("real", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
tolerance={np.float16: 1e-2}),
op_record("mod", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []),
op_record("modf", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("modf", 1, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_default, [], check_dtypes=False),
op_record("rint", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan,
[]),
op_record("rint", 1, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_default, [], check_dtypes=False),
op_record("sign", 1, number_dtypes + unsigned_dtypes,
all_shapes, jtu.rand_some_inf_and_nan, []),
# numpy 1.16 has trouble mixing uint and bfloat16, so we test these separately.
op_record("copysign", 2, default_dtypes,
all_shapes, jtu.rand_some_inf_and_nan, [], check_dtypes=False),
op_record("copysign", 2, unsigned_dtypes,
all_shapes, jtu.rand_some_inf_and_nan, [], check_dtypes=False),
op_record("sinc", 1, [t for t in number_dtypes if t != jnp.bfloat16],
all_shapes, jtu.rand_default, ["rev"],
tolerance={np.complex64: 1e-5}, inexact=True,
check_dtypes=False),
op_record("square", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("sqrt", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("transpose", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"],
check_dtypes=False),
op_record("true_divide", 2, all_dtypes, all_shapes, jtu.rand_nonzero,
["rev"], inexact=True),
op_record("ediff1d", 3, [np.int32], all_shapes, jtu.rand_default, []),
# TODO(phawkins): np.unwrap does not correctly promote its default period
# argument under NumPy 1.21 for bfloat16 inputs. It works fine if we
# explicitly pass a bfloat16 value that does not need promotion. We should
# probably add a custom test harness for unwrap that tests the period
# argument anyway.
op_record("unwrap", 1, [t for t in float_dtypes if t != dtypes.bfloat16],
nonempty_nonscalar_array_shapes,
jtu.rand_default, ["rev"],
# numpy.unwrap always returns float64
check_dtypes=False,
# numpy cumsum is inaccurate, see issue #3517
tolerance={dtypes.bfloat16: 1e-1, np.float16: 1e-1}),
op_record("isclose", 2, [t for t in all_dtypes if t != jnp.bfloat16],
all_shapes, jtu.rand_small_positive, []),
op_record("gcd", 2, int_dtypes_no_uint64, all_shapes, jtu.rand_default, []),
op_record("lcm", 2, int_dtypes_no_uint64, all_shapes, jtu.rand_default, []),
]
JAX_BITWISE_OP_RECORDS = [
op_record("bitwise_and", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("bitwise_not", 1, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("invert", 1, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("bitwise_or", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("bitwise_xor", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
]
JAX_REDUCER_RECORDS = [
op_record("mean", 1, number_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []),
op_record("nanmean", 1, inexact_dtypes, nonempty_shapes, jtu.rand_some_nan,
[], inexact=True),
op_record("nanprod", 1, all_dtypes, all_shapes, jtu.rand_some_nan, []),
op_record("nansum", 1, number_dtypes, all_shapes, jtu.rand_some_nan, []),
]
JAX_REDUCER_INITIAL_RECORDS = [
op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []),
op_record("max", 1, all_dtypes, all_shapes, jtu.rand_default, []),
op_record("min", 1, all_dtypes, all_shapes, jtu.rand_default, []),
]
JAX_REDUCER_WHERE_NO_INITIAL_RECORDS = [
op_record("all", 1, bool_dtypes, all_shapes, jtu.rand_some_zero, []),
op_record("any", 1, bool_dtypes, all_shapes, jtu.rand_some_zero, []),
op_record("mean", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
op_record("var", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
op_record("std", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
]
JAX_REDUCER_NO_DTYPE_RECORDS = [
op_record("all", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
op_record("any", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
op_record("max", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
op_record("min", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
op_record("var", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
op_record("std", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
op_record("nanmax", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
op_record("nanmin", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
op_record("nanvar", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan,
[], inexact=True),
op_record("nanstd", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan,
[], inexact=True),
op_record("ptp", 1, number_dtypes, nonempty_shapes, jtu.rand_default, []),
]
JAX_ARGMINMAX_RECORDS = [
op_record("argmin", 1, default_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
op_record("argmax", 1, default_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
op_record("nanargmin", 1, default_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
op_record("nanargmax", 1, default_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
]
JAX_OPERATOR_OVERLOADS = [
op_record("__add__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__sub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__mul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__eq__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__ne__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__lt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__le__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__gt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__ge__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__pos__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__neg__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__pow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
tolerance={np.float32: 2e-4, np.complex64: 2e-4, np.complex128: 1e-14}),
op_record("__mod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
tolerance={np.float16: 1e-1}),
op_record("__floordiv__", 2, default_dtypes, all_shapes,
jtu.rand_nonzero, []),
op_record("__truediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
inexact=True),
op_record("__abs__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
# TODO(mattjj): __invert__ fails on bool dtypes because ~True == -2
op_record("__invert__", 1, int_dtypes, all_shapes, jtu.rand_default, []),
# TODO(mattjj): investigate these failures
# op_record("__or__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__and__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
# op_record("__xor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__divmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
op_record("__lshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),
op_record("__rshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),
]
JAX_RIGHT_OPERATOR_OVERLOADS = [
op_record("__radd__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rsub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rmul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rpow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
tolerance={np.float32: 2e-4, np.complex64: 1e-3}),
op_record("__rmod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
tolerance={np.float16: 1e-1}),
op_record("__rfloordiv__", 2, default_dtypes, all_shapes,
jtu.rand_nonzero, []),
op_record("__rtruediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
inexact=True),
# op_record("__ror__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__rand__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
# op_record("__rxor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__rdivmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
op_record("__rlshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),
op_record("__rrshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), [])
]
class _OverrideEverything(object):
pass
for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
if rec.nargs == 2:
setattr(_OverrideEverything, rec.name, lambda self, other: self)
class _OverrideNothing(object):
pass
for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
if rec.nargs == 2:
setattr(_OverrideNothing, rec.name, lambda self, other: NotImplemented)
def _dtypes_are_compatible_for_bitwise_ops(args):
if len(args) <= 1:
return True
is_signed = lambda dtype: jnp.issubdtype(dtype, np.signedinteger)
width = lambda dtype: jnp.iinfo(dtype).bits
x, y = args
if width(x) > width(y):
x, y = y, x
# The following condition seems a little ad hoc, but it captures what
# numpy actually implements.
return (
is_signed(x) == is_signed(y)
or (width(x) == 32 and width(y) == 32)
or (width(x) == 32 and width(y) == 64 and is_signed(y)))
def _shapes_are_broadcast_compatible(shapes):
accumulator = np.zeros([])
for shape in shapes:
try:
accumulator = accumulator + np.zeros(shape)
except ValueError:
return False
return True
def _shapes_are_equal_length(shapes):
return all(len(shape) == len(shapes[0]) for shape in shapes[1:])
def _promote_like_jnp(fun, inexact=False):
"""Decorator that promotes the arguments of `fun` to `jnp.result_type(*args)`.
jnp and np have different type promotion semantics; this decorator allows
tests to make an np reference implementation act more like a jnp
implementation.
"""
def wrapper(*args, **kw):
flat_args = tree_util.tree_leaves(args)
if inexact and not any(jnp.issubdtype(jnp.result_type(x), jnp.inexact)
for x in flat_args):
dtype = jnp.result_type(jnp.float_, *flat_args)
else:
dtype = jnp.result_type(*flat_args)
args = tree_util.tree_map(lambda a: np.asarray(a, dtype), args)
return fun(*args, **kw)
return wrapper
class LaxBackedNumpyTests(jtu.JaxTestCase):
"""Tests for LAX-backed Numpy implementation."""
def setUp(self):
super().setUp()
self._jax_numpy_rank_promotion = config.jax_numpy_rank_promotion
config.update("jax_numpy_rank_promotion", "raise")
def tearDown(self):
config.update("jax_numpy_rank_promotion", self._jax_numpy_rank_promotion)
super().tearDown()
def _GetArgsMaker(self, rng, shapes, dtypes, np_arrays=True):
def f():
out = [rng(shape, dtype or jnp.float_)
for shape, dtype in zip(shapes, dtypes)]
if np_arrays:
return out
return [jnp.asarray(a) if isinstance(a, (np.ndarray, np.generic)) else a
for a in out]
return f
def testNotImplemented(self):
for name in jnp._NOT_IMPLEMENTED:
func = getattr(jnp, name)
with self.assertRaises(NotImplementedError):
func()
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
"np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
"check_dtypes": rec.check_dtypes, "tolerance": rec.tolerance,
"inexact": rec.inexact}
for shapes in filter(
_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,
JAX_COMPOUND_OP_RECORDS)))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testOp(self, np_op, jnp_op, rng_factory, shapes, dtypes, check_dtypes,
tolerance, inexact):
np_op = jtu.ignore_warning(category=RuntimeWarning,
message="invalid value.*")(np_op)
np_op = jtu.ignore_warning(category=RuntimeWarning,
message="divide by zero.*")(np_op)
rng = rng_factory(self.rng())
args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
tol = max(jtu.tolerance(dtype, tolerance) for dtype in dtypes)
tol = functools.reduce(jtu.join_tolerance,
[tolerance, tol, jtu.default_tolerance()])
self._CheckAgainstNumpy(_promote_like_jnp(np_op, inexact), jnp_op,
args_maker, check_dtypes=check_dtypes, tol=tol)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=check_dtypes,
atol=tol, rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
"tol": rec.tolerance}
for shapes in filter(
_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in JAX_OPERATOR_OVERLOADS))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testOperatorOverload(self, name, rng_factory, shapes, dtypes, tol):
rng = rng_factory(self.rng())
# np and jnp arrays have different type promotion rules; force the use of
# jnp arrays.
args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)
self._CompileAndCheck(fun, args_maker, atol=tol, rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
"op_tolerance": rec.tolerance}
for shapes in filter(
_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in JAX_RIGHT_OPERATOR_OVERLOADS))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testRightOperatorOverload(self, name, rng_factory, shapes, dtypes,
op_tolerance):
if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:
raise SkipTest("scalars not implemented") # TODO(mattjj): clean up
rng = rng_factory(self.rng())
args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
fun = lambda fst, snd: getattr(snd, name)(fst)
tol = max(jtu.tolerance(dtype, op_tolerance) for dtype in dtypes)
self._CompileAndCheck(fun, args_maker, atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": rec.test_name + "_{}".format(dtype),
"rng_factory": rec.rng_factory,
"op_name": rec.name, "dtype": dtype}
for rec in JAX_OPERATOR_OVERLOADS if rec.nargs == 2
for dtype in rec.dtypes))
def testBinaryOperatorDefers(self, op_name, rng_factory, dtype):
rng = rng_factory(self.rng())
arg = jax.device_put(rng((), dtype))
op = getattr(operator, op_name)
other = _OverrideEverything()
assert op(other, arg) is other
assert op(arg, other) is other
other = _OverrideNothing()
if op_name == "__eq__":
assert op(other, arg) is False
assert op(arg, other) is False
elif op_name == "__ne__":
assert op(other, arg) is True
assert op(arg, other) is True
else:
with self.assertRaises(TypeError):
op(other, arg)
with self.assertRaises(TypeError):
op(arg, other)
def testArrayEqualExamples(self):
# examples from the array_equal() docstring.
self.assertTrue(jnp.array_equal([1, 2], [1, 2]))
self.assertTrue(jnp.array_equal(np.array([1, 2]), np.array([1, 2])))
self.assertFalse(jnp.array_equal([1, 2], [1, 2, 3]))
self.assertFalse(jnp.array_equal([1, 2], [1, 4]))
a = np.array([1, np.nan])
self.assertFalse(jnp.array_equal(a, a))
self.assertTrue(jnp.array_equal(a, a, equal_nan=True))
a = np.array([1 + 1j])
b = a.copy()
a.real = np.nan
b.imag = np.nan
self.assertTrue(jnp.array_equal(a, b, equal_nan=True))
def testArrayEquivExamples(self):
# examples from the array_equiv() docstring.
self.assertTrue(jnp.array_equiv([1, 2], [1, 2]))
self.assertFalse(jnp.array_equiv([1, 2], [1, 3]))
with jax.numpy_rank_promotion('allow'):
self.assertTrue(jnp.array_equiv([1, 2], [[1, 2], [1, 2]]))
self.assertFalse(jnp.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]))
self.assertFalse(jnp.array_equiv([1, 2], [[1, 2], [1, 3]]))
def testArrayModule(self):
if numpy_dispatch is None:
raise SkipTest('requires https://github.com/seberg/numpy-dispatch')
jnp_array = jnp.array(1.0)
np_array = np.array(1.0)
module = numpy_dispatch.get_array_module(jnp_array)
self.assertIs(module, jnp)
module = numpy_dispatch.get_array_module(jnp_array, np_array)
self.assertIs(module, jnp)
def f(x):
module = numpy_dispatch.get_array_module(x)
self.assertIs(module, jnp)
return x
jax.jit(f)(jnp_array)
jax.grad(f)(jnp_array)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.test_name, shapes, dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
"np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name)}
for shapes in filter(
_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(rec.shapes, rec.nargs))
for dtypes in filter(
_dtypes_are_compatible_for_bitwise_ops,
itertools.combinations_with_replacement(rec.dtypes, rec.nargs)))
for rec in JAX_BITWISE_OP_RECORDS))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testBitwiseOp(self, np_op, jnp_op, rng_factory, shapes, dtypes):
rng = rng_factory(self.rng())
if not config.x64_enabled and any(
jnp.iinfo(dtype).bits == 64 for dtype in dtypes):
self.skipTest("x64 types are disabled by jax_enable_x64")
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(op.__name__, shapes, dtypes),
"op": op, "dtypes": dtypes, "shapes": shapes}
for op in [jnp.left_shift, jnp.right_shift]
for shapes in filter(
_shapes_are_broadcast_compatible,
# TODO numpy always promotes to shift dtype for zero-dim shapes:
itertools.combinations_with_replacement(nonzerodim_shapes, 2))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, int_dtypes_no_uint64) for s in shapes))))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testShiftOpAgainstNumpy(self, op, dtypes, shapes):
dtype, shift_dtype = dtypes
signed_mix = np.issubdtype(dtype, np.signedinteger) != \
np.issubdtype(shift_dtype, np.signedinteger)
has_32 = any(np.iinfo(d).bits == 32 for d in dtypes)
promoting_to_64 = has_32 and signed_mix
if promoting_to_64 and not config.x64_enabled:
self.skipTest("np.right_shift/left_shift promoting to int64"
"differs from jnp in 32 bit mode.")
info, shift_info = map(np.iinfo, dtypes)
x_rng = jtu.rand_int(self.rng(), low=info.min, high=info.max + 1)
# NumPy requires shifts to be non-negative and below the bit width:
shift_rng = jtu.rand_int(self.rng(), high=max(info.bits, shift_info.bits))
args_maker = lambda: (x_rng(shapes[0], dtype), shift_rng(shapes[1], shift_dtype))
self._CompileAndCheck(op, args_maker)
np_op = getattr(np, op.__name__)
self._CheckAgainstNumpy(np_op, op, args_maker)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_dtype={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis,
"None" if out_dtype is None else np.dtype(out_dtype).name, keepdims),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
"axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
for shape in rec.shapes for dtype in rec.dtypes
for out_dtype in [None] + rec.dtypes
for axis in list(range(-len(shape), len(shape))) + [None]
for keepdims in [False, True])
for rec in JAX_REDUCER_RECORDS))
def testReducer(self, np_op, jnp_op, rng_factory, shape, dtype, out_dtype,
axis, keepdims, inexact):
rng = rng_factory(self.rng())
@jtu.ignore_warning(category=np.ComplexWarning)
@jtu.ignore_warning(category=RuntimeWarning,
message="mean of empty slice.*")
@jtu.ignore_warning(category=RuntimeWarning,
message="overflow encountered.*")
def np_fun(x):
x_cast = x if dtype != jnp.bfloat16 else x.astype(np.float32)
t = out_dtype if out_dtype != jnp.bfloat16 else np.float32
return np_op(x_cast, axis, dtype=t, keepdims=keepdims)
np_fun = _promote_like_jnp(np_fun, inexact)
jnp_fun = lambda x: jnp_op(x, axis, dtype=out_dtype, keepdims=keepdims)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
tol_spec = {np.float16: 1e-2, np.int32: 1E-3, np.float32: 1e-3,
np.complex64: 1e-3, np.float64: 1e-5, np.complex128: 1e-5}
tol = jtu.tolerance(dtype, tol_spec)
tol = max(tol, jtu.tolerance(out_dtype, tol_spec)) if out_dtype else tol
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
check_dtypes=jnp.bfloat16 not in (dtype, out_dtype),
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
"np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
"axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
for shape in rec.shapes for dtype in rec.dtypes
for axis in list(range(-len(shape), len(shape))) + [None]
for keepdims in [False, True])
for rec in JAX_REDUCER_NO_DTYPE_RECORDS))
def testReducerNoDtype(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
keepdims, inexact):
rng = rng_factory(self.rng())
is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'
@jtu.ignore_warning(category=RuntimeWarning,
message="Degrees of freedom <= 0 for slice.*")
@jtu.ignore_warning(category=RuntimeWarning,
message="All-NaN slice encountered.*")
def np_fun(x):
x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
res = np_op(x_cast, axis, keepdims=keepdims)
res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
return res
np_fun = _promote_like_jnp(np_fun, inexact)
jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims)
args_maker = lambda: [rng(shape, dtype)]
tol = {np.float16: 0.002}
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, rtol=tol, atol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_keepdims={}_initial={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis, keepdims, initial),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
"np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
"initial": initial, "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
for shape in rec.shapes for dtype in rec.dtypes
for axis in list(range(-len(shape), len(shape))) + [None]
for initial in [0, 1] for keepdims in [False, True])
for rec in JAX_REDUCER_INITIAL_RECORDS))
def testReducerInitial(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
keepdims, initial, inexact):
rng = rng_factory(self.rng())
is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'
@jtu.ignore_warning(category=RuntimeWarning,
message="Degrees of freedom <= 0 for slice.*")
def np_fun(x):
x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
res = np_op(x_cast, axis, keepdims=keepdims, initial=initial)
res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
return res
np_fun = _promote_like_jnp(np_fun, inexact)
np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, initial=initial)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_keepdims={}_initial={}_whereshape={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis, keepdims, initial,
jtu.format_shape_dtype_string(whereshape, bool)),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
"np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name), "whereshape": whereshape,
"initial": initial, "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
for shape in rec.shapes for dtype in rec.dtypes
for whereshape in _compatible_shapes(shape)
for axis in list(range(-len(shape), len(shape))) + [None]
for initial in [0, 1] for keepdims in [False, True])
for rec in JAX_REDUCER_INITIAL_RECORDS))
def testReducerWhere(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
keepdims, initial, inexact, whereshape):
if (shape in [()] + scalar_shapes and
dtype in [jnp.int16, jnp.uint16] and
jnp_op in [jnp.min, jnp.max]):
self.skipTest("Known XLA failure; see https://github.com/google/jax/issues/4971.")
rng = rng_factory(self.rng())
is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'
# Do not pass where via args_maker as that is incompatible with _promote_like_jnp.
where = jtu.rand_bool(self.rng())(whereshape, np.bool_)
@jtu.ignore_warning(category=RuntimeWarning,
message="Degrees of freedom <= 0 for slice.*")
def np_fun(x):
x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
res = np_op(x_cast, axis, keepdims=keepdims, initial=initial, where=where)
res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
return res
np_fun = _promote_like_jnp(np_fun, inexact)
np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, initial=initial, where=where)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@unittest.skipIf(numpy_version < (1, 20), "where parameter not supported in older numpy")
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_keepdims={}_whereshape={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis, keepdims,
jtu.format_shape_dtype_string(whereshape, bool)),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
"np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name), "whereshape": whereshape,
"axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
for shape in rec.shapes for dtype in rec.dtypes
for whereshape in _compatible_shapes(shape)
for axis in list(range(-len(shape), len(shape))) + [None]
for keepdims in [False, True])
for rec in JAX_REDUCER_WHERE_NO_INITIAL_RECORDS))
def testReducerWhereNoInitial(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
keepdims, inexact, whereshape):
rng = rng_factory(self.rng())
is_bf16_nan_test = dtype == jnp.bfloat16
# Do not pass where via args_maker as that is incompatible with _promote_like_jnp.
where = jtu.rand_bool(self.rng())(whereshape, np.bool_)
@jtu.ignore_warning(category=RuntimeWarning,
message="Degrees of freedom <= 0 for slice.*")
@jtu.ignore_warning(category=RuntimeWarning,
message="Mean of empty slice.*")
@jtu.ignore_warning(category=RuntimeWarning,
message="invalid value encountered in true_divide*")
def np_fun(x):
x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
res = np_op(x_cast, axis, keepdims=keepdims, where=where)
res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
return res
np_fun = _promote_like_jnp(np_fun, inexact)
np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, where=where)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
if numpy_version >= (1, 20, 2) or np_op.__name__ in ("all", "any"):
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in all_shapes for dtype in all_dtypes
for axis in list(range(-len(shape), len(shape))) + [None]))
def testCountNonzero(self, shape, dtype, axis):
rng = jtu.rand_some_zero(self.rng())
np_fun = lambda x: np.count_nonzero(x, axis)
jnp_fun = lambda x: jnp.count_nonzero(x, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testNonzero(self, shape, dtype):
rng = jtu.rand_some_zero(self.rng())
np_fun = lambda x: np.nonzero(x)
np_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(np_fun)
jnp_fun = lambda x: jnp.nonzero(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_size={}".format(
jtu.format_shape_dtype_string(shape, dtype), size),
"shape": shape, "dtype": dtype, "size": size}
for shape in nonempty_array_shapes
for dtype in all_dtypes
for size in [1, 5, 10]))
def testNonzeroSize(self, shape, dtype, size):
rng = jtu.rand_some_zero(self.rng())
args_maker = lambda: [rng(shape, dtype)]
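# The NumPy reference emulates jnp.nonzero's static `size` argument by truncating or zero-padding each index array to the requested length.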
@jtu.ignore_warning(category=DeprecationWarning, message="Calling nonzero on 0d arrays.*")
def np_fun(x):
result = np.nonzero(x)
if size <= len(result[0]):
return tuple(arg[:size] for arg in result)
else:
return tuple(np.concatenate([arg, np.zeros(size - len(arg), arg.dtype)])
for arg in result)
jnp_fun = lambda x: jnp.nonzero(x, size=size)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testFlatNonzero(self, shape, dtype):
rng = jtu.rand_some_zero(self.rng())
np_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(np.flatnonzero)
jnp_fun = jnp.flatnonzero
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
# JIT compilation requires specifying the size statically:
jnp_fun = lambda x: jnp.flatnonzero(x, size=np.size(x) // 2)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testArgWhere(self, shape, dtype):
rng = jtu.rand_some_zero(self.rng())
np_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(np.argwhere)
jnp_fun = jnp.argwhere
args_maker = lambda: [rng(shape, dtype)]
if shape in (scalar_shapes + [()]) and numpy_version < (1, 18):
self.skipTest("np.argwhere() result for scalar input changed in numpy 1.18.")
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
# JIT compilation requires specifying a size statically. Full test of this
# behavior is in testNonzeroSize().
jnp_fun = lambda x: jnp.argwhere(x, size=np.size(x) // 2)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
"np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
"axis": axis}
for rec in JAX_ARGMINMAX_RECORDS
for shape, dtype in _shape_and_dtypes(rec.shapes, rec.dtypes)
for axis in range(-len(shape), len(shape))))
def testArgMinMax(self, np_op, jnp_op, rng_factory, shape, dtype, axis):
rng = rng_factory(self.rng())
if dtype == np.complex128 and jtu.device_under_test() == "gpu":
raise unittest.SkipTest("complex128 reductions not supported on GPU")
if "nan" in np_op.__name__ and dtype == jnp.bfloat16:
raise unittest.SkipTest("NumPy doesn't correctly handle bfloat16 arrays")
def np_fun(array_to_reduce):
return np_op(array_to_reduce, axis).astype(jnp.int_)
def jnp_fun(array_to_reduce):
return jnp_op(array_to_reduce, axis)
args_maker = lambda: [rng(shape, dtype)]
try:
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
except ValueError as e:
if str(e) == "All-NaN slice encountered":
self.skipTest("JAX doesn't support checking for all-NaN slices")
else:
raise
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": rec.test_name.capitalize(), "name": rec.name,
"np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name)}
for rec in JAX_ARGMINMAX_RECORDS))
def testArgMinMaxEmpty(self, name, np_op, jnp_op):
name = name[3:] if name.startswith("nan") else name
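# Strip an optional "nan" prefix so the expected error message refers to the base reduction (argmin/argmax).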
msg = "attempt to get {} of an empty sequence".format(name)
with self.assertRaises(ValueError, msg=msg):
jnp_op(np.array([]))
with self.assertRaises(ValueError, msg=msg):
jnp_op(np.zeros((2, 0)), axis=1)
np_fun = partial(np_op, axis=0)
jnp_fun = partial(jnp_op, axis=0)
args_maker = lambda: [np.zeros((2, 0))]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
axes),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"axes": axes}
for lhs_shape, rhs_shape, axes in [
[(2,), (2,), (-1, -1, -1, None)], # scalar output
[(2, 4), (2, 4), (-1, -1, -1, 0)], # 2D vectors
[(3, 4), (3, 4), (-1, -1, -1, 0)], # 3D vectors
[(3, 4), (3, 6, 5, 4), (-1, -1, -1, 0)], # broadcasting
[(4, 3), (3, 6, 5, 4), (1, 0, -1, None)], # different axes
[(6, 1, 3), (5, 3), (-1, -1, -1, None)], # more broadcasting
[(6, 1, 2), (5, 3), (-1, -1, -1, None)], # mixed 2D and 3D vectors
[(10, 5, 2, 8), (1, 5, 1, 3), (-2, -1, -3, None)], # axes/broadcasting
[(4, 5, 2), (4, 5, 2), (-1, -1, 0, None)], # axisc should do nothing
[(4, 5, 2), (4, 5, 2), (-1, -1, -1, None)] # same as before
]
for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
axisa, axisb, axisc, axis = axes
jnp_fun = lambda a, b: jnp.cross(a, b, axisa, axisb, axisc, axis)
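# NumPy reference: upcast bfloat16 operands to float32, then cast the result to the dtype jnp.cross would promote to.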
def np_fun(a, b):
a = a.astype(np.float32) if lhs_dtype == jnp.bfloat16 else a
b = b.astype(np.float32) if rhs_dtype == jnp.bfloat16 else b
out = np.cross(a, b, axisa, axisb, axisc, axis)
return out.astype(jnp.promote_types(lhs_dtype, rhs_dtype))
tol_spec = {dtypes.bfloat16: 3e-1, np.float16: 0.15}
tol = max(jtu.tolerance(lhs_dtype, tol_spec),
jtu.tolerance(rhs_dtype, tol_spec))
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype}
for name, lhs_shape, rhs_shape in [
("matrix-scalar", (3, 3), ()),
("scalar-matrix", (), (3, 3)),
("matrix-vector", (4, 5), (5,)),
("vector-matrix", (6,), (6, 4)),
("matrix-matrix", (3, 4), (4, 5)),
("tensor-vector", (4, 3, 2), (2,)),
("vector-tensor", (2,), (3, 2, 4)),
("tensor-matrix", (4, 3, 2), (2, 5)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-tensor", (2, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {np.float16: 1e-2, np.float32: 1e-5, np.float64: 1e-14,
np.complex128: 1e-14}
if jtu.device_under_test() == "tpu":
tol[np.float16] = tol[np.float32] = tol[np.complex64] = 2e-1
def np_dot(x, y):
x = x.astype(np.float32) if lhs_dtype == jnp.bfloat16 else x
y = y.astype(np.float32) if rhs_dtype == jnp.bfloat16 else y
return np.dot(x, y).astype(jnp.promote_types(lhs_dtype, rhs_dtype))
self._CheckAgainstNumpy(np_dot, jnp.dot, args_maker,
tol=tol)
self._CompileAndCheck(jnp.dot, args_maker, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype}
for name, lhs_shape, rhs_shape in [
("vector-vector", (3,), (3,)),
("matrix-vector", (3, 3), (3,)),
("vector-matrix", (3,), (3, 3)),
("matrix-matrix", (3, 3), (3, 3)),
("vector-tensor", (3,), (5, 3, 2)),
("tensor-vector", (5, 3, 2), (2,)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-matrix", (5, 2, 3), (3, 2)),
("tensor-tensor", (5, 3, 4), (5, 4, 1)),
("tensor-tensor-broadcast", (3, 1, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
rng = jtu.rand_default(self.rng())
def np_fun(x, y):
dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
return np.matmul(x, y).astype(dtype)
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {np.float16: 1e-2, np.float32: 2e-2, np.float64: 1e-12,
np.complex128: 1e-12}
if jtu.device_under_test() == "tpu":
tol[np.float16] = tol[np.float32] = tol[np.complex64] = 4e-2
self._CheckAgainstNumpy(np_fun, jnp.matmul, args_maker, tol=tol)
self._CompileAndCheck(jnp.matmul, args_maker, atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
axes),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"axes": axes}
for lhs_shape, rhs_shape, axes in [
[(3,), (), 0],
[(2, 3, 4), (5, 6, 7), 0], # from issue #740
[(2, 3, 4), (3, 4, 5, 6), 2],
[(2, 3, 4), (5, 4, 3, 6), [1, 2]],
[(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],
[(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]],
]
for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
jnp_fun = lambda a, b: jnp.tensordot(a, b, axes)
def np_fun(a, b):
a = a if lhs_dtype != jnp.bfloat16 else a.astype(np.float32)
b = b if rhs_dtype != jnp.bfloat16 else b.astype(np.float32)
dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
return np.tensordot(a, b, axes).astype(dtype)
tol = {np.float16: 1e-1, np.float32: 1e-3, np.float64: 1e-12,
np.complex64: 1e-3, np.complex128: 1e-12}
if jtu.device_under_test() == "tpu":
tol[np.float16] = tol[np.float32] = tol[np.complex64] = 2e-1
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker)
def testTensordotErrors(self):
a = np.random.random((3, 2, 2))
b = np.random.random((2,))
self.assertRaisesRegex(
TypeError, "Number of tensordot axes.*exceeds input ranks.*",
lambda: jnp.tensordot(a, b, axes=2))
self.assertRaisesRegex(
TypeError, "tensordot requires axes lists to have equal length.*",
lambda: jnp.tensordot(a, b, axes=([0], [0, 1])))
self.assertRaisesRegex(
TypeError, "tensordot requires both axes lists to be either ints, tuples or lists.*",
lambda: jnp.tensordot(a, b, axes=('bad', 'axes')))
self.assertRaisesRegex(
TypeError, "tensordot axes argument must be an int, a pair of ints, or a pair of lists.*",
lambda: jnp.tensordot(a, b, axes='badaxes'))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_invert={}".format(
jtu.format_shape_dtype_string(element_shape, dtype),
jtu.format_shape_dtype_string(test_shape, dtype), invert),
"element_shape": element_shape, "test_shape": test_shape,
"dtype": dtype, "invert": invert}
for element_shape in all_shapes
for test_shape in all_shapes
for dtype in default_dtypes
for invert in [True, False]))
def testIsin(self, element_shape, test_shape, dtype, invert):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(element_shape, dtype), rng(test_shape, dtype)]
jnp_fun = lambda e, t: jnp.isin(e, t, invert=invert)
np_fun = lambda e, t: np.isin(e, t, invert=invert)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_invert={}".format(
jtu.format_shape_dtype_string(element_shape, dtype),
jtu.format_shape_dtype_string(test_shape, dtype), invert),
"element_shape": element_shape, "test_shape": test_shape,
"dtype": dtype, "invert": invert}
for element_shape in all_shapes
for test_shape in all_shapes
for dtype in default_dtypes
for invert in [True, False]))
def testIn1d(self, element_shape, test_shape, dtype, invert):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(element_shape, dtype), rng(test_shape, dtype)]
jnp_fun = lambda e, t: jnp.in1d(e, t, invert=invert)
np_fun = lambda e, t: np.in1d(e, t, invert=invert)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(shape1, dtype1),
jtu.format_shape_dtype_string(shape2, dtype2)),
"shape1": shape1, "shape2": shape2, "dtype1": dtype1, "dtype2": dtype2}
for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
for shape1 in all_shapes
for shape2 in all_shapes))
def testSetdiff1d(self, shape1, shape2, dtype1, dtype2):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
self._CheckAgainstNumpy(np.setdiff1d, jnp.setdiff1d, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(shape1, dtype1),
jtu.format_shape_dtype_string(shape2, dtype2)),
"shape1": shape1, "shape2": shape2, "dtype1": dtype1, "dtype2": dtype2}
for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
for shape1 in nonempty_nonscalar_array_shapes
for shape2 in nonempty_nonscalar_array_shapes))
def testUnion1d(self, shape1, shape2, dtype1, dtype2):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
def np_fun(arg1, arg2):
dtype = jnp.promote_types(arg1.dtype, arg2.dtype)
return np.union1d(arg1, arg2).astype(dtype)
self._CheckAgainstNumpy(np_fun, jnp.union1d, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_size={}".format(
jtu.format_shape_dtype_string(shape1, dtype1),
jtu.format_shape_dtype_string(shape2, dtype2), size),
"shape1": shape1, "shape2": shape2, "dtype1": dtype1, "dtype2": dtype2, "size": size}
for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
for shape1 in nonempty_nonscalar_array_shapes
for shape2 in nonempty_nonscalar_array_shapes
for size in [1, 5, 10]))
def testUnion1dSize(self, shape1, shape2, dtype1, dtype2, size):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
def np_fun(arg1, arg2):
dtype = jnp.promote_types(arg1.dtype, arg2.dtype)
result = np.union1d(arg1, arg2).astype(dtype)
if size <= len(result):
return result[:size]
else:
return np.concatenate([result, np.full(size - len(result), result[0], result.dtype)])
def jnp_fun(arg1, arg2):
return jnp.union1d(arg1, arg2, size=size)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_assume_unique={}".format(
jtu.format_shape_dtype_string(shape1, dtype1),
jtu.format_shape_dtype_string(shape2, dtype2),
assume_unique),
"shape1": shape1, "dtype1": dtype1, "shape2": shape2, "dtype2": dtype2,
"assume_unique": assume_unique}
for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
for shape1 in all_shapes
for shape2 in all_shapes
for assume_unique in [False, True]))
def testSetxor1d(self, shape1, dtype1, shape2, dtype2, assume_unique):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
jnp_fun = lambda ar1, ar2: jnp.setxor1d(ar1, ar2, assume_unique=assume_unique)
def np_fun(ar1, ar2):
if assume_unique:
# Pre-flatten the arrays to match the JAX implementation
ar1 = np.ravel(ar1)
ar2 = np.ravel(ar2)
return np.setxor1d(ar1, ar2, assume_unique)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_assume_unique={}_return_indices={}".format(
jtu.format_shape_dtype_string(shape1, dtype1),
jtu.format_shape_dtype_string(shape2, dtype2),
assume_unique,
return_indices),
"shape1": shape1, "dtype1": dtype1, "shape2": shape2, "dtype2": dtype2,
"assume_unique": assume_unique, "return_indices": return_indices}
for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
for shape1 in all_shapes
for shape2 in all_shapes
for assume_unique in [False, True]
for return_indices in [False, True]))
def testIntersect1d(self, shape1, dtype1, shape2, dtype2, assume_unique, return_indices):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
jnp_fun = lambda ar1, ar2: jnp.intersect1d(ar1, ar2, assume_unique=assume_unique, return_indices=return_indices)
np_fun = lambda ar1, ar2: np.intersect1d(ar1, ar2, assume_unique=assume_unique, return_indices=return_indices)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype}
# TODO(phawkins): support integer dtypes too.
for lhs_shape, lhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
for rhs_shape, rhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
if len(jtu._dims_of_shape(lhs_shape)) == 0
or len(jtu._dims_of_shape(rhs_shape)) == 0
or lhs_shape[-1] == rhs_shape[-1]))
def testInner(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
def np_fun(lhs, rhs):
lhs = lhs if lhs_dtype != jnp.bfloat16 else lhs.astype(np.float32)
rhs = rhs if rhs_dtype != jnp.bfloat16 else rhs.astype(np.float32)
dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
return np.inner(lhs, rhs).astype(dtype)
jnp_fun = lambda lhs, rhs: jnp.inner(lhs, rhs)
tol_spec = {np.float16: 1e-2, np.float32: 1e-5, np.float64: 1e-13,
np.complex64: 1e-5}
if jtu.device_under_test() == "tpu":
tol_spec[np.float32] = tol_spec[np.complex64] = 2e-1
tol = max(jtu.tolerance(lhs_dtype, tol_spec),
jtu.tolerance(rhs_dtype, tol_spec))
# TODO(phawkins): there are float32/float64 disagreements for some inputs.
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_amin={}_amax={}".format(
jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),
"shape": shape, "dtype": dtype, "a_min": a_min, "a_max": a_max}
for shape in all_shapes for dtype in number_dtypes
for a_min, a_max in [(-1, None), (None, 1), (-0.9, 1),
(-np.ones(1), None),
(None, np.ones(1)),
(np.full(1, -0.9), np.ones(1))]))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testClipStaticBounds(self, shape, dtype, a_min, a_max):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.clip(x, a_min=a_min, a_max=a_max)
jnp_fun = lambda x: jnp.clip(x, a_min=a_min, a_max=a_max)
args_maker = lambda: [rng(shape, dtype)]
# TODO(phawkins): the promotion behavior changed in Numpy 1.17.
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
def testClipError(self):
with self.assertRaisesRegex(ValueError, "At most one of a_min and a_max.*"):
jnp.clip(jnp.zeros((3,)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_decimals={}".format(
jtu.format_shape_dtype_string(shape, dtype), decimals),
"shape": shape, "dtype": dtype, "decimals": decimals}
for shape, dtype in _shape_and_dtypes(all_shapes, number_dtypes)
for decimals in [0, 1, -2]))
def testRoundStaticDecimals(self, shape, dtype, decimals):
rng = jtu.rand_default(self.rng())
if jnp.issubdtype(dtype, np.integer) and decimals < 0:
self.skipTest("Integer rounding with decimals < 0 not implemented")
np_fun = lambda x: np.round(x, decimals=decimals)
jnp_fun = lambda x: jnp.round(x, decimals=decimals)
args_maker = lambda: [rng(shape, dtype)]
tol = {jnp.bfloat16: 5e-2, np.float16: 1e-2}
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
check_dtypes=check_dtypes, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
atol=tol, rtol=tol)
def testOperatorRound(self):
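# Python's built-in round() dispatches to __round__; JAX results should match NumPy, with dtype checks relaxed where the two return different scalar types.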
self.assertAllClose(round(np.float32(7.532), 1),
round(jnp.float32(7.5), 1))
self.assertAllClose(round(np.float32(1.234), 2),
round(jnp.float32(1.234), 2))
self.assertAllClose(round(np.float32(1.234)),
round(jnp.float32(1.234)), check_dtypes=False)
self.assertAllClose(round(np.float32(7.532), 1),
round(jnp.array(7.5, jnp.float32), 1))
self.assertAllClose(round(np.float32(1.234), 2),
round(jnp.array(1.234, jnp.float32), 2))
self.assertAllClose(round(np.float32(1.234)),
round(jnp.array(1.234, jnp.float32)),
check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_mode={}_padwidth={}_constantvalues={}".format(
jtu.format_shape_dtype_string(shape, dtype), mode, pad_width,
constant_values),
"shape": shape, "dtype": dtype, "mode": mode,
"pad_width": pad_width, "constant_values": constant_values}
for mode, shapes in [
('constant', all_shapes),
('wrap', nonempty_shapes),
('edge', nonempty_shapes),
]
for shape, dtype in _shape_and_dtypes(shapes, all_dtypes)
for constant_values in [
# None is used for modes other than 'constant'
None,
# constant
0, 1,
# (constant,)
(0,), (2.718,),
# ((before_const, after_const),)
((0, 2),), ((-1, 3.14),),
# ((before_1, after_1), ..., (before_N, after_N))
tuple((i / 2, -3.14 * i) for i in range(len(shape))),
]
for pad_width in [
# ((before_1, after_1), ..., (before_N, after_N))
tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
# ((before, after),)
((1, 2),), ((2, 0),),
# (before, after) (not in the docstring but works in numpy)
(2, 0), (0, 0),
# (pad,)
(1,), (2,),
# pad
0, 1,
]
if (pad_width != () and constant_values != () and
((mode == 'constant' and constant_values is not None) or
(mode != 'constant' and constant_values is None)))))
def testPad(self, shape, dtype, mode, pad_width, constant_values):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
if constant_values is None:
np_fun = partial(np.pad, pad_width=pad_width, mode=mode)
jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode)
else:
np_fun = partial(np.pad, pad_width=pad_width, mode=mode,
constant_values=constant_values)
jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode,
constant_values=constant_values)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_mode={}_pad_width={}_stat_length={}".format(
jtu.format_shape_dtype_string(shape, dtype), mode, pad_width, stat_length),
"shape": shape, "dtype": dtype, "mode": mode, "pad_width": pad_width,
"stat_length": stat_length}
for mode in ['maximum', 'minimum', 'mean', 'median']
for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)
for pad_width in [
# ((before_1, after_1), ..., (before_N, after_N))
tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
# ((before, after),)
((1, 2),), ((2, 0),),
# (before, after) (not in the docstring but works in numpy)
(2, 0), (0, 0),
# (pad,)
(1,), (2,),
# pad
0, 1,
]
for stat_length in [
None,
# ((before_1, after_1), ..., (before_N, after_N))
tuple(((i % 3 + 1), ((i + 1) % 3) + 1) for i in range(len(shape))),
# ((before, after),)
((1, 2),), ((2, 2),),
# (before, after) (not in the docstring but works in numpy)
(1, 1), (3, 4),
# (pad,)
(1,), (2,),
# pad
1, 2
]
if (pad_width != () and stat_length != () and
not (dtype in bool_dtypes and mode == 'mean'))))
def testPadStatValues(self, shape, dtype, mode, pad_width, stat_length):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
np_fun = partial(np.pad, pad_width=pad_width, mode=mode, stat_length=stat_length)
jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode, stat_length=stat_length)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_mode={}_pad_width={}_reflect_type={}".format(
jtu.format_shape_dtype_string(shape, dtype), mode, pad_width, reflect_type),
"shape": shape, "dtype": dtype, "mode": mode, "pad_width": pad_width,
"reflect_type": reflect_type}
for mode in ['symmetric', 'reflect']
for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)
for pad_width in [
# ((before_1, after_1), ..., (before_N, after_N))
tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
# ((before, after),)
((1, 2),), ((2, 3),),
# (before, after) (not in the docstring but works in numpy)
(2, 1), (1, 2),
# (pad,)
(1,), (2,), (3,),
# pad
0, 5, 7, 10
]
for reflect_type in ['even', 'odd']
if (pad_width != () and
# following types lack precision when calculating odd values
(reflect_type != 'odd' or dtype not in [np.bool_, np.float16, jnp.bfloat16]))))
def testPadSymmetricAndReflect(self, shape, dtype, mode, pad_width, reflect_type):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
np_fun = partial(np.pad, pad_width=pad_width, mode=mode, reflect_type=reflect_type)
jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode, reflect_type=reflect_type)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE,
tol={np.float32: 1e-3, np.complex64: 1e-3})
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_mode={}_pad_width={}_end_values={}".format(
jtu.format_shape_dtype_string(shape, dtype), "linear_ramp", pad_width, end_values),
"shape": shape, "dtype": dtype, "pad_width": pad_width,
"end_values": end_values}
for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)
for pad_width in [
# ((before_1, after_1), ..., (before_N, after_N))
tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
# ((before, after),)
((1, 2),), ((2, 0),),
# (before, after) (not in the docstring but works in numpy)
(2, 0), (0, 0),
# (pad,)
(1,), (2,),
# pad
0, 1,
]
for end_values in [
# ((before_1, after_1), ..., (before_N, after_N))
tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
# ((before, after),)
((1, 2),), ((2.0, 3.14),),
# (before, after) (not in the docstring but works in numpy)
(0, 0), (-8.0, 2.0),
# (end_values,)
(1,), (2,),
# end_values
0, 1, 100, 10.0, 3.5, 4.2, -5, -3
]
if (pad_width != () and end_values != () and
# following types lack precision
dtype not in [np.int8, np.int16, np.float16, jnp.bfloat16])))
def testPadLinearRamp(self, shape, dtype, pad_width, end_values):
if numpy_version < (1, 20) and np.issubdtype(dtype, np.integer):
raise unittest.SkipTest("NumPy 1.20 changed the semantics of np.linspace")
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
np_fun = partial(np.pad, pad_width=pad_width, mode="linear_ramp",
end_values=end_values)
jnp_fun = partial(jnp.pad, pad_width=pad_width, mode="linear_ramp",
end_values=end_values)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
self._CompileAndCheck(jnp_fun, args_maker)
def testPadEmpty(self):
arr = np.arange(6).reshape(2, 3)
pad_width = ((2, 3), (3, 1))
np_res = np.pad(arr, pad_width=pad_width, mode="empty")
jnp_res = jnp.pad(arr, pad_width=pad_width, mode="empty")
np.testing.assert_equal(np_res.shape, jnp_res.shape)
np.testing.assert_equal(arr, np_res[2:-3, 3:-1])
np.testing.assert_equal(arr, jnp_res[2:-3, 3:-1])
np.testing.assert_equal(np_res[2:-3, 3:-1], jnp_res[2:-3, 3:-1])
def testPadKwargs(self):
modes = {
'constant': {'constant_values': 0},
'edge': {},
'linear_ramp': {'end_values': 0},
'maximum': {'stat_length': None},
'mean': {'stat_length': None},
'median': {'stat_length': None},
'minimum': {'stat_length': None},
'reflect': {'reflect_type': 'even'},
'symmetric': {'reflect_type': 'even'},
'wrap': {},
'empty': {}
}
arr = jnp.array([1, 2, 3])
pad_width = 1
for mode in modes.keys():
allowed = modes[mode]
not_allowed = {}
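# Collect keyword arguments belonging to other modes; these must be rejected for the current mode.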
for kwargs in modes.values():
if kwargs != allowed:
not_allowed.update(kwargs)
# Test that the keyword arguments allowed for this mode are accepted
jnp.pad(arr, pad_width, mode, **allowed)
# Test if prohibited keyword arguments of other modes raise an error
match = "unsupported keyword arguments for mode '{}'".format(mode)
for key, value in not_allowed.items():
with self.assertRaisesRegex(ValueError, match):
jnp.pad(arr, pad_width, mode, **{key: value})
# Test that an unsupported mode raises an error.
unsupported_modes = [1, None, "foo"]
for mode in unsupported_modes:
match = "Unimplemented padding mode '{}' for np.pad.".format(mode)
with self.assertRaisesRegex(NotImplementedError, match):
jnp.pad(arr, pad_width, mode)
def testPadFunction(self):
def np_pad_with(vector, pad_width, iaxis, kwargs):
pad_value = kwargs.get('padder', 10)
vector[:pad_width[0]] = pad_value
vector[-pad_width[1]:] = pad_value
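# JAX arrays are immutable, so the equivalent callback builds a new array with index_update instead of assigning in place.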
def jnp_pad_with(vector, pad_width, iaxis, kwargs):
pad_value = kwargs.get('padder', 10)
vector = jax.ops.index_update(
vector, jax.ops.index[:pad_width[0]], pad_value)
vector = jax.ops.index_update(
vector, jax.ops.index[-pad_width[1]:], pad_value)
return vector
arr = np.arange(6).reshape(2, 3)
np_res = np.pad(arr, 2, np_pad_with)
jnp_res = jnp.pad(arr, 2, jnp_pad_with)
np.testing.assert_equal(np_res, jnp_res)
arr = np.arange(24).reshape(2, 3, 4)
np_res = np.pad(arr, 1, np_pad_with, padder=100)
jnp_res = jnp.pad(arr, 1, jnp_pad_with, padder=100)
np.testing.assert_equal(np_res, jnp_res)
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(arr.shape, arr.dtype)]
jnp_fun = partial(jnp.pad, pad_width=1, mode=jnp_pad_with)
self._CompileAndCheck(jnp_fun, args_maker)
def testPadWithNumpyPadWidth(self):
a = jnp.array([1, 2, 3, 4, 5])
f = jax.jit(
partial(
jnp.pad,
pad_width=np.asarray((2, 3)),
mode="constant",
constant_values=(4, 6)))
np.testing.assert_array_equal(
f(a),
np.pad(
a,
pad_width=np.asarray((2, 3)),
mode="constant",
constant_values=(4, 6)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_reps={}".format(
jtu.format_shape_dtype_string(shape, dtype), reps),
"shape": shape, "dtype": dtype, "reps": reps}
for reps in [(), (2,), (3, 4), (2, 3, 4), (1, 0, 2)]
for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
))
def testTile(self, shape, dtype, reps):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: np.tile(arg, reps)
jnp_fun = lambda arg: jnp.tile(arg, reps)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in all_dtypes))
def testExtract(self, shape, dtype):
rng = jtu.rand_some_zero(self.rng())
args_maker = lambda: [rng(shape, jnp.float32), rng(shape, dtype)]
self._CheckAgainstNumpy(np.extract, jnp.extract, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_ncond={}_nfunc={}".format(
jtu.format_shape_dtype_string(shape, dtype), ncond, nfunc),
"shape": shape, "dtype": dtype, "ncond": ncond, "nfunc": nfunc}
for ncond in [1, 2, 3]
for nfunc in [ncond, ncond + 1]
for shape in all_shapes
for dtype in all_dtypes))
def testPiecewise(self, shape, dtype, ncond, nfunc):
rng = jtu.rand_default(self.rng())
rng_bool = jtu.rand_int(self.rng(), 0, 2)
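# funclist mixes callables and scalar constants to exercise both forms accepted by piecewise.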
funclist = [lambda x: x - 1, 1, lambda x: x, 0][:nfunc]
args_maker = lambda: (rng(shape, dtype), [rng_bool(shape, bool) for i in range(ncond)])
np_fun = partial(np.piecewise, funclist=funclist)
jnp_fun = partial(jnp.piecewise, funclist=funclist)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
# This is a higher-order function, so the cache miss check will fail.
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, check_cache_misses=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_perm={}_{}".format(
jtu.format_shape_dtype_string(shape, dtype), perm, arg_type),
"dtype": dtype, "shape": shape, "perm": perm, "arg_type": arg_type}
for dtype in default_dtypes
for shape in array_shapes
for arg_type in ["splat", "value"]
for perm in [None, tuple(np.random.RandomState(0).permutation(np.zeros(shape).ndim))]))
def testTransposeTuple(self, shape, dtype, perm, arg_type):
rng = jtu.rand_some_zero(self.rng())
args_maker = lambda: [rng(shape, dtype)]
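# "value" passes the permutation as a single tuple; "splat" unpacks it into positional arguments.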
if arg_type == "value":
np_fun = lambda x: x.transpose(perm)
jnp_fun = lambda x: jnp.array(x).transpose(perm)
else:
np_fun = lambda x: x.transpose(*(perm or ()))
jnp_fun = lambda x: jnp.array(x).transpose(*(perm or ()))
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_trim={}".format(
jtu.format_shape_dtype_string(a_shape, dtype), trim),
"dtype": dtype, "a_shape": a_shape, "trim": trim}
for dtype in default_dtypes
for a_shape in one_dim_array_shapes
for trim in ["f", "b", "fb"]))
def testTrimZeros(self, a_shape, dtype, trim):
rng = jtu.rand_some_zero(self.rng())
args_maker = lambda: [rng(a_shape, dtype)]
np_fun = lambda arg1: np.trim_zeros(arg1, trim)
jnp_fun = lambda arg1: jnp.trim_zeros(arg1, trim)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_rank{}".format(
jtu.format_shape_dtype_string(a_shape, dtype), rank),
"dtype": dtype, "a_shape": a_shape, "rank": rank}
for rank in (1, 2)
for dtype in default_dtypes
for a_shape in one_dim_array_shapes))
def testPoly(self, a_shape, dtype, rank):
if dtype in (np.float16, jnp.bfloat16, np.int16):
self.skipTest(f"{dtype} gets promoted to {np.float16}, which is not supported.")
elif rank == 2 and jtu.device_under_test() in ("tpu", "gpu"):
self.skipTest("Nonsymmetric eigendecomposition is only implemented on the CPU backend.")
rng = jtu.rand_default(self.rng())
tol = { np.int8: 1e-3, np.int32: 1e-3, np.float32: 1e-3, np.float64: 1e-6 }
if jtu.device_under_test() == "tpu":
tol[np.int32] = tol[np.float32] = 1e-1
tol = jtu.tolerance(dtype, tol)
args_maker = lambda: [rng(a_shape * rank, dtype)]
self._CheckAgainstNumpy(np.poly, jnp.poly, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp.poly, args_maker, check_dtypes=True, rtol=tol, atol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "a_shape={} , b_shape={}".format(
jtu.format_shape_dtype_string(a_shape, dtype),
jtu.format_shape_dtype_string(b_shape, dtype)),
"dtype": dtype, "a_shape": a_shape, "b_shape" : b_shape}
for dtype in default_dtypes
for a_shape in one_dim_array_shapes
for b_shape in one_dim_array_shapes))
def testPolyAdd(self, a_shape, b_shape, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg1, arg2: np.polyadd(arg1, arg2)
jnp_fun = lambda arg1, arg2: jnp.polyadd(arg1, arg2)
args_maker = lambda: [rng(a_shape, dtype), rng(b_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "a_shape={} , b_shape={}".format(
jtu.format_shape_dtype_string(a_shape, dtype),
jtu.format_shape_dtype_string(b_shape, dtype)),
"dtype": dtype, "a_shape": a_shape, "b_shape" : b_shape}
for dtype in default_dtypes
for a_shape in one_dim_array_shapes
for b_shape in one_dim_array_shapes))
def testPolySub(self, a_shape, b_shape, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg1, arg2: np.polysub(arg1, arg2)
jnp_fun = lambda arg1, arg2: jnp.polysub(arg1, arg2)
args_maker = lambda: [rng(a_shape, dtype), rng(b_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_order={}_k={}".format(
jtu.format_shape_dtype_string(a_shape, dtype),
order, k),
"dtype": dtype, "a_shape": a_shape, "order" : order, "k": k}
for dtype in default_dtypes
for a_shape in one_dim_array_shapes
for order in range(5)
for k in [np.arange(order, dtype=dtype), np.ones(1, dtype), None]))
def testPolyInt(self, a_shape, order, k, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg1: np.polyint(arg1, m=order, k=k)
jnp_fun = lambda arg1: jnp.polyint(arg1, m=order, k=k)
args_maker = lambda: [rng(a_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_order={}".format(
jtu.format_shape_dtype_string(a_shape, dtype),
order),
"dtype": dtype, "a_shape": a_shape, "order" : order}
for dtype in default_dtypes
for a_shape in one_dim_array_shapes
for order in range(5)))
def testPolyDer(self, a_shape, order, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg1: np.polyder(arg1, m=order)
jnp_fun = lambda arg1: jnp.polyder(arg1, m=order)
args_maker = lambda: [rng(a_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_ptype={}".format(ptype), "ptype": ptype}
for ptype in ['int', 'np.int', 'jnp.int']))
def testIntegerPower(self, ptype):
p = {'int': 2, 'np.int': np.int32(2), 'jnp.int': jnp.int32(2)}[ptype]
jaxpr = api.make_jaxpr(partial(jnp.power, x2=p))(1)
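# Raising to an integer power should lower to a single integer_pow primitive rather than a general pow.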
eqns = jaxpr.jaxpr.eqns
self.assertLen(eqns, 1)
self.assertEqual(eqns[0].primitive, lax.integer_pow_p)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_y={}".format(x, y), "x": x, "y": y}
for x in [-1, 0, 1]
for y in [0, 32, 64, 128]))
def testIntegerPowerOverflow(self, x, y):
# Regression test for https://github.com/google/jax/issues/5987
args_maker = lambda: [x, y]
self._CheckAgainstNumpy(np.power, jnp.power, args_maker)
self._CompileAndCheck(jnp.power, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in all_shapes
for dtype in all_dtypes
for axis in [None] + list(range(len(shape)))))
def testCompress(self, shape, dtype, axis):
rng = jtu.rand_some_zero(self.rng())
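# Build a condition whose length matches the compressed axis (or the flattened size when axis is None); scalar inputs get an empty condition.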
if shape in scalar_shapes or len(shape) == 0:
cond_shape = (0,)
elif axis is None:
cond_shape = (prod(shape),)
else:
cond_shape = (shape[axis],)
args_maker = lambda: [rng(cond_shape, jnp.float32), rng(shape, dtype)]
np_fun = partial(np.compress, axis=axis)
jnp_fun = partial(jnp.compress, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_condition=array[{}]_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), len(condition), axis),
"shape": shape, "dtype": dtype, "condition": condition, "axis": axis}
for shape in [(2, 3)]
for dtype in int_dtypes
# condition entries beyond axis size must be zero.
for condition in [[1], [1, 0, 0, 0, 0, 0, 0]]
for axis in [None, 0, 1]))
def testCompressMismatchedShapes(self, shape, dtype, condition, axis):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [np.array(condition), rng(shape, dtype)]
np_fun = partial(np.compress, axis=axis)
jnp_fun = partial(jnp.compress, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in array_shapes
for dtype in all_dtypes
for axis in [None] + list(range(len(shape)))))
def testCompressMethod(self, shape, dtype, axis):
rng = jtu.rand_some_zero(self.rng())
if shape in scalar_shapes or len(shape) == 0:
cond_shape = (0,)
elif axis is None:
cond_shape = (prod(shape),)
else:
cond_shape = (shape[axis],)
args_maker = lambda: [rng(cond_shape, jnp.float32), rng(shape, dtype)]
np_fun = lambda condition, x: np.compress(condition, x, axis=axis)
jnp_fun = lambda condition, x: x.compress(condition, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
axis, ",".join(str(d) for d in base_shape),
",".join(np.dtype(dtype).name for dtype in arg_dtypes)),
"axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes}
for num_arrs in [3]
for arg_dtypes in itertools.combinations_with_replacement(default_dtypes, num_arrs)
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for axis in range(-len(base_shape)+1, len(base_shape))))
def testConcatenate(self, axis, base_shape, arg_dtypes):
rng = jtu.rand_default(self.rng())
wrapped_axis = axis % len(base_shape)
shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
def np_fun(*args):
args = [x if x.dtype != jnp.bfloat16 else x.astype(np.float32)
for x in args]
dtype = functools.reduce(jnp.promote_types, arg_dtypes)
return np.concatenate(args, axis=axis).astype(dtype)
jnp_fun = lambda *args: jnp.concatenate(args, axis=axis)
def args_maker():
return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in [(4, 1), (4, 3), (4, 5, 6)]
for dtype in all_dtypes
for axis in [None] + list(range(1 - len(shape), len(shape) - 1))))
def testConcatenateArray(self, shape, dtype, axis):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
np_fun = lambda x: np.concatenate(x, axis=axis)
jnp_fun = lambda x: jnp.concatenate(x, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
def testConcatenateAxisNone(self):
# https://github.com/google/jax/issues/3419
a = jnp.array([[1, 2], [3, 4]])
b = jnp.array([[5]])
jnp.concatenate((a, b), axis=None)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
axis, ",".join(str(d) for d in base_shape),
",".join(np.dtype(dtype).name for dtype in arg_dtypes)),
"axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes}
for arg_dtypes in itertools.combinations_with_replacement(default_dtypes, 2)
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for axis in range(-len(base_shape)+1, len(base_shape))))
def testAppend(self, axis, base_shape, arg_dtypes):
rng = jtu.rand_default(self.rng())
wrapped_axis = axis % len(base_shape)
shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
def np_fun(arr, values):
arr = arr.astype(np.float32) if arr.dtype == jnp.bfloat16 else arr
values = (values.astype(np.float32) if values.dtype == jnp.bfloat16
else values)
out = np.append(arr, values, axis=axis)
return out.astype(jnp.promote_types(*arg_dtypes))
jnp_fun = lambda arr, values: jnp.append(arr, values, axis=axis)
def args_maker():
return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_idx={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, idx),
"dtype": dtype, "shape": shape, "axis": axis, "idx": idx}
for shape in nonempty_nonscalar_array_shapes
for dtype in all_dtypes
for axis in [None] + list(range(-len(shape), len(shape)))
for idx in (range(-prod(shape), prod(shape))
if axis is None else
range(-shape[axis], shape[axis]))))
def testDeleteInteger(self, shape, dtype, idx, axis):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
np_fun = lambda arg: np.delete(arg, idx, axis=axis)
jnp_fun = lambda arg: jnp.delete(arg, idx, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_slc={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, slc),
"dtype": dtype, "shape": shape, "axis": axis, "slc": slc}
for shape in nonempty_nonscalar_array_shapes
for dtype in all_dtypes
for axis in [None] + list(range(-len(shape), len(shape)))
for slc in [slice(None), slice(1, 3), slice(1, 5, 2)]))
def testDeleteSlice(self, shape, dtype, axis, slc):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
np_fun = lambda arg: np.delete(arg, slc, axis=axis)
jnp_fun = lambda arg: jnp.delete(arg, slc, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_idx={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis,
jtu.format_shape_dtype_string(idx_shape, int)),
"dtype": dtype, "shape": shape, "axis": axis, "idx_shape": idx_shape}
for shape in nonempty_nonscalar_array_shapes
for dtype in all_dtypes
for axis in [None] + list(range(-len(shape), len(shape)))
for idx_shape in all_shapes))
def testDeleteIndexArray(self, shape, dtype, axis, idx_shape):
rng = jtu.rand_default(self.rng())
max_idx = np.zeros(shape).size if axis is None else np.zeros(shape).shape[axis]
# Prior to numpy 1.19, negative indices were ignored, so we don't test them there.
low = 0 if numpy_version < (1, 19, 0) else -max_idx
idx = jtu.rand_int(self.rng(), low=low, high=max_idx)(idx_shape, int)
args_maker = lambda: [rng(shape, dtype)]
np_fun = lambda arg: np.delete(arg, idx, axis=axis)
jnp_fun = lambda arg: jnp.delete(arg, idx, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@unittest.skipIf(numpy_version < (1, 19), "boolean mask not supported in numpy < 1.19.0")
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"dtype": dtype, "shape": shape, "axis": axis}
for shape in nonempty_nonscalar_array_shapes
for dtype in all_dtypes
for axis in [None] + list(range(-len(shape), len(shape)))))
def testDeleteMaskArray(self, shape, dtype, axis):
rng = jtu.rand_default(self.rng())
mask_size = np.zeros(shape).size if axis is None else np.zeros(shape).shape[axis]
mask = jtu.rand_int(self.rng(), low=0, high=2)(mask_size, bool)
args_maker = lambda: [rng(shape, dtype)]
np_fun = lambda arg: np.delete(arg, mask, axis=axis)
jnp_fun = lambda arg: jnp.delete(arg, mask, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_out_dims={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis, out_dims),
"shape": shape, "dtype": dtype, "axis": axis, "out_dims": out_dims}
for shape in nonempty_array_shapes
for dtype in default_dtypes
for axis in range(-len(shape), len(shape))
for out_dims in [0, 1, 2]))
def testApplyAlongAxis(self, shape, dtype, axis, out_dims):
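# The mapped function returns a scalar, a 1-D array, or a 2-D array depending on out_dims, exercising apply_along_axis across output ranks.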
def func(x, out_dims):
if out_dims == 0:
return x.sum()
elif out_dims == 1:
return x * x[0]
elif out_dims == 2:
return x[:, None] + x[None, :]
else:
raise NotImplementedError(f"out_dims={out_dims}")
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
np_fun = lambda arr: np.apply_along_axis(func, axis, arr, out_dims=out_dims)
jnp_fun = lambda arr: jnp.apply_along_axis(func, axis, arr, out_dims=out_dims)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_func={}_keepdims={}_axes={}".format(
jtu.format_shape_dtype_string(shape, dtype),
func, keepdims, axes),
"shape": shape, "dtype": dtype, "func": func, "keepdims": keepdims, "axes": axes}
for shape in nonempty_shapes
for func in ["sum"]
for keepdims in [True, False]
for axes in itertools.combinations(range(len(shape)), 2)
# Avoid low-precision types in sum()
for dtype in default_dtypes if dtype not in [np.float16, jnp.bfloat16]))
def testApplyOverAxes(self, shape, dtype, func, keepdims, axes):
f = lambda x, axis: getattr(x, func)(axis=axis, keepdims=keepdims)
rng = jtu.rand_default(self.rng())
args_maker = lambda: (rng(shape, dtype),)
np_fun = lambda a: np.apply_over_axes(f, a, axes)
jnp_fun = lambda a: jnp.apply_over_axes(f, a, axes)
self._CompileAndCheck(jnp_fun, args_maker)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_axis={}_repeats={}_fixed_size={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis, repeats, fixed_size),
"axis": axis, "shape": shape, "dtype": dtype, "repeats": repeats,
'fixed_size': fixed_size}
for repeats in [0, 1, 2]
for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
for axis in [None] + list(range(-len(shape), max(1, len(shape))))
for fixed_size in [True, False]))
def testRepeat(self, axis, shape, dtype, repeats, fixed_size):
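# `total_repeat_length` is a static hint that lets jnp.repeat accept a traced
# `repeats` value. Illustrative (hypothetical) call:
#   jnp.repeat(jnp.arange(3), 2, total_repeat_length=6)  # -> [0, 0, 1, 1, 2, 2]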
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: np.repeat(arg, repeats=repeats, axis=axis)
np_fun = _promote_like_jnp(np_fun)
if fixed_size:
total_repeat_length = np.repeat(np.zeros(shape), repeats, axis).shape[axis or 0]
jnp_fun = lambda arg, rep: jnp.repeat(arg, repeats=rep, axis=axis,
total_repeat_length=total_repeat_length)
jnp_args_maker = lambda: [rng(shape, dtype), repeats]
clo_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis,
total_repeat_length=total_repeat_length)
clo_fun_args_maker = lambda: [rng(shape, dtype)]
self._CompileAndCheck(jnp_fun, jnp_args_maker)
self._CheckAgainstNumpy(np_fun, clo_fun, clo_fun_args_maker)
else:
# Here `repeats` is captured in the closure, so it is a constant.
jnp_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
def testRepeatScalarFastPath(self):
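# With a scalar `repeats`, jnp.repeat is expected to lower to a cheap
# broadcast-and-reshape rather than a gather; the bound of 6 equations below
# is a loose regression guard for that fast path.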
a = jnp.array([1,2,3,4])
f = lambda a: jnp.repeat(a, repeats=2)
jaxpr = api.make_jaxpr(f)(a)
self.assertLessEqual(len(jaxpr.jaxpr.eqns), 6)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_ind={}_inv={}_count={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis,
return_index, return_inverse, return_counts),
"shape": shape, "dtype": dtype, "axis": axis,
"return_index": return_index, "return_inverse": return_inverse,
"return_counts": return_counts}
for dtype in number_dtypes
for shape in all_shapes
for axis in [None] + list(range(len(shape)))
for return_index in [False, True]
for return_inverse in [False, True]
for return_counts in [False, True]))
def testUnique(self, shape, dtype, axis, return_index, return_inverse, return_counts):
if axis is not None and numpy_version < (1, 19) and np.empty(shape).size == 0:
self.skipTest("zero-sized axis in unique leads to error in older numpy.")
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
np_fun = lambda x: np.unique(x, return_index, return_inverse, return_counts, axis=axis)
jnp_fun = lambda x: jnp.unique(x, return_index, return_inverse, return_counts, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_size={}".format(
jtu.format_shape_dtype_string(shape, dtype), size),
"shape": shape, "dtype": dtype, "size": size}
for dtype in number_dtypes
for size in [1, 5, 10]
for shape in nonempty_array_shapes))
def testUniqueSize(self, shape, dtype, size):
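# jnp.unique with a static `size` pads or truncates its outputs; the reference
# np_fun below spells out the expected padding: unique values and indices
# repeat their first entry, and the padded counts are zero.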
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
kwds = dict(return_index=True, return_inverse=True, return_counts=True)
def np_fun(x):
u, ind, inv, counts = jnp.unique(x, **kwds)
if size <= len(u):
u, ind, counts = u[:size], ind[:size], counts[:size]
else:
extra = size - len(u)
u = np.concatenate([u, np.full(extra, u[0], u.dtype)])
ind = np.concatenate([ind, np.full(extra, ind[0], ind.dtype)])
counts = np.concatenate([counts, np.zeros(extra, counts.dtype)])
return u, ind, inv, counts
jnp_fun = lambda x: jnp.unique(x, size=size, **kwds)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_fixed_size={}".format(fixed_size),
"fixed_size": fixed_size}
for fixed_size in [True, False]))
def testNonScalarRepeats(self, fixed_size):
'''
Follows the numpy test suite's `test_repeat` at
https://github.com/numpy/numpy/blob/main/numpy/core/tests/test_multiarray.py
'''
tol = 1e-5
def test_single(m, args_maker, repeats, axis):
lax_ans = jnp.repeat(m, repeats, axis)
numpy_ans = np.repeat(m, repeats, axis)
self.assertAllClose(lax_ans, numpy_ans, rtol=tol, atol=tol)
if fixed_size:
# Calculate expected size of the repeated axis.
rep_length = np.repeat(np.zeros_like(m), repeats, axis).shape[axis or 0]
jnp_fun = lambda arg, rep: jnp.repeat(
arg, repeats=rep, axis=axis, total_repeat_length=rep_length)
else:
jnp_fun = lambda arg: jnp.repeat(arg, repeats = repeats, axis=axis)
self._CompileAndCheck(jnp_fun, args_maker)
m = jnp.array([1,2,3,4,5,6])
if fixed_size:
args_maker = lambda: [m, repeats]
else:
args_maker = lambda: [m]
for repeats in [2, jnp.array([1,3,0,1,1,2]), jnp.array([1,3,2,1,1,2]), jnp.array([2])]:
test_single(m, args_maker, repeats, axis=None)
test_single(m, args_maker, repeats, axis=0)
m_rect = m.reshape((2,3))
if fixed_size:
args_maker = lambda: [m_rect, repeats]
else:
args_maker = lambda: [m_rect]
for repeats in [2, jnp.array([2,1]), jnp.array([2])]:
test_single(m_rect, args_maker, repeats, axis=0)
for repeats in [2, jnp.array([1,3,2]), jnp.array([2])]:
test_single(m_rect, args_maker, repeats, axis=1)
def testIssue2330(self):
'''
Make sure the return value of jnp.concatenate is a jax.ndarray and that the call is side-effect safe.
'''
def attempt_sideeffect(x):
x = [x]
x = jnp.concatenate(x)
x -= 1.
return x
np_input = np.ones((1))
jnp_input = jnp.ones((1))
expected_np_input_after_call = np.ones((1))
expected_jnp_input_after_call = jnp.ones((1))
self.assertTrue(xla.type_is_device_array(jnp.concatenate([np_input])))
attempt_sideeffect(np_input)
attempt_sideeffect(jnp_input)
self.assertAllClose(np_input, expected_np_input_after_call)
self.assertAllClose(jnp_input, expected_jnp_input_after_call)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "op={}_xshape=[{}]_yshape=[{}]_mode={}".format(
op,
jtu.format_shape_dtype_string(xshape, dtype),
jtu.format_shape_dtype_string(yshape, dtype),
mode),
"xshape": xshape, "yshape": yshape, "dtype": dtype, "mode": mode,
"jnp_op": getattr(jnp, op),
"np_op": getattr(np, op)}
for mode in ['full', 'same', 'valid']
for op in ['convolve', 'correlate']
for dtype in number_dtypes
for xshape in one_dim_array_shapes
for yshape in one_dim_array_shapes))
def testConvolutions(self, xshape, yshape, dtype, mode, jnp_op, np_op):
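# On TPU the default matmul/convolution precision is reduced, so request
# HIGHEST precision there to make the comparison against numpy meaningful.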
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(xshape, dtype), rng(yshape, dtype)]
precision = lax.Precision.HIGHEST if jtu.device_under_test() == "tpu" else None
np_fun = partial(np_op, mode=mode)
jnp_fun = partial(jnp_op, mode=mode, precision=precision)
tol = {np.float16: 2e-1, np.float32: 1e-2, np.float64: 1e-14,
np.complex128: 1e-14}
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), axis,
out_dtype.__name__),
"axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"jnp_op": getattr(jnp, op), "np_op": getattr(np, op)}
for op in ["cumsum", "cumprod"]
for dtype in all_dtypes
for out_dtype in default_dtypes
for shape in all_shapes
for axis in [None] + list(range(-len(shape), len(shape)))))
def testCumSumProd(self, axis, shape, dtype, out_dtype, np_op, jnp_op):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: np_op(arg, axis=axis, dtype=out_dtype)
np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
jnp_fun = lambda arg: jnp_op(arg, axis=axis, dtype=out_dtype)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
tol_thresholds = {dtypes.bfloat16: 4e-2}
tol = max(jtu.tolerance(dtype, tol_thresholds),
jtu.tolerance(out_dtype, tol_thresholds))
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), axis,
out_dtype.__name__),
"axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"jnp_op": getattr(jnp, op), "np_op": getattr(np, op)}
for op in ["nancumsum", "nancumprod"]
for dtype in all_dtypes
for out_dtype in default_dtypes
for shape in all_shapes
for axis in [None] + list(range(-len(shape), len(shape)))))
def testNanCumSumProd(self, axis, shape, dtype, out_dtype, np_op, jnp_op):
rng = jtu.rand_some_nan(self.rng())
np_fun = partial(np_op, axis=axis, dtype=out_dtype)
np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
jnp_fun = partial(jnp_op, axis=axis, dtype=out_dtype)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
tol_thresholds = {dtypes.bfloat16: 4e-2}
tol = max(jtu.tolerance(dtype, tol_thresholds),
jtu.tolerance(out_dtype, tol_thresholds))
if dtype != jnp.bfloat16:
# numpy functions do not properly handle bfloat16
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_yshape={}_xshape={}_dx={}_axis={}".format(
jtu.format_shape_dtype_string(yshape, dtype),
jtu.format_shape_dtype_string(xshape, dtype) if xshape is not None else None,
dx, axis),
"yshape": yshape, "xshape": xshape, "dtype": dtype, "dx": dx, "axis": axis}
for dtype in default_dtypes
for yshape, xshape, dx, axis in [
((10,), None, 1.0, -1),
((3, 10), None, 2.0, -1),
((3, 10), None, 3.0, -0),
((10, 3), (10,), 1.0, -2),
((3, 10), (10,), 1.0, -1),
((3, 10), (3, 10), 1.0, -1),
((2, 3, 10), (3, 10), 1.0, -2),
]))
@jtu.skip_on_devices("tpu") # TODO(jakevdp): fix and reenable this test.
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testTrapz(self, yshape, xshape, dtype, dx, axis):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(yshape, dtype), rng(xshape, dtype) if xshape is not None else None]
np_fun = partial(np.trapz, dx=dx, axis=axis)
jnp_fun = partial(jnp.trapz, dx=dx, axis=axis)
tol = jtu.tolerance(dtype, {np.float64: 1e-12,
dtypes.bfloat16: 4e-2})
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol,
check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol,
check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}_m={}_n={}_k={}".format(
np.dtype(dtype).name, m, n, k),
"m": m, "n": n, "k": k, "dtype": dtype}
for dtype in default_dtypes
for n in [0, 4]
for m in [None, 0, 1, 3, 4]
for k in list(range(-4, 4))))
def testTri(self, m, n, k, dtype):
np_fun = lambda: np.tri(n, M=m, k=k, dtype=dtype)
jnp_fun = lambda: jnp.tri(n, M=m, k=k, dtype=dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_shape={}_k={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "op": op, "k": k}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for op in ["tril", "triu"]
for k in list(range(-3, 3))))
def testTriLU(self, dtype, shape, op, k):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: getattr(np, op)(arg, k=k)
jnp_fun = lambda arg: getattr(jnp, op)(arg, k=k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "n={}_k={}_m={}".format(n, k, m),
"n": n, "k": k, "m": m}
for n in range(1, 5)
for k in [-1, 0, 1]
for m in range(1, 5)))
def testTrilIndices(self, n, k, m):
np_fun = lambda n, k, m: np.tril_indices(n, k=k, m=m)
jnp_fun = lambda n, k, m: jnp.tril_indices(n, k=k, m=m)
args_maker = lambda: [n, k, m]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "n={}_k={}_m={}".format(n, k, m),
"n": n, "k": k, "m": m}
for n in range(1, 5)
for k in [-1, 0, 1]
for m in range(1, 5)))
def testTriuIndices(self, n, k, m):
np_fun = lambda n, k, m: np.triu_indices(n, k=k, m=m)
jnp_fun = lambda n, k, m: jnp.triu_indices(n, k=k, m=m)
args_maker = lambda: [n, k, m]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k}
for dtype in default_dtypes
for shape in [(1,1), (1,2), (2,2), (2,3), (3,2), (3,3), (4,4)]
for k in [-1, 0, 1]))
def testTriuIndicesFrom(self, shape, dtype, k):
rng = jtu.rand_default(self.rng())
np_fun = lambda arr, k: np.triu_indices_from(arr, k=k)
jnp_fun = lambda arr, k: jnp.triu_indices_from(arr, k=k)
args_maker = lambda: [rng(shape, dtype), k]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k}
for dtype in default_dtypes
for shape in [(1,1), (1,2), (2,2), (2,3), (3,2), (3,3), (4,4)]
for k in [-1, 0, 1]))
def testTrilIndicesFrom(self, shape, dtype, k):
rng = jtu.rand_default(self.rng())
np_fun = lambda arr, k: np.tril_indices_from(arr, k=k)
jnp_fun = lambda arr, k: jnp.tril_indices_from(arr, k=k)
args_maker = lambda: [rng(shape, dtype), k]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_ndim={}_n={}".format(ndim, n),
"ndim": ndim, "n": n}
for ndim in [0, 1, 4]
for n in [0, 1, 7]))
def testDiagIndices(self, ndim, n):
np.testing.assert_equal(np.diag_indices(n, ndim),
jnp.diag_indices(n, ndim))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "arr_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)
),
"dtype": dtype, "shape": shape}
for dtype in default_dtypes
for shape in [(1,1), (2,2), (3,3), (4,4), (5,5)]))
def testDiagIndicesFrom(self, dtype, shape):
rng = jtu.rand_default(self.rng())
np_fun = np.diag_indices_from
jnp_fun = jnp.diag_indices_from
args_maker = lambda : [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]
for k in list(range(-4, 4))))
def testDiag(self, shape, dtype, k):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: np.diag(arg, k)
jnp_fun = lambda arg: jnp.diag(arg, k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k}
for dtype in default_dtypes
for shape in all_shapes
for k in range(-4, 4)))
def testDiagFlat(self, shape, dtype, k):
rng = jtu.rand_default(self.rng())
# numpy has inconsistencies for scalar values
# https://github.com/numpy/numpy/issues/16477
# jax differs in that it treats scalar values as length-1 arrays
np_fun = lambda arg: np.diagflat(np.atleast_1d(arg), k)
jnp_fun = lambda arg: jnp.diagflat(arg, k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_a1_shape={}_a2_shape2={}".format(
jtu.format_shape_dtype_string(a1_shape, dtype),
jtu.format_shape_dtype_string(a2_shape, dtype)),
"dtype": dtype, "a1_shape": a1_shape, "a2_shape": a2_shape}
for dtype in default_dtypes
for a1_shape in one_dim_array_shapes
for a2_shape in one_dim_array_shapes))
def testPolyMul(self, a1_shape, a2_shape, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg1, arg2: np.polymul(arg1, arg2)
jnp_fun_np = lambda arg1, arg2: jnp.polymul(arg1, arg2, trim_leading_zeros=True)
jnp_fun_co = lambda arg1, arg2: jnp.polymul(arg1, arg2)
args_maker = lambda: [rng(a1_shape, dtype), rng(a2_shape, dtype)]
tol = {np.float16: 2e-1, np.float32: 5e-2, np.float64: 1e-13}
self._CheckAgainstNumpy(np_fun, jnp_fun_np, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp_fun_co, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),
"dtype": dtype, "shape": shape, "offset": offset, "axis1": axis1,
"axis2": axis2}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in [a for a in range(-len(shape), len(shape))
if a % len(shape) != axis1 % len(shape)]
for offset in list(range(-4, 4))))
def testDiagonal(self, shape, dtype, offset, axis1, axis2):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: np.diagonal(arg, offset, axis1, axis2)
jnp_fun = lambda arg: jnp.diagonal(arg, offset, axis1, axis2)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}".format(np.dtype(dtype).name, n),
"dtype": dtype, "n": n}
for dtype in default_dtypes
for n in list(range(4))))
def testIdentity(self, n, dtype):
np_fun = lambda: np.identity(n, dtype)
jnp_fun = lambda: jnp.identity(n, dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_period={}_left={}_right={}".format(
jtu.format_shape_dtype_string(shape, dtype), period, left, right),
"shape": shape, "dtype": dtype,
"period": period, "left": left, "right": right}
for shape in nonempty_shapes
for period in [None, 0.59]
for left in [None, 0]
for right in [None, 1]
for dtype in default_dtypes
# following types lack precision for meaningful tests
if dtype not in [np.int8, np.int16, np.float16, jnp.bfloat16]
))
def testInterp(self, shape, dtype, period, left, right):
rng = jtu.rand_default(self.rng(), scale=10)
kwds = dict(period=period, left=left, right=right)
np_fun = partial(np.interp, **kwds)
jnp_fun = partial(jnp.interp, **kwds)
args_maker = lambda: [rng(shape, dtype), np.sort(rng((20,), dtype)), np.linspace(0, 1, 20)]
# skip numpy comparison for integer types with period specified, because numpy
# uses an unstable sort and so results differ for duplicate values.
if not (period and np.issubdtype(dtype, np.integer)):
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol={np.float32: 2E-4})
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x1={}_x2={}_x1_rng={}".format(
jtu.format_shape_dtype_string(x1_shape, x1_dtype),
jtu.format_shape_dtype_string(x2_shape, np.int32),
x1_rng_factory_id),
"x1_shape": x1_shape, "x1_dtype": x1_dtype,
"x2_shape": x2_shape, "x1_rng_factory": x1_rng_factory,
"x2_rng_factory": x2_rng_factory}
for x1_rng_factory_id, x1_rng_factory in
enumerate([jtu.rand_some_inf_and_nan, jtu.rand_some_zero])
for x2_rng_factory in [partial(jtu.rand_int, low=-1075, high=1024)]
for x1_shape, x2_shape in filter(_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(array_shapes, 2))
for x1_dtype in default_dtypes))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testLdexp(self, x1_shape, x1_dtype, x2_shape, x1_rng_factory, x2_rng_factory):
# integer types are converted to float64 in numpy's implementation
if (x1_dtype not in [jnp.bfloat16, np.float16, np.float32]
and not config.x64_enabled):
self.skipTest("Only run float64 testcase when float64 is enabled.")
x1_rng = x1_rng_factory(self.rng())
x2_rng = x2_rng_factory(self.rng())
np_fun = lambda x1, x2: np.ldexp(x1, x2)
np_fun = jtu.ignore_warning(category=RuntimeWarning,
message="overflow.*")(np_fun)
jnp_fun = lambda x1, x2: jnp.ldexp(x1, x2)
args_maker = lambda: [x1_rng(x1_shape, x1_dtype),
x2_rng(x2_shape, np.int32)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_rng_factory={}".format(
jtu.format_shape_dtype_string(shape, dtype), rng_factory_id),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for rng_factory_id, rng_factory in enumerate([
jtu.rand_some_inf_and_nan,
jtu.rand_some_zero,
partial(jtu.rand_not_small, offset=1e8),
])
for shape in all_shapes
for dtype in default_dtypes))
def testFrexp(self, shape, dtype, rng_factory):
# integer types are converted to float64 in numpy's implementation
if (dtype not in [jnp.bfloat16, np.float16, np.float32]
and not config.x64_enabled):
self.skipTest("Only run float64 testcase when float64 is enabled.")
rng = rng_factory(self.rng())
np_fun = lambda x: np.frexp(x)
jnp_fun = lambda x: jnp.frexp(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
check_dtypes=np.issubdtype(dtype, np.inexact))
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype_{}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype),
out_dtype, offset, axis1, axis2),
"dtype": dtype, "out_dtype": out_dtype, "shape": shape, "offset": offset,
"axis1": axis1, "axis2": axis2}
for dtype in default_dtypes
for out_dtype in [None] + number_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in range(-len(shape), len(shape))
if (axis1 % len(shape)) != (axis2 % len(shape))
for offset in list(range(-4, 4))))
def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2):
rng = jtu.rand_default(self.rng())
def np_fun(arg):
if out_dtype == jnp.bfloat16:
return np.trace(arg, offset, axis1, axis2, np.float32).astype(jnp.bfloat16)
else:
return np.trace(arg, offset, axis1, axis2, out_dtype)
jnp_fun = lambda arg: jnp.trace(arg, offset, axis1, axis2, out_dtype)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_a={}_v={}_side={}".format(
jtu.format_shape_dtype_string(ashape, dtype),
jtu.format_shape_dtype_string(vshape, dtype),
side), "ashape": ashape, "vshape": vshape, "side": side,
"dtype": dtype}
for ashape in [(15,), (16,), (17,)]
for vshape in [(), (5,), (5, 5)]
for side in ['left', 'right']
for dtype in default_dtypes
))
def testSearchsorted(self, ashape, vshape, side, dtype):
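# Illustrative expectation: np.searchsorted([1, 3, 5], 4, side='left') == 2,
# i.e. the index at which the value would be inserted to keep `a` sorted.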
rng = jtu.rand_default(self.rng())
args_maker = lambda: [np.sort(rng(ashape, dtype)), rng(vshape, dtype)]
np_fun = lambda a, v: np.searchsorted(a, v, side=side)
jnp_fun = lambda a, v: jnp.searchsorted(a, v, side=side)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_bins={}_right={}_reverse={}".format(
jtu.format_shape_dtype_string(xshape, dtype),
jtu.format_shape_dtype_string(binshape, dtype),
right, reverse), "xshape": xshape, "binshape": binshape,
"right": right, "reverse": reverse, "dtype": dtype}
for xshape in [(20,), (5, 4)]
for binshape in [(1,), (5,)]
for right in [True, False]
for reverse in [True, False]
for dtype in default_dtypes
))
def testDigitize(self, xshape, binshape, right, reverse, dtype):
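# np.digitize accepts either monotonically increasing or decreasing bins;
# `reverse` exercises the decreasing case by flipping the sorted bin edges.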
order = jax.ops.index[::-1] if reverse else jax.ops.index[:]
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(xshape, dtype), jnp.sort(rng(binshape, dtype))[order]]
np_fun = lambda x, bins: np.digitize(x, bins, right=right)
jnp_fun = lambda x, bins: jnp.digitize(x, bins, right=right)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_array={}".format(
jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), array_input),
"shape": shape, "dtypes": dtypes, "array_input": array_input}
for dtypes in [
[np.float32],
[np.float32, np.float32],
[np.float32, np.int32, np.float32],
[np.float32, np.int64, np.float32],
[np.float32, np.int32, np.float64],
]
for shape in [(), (2,), (3, 4), (1, 5)]
for array_input in [True, False]))
def testColumnStack(self, shape, dtypes, array_input):
rng = jtu.rand_default(self.rng())
if array_input:
args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
else:
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
np_fun = _promote_like_jnp(np.column_stack)
jnp_fun = jnp.column_stack
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_array={}".format(
jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis, array_input),
"shape": shape, "axis": axis, "dtypes": dtypes, "array_input": array_input}
for dtypes in [
[np.float32],
[np.float32, np.float32],
[np.float32, np.int32, np.float32],
[np.float32, np.int64, np.float32],
[np.float32, np.int32, np.float64],
]
for shape in [(), (2,), (3, 4), (1, 100)]
for axis in range(-len(shape), len(shape) + 1)
for array_input in [True, False]))
def testStack(self, shape, axis, dtypes, array_input):
rng = jtu.rand_default(self.rng())
if array_input:
args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
else:
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
np_fun = _promote_like_jnp(partial(np.stack, axis=axis))
jnp_fun = partial(jnp.stack, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_{}_array={}".format(
op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), array_input),
"shape": shape, "op": op, "dtypes": dtypes, "array_input": array_input}
for op in ["hstack", "vstack", "dstack"]
for dtypes in [
[np.float32],
[np.float32, np.float32],
[np.float32, np.int32, np.float32],
[np.float32, np.int64, np.float32],
[np.float32, np.int32, np.float64],
]
for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]
for array_input in [True, False]))
def testHVDStack(self, shape, op, dtypes, array_input):
rng = jtu.rand_default(self.rng())
if array_input:
args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
else:
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
np_fun = _promote_like_jnp(getattr(np, op))
jnp_fun = getattr(jnp, op)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outdtype={}_fillshape={}".format(
jtu.format_shape_dtype_string(shape, fill_value_dtype),
np.dtype(out_dtype).name if out_dtype else "None",
fill_value_shape),
"fill_value_dtype": fill_value_dtype, "fill_value_shape": fill_value_shape,
"shape": shape, "out_dtype": out_dtype}
for shape in array_shapes + [3, np.array(7, dtype=np.int32)]
for fill_value_dtype in default_dtypes
for fill_value_shape in _compatible_shapes(shape)
for out_dtype in [None] + default_dtypes))
def testFull(self, shape, fill_value_dtype, fill_value_shape, out_dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda fill_value: np.full(shape, fill_value, dtype=out_dtype)
jnp_fun = lambda fill_value: jnp.full(shape, fill_value, dtype=out_dtype)
args_maker = lambda: [rng(fill_value_shape, fill_value_dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name": "_shape={}_n={}_axis={}_prepend={}_append={}".format(
jtu.format_shape_dtype_string(shape, dtype),
n, axis, prepend, append),
"shape": shape, "dtype": dtype, "n": n, "axis": axis,
"prepend": prepend, "append": append
} for shape, dtype in s(_shape_and_dtypes(nonempty_nonscalar_array_shapes, default_dtypes))
for n in s([0, 1, 2])
for axis in s(list(range(-len(shape), max(1, len(shape)))))
for prepend in s([None, 1, np.zeros(shape, dtype=dtype)])
for append in s([None, 1, np.zeros(shape, dtype=dtype)])
)))
def testDiff(self, shape, dtype, n, axis, prepend, append):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
def np_fun(x, n=n, axis=axis, prepend=prepend, append=append):
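# np._NoValue is numpy's sentinel for "argument not supplied"; an explicit
# None would be treated as a value, so map None back to the sentinel here.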
if prepend is None:
prepend = np._NoValue
elif not np.isscalar(prepend) and prepend.dtype == jnp.bfloat16:
prepend = prepend.astype(np.float32)
if append is None:
append = np._NoValue
elif not np.isscalar(append) and append.dtype == jnp.bfloat16:
append = append.astype(np.float32)
if x.dtype == jnp.bfloat16:
return np.diff(x.astype(np.float32), n=n, axis=axis, prepend=prepend, append=append).astype(jnp.bfloat16)
else:
return np.diff(x, n=n, axis=axis, prepend=prepend, append=append)
jnp_fun = lambda x: jnp.diff(x, n=n, axis=axis, prepend=prepend, append=append)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_op={}_shape={}_dtype={}").format(op, shape, dtype),
"np_op": getattr(np, op), "jnp_op": getattr(jnp, op),
"shape": shape, "dtype": dtype}
for op in ["zeros", "ones"]
for shape in [2, (), (2,), (3, 0), np.array((4, 5, 6), dtype=np.int32),
np.array(4, dtype=np.int32)]
for dtype in all_dtypes))
def testZerosOnes(self, np_op, jnp_op, shape, dtype):
args_maker = lambda: []
np_op = partial(np_op, shape, dtype)
jnp_op = partial(jnp_op, shape, dtype)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
def testOnesWithInvalidShape(self):
with self.assertRaises(TypeError):
jnp.ones((-1, 1))
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name": "_inshape={}_filldtype={}_fillshape={}_outdtype={}_outshape={}".format(
jtu.format_shape_dtype_string(shape, in_dtype),
np.dtype(fill_value_dtype).name, fill_value_shape,
np.dtype(out_dtype).name, out_shape),
"shape": shape, "in_dtype": in_dtype,
"fill_value_dtype": fill_value_dtype, "fill_value_shape": fill_value_shape,
"out_dtype": out_dtype, "out_shape": out_shape
} for shape in s(array_shapes)
for out_shape in s([None] + array_shapes)
for in_dtype in s(default_dtypes)
for fill_value_dtype in s(default_dtypes)
for fill_value_shape in s(_compatible_shapes(shape if out_shape is None else out_shape))
for out_dtype in s(default_dtypes))))
def testFullLike(self, shape, in_dtype, fill_value_dtype, fill_value_shape, out_dtype, out_shape):
if numpy_version < (1, 19) and out_shape == ():
raise SkipTest("Numpy < 1.19 treats out_shape=() like out_shape=None")
rng = jtu.rand_default(self.rng())
np_fun = lambda x, fill_value: np.full_like(
x, fill_value, dtype=out_dtype, shape=out_shape)
jnp_fun = lambda x, fill_value: jnp.full_like(
x, fill_value, dtype=out_dtype, shape=out_shape)
args_maker = lambda: [rng(shape, in_dtype), rng(fill_value_shape, fill_value_dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_func={}_inshape={}_outshape={}_outdtype={}".format(
func, jtu.format_shape_dtype_string(shape, in_dtype),
out_shape, out_dtype),
"func": func, "shape": shape, "in_dtype": in_dtype,
"out_shape": out_shape, "out_dtype": out_dtype}
for shape in array_shapes
for out_shape in [None] + array_shapes
for in_dtype in default_dtypes
for func in ["ones_like", "zeros_like"]
for out_dtype in default_dtypes))
def testZerosOnesLike(self, func, shape, in_dtype, out_shape, out_dtype):
if numpy_version < (1, 19) and out_shape == ():
raise SkipTest("Numpy < 1.19 treats out_shape=() like out_shape=None")
rng = jtu.rand_default(self.rng())
np_fun = lambda x: getattr(np, func)(x, dtype=out_dtype, shape=out_shape)
jnp_fun = lambda x: getattr(jnp, func)(x, dtype=out_dtype, shape=out_shape)
args_maker = lambda: [rng(shape, in_dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_func={}_inshape={}_weak_type={}_outshape={}_outdtype={}".format(
func, jtu.format_shape_dtype_string(shape, in_dtype),
weak_type, out_shape, out_dtype),
"func": func, "args": args,
"shape": shape, "in_dtype": in_dtype, "weak_type": weak_type,
"out_shape": out_shape, "out_dtype": out_dtype}
for shape in array_shapes
for in_dtype in [np.int32, np.float32, np.complex64]
for weak_type in [True, False]
for out_shape in [None, (), (10,)]
for func, args in [("full_like", (-100,)), ("ones_like", ()), ("zeros_like", ())]
for out_dtype in [None, float]))
def testZerosOnesFullLikeWeakType(self, func, args, shape, in_dtype, weak_type, out_shape, out_dtype):
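# Weak typing should be preserved by zeros_like/ones_like/full_like only when
# no explicit output dtype is requested; `expected_weak_type` below encodes
# that rule.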
if numpy_version < (1, 19) and out_shape == ():
raise SkipTest("Numpy < 1.19 treats out_shape=() like out_shape=None")
rng = jtu.rand_default(self.rng())
x = lax._convert_element_type(rng(shape, in_dtype), weak_type=weak_type)
fun = lambda x: getattr(jnp, func)(x, *args, dtype=out_dtype, shape=out_shape)
expected_weak_type = weak_type and (out_dtype is None)
self.assertEqual(dtypes.is_weakly_typed(fun(x)), expected_weak_type)
self.assertEqual(dtypes.is_weakly_typed(api.jit(fun)(x)), expected_weak_type)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_funcname={}_input_type={}_val={}_dtype={}".format(
funcname, input_type, val, dtype),
"funcname": funcname, "input_type": input_type, "val": val, "dtype": dtype}
for funcname in ["array", "asarray"]
for dtype in [int, float, None]
for val in [0, 1]
for input_type in [int, float, np.int32, np.float32]))
def testArrayWeakType(self, funcname, input_type, val, dtype):
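# jnp.array / jnp.asarray of a Python scalar stays weakly typed only when no
# dtype is given; NumPy scalar inputs always carry a strong dtype.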
func = lambda x: getattr(jnp, funcname)(x, dtype=dtype)
fjit = api.jit(func)
val = input_type(val)
expected_weak_type = dtype is None and input_type in set(dtypes._weak_types)
self.assertEqual(dtypes.is_weakly_typed(func(val)), expected_weak_type)
self.assertEqual(dtypes.is_weakly_typed(fjit(val)), expected_weak_type)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_weak_type={}_slc={}".format(
jtu.format_shape_dtype_string(shape, dtype), weak_type, slc),
"shape": shape, "dtype": dtype, "weak_type": weak_type, "slc": slc}
for shape in nonempty_nonscalar_array_shapes
for dtype in [int, float, complex]
for weak_type in [True, False]
for slc in [slice(None), slice(0), slice(3), 0, ...]))
def testSliceWeakTypes(self, shape, dtype, weak_type, slc):
rng = jtu.rand_default(self.rng())
x = lax._convert_element_type(rng(shape, dtype), weak_type=weak_type)
op = lambda x: x[slc]
self.assertEqual(op(x).aval.weak_type, weak_type)
self.assertEqual(api.jit(op)(x).aval.weak_type, weak_type)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype}
for shape, axis, num_sections in [
((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]
for dtype in default_dtypes))
def testSplitStaticInt(self, shape, num_sections, axis, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.split(x, num_sections, axis=axis)
jnp_fun = lambda x: jnp.split(x, num_sections, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis, "dtype": dtype}
# All testcases split the specified axis unequally
for shape, axis, num_sections in [
((3,), 0, 2), ((12,), 0, 5), ((12, 4), 0, 7), ((12, 4), 1, 3),
((2, 3, 5), -1, 2), ((2, 4, 4), -2, 3), ((7, 2, 2), 0, 3)]
for dtype in default_dtypes))
def testArraySplitStaticInt(self, shape, num_sections, axis, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.array_split(x, num_sections, axis=axis)
jnp_fun = lambda x: jnp.array_split(x, num_sections, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
def testSplitTypeError(self):
# If we pass an ndarray for indices_or_sections -> no error
self.assertEqual(3, len(jnp.split(jnp.zeros(3), jnp.array([1, 2]))))
CONCRETIZATION_MSG = "Abstract tracer value encountered where concrete value is expected."
with self.assertRaisesRegex(TypeError, CONCRETIZATION_MSG):
# An abstract tracer for idx
api.jit(lambda idx: jnp.split(jnp.zeros((12, 2)), idx))(2.)
with self.assertRaisesRegex(TypeError, CONCRETIZATION_MSG):
# A list including an abstract tracer
api.jit(lambda idx: jnp.split(jnp.zeros((12, 2)), [2, idx]))(2.)
# A concrete tracer -> no error
api.jvp(lambda idx: jnp.split(jnp.zeros((12, 2)), idx),
(2.,), (1.,))
# A tuple including a concrete tracer -> no error
api.jvp(lambda idx: jnp.split(jnp.zeros((12, 2)), (1, idx)),
(2.,), (1.,))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_bins={}_range={}_weights={}".format(
jtu.format_shape_dtype_string(shape, dtype), bins, range, weights),
"shape": shape,
"dtype": dtype,
"bins": bins,
"range": range,
"weights": weights,
}
for shape in [(5,), (5, 5)]
for dtype in number_dtypes
for bins in [10, np.arange(-5, 6), [-5, 0, 3]]
for range in [None, (0, 0), (0, 10)]
for weights in [True, False]
))
def testHistogramBinEdges(self, shape, dtype, bins, range, weights):
rng = jtu.rand_default(self.rng())
_weights = lambda w: abs(w) if weights else None
np_fun = lambda a, w, r: np.histogram_bin_edges(a, bins=bins, range=r,
weights=_weights(w))
jnp_fun = lambda a, w, r: jnp.histogram_bin_edges(a, bins=bins, range=r,
weights=_weights(w))
args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), range]
tol = {jnp.bfloat16: 2E-2, np.float16: 1E-2}
# linspace() compares poorly to numpy when using bfloat16
if dtype != jnp.bfloat16:
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker,
atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_bins={}_density={}_weights={}".format(
jtu.format_shape_dtype_string(shape, dtype), bins, density, weights),
"shape": shape,
"dtype": dtype,
"bins": bins,
"density": density,
"weights": weights,
}
for shape in [(5,), (5, 5)]
for dtype in default_dtypes
# We only test explicit integer-valued bin edges because in other cases
# rounding errors lead to flaky tests.
for bins in [np.arange(-5, 6), [-5, 0, 3]]
for density in [True, False]
for weights in [True, False]
))
def testHistogram(self, shape, dtype, bins, density, weights):
rng = jtu.rand_default(self.rng())
_weights = lambda w: abs(w) if weights else None
np_fun = lambda a, w: np.histogram(a, bins=bins, density=density,
weights=_weights(w))
jnp_fun = lambda a, w: jnp.histogram(a, bins=bins, density=density,
weights=_weights(w))
args_maker = lambda: [rng(shape, dtype), rng(shape, dtype)]
tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
# np.searchsorted errors on bfloat16 with
# "TypeError: invalid type promotion with custom data type"
if dtype != jnp.bfloat16:
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_bins={}_weights={}_density={}".format(
jtu.format_shape_dtype_string(shape, dtype), bins, weights, density),
"shape": shape,
"dtype": dtype,
"bins": bins,
"weights": weights,
"density": density
}
for shape in [(5,), (12,)]
for dtype in int_dtypes
for bins in [2, [2, 2], [[0, 1, 3, 5], [0, 2, 3, 4, 6]]]
for weights in [False, True]
for density in [False, True]
))
def testHistogram2d(self, shape, dtype, bins, weights, density):
rng = jtu.rand_default(self.rng())
_weights = lambda w: abs(w) if weights else None
np_fun = lambda a, b, w: np.histogram2d(a, b, bins=bins, weights=_weights(w), density=density)
jnp_fun = lambda a, b, w: jnp.histogram2d(a, b, bins=bins, weights=_weights(w), density=density)
args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), rng(shape, dtype)]
tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
# np.searchsorted errors on bfloat16 with
# "TypeError: invalid type promotion with custom data type"
with np.errstate(divide='ignore', invalid='ignore'):
if dtype != jnp.bfloat16:
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_bins={}_weights={}_density={}".format(
jtu.format_shape_dtype_string(shape, dtype), bins, weights, density),
"shape": shape,
"dtype": dtype,
"bins": bins,
"weights": weights,
"density": density
}
for shape in [(5, 3), (10, 3)]
for dtype in int_dtypes
for bins in [(2, 2, 2), [[-5, 0, 4], [-4, -1, 2], [-6, -1, 4]]]
for weights in [False, True]
for density in [False, True]
))
def testHistogramdd(self, shape, dtype, bins, weights, density):
rng = jtu.rand_default(self.rng())
_weights = lambda w: abs(w) if weights else None
np_fun = lambda a, w: np.histogramdd(a, bins=bins, weights=_weights(w), density=density)
jnp_fun = lambda a, w: jnp.histogramdd(a, bins=bins, weights=_weights(w), density=density)
args_maker = lambda: [rng(shape, dtype), rng((shape[0],), dtype)]
tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
# np.searchsorted errors on bfloat16 with
# "TypeError: invalid type promotion with custom data type"
if dtype != jnp.bfloat16:
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype}
for shape, axis, num_sections in [
((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), 2, 2), ((4, 3, 4), 0, 2)]
for dtype in default_dtypes))
def testHVDSplit(self, shape, num_sections, axis, dtype):
rng = jtu.rand_default(self.rng())
def fn(module, axis):
if axis == 0:
return module.vsplit
elif axis == 1:
return module.hsplit
else:
assert axis == 2
return module.dsplit
np_fun = lambda x: fn(np, axis)(x, num_sections)
jnp_fun = lambda x: fn(jnp, axis)(x, num_sections)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_order={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype),
order),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"order": order}
for dtype in default_dtypes
for order in ["C", "F"]
for arg_shape, out_shape in [
(jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)),
((), (1, 1, 1)),
((7, 0), (0, 42, 101)),
((3, 4), 12),
((3, 4), (12,)),
((3, 4), -1),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshape(self, arg_shape, out_shape, dtype, order):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.reshape(x, out_shape, order=order)
jnp_fun = lambda x: jnp.reshape(x, out_shape, order=order)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype)),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype}
for dtype in default_dtypes
for arg_shape, out_shape in [
((7, 0), (0, 42, 101)),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshapeMethod(self, arg_shape, out_shape, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.reshape(x, out_shape)
jnp_fun = lambda x: x.reshape(*out_shape)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype)),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype}
for dtype in default_dtypes
for arg_shape, out_shape in itertools.product(all_shapes, array_shapes)))
def testResize(self, arg_shape, out_shape, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.resize(x, out_shape)
jnp_fun = lambda x: jnp.resize(x, out_shape)
args_maker = lambda: [rng(arg_shape, dtype)]
if len(out_shape) > 0 or numpy_version >= (1, 20, 0):
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_expanddim={!r}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), dim),
"arg_shape": arg_shape, "dtype": dtype, "dim": dim}
for arg_shape in [(), (3,), (3, 4)]
for dtype in default_dtypes
for dim in (list(range(-len(arg_shape)+1, len(arg_shape)))
+ [np.array(0), np.array(-1), (0,), [np.array(0)],
(len(arg_shape), len(arg_shape) + 1)])))
def testExpandDimsStaticDim(self, arg_shape, dtype, dim):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.expand_dims(x, dim)
jnp_fun = lambda x: jnp.expand_dims(x, dim)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CompileAndCheck(jnp_fun, args_maker)
if isinstance(dim, (tuple, list)) and numpy_version < (1, 18, 0):
raise SkipTest("support for multiple axes added in NumPy 1.18.0")
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axes=({},{})".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax1, ax2),
"arg_shape": arg_shape, "dtype": dtype, "ax1": ax1, "ax2": ax2}
for arg_shape, ax1, ax2 in [
((3, 4), 0, 1), ((3, 4), 1, 0), ((3, 4, 5), 1, 2),
((3, 4, 5), -1, -2), ((3, 4, 5), 0, 1)]
for dtype in default_dtypes))
def testSwapAxesStaticAxes(self, arg_shape, dtype, ax1, ax2):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.swapaxes(x, ax1, ax2)
jnp_fun = lambda x: jnp.swapaxes(x, ax1, ax2)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axis={!r}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax),
"arg_shape": arg_shape, "dtype": dtype, "ax": ax}
for arg_shape, ax in [
((3, 1), None),
((3, 1), 1),
((3, 1), -1),
((3, 1), np.array(1)),
((1, 3, 1), (0, 2)),
((1, 3, 1), (0,)),
((1, 4, 1), (np.array(0),))]
for dtype in default_dtypes))
def testSqueeze(self, arg_shape, dtype, ax):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.squeeze(x, ax)
jnp_fun = lambda x: jnp.squeeze(x, ax)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}_weights={}_returned={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis,
(None if weights_shape is None else jtu.format_shape_dtype_string(weights_shape, dtype)),
returned),
"shape": shape, "dtype": dtype, "axis": axis,
"weights_shape": weights_shape, "returned": returned}
for shape, dtype in _shape_and_dtypes(nonempty_shapes, number_dtypes)
for axis in list(range(-len(shape), len(shape))) + [None]
# `weights_shape` is either `None`, same as the averaged axis, or same as
# that of the input
for weights_shape in ([None, shape] if axis is None or len(shape) == 1
else [None, (shape[axis],), shape])
for returned in [False, True]))
def testAverage(self, shape, dtype, axis, weights_shape, returned):
rng = jtu.rand_default(self.rng())
if weights_shape is None:
np_fun = lambda x: np.average(x, axis, returned=returned)
jnp_fun = lambda x: jnp.average(x, axis, returned=returned)
args_maker = lambda: [rng(shape, dtype)]
else:
np_fun = lambda x, weights: np.average(x, axis, weights, returned)
jnp_fun = lambda x, weights: jnp.average(x, axis, weights, returned)
args_maker = lambda: [rng(shape, dtype), rng(weights_shape, dtype)]
np_fun = _promote_like_jnp(np_fun, inexact=True)
tol = {dtypes.bfloat16: 2e-1, np.float16: 1e-2, np.float32: 1e-5,
np.float64: 1e-12, np.complex64: 1e-5}
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
try:
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
check_dtypes=check_dtypes, tol=tol)
except ZeroDivisionError:
self.skipTest("don't support checking for ZeroDivisionError")
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
rtol=tol, atol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
f"_arg{i}_ndmin={ndmin}_dtype={np.dtype(dtype) if dtype else None}",
"arg": arg, "ndmin": ndmin, "dtype": dtype}
for i, (arg, dtypes) in enumerate([
([True, False, True], all_dtypes),
(3., all_dtypes),
([1, 2, 3], all_dtypes),
(np.array([1, 2, 3], dtype=np.int64), all_dtypes),
([1., 2., 3.], all_dtypes),
([[1, 2], [3, 4], [5, 6]], all_dtypes),
([[1, 2.], [3, 4], [5, 6]], all_dtypes),
([[1., 2j], [3., 4.], [5., 6.]], complex_dtypes),
([[3, np.array(2, dtype=jnp.float_), 1],
np.arange(3., dtype=jnp.float_)], all_dtypes),
])
for dtype in [None] + dtypes
for ndmin in [None, np.ndim(arg), np.ndim(arg) + 1, np.ndim(arg) + 2]))
def testArray(self, arg, ndmin, dtype):
args_maker = lambda: [arg]
canonical_dtype = dtypes.canonicalize_dtype(dtype or np.array(arg).dtype)
if ndmin is not None:
np_fun = partial(np.array, ndmin=ndmin, dtype=canonical_dtype)
jnp_fun = partial(jnp.array, ndmin=ndmin, dtype=dtype)
else:
np_fun = partial(np.array, dtype=canonical_dtype)
jnp_fun = partial(jnp.array, dtype=dtype)
# We are testing correct canonicalization behavior here, so we turn off the
# permissive canonicalization logic in the test harness.
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
canonicalize_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
def testArrayUnsupportedDtypeError(self):
with self.assertRaisesRegex(TypeError,
"JAX only supports number and bool dtypes.*"):
jnp.array(3, [('a','<i4'),('b','<i4')])
def testArrayFromInteger(self):
int_dtype = dtypes.canonicalize_dtype(jnp.int64)
int_max = jnp.iinfo(int_dtype).max
int_min = jnp.iinfo(int_dtype).min
# Values at extremes are converted correctly.
for val in [int_min, 0, int_max]:
self.assertEqual(jnp.array(val).dtype, int_dtype)
# out of bounds leads to an OverflowError
val = int_max + 1
with self.assertRaisesRegex(OverflowError, f"Python int {val} too large to convert to {int_dtype.name}"):
jnp.array(val)
# explicit uint64 should work
if config.x64_enabled:
self.assertEqual(val, jnp.array(val, dtype='uint64'))
# TODO(jakevdp): fix list inputs to jnp.array and enable the following test
# def testArrayFromList(self):
# int_max = jnp.iinfo(jnp.int64).max
# int_min = jnp.iinfo(jnp.int64).min
#
# # Values at extremes are converted correctly.
# for val in [int_min, 0, int_max]:
# self.assertEqual(jnp.array([val]).dtype, dtypes.canonicalize_dtype('int64'))
#
# # list of values results in promoted type.
# self.assertEqual(jnp.array([0, np.float16(1)]).dtype, jnp.result_type('int64', 'float16'))
#
# # out of bounds leads to an OverflowError
# val = int_min - 1
# with self.assertRaisesRegex(OverflowError, f"Python int {val} too large to convert to int64"):
# jnp.array([0, val])
def testIssue121(self):
assert not np.isscalar(jnp.array(3))
def testArrayOutputsDeviceArrays(self):
assert xla.type_is_device_array(jnp.array([]))
assert xla.type_is_device_array(jnp.array(np.array([])))
class NDArrayLike:
def __array__(self, dtype=None):
return np.array([], dtype=dtype)
assert xla.type_is_device_array(jnp.array(NDArrayLike()))
# NOTE(mattjj): disabled b/c __array__ must produce ndarrays
# class DeviceArrayLike:
# def __array__(self, dtype=None):
# return jnp.array([], dtype=dtype)
# assert xla.type_is_device_array(jnp.array(DeviceArrayLike()))
def testArrayMethod(self):
class arraylike(object):
dtype = np.float32
def __array__(self, dtype=None):
return np.array(3., dtype=dtype)
a = arraylike()
ans = jnp.array(a)
assert ans == 3.
def testMemoryView(self):
ans = jnp.array(bytearray(b'\x2a'))
self.assertAllClose(
ans,
np.array([0x2a], dtype=np.uint8))
def testIsClose(self):
c_isclose = api.jit(jnp.isclose)
c_isclose_nan = api.jit(partial(jnp.isclose, equal_nan=True))
n = 2
rng = np.random.RandomState(0)
x = rng.randn(n, 1)
y = rng.randn(n, 1)
inf = np.asarray(n * [np.inf]).reshape([n, 1])
nan = np.asarray(n * [np.nan]).reshape([n, 1])
args = [x, y, inf, -inf, nan]
for arg0 in args:
for arg1 in args:
result_np = np.isclose(arg0, arg1)
result_jax = jnp.isclose(arg0, arg1)
result_jit = c_isclose(arg0, arg1)
self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
result_np = np.isclose(arg0, arg1, equal_nan=True)
result_jax = jnp.isclose(arg0, arg1, equal_nan=True)
result_jit = c_isclose_nan(arg0, arg1)
self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_y={}_equal_nan={}".format(x, y, equal_nan),
"x": x, "y": y, "equal_nan": equal_nan}
for x, y in itertools.product([
1, [1], [1, 1 + 1E-4], [1, np.nan]], repeat=2)
for equal_nan in [True, False]))
def testAllClose(self, x, y, equal_nan):
jnp_fun = partial(jnp.allclose, equal_nan=equal_nan, rtol=1E-3)
np_fun = partial(np.allclose, equal_nan=equal_nan, rtol=1E-3)
args_maker = lambda: [x, y]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
def testZeroStridesConstantHandler(self):
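# np.broadcast_to returns a zero-stride, non-contiguous view; this checks that
# such constants are handled correctly when closed over by a jitted function.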
raw_const = np.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)
const = np.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))
def fun(x):
return x * const
fun = api.jit(fun)
out_val = fun(3.)
self.assertAllClose(out_val, 3. * const, check_dtypes=False)
def testIsInstanceNdarrayDuringTracing(self):
arr = np.ones(3)
@api.jit
def f(x):
self.assertIsInstance(x, jnp.ndarray)
return jnp.sum(x)
f(arr)
def testNonArrayErrorMessage(self):
x = [1., 2.]
y = np.array([3., 4.])
def g(x, y):
return jnp.add(x, y)
def f(x, y):
return jnp.dot(x, y)
self.assertRaises(TypeError, lambda: g(x, y))
self.assertRaises(TypeError, lambda: f(x, y))
self.assertRaises(TypeError, lambda: api.jit(g)(x, y))
self.assertRaises(TypeError, lambda: api.jit(f)(x, y))
def testAbstractionErrorMessage(self):
@api.jit
def f(x, n):
for _ in range(n):
x = x * x
return x
self.assertRaises(jax.errors.TracerIntegerConversionError, lambda: f(3., 3))
@api.jit
def g(x):
if x > 0.:
return x * 2
else:
return x + 2
self.assertRaises(jax.errors.ConcretizationTypeError, lambda: g(3.))
def testTracingPrimitiveWithNoTranslationErrorMessage(self):
# TODO(mattjj): update this for jax3
self.skipTest("test needs jax3 update")
foo = jnp._not_implemented(lambda x: x)
# No error if there's no tracing.
foo(np.arange(3))
cfoo = api.jit(foo)
self.assertRaises(NotImplementedError, lambda: cfoo(np.arange(3)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in [(3,), (2, 3)]
for dtype in default_dtypes
for axis in list(range(-len(shape), len(shape))) + [None] + [tuple(range(len(shape)))] # Test negative axes and tuples
))
def testFlip(self, shape, dtype, axis):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.flip(x, axis)
np_op = lambda x: np.flip(x, axis)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(3,), (2, 3), (3, 2, 4)]
for dtype in default_dtypes))
def testFlipud(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.flipud(x)
np_op = lambda x: np.flipud(x)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(3, 2), (2, 3), (3, 2, 4)]
for dtype in default_dtypes))
def testFliplr(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.fliplr(x)
np_op = lambda x: np.fliplr(x)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_k={}_axes={}".format(
jtu.format_shape_dtype_string(shape, dtype), k, axes),
"shape": shape, "dtype": dtype, "k": k, "axes": axes}
for shape, axes in [
[(2, 3), (0, 1)],
[(2, 3), (1, 0)],
[(4, 3, 2), (0, 2)],
[(4, 3, 2), (2, 1)],
]
for k in range(-3, 4)
for dtype in default_dtypes))
def testRot90(self, shape, dtype, k, axes):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.rot90(x, k, axes)
np_op = lambda x: np.rot90(x, k, axes)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
# TODO(mattjj): test infix operator overrides
def testRavel(self):
rng = np.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
self._CompileAndCheck(lambda x: x.ravel(), args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_order={}_mode={}".format(
shape, order, mode),
"shape": shape, "order": order, "mode": mode}
for shape in nonempty_nonscalar_array_shapes
for order in ['C', 'F']
for mode in ['wrap', 'clip', 'raise']))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testRavelMultiIndex(self, shape, order, mode):
# generate indices in each dimension with a few out of bounds.
rngs = [jtu.rand_int(self.rng(), low=-1, high=dim + 1)
for dim in shape]
# generate multi_indices of different dimensions that broadcast.
args_maker = lambda: [tuple(rng(ndim * (3,), jnp.int_)
for ndim, rng in enumerate(rngs))]
def np_fun(x):
try:
return np.ravel_multi_index(x, shape, order=order, mode=mode)
except ValueError as err:
if str(err).startswith('invalid entry'):
# sentinel indicating expected error.
return -999
else:
raise
def jnp_fun(x):
try:
return jnp.ravel_multi_index(x, shape, order=order, mode=mode)
except ValueError as err:
if str(err).startswith('invalid entry'):
# sentinel indicating expected error.
return -999
else:
raise
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
if mode == 'raise':
msg = ("The error occurred because ravel_multi_index was jit-compiled "
"with mode='raise'. Use mode='wrap' or mode='clip' instead.")
with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg):
jax.jit(jnp_fun)(*args_maker())
else:
self._CompileAndCheck(jnp_fun, args_maker)
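  # A minimal sketch (not part of the test) of why mode='raise' cannot be
  # traced: the out-of-bounds check needs concrete index values, which traced
  # arguments don't have, while 'wrap' and 'clip' are ordinary array ops:
  #   jax.jit(lambda i: jnp.ravel_multi_index(i, (3, 4), mode='clip'))(
  #       (jnp.array([2]), jnp.array([5])))  # fine; indices are clipped in bounds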
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_ashape={}{}_cshapes={}{}_mode={}".format(
adtype.__name__, ashape, cdtype.__name__, cshapes, mode),
"ashape": ashape, "adtype": adtype, "cshapes": cshapes, "cdtype": cdtype, "mode": mode}
for ashape in ((), (4,), (3, 4))
for cshapes in [
[(), (4,)],
[(3, 4), (4,), (3, 1)]
]
for adtype in int_dtypes
for cdtype in default_dtypes
for mode in ['wrap', 'clip', 'raise']))
def testChoose(self, ashape, adtype, cshapes, cdtype, mode):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(ashape, adtype), [rng(s, cdtype) for s in cshapes]]
def np_fun(a, c):
try:
return np.choose(a, c, mode=mode)
except ValueError as err:
if mode == 'raise' and str(err).startswith('invalid entry'):
return -999 # sentinel indicating expected error.
else:
raise
def jnp_fun(a, c):
try:
return jnp.choose(a, c, mode=mode)
except ValueError as err:
if mode == 'raise' and str(err).startswith('invalid entry'):
return -999 # sentinel indicating expected error.
else:
raise
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
if mode == 'raise':
msg = ("The error occurred because jnp.choose was jit-compiled"
" with mode='raise'. Use mode='wrap' or mode='clip' instead.")
with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg):
jax.jit(jnp_fun)(*args_maker())
else:
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.parameters(
(0, (2, 1, 3)),
(5, (2, 1, 3)),
(0, ()),
([0, 1, 2], (2, 2)),
([[[0, 1], [2, 3]]], (2, 2)))
def testUnravelIndex(self, flat_index, shape):
args_maker = lambda: (flat_index, shape)
self._CheckAgainstNumpy(np.unravel_index, jnp.unravel_index,
args_maker)
self._CompileAndCheck(jnp.unravel_index, args_maker)
def testUnravelIndexOOB(self):
self.assertEqual(jnp.unravel_index(2, (2,)), (1,))
self.assertEqual(jnp.unravel_index(-2, (2, 1, 3,)), (1, 0, 1))
self.assertEqual(jnp.unravel_index(-3, (2,)), (0,))
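  # For contrast (not executed here): np.unravel_index raises ValueError for
  # out-of-range flat indices, whereas jnp.unravel_index clips them, as the
  # assertions above show.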
def testAstype(self):
rng = np.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
np_op = lambda x: np.asarray(x).astype(jnp.int32)
jnp_op = lambda x: jnp.asarray(x).astype(jnp.int32)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in array_shapes
for dtype in all_dtypes))
def testNbytes(self, shape, dtype):
rng = jtu.rand_default(self.rng())
np_op = lambda x: np.asarray(x).nbytes
jnp_op = lambda x: jnp.asarray(x).nbytes
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_dtype={}".format(
jtu.format_shape_dtype_string(shape, a_dtype), dtype),
"shape": shape, "a_dtype": a_dtype, "dtype": dtype}
for shape in [(8,), (3, 8)] # last dim = 8 to ensure shape compatibility
for a_dtype in (default_dtypes + unsigned_dtypes + bool_dtypes)
for dtype in (default_dtypes + unsigned_dtypes + bool_dtypes)))
def testView(self, shape, a_dtype, dtype):
if jtu.device_under_test() == 'tpu':
if jnp.dtype(a_dtype).itemsize in [1, 2] or jnp.dtype(dtype).itemsize in [1, 2]:
self.skipTest("arr.view() not supported on TPU for 8- or 16-bit types.")
if not config.x64_enabled:
if jnp.dtype(a_dtype).itemsize == 8 or jnp.dtype(dtype).itemsize == 8:
self.skipTest("x64 types are disabled by jax_enable_x64")
rng = jtu.rand_fullrange(self.rng())
args_maker = lambda: [rng(shape, a_dtype)]
np_op = lambda x: np.asarray(x).view(dtype)
jnp_op = lambda x: jnp.asarray(x).view(dtype)
    # The above may produce signaling NaNs; ignore warnings about invalid values.
with np.errstate(invalid='ignore'):
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
def testPathologicalFloats(self):
args_maker = lambda: [np.array([
0b_0111_1111_1000_0000_0000_0000_0000_0000, # inf
0b_1111_1111_1000_0000_0000_0000_0000_0000, # -inf
0b_0111_1111_1100_0000_0000_0000_0000_0000, # qnan
0b_1111_1111_1100_0000_0000_0000_0000_0000, # -qnan
0b_0111_1111_1000_0000_0000_0000_0000_0001, # snan
0b_1111_1111_1000_0000_0000_0000_0000_0001, # -snan
0b_0111_1111_1000_0000_0000_1100_0000_0000, # nonstandard nan
0b_1111_1111_1000_0000_0000_1100_0000_0000, # -nonstandard nan
0b_0000_0000_0000_0000_0000_0000_0000_0000, # zero
0b_1000_0000_0000_0000_0000_0000_0000_0000, # -zero
], dtype='uint32')]
np_op = lambda x: np.asarray(x).view('float32').view('uint32')
jnp_op = lambda x: jnp.asarray(x).view('float32').view('uint32')
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
# TODO(mattjj): test other ndarray-like method overrides
def testNpMean(self):
# from https://github.com/google/jax/issues/125
x = lax.add(jnp.eye(3, dtype=float), 0.)
ans = np.mean(x)
self.assertAllClose(ans, np.array(1./3), check_dtypes=False)
def testArangeOnFloats(self):
# from https://github.com/google/jax/issues/145
self.assertAllClose(np.arange(0.0, 1.0, 0.1, dtype=jnp.float_),
jnp.arange(0.0, 1.0, 0.1))
# from https://github.com/google/jax/issues/3450
self.assertAllClose(np.arange(2.5, dtype=jnp.float_),
jnp.arange(2.5))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for dtype in all_dtypes
for shape in nonzerodim_shapes
for axis in (None, *range(len(shape)))))
def testSort(self, dtype, shape, axis):
rng = jtu.rand_some_equal(self.rng())
args_maker = lambda: [rng(shape, dtype)]
jnp_fun = jnp.sort
np_fun = np.sort
if axis is not None:
jnp_fun = partial(jnp_fun, axis=axis)
np_fun = partial(np_fun, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for dtype in all_dtypes
for shape in one_dim_array_shapes
for axis in [None]))
def testSortComplex(self, dtype, shape, axis):
rng = jtu.rand_some_equal(self.rng())
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np.sort_complex, jnp.sort_complex, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp.sort_complex, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_input_type={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype),
input_type.__name__, axis),
"shape": shape, "dtype": dtype, "input_type": input_type, "axis": axis}
for dtype in all_dtypes
for shape in nonempty_nonscalar_array_shapes
for input_type in [np.array, tuple]
for axis in (-1, *range(len(shape) - 1))))
def testLexsort(self, dtype, shape, input_type, axis):
rng = jtu.rand_some_equal(self.rng())
args_maker = lambda: [input_type(rng(shape, dtype))]
jnp_op = lambda x: jnp.lexsort(x, axis=axis)
np_op = lambda x: np.lexsort(x, axis=axis)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for dtype in all_dtypes
for shape in nonzerodim_shapes
for axis in (None, *range(len(shape)))))
def testArgsort(self, dtype, shape, axis):
rng = jtu.rand_some_equal(self.rng())
args_maker = lambda: [rng(shape, dtype)]
jnp_fun = jnp.argsort
np_fun = np.argsort
if axis is not None:
jnp_fun = partial(jnp_fun, axis=axis)
np_fun = partial(np_fun, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for dtype in all_dtypes
for shape in nonzerodim_shapes))
def testMsort(self, dtype, shape):
rng = jtu.rand_some_equal(self.rng())
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np.msort, jnp.msort, args_maker)
self._CompileAndCheck(jnp.msort, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_shifts={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype),
shifts, axis),
"shape": shape, "dtype": dtype, "shifts": shifts, "axis": axis}
for dtype in all_dtypes
for shape in [(3, 4), (3, 4, 5), (7, 4, 0)]
for shifts, axis in [
(3, None),
(1, 1),
((3,), (0,)),
((-2,), (-2,)),
((1, 2), (0, -1)),
((4, 2, 5, 5, 2, 4), None),
(100, None),
]))
def testRoll(self, shape, dtype, shifts, axis):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype), np.array(shifts)]
jnp_op = partial(jnp.roll, axis=axis)
np_op = partial(np.roll, axis=axis)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_start={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis, start),
"shape": shape, "dtype": dtype, "axis": axis,
"start": start}
for dtype in all_dtypes
for shape in [(1, 2, 3, 4)]
for axis in [-3, 0, 2, 3]
for start in [-4, -1, 2, 4]))
def testRollaxis(self, shape, dtype, start, axis):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
jnp_op = partial(jnp.rollaxis, axis=axis, start=start)
np_op = partial(np.rollaxis, axis=axis, start=start)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_bitorder={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, bitorder),
"shape": shape, "dtype": dtype, "axis": axis,
"bitorder": bitorder}
for dtype in [np.uint8, np.bool_]
for bitorder in ['big', 'little']
for shape in [(1, 2, 3, 4)]
for axis in [None, 0, 1, -2, -1]))
def testPackbits(self, shape, dtype, axis, bitorder):
rng = jtu.rand_some_zero(self.rng())
args_maker = lambda: [rng(shape, dtype)]
jnp_op = partial(jnp.packbits, axis=axis, bitorder=bitorder)
np_op = partial(np.packbits, axis=axis, bitorder=bitorder)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_bitorder={}_count={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, bitorder, count),
"shape": shape, "dtype": dtype, "axis": axis, "bitorder": bitorder,
"count": count}
for dtype in [np.uint8]
for bitorder in ['big', 'little']
for shape in [(1, 2, 3, 4)]
for axis in [None, 0, 1, -2, -1]
for count in [None, 20]))
def testUnpackbits(self, shape, dtype, axis, bitorder, count):
rng = jtu.rand_int(self.rng(), 0, 256)
args_maker = lambda: [rng(shape, dtype)]
jnp_op = partial(jnp.unpackbits, axis=axis, bitorder=bitorder)
np_op = partial(np.unpackbits, axis=axis, bitorder=bitorder)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_index={}_axis={}_mode={}".format(
jtu.format_shape_dtype_string(shape, dtype),
jtu.format_shape_dtype_string(index_shape, index_dtype),
axis, mode),
"shape": shape, "index_shape": index_shape, "dtype": dtype,
"index_dtype": index_dtype, "axis": axis, "mode": mode}
for shape in [(3,), (3, 4), (3, 4, 5)]
for index_shape in scalar_shapes + [(3,), (2, 1, 3)]
for axis in itertools.chain(range(-len(shape), len(shape)),
[cast(Optional[int], None)])
for dtype in all_dtypes
for index_dtype in int_dtypes
for mode in [None, 'wrap', 'clip']))
def testTake(self, shape, dtype, index_shape, index_dtype, axis, mode):
def args_maker():
x = rng(shape, dtype)
i = rng_indices(index_shape, index_dtype)
return x, i
rng = jtu.rand_default(self.rng())
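    # With the default mode, keep indices in bounds so results agree with
    # np.take, which raises on out-of-bounds indices; 'wrap' and 'clip' are
    # exercised with a wider index range below.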
if mode is None:
rng_indices = jtu.rand_int(self.rng(), -shape[axis or 0], shape[axis or 0])
else:
rng_indices = jtu.rand_int(self.rng(), -5, 5)
jnp_op = lambda x, i: jnp.take(x, i, axis=axis, mode=mode)
np_op = lambda x, i: np.take(x, i, axis=axis, mode=mode)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
def testTakeEmpty(self):
np.testing.assert_array_equal(
jnp.array([], dtype=jnp.float32),
jnp.take(jnp.array([], jnp.float32), jnp.array([], jnp.int32)))
np.testing.assert_array_equal(
jnp.ones((2, 0, 4), dtype=jnp.float32),
jnp.take(jnp.ones((2, 0, 4), dtype=jnp.float32), jnp.array([], jnp.int32),
axis=1))
with self.assertRaisesRegex(IndexError, "non-empty jnp.take"):
jnp.take(jnp.ones((2, 0, 4), dtype=jnp.float32),
jnp.array([0], jnp.int32), axis=1)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_index={}_axis={}".format(
jtu.format_shape_dtype_string(x_shape, dtype),
jtu.format_shape_dtype_string(i_shape, index_dtype), axis),
"x_shape": x_shape, "i_shape": i_shape, "dtype": dtype,
"index_dtype": index_dtype, "axis": axis}
for x_shape, i_shape in filter(
_shapes_are_equal_length,
filter(_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(nonempty_nonscalar_array_shapes, 2)))
for axis in itertools.chain(range(len(x_shape)), [-1],
[cast(Optional[int], None)])
for dtype in default_dtypes
for index_dtype in int_dtypes))
def testTakeAlongAxis(self, x_shape, i_shape, dtype, index_dtype, axis):
rng = jtu.rand_default(self.rng())
i_shape = np.array(i_shape)
if axis is None:
i_shape = [np.prod(i_shape, dtype=np.int64)]
else:
      # Test the case where the size along the indexed axis need not broadcast
      # against x's size along that axis.
i_shape[axis] *= 3
i_shape = list(i_shape)
def args_maker():
x = rng(x_shape, dtype)
n = np.prod(x_shape, dtype=np.int32) if axis is None else x_shape[axis]
if np.issubdtype(index_dtype, np.unsignedinteger):
index_rng = jtu.rand_int(self.rng(), 0, n)
else:
index_rng = jtu.rand_int(self.rng(), -n, n)
i = index_rng(i_shape, index_dtype)
return x, i
jnp_op = lambda x, i: jnp.take_along_axis(x, i, axis=axis)
if hasattr(np, "take_along_axis"):
np_op = lambda x, i: np.take_along_axis(x, i, axis=axis)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
def testTakeAlongAxisWithUint8IndicesDoesNotOverflow(self):
# https://github.com/google/jax/issues/5088
h = jtu.rand_default(self.rng())((256, 256, 100), np.float32)
g = jtu.rand_int(self.rng(), 0, 100)((256, 256, 1), np.uint8)
q0 = jnp.take_along_axis(h, g, axis=-1)
q1 = np.take_along_axis( h, g, axis=-1)
np.testing.assert_equal(q0, q1)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}_increasing={}".format(
jtu.format_shape_dtype_string([shape], dtype),
n, increasing),
"dtype": dtype, "shape": shape, "n": n, "increasing": increasing}
for dtype in inexact_dtypes
for shape in [0, 5]
for n in [2, 4]
for increasing in [False, True]))
def testVander(self, shape, dtype, n, increasing):
rng = jtu.rand_default(self.rng())
def np_fun(arg):
arg = arg.astype(np.float32) if dtype == jnp.bfloat16 else arg
return np.vander(arg, N=n, increasing=increasing)
jnp_fun = lambda arg: jnp.vander(arg, N=n, increasing=increasing)
args_maker = lambda: [rng([shape], dtype)]
# np.vander seems to return float64 for all floating types. We could obey
# those semantics, but they seem like a bug.
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
tol={np.float32: 1e-3})
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
"nan_to_num", [shape], [dtype]),
"shape": shape, "dtype": dtype}
for shape in array_shapes
for dtype in inexact_dtypes))
def testNanToNum(self, shape, dtype):
rng = jtu.rand_some_inf_and_nan(self.rng())
dtype = np.dtype(dtypes.canonicalize_dtype(dtype)).type
def np_fun(x):
if dtype == jnp.bfloat16:
x = np.where(np.isnan(x), dtype(0), x)
x = np.where(np.isposinf(x), jnp.finfo(dtype).max, x)
x = np.where(np.isneginf(x), jnp.finfo(dtype).min, x)
return x
else:
return np.nan_to_num(x).astype(dtype)
args_maker = lambda: [rng(shape, dtype)]
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
self._CheckAgainstNumpy(np_fun, jnp.nan_to_num, args_maker,
check_dtypes=check_dtypes)
self._CompileAndCheck(jnp.nan_to_num, args_maker,
check_dtypes=check_dtypes)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("ix_", shapes, dtypes),
"shapes": shapes, "dtypes": dtypes}
for shapes, dtypes in (
((), ()),
(((7,),), (np.int32,)),
(((3,), (4,)), (np.int32, np.int32)),
(((3,), (1,), (4,)), (np.int32, np.int32, np.int32)),
)))
def testIx_(self, shapes, dtypes):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)
for shape, dtype in zip(shapes, dtypes)]
self._CheckAgainstNumpy(np.ix_, jnp.ix_, args_maker)
self._CompileAndCheck(jnp.ix_, args_maker)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_dimensions={}_dtype={}_sparse={}".format(
dimensions, dtype, sparse),
"dimensions": dimensions, "dtype": dtype, "sparse": sparse}
for dimensions in [(), (2,), (3, 0), (4, 5, 6)]
for dtype in number_dtypes
for sparse in [True, False]))
def testIndices(self, dimensions, dtype, sparse):
def args_maker(): return []
np_fun = partial(np.indices, dimensions=dimensions,
dtype=dtype, sparse=sparse)
jnp_fun = partial(jnp.indices, dimensions=dimensions,
dtype=dtype, sparse=sparse)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_op={}_a_shape={}_q_shape={}_axis={}_keepdims={}_interpolation={}".format(
op,
jtu.format_shape_dtype_string(a_shape, a_dtype),
jtu.format_shape_dtype_string(q_shape, q_dtype),
axis, keepdims, interpolation),
"a_rng": jtu.rand_some_nan,
"q_rng": q_rng, "op": op,
"a_shape": a_shape, "a_dtype": a_dtype,
"q_shape": q_shape, "q_dtype": q_dtype, "axis": axis,
"keepdims": keepdims,
"interpolation": interpolation}
for (op, q_rng) in (
("percentile", partial(jtu.rand_uniform, low=0., high=100.)),
("quantile", partial(jtu.rand_uniform, low=0., high=1.)),
("nanpercentile", partial(jtu.rand_uniform, low=0., high=100.)),
("nanquantile", partial(jtu.rand_uniform, low=0., high=1.)),
)
for a_dtype in default_dtypes
for a_shape, axis in (
((7,), None),
((47, 7), 0),
((4, 101), 1),
)
for q_dtype in [np.float32]
for q_shape in scalar_shapes + [(4,)]
for keepdims in [False, True]
for interpolation in ['linear', 'lower', 'higher', 'nearest',
'midpoint']))
def testQuantile(self, op, a_rng, q_rng, a_shape, a_dtype, q_shape, q_dtype,
axis, keepdims, interpolation):
a_rng = a_rng(self.rng())
q_rng = q_rng(self.rng())
if "median" in op:
args_maker = lambda: [a_rng(a_shape, a_dtype)]
else:
args_maker = lambda: [a_rng(a_shape, a_dtype), q_rng(q_shape, q_dtype)]
# TODO(jakevdp): remove this ignore_warning when minimum numpy version is 1.17.0
@jtu.ignore_warning(category=RuntimeWarning, message="Invalid value encountered.*")
def np_fun(*args):
args = [x if jnp.result_type(x) != jnp.bfloat16 else
np.asarray(x, np.float32) for x in args]
return getattr(np, op)(*args, axis=axis, keepdims=keepdims,
interpolation=interpolation)
jnp_fun = partial(getattr(jnp, op), axis=axis, keepdims=keepdims,
interpolation=interpolation)
    # TODO(phawkins): we currently set check_dtypes=False because we aren't as
    # aggressive about promoting to float64. It's not clear we want to mimic
    # NumPy here.
tol_spec = {np.float32: 2e-4, np.float64: 5e-6}
tol = max(jtu.tolerance(a_dtype, tol_spec),
jtu.tolerance(q_dtype, tol_spec))
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_{}_a_shape={}_axis={}_keepdims={}".format(
op, jtu.format_shape_dtype_string(a_shape, a_dtype),
axis, keepdims),
"op": op, "a_shape": a_shape, "a_dtype": a_dtype,
"axis": axis,
"keepdims": keepdims}
for a_dtype in default_dtypes
for a_shape, axis in (
((7,), None),
((47, 7), 0),
((4, 101), 1),
)
for keepdims in [False, True]
for op in ["median", "nanmedian"]))
def testMedian(self, op, a_shape, a_dtype, axis, keepdims):
if op == "median":
a_rng = jtu.rand_default(self.rng())
else:
a_rng = jtu.rand_some_nan(self.rng())
args_maker = lambda: [a_rng(a_shape, a_dtype)]
def np_fun(*args):
args = [x if jnp.result_type(x) != jnp.bfloat16 else
np.asarray(x, np.float32) for x in args]
return getattr(np, op)(*args, axis=axis, keepdims=keepdims)
jnp_fun = partial(getattr(jnp, op), axis=axis, keepdims=keepdims)
    # TODO(phawkins): we currently set check_dtypes=False because we aren't as
    # aggressive about promoting to float64. It's not clear we want to mimic
    # NumPy here.
tol_spec = {np.float32: 2e-4, np.float64: 5e-6}
tol = jtu.tolerance(a_dtype, tol_spec)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testWhereOneArgument(self, shape, dtype):
rng = jtu.rand_some_zero(self.rng())
np_fun = lambda x: np.where(x)
np_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(np_fun)
jnp_fun = lambda x: jnp.where(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
# JIT compilation requires specifying a size statically. Full test of
# this behavior is in testNonzeroSize().
jnp_fun = lambda x: jnp.where(x, size=np.size(x) // 2)
self._CompileAndCheck(jnp_fun, args_maker)
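  # A minimal sketch (not part of the test) of the static-size requirement:
  #   jax.jit(jnp.where)(jnp.array([0, 1, 0]))                   # fails: output shape is data-dependent
  #   jax.jit(partial(jnp.where, size=2))(jnp.array([0, 1, 0]))  # ok: output padded/truncated to size 2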
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name": "_{}".format("_".join(
jtu.format_shape_dtype_string(shape, dtype)
for shape, dtype in zip(shapes, dtypes))),
"shapes": shapes, "dtypes": dtypes
} for shapes in s(filter(_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(all_shapes, 3)))
for dtypes in s(itertools.combinations_with_replacement(all_dtypes, 3)))))
def testWhereThreeArgument(self, shapes, dtypes):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
def np_fun(cond, x, y):
return _promote_like_jnp(partial(np.where, cond))(x, y)
self._CheckAgainstNumpy(np_fun, jnp.where, args_maker)
self._CompileAndCheck(jnp.where, args_maker)
def testWhereScalarPromotion(self):
x = jnp.where(jnp.array([True, False]), 3,
jnp.ones((2,), dtype=jnp.float32))
self.assertEqual(x.dtype, np.dtype(np.float32))
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name": jtu.format_test_name_suffix("", shapes, (np.bool_,) * n + dtypes),
"shapes": shapes, "dtypes": dtypes
} for n in s(range(1, 3))
for shapes in s(filter(
_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(all_shapes, 2 * n + 1)))
for dtypes in s(itertools.combinations_with_replacement(all_dtypes, n + 1)))))
def testSelect(self, shapes, dtypes):
rng = jtu.rand_default(self.rng())
n = len(dtypes) - 1
def args_maker():
condlist = [rng(shape, np.bool_) for shape in shapes[:n]]
choicelist = [rng(shape, dtype)
for shape, dtype in zip(shapes[n:-1], dtypes[:n])]
default = rng(shapes[-1], dtypes[-1])
return condlist, choicelist, default
# TODO(phawkins): float32/float64 type mismatches
def np_fun(condlist, choicelist, default):
choicelist = [x if jnp.result_type(x) != jnp.bfloat16
else x.astype(np.float32) for x in choicelist]
dtype = jnp.result_type(default, *choicelist)
return np.select(condlist,
[np.asarray(x, dtype=dtype) for x in choicelist],
np.asarray(default, dtype=dtype))
self._CheckAgainstNumpy(np_fun, jnp.select, args_maker,
check_dtypes=False)
self._CompileAndCheck(jnp.select, args_maker,
rtol={np.float64: 1e-7, np.complex128: 1e-7})
def testIssue330(self):
x = jnp.full((1, 1), jnp.array([1])[0]) # doesn't crash
self.assertEqual(x[0, 0], 1)
def testScalarDtypePromotion(self):
orig_numpy_result = (1 + np.eye(1, dtype=np.float32)).dtype
jax_numpy_result = (1 + jnp.eye(1, dtype=jnp.float32)).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
def testSymmetrizeDtypePromotion(self):
x = np.eye(3, dtype=np.float32)
orig_numpy_result = ((x + x.T) / 2).dtype
x = jnp.eye(3, dtype=jnp.float32)
jax_numpy_result = ((x + x.T) / 2).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
# NOTE(mattjj): I disabled this test when removing lax._safe_mul because
# introducing the convention 0 * inf = 0 leads to silently wrong results in
# some cases. See this comment for details:
# https://github.com/google/jax/issues/1052#issuecomment-514083352
# def testIssue347(self):
# # https://github.com/google/jax/issues/347
# def test_fail(x):
# x = jnp.sqrt(jnp.sum(x ** 2, axis=1))
# ones = jnp.ones_like(x)
# x = jnp.where(x > 0.5, x, ones)
# return jnp.sum(x)
# x = jnp.array([[1, 2], [3, 4], [0, 0]], dtype=jnp.float64)
# result = api.grad(test_fail)(x)
# assert not np.any(np.isnan(result))
def testIssue453(self):
# https://github.com/google/jax/issues/453
a = np.arange(6) + 1
ans = jnp.reshape(a, (3, 2), order='F')
expected = np.reshape(a, (3, 2), order='F')
self.assertAllClose(ans, expected)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_dtype={}".format(op, pytype.__name__),
"pytype": pytype, "dtype": dtype, "op": op}
for pytype, dtype in [(int, jnp.int_), (float, jnp.float_),
(bool, jnp.bool_), (complex, jnp.complex_)]
for op in ["atleast_1d", "atleast_2d", "atleast_3d"]))
def testAtLeastNdLiterals(self, pytype, dtype, op):
# Fixes: https://github.com/google/jax/issues/634
np_fun = lambda arg: getattr(np, op)(arg).astype(dtype)
jnp_fun = lambda arg: getattr(jnp, op)(arg)
args_maker = lambda: [pytype(2)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{
"testcase_name": "_shape={}_dtype={}_weights={}_minlength={}_length={}".format(
shape, dtype, weights, minlength, length
),
"shape": shape,
"dtype": dtype,
"weights": weights,
"minlength": minlength,
"length": length}
for shape in [(0,), (5,), (10,)]
for dtype in int_dtypes
for weights in [True, False]
for minlength in [0, 20]
for length in [None, 10]
))
def testBincount(self, shape, dtype, weights, minlength, length):
rng = jtu.rand_positive(self.rng())
args_maker = lambda: (rng(shape, dtype), (rng(shape, 'float32') if weights else None))
np_fun = partial(np.bincount, minlength=minlength)
jnp_fun = partial(jnp.bincount, minlength=minlength, length=length)
if length is not None:
self._CompileAndCheck(jnp_fun, args_maker)
if length is None:
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
def testBincountNegative(self):
# Test that jnp.bincount ignores negative values.
x_rng = jtu.rand_int(self.rng(), -100, 100)
w_rng = jtu.rand_uniform(self.rng())
shape = (1000,)
x = x_rng(shape, 'int32')
w = w_rng(shape, 'float32')
xn = np.array(x)
xn[xn < 0] = 0
wn = np.array(w)
np_result = np.bincount(xn[xn >= 0], wn[xn >= 0])
jnp_result = jnp.bincount(x, w)
self.assertAllClose(np_result, jnp_result, check_dtypes=False)
@parameterized.named_parameters(*jtu.cases_from_list(
{"testcase_name": "_case={}".format(i),
"input": input}
for i, input in enumerate([
3,
[3],
[np.array(3)],
[np.array([3])],
[[np.array(3)]],
[[np.array([3])]],
[3, 4, 5],
[
[np.eye(2, dtype=np.int32) * 2, np.zeros((2, 3), dtype=np.int32)],
[np.ones((3, 2), dtype=np.int32), np.eye(3, dtype=np.int32) * 3],
],
[np.array([1, 2, 3]), np.array([2, 3, 4]), 10],
[np.ones((2, 2), dtype=np.int32), np.zeros((2, 2), dtype=np.int32)],
[[np.array([1, 2, 3])], [np.array([2, 3, 4])]],
])))
def testBlock(self, input):
args_maker = lambda: [input]
self._CheckAgainstNumpy(np.block, jnp.block, args_maker)
self._CompileAndCheck(jnp.block, args_maker)
def testLongLong(self):
self.assertAllClose(np.int64(7), api.jit(lambda x: x)(np.longlong(7)))
@jtu.ignore_warning(category=UserWarning,
message="Explicitly requested dtype.*")
def testArange(self):
# test cases inspired by dask tests at
# https://github.com/dask/dask/blob/main/dask/array/tests/test_creation.py#L92
self.assertAllClose(jnp.arange(77),
np.arange(77, dtype=jnp.int_))
self.assertAllClose(jnp.arange(2, 13),
np.arange(2, 13, dtype=jnp.int_))
self.assertAllClose(jnp.arange(4, 21, 9),
np.arange(4, 21, 9, dtype=jnp.int_))
self.assertAllClose(jnp.arange(53, 5, -3),
np.arange(53, 5, -3, dtype=jnp.int_))
self.assertAllClose(jnp.arange(77, dtype=float),
np.arange(77, dtype=float))
self.assertAllClose(jnp.arange(2, 13, dtype=int),
np.arange(2, 13, dtype=int))
self.assertAllClose(jnp.arange(0, 1, -0.5),
np.arange(0, 1, -0.5, dtype=jnp.float_))
self.assertRaises(TypeError, lambda: jnp.arange())
# test that jnp.arange(N) doesn't instantiate an ndarray
self.assertNotEqual(type(jnp.arange(77)), type(np.arange(77)))
self.assertEqual(type(jnp.arange(77)), type(lax.iota(np.int32, 77)))
# test that jnp.arange(N, dtype=int32) doesn't instantiate an ndarray
self.assertNotEqual(type(jnp.arange(77, dtype=jnp.int32)),
type(np.arange(77, dtype=np.int32)))
self.assertEqual(type(jnp.arange(77, dtype=jnp.int32)),
type(lax.iota(np.int32, 77)))
def testArangeJit(self):
ans = api.jit(lambda: jnp.arange(5))()
expected = np.arange(5)
self.assertAllClose(ans, expected)
def testIssue830(self):
a = jnp.arange(4, dtype=jnp.complex64)
self.assertEqual(a.dtype, jnp.complex64)
def testIssue728(self):
assert jnp.allclose(jnp.eye(5000), np.eye(5000))
self.assertEqual(0, np.sum(jnp.eye(1050) - np.eye(1050)))
def testIssue746(self):
jnp.arange(12).reshape(3, 4) # doesn't crash
def testIssue764(self):
x = jnp.linspace(190, 200, 4)
f = api.grad(lambda x: jnp.sum(jnp.tanh(x)))
# Expected values computed with autograd in float64 precision.
expected = np.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171,
7.66067839e-174], np.float64)
self.assertAllClose(f(x), expected, check_dtypes=False)
def testIssue776(self):
"""Tests that the scatter-add transpose rule instantiates symbolic zeros."""
def f(u):
y = jnp.ones(10).at[np.array([2, 4, 5])].add(u)
# The transpose rule for lax.tie_in returns a symbolic zero for its first
# argument.
return lax.tie_in(y, 7.)
self.assertAllClose(np.zeros(3,), api.grad(f)(np.ones(3,)))
# NOTE(mattjj): I disabled this test when removing lax._safe_mul because this
# is a numerical stability issue that should be solved with a custom jvp rule
# of the sigmoid function being differentiated here, not by safe_mul.
# def testIssue777(self):
# x = jnp.linspace(-200, 0, 4, dtype=np.float32)
# f = api.grad(lambda x: jnp.sum(1 / (1 + jnp.exp(-x))))
# self.assertAllClose(f(x), np.array([0., 0., 0., 0.25], dtype=np.float32))
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(op, [()], [dtype]),
"dtype": dtype, "op": op}
for dtype in float_dtypes
for op in ("sqrt", "arccos", "arcsin", "arctan", "sin", "cos", "tan",
"sinh", "cosh", "tanh", "arccosh", "arcsinh", "arctanh", "exp",
"log", "expm1", "log1p")))
def testMathSpecialFloatValues(self, op, dtype):
np_op = getattr(np, op)
np_op = jtu.ignore_warning(category=RuntimeWarning,
message="invalid value.*")(np_op)
np_op = jtu.ignore_warning(category=RuntimeWarning,
message="divide by zero.*")(np_op)
np_op = jtu.ignore_warning(category=RuntimeWarning,
message="overflow.*")(np_op)
jnp_op = getattr(jnp, op)
dtype = np.dtype(dtypes.canonicalize_dtype(dtype)).type
for x in (np.nan, -np.inf, -100., -2., -1., 0., 1., 2., 100., np.inf,
jnp.finfo(dtype).max, np.sqrt(jnp.finfo(dtype).max),
np.sqrt(jnp.finfo(dtype).max) * 2.):
if (op in ("sin", "cos", "tan") and
jtu.device_under_test() == "tpu"):
continue # TODO(b/132196789): fix and reenable.
x = dtype(x)
expected = np_op(x)
actual = jnp_op(x)
tol = jtu.tolerance(dtype, {np.float32: 1e-3, np.float64: 1e-7})
self.assertAllClose(expected, actual, atol=tol,
rtol=tol)
def testIssue883(self):
# from https://github.com/google/jax/issues/883
raise SkipTest("we decided to disallow arrays as static args")
@partial(api.jit, static_argnums=(1,))
def f(x, v):
return x
x = jnp.ones((10, 10))
v = jnp.array([1, 2, 3])
_ = f(x, v)
_ = f(x, v) # doesn't crash
def testReductionOfOutOfBoundsAxis(self): # Issue 888
x = jnp.ones((3, 4))
self.assertRaises(ValueError, lambda: jnp.sum(x, axis=2))
def testIssue956(self):
self.assertRaises(TypeError, lambda: jnp.ndarray((1, 1)))
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name":
"_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}"
.format(shape, dtype, out_dtype, axis, ddof, keepdims),
"shape": shape, "dtype": dtype, "out_dtype": out_dtype, "axis": axis,
"ddof": ddof, "keepdims": keepdims}
for shape in [(5,), (10, 5)]
for dtype in all_dtypes
for out_dtype in inexact_dtypes
for axis in [None, 0, -1]
for ddof in [0, 1, 2]
for keepdims in [False, True]))
def testVar(self, shape, dtype, out_dtype, axis, ddof, keepdims):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
@jtu.ignore_warning(category=RuntimeWarning,
message="Degrees of freedom <= 0 for slice.")
def np_fun(x):
out = np.var(x.astype(jnp.promote_types(np.float32, dtype)),
axis=axis, ddof=ddof, keepdims=keepdims)
return out.astype(out_dtype)
jnp_fun = partial(jnp.var, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)
tol = jtu.tolerance(out_dtype, {np.float16: 1e-1, np.float32: 1e-3,
np.float64: 1e-3, np.complex128: 1e-6})
if (jnp.issubdtype(dtype, jnp.complexfloating) and
not jnp.issubdtype(out_dtype, jnp.complexfloating)):
self.assertRaises(ValueError, lambda: jnp_fun(*args_maker()))
else:
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, rtol=tol,
atol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name":
"_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}"
.format(shape, dtype, out_dtype, axis, ddof, keepdims),
"shape": shape, "dtype": dtype, "out_dtype": out_dtype, "axis": axis,
"ddof": ddof, "keepdims": keepdims}
for shape in [(5,), (10, 5)]
for dtype in all_dtypes
for out_dtype in inexact_dtypes
for axis in [None, 0, -1]
for ddof in [0, 1, 2]
for keepdims in [False, True]))
def testNanVar(self, shape, dtype, out_dtype, axis, ddof, keepdims):
rng = jtu.rand_some_nan(self.rng())
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
@jtu.ignore_warning(category=RuntimeWarning,
message="Degrees of freedom <= 0 for slice.")
def np_fun(x):
out = np.nanvar(x.astype(jnp.promote_types(np.float32, dtype)),
axis=axis, ddof=ddof, keepdims=keepdims)
return out.astype(out_dtype)
jnp_fun = partial(jnp.nanvar, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)
tol = jtu.tolerance(out_dtype, {np.float16: 1e-1, np.float32: 1e-3,
np.float64: 1e-3, np.complex128: 1e-6})
if (jnp.issubdtype(dtype, jnp.complexfloating) and
not jnp.issubdtype(out_dtype, jnp.complexfloating)):
self.assertRaises(ValueError, lambda: jnp_fun(*args_maker()))
else:
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, rtol=tol,
atol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name":
"_shape={}_dtype={}_y_shape={}_y_dtype={}_rowvar={}_ddof={}_bias={}_fweights={}_aweights={}".format(
shape, dtype, y_shape, y_dtype, rowvar, ddof, bias, fweights, aweights),
"shape": shape, "y_shape": y_shape, "dtype": dtype, "y_dtype": y_dtype,"rowvar": rowvar, "ddof": ddof,
"bias": bias, "fweights": fweights, "aweights": aweights}
for shape in [(5,), (10, 5), (5, 10)]
for dtype in all_dtypes
for y_dtype in [None, dtype]
for rowvar in [True, False]
for y_shape in _get_y_shapes(y_dtype, shape, rowvar)
for bias in [True, False]
for ddof in [None, 2, 3]
for fweights in [True, False]
for aweights in [True, False]))
def testCov(self, shape, dtype, y_shape, y_dtype, rowvar, ddof, bias, fweights, aweights):
rng = jtu.rand_default(self.rng())
wrng = jtu.rand_positive(self.rng())
wdtype = np.real(dtype(0)).dtype
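    # np.cov weights (fweights/aweights) need one entry per observation:
    # observations lie along the trailing axis when rowvar=True (or for a
    # single row of data), and along the leading axis otherwise.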
wshape = shape[-1:] if rowvar or shape[0] == 1 else shape[:1]
args_maker = lambda: [rng(shape, dtype),
rng(y_shape, y_dtype) if y_dtype else None,
wrng(wshape, int) if fweights else None,
wrng(wshape, wdtype) if aweights else None]
kwargs = dict(rowvar=rowvar, ddof=ddof, bias=bias)
np_fun = lambda m, y, f, a: np.cov(m, y, fweights=f, aweights=a, **kwargs)
jnp_fun = lambda m, y, f, a: jnp.cov(m, y, fweights=f, aweights=a, **kwargs)
tol = {jnp.bfloat16: 5E-2, np.float16: 1E-2, np.float32: 1e-5,
np.float64: 1e-13, np.complex64: 1e-5, np.complex128: 1e-13}
tol = 7e-2 if jtu.device_under_test() == "tpu" else tol
tol = jtu.join_tolerance(tol, jtu.tolerance(dtype))
self._CheckAgainstNumpy(
np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
rtol=tol)
def testIssue967(self):
self.assertRaises(TypeError, lambda: jnp.zeros(1.5))
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype={}_rowvar={}".format(
shape, dtype.__name__, rowvar),
"shape": shape, "dtype": dtype, "rowvar": rowvar}
for shape in [(5,), (10, 5), (3, 10)]
for dtype in number_dtypes
for rowvar in [True, False]))
def testCorrCoef(self, shape, dtype, rowvar):
rng = jtu.rand_default(self.rng())
def args_maker():
ok = False
while not ok:
x = rng(shape, dtype)
ok = not np.any(np.isclose(np.std(x), 0.0))
return (x,)
np_fun = partial(np.corrcoef, rowvar=rowvar)
np_fun = jtu.ignore_warning(
category=RuntimeWarning, message="invalid value encountered.*")(np_fun)
jnp_fun = partial(jnp.corrcoef, rowvar=rowvar)
tol = 1e-2 if jtu.device_under_test() == "tpu" else None
self._CheckAgainstNumpy(
np_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(jtu.format_shape_dtype_string(shape, dtype),
"None" if end_dtype is None else jtu.format_shape_dtype_string(end_shape, end_dtype),
"None" if begin_dtype is None else jtu.format_shape_dtype_string(begin_shape, begin_dtype)),
"shape": shape, "dtype": dtype, "end_shape": end_shape,
"end_dtype": end_dtype, "begin_shape": begin_shape,
"begin_dtype": begin_dtype}
for dtype in number_dtypes
for end_dtype in [None] + [dtype]
for begin_dtype in [None] + [dtype]
for shape in [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE]
for begin_shape in (
[None] if begin_dtype is None
else [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE])
for end_shape in (
[None] if end_dtype is None
else [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE])))
def testEDiff1d(self, shape, dtype, end_shape, end_dtype, begin_shape,
begin_dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype),
(None if end_dtype is None else rng(end_shape, end_dtype)),
(None if begin_dtype is None else rng(begin_shape, begin_dtype))]
np_fun = lambda x, to_end, to_begin: np.ediff1d(x, to_end, to_begin)
jnp_fun = lambda x, to_end, to_begin: jnp.ediff1d(x, to_end, to_begin)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
def testEDiff1dWithDtypeCast(self):
rng = jtu.rand_default(self.rng())
shape = jtu.NUMPY_SCALAR_SHAPE
dtype = jnp.float32
end_dtype = jnp.int32
args_maker = lambda: [rng(shape, dtype), rng(shape, end_dtype), rng(shape, dtype)]
np_fun = lambda x, to_end, to_begin: np.ediff1d(x, to_end, to_begin)
jnp_fun = lambda x, to_end, to_begin: jnp.ediff1d(x, to_end, to_begin)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shapes={}_dtype={}_indexing={}_sparse={}".format(
shapes, dtype, indexing, sparse),
"shapes": shapes, "dtype": dtype, "indexing": indexing,
"sparse": sparse}
for shapes in [(), (5,), (5, 3)]
for dtype in number_dtypes
for indexing in ['xy', 'ij']
for sparse in [True, False]))
def testMeshGrid(self, shapes, dtype, indexing, sparse):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [(x,) for x in shapes],
[dtype] * len(shapes))
np_fun = partial(np.meshgrid, indexing=indexing, sparse=sparse)
jnp_fun = partial(jnp.meshgrid, indexing=indexing, sparse=sparse)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
def testMgrid(self):
assertAllEqual = partial(self.assertAllClose, atol=0, rtol=0)
assertAllEqual(np.mgrid[:4], jnp.mgrid[:4])
assertAllEqual(np.mgrid[:4,], jnp.mgrid[:4,])
assertAllEqual(np.mgrid[:4], jax.jit(lambda: jnp.mgrid[:4])())
assertAllEqual(np.mgrid[:5, :5], jnp.mgrid[:5, :5])
assertAllEqual(np.mgrid[:3, :2], jnp.mgrid[:3, :2])
assertAllEqual(np.mgrid[1:4:2], jnp.mgrid[1:4:2])
assertAllEqual(np.mgrid[1:5:3, :5], jnp.mgrid[1:5:3, :5])
assertAllEqual(np.mgrid[:3, :2, :5], jnp.mgrid[:3, :2, :5])
assertAllEqual(np.mgrid[:3:2, :2, :5], jnp.mgrid[:3:2, :2, :5])
# Corner cases
assertAllEqual(np.mgrid[:], jnp.mgrid[:])
    # When the step length is a complex number, floating-point rounding can
    # make the jnp and np values differ slightly.
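    # e.g. np.mgrid[-1:1:5j] yields 5 evenly spaced points including both
    # endpoints: [-1., -0.5, 0., 0.5, 1.]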
atol = 1e-6
rtol = 1e-6
self.assertAllClose(np.mgrid[-1:1:5j],
jnp.mgrid[-1:1:5j],
atol=atol,
rtol=rtol)
self.assertAllClose(np.mgrid[3:4:7j],
jnp.mgrid[3:4:7j],
atol=atol,
rtol=rtol)
self.assertAllClose(np.mgrid[1:6:8j, 2:4],
jnp.mgrid[1:6:8j, 2:4],
atol=atol,
rtol=rtol)
# Non-integer steps
self.assertAllClose(np.mgrid[0:3.5:0.5],
jnp.mgrid[0:3.5:0.5],
atol=atol,
rtol=rtol)
self.assertAllClose(np.mgrid[1.3:4.2:0.3],
jnp.mgrid[1.3:4.2:0.3],
atol=atol,
rtol=rtol)
# abstract tracer value for jnp.mgrid slice
with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
"slice start of jnp.mgrid"):
jax.jit(lambda a, b: jnp.mgrid[a:b])(0, 2)
def testOgrid(self):
def assertListOfArraysEqual(xs, ys):
self.assertIsInstance(xs, list)
self.assertIsInstance(ys, list)
self.assertEqual(len(xs), len(ys))
for x, y in zip(xs, ys):
self.assertArraysEqual(x, y)
self.assertArraysEqual(np.ogrid[:5], jnp.ogrid[:5])
self.assertArraysEqual(np.ogrid[:5], jax.jit(lambda: jnp.ogrid[:5])())
self.assertArraysEqual(np.ogrid[1:7:2], jnp.ogrid[1:7:2])
# List of arrays
assertListOfArraysEqual(np.ogrid[:5,], jnp.ogrid[:5,])
assertListOfArraysEqual(np.ogrid[0:5, 1:3], jnp.ogrid[0:5, 1:3])
assertListOfArraysEqual(np.ogrid[1:3:2, 2:9:3], jnp.ogrid[1:3:2, 2:9:3])
assertListOfArraysEqual(np.ogrid[:5, :9, :11], jnp.ogrid[:5, :9, :11])
# Corner cases
self.assertArraysEqual(np.ogrid[:], jnp.ogrid[:])
# Complex number steps
atol = 1e-6
rtol = 1e-6
self.assertAllClose(np.ogrid[-1:1:5j],
jnp.ogrid[-1:1:5j],
atol=atol,
rtol=rtol)
# Non-integer steps
self.assertAllClose(np.ogrid[0:3.5:0.3],
jnp.ogrid[0:3.5:0.3],
atol=atol,
rtol=rtol)
self.assertAllClose(np.ogrid[1.2:4.8:0.24],
jnp.ogrid[1.2:4.8:0.24],
atol=atol,
rtol=rtol)
# abstract tracer value for ogrid slice
with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
"slice start of jnp.ogrid"):
jax.jit(lambda a, b: jnp.ogrid[a:b])(0, 2)
def testR_(self):
a = np.arange(6).reshape((2,3))
self.assertArraysEqual(np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])],
jnp.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])])
self.assertArraysEqual(np.r_['-1', a, a], jnp.r_['-1', a, a])
self.assertArraysEqual(np.r_['0,2', [1,2,3], [4,5,6]], jnp.r_['0,2', [1,2,3], [4,5,6]])
self.assertArraysEqual(np.r_['0,2,0', [1,2,3], [4,5,6]], jnp.r_['0,2,0', [1,2,3], [4,5,6]])
self.assertArraysEqual(np.r_['1,2,0', [1,2,3], [4,5,6]], jnp.r_['1,2,0', [1,2,3], [4,5,6]])
# negative 1d axis start
self.assertArraysEqual(np.r_['0,4,-1', [1,2,3], [4,5,6]], jnp.r_['0,4,-1', [1,2,3], [4,5,6]])
self.assertArraysEqual(np.r_['0,4,-2', [1,2,3], [4,5,6]], jnp.r_['0,4,-2', [1,2,3], [4,5,6]])
# matrix directives
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
self.assertArraysEqual(np.r_['r',[1,2,3], [4,5,6]], jnp.r_['r',[1,2,3], [4,5,6]])
self.assertArraysEqual(np.r_['c', [1, 2, 3], [4, 5, 6]], jnp.r_['c', [1, 2, 3], [4, 5, 6]])
# bad directive
with self.assertRaisesRegex(ValueError, "could not understand directive.*"):
jnp.r_["asdfgh",[1,2,3]]
# abstract tracer value for r_ slice
with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
"slice start of jnp.r_"):
jax.jit(lambda a, b: jnp.r_[a:b])(0, 2)
# Complex number steps
atol = 1e-6
rtol = 1e-6
self.assertAllClose(np.r_[-1:1:6j],
jnp.r_[-1:1:6j],
atol=atol,
rtol=rtol)
self.assertAllClose(np.r_[-1:1:6j, [0]*3, 5, 6],
jnp.r_[-1:1:6j, [0]*3, 5, 6],
atol=atol,
rtol=rtol)
# Non-integer steps
self.assertAllClose(np.r_[1.2:4.8:0.24],
jnp.r_[1.2:4.8:0.24],
atol=atol,
rtol=rtol)
def testC_(self):
a = np.arange(6).reshape((2, 3))
self.assertArraysEqual(np.c_[np.array([1,2,3]), np.array([4,5,6])],
jnp.c_[np.array([1,2,3]), np.array([4,5,6])])
self.assertArraysEqual(np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])],
jnp.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])])
self.assertArraysEqual(np.c_['-1', a, a], jnp.c_['-1', a, a])
self.assertArraysEqual(np.c_['0,2', [1,2,3], [4,5,6]], jnp.c_['0,2', [1,2,3], [4,5,6]])
self.assertArraysEqual(np.c_['0,2,0', [1,2,3], [4,5,6]], jnp.c_['0,2,0', [1,2,3], [4,5,6]])
self.assertArraysEqual(np.c_['1,2,0', [1,2,3], [4,5,6]], jnp.c_['1,2,0', [1,2,3], [4,5,6]])
# negative 1d axis start
self.assertArraysEqual(np.c_['0,4,-1', [1,2,3], [4,5,6]], jnp.c_['0,4,-1', [1,2,3], [4,5,6]])
self.assertArraysEqual(np.c_['0,4,-2', [1,2,3], [4,5,6]], jnp.c_['0,4,-2', [1,2,3], [4,5,6]])
# matrix directives, avoid numpy deprecation warning
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
self.assertArraysEqual(np.c_['r',[1,2,3], [4,5,6]], jnp.c_['r',[1,2,3], [4,5,6]])
self.assertArraysEqual(np.c_['c', [1, 2, 3], [4, 5, 6]], jnp.c_['c', [1, 2, 3], [4, 5, 6]])
# bad directive
with self.assertRaisesRegex(ValueError, "could not understand directive.*"):
jnp.c_["asdfgh",[1,2,3]]
# abstract tracer value for c_ slice
with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
"slice start of jnp.c_"):
jax.jit(lambda a, b: jnp.c_[a:b])(0, 2)
# Complex number steps
atol = 1e-6
rtol = 1e-6
self.assertAllClose(np.c_[-1:1:6j],
jnp.c_[-1:1:6j],
atol=atol,
rtol=rtol)
# Non-integer steps
self.assertAllClose(np.c_[1.2:4.8:0.24],
jnp.c_[1.2:4.8:0.24],
atol=atol,
rtol=rtol)
def testS_(self):
self.assertEqual(np.s_[1:2:20],jnp.s_[1:2:20])
def testIndex_exp(self):
self.assertEqual(np.index_exp[5:3:2j],jnp.index_exp[5:3:2j])
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_retstep={}_dtype={}").format(
start_shape, stop_shape, num, endpoint, retstep,
dtype.__name__ if dtype else "None"),
"start_shape": start_shape, "stop_shape": stop_shape,
"num": num, "endpoint": endpoint, "retstep": retstep,
"dtype": dtype}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
for retstep in [True, False]
for dtype in number_dtypes + [None,]))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testLinspace(self, start_shape, stop_shape, num, endpoint, retstep, dtype):
if num == 1 and not endpoint and numpy_version < (1, 18):
raise SkipTest("Numpy < 1.18 has a linspace bug.")
rng = jtu.rand_default(self.rng())
# relax default tolerances slightly
tol = jtu.tolerance(dtype if dtype else np.float32) * 10
args_maker = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])
start, stop = args_maker()
ndim = len(np.shape(start + stop))
for axis in range(-ndim, ndim):
jnp_op = lambda start, stop: jnp.linspace(
start, stop, num,
endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
# NumPy 1.20.0 changed the semantics of linspace to floor for integer
# dtypes.
if numpy_version >= (1, 20) or not np.issubdtype(dtype, np.integer):
np_op = lambda start, stop: np.linspace(
start, stop, num,
endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
else:
def np_op(start, stop):
out = np.linspace(start, stop, num, endpoint=endpoint,
retstep=retstep, axis=axis)
if retstep:
return np.floor(out[0]).astype(dtype), out[1]
else:
return np.floor(out).astype(dtype)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
check_dtypes=False, tol=tol)
      # Floating-point differences between jitted and non-jitted computation,
      # combined with rounding, cause unavoidable variation in integer
      # truncation for some inputs.
if dtype in (inexact_dtypes + [None,]):
self._CompileAndCheck(jnp_op, args_maker,
check_dtypes=False, atol=tol, rtol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(dtype), "dtype": dtype}
for dtype in number_dtypes))
def testLinspaceEndpoints(self, dtype):
"""Regression test for Issue #3014."""
rng = jtu.rand_default(self.rng())
endpoints = rng((2,), dtype)
out = jnp.linspace(*endpoints, 10, dtype=dtype)
self.assertAllClose(out[np.array([0, -1])], endpoints, rtol=0, atol=0)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_base={}_dtype={}").format(
start_shape, stop_shape, num, endpoint, base,
dtype.__name__ if dtype else "None"),
"start_shape": start_shape,
"stop_shape": stop_shape,
"num": num, "endpoint": endpoint, "base": base,
"dtype": dtype}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
for base in [10.0, 2, np.e]
for dtype in inexact_dtypes + [None,]))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testLogspace(self, start_shape, stop_shape, num,
endpoint, base, dtype):
if (dtype in int_dtypes and
jtu.device_under_test() in ("gpu", "tpu") and
not config.x64_enabled):
raise unittest.SkipTest("GPUx32 truncated exponentiation"
" doesn't exactly match other platforms.")
rng = jtu.rand_default(self.rng())
# relax default tolerances slightly
tol = {np.float16: 2e-2, np.float32: 1e-2, np.float64: 1e-6,
np.complex64: 1e-3, np.complex128: 1e-6}
args_maker = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])
start, stop = args_maker()
ndim = len(np.shape(start + stop))
for axis in range(-ndim, ndim):
jnp_op = lambda start, stop: jnp.logspace(
start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)
@jtu.ignore_warning(category=RuntimeWarning,
message="overflow encountered in power")
def np_op(start, stop):
return np.logspace(start, stop, num, endpoint=endpoint,
base=base, dtype=dtype, axis=axis)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
check_dtypes=False, tol=tol)
if dtype in (inexact_dtypes + [None,]):
# Why do compiled and op-by-op float16 np.power numbers differ
# slightly more than expected?
atol = {np.float16: 1e-2}
self._CompileAndCheck(jnp_op, args_maker,
check_dtypes=False, atol=atol, rtol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_dtype={}_axis={}").format(
start_shape, stop_shape, num, endpoint,
dtype.__name__ if dtype else "None", axis),
"start_shape": start_shape,
"stop_shape": stop_shape,
"num": num, "endpoint": endpoint,
"dtype": dtype, "axis": axis}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
# NB: numpy's geomspace gives nonsense results on integer types
for dtype in inexact_dtypes + [None,]
for axis in range(-max(len(start_shape), len(stop_shape)),
max(len(start_shape), len(stop_shape)))))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testGeomspace(self, start_shape, stop_shape, num,
endpoint, dtype, axis):
rng = jtu.rand_default(self.rng())
# relax default tolerances slightly
tol = {np.float16: 4e-3, np.float32: 2e-3, np.float64: 1e-14,
np.complex128: 1e-14}
def args_maker():
"""Test the set of inputs np.geomspace is well-defined on."""
start, stop = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])()
      # np.geomspace can't handle differently ranked tensors
      # containing negative numbers.
start, stop = jnp.broadcast_arrays(start, stop)
if dtype in complex_dtypes:
return start, stop
# to avoid NaNs, non-complex start and stop cannot
# differ in sign, elementwise
start = start * jnp.sign(start) * jnp.sign(stop)
return start, stop
start, stop = args_maker()
def jnp_op(start, stop):
return jnp.geomspace(start, stop, num, endpoint=endpoint, dtype=dtype,
axis=axis)
def np_op(start, stop):
start = start.astype(np.float32) if dtype == jnp.bfloat16 else start
stop = stop.astype(np.float32) if dtype == jnp.bfloat16 else stop
return np.geomspace(
start, stop, num, endpoint=endpoint,
dtype=dtype if dtype != jnp.bfloat16 else np.float32,
axis=axis).astype(dtype)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
check_dtypes=False, tol=tol)
if dtype in (inexact_dtypes + [None,]):
self._CompileAndCheck(jnp_op, args_maker,
check_dtypes=False, atol=tol, rtol=tol)
def testDisableNumpyRankPromotionBroadcasting(self):
try:
prev_flag = config.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "allow"
jnp.ones(2) + jnp.ones((1, 2)) # works just fine
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
try:
prev_flag = config.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "raise"
self.assertRaises(ValueError, lambda: jnp.ones(2) + jnp.ones((1, 2)))
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
try:
prev_flag = config.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "warn"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
jnp.ones(2) + jnp.ones((1, 2))
assert len(w) > 0
msg = str(w[-1].message)
expected_msg = ("Following NumPy automatic rank promotion for add on "
"shapes (2,) (1, 2).")
self.assertEqual(msg[:len(expected_msg)], expected_msg)
prev_len = len(w)
jnp.ones(2) + 3
self.assertEqual(len(w), prev_len) # don't want to warn for scalars
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
def testDisableNumpyRankPromotionBroadcastingDecorator(self):
with jax.numpy_rank_promotion("allow"):
jnp.ones(2) + jnp.ones((1, 2)) # works just fine
with jax.numpy_rank_promotion("raise"):
self.assertRaises(ValueError, lambda: jnp.ones(2) + jnp.ones((1, 2)))
with jax.numpy_rank_promotion("warn"):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
jnp.ones(2) + jnp.ones((1, 2))
assert len(w) > 0
msg = str(w[-1].message)
expected_msg = ("Following NumPy automatic rank promotion for add on "
"shapes (2,) (1, 2).")
self.assertEqual(msg[:len(expected_msg)], expected_msg)
prev_len = len(w)
jnp.ones(2) + 3
self.assertEqual(len(w), prev_len) # don't want to warn for scalars
def testStackArrayArgument(self):
# tests https://github.com/google/jax/issues/1271
@api.jit
def foo(x):
return jnp.stack(x)
foo(np.zeros(2)) # doesn't crash
@api.jit
def foo(x):
return jnp.concatenate(x)
foo(np.zeros((2, 2))) # doesn't crash
def testReluGradientConstants(self):
# This is a regression test that verifies that constants associated with the
# gradient of np.maximum (from lax._balanced_eq) aren't hoisted into the
# outermost jaxpr. This was producing some large materialized constants for
# every relu activation in a model.
def body(i, xy):
x, y = xy
y = y + jax.grad(lambda z: jnp.sum(jnp.maximum(z, 0.)))(x)
return x, y
f = lambda y: lax.fori_loop(0, 5, body, (y, y))
jaxpr = jax.make_jaxpr(f)(np.zeros((3, 4), np.float32))
self.assertFalse(
any(np.array_equal(x, np.full((3, 4), 2., dtype=np.float32))
for x in jaxpr.consts))
@parameterized.named_parameters(
{"testcase_name": "_from={}_to={}".format(from_shape, to_shape),
"from_shape": from_shape, "to_shape": to_shape}
for from_shape, to_shape in [
[(1, 3), (4, 3)],
[(3,), (2, 1, 3)],
[(3,), (3, 3)],
[(1,), (3,)],
[(1,), 3],
])
def testBroadcastTo(self, from_shape, to_shape):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [from_shape], [np.float32])
np_op = lambda x: np.broadcast_to(x, to_shape)
jnp_op = lambda x: jnp.broadcast_to(x, to_shape)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(
{"testcase_name": f"_{shapes}", "shapes": shapes, "broadcasted_shape": broadcasted_shape}
for shapes, broadcasted_shape in [
[[], ()],
[[()], ()],
[[(1, 3), (4, 3)], (4, 3)],
[[(3,), (2, 1, 3)], (2, 1, 3)],
[[(3,), (3, 3)], (3, 3)],
[[(1,), (3,)], (3,)],
[[(1,), 3], (3,)],
[[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)],
[[[1], [0, 1]], (0, 1)],
[[(1,), np.array([0, 1])], (0, 1)],
])
def testBroadcastShapes(self, shapes, broadcasted_shape):
# Test against np.broadcast_shapes once numpy 1.20 is minimum required version
np.testing.assert_equal(jnp.broadcast_shapes(*shapes), broadcasted_shape)
def testBroadcastToIssue1522(self):
self.assertRaisesRegex(
ValueError, "Incompatible shapes for broadcasting: .*",
lambda: jnp.broadcast_to(np.ones((2, 3)), (1, 3)))
def testBroadcastToIntIssue1548(self):
self.assertAllClose(jnp.broadcast_to(1, (3, 2)), np.ones((3, 2)),
check_dtypes=False)
def testBroadcastToOnScalar(self):
self.assertIsInstance(jnp.broadcast_to(10.0, ()), jnp.ndarray)
self.assertIsInstance(np.broadcast_to(10.0, ()), np.ndarray)
def testPrecision(self):
ones_1d = np.ones((2,))
ones_2d = np.ones((2, 2))
ones_3d = np.ones((2, 2, 2))
HIGHEST = lax.Precision.HIGHEST
jtu.assert_dot_precision(None, jnp.dot, ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.dot, precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.dot, precision=HIGHEST),
ones_3d, ones_3d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.matmul, precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.vdot, precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.tensordot, axes=2, precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.tensordot, axes=(0, 0), precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.tensordot, axes=((0,), (0,)), precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.einsum, 'i,i', precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.einsum, 'ij,ij', precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.inner, precision=HIGHEST),
ones_1d, ones_1d)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shape={}_varargs={} axis={}_dtype={}".format(
shape, varargs, axis, dtype),
"shape": shape, "varargs": varargs, "axis": axis, "dtype": dtype}
for shape in [(10,), (10, 15), (10, 15, 20)]
for _num_axes in range(len(shape))
for varargs in itertools.combinations(range(1, len(shape) + 1), _num_axes)
for axis in itertools.combinations(range(len(shape)), _num_axes)
for dtype in inexact_dtypes))
def testGradient(self, shape, varargs, axis, dtype):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_fun = lambda y: jnp.gradient(y, *varargs, axis=axis)
np_fun = lambda y: np.gradient(y, *varargs, axis=axis)
self._CheckAgainstNumpy(
np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
def testZerosShapeErrors(self):
# see https://github.com/google/jax/issues/1822
self.assertRaisesRegex(
TypeError,
"Shapes must be 1D sequences of concrete values of integer type.*",
lambda: jnp.zeros(1.))
self.assertRaisesRegex(
TypeError,
r"Shapes must be 1D sequences of concrete values of integer type.*\n"
"If using `jit`, try using `static_argnums` or applying `jit` to smaller subfunctions.",
lambda: api.jit(jnp.zeros)(2))
def testTraceMethod(self):
x = self.rng().randn(3, 4).astype(jnp.float_)
self.assertAllClose(x.trace(), jnp.array(x).trace())
self.assertAllClose(x.trace(), api.jit(lambda y: y.trace())(x))
def testIntegerPowersArePrecise(self):
# See https://github.com/google/jax/pull/3036
# Checks if the squares of float32 integers have no numerical errors.
# It should be satisfied with all integers less than sqrt(2**24).
x = jnp.arange(-2**12, 2**12, dtype=jnp.int32)
np.testing.assert_array_equal(jnp.square(x.astype(jnp.float32)), x * x)
np.testing.assert_array_equal(x.astype(jnp.float32) ** 2, x * x)
# Similarly for cubes.
x = jnp.arange(-2**8, 2**8, dtype=jnp.int32)
np.testing.assert_array_equal(x.astype(jnp.float32) ** 3, x * x * x)
x = np.arange(10, dtype=np.float32)
for i in range(10):
self.assertAllClose(x.astype(jnp.float32) ** i, x ** i,
check_dtypes=False)
def testToBytes(self):
v = np.arange(12, dtype=np.int32).reshape(3, 4)
for order in ['C', 'F']:
self.assertEqual(jnp.asarray(v).tobytes(order), v.tobytes(order))
def testToList(self):
v = np.arange(12, dtype=np.int32).reshape(3, 4)
self.assertEqual(jnp.asarray(v).tolist(), v.tolist())
def testReductionWithRepeatedAxisError(self):
with self.assertRaisesRegex(ValueError, r"duplicate value in 'axis': \(0, 0\)"):
jnp.sum(jnp.arange(3), (0, 0))
def testArangeConcretizationError(self):
msg = r"It arose in jax.numpy.arange argument `{}`".format
with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('stop')):
jax.jit(jnp.arange)(3)
with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('start')):
jax.jit(lambda start: jnp.arange(start, 3))(0)
with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('stop')):
jax.jit(lambda stop: jnp.arange(0, stop))(3)
def testIssue2347(self):
# https://github.com/google/jax/issues/2347
object_list = List[Tuple[jnp.array, float, float, jnp.array, bool]]
self.assertRaises(TypeError, jnp.array, object_list)
np_object_list = np.array(object_list)
self.assertRaises(TypeError, jnp.array, np_object_list)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("", shapes, dtypes),
"shapes": shapes, "dtypes": dtypes}
for shapes in filter(
_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(all_shapes, 2))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, complex_dtypes) for s in shapes))))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testLogaddexpComplex(self, shapes, dtypes):
@jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")
def np_op(x1, x2):
return np.log(np.exp(x1) + np.exp(x2))
rng = jtu.rand_some_nan(self.rng())
args_maker = lambda: tuple(rng(shape, dtype) for shape, dtype in zip(shapes, dtypes))
if jtu.device_under_test() == 'tpu':
tol = {np.complex64: 1e-3, np.complex128: 1e-10}
else:
tol = {np.complex64: 1e-5, np.complex128: 1e-14}
self._CheckAgainstNumpy(_promote_like_jnp(np_op), jnp.logaddexp, args_maker, tol=tol)
self._CompileAndCheck(jnp.logaddexp, args_maker, rtol=tol, atol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("", shapes, dtypes),
"shapes": shapes, "dtypes": dtypes}
for shapes in filter(
_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(all_shapes, 2))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, complex_dtypes) for s in shapes))))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testLogaddexp2Complex(self, shapes, dtypes):
@jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")
def np_op(x1, x2):
return np.log2(np.exp2(x1) + np.exp2(x2))
rng = jtu.rand_some_nan(self.rng())
args_maker = lambda: tuple(rng(shape, dtype) for shape, dtype in zip(shapes, dtypes))
if jtu.device_under_test() == 'tpu':
tol = {np.complex64: 1e-3, np.complex128: 1e-10}
else:
tol = {np.complex64: 1e-5, np.complex128: 1e-14}
self._CheckAgainstNumpy(_promote_like_jnp(np_op), jnp.logaddexp2, args_maker, tol=tol)
self._CompileAndCheck(jnp.logaddexp2, args_maker, rtol=tol, atol=tol)
# Most grad tests are at the lax level (see lax_test.py), but we add some here
# as needed for e.g. particular compound ops of interest.
GradTestSpec = collections.namedtuple(
"GradTestSpec",
["op", "nargs", "order", "rng_factory", "dtypes", "name", "tol"])
def grad_test_spec(op, nargs, order, rng_factory, dtypes, name=None, tol=None):
return GradTestSpec(
op, nargs, order, rng_factory, dtypes, name or op.__name__, tol)
GRAD_TEST_RECORDS = [
grad_test_spec(jnp.arcsinh, nargs=1, order=2,
rng_factory=jtu.rand_positive,
dtypes=[np.float64, np.complex64],
tol={np.complex64: 2e-2}),
grad_test_spec(jnp.arccosh, nargs=1, order=2,
rng_factory=jtu.rand_positive,
dtypes=[np.float64, np.complex64],
tol={np.complex64: 2e-2}),
grad_test_spec(jnp.arctanh, nargs=1, order=2,
rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),
dtypes=[np.float64, np.complex64],
tol={np.complex64: 2e-2}),
grad_test_spec(jnp.logaddexp, nargs=2, order=1,
rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),
dtypes=[np.float64], tol=1e-4),
grad_test_spec(jnp.logaddexp2, nargs=2, order=2,
rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),
dtypes=[np.float64], tol=1e-4),
]
GradSpecialValuesTestSpec = collections.namedtuple(
"GradSpecialValuesTestSpec", ["op", "values", "order"])
GRAD_SPECIAL_VALUE_TEST_RECORDS = [
GradSpecialValuesTestSpec(jnp.arcsinh, [0., 1000.], 2),
GradSpecialValuesTestSpec(jnp.arccosh, [1000.], 2),
GradSpecialValuesTestSpec(jnp.arctanh, [0.], 2),
GradSpecialValuesTestSpec(jnp.sinc, [0.], 1),
]
class NumpyGradTests(jtu.JaxTestCase):
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.name, shapes, itertools.repeat(dtype)),
"op": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes, "dtype": dtype,
"order": rec.order, "tol": rec.tol}
for shapes in itertools.combinations_with_replacement(nonempty_shapes, rec.nargs)
for dtype in rec.dtypes)
for rec in GRAD_TEST_RECORDS))
def testOpGrad(self, op, rng_factory, shapes, dtype, order, tol):
rng = rng_factory(self.rng())
tol = jtu.join_tolerance(tol, {np.float32: 1e-1, np.float64: 1e-3,
np.complex64: 1e-1, np.complex128: 1e-3})
args = tuple(rng(shape, dtype) for shape in shapes)
check_grads(op, args, order, ["fwd", "rev"], tol, tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(rec.op.__name__, special_value),
"op": rec.op, "special_value": special_value, "order": rec.order}
for special_value in rec.values)
for rec in GRAD_SPECIAL_VALUE_TEST_RECORDS))
def testOpGradSpecialValue(self, op, special_value, order):
check_grads(op, (special_value,), order, ["fwd", "rev"],
atol={np.float32: 3e-3})
def testSincAtZero(self):
# Some manual tests for sinc at zero, since it doesn't have well-behaved
# numerical derivatives at zero
def deriv(f):
return lambda x: api.jvp(f, (x,), (1.,))[1]
def apply_all(fns, x):
for f in fns:
x = f(x)
return x
d1 = 0.
for ops in itertools.combinations_with_replacement([deriv, api.grad], 1):
self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d1)
d2 = -np.pi ** 2 / 3
for ops in itertools.combinations_with_replacement([deriv, api.grad], 2):
self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d2)
d3 = 0.
for ops in itertools.combinations_with_replacement([deriv, api.grad], 3):
self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d3)
d4 = np.pi ** 4 / 5
for ops in itertools.combinations_with_replacement([deriv, api.grad], 4):
self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d4)
def testSincGradArrayInput(self):
# tests for a bug almost introduced in #5077
jax.grad(lambda x: jnp.sinc(x).sum())(jnp.arange(10.)) # doesn't crash
def testTakeAlongAxisIssue1521(self):
# https://github.com/google/jax/issues/1521
idx = jnp.repeat(jnp.arange(3), 10).reshape((30, 1))
def f(x):
y = x * jnp.arange(3.).reshape((1, 3))
return jnp.take_along_axis(y, idx, -1).sum()
check_grads(f, (1.,), order=1)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("", shapes, itertools.repeat(dtype)),
"shapes": shapes, "dtype": dtype}
for shapes in filter(
_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(nonempty_shapes, 2))
for dtype in (np.complex128, )))
def testGradLogaddexpComplex(self, shapes, dtype):
rng = jtu.rand_default(self.rng())
args = tuple(rng(shape, dtype) for shape in shapes)
if jtu.device_under_test() == "tpu":
tol = 5e-2
else:
tol = 3e-2
check_grads(jnp.logaddexp, args, 1, ["fwd", "rev"], tol, tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("", shapes, itertools.repeat(dtype)),
"shapes": shapes, "dtype": dtype}
for shapes in filter(
_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(nonempty_shapes, 2))
for dtype in (np.complex128, )))
def testGradLogaddexp2Complex(self, shapes, dtype):
rng = jtu.rand_default(self.rng())
args = tuple(rng(shape, dtype) for shape in shapes)
if jtu.device_under_test() == "tpu":
tol = 5e-2
else:
tol = 3e-2
check_grads(jnp.logaddexp2, args, 1, ["fwd", "rev"], tol, tol)
class NumpySignaturesTest(jtu.JaxTestCase):
def testWrappedSignaturesMatch(self):
"""Test that jax.numpy function signatures match numpy."""
jnp_funcs = {name: getattr(jnp, name) for name in dir(jnp)}
func_pairs = {name: (fun, fun.__np_wrapped__) for name, fun in jnp_funcs.items()
if hasattr(fun, '__np_wrapped__')}
assert len(func_pairs) > 0
# TODO(jakevdp): fix some of the following signatures. Some are due to wrong argument names.
unsupported_params = {
'angle': ['deg'],
'asarray': ['like'],
'broadcast_to': ['subok', 'array'],
'clip': ['kwargs'],
'corrcoef': ['ddof', 'bias', 'dtype'],
'cov': ['dtype'],
'empty_like': ['subok', 'order'],
'einsum': ['kwargs'],
'einsum_path': ['einsum_call'],
'eye': ['order', 'like'],
'identity': ['like'],
'full': ['order', 'like'],
'full_like': ['subok', 'order'],
'histogram': ['normed'],
'histogram2d': ['normed'],
'histogramdd': ['normed'],
'ones': ['order', 'like'],
'ones_like': ['subok', 'order'],
'tri': ['like'],
'unwrap': ['period'],
'zeros_like': ['subok', 'order']
}
extra_params = {
'broadcast_to': ['arr'],
'einsum': ['precision'],
'einsum_path': ['subscripts'],
}
mismatches = {}
for name, (jnp_fun, np_fun) in func_pairs.items():
# broadcast_shapes is not available in numpy < 1.20
if numpy_version < (1, 20) and name == "broadcast_shapes":
continue
# Some signatures have changed; skip for older numpy versions.
if numpy_version < (1, 19) and name in ['einsum_path', 'gradient', 'isscalar']:
continue
# Note: can't use inspect.getfullargspec due to numpy issue
# https://github.com/numpy/numpy/issues/12225
try:
np_params = inspect.signature(np_fun).parameters
except ValueError:
# Some functions cannot be inspected
continue
jnp_params = inspect.signature(jnp_fun).parameters
extra = set(extra_params.get(name, []))
unsupported = set(unsupported_params.get(name, []))
# Checks to prevent tests from becoming out-of-date. If these fail,
# it means that extra_params or unsupported_params need to be updated.
assert extra.issubset(jnp_params), f"{name}: extra={extra} is not a subset of jnp_params={set(jnp_params)}."
assert not unsupported.intersection(jnp_params), f"{name}: unsupported={unsupported} overlaps with jnp_params={set(jnp_params)}."
# Skip functions that only have *args and **kwargs; we can't introspect these further.
var_args = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)
if all(p.kind in var_args for p in jnp_params.values()):
continue
if all(p.kind in var_args for p in np_params.values()):
continue
# Remove known extra parameters.
jnp_params = {a: p for a, p in jnp_params.items() if a not in extra}
# Remove known unsupported parameters.
np_params = {a: p for a, p in np_params.items() if a not in unsupported}
# Older versions of numpy may have fewer parameters; to avoid extraneous errors on older numpy
# versions, we allow for jnp to have more parameters.
if list(jnp_params)[:len(np_params)] != list(np_params):
mismatches[name] = {'np_params': list(np_params), 'jnp_params': list(jnp_params)}
self.assertEqual(mismatches, {})
_all_dtypes: List[str] = [
"bool_",
"uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64",
"complex64", "complex128",
]
def _all_numpy_ufuncs() -> Iterator[str]:
"""Generate the names of all ufuncs in the top-level numpy namespace."""
for name in dir(np):
f = getattr(np, name)
if isinstance(f, np.ufunc):
yield name
def _dtypes_for_ufunc(name: str) -> Iterator[Tuple[str, ...]]:
"""Generate valid dtypes of inputs to the given numpy ufunc."""
func = getattr(np, name)
for arg_dtypes in itertools.product(_all_dtypes, repeat=func.nin):
args = (np.ones(1, dtype=dtype) for dtype in arg_dtypes)
try:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "divide by zero", RuntimeWarning)
_ = func(*args)
except TypeError:
pass
else:
yield arg_dtypes
class NumpyUfuncTests(jtu.JaxTestCase):
@parameterized.named_parameters(
{"testcase_name": f"_{name}_{','.join(arg_dtypes)}",
"name": name, "arg_dtypes": arg_dtypes}
for name in _all_numpy_ufuncs()
for arg_dtypes in jtu.cases_from_list(_dtypes_for_ufunc(name)))
def testUfuncInputTypes(self, name, arg_dtypes):
# TODO(jakevdp): fix following failures and remove from this exception list.
if (name in ['divmod', 'floor_divide', 'fmod', 'gcd', 'left_shift', 'mod',
'power', 'remainder', 'right_shift', 'rint', 'square']
and 'bool_' in arg_dtypes):
self.skipTest(f"jax.numpy does not support {name}{tuple(arg_dtypes)}")
if name == 'arctanh' and jnp.issubdtype(arg_dtypes[0], jnp.complexfloating):
self.skipTest("np.arctanh & jnp.arctanh have mismatched NaNs for complex input.")
for dtype in arg_dtypes:
jtu.skip_if_unsupported_type(dtype)
jnp_op = getattr(jnp, name)
np_op = getattr(np, name)
np_op = jtu.ignore_warning(category=RuntimeWarning,
message="divide by zero.*")(np_op)
args_maker = lambda: tuple(np.ones(1, dtype=dtype) for dtype in arg_dtypes)
try:
jnp_op(*args_maker())
except NotImplementedError:
self.skipTest(f"jtu.{name} is not yet implemented.")
# large tol comes from the fact that numpy returns float16 in places
# that jnp returns float32. e.g. np.cos(np.uint8(0))
self._CheckAgainstNumpy(np_op, jnp_op, args_maker, check_dtypes=False, tol=1E-2)
class NumpyDocTests(jtu.JaxTestCase):
def test_lax_numpy_docstrings(self):
# Test that docstring wrapping & transformation didn't fail.
# Functions that have their own docstrings & don't wrap numpy.
known_exceptions = {'broadcast_arrays', 'vectorize'}
for name in dir(jnp):
if name in known_exceptions or name.startswith('_'):
continue
# We only check signatures of functions.
obj = getattr(jnp, name)
if isinstance(obj, type) or not callable(obj):
continue
# Some jnp functions are imported from numpy or jax.dtypes directly.
if any(obj is getattr(mod, obj.__name__, None) for mod in [np, dtypes]):
continue
wrapped_fun = obj.__np_wrapped__
# If the wrapped function has a docstring, obj should too
if wrapped_fun.__doc__ and not obj.__doc__:
raise Exception(f"jnp.{name} does not contain wrapped docstring.")
if obj.__doc__ and "*Original docstring below.*" not in obj.__doc__:
raise Exception(f"jnp.{name} does not have a wrapped docstring.")
def test_parse_numpydoc(self):
# Unit test ensuring that _parse_numpydoc correctly parses docstrings for all
# functions in NumPy's top-level namespace.
section_titles = {'Attributes', 'Examples', 'Notes',
'Parameters', 'Raises', 'References',
'Returns', 'See also', 'See Also', 'Warnings', 'Warns'}
headings = [title + '\n' + '-'*len(title) for title in section_titles]
for name in dir(np):
if name.startswith('_'):
continue
obj = getattr(np, name)
if isinstance(obj, type):
continue
if not callable(obj):
continue
if 'built-in function' in repr(obj):
continue
parsed = _parse_numpydoc(obj.__doc__)
# Check that no docstring is handled gracefully.
if not obj.__doc__:
self.assertEqual(parsed, ParsedDoc(obj.__doc__))
continue
# Check that no unexpected section names are found.
extra_keys = parsed.sections.keys() - section_titles
if extra_keys:
raise ValueError(f"Extra section headers found in np.{name}: {extra_keys}")
# Check that every docstring has a summary.
if not parsed.summary:
raise ValueError(f"No summary found for np.{name}")
# Check that no expected headings are missed.
for heading in headings:
assert heading not in parsed.front_matter
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
StarcoderdataPython
|
67204
|
from ..models import *
from .availability_calendar_api import *
from .calendar_api import *
import json
from datetime import datetime, timedelta
def get_best(event_id):
"""
:param event_id: the id of the event we want to get best times of
:return: A list of sorted pairs: [ (time, [users]), (time, [users]).... ]
where time is the starting time and users is list of users who can make it.
"""
event_set = Event.objects.filter(event_id=event_id)
event = event_set[0]
# make the queryset of users into a list of users
users = list(event.members.all())
# make all these in minutes
duration = int(event.duration)
st = event.potential_start_date
# round up the potential starting minutes
if st.minute > 30:
new_st = st.replace(minute=0)
new_st = new_st + timedelta(hours=1)
elif st.minute > 0:
new_st = st.replace(minute=30)
elif st.minute == 0 or st.minute == 30:
new_st = st
start = convert_to_minutes(new_st, new_st)
et = event.potential_end_date
# round down potential ending minutes
if et.minute > 30:
new_et = et.replace(minute=30)
elif et.minute > 0:
new_et = et.replace(minute=0)
elif et.minute == 0 or et.minute == 30:
new_et = et
end = convert_to_minutes(new_et, new_st)
min_hour = event.no_earlier_than.hour
min_minute = event.no_earlier_than.minute
max_hour = event.no_later_than.hour
max_minute = event.no_later_than.minute
# Dictionary: starting times as keys and values is list of people who can make it,
# keys incremented by duration
optimal_times = {}
# from start to end time, add keys of 30 minute increments with querysets of every user attending
for i in range(start,end+1, 30):
if i + duration > end:
break
# only add times later than min time and earlier than max time
time = convert_to_datetime(new_st, i)
if min_hour < time.hour < max_hour:
optimal_times[i] = users.copy()
elif time.hour == min_hour:
if time.minute >= min_minute:
optimal_times[i] = users.copy()
elif time.hour == max_hour:
if time.minute <= max_minute:
optimal_times[i] = users.copy()
# have a list of all users times
for u in users:
# user_sched = free_busy_month(u)
# schedule = json.dumps(user_sched, default=json_datetime_handler)
# Schedule.objects.create(user=u, availability=schedule)
# get user's schedules in datetime format
for times in get_users_saved_schedule(u):
start_time = list(times.values())[0]
# round DOWN the starting minutes
if start_time.minute > 30:
starting = start_time.replace(minute=30)
elif start_time.minute > 0:
starting = start_time.replace(minute=0)
elif start_time.minute == 0 or start_time.minute == 30:
starting = start_time
the_start = convert_to_minutes(starting, new_st)
end_time = list(times.values())[1]
# round UP the ending minutes
            if end_time.minute > 30:
                ending = end_time.replace(minute=0)
                ending = ending + timedelta(hours=1)
            elif end_time.minute > 0:
                ending = end_time.replace(minute=30)
            elif end_time.minute == 0 or end_time.minute == 30:
ending = end_time
the_end = convert_to_minutes(ending, new_st)
# try to find the keys in 30 minute increments and remove the user
# from the corresponding list
for i in range(the_start, the_end+1, 30):
if i in optimal_times:
dict_value = optimal_times.get(i)
if u in dict_value:
dict_value.remove(u)
new_dict = {i: dict_value}
optimal_times.update(new_dict)
# go through the optimal times and find which list contains
# most users then append to new list
curr_max = 0
if len(optimal_times) > 0:
curr_max = len(list(optimal_times.values())[0])
append_list = []
for times in optimal_times:
if len(optimal_times[times]) >= curr_max:
# append a list of pairs, first = datetime of start second = list of attending
# with the ending of the list having more people available
append_list.append((convert_to_datetime(new_st, times), optimal_times.get(times)))
curr_max = len(optimal_times[times])
# return the reversed list
return append_list[::-1]
# convert a datetime to minutes elapsed
def convert_to_minutes(time, starting):
elapsed = time - starting
minutes = int(elapsed.total_seconds()/60)
return minutes
# convert minutes to a datetime by getting starting datetime and timedelta by minutes
def convert_to_datetime(starting, mins):
time = starting + timedelta(minutes=mins)
return time
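# Usage sketch (illustrative only; the event id and the surrounding Django setup
# are assumptions, not something defined in this module):
#   best = get_best(event_id=42)
#   for start_time, available_users in best:
#       print(start_time, "works for", len(available_users), "member(s)")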
|
StarcoderdataPython
|
3224480
|
<gh_stars>0
import random
random.seed(42)
from virus import Virus
class Person(object):
''' Person objects will populate the simulation. '''
def __init__(self, _id, is_vaccinated, infection=None):
''' We start out with is_alive = True, because we don't make vampires or zombies.
All other values will be set by the simulation when it makes each Person object.
If person is chosen to be infected when the population is created, the simulation
should instantiate a Virus object and set it as the value
self.infection. Otherwise, self.infection should be set to None.
'''
        self._id = _id  # int
        self.is_alive = True  # boolean
        self.is_vaccinated = is_vaccinated  # boolean
        self.infection = infection  # Virus object or None
def did_survive_infection(self):
''' Generate a random number and compare to virus's mortality_rate.
If random number is smaller, person dies from the disease.
If Person survives, they become vaccinated and they have no infection.
Return a boolean value indicating whether they survived the infection.
'''
        # Only called when the infection attribute is not None.
        if random.random() < self.infection.mortality_rate:
            self.is_alive = False
            return False
        # Survivors become vaccinated and clear their infection.
        self.is_vaccinated = True
        self.infection = None
        return True
''' These are simple tests to ensure that you are instantiating your Person class correctly. '''
def test_vacc_person_instantiation():
# create some people to test if our init method works as expected
person = Person(1, True)
assert person._id == 1
assert person.is_alive is True
assert person.is_vaccinated is True
assert person.infection is None
def test_not_vacc_person_instantiation():
person = Person(2, False)
    assert person._id == 2
    assert person.is_alive is True
    assert person.is_vaccinated is False
    assert person.infection is None
def test_sick_person_instantiation():
# Create a Virus object to give a Person object an infection
virus = Virus("Dysentery", 0.7, 0.2)
# Create a Person object and give them the virus infection
person = Person(3, False, virus)
    assert person._id == 3
    assert person.is_alive is True
    assert person.is_vaccinated is False
    assert person.infection is virus
def test_did_survive_infection():
# TODO: Create a Virus object to give a Person object an infection
virus = Virus("Dysentery", 0.7, 0.2)
# TODO: Create a Person object and give them the virus infection
person = Person(4, False, virus)
# Resolve whether the Person survives the infection or not
survived = person.did_survive_infection()
# Check if the Person survived or not
if survived:
assert person.is_alive is True
        # A survivor becomes vaccinated and is no longer infected.
        assert person.is_vaccinated is True
        assert person.infection is None
else:
assert person.is_alive is False
        # A person who did not survive should not become vaccinated.
        assert person.is_vaccinated is False
|
StarcoderdataPython
|
101809
|
<reponame>hymer-up/streamlink
import unittest
from streamlink.plugins.stv import STV
class TestPluginSTV(unittest.TestCase):
def test_can_handle_url(self):
self.assertTrue(STV.can_handle_url('https://player.stv.tv/live'))
self.assertTrue(STV.can_handle_url('http://player.stv.tv/live'))
def test_can_handle_url_negative(self):
self.assertFalse(STV.can_handle_url('http://example.com/live'))
|
StarcoderdataPython
|
41188
|
<gh_stars>1-10
import datetime
from nba_api.stats.endpoints import Scoreboard
from nba_api.stats.library.parameters import LeagueID
from nba_api.stats.library.data import teams
def get_teams():
    # Build a {team_id: team_name} lookup from the static nba_api team table.
    return dict((team[0], team[5]) for team in teams)
def get_games(date):
teams = get_teams()
gamefinder = Scoreboard(league_id=LeagueID.nba,
day_offset=0,
game_date=date)
games_dict = gamefinder.get_normalized_dict()
for game in games_dict['GameHeader']:
game['HOME_TEAM_NAME'] = teams[game['HOME_TEAM_ID']]
game['VISITOR_TEAM_NAME'] = teams[game['VISITOR_TEAM_ID']]
return games_dict['GameHeader']
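# Usage sketch (illustrative; the date string below is an arbitrary example and the
# call hits the live NBA stats API):
#   games = get_games("2021-01-15")
#   for game in games:
#       print(game["VISITOR_TEAM_NAME"], "at", game["HOME_TEAM_NAME"])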
|
StarcoderdataPython
|
3254503
|
<filename>bookmarks/urls.py<gh_stars>0
from django.conf.urls import include, url
from . import views
from rest_framework import routers
router = routers.DefaultRouter(trailing_slash=False)
router.register(r'bookmarks', views.BookmarkViewSet, 'bookmarks')
router.register(r'categories', views.CategoryViewSet, 'categories')
router.register(r'keywords', views.KeywordViewSet, "keywords")
router.register(r'folders', views.FolderViewSet, 'folders')
router.register(r'notifications', views.NotificationViewSet, 'notifications')
router.register(r'websites', views.WebsiteViewSet, 'websites')
router.register(r'friends', views.FriendsViewSet, 'bk-friends')
router.register(r'share/$', views.Share, 'share')
router.register(r'public-bookmarks', views.PublicBookmarkViewSet, 'public-bookmarks')
urlpatterns = [
url(r'^', include(router.urls), name='api-root'),
url(r'^search/$', views.query.as_view({'get': 'list'})),
url(r'^auth/me/upload', views.Upload.as_view({'get': 'list'})),
url(r'^auth/me/$', views.MeView.as_view({'get': 'retrieve'}), name='me'),
url(r'^auth/', include('djoser.urls')),
url(r'^auth/', include('djoser.urls.authtoken')),
]
|
StarcoderdataPython
|
75637
|
<gh_stars>0
from Scenes.TitleScreen import TitleScene
import pygame
import Tools.Images
import datetime
def run_game(width, height, fps, starting_scene):
pygame.init()
screen = pygame.display.set_mode((width, height))
clock = pygame.time.Clock()
current_time = datetime.datetime.now()
time_passed = datetime.datetime.now() - current_time
active_scene = starting_scene
pressed_keys = []
frames = 0
while active_scene is not None:
new_time = datetime.datetime.now()
time_passed += new_time - current_time
current_time = new_time
frames += 1
if time_passed > datetime.timedelta(seconds=1):
print("FPS: " + str(frames))
frames = 0
time_passed = datetime.timedelta(seconds=0)
# event filtering
filtered_events = []
for event in pygame.event.get():
quit_attempt = False
if event.type == pygame.QUIT:
quit_attempt = True
elif event.type == pygame.KEYDOWN:
alt_pressed = pygame.K_LALT in pressed_keys or pygame.K_RALT in pressed_keys
if event.key == pygame.K_ESCAPE:
quit_attempt = True
elif event.key == pygame.K_F4 and alt_pressed:
quit_attempt = True
elif event.key not in pressed_keys:
pressed_keys.append(event.key)
elif event.type == pygame.KEYUP:
if event.key in pressed_keys:
pressed_keys.remove(event.key)
if quit_attempt:
active_scene.terminate()
else:
filtered_events.append(event)
active_scene.process_input(filtered_events, pressed_keys)
active_scene.update(screen)
active_scene.render(screen)
active_scene = active_scene.next
pygame.display.flip()
clock.tick(fps)
il = Tools.Images.ImageLoader()
run_game(1200, 800, 60, TitleScene(il))
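# Sketch of the scene interface implied by the loop above (inferred from how
# active_scene is used here, not the project's actual base class in Scenes/):
#
#   class SceneBase:
#       def __init__(self):
#           self.next = self
#       def process_input(self, events, pressed_keys): ...
#       def update(self, screen): ...
#       def render(self, screen): ...
#       def terminate(self):
#           self.next = None  # makes run_game's while loop exit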
|
StarcoderdataPython
|
82484
|
a = 2
b = 3
# ternary (conditional) expression
result = 'a>b' if a > b else 'a<b'
print(result)
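# A second, equivalent illustration of the conditional expression
# (added for clarity; not part of the original snippet):
larger = a if a > b else b
print(larger)  # 3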
|
StarcoderdataPython
|
29115
|
<reponame>gaivin/GWeb
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: <NAME>
@license: Apache Licence
@contact: <EMAIL>
@site:
@software: PyCharm
@file: chart.py
@time: 10/10/2018 4:14 PM
"""
from pyecharts import Bar, Line, WordCloud
import pandas as pd
import random, os.path as path
DATA_PATH = path.join(path.dirname(__file__), "data")
def history_chart():
BOTTOM = 1
TOP = 200
XAXIS_COUNT = 10
XAXIS_INTERVAL = ((TOP - BOTTOM) // XAXIS_COUNT) - 1
chart = Line(title="Python History Ratings", subtitle="Source: www.tiobe.com",
title_color="DarkSlateGray", background_color="Azure",
width=1000, height=500, page_title="Python Ratings History")
chart.use_theme('walden')
df = pd.read_csv(path.join(DATA_PATH, "pythonratehistory.csv"), sep=",")
TOP = len(df.Python)
values = list(df.Python[BOTTOM:TOP])
title = list(df.Date[BOTTOM:TOP])
chart.add(name="Rating", x_axis=title, y_axis=values, yaxis_name="Rating (%)",
xaxis_name="Date",
# xaxis_interval=XAXIS_INTERVAL,
# is_label_show=True,
# label_formatter="{a}%",
is_legend_show=False,
is_smooth=True,
is_symbol_show=False,
line_width=4,
mark_point=['max'],
mark_point_symbolsize=60,
mark_line=["max", "min"],
is_datazoom_show=True,
is_visualmap=True,
visual_range=[0, 8])
return chart
def language_rank_chart():
TOP = 10
AXIS_LABEL_TEXT_COLOR = "BLACK"
bar = Bar(title="Program Language Ratings for September 2018", subtitle="Source: www.tiobe.com",
title_color="DarkSlateGray", background_color="Azure", width=1000, height=500,
page_title="Program Language Ratings"
)
# bar.use_theme('walden')
df = pd.read_csv(path.join(DATA_PATH, "program_language_rank.csv"), sep=",", usecols=[2, 3])
values = [float(x.replace("%", "")) for x in df.Ratings[0:TOP]]
title = list(df.ProgrammingLanguage[0:TOP])
bar.add(name="Rating", x_axis=title, y_axis=values, is_label_show=True,
yaxis_name="Rating (%)", yaxis_label_textcolor=AXIS_LABEL_TEXT_COLOR,
xaxis_name="Program Language", xaxis_interval=0, xaxis_label_textcolor=AXIS_LABEL_TEXT_COLOR,
label_formatter="{c}%", is_legend_show=False,
label_text_color=AXIS_LABEL_TEXT_COLOR,
mark_point=[{"coord": [2, 3], "name": "3rd"}, {"coord": [1, 2], "name": "2nd"},
{"coord": [0, 1], "name": "1st"}],
mark_point_symbolsize=80,
mark_point_textcolor="SteelBlue",
)
return bar
def world_cloud_chart():
CAT1 = 1000
CAT2 = 800
OFFSET = 20
item_dict = {
# "Python": CAT1 + random.randrange(-OFFSET, OFFSET),
# "Anywhere": CAT1 + random.randrange(-OFFSET, OFFSET),
"Web Apps": CAT1 + random.randrange(-OFFSET, OFFSET),
"Files": CAT1 + random.randrange(-OFFSET, OFFSET),
"Consoles": CAT1 + random.randrange(-OFFSET, OFFSET),
"Databases": CAT1 + random.randrange(-OFFSET, OFFSET),
"Scheduled Tasks": CAT1 + random.randrange(-OFFSET, OFFSET),
"Easy Deploy": CAT2 + random.randrange(-OFFSET, OFFSET),
"Develop Anywhere": CAT2 + random.randrange(-OFFSET, OFFSET),
"Amazing Support": CAT2 + random.randrange(-OFFSET, OFFSET),
"Teach & Learn": CAT2 + random.randrange(-OFFSET, OFFSET), }
name_list = item_dict.keys()
value_list = item_dict.values()
wordcloud = WordCloud(title="Python Anywhere Features and Advantages", width=1000, height=500,
page_title="Python anywhere Word Cloud")
wordcloud.add("", name_list, value_list, word_size_range=[30, 60])
return wordcloud
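# Usage sketch (not part of the original module): pyecharts 0.x chart objects can be
# rendered to standalone HTML pages. The output filenames below are examples only.
if __name__ == "__main__":
    language_rank_chart().render("program_language_ratings.html")
    history_chart().render("python_ratings_history.html")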
|
StarcoderdataPython
|
77706
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Encoder_Control_GUI_ONLY.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Tester(object):
def setupUi(self, Tester):
Tester.setObjectName("Tester")
Tester.resize(595, 358)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../../Downloads/Cirris.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Tester.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(Tester)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.verticalLayout_7 = QtWidgets.QVBoxLayout()
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox_2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.RecordingEnco = QtWidgets.QSlider(self.groupBox_2)
self.RecordingEnco.setOrientation(QtCore.Qt.Horizontal)
self.RecordingEnco.setObjectName("RecordingEnco")
self.verticalLayout_2.addWidget(self.RecordingEnco)
self.RegisterEnco = QtWidgets.QCheckBox(self.groupBox_2)
self.RegisterEnco.setObjectName("RegisterEnco")
self.verticalLayout_2.addWidget(self.RegisterEnco)
self.verticalLayout_7.addWidget(self.groupBox_2)
self.groupBox_5 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_5.setObjectName("groupBox_5")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.groupBox_5)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.ToConnectButton = QtWidgets.QPushButton(self.groupBox_5)
self.ToConnectButton.setObjectName("ToConnectButton")
self.verticalLayout_3.addWidget(self.ToConnectButton)
self.ToDisconnectButton = QtWidgets.QPushButton(self.groupBox_5)
self.ToDisconnectButton.setObjectName("ToDisconnectButton")
self.verticalLayout_3.addWidget(self.ToDisconnectButton)
self.DisplayPlotButton = QtWidgets.QPushButton(self.groupBox_5)
self.DisplayPlotButton.setObjectName("DisplayPlotButton")
self.verticalLayout_3.addWidget(self.DisplayPlotButton)
self.ToResetDistance = QtWidgets.QPushButton(self.groupBox_5)
self.ToResetDistance.setObjectName("ToResetDistance")
self.verticalLayout_3.addWidget(self.ToResetDistance)
self.verticalLayout_7.addWidget(self.groupBox_5)
self.gridLayout_2.addLayout(self.verticalLayout_7, 0, 1, 1, 1)
self.groupBox_4 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_4.setTitle("")
self.groupBox_4.setObjectName("groupBox_4")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox_4)
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout_8 = QtWidgets.QVBoxLayout()
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.textBoxDirectory = QtWidgets.QLabel(self.groupBox_4)
self.textBoxDirectory.setObjectName("textBoxDirectory")
self.verticalLayout_8.addWidget(self.textBoxDirectory)
self.textEditDirectory = QtWidgets.QTextEdit(self.groupBox_4)
self.textEditDirectory.setObjectName("textEditDirectory")
self.verticalLayout_8.addWidget(self.textEditDirectory)
self.DirectoryConfirmB = QtWidgets.QPushButton(self.groupBox_4)
self.DirectoryConfirmB.setObjectName("DirectoryConfirmB")
self.verticalLayout_8.addWidget(self.DirectoryConfirmB)
self.horizontalLayout_2.addLayout(self.verticalLayout_8)
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.textBoxFile = QtWidgets.QLabel(self.groupBox_4)
self.textBoxFile.setObjectName("textBoxFile")
self.verticalLayout_4.addWidget(self.textBoxFile)
self.textEditFile = QtWidgets.QTextEdit(self.groupBox_4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.textEditFile.sizePolicy().hasHeightForWidth())
self.textEditFile.setSizePolicy(sizePolicy)
self.textEditFile.setObjectName("textEditFile")
self.verticalLayout_4.addWidget(self.textEditFile)
self.FileConfirmButton = QtWidgets.QPushButton(self.groupBox_4)
self.FileConfirmButton.setObjectName("FileConfirmButton")
self.verticalLayout_4.addWidget(self.FileConfirmButton)
self.horizontalLayout_2.addLayout(self.verticalLayout_4)
self.gridLayout.addLayout(self.horizontalLayout_2, 0, 0, 1, 2)
self.verticalLayout_9 = QtWidgets.QVBoxLayout()
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.textBoxDataInterval = QtWidgets.QLabel(self.groupBox_4)
self.textBoxDataInterval.setObjectName("textBoxDataInterval")
self.verticalLayout_9.addWidget(self.textBoxDataInterval)
self.DataIntervalSpinBox = QtWidgets.QSpinBox(self.groupBox_4)
self.DataIntervalSpinBox.setObjectName("DataIntervalSpinBox")
self.verticalLayout_9.addWidget(self.DataIntervalSpinBox)
self.DataIntervalButton = QtWidgets.QPushButton(self.groupBox_4)
self.DataIntervalButton.setObjectName("DataIntervalButton")
self.verticalLayout_9.addWidget(self.DataIntervalButton)
self.gridLayout.addLayout(self.verticalLayout_9, 1, 0, 1, 1)
self.verticalLayout_10 = QtWidgets.QVBoxLayout()
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.gridLayout.addLayout(self.verticalLayout_10, 1, 1, 1, 1)
self.gridLayout_2.addWidget(self.groupBox_4, 0, 2, 1, 2)
self.urlRepo = QtWidgets.QLabel(self.centralwidget)
self.urlRepo.setObjectName("urlRepo")
self.gridLayout_2.addWidget(self.urlRepo, 1, 0, 1, 3)
self.CloseButton = QtWidgets.QPushButton(self.centralwidget)
self.CloseButton.setObjectName("CloseButton")
self.gridLayout_2.addWidget(self.CloseButton, 1, 3, 1, 1)
self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_3.setObjectName("groupBox_3")
self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox_3)
self.verticalLayout.setObjectName("verticalLayout")
self.lcdTimeRecording = QtWidgets.QLCDNumber(self.groupBox_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lcdTimeRecording.sizePolicy().hasHeightForWidth())
self.lcdTimeRecording.setSizePolicy(sizePolicy)
self.lcdTimeRecording.setObjectName("lcdTimeRecording")
self.verticalLayout.addWidget(self.lcdTimeRecording)
self.lcdTextTimeRecording = QtWidgets.QLabel(self.groupBox_3)
self.lcdTextTimeRecording.setObjectName("lcdTextTimeRecording")
self.verticalLayout.addWidget(self.lcdTextTimeRecording)
self.lcdPositionChange = QtWidgets.QLCDNumber(self.groupBox_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lcdPositionChange.sizePolicy().hasHeightForWidth())
self.lcdPositionChange.setSizePolicy(sizePolicy)
self.lcdPositionChange.setObjectName("lcdPositionChange")
self.verticalLayout.addWidget(self.lcdPositionChange)
self.lcdTextPositionChange = QtWidgets.QLabel(self.groupBox_3)
self.lcdTextPositionChange.setObjectName("lcdTextPositionChange")
self.verticalLayout.addWidget(self.lcdTextPositionChange)
self.lcdTimeChange = QtWidgets.QLCDNumber(self.groupBox_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lcdTimeChange.sizePolicy().hasHeightForWidth())
self.lcdTimeChange.setSizePolicy(sizePolicy)
self.lcdTimeChange.setObjectName("lcdTimeChange")
self.verticalLayout.addWidget(self.lcdTimeChange)
self.lcdTextTimeChange = QtWidgets.QLabel(self.groupBox_3)
self.lcdTextTimeChange.setObjectName("lcdTextTimeChange")
self.verticalLayout.addWidget(self.lcdTextTimeChange)
self.lcdDistance = QtWidgets.QLCDNumber(self.groupBox_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lcdDistance.sizePolicy().hasHeightForWidth())
self.lcdDistance.setSizePolicy(sizePolicy)
self.lcdDistance.setObjectName("lcdDistance")
self.verticalLayout.addWidget(self.lcdDistance)
self.lcdTextDistance = QtWidgets.QLabel(self.groupBox_3)
self.lcdTextDistance.setObjectName("lcdTextDistance")
self.verticalLayout.addWidget(self.lcdTextDistance)
self.gridLayout_2.addWidget(self.groupBox_3, 0, 0, 1, 1)
Tester.setCentralWidget(self.centralwidget)
self.statusBar = QtWidgets.QStatusBar(Tester)
self.statusBar.setObjectName("statusBar")
Tester.setStatusBar(self.statusBar)
self.retranslateUi(Tester)
QtCore.QMetaObject.connectSlotsByName(Tester)
def retranslateUi(self, Tester):
_translate = QtCore.QCoreApplication.translate
Tester.setWindowTitle(_translate("Tester", "Interface de contrôle"))
self.groupBox_2.setTitle(_translate("Tester", "Encoder"))
self.RegisterEnco.setText(_translate("Tester", "Enregistrement"))
self.groupBox_5.setTitle(_translate("Tester", "Connectivité"))
self.ToConnectButton.setText(_translate("Tester", "Connexion"))
self.ToDisconnectButton.setText(_translate("Tester", "Déconnexion"))
self.DisplayPlotButton.setText(_translate("Tester", "Graphique"))
self.ToResetDistance.setText(_translate("Tester", "Reset distance"))
self.textBoxDirectory.setText(_translate("Tester", "Dossier"))
self.DirectoryConfirmB.setText(_translate("Tester", "Confirmer"))
self.textBoxFile.setText(_translate("Tester", "Fichier"))
self.FileConfirmButton.setText(_translate("Tester", "Confirmer"))
self.textBoxDataInterval.setText(_translate("Tester", "Data Interval"))
self.DataIntervalButton.setText(_translate("Tester", "Confirmer"))
self.urlRepo.setText(_translate("Tester", "https://github.com/WilliamBonilla62/GUIPythonEncodeur"))
self.CloseButton.setText(_translate("Tester", "Fermer"))
self.groupBox_3.setTitle(_translate("Tester", "Afficher données"))
self.lcdTextTimeRecording.setText(_translate("Tester", "Time recording [s]"))
self.lcdTextPositionChange.setText(_translate("Tester", "Position Change"))
self.lcdTextTimeChange.setText(_translate("Tester", "Time change [ms]"))
self.lcdTextDistance.setText(_translate("Tester", "Distance [dm]"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Tester = QtWidgets.QMainWindow()
ui = Ui_Tester()
ui.setupUi(Tester)
Tester.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
3377035
|
<gh_stars>1-10
#!/usr/bin/env python2
# -*- coding: utf8 -*-
#
# Copyright (c) 2014 unfoldingWord
# http://creativecommons.org/licenses/MIT/
# See LICENSE file for details.
#
# Contributors:
# <NAME> <<EMAIL>>
#
# Requires PyGithub for unfoldingWord export.
'''
Converts translationWords from JSON to Markdown.
'''
import os
import re
import sys
import json
import codecs
import urllib2
linknamere = re.compile(ur'\|.*?(\]\])', re.UNICODE)
sugre = re.compile(ur'<h2>(Translation Suggestions)</h2>', re.UNICODE)
lire = re.compile(ur'<li>(.*?)</li>', re.UNICODE)
def getURL(url):
try:
request = urllib2.urlopen(url)
content = request.read()
except:
print " => ERROR retrieving %s\nCheck the URL" % url
sys.exit(1)
return content
def clean(text):
text = linknamere.sub(ur'\1', text)
text = sugre.sub(ur'\n\n## \1\n\n', text)
text = text.replace(u'<ul>', u'\n\n').replace(u'</ul>', u'')
text = lire.sub(ur'* \1\n', text)
return text
## Need to add Bible/OBS References once that is in API
if __name__ == '__main__':
terms_url = 'https://api.unfoldingword.org/ts/txt/2/bible/en/terms.json'
terms_content = getURL(terms_url)
terms_json = json.loads(terms_content)
outdir = '/tmp/tw-en'
if not os.path.exists(outdir):
os.makedirs(outdir)
for x in terms_json:
if 'id' not in x: continue
chkf = codecs.open('{0}/01/{1}.md'.format(outdir, x['id']), 'w', encoding='utf-8')
# Write tW
chkf.write(u'# {0}\n\n'.format(x['term']))
chkf.write(u'## {0}\n\n'.format(x['def_title']))
chkf.write(clean(x['def']))
chkf.write(u'\n\n## See Also\n\n')
for cf in x['cf']:
chkf.write(u'* {0}\n'.format(cf))
if 'aliases' in x:
chkf.write(u'\n## Aliases\n\n')
for a in x['aliases']:
chkf.write(u'* {0}\n'.format(a))
chkf.close()
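# Minimal illustration of clean() (hypothetical input, shown as a comment so the
# script's behaviour is unchanged): the HTML heading and list markup become Markdown,
#   clean(u'<h2>Translation Suggestions</h2><ul><li>item one</li></ul>')
# returns roughly u'\n\n## Translation Suggestions\n\n\n\n* item one\n'.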
|
StarcoderdataPython
|
10223
|
<filename>edivorce/apps/core/views/graphql.py
import graphene
import graphene_django
from django.http import HttpResponseForbidden
from graphene_django.views import GraphQLView
from graphql import GraphQLError
from edivorce.apps.core.models import Document
class PrivateGraphQLView(GraphQLView):
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated:
return HttpResponseForbidden()
return super().dispatch(request, *args, **kwargs)
class DocumentType(graphene_django.DjangoObjectType):
file_url = graphene.String(source='get_file_url')
content_type = graphene.String(source='get_content_type')
class Meta:
model = Document
exclude = ('id', 'file')
class Query(graphene.ObjectType):
documents = graphene.List(DocumentType, doc_type=graphene.String(required=True), party_code=graphene.Int(required=True))
def resolve_documents(self, info, **kwargs):
if info.context.user.is_anonymous:
raise GraphQLError('Unauthorized')
q = Document.objects.filter(bceid_user=info.context.user, **kwargs)
for doc in q:
if not doc.file_exists():
q.delete()
return Document.objects.none()
return q
class DocumentInput(graphene.InputObjectType):
filename = graphene.String(required=True)
size = graphene.Int(required=True)
width = graphene.Int()
height = graphene.Int()
rotation = graphene.Int()
class DocumentMetaDataInput(graphene.InputObjectType):
files = graphene.List(DocumentInput, required=True)
doc_type = graphene.String(required=True)
party_code = graphene.Int(required=True)
class UpdateMetadata(graphene.Mutation):
class Arguments:
input = DocumentMetaDataInput(required=True)
documents = graphene.List(DocumentType)
def mutate(self, info, **kwargs):
input_ = kwargs['input']
documents = Document.objects.filter(bceid_user=info.context.user, doc_type=input_['doc_type'], party_code=input_['party_code'])
unique_files = [dict(s) for s in set(frozenset(d.items()) for d in input_['files'])]
if documents.count() != len(input_['files']) or documents.count() != len(unique_files):
raise GraphQLError("Invalid input: there must be the same number of files")
for i, file in enumerate(input_['files']):
try:
doc = documents.get(filename=file['filename'], size=file['size'])
doc.sort_order = i + 1
doc.width = file.get('width', doc.width)
doc.height = file.get('height', doc.height)
doc.rotation = file.get('rotation', doc.rotation)
if doc.rotation not in [0, 90, 180, 270]:
raise GraphQLError(f"Invalid rotation {doc.rotation}, must be 0, 90, 180, 270")
doc.save()
except Document.DoesNotExist:
raise GraphQLError(f"Couldn't find document '{file['filename']}' with size '{file['size']}'")
return UpdateMetadata(documents=documents.all())
class Mutations(graphene.ObjectType):
update_metadata = UpdateMetadata.Field()
graphql_schema = graphene.Schema(query=Query, mutation=Mutations)
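# Example query against this schema (illustrative only; graphene exposes the
# snake_case names below in camelCase by default, and the doc_type / party_code
# values are placeholders, not taken from this project):
#
#   query {
#     documents(docType: "some_doc_type", partyCode: 1) {
#       filename
#       sortOrder
#       fileUrl
#     }
#   }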
|
StarcoderdataPython
|
92825
|
<filename>26_ShortestPath/Step07/gamjapark.py
import sys
V, E = map(int, sys.stdin.readline().split())
max_size = E * (400 * 399) + 1
shortest_path = [[max_size for _ in range(V + 1)] for _ in range(V + 1)]
for e in range(E):
a, b, c = map(int, sys.stdin.readline().split())
shortest_path[a][b] = c
for k in range(1, V + 1):
for i in range(1, V + 1):
for j in range(1, V + 1):
shortest_path[i][j] = min(shortest_path[i][j], shortest_path[i][k] + shortest_path[k][j])
ans = max_size
for i in range(1, V + 1):
ans = min(ans, shortest_path[i][i])
print(-1 if ans == max_size else ans)
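# Worked example (hypothetical input, shown as comments): with
#   2 2
#   1 2 3
#   2 1 4
# the only cycle is 1 -> 2 -> 1 with total weight 3 + 4 = 7, so the script
# prints 7; if no cycle exists it prints -1.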
|
StarcoderdataPython
|
114400
|
<reponame>aleasims/Peach
def Test(tester):
from Ft.Lib.DbUtil import EscapeQuotes
for i,out in [('hello','hello'),
("he'llo",r"he\'llo"),
("he'll'o",r"he\'ll\'o"),
("'hello'",r"\'hello\'"),
("'","\\'"),
(r"hhh\\hhhh",r"hhh\\\\hhhh"),
(r"\\",r"\\\\"),
(r"'\\''\\'\\'",r"\'\\\\\'\'\\\\\'\\\\\'"),
(None,r""),
]:
tester.startTest(repr(i))
e = EscapeQuotes(i)
tester.compare(out,e)
tester.testDone()
|
StarcoderdataPython
|
3274630
|
<reponame>shinymud/ShinyMUD<filename>tests/shinytest/models/test_item.py<gh_stars>10-100
from shinytest import ShinyTestCase
class TestItem(ShinyTestCase):
def test_something(self):
pass
|
StarcoderdataPython
|
1711597
|
<gh_stars>1-10
from django.conf.urls.defaults import *
from news_and_events import views
# from news_and_events.views import NewsAndEventsViews
urlpatterns = patterns('',
# news and events items
url(r"^news/(?P<slug>[-\w]+)/$", views.newsarticle, name="newsarticle"),
url(r"^event/(?P<slug>[-\w]+)/$", views.event, name="event"),
# named entities' news and events
url(r'^news-archive/(?:(?P<slug>[-\w]+)/)?$', views.news_archive, name="news_archive"),
url(r'^previous-events/(?:(?P<slug>[-\w]+)/)?$', views.previous_events, name="previous_events"),
url(r'^forthcoming-events/(?:(?P<slug>[-\w]+)/)?$', views.all_forthcoming_events, name="forthcoming_event"),
url(r"^news-and-events/(?:(?P<slug>[-\w]+)/)?$", views.news_and_events, name="news_and_events"),
)
#(r"^entity/(?P<slug>[-\w]+)/news/$", "news_and_events.views.news"), # in development
|
StarcoderdataPython
|
9234
|
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from routes import items
import config
from constants import *
config.parse_args()
app = FastAPI(
title="API",
description="API boilerplate",
version="1.0.0",
openapi_tags=API_TAGS_METADATA,
)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(items.router)
@app.get("/")
async def root():
return {
"docs": "api documentation at /docs or /redoc",
}
if __name__ == "__main__":
uvicorn.run("main:app", host=config.CONFIG.host, port=int(config.CONFIG.port))
|
StarcoderdataPython
|
192870
|
<filename>src/pipelines/epidemiology/us_wa_authority.py<gh_stars>100-1000
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from pandas import DataFrame, concat
from lib.pipeline import DataSource
from lib.utils import table_merge, table_rename
_col_adapter_cases = {
"WeekStartDate": "date",
"County": "subregion2_name",
# "NewPos_All": "new_confirmed",
"Age 0-19": "new_confirmed_age_00",
"Age 20-39": "new_confirmed_age_01",
"Age 40-59": "new_confirmed_age_02",
"Age 60-79": "new_confirmed_age_03",
"Age 80+": "new_confirmed_age_04",
}
_col_adapter_deaths = {
"WeekStartDate": "date",
"County": "subregion2_name",
# "Deaths": "new_deceased",
"Age 0-19": "new_deceased_age_00",
"Age 20-39": "new_deceased_age_01",
"Age 40-59": "new_deceased_age_02",
"Age 60-79": "new_deceased_age_03",
"Age 80+": "new_deceased_age_04",
}
_col_adapter_hosp = {
"WeekStartDate": "date",
"County": "subregion2_name",
# "Hospitalizations": "new_hospitalized",
"Age 0-19": "new_hospitalized_age_00",
"Age 20-39": "new_hospitalized_age_01",
"Age 40-59": "new_hospitalized_age_02",
"Age 60-79": "new_hospitalized_age_03",
"Age 80+": "new_hospitalized_age_04",
}
class WashingtonDataSource(DataSource):
def parse_dataframes(
self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
rename_opts = dict(drop=True, remove_regex=r"[^0-9a-z\s]")
data = table_merge(
[
table_rename(dataframes[0]["Cases"], _col_adapter_cases, **rename_opts),
table_rename(dataframes[0]["Deaths"], _col_adapter_deaths, **rename_opts),
table_rename(dataframes[0]["Hospitalizations"], _col_adapter_hosp, **rename_opts),
],
how="outer",
on=["date", "subregion2_name"],
)
state = data.drop(columns=["subregion2_name"]).groupby(["date"]).sum().reset_index()
state["key"] = "US_WA"
# Remove deceased data since we get it from another source which provides daily counts
for col in [col for col in state.columns if "deceased" in col]:
state[col] = None
data = data[data["subregion2_name"] != "Unassigned"]
data["country_code"] = "US"
data["subregion1_code"] = "WA"
for df in (state, data):
df["age_bin_00"] = "0-19"
df["age_bin_01"] = "20-39"
df["age_bin_02"] = "40-59"
df["age_bin_03"] = "60-79"
df["age_bin_04"] = "80-"
return concat([state, data])
|
StarcoderdataPython
|
3344536
|
"""
Script to create catalogue entries for LLAGN sample.
The catalogue is given by the positional cross-match between 2RXS and AllWISE,
and removing the 3LAC blazars. A Seyferntess PDF is assigned and only sources with
Seyfertness larger than 0.5 are selected in the final sample.
"""
from flarestack.analyses.agn_cores.shared_agncores import raw_cat_dir, agn_catalogue_name, agn_cores_output_dir
from shared_agncores import create_random_src, plot_catalogue
from flarestack.utils.prepare_catalogue import cat_dtype
import astropy.io.fits as pyfits
from astropy.table import Table
import numpy as np
import pandas as pd
import os
def select_nrandom_sources(cat, n_random=100):
df = cat.to_pandas()
df_random = df.sample(n=n_random)
cat_new = Table.from_pandas(df_random)
print (cat_new)
return cat_new
def select_n_brightest_srcs(cat, nr_srcs):
"""
Select the first nr_srcs brightest sources
:param cat: original catalogue of sources
:param nr_srcs: number of sources to select
:return: catalogue after selection
"""
print ("Selecting", nr_srcs, "brightest sources.Length after cuts:", len(raw_cat))
return cat[-nr_srcs:]
'''Open original (complete) catalogue'''
raw_cat = pyfits.open(raw_cat_dir+'LLAGN_2rxs2AllWiseSayfertness_no3LACbl_April2020_small.fits')
raw_cat = Table(raw_cat[1].data, masked=True)
print ("Catalogue length:", len(raw_cat))
raw_cat = raw_cat[raw_cat['DEC_DEG']>-5] # Select Northern sky sources only
raw_cat = raw_cat.group_by('XRay_FLUX') # order catalog by flux
print ("Catalogue length after cut:", len(raw_cat))
new_cat = np.empty(len(raw_cat), dtype=cat_dtype)
new_cat["ra_rad"] = np.deg2rad(raw_cat["RA_DEG"]) # rosat RA in radians #np.deg2rad(random_ra)
new_cat["dec_rad"] = np.deg2rad(raw_cat["DEC_DEG"]) # rosat DEC in radians #np.deg2rad(random_dec)
new_cat["distance_mpc"] = np.ones(len(raw_cat))
new_cat["ref_time_mjd"] = np.ones(len(raw_cat))
new_cat["start_time_mjd"] = np.ones(len(raw_cat))
new_cat["end_time_mjd"] = np.ones(len(raw_cat))
new_cat["base_weight"] = raw_cat["XRay_FLUX"] * 1e13
new_cat["injection_weight_modifier"] = np.ones(len(raw_cat))
src_name = []
for src, vv10 in enumerate(raw_cat['2RXS_ID']):
# if (vv10!='N/A'):
# src_name.append(vv10)
if (raw_cat['2RXS_ID'][src] != 'N/A'):
src_name.append(raw_cat['2RXS_ID'][src])
elif (raw_cat['XMMSL2_ID'][src] != 'N/A'):
src_name.append(raw_cat['XMMSL2_ID'][src])
else:
print ("No valid name found for source nr ", src)
break
new_cat["source_name"] = src_name
save_path = agn_catalogue_name("lowluminosity", "irselected_north")
np.save(save_path, new_cat)
|
StarcoderdataPython
|
34001
|
<reponame>jpsantos-mf/ezdxf
# Copyright (c) 2014-2019, <NAME>
# License: MIT License
import pytest
from ezdxf.sections.acdsdata import AcDsDataSection
from ezdxf import DXFKeyError
from ezdxf.lldxf.tags import internal_tag_compiler, group_tags
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
@pytest.fixture
def section():
entities = group_tags(internal_tag_compiler(ACDSSECTION))
return AcDsDataSection(None, entities)
def test_loader(section):
assert 'ACDSDATA' == section.name.upper()
assert len(section.entities) > 0
def test_acds_record(section):
records = [entity for entity in section.entities if entity.dxftype() == 'ACDSRECORD']
assert len(records) > 0
record = records[0]
assert record.has_section('ASM_Data') is True
assert record.has_section('AcDbDs::ID') is True
assert record.has_section('mozman') is False
with pytest.raises(DXFKeyError):
_ = record.get_section('mozman')
asm_data = record.get_section('ASM_Data')
binary_data = (tag for tag in asm_data if tag.code == 310)
length = sum(len(tag.value) for tag in binary_data)
assert asm_data[2].value == length
def test_write_dxf(section):
result = TagCollector.dxftags(section)
expected = basic_tags_from_text(ACDSSECTION)
assert result[:-1] == expected
ACDSSECTION = """0
SECTION
2
ACDSDATA
70
2
71
6
0
ACDSSCHEMA
90
0
1
AcDb3DSolid_ASM_Data
2
AcDbDs::ID
280
10
91
8
2
ASM_Data
280
15
91
0
101
ACDSRECORD
95
0
90
2
2
AcDbDs::TreatedAsObjectData
280
1
291
1
101
ACDSRECORD
95
0
90
3
2
AcDbDs::Legacy
280
1
291
1
101
ACDSRECORD
1
AcDbDs::ID
90
4
2
AcDs:Indexable
280
1
291
1
101
ACDSRECORD
1
AcDbDs::ID
90
5
2
AcDbDs::HandleAttribute
280
7
282
1
0
ACDSSCHEMA
90
1
1
AcDb_Thumbnail_Schema
2
AcDbDs::ID
280
10
91
8
2
Thumbnail_Data
280
15
91
0
101
ACDSRECORD
95
1
90
2
2
AcDbDs::TreatedAsObjectData
280
1
291
1
101
ACDSRECORD
95
1
90
3
2
AcDbDs::Legacy
280
1
291
1
101
ACDSRECORD
1
AcDbDs::ID
90
4
2
AcDs:Indexable
280
1
291
1
101
ACDSRECORD
1
AcDbDs::ID
90
5
2
AcDbDs::HandleAttribute
280
7
282
1
0
ACDSSCHEMA
90
2
1
AcDbDs::TreatedAsObjectDataSchema
2
AcDbDs::TreatedAsObjectData
280
1
91
0
0
ACDSSCHEMA
90
3
1
AcDbDs::LegacySchema
2
AcDbDs::Legacy
280
1
91
0
0
ACDSSCHEMA
90
4
1
AcDbDs::IndexedPropertySchema
2
AcDs:Indexable
280
1
91
0
0
ACDSSCHEMA
90
5
1
AcDbDs::HandleAttributeSchema
2
AcDbDs::HandleAttribute
280
7
91
1
284
1
0
ACDSRECORD
90
0
2
AcDbDs::ID
280
10
320
339
2
ASM_Data
280
15
94
1088
310
414349532042696E61727946696C652855000000000000020000000C00000007104175746F6465736B204175746F434144071841534D203231392E302E302E3536303020556E6B6E6F776E071853756E204D61792020342031353A34373A3233203230313406000000000000F03F068DEDB5A0F7C6B03E06BBBDD7D9DF7CDB
310
3D0D0961736D6865616465720CFFFFFFFF04FFFFFFFF070C3231392E302E302E35363030110D04626F64790C0200000004FFFFFFFF0CFFFFFFFF0C030000000CFFFFFFFF0CFFFFFFFF110E067265665F76740E036579650D066174747269620CFFFFFFFF04FFFFFFFF0CFFFFFFFF0CFFFFFFFF0C010000000C040000000C05
310
000000110D046C756D700C0600000004FFFFFFFF0CFFFFFFFF0CFFFFFFFF0C070000000C01000000110D0E6579655F726566696E656D656E740CFFFFFFFF04FFFFFFFF070567726964200401000000070374726904010000000704737572660400000000070361646A040000000007046772616404000000000709706F7374
310
636865636B0400000000070463616C6304010000000704636F6E760400000000070473746F6C06000000E001FD414007046E746F6C060000000000003E4007046473696C0600000000000000000708666C61746E6573730600000000000000000707706978617265610600000000000000000704686D617806000000000000
310
0000070667726964617206000000000000000007056D6772696404B80B0000070575677269640400000000070576677269640400000000070A656E645F6669656C6473110D0F7665727465785F74656D706C6174650CFFFFFFFF04FFFFFFFF0403000000040000000004010000000408000000110E067265665F76740E0365
310
79650D066174747269620CFFFFFFFF04FFFFFFFF0CFFFFFFFF0CFFFFFFFF0C030000000C040000000C05000000110D057368656C6C0C0800000004FFFFFFFF0CFFFFFFFF0CFFFFFFFF0CFFFFFFFF0C090000000CFFFFFFFF0C03000000110E067265665F76740E036579650D066174747269620CFFFFFFFF04FFFFFFFF0CFF
310
FFFFFF0CFFFFFFFF0C070000000C040000000C05000000110D04666163650C0A00000004FFFFFFFF0CFFFFFFFF0CFFFFFFFF0CFFFFFFFF0C070000000CFFFFFFFF0C0B0000000B0B110E05666D6573680E036579650D066174747269620CFFFFFFFF04FFFFFFFF0C0C0000000CFFFFFFFF0C09000000110E05746F7275730D
310
07737572666163650CFFFFFFFF04FFFFFFFF0CFFFFFFFF131D7B018BA58BA7C0600EB0424970BC4000000000000000001400000000000000000000000000000000000000000000F03F065087D2E2C5418940066050CEE5F3CA644014000000000000F03F000000000000000000000000000000000B0B0B0B0B110E06726566
310
5F76740E036579650D066174747269620CFFFFFFFF04FFFFFFFF0CFFFFFFFF0C0A0000000C090000000C040000000C05000000110E03456E640E026F660E0341534D0D0464617461
"""
|
StarcoderdataPython
|
4809302
|
<reponame>mtu2/stargazing
import concurrent.futures
import re
import urllib.request
import stargazing.audio.audio_player as audio_ap
import stargazing.config.config as config
from stargazing.utils.helper_funcs import silent_stderr, start_daemon_thread
class AudioController():
"""Audio manager, pre-loads the audio players specified in the settings.json and allows audio players to be created and stream via YouTube search
@param volume: Initial volume level."""
def __init__(self, volume=100) -> None:
self.saved_youtube_player_urls = config.get_saved_youtube_player_urls()
self.loaded_players = {
name: None for name in self.saved_youtube_player_urls}
start_daemon_thread(target=self.__load_audio_players)
self.playing = None
self.playing_name = "offline"
self.volume = volume
def stop(self) -> None:
if self.playing:
self.playing.stop()
def offline(self) -> None:
self.stop()
self.playing = None
self.playing_name = "offline"
def set_volume(self, vol: int) -> None:
self.volume = vol
if self.playing:
self.playing.set_volume(vol)
def get_volume(self) -> int:
return self.volume
def set_loaded_player(self, loaded_player_name: str) -> None:
"""Stops the current player, loads the given player name and closes the menu"""
self.stop()
# If player has not loaded (not enough time or error)
if not self.loaded_players[loaded_player_name]:
start_daemon_thread(target=self.set_youtube_player_from_url,
args=[self.saved_youtube_player_urls[loaded_player_name], loaded_player_name])
# TODO: set self.loaded_players[loaded_player_name] with this player
# If player has loaded
else:
self.playing = self.loaded_players[loaded_player_name]
self.playing_name = loaded_player_name
self.playing.set_volume(self.volume)
self.playing.play()
def set_youtube_player_from_url(self, youtube_url: str, player_name="") -> str:
self.stop()
self.playing_name = "loading audio..."
self.playing = audio_ap.YoutubeAudioPlayer.safe_create(
youtube_url, True)
if self.playing:
self.playing_name = self.playing.video_titles[0] if not player_name else player_name
self.playing.set_volume(self.volume)
self.playing.play()
else:
self.playing_name = "error loading audio"
def set_youtube_player_from_query(self, search_query: str) -> str:
self.stop()
self.playing_name = "searching youtube..."
search = search_query.replace(" ", "+")
youtube_search = f"https://www.youtube.com/results?search_query={search}"
html = urllib.request.urlopen(youtube_search)
video_ids = re.findall(r"watch\?v=(\S{11})", html.read().decode())
url = "https://www.youtube.com/watch?v=" + video_ids[0]
self.set_youtube_player_from_url(url)
def __load_audio_players(self) -> None:
silent_yt_audio_init = silent_stderr(
lambda url: audio_ap.YoutubeAudioPlayer.safe_create(url, True))
with concurrent.futures.ThreadPoolExecutor() as exec:
futures_to_name = {exec.submit(
silent_yt_audio_init, url): name for name, url in self.saved_youtube_player_urls.items()}
for future in concurrent.futures.as_completed(futures_to_name):
name = futures_to_name[future]
player = future.result()
self.loaded_players[name] = player
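# Usage sketch (illustrative only; assumes the YouTube audio backend used by
# stargazing.audio.audio_player is available):
# controller = AudioController(volume=80)
# controller.set_youtube_player_from_query("lofi hip hop radio")  # search YouTube and stream
# controller.set_volume(50)
# controller.offline()  # stop playback and mark the controller as offline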
|
StarcoderdataPython
|
3249039
|
from django.utils.translation import gettext as _
from django.contrib.admin import ModelAdmin, register, site, display
from django.contrib.auth.models import Group
from .models import Usuario, Grupo
site.unregister(Group)
@register(Grupo)
class GrupoAdmin(ModelAdmin):
pass
@register(Usuario)
class UsuarioAdmin(ModelAdmin):
list_display = ('username', 'nome', 'email', 'email_secundario', 'tipo', 'auth')
list_filter = ('tipo', 'polo__nome', 'campus__sigla')
fieldsets = [
(None, {"fields": ['username', 'tipo'],}),
(_('Aluno'), {"fields": ['campus', 'polo'],}),
(_('Emails'), {"fields": ['email', 'email_escolar', 'email_academico', 'email_secundario'],}),
(_('Auth'), {"fields": ['is_active', 'is_superuser', 'groups'],}),
(_('Dates'), {"fields": ['date_joined', 'first_login', 'last_login'],}),
]
@display
def auth(self, obj):
result = ""
if obj.is_staff:
result += _('Colaborador superusuário ') if obj.is_superuser else _('Colaborador ')
else:
result += _('Usuário ')
result += _('(Ativo)') if obj.is_active else _('(Inativo)')
return result
|
StarcoderdataPython
|
1774519
|
from typing import List
from Core.Base.BaseValidation import BaseValidation
from Core.DatabaseFactory.DatabaseType import DatabaseType
from Model.Bot import BotDataModel
from Model.QnA import QnADataModel
from Utils.DatabaseUtils import DatabaseHelper
from .KnowledgePage import KnowledgePage
class KnowledgeValidation(BaseValidation):
def __init__(self):
BaseValidation.__init__(self)
self.__db_helper = DatabaseHelper(DatabaseType.MONGO_DB).database_query
self.__knowledge = KnowledgePage()
self.__bot = BotDataModel()
self.__qna = QnADataModel()
def should_added_faq_url_successfully(self, actual_url: List['str'], expect_url: str):
self.assertion.should_contain_in_list(actual_url, expect_url, "Has error in add FAQ url")
def should_added_correctly_url_data(self, bot_name, actual_data):
bots: List['BotDataModel.BotInformation'] = self.__bot.get_bots_via_bot_name(bot_name)
list_qna: List[QnADataModel.QnAInformation] = self.__qna.get_qna_via_bot_id(bots[0].bot_id)
expected_data = []
for qna in list_qna:
expected_data.append({"questions": [qna.qna_question], "answer": qna.qna_answer})
self.assertion.should_be_equal(expected_data, actual_data,
"Has difference in faq data \nExpected: {} \nActual: {} ".format(expected_data,
actual_data))
def should_create_question_pair_table_with_empty_data(self):
        # It seems to get the first table without checking whether it is the Manual Q&A table
data_table = self.__knowledge.get_question_pair_data_in_gui()
print(f"Data table {data_table}")
is_has_data = lambda input_length: len(input_length) > 0
data_table = [data_row for data_row in data_table if
is_has_data(data_row["questions"])]
self.assertion.should_be_equal(len(data_table), 0, "Init new question pair with existing data")
|
StarcoderdataPython
|
4825438
|
###########################################################################
# Imports
###########################################################################
# Standard library imports
import os
import shutil
from typing import Tuple
from automan.api import Problem, Simulation, Automator
# Local imports
from data_processing import DataProcessor
from gui import MainPipelineGUI
from visual_setup import VisualSetupGUI
###########################################################################
# Code
###########################################################################
def get_batch_limits(length: int, batch: int):
"""
Get the index limits for a given batch size, and the length of the original
list.
Parameters
----------
length : int
The length of the original list.
batch : int
Size of the batch.
"""
batch_limits = []
lb = 1
while lb <= length:
if lb + batch < length:
limits = (lb, lb + batch - 1)
else:
limits = (lb, length)
batch_limits.append(limits)
lb += batch
return batch_limits
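# Example: get_batch_limits(length=10, batch=4) returns [(1, 4), (5, 8), (9, 10)],
# i.e. 1-based inclusive index limits for each batch.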
def tuple2string(tup: Tuple[int, int]) -> str:
"""
Convert a tuple to a string.
Parameters
----------
tup : Tuple[int, int]
The tuple to convert.
Returns
-------
str
The string representation of the tuple.
"""
return f"{tup[0]}-{tup[1]}"
class DataPreprocessingAutomator(Problem):
def get_name(self):
return "_data_preprocessing_TEMP"
def setup_params(self):
main_pipeline_gui = MainPipelineGUI()
main_pipeline_gui.configure_traits()
self.run_data_preprocessor = main_pipeline_gui.run_data_preprocessor
self.run_visual_setup_simultaneously =\
main_pipeline_gui.run_visual_setup_simultaneously
data_input_dir = main_pipeline_gui.data_input_dir
if data_input_dir == '' and self.run_data_preprocessor:
raise ValueError('No data input directory specified.')
self.data_input_dir = f'"{data_input_dir}"'
self.quiet = main_pipeline_gui.quiet
self.parallel = main_pipeline_gui.parallel
self.batch_size = main_pipeline_gui.batch_size
self.use_data_input_dir_for_output =\
main_pipeline_gui.use_data_input_dir_for_output
self.visual_data_input_dir = main_pipeline_gui.visual_data_input_dir
self.run_visual_setup = main_pipeline_gui.run_visual_setup
if self.parallel and self.batch_size is None:
raise ValueError(
"If parallel is True, batch_size per job must be set."
)
if self.run_data_preprocessor:
temp_obj = DataProcessor(input_dir=data_input_dir)
self.batch_limits = get_batch_limits(
length=len(temp_obj.input_data_files),
batch=self.batch_size
)
if self.run_data_preprocessor and self.visual_data_input_dir == '':
self.visual_data_input_dir = os.path.join(
data_input_dir, '_processed_data'
)
elif self.run_visual_setup and self.visual_data_input_dir == '':
raise ValueError(
'No visual data input directory specified. '
'Please set the visual data input directory.'
)
#self.visual_data_input_dir = f'"{self.visual_data_input_dir}"'
def setup_visualizer(self):
visual_setup_gui = VisualSetupGUI(
visual_data_input_dir=self.visual_data_input_dir,
)
visual_setup_gui.configure_traits()
def setup(self):
self.setup_params()
self.cases = []
if self.run_data_preprocessor is False:
print("Skipping data preprocessing.")
self.cases = []
return
if self.quiet:
base_cmd = 'python data_processing.py --quiet'
else:
base_cmd = 'python data_processing.py'
if self.run_visual_setup_simultaneously:
temp_base_cmd = 'python visual_setup.py'
self.cases.append(
Simulation(
root="_visual_setup_TEMP",
base_command=temp_base_cmd,
visual_data_input_dir=self.visual_data_input_dir,
)
)
print("Running visual setup simultaneously.")
if self.parallel:
self.cases += [
Simulation(
root=f"_parallel_{i}_data_preprocessing_TEMP",
base_command=base_cmd,
input_dir=self.data_input_dir,
range=tuple2string(self.batch_limits[i]),
)
for i in range(1, len(self.batch_limits))
]
else:
self.cases += [
Simulation(
root="_series_data_preprocessing_TEMP",
base_command=base_cmd,
input_dir=self.data_input_dir,
)
]
print('Setup complete.')
def run(self):
self.make_output_dir()
if self.run_visual_setup:
print("Running visual setup.")
self.setup_visualizer()
###########################################################################
# Main Code
###########################################################################
if __name__ == '__main__':
import time
tic = time.perf_counter()
automator = Automator(
simulation_dir='_automator_TEMP',
output_dir='_automator_TEMP/output',
all_problems=[DataPreprocessingAutomator]
)
automator.run()
toc = time.perf_counter()
print('Done.')
print(f"Time taken: {toc - tic:0.4f} seconds.")
# Remove the temporary directories.
shutil.rmtree('_automator_TEMP')
|
StarcoderdataPython
|
1784516
|
class Pupil:
next_id = 0
def __init__(self, loc, smoothing_factor):
self.smoothing_factor = smoothing_factor
self.location = loc
self.id = Pupil.next_id
self.certainty = 0
self.last_locations = []
Pupil.next_id += 1
def update_location(self, new_loc):
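        # Exponential moving average: keep smoothing_factor of the old location and
        # blend in (1 - smoothing_factor) of the new measurement.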
self.location = self.smoothing_factor * self.location + (1 - self.smoothing_factor) * new_loc
|
StarcoderdataPython
|
33878
|
<gh_stars>1-10
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Network architectures."""
from typing import Callable, Optional
from acme import specs
from acme.jax import networks as acme_networks
from acme.jax import utils as acme_utils
import haiku as hk
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
uniform_initializer = hk.initializers.VarianceScaling(
mode='fan_out', scale=1. / 3.)
class ResidualLayerNormWrapper(hk.Module):
"""Wrapper that applies residual connections and layer norm."""
def __init__(self, layer: Callable[[jnp.ndarray], jnp.ndarray]):
"""Creates the Wrapper Class.
Args:
layer: module to wrap.
"""
super().__init__(name='ResidualLayerNormWrapper')
self._layer = layer
self._layer_norm = hk.LayerNorm(
axis=-1, create_scale=True, create_offset=True)
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
"""Returns the result of the residual and layernorm computation.
Args:
inputs: inputs to the main module.
"""
# Apply main module.
outputs = self._layer(inputs)
outputs = self._layer_norm(outputs + inputs)
return outputs
class LayerNormAndResidualMLP(hk.Module):
"""MLP with residual connections and layer norm."""
def __init__(self, hidden_size: int, num_blocks: int):
"""Create the model.
Args:
hidden_size: width of each hidden layer.
num_blocks: number of blocks, each block being MLP([hidden_size,
hidden_size]) + layer norm + residual connection.
"""
super().__init__(name='LayerNormAndResidualMLP')
# Create initial MLP layer.
layers = [hk.nets.MLP([hidden_size], w_init=uniform_initializer)]
# Follow it up with num_blocks MLPs with layernorm and residual connections.
for _ in range(num_blocks):
mlp = hk.nets.MLP([hidden_size, hidden_size], w_init=uniform_initializer)
layers.append(ResidualLayerNormWrapper(mlp))
self._network = hk.Sequential(layers)
def __call__(self, inputs: jnp.ndarray):
return self._network(inputs)
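# Shape sketch (illustrative): inside an hk.transform, LayerNormAndResidualMLP(
# hidden_size=256, num_blocks=2) maps a [batch, d] input to a [batch, 256] output;
# the residual blocks keep the width fixed at hidden_size.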
class UnivariateGaussianMixture(acme_networks.GaussianMixture):
"""Head which outputs a Mixture of Gaussians Distribution."""
def __init__(self,
num_dimensions: int,
num_components: int = 5,
init_scale: Optional[float] = None):
"""Create an mixture of Gaussian actor head.
Args:
num_dimensions: dimensionality of the output distribution. Each dimension
is going to be an independent 1d GMM model.
num_components: number of mixture components.
init_scale: the initial scale for the Gaussian mixture components.
"""
super().__init__(num_dimensions=num_dimensions,
num_components=num_components,
multivariate=False,
init_scale=init_scale,
name='UnivariateGaussianMixture')
class StochasticSamplingHead(hk.Module):
"""Simple haiku module to sample from a tfd.Distribution."""
def __call__(self, sample_key: acme_networks.PRNGKey,
distribution: tfd.Distribution):
return distribution.sample(seed=sample_key)
def make_mix_gaussian_feedforward_networks(action_spec: specs.BoundedArray,
num_costs: int):
"""Makes feedforward networks with mix gaussian actor head."""
action_dim = np.prod(action_spec.shape, dtype=int)
hidden_size = 1024
nu_network = hk.Sequential([
acme_utils.batch_concat,
acme_networks.LayerNormMLP(layer_sizes=[512, 512, 256, 1]),
])
chi_network = hk.Sequential([
acme_utils.batch_concat,
acme_networks.LayerNormMLP(layer_sizes=[512, 512, 256, num_costs]),
])
actor_encoder = hk.Sequential([
acme_utils.batch_concat,
hk.Linear(300, w_init=uniform_initializer),
hk.LayerNorm(slice(1, None), True, True),
jnp.tanh,
])
actor_neck = LayerNormAndResidualMLP(hidden_size, num_blocks=4)
actor_head = UnivariateGaussianMixture(
num_components=5, num_dimensions=action_dim)
stochastic_policy_network = hk.Sequential(
[actor_encoder, actor_neck, actor_head])
class LowNoisePolicyNetwork(hk.Module):
def __call__(self, inputs):
x = actor_encoder(inputs)
x = actor_neck(x)
x = actor_head(x, low_noise_policy=True)
return x
low_noise_policy_network = LowNoisePolicyNetwork()
# Behavior networks output an action while the policy outputs a distribution.
stochastic_sampling_head = StochasticSamplingHead()
class BehaviorNetwork(hk.Module):
def __call__(self, sample_key, inputs):
dist = low_noise_policy_network(inputs)
return stochastic_sampling_head(sample_key, dist)
behavior_network = BehaviorNetwork()
return {
'nu': nu_network,
'chi': chi_network,
'policy': stochastic_policy_network,
'low_noise_policy': low_noise_policy_network,
'behavior': behavior_network,
}
|
StarcoderdataPython
|
43698
|
<reponame>LolloneS/DDPG-PyTorch
import random
from typing import List
from src.transition import Transition
random.seed(42)
class ReplayBuffer:
def __init__(self, size: int, random: bool = False):
self.size = size
self.memory: List[Transition] = []
self.random = random
self.occupied = 0
if not random:
self.current = 0
def store(self, transition: Transition):
"""
Store a Transition in the buffer.
        If self.random, the transition to overwrite is chosen at random;
        otherwise the buffer is circular (the oldest entry is overwritten first).
"""
if len(self.memory) < self.size:
self.memory.append(transition)
self.occupied += 1
else:
if self.random:
self.memory[random.randrange(self.size)] = transition
else:
self.memory[self.current] = transition
self.current = (self.current + 1) % self.size
def get(self, amount: int = 1):
"""Get either 1 (default) or `amount` elements randomly
from the buffer."""
if amount == 1:
return random.choice(self.memory)
return random.sample(self.memory, amount)
|
StarcoderdataPython
|
62258
|
<filename>fibonacci_calculator_onion/fibonacci_table.py
class FibonacciTable:
def __init__(self):
self.forward_look_up_table = {0: 0, 1: 1}
self.backward_look_up_table = {0: 0, 1: 1}
def _build_lookup_table(self, fib_index: int) -> None:
if fib_index in self.forward_look_up_table.keys():
return
current_highest_index = max(self.forward_look_up_table.keys())
next_value = self.forward_look_up_table[current_highest_index - 1] + self.forward_look_up_table[
current_highest_index]
self.forward_look_up_table[current_highest_index + 1] = next_value
self.backward_look_up_table[next_value] = current_highest_index + 1
self._build_lookup_table(fib_index)
def _build_non_fibonacci_lookup_table(self, fib_number: int) -> None:
current_index = self.backward_look_up_table[max(self.backward_look_up_table.keys())]
previous_index = current_index - 1
if abs(fib_number - self.forward_look_up_table[previous_index]) <= abs(
fib_number - self.forward_look_up_table[current_index]):
self.backward_look_up_table[fib_number] = previous_index
else:
self.backward_look_up_table[fib_number] = current_index
def _update_look_up_table_number(self, fib_index: int) -> None:
while fib_index > max(self.forward_look_up_table.keys()):
self._build_lookup_table(fib_index)
    def _update_look_up_table_index(self, fib_number) -> None:
        # Extend the tables until the largest known Fibonacci number exceeds fib_number.
        while fib_number >= max(self.forward_look_up_table.values()):
            current_index = max(self.forward_look_up_table.keys())
            self._build_lookup_table(current_index + 1)  # here it is a Fibonacci number
        if fib_number not in self.backward_look_up_table:
            self._build_non_fibonacci_lookup_table(fib_number)  # here it is not a Fibonacci number
def new_fibonacci_number(self, fib_index: int) -> int:
self._update_look_up_table_number(fib_index)
return self.forward_look_up_table[fib_index]
def new_index_fibonacci_number(self, number: int) -> int:
"""Returns an index corresponding to the given fibonacci number."""
self._update_look_up_table_index(number)
return self.backward_look_up_table[number]
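# Example usage:
# table = FibonacciTable()
# table.new_fibonacci_number(10)        # -> 55
# table.new_index_fibonacci_number(55)  # -> 10 (index of the Fibonacci number 55)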
|
StarcoderdataPython
|
10025
|
<gh_stars>0
import os, subprocess
def compile_core(comp, scilib):
"""
ATTENTION, NOT FINISHED
"""
subprocess.call(("make pawpy_%s"%comp).split())
def compile_hfc(comp, scilib):  # distinct name (assumed) so it does not shadow compile_core above
"""
ATTENTION, NOT FINISHED
"""
subprocess.call("make hfc".split())
|
StarcoderdataPython
|
103640
|
# Copyright (C) 2019 <NAME>, <NAME>, <NAME>, <NAME>
# All rights reserved.
# This code is licensed under BSD 3-Clause License.
import sys
import os
import numpy as np
if __name__ == '__main__':
xyz_list_path = sys.argv[1]
xyzs = [xyz for xyz in os.listdir(xyz_list_path) if xyz.endswith('_predict_3.xyz')]
v = np.full([2466, 1], 'v')
for xyz in xyzs:
print(xyz)
obj_path = xyz.replace('.xyz', '.obj')
xyzf = np.loadtxt(os.path.join(xyz_list_path, xyz))
face = np.loadtxt('/home/wc/workspace/P2MPP/data/face3.obj', dtype='|S32')
out = np.vstack((np.hstack((v, xyzf)), face))
np.savetxt(os.path.join(xyz_list_path, obj_path), out, fmt='%s', delimiter=' ')
|
StarcoderdataPython
|
1652635
|
<filename>slender/tests/list/test_concat.py
from unittest import TestCase
from expects import expect, equal, raise_error
from slender import List
class TestConcat(TestCase):
def setUp(self):
self.l = List([1, 2, 3])
def test_concat_if_other_is_empty(self):
expect(self.l.concat([]).to_list()).to(equal([1, 2, 3]))
def test_concat_if_other_is_non_empty(self):
o = List(['a', 'b'])
expect(self.l.concat(o).to_list()).to(equal([1, 2, 3, 'a', 'b']))
def test_concat_if_self_is_empty(self):
l = List()
o = List(['a', 'b'])
expect(l.concat(o).to_list()).to(equal(['a', 'b']))
def test_concat_if_other_is_different(self):
l = List()
o = '...'
expect(lambda: l.concat(o)).to(raise_error(TypeError))
|
StarcoderdataPython
|