// adapted the majority of this function from
// https://bl.ocks.org/boeric/e16ad218bc241dfd2d6e
// "D3 Dynamic Array of Tables"
function updateTable(office, sort) {
sort = sort || "contract_date";
var officeRows = contracts.find(function(table) {
return table.office === office;
});
  // sort in place; distance is reliably numeric, while age, service and
  // wages may contain non-numeric strings, so those columns sort lexically
  officeRows.rows.sort(function(a, b) {
    var asort = a[sort];
    var bsort = b[sort];
    if (sort === "distance_miles") {
      // parse with an explicit radix; non-numeric values fall back to 0
      asort = parseInt(asort, 10) || 0;
      bsort = parseInt(bsort, 10) || 0;
    }
    if (asort < bsort) { return -1; }
    if (asort > bsort) { return 1; }
    return 0;
  });
  // d3's data join expects an array, so wrap the single office object in one;
  // a first pass at binding the object directly was a complete failure
  var data = [officeRows];
var tableDiv = d3.select("body div.contracts");
  // d3 keys this selection by office, so merely re-sorting an office's
  // existing rows does not register as a data change; remove the whole
  // table before rebuilding it with the newly selected sort
tableDiv.selectAll("div").remove();
  // bind the new data, keyed by office; divs bound to stale offices are removed via the exit selection
var div = tableDiv.selectAll("div")
.data(data, function(d) { return d.office });
div.exit().remove();
// append a div to hold the information
  var divEnter = div.enter()
    .append("div");
  // headers are written out by hand, rather than generated, so they are easy to alter
var headers = {
"hiring_office": "Hiring Office",
"contract_date": "Contract Date",
"name": "Name",
"gender": "Gender",
"age": "Age",
"employer": "Employer",
"township": "Township",
"county": "County",
"state": "State",
"distance_miles": "Distance (Miles)",
"position": "Position",
"work_class": "Work Class",
"service_months": "Service (Months)",
"wages_months": "Wages (Months)",
"destination_class": "Destination Class",
"group": "Group"
  };
// add table and header
var tableEnter = divEnter.append("table")
.attr("id", function(d) { return d.office })
.attr("class", "table table-condensed table-striped table-bordered");
var headerLinks = tableEnter.append("thead")
.append("tr")
.selectAll("th")
.data(d3.keys(headers))
.enter()
.append("th")
.append("a")
.text(function(d) { return headers[d]; })
    .on("click", function(d) {
      updateTable(office, d);
    })
    .attr("class", function(d) {
      return d === sort ? "table-col-sorted" : null;
    });
// append table body in new table
tableEnter.append("tbody");
// select all tr elements in the divs update selection
var tr = div.select("table").select("tbody").selectAll("tr")
.data( function(d) { return d.rows; } );
tr.exit().remove();
// bind data to rows and add columns
tr.enter().append("tr");
var td = tr.selectAll("td")
.data(function(d) { return d3.values(d); });
td.enter().append("td")
.text(function(d) { return d; })
};
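// Illustrative usage sketch (not from the original source): it assumes the
// `contracts` array has been loaded elsewhere (e.g. via d3.json) before the
// first render; the office name below is a placeholder.
// updateTable("Chicago");                      // initial render, default sort by contract_date
// updateTable("Chicago", "distance_miles");    // re-render sorted numerically by distance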
|
import './App.css';
import Workspace from './Workspace.jsx';
function App() {
return (
<div className="App">
      <Workspace />
</div>
);
}
export default App;
|
# -*- coding: utf-8 -*-
#
# This file is part of CoTeTo - code templating tool
#
name = 'libSimModel'
description = 'SimXML file reader, return objects from SimXML files'
version = '0.1'
author = 'EnEff-BIM team'
helptxt = """
Help yourself"""
def fetchData(uriList, systemCfg, generatorCfg, logger):
    # deferred import, so the loader can be listed without mapapi installed
    from mapapi.MapClasses import MapProject
    if not uriList:
        logger.critical('libSimModel - no files specified!')
        raise Exception('No files specified!')
    # only the first URI is read; any additional files are ignored
    return {'MapProject': MapProject(uriList[0])}
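# Illustrative usage sketch (not part of the loader itself): the file name and
# logger below are placeholders showing how fetchData might be invoked.
# import logging
# data = fetchData(['building.simxml'], systemCfg={}, generatorCfg={},
#                  logger=logging.getLogger('CoTeTo'))
# project = data['MapProject']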
|
// searchindex.js: machine-generated Sphinx search index for the PyTorch Geometric documentation (index payload omitted)
|
// `calendars` is kept global so both instances can be controlled
// from the developer console
var calendars = {};
$(document).ready( function() {
// assuming you've got the appropriate language files,
// clndr will respect whatever moment's language is set to.
// moment.lang('ru');
// here's some magic to make sure the dates are happening this month.
var thisMonth = moment().format('YYYY-MM');
var eventArray = [
{ startDate: thisMonth + '-10', endDate: thisMonth + '-14', title: 'Multi-Day Event' },
{ startDate: thisMonth + '-21', endDate: thisMonth + '-23', title: 'Another Multi-Day Event' }
];
// the order of the click handlers is predictable.
// direct click action callbacks come first: click, nextMonth, previousMonth, nextYear, previousYear, or today.
// then onMonthChange (if the month changed).
// finally onYearChange (if the year changed).
calendars.clndr1 = $('.cal1').clndr({
events: eventArray,
// constraints: {
// startDate: '2013-11-01',
// endDate: '2013-11-15'
// },
clickEvents: {
click: function(target) {
console.log(target);
// if you turn the `constraints` option on, try this out:
// if($(target.element).hasClass('inactive')) {
// console.log('not a valid datepicker date.');
// } else {
// console.log('VALID datepicker date.');
// }
},
nextMonth: function() {
console.log('next month.');
},
previousMonth: function() {
console.log('previous month.');
},
onMonthChange: function() {
console.log('month changed.');
},
nextYear: function() {
console.log('next year.');
},
previousYear: function() {
console.log('previous year.');
},
onYearChange: function() {
console.log('year changed.');
}
},
multiDayEvents: {
startDate: 'startDate',
endDate: 'endDate'
},
showAdjacentMonths: true,
adjacentDaysChangeMonth: false
});
calendars.clndr2 = $('.cal2').clndr({
template: $('#template-calendar').html(),
events: eventArray,
startWithMonth: moment().add(1, 'months'),
clickEvents: {
click: function(target) {
console.log(target);
}
},
forceSixRows: true
});
// bind both clndrs to the left and right arrow keys
$(document).keydown( function(e) {
if(e.keyCode == 37) {
// left arrow
calendars.clndr1.back();
calendars.clndr2.back();
}
if(e.keyCode == 39) {
// right arrow
calendars.clndr1.forward();
calendars.clndr2.forward();
}
});
});
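// Illustrative console sketch: because `calendars` is global, either instance
// can be driven by hand once the page loads (method names assumed from the
// CLNDR API):
//   calendars.clndr1.forward();   // advance cal1 one month
//   calendars.clndr2.today();     // snap cal2 back to the current month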
|
define("ace/mode/xquery",["require","exports","module","ace/worker/worker_client","ace/lib/oop","ace/mode/text","ace/tokenizer","ace/mode/xquery_highlight_rules","ace/range","ace/mode/behaviour/cstyle","ace/mode/folding/cstyle"],function(e,t,n){var r=e("../worker/worker_client").WorkerClient,i=e("../lib/oop"),s=e("./text").Mode,o=e("../tokenizer").Tokenizer,u=e("./xquery_highlight_rules").XQueryHighlightRules,a=e("../range").Range,f=e("./behaviour/cstyle").CstyleBehaviour,l=e("./folding/cstyle").FoldMode,c=function(e){this.$tokenizer=new o((new u).getRules()),this.$behaviour=new f(e),this.foldingRules=new l};i.inherits(c,s),function(){this.getNextLineIndent=function(e,t,n){var r=this.$getIndent(t),i=t.match(/\s*(?:then|else|return|[{\(]|<\w+>)\s*$/);return i&&(r+=n),r},this.checkOutdent=function(e,t,n){return/^\s+$/.test(t)?/^\s*[\}\)]/.test(n):!1},this.autoOutdent=function(e,t,n){var r=t.getLine(n),i=r.match(/^(\s*[\}\)])/);if(!i)return 0;var s=i[1].length,o=t.findMatchingBracket({row:n,column:s});if(!o||o.row==n)return 0;var u=this.$getIndent(t.getLine(o.row));t.replace(new a(n,0,n,s-1),u)},this.$getIndent=function(e){var t=e.match(/^(\s+)/);return t?t[1]:""},this.toggleCommentLines=function(e,t,n,r){var i,s,o=!0,u=/^\s*\(:(.*):\)/;for(i=n;i<=r;i++)if(!u.test(t.getLine(i))){o=!1;break}var f=new a(0,0,0,0);for(i=n;i<=r;i++)s=t.getLine(i),f.start.row=i,f.end.row=i,f.end.column=s.length,t.replace(f,o?s.match(u)[1]:"(:"+s+":)")},this.createWorker=function(e){this.$deltas=[];var t=new r(["ace"],"ace/mode/xquery_worker","XQueryWorker"),n=this;return e.getDocument().on("change",function(e){n.$deltas.push(e.data)}),t.attachToDocument(e.getDocument()),t.on("start",function(e){n.$deltas=[]}),t.on("error",function(t){e.setAnnotations([t.data])}),t.on("ok",function(t){e.clearAnnotations()}),t.on("highlight",function(t){if(n.$deltas.length>0)return;var r=0,i=e.getLength()-1,s=t.data.lines,o=t.data.states;e.bgTokenizer.lines=s,e.bgTokenizer.states=o,e.bgTokenizer.fireUpdateEvent(r,i)}),t}}.call(c.prototype),t.Mode=c}),define("ace/mode/xquery_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"],function(e,t,n){var r=e("../lib/oop"),i=e("./text_highlight_rules").TextHighlightRules,s=function(){var 
e="after|ancestor|ancestor-or-self|and|as|ascending|attribute|before|case|cast|castable|child|collation|comment|copy|count|declare|default|delete|descendant|descendant-or-self|descending|div|document|document-node|element|else|empty|empty-sequence|end|eq|every|except|first|following|following-sibling|for|function|ge|group|gt|idiv|if|import|insert|instance|intersect|into|is|item|last|le|let|lt|mod|modify|module|namespace|namespace-node|ne|node|only|or|order|ordered|parent|preceding|preceding-sibling|processing-instruction|rename|replace|return|satisfies|schema-attribute|schema-element|self|some|stable|start|switch|text|to|treat|try|typeswitch|union|unordered|validate|where|with|xquery|contains|paragraphs|sentences|times|words|by|collectionreturn|variable|version|option|when|encoding|toswitch|catch|tumbling|sliding|window|at|using|stemming|collection|schema|while|on|nodes|index|external|then|in|updating|value|of|containsbreak|loop|continue|exit|returning|append|json|position".split("|"),t="[_A-Za-zÀ-ÖØ-öø-˿Ͱ-ͽͿ--⁰-Ⰰ-、-豈-﷏ﷰ-�]",n="[-._A-Za-z0-9·À-ÖØ-öø-˿̀-ͽͿ-‿⁀⁰-Ⰰ-、-豈-﷏ﷰ-�]",r=t+n+"*",i="(?:"+r+":)?"+r,s="(?:(?:Q{.*}"+r+")|(?:"+i+"))";this.$rules={start:[{token:"support.type",regex:"<\\!\\[CDATA\\[",next:"cdata"},{token:"xml-pe",regex:"<\\?",next:"pi"},{token:"comment",regex:"<\\!--",next:"xmlcomment"},{token:"comment.doc",regex:"\\(:~",next:"comment.doc"},{token:"comment",regex:"\\(:",next:"comment"},{token:["text","meta.tag"],regex:"(<\\/?)("+i+")",next:"tag"},{token:"constant",regex:"[+-]?\\d+(?:(?:\\.\\d*)?(?:[eE][+-]?\\d+)?)?\\b"},{token:"variable",regex:"\\$"+s},{token:"string",regex:"'",next:"apos-string"},{token:"string",regex:'"',next:"quot-string"},{token:"text",regex:"\\s+"},{token:function(t){return e.indexOf(t.toLowerCase())!==-1?"keyword":"support.function"},regex:s},{token:"keyword.operator",regex:"\\*|:=|=|<|>|\\-|\\+"},{token:"lparen",regex:"[[({]"},{token:"rparen",regex:"[\\])}]"}],tag:[{token:"text",regex:"\\/?>",next:"start"},{token:["text","meta.tag"],regex:"(<\\/)("+i+")",next:"start"},{token:"meta.tag",regex:i},{token:"text",regex:"\\s+"},{token:"string",regex:"'",next:"apos-attr"},{token:"string",regex:'"',next:"quot-attr"},{token:"string",regex:"'.*?'"},{token:"text",regex:"="}],pi:[{token:"xml-pe",regex:".*\\?>",next:"start"},{token:"xml-pe",regex:".*"}],cdata:[{token:"support.type",regex:"\\]\\]>",next:"start"},{token:"support.type",regex:"\\s+"},{token:"support.type",regex:"(?:[^\\]]|\\](?!\\]>))+"}],"comment.doc":[{token:"comment.doc",regex:":\\)",next:"start"},{token:"comment.doc.tag",regex:"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[a-zA-Z]{2,6}"},{token:"comment.doc.tag",regex:"@[\\w\\d_]+"},{token:"comment.doc",regex:"\\s+"},{token:"comment.doc.tag",regex:"TODO"},{token:"comment.doc",regex:"[^@:^\\s]+"},{token:"comment.doc",regex:"."}],comment:[{token:"comment",regex:".*:\\)",next:"start"},{token:"comment",regex:".+"}],xmlcomment:[{token:"comment",regex:".*?-->",next:"start"},{token:"comment",regex:".+"}],"apos-string":[{token:"string",regex:".*'",next:"start"},{token:"string",regex:".*"}],"quot-string":[{token:"string",regex:'.*"',next:"start"},{token:"string",regex:".*"}],"apos-attr":[{token:"string",regex:".*'",next:"tag"},{token:"string",regex:".*"}],"quot-attr":[{token:"string",regex:'.*"',next:"tag"},{token:"string",regex:".*"}]}};r.inherits(s,i),t.XQueryHighlightRules=s}),define("ace/mode/behaviour/cstyle",["require","exports","module","ace/lib/oop","ace/mode/behaviour","ace/token_iterator","ace/lib/lang"],function(e,t,n){var 
r=e("../../lib/oop"),i=e("../behaviour").Behaviour,s=e("../../token_iterator").TokenIterator,o=e("../../lib/lang"),u=["text","paren.rparen","punctuation.operator"],a=["text","paren.rparen","punctuation.operator","comment"],f=0,l=-1,c="",h=0,p=-1,d="",v="",m=function(){m.isSaneInsertion=function(e,t){var n=e.getCursorPosition(),r=new s(t,n.row,n.column);if(!this.$matchTokenType(r.getCurrentToken()||"text",u)){var i=new s(t,n.row,n.column+1);if(!this.$matchTokenType(i.getCurrentToken()||"text",u))return!1}return r.stepForward(),r.getCurrentTokenRow()!==n.row||this.$matchTokenType(r.getCurrentToken()||"text",a)},m.$matchTokenType=function(e,t){return t.indexOf(e.type||e)>-1},m.recordAutoInsert=function(e,t,n){var r=e.getCursorPosition(),i=t.doc.getLine(r.row);this.isAutoInsertedClosing(r,i,c[0])||(f=0),l=r.row,c=n+i.substr(r.column),f++},m.recordMaybeInsert=function(e,t,n){var r=e.getCursorPosition(),i=t.doc.getLine(r.row);this.isMaybeInsertedClosing(r,i)||(h=0),p=r.row,d=i.substr(0,r.column)+n,v=i.substr(r.column),h++},m.isAutoInsertedClosing=function(e,t,n){return f>0&&e.row===l&&n===c[0]&&t.substr(e.column)===c},m.isMaybeInsertedClosing=function(e,t){return h>0&&e.row===p&&t.substr(e.column)===v&&t.substr(0,e.column)==d},m.popAutoInsertedClosing=function(){c=c.substr(1),f--},m.clearMaybeInsertedClosing=function(){h=0,p=-1},this.add("braces","insertion",function(e,t,n,r,i){var s=n.getCursorPosition(),u=r.doc.getLine(s.row);if(i=="{"){var a=n.getSelectionRange(),f=r.doc.getTextRange(a);if(f!==""&&f!=="{"&&n.getWrapBehavioursEnabled())return{text:"{"+f+"}",selection:!1};if(m.isSaneInsertion(n,r))return/[\]\}\)]/.test(u[s.column])?(m.recordAutoInsert(n,r,"}"),{text:"{}",selection:[1,1]}):(m.recordMaybeInsert(n,r,"{"),{text:"{",selection:[1,1]})}else if(i=="}"){var l=u.substring(s.column,s.column+1);if(l=="}"){var c=r.$findOpeningBracket("}",{column:s.column+1,row:s.row});if(c!==null&&m.isAutoInsertedClosing(s,u,i))return m.popAutoInsertedClosing(),{text:"",selection:[1,1]}}}else if(i=="\n"||i=="\r\n"){var p="";m.isMaybeInsertedClosing(s,u)&&(p=o.stringRepeat("}",h),m.clearMaybeInsertedClosing());var l=u.substring(s.column,s.column+1);if(l=="}"||p!==""){var d=r.findMatchingBracket({row:s.row,column:s.column},"}");if(!d)return null;var v=this.getNextLineIndent(e,u.substring(0,s.column),r.getTabString()),g=this.$getIndent(u);return{text:"\n"+v+"\n"+g+p,selection:[1,v.length,1,v.length]}}}}),this.add("braces","deletion",function(e,t,n,r,i){var s=r.doc.getTextRange(i);if(!i.isMultiLine()&&s=="{"){var o=r.doc.getLine(i.start.row),u=o.substring(i.end.column,i.end.column+1);if(u=="}")return i.end.column++,i;h--}}),this.add("parens","insertion",function(e,t,n,r,i){if(i=="("){var s=n.getSelectionRange(),o=r.doc.getTextRange(s);if(o!==""&&n.getWrapBehavioursEnabled())return{text:"("+o+")",selection:!1};if(m.isSaneInsertion(n,r))return m.recordAutoInsert(n,r,")"),{text:"()",selection:[1,1]}}else if(i==")"){var u=n.getCursorPosition(),a=r.doc.getLine(u.row),f=a.substring(u.column,u.column+1);if(f==")"){var l=r.$findOpeningBracket(")",{column:u.column+1,row:u.row});if(l!==null&&m.isAutoInsertedClosing(u,a,i))return m.popAutoInsertedClosing(),{text:"",selection:[1,1]}}}}),this.add("parens","deletion",function(e,t,n,r,i){var s=r.doc.getTextRange(i);if(!i.isMultiLine()&&s=="("){var o=r.doc.getLine(i.start.row),u=o.substring(i.start.column+1,i.start.column+2);if(u==")")return i.end.column++,i}}),this.add("brackets","insertion",function(e,t,n,r,i){if(i=="["){var 
s=n.getSelectionRange(),o=r.doc.getTextRange(s);if(o!==""&&n.getWrapBehavioursEnabled())return{text:"["+o+"]",selection:!1};if(m.isSaneInsertion(n,r))return m.recordAutoInsert(n,r,"]"),{text:"[]",selection:[1,1]}}else if(i=="]"){var u=n.getCursorPosition(),a=r.doc.getLine(u.row),f=a.substring(u.column,u.column+1);if(f=="]"){var l=r.$findOpeningBracket("]",{column:u.column+1,row:u.row});if(l!==null&&m.isAutoInsertedClosing(u,a,i))return m.popAutoInsertedClosing(),{text:"",selection:[1,1]}}}}),this.add("brackets","deletion",function(e,t,n,r,i){var s=r.doc.getTextRange(i);if(!i.isMultiLine()&&s=="["){var o=r.doc.getLine(i.start.row),u=o.substring(i.start.column+1,i.start.column+2);if(u=="]")return i.end.column++,i}}),this.add("string_dquotes","insertion",function(e,t,n,r,i){if(i=='"'||i=="'"){var s=i,o=n.getSelectionRange(),u=r.doc.getTextRange(o);if(u!==""&&u!=="'"&&u!='"'&&n.getWrapBehavioursEnabled())return{text:s+u+s,selection:!1};var a=n.getCursorPosition(),f=r.doc.getLine(a.row),l=f.substring(a.column-1,a.column);if(l=="\\")return null;var c=r.getTokens(o.start.row),h=0,p,d=-1;for(var v=0;v<c.length;v++){p=c[v],p.type=="string"?d=-1:d<0&&(d=p.value.indexOf(s));if(p.value.length+h>o.start.column)break;h+=c[v].value.length}if(!p||d<0&&p.type!=="comment"&&(p.type!=="string"||o.start.column!==p.value.length+h-1&&p.value.lastIndexOf(s)===p.value.length-1)){if(!m.isSaneInsertion(n,r))return;return{text:s+s,selection:[1,1]}}if(p&&p.type==="string"){var g=f.substring(a.column,a.column+1);if(g==s)return{text:"",selection:[1,1]}}}}),this.add("string_dquotes","deletion",function(e,t,n,r,i){var s=r.doc.getTextRange(i);if(!i.isMultiLine()&&(s=='"'||s=="'")){var o=r.doc.getLine(i.start.row),u=o.substring(i.start.column+1,i.start.column+2);if(u==s)return i.end.column++,i}})};r.inherits(m,i),t.CstyleBehaviour=m}),define("ace/mode/folding/cstyle",["require","exports","module","ace/lib/oop","ace/range","ace/mode/folding/fold_mode"],function(e,t,n){var r=e("../../lib/oop"),i=e("../../range").Range,s=e("./fold_mode").FoldMode,o=t.FoldMode=function(){};r.inherits(o,s),function(){this.foldingStartMarker=/(\{|\[)[^\}\]]*$|^\s*(\/\*)/,this.foldingStopMarker=/^[^\[\{]*(\}|\])|^[\s\*]*(\*\/)/,this.getFoldWidgetRange=function(e,t,n){var r=e.getLine(n),i=r.match(this.foldingStartMarker);if(i){var s=i.index;return i[1]?this.openingBracketBlock(e,i[1],n,s):e.getCommentFoldRange(n,s+i[0].length,1)}if(t!=="markbeginend")return;var i=r.match(this.foldingStopMarker);if(i){var s=i.index+i[0].length;return i[1]?this.closingBracketBlock(e,i[1],n,s):e.getCommentFoldRange(n,s,-1)}}}.call(o.prototype)}) |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['JobArgs', 'Job']
@pulumi.input_type
class JobArgs:
def __init__(__self__, *,
app_engine_http_target: Optional[pulumi.Input['AppEngineHttpTargetArgs']] = None,
attempt_deadline: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
http_target: Optional[pulumi.Input['HttpTargetArgs']] = None,
legacy_app_engine_cron: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
pubsub_target: Optional[pulumi.Input['PubsubTargetArgs']] = None,
retry_config: Optional[pulumi.Input['RetryConfigArgs']] = None,
schedule: Optional[pulumi.Input[str]] = None,
time_zone: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Job resource.
:param pulumi.Input['AppEngineHttpTargetArgs'] app_engine_http_target: App Engine HTTP target.
:param pulumi.Input[str] attempt_deadline: The deadline for job attempts. If the request handler does not respond by this deadline then the request is cancelled and the attempt is marked as a `DEADLINE_EXCEEDED` failure. The failed attempt can be viewed in execution logs. Cloud Scheduler will retry the job according to the RetryConfig. The allowed duration for this deadline is: * For HTTP targets, between 15 seconds and 30 minutes. * For App Engine HTTP targets, between 15 seconds and 24 hours. * For PubSub targets, this field is ignored.
:param pulumi.Input[str] description: Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters.
:param pulumi.Input['HttpTargetArgs'] http_target: HTTP target.
:param pulumi.Input[bool] legacy_app_engine_cron: Immutable. This field is used to manage the legacy App Engine Cron jobs using the Cloud Scheduler API. If the field is set to true, the job will be considered a legacy job. Note that App Engine Cron jobs have fewer features than Cloud Scheduler jobs, e.g., they are limited to App Engine targets.
:param pulumi.Input[str] name: Optionally caller-specified in CreateJob, after which it becomes output only. The job name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`. * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or periods (.). For more information, see [Identifying projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects) * `LOCATION_ID` is the canonical ID for the job's location. The list of available locations can be obtained by calling ListLocations. For more information, see https://cloud.google.com/about/locations/. * `JOB_ID` can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), or underscores (_). The maximum length is 500 characters.
:param pulumi.Input['PubsubTargetArgs'] pubsub_target: Pub/Sub target.
:param pulumi.Input['RetryConfigArgs'] retry_config: Settings that determine the retry behavior.
:param pulumi.Input[str] schedule: Required, except when used with UpdateJob. Describes the schedule on which the job will be executed. The schedule can be either of the following types: * [Crontab](http://en.wikipedia.org/wiki/Cron#Overview) * English-like [schedule](https://cloud.google.com/scheduler/docs/configuring/cron-job-schedules) As a general rule, execution `n + 1` of a job will not begin until execution `n` has finished. Cloud Scheduler will never allow two simultaneously outstanding executions. For example, this implies that if the `n+1`th execution is scheduled to run at 16:00 but the `n`th execution takes until 16:15, the `n+1`th execution will not start until `16:15`. A scheduled start time will be delayed if the previous execution has not ended when its scheduled time occurs. If retry_count > 0 and a job attempt fails, the job will be tried a total of retry_count times, with exponential backoff, until the next scheduled start time.
:param pulumi.Input[str] time_zone: Specifies the time zone to be used in interpreting schedule. The value of this field must be a time zone name from the [tz database](http://en.wikipedia.org/wiki/Tz_database). Note that some time zones include a provision for daylight saving time. The rules for daylight saving time are determined by the chosen tz. For UTC use the string "utc". If a time zone is not specified, the default is UTC (also known as GMT).
"""
if app_engine_http_target is not None:
pulumi.set(__self__, "app_engine_http_target", app_engine_http_target)
if attempt_deadline is not None:
pulumi.set(__self__, "attempt_deadline", attempt_deadline)
if description is not None:
pulumi.set(__self__, "description", description)
if http_target is not None:
pulumi.set(__self__, "http_target", http_target)
if legacy_app_engine_cron is not None:
pulumi.set(__self__, "legacy_app_engine_cron", legacy_app_engine_cron)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if pubsub_target is not None:
pulumi.set(__self__, "pubsub_target", pubsub_target)
if retry_config is not None:
pulumi.set(__self__, "retry_config", retry_config)
if schedule is not None:
pulumi.set(__self__, "schedule", schedule)
if time_zone is not None:
pulumi.set(__self__, "time_zone", time_zone)
@property
@pulumi.getter(name="appEngineHttpTarget")
def app_engine_http_target(self) -> Optional[pulumi.Input['AppEngineHttpTargetArgs']]:
"""
App Engine HTTP target.
"""
return pulumi.get(self, "app_engine_http_target")
@app_engine_http_target.setter
def app_engine_http_target(self, value: Optional[pulumi.Input['AppEngineHttpTargetArgs']]):
pulumi.set(self, "app_engine_http_target", value)
@property
@pulumi.getter(name="attemptDeadline")
def attempt_deadline(self) -> Optional[pulumi.Input[str]]:
"""
The deadline for job attempts. If the request handler does not respond by this deadline then the request is cancelled and the attempt is marked as a `DEADLINE_EXCEEDED` failure. The failed attempt can be viewed in execution logs. Cloud Scheduler will retry the job according to the RetryConfig. The allowed duration for this deadline is: * For HTTP targets, between 15 seconds and 30 minutes. * For App Engine HTTP targets, between 15 seconds and 24 hours. * For PubSub targets, this field is ignored.
"""
return pulumi.get(self, "attempt_deadline")
@attempt_deadline.setter
def attempt_deadline(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "attempt_deadline", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="httpTarget")
def http_target(self) -> Optional[pulumi.Input['HttpTargetArgs']]:
"""
HTTP target.
"""
return pulumi.get(self, "http_target")
@http_target.setter
def http_target(self, value: Optional[pulumi.Input['HttpTargetArgs']]):
pulumi.set(self, "http_target", value)
@property
@pulumi.getter(name="legacyAppEngineCron")
def legacy_app_engine_cron(self) -> Optional[pulumi.Input[bool]]:
"""
Immutable. This field is used to manage the legacy App Engine Cron jobs using the Cloud Scheduler API. If the field is set to true, the job will be considered a legacy job. Note that App Engine Cron jobs have fewer features than Cloud Scheduler jobs, e.g., they are limited to App Engine targets.
"""
return pulumi.get(self, "legacy_app_engine_cron")
@legacy_app_engine_cron.setter
def legacy_app_engine_cron(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "legacy_app_engine_cron", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Optionally caller-specified in CreateJob, after which it becomes output only. The job name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`. * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or periods (.). For more information, see [Identifying projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects) * `LOCATION_ID` is the canonical ID for the job's location. The list of available locations can be obtained by calling ListLocations. For more information, see https://cloud.google.com/about/locations/. * `JOB_ID` can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), or underscores (_). The maximum length is 500 characters.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="pubsubTarget")
def pubsub_target(self) -> Optional[pulumi.Input['PubsubTargetArgs']]:
"""
Pub/Sub target.
"""
return pulumi.get(self, "pubsub_target")
@pubsub_target.setter
def pubsub_target(self, value: Optional[pulumi.Input['PubsubTargetArgs']]):
pulumi.set(self, "pubsub_target", value)
@property
@pulumi.getter(name="retryConfig")
def retry_config(self) -> Optional[pulumi.Input['RetryConfigArgs']]:
"""
Settings that determine the retry behavior.
"""
return pulumi.get(self, "retry_config")
@retry_config.setter
def retry_config(self, value: Optional[pulumi.Input['RetryConfigArgs']]):
pulumi.set(self, "retry_config", value)
@property
@pulumi.getter
def schedule(self) -> Optional[pulumi.Input[str]]:
"""
Required, except when used with UpdateJob. Describes the schedule on which the job will be executed. The schedule can be either of the following types: * [Crontab](http://en.wikipedia.org/wiki/Cron#Overview) * English-like [schedule](https://cloud.google.com/scheduler/docs/configuring/cron-job-schedules) As a general rule, execution `n + 1` of a job will not begin until execution `n` has finished. Cloud Scheduler will never allow two simultaneously outstanding executions. For example, this implies that if the `n+1`th execution is scheduled to run at 16:00 but the `n`th execution takes until 16:15, the `n+1`th execution will not start until `16:15`. A scheduled start time will be delayed if the previous execution has not ended when its scheduled time occurs. If retry_count > 0 and a job attempt fails, the job will be tried a total of retry_count times, with exponential backoff, until the next scheduled start time.
"""
return pulumi.get(self, "schedule")
@schedule.setter
def schedule(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schedule", value)
@property
@pulumi.getter(name="timeZone")
def time_zone(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the time zone to be used in interpreting schedule. The value of this field must be a time zone name from the [tz database](http://en.wikipedia.org/wiki/Tz_database). Note that some time zones include a provision for daylight saving time. The rules for daylight saving time are determined by the chosen tz. For UTC use the string "utc". If a time zone is not specified, the default is UTC (also known as GMT).
"""
return pulumi.get(self, "time_zone")
@time_zone.setter
def time_zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_zone", value)
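# Illustrative sketch (not generated code): building JobArgs for a job that
# fires every 5 minutes against a hypothetical HTTP endpoint; the URI,
# schedule, and time zone below are placeholders.
# example_args = JobArgs(
#     http_target=HttpTargetArgs(uri='https://example.com/task'),
#     schedule='*/5 * * * *',
#     time_zone='utc',
# )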
class Job(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_engine_http_target: Optional[pulumi.Input[pulumi.InputType['AppEngineHttpTargetArgs']]] = None,
attempt_deadline: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
http_target: Optional[pulumi.Input[pulumi.InputType['HttpTargetArgs']]] = None,
legacy_app_engine_cron: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
pubsub_target: Optional[pulumi.Input[pulumi.InputType['PubsubTargetArgs']]] = None,
retry_config: Optional[pulumi.Input[pulumi.InputType['RetryConfigArgs']]] = None,
schedule: Optional[pulumi.Input[str]] = None,
time_zone: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates a job.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['AppEngineHttpTargetArgs']] app_engine_http_target: App Engine HTTP target.
:param pulumi.Input[str] attempt_deadline: The deadline for job attempts. If the request handler does not respond by this deadline then the request is cancelled and the attempt is marked as a `DEADLINE_EXCEEDED` failure. The failed attempt can be viewed in execution logs. Cloud Scheduler will retry the job according to the RetryConfig. The allowed duration for this deadline is: * For HTTP targets, between 15 seconds and 30 minutes. * For App Engine HTTP targets, between 15 seconds and 24 hours. * For PubSub targets, this field is ignored.
:param pulumi.Input[str] description: Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters.
:param pulumi.Input[pulumi.InputType['HttpTargetArgs']] http_target: HTTP target.
:param pulumi.Input[bool] legacy_app_engine_cron: Immutable. This field is used to manage the legacy App Engine Cron jobs using the Cloud Scheduler API. If the field is set to true, the job will be considered a legacy job. Note that App Engine Cron jobs have fewer features than Cloud Scheduler jobs, e.g., are only limited to App Engine targets.
:param pulumi.Input[str] name: Optionally caller-specified in CreateJob, after which it becomes output only. The job name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`. * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or periods (.). For more information, see [Identifying projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects) * `LOCATION_ID` is the canonical ID for the job's location. The list of available locations can be obtained by calling ListLocations. For more information, see https://cloud.google.com/about/locations/. * `JOB_ID` can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), or underscores (_). The maximum length is 500 characters.
:param pulumi.Input[pulumi.InputType['PubsubTargetArgs']] pubsub_target: Pub/Sub target.
:param pulumi.Input[pulumi.InputType['RetryConfigArgs']] retry_config: Settings that determine the retry behavior.
:param pulumi.Input[str] schedule: Required, except when used with UpdateJob. Describes the schedule on which the job will be executed. The schedule can be either of the following types: * [Crontab](http://en.wikipedia.org/wiki/Cron#Overview) * English-like [schedule](https://cloud.google.com/scheduler/docs/configuring/cron-job-schedules) As a general rule, execution `n + 1` of a job will not begin until execution `n` has finished. Cloud Scheduler will never allow two simultaneously outstanding executions. For example, this implies that if the `n+1`th execution is scheduled to run at 16:00 but the `n`th execution takes until 16:15, the `n+1`th execution will not start until `16:15`. A scheduled start time will be delayed if the previous execution has not ended when its scheduled time occurs. If retry_count > 0 and a job attempt fails, the job will be tried a total of retry_count times, with exponential backoff, until the next scheduled start time.
:param pulumi.Input[str] time_zone: Specifies the time zone to be used in interpreting schedule. The value of this field must be a time zone name from the [tz database](http://en.wikipedia.org/wiki/Tz_database). Note that some time zones include a provision for daylight savings time. The rules for daylight saving time are determined by the chosen tz. For UTC use the string "utc". If a time zone is not specified, the default will be in UTC (also known as GMT).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[JobArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a job.
:param str resource_name: The name of the resource.
:param JobArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(JobArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_engine_http_target: Optional[pulumi.Input[pulumi.InputType['AppEngineHttpTargetArgs']]] = None,
attempt_deadline: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
http_target: Optional[pulumi.Input[pulumi.InputType['HttpTargetArgs']]] = None,
legacy_app_engine_cron: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
pubsub_target: Optional[pulumi.Input[pulumi.InputType['PubsubTargetArgs']]] = None,
retry_config: Optional[pulumi.Input[pulumi.InputType['RetryConfigArgs']]] = None,
schedule: Optional[pulumi.Input[str]] = None,
time_zone: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = JobArgs.__new__(JobArgs)
__props__.__dict__["app_engine_http_target"] = app_engine_http_target
__props__.__dict__["attempt_deadline"] = attempt_deadline
__props__.__dict__["description"] = description
__props__.__dict__["http_target"] = http_target
__props__.__dict__["legacy_app_engine_cron"] = legacy_app_engine_cron
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["pubsub_target"] = pubsub_target
__props__.__dict__["retry_config"] = retry_config
__props__.__dict__["schedule"] = schedule
__props__.__dict__["time_zone"] = time_zone
__props__.__dict__["last_attempt_time"] = None
__props__.__dict__["schedule_time"] = None
__props__.__dict__["state"] = None
__props__.__dict__["status"] = None
__props__.__dict__["user_update_time"] = None
super(Job, __self__).__init__(
'google-native:cloudscheduler/v1beta1:Job',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Job':
"""
Get an existing Job resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = JobArgs.__new__(JobArgs)
__props__.__dict__["app_engine_http_target"] = None
__props__.__dict__["attempt_deadline"] = None
__props__.__dict__["description"] = None
__props__.__dict__["http_target"] = None
__props__.__dict__["last_attempt_time"] = None
__props__.__dict__["legacy_app_engine_cron"] = None
__props__.__dict__["name"] = None
__props__.__dict__["pubsub_target"] = None
__props__.__dict__["retry_config"] = None
__props__.__dict__["schedule"] = None
__props__.__dict__["schedule_time"] = None
__props__.__dict__["state"] = None
__props__.__dict__["status"] = None
__props__.__dict__["time_zone"] = None
__props__.__dict__["user_update_time"] = None
return Job(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="appEngineHttpTarget")
def app_engine_http_target(self) -> pulumi.Output['outputs.AppEngineHttpTargetResponse']:
"""
App Engine HTTP target.
"""
return pulumi.get(self, "app_engine_http_target")
@property
@pulumi.getter(name="attemptDeadline")
def attempt_deadline(self) -> pulumi.Output[str]:
"""
The deadline for job attempts. If the request handler does not respond by this deadline then the request is cancelled and the attempt is marked as a `DEADLINE_EXCEEDED` failure. The failed attempt can be viewed in execution logs. Cloud Scheduler will retry the job according to the RetryConfig. The allowed duration for this deadline is: * For HTTP targets, between 15 seconds and 30 minutes. * For App Engine HTTP targets, between 15 seconds and 24 hours. * For PubSub targets, this field is ignored.
"""
return pulumi.get(self, "attempt_deadline")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
Optionally caller-specified in CreateJob or UpdateJob. A human-readable description for the job. This string must not contain more than 500 characters.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="httpTarget")
def http_target(self) -> pulumi.Output['outputs.HttpTargetResponse']:
"""
HTTP target.
"""
return pulumi.get(self, "http_target")
@property
@pulumi.getter(name="lastAttemptTime")
def last_attempt_time(self) -> pulumi.Output[str]:
"""
The time the last job attempt started.
"""
return pulumi.get(self, "last_attempt_time")
@property
@pulumi.getter(name="legacyAppEngineCron")
def legacy_app_engine_cron(self) -> pulumi.Output[bool]:
"""
Immutable. This field is used to manage the legacy App Engine Cron jobs using the Cloud Scheduler API. If the field is set to true, the job will be considered a legacy job. Note that App Engine Cron jobs have fewer features than Cloud Scheduler jobs, e.g., are only limited to App Engine targets.
"""
return pulumi.get(self, "legacy_app_engine_cron")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Optionally caller-specified in CreateJob, after which it becomes output only. The job name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`. * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or periods (.). For more information, see [Identifying projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects) * `LOCATION_ID` is the canonical ID for the job's location. The list of available locations can be obtained by calling ListLocations. For more information, see https://cloud.google.com/about/locations/. * `JOB_ID` can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), or underscores (_). The maximum length is 500 characters.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="pubsubTarget")
def pubsub_target(self) -> pulumi.Output['outputs.PubsubTargetResponse']:
"""
Pub/Sub target.
"""
return pulumi.get(self, "pubsub_target")
@property
@pulumi.getter(name="retryConfig")
def retry_config(self) -> pulumi.Output['outputs.RetryConfigResponse']:
"""
Settings that determine the retry behavior.
"""
return pulumi.get(self, "retry_config")
@property
@pulumi.getter
def schedule(self) -> pulumi.Output[str]:
"""
Required, except when used with UpdateJob. Describes the schedule on which the job will be executed. The schedule can be either of the following types: * [Crontab](http://en.wikipedia.org/wiki/Cron#Overview) * English-like [schedule](https://cloud.google.com/scheduler/docs/configuring/cron-job-schedules) As a general rule, execution `n + 1` of a job will not begin until execution `n` has finished. Cloud Scheduler will never allow two simultaneously outstanding executions. For example, this implies that if the `n+1`th execution is scheduled to run at 16:00 but the `n`th execution takes until 16:15, the `n+1`th execution will not start until `16:15`. A scheduled start time will be delayed if the previous execution has not ended when its scheduled time occurs. If retry_count > 0 and a job attempt fails, the job will be tried a total of retry_count times, with exponential backoff, until the next scheduled start time.
"""
return pulumi.get(self, "schedule")
@property
@pulumi.getter(name="scheduleTime")
def schedule_time(self) -> pulumi.Output[str]:
"""
The next time the job is scheduled. Note that this may be a retry of a previously failed attempt or the next execution time according to the schedule.
"""
return pulumi.get(self, "schedule_time")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
State of the job.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def status(self) -> pulumi.Output['outputs.StatusResponse']:
"""
The response from the target for the last attempted execution.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="timeZone")
def time_zone(self) -> pulumi.Output[str]:
"""
Specifies the time zone to be used in interpreting schedule. The value of this field must be a time zone name from the [tz database](http://en.wikipedia.org/wiki/Tz_database). Note that some time zones include a provision for daylight savings time. The rules for daylight saving time are determined by the chosen tz. For UTC use the string "utc". If a time zone is not specified, the default will be in UTC (also known as GMT).
"""
return pulumi.get(self, "time_zone")
@property
@pulumi.getter(name="userUpdateTime")
def user_update_time(self) -> pulumi.Output[str]:
"""
The creation time of the job.
"""
return pulumi.get(self, "user_update_time")
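# Usage sketch (hypothetical, not part of the generated SDK): creating a Job
# with a cron schedule via this module's classes. Project, location, and topic
# values below are placeholders.
#
#     job = Job("example-job",
#         location="us-central1",
#         schedule="*/5 * * * *",
#         time_zone="utc",
#         pubsub_target=PubsubTargetArgs(topic_name="projects/my-project/topics/my-topic"),
#     )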
|
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import {
cleanGeometry,
geoJsonCleanAndValidate,
} from './geo_json_clean_and_validate';
const jsts = require('jsts');
describe('geo_json_clean_and_validate', () => {
const reader = new jsts.io.GeoJSONReader();
it('should not modify valid features', () => {
const goodFeatureGeoJson = {
type: 'Feature',
geometry: {
type: 'Polygon',
coordinates: [[
[-104.05, 78.99],
[-87.22, 78.98],
[-86.58, 75.94],
[-104.03, 75.94],
[-104.05, 78.99]
]]
},
};
// Confirm valid geometry
const geoJson = reader.read(goodFeatureGeoJson);
const isSimpleOrValid = (geoJson.geometry.isSimple()
|| geoJson.geometry.isValid());
expect(isSimpleOrValid).toEqual(true);
// Confirm no change to features
const cleanedFeature = cleanGeometry(geoJson);
expect(cleanedFeature).toEqual(goodFeatureGeoJson.geometry);
});
it('should modify incorrect features', () => {
// This feature collection contains polygons which cross over themselves,
// which is invalid for geojson
const badFeaturesGeoJson = {
type: 'FeatureCollection',
features: [
{
type: 'Feature',
geometry: {
type: 'Polygon',
coordinates: [[
[0, 0],
[2, 2],
[0, 2],
[2, 0],
[0, 0]
]]
}
},
{
type: 'Feature',
geometry: {
type: 'Polygon',
coordinates: [[
[2, 2],
[4, 0],
[2, 0],
[4, 2],
[2, 2]
]]
}
}
]
};
// Confirm invalid geometry
let geoJson = reader.read(badFeaturesGeoJson);
let isSimpleOrValid;
geoJson.features.forEach(feature => {
isSimpleOrValid = (feature.geometry.isSimple()
|| feature.geometry.isValid());
expect(isSimpleOrValid).toEqual(false);
});
// Confirm changes to object
const cleanedFeatures = geoJson.features.map(feature => ({
...feature,
geometry: cleanGeometry(feature)
}));
cleanedFeatures.forEach((feature, idx) =>
expect(feature).not.toEqual(badFeaturesGeoJson.features[idx])
);
// Confirm now valid features geometry
geoJson = reader.read({ ...badFeaturesGeoJson, features: cleanedFeatures });
geoJson.features.forEach(feature => {
isSimpleOrValid = (feature.geometry.isSimple()
|| feature.geometry.isValid());
expect(isSimpleOrValid).toEqual(true);
});
});
it('should reverse counter-clockwise winding order', () => {
const counterClockwiseGeoJson = {
type: 'Feature',
geometry: {
type: 'Polygon',
coordinates: [[
[100, 0],
[101, 0],
[101, 1],
[100, 1],
[100, 0]
], [
[100.2, 0.2],
[100.8, 0.2],
[100.8, 0.8],
[100.2, 0.8],
[100.2, 0.2]
]]
}
};
// Confirm changes to object
const clockwiseGeoJson = geoJsonCleanAndValidate(counterClockwiseGeoJson);
expect(clockwiseGeoJson).not.toEqual(counterClockwiseGeoJson);
// Run it through again, expect it not to change
const clockwiseGeoJson2 = geoJsonCleanAndValidate(clockwiseGeoJson);
expect(clockwiseGeoJson).toEqual(clockwiseGeoJson2);
});
it('errors out on invalid object', () => {
const invalidGeoJson = {
type: 'notMyType',
geometry: 'shmeometry'
};
const notEvenCloseToGeoJson = [1, 2, 3, 4];
const badObjectPassed = () => geoJsonCleanAndValidate(invalidGeoJson);
expect(badObjectPassed).toThrow();
const worseObjectPassed = () => geoJsonCleanAndValidate(notEvenCloseToGeoJson);
expect(worseObjectPassed).toThrow();
});
});
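// Usage sketch (based on the behaviour exercised above): callers pass a
// GeoJSON feature; geoJsonCleanAndValidate repairs self-intersecting rings,
// normalizes winding order, and throws on non-GeoJSON input.
//
// const cleanedFeature = geoJsonCleanAndValidate(rawFeature);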
|
"""Provides device trigger for lights."""
from __future__ import annotations
import voluptuous as vol
from openpeerpower.components.automation import AutomationActionType
from openpeerpower.components.device_automation import toggle_entity
from openpeerpower.const import CONF_DOMAIN
from openpeerpower.core import CALLBACK_TYPE, OpenPeerPower
from openpeerpower.helpers.typing import ConfigType
from . import DOMAIN
TRIGGER_SCHEMA = toggle_entity.TRIGGER_SCHEMA.extend(
{vol.Required(CONF_DOMAIN): DOMAIN}
)
async def async_attach_trigger(
opp: OpenPeerPower,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Listen for state changes based on configuration."""
return await toggle_entity.async_attach_trigger(
opp, config, action, automation_info
)
async def async_get_triggers(opp: OpenPeerPower, device_id: str) -> list[dict]:
"""List device triggers."""
return await toggle_entity.async_get_triggers(opp, device_id, DOMAIN)
async def async_get_trigger_capabilities(opp: OpenPeerPower, config: dict) -> dict:
"""List trigger capabilities."""
return await toggle_entity.async_get_trigger_capabilities(opp, config)
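# Usage sketch (hypothetical trigger config validated by TRIGGER_SCHEMA above;
# ids and trigger type are placeholders following toggle_entity's schema):
#
#     config = TRIGGER_SCHEMA({
#         "platform": "device",
#         "domain": DOMAIN,
#         "device_id": "abc123",
#         "entity_id": "light.kitchen",
#         "type": "turned_on",
#     })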
|
/*
Copyright 2018 New Vector Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import React from 'react';
import PropTypes from 'prop-types';
import sdk from '../../../index';
import MatrixClientPeg from '../../../MatrixClientPeg';
import Modal from '../../../Modal';
import { _t } from '../../../languageHandler';
export default React.createClass({
displayName: 'RoomUpgradeDialog',
propTypes: {
room: PropTypes.object.isRequired,
onFinished: PropTypes.func.isRequired,
},
componentWillMount: function() {
this._targetVersion = this.props.room.shouldUpgradeToVersion();
},
getInitialState: function() {
return {
busy: false,
};
},
_onCancelClick: function() {
this.props.onFinished(false);
},
_onUpgradeClick: function() {
this.setState({busy: true});
MatrixClientPeg.get().upgradeRoom(this.props.room.roomId, this._targetVersion).catch((err) => {
const ErrorDialog = sdk.getComponent("dialogs.ErrorDialog");
Modal.createTrackedDialog('Failed to upgrade room', '', ErrorDialog, {
title: _t("Failed to upgrade room"),
description: ((err && err.message) ? err.message : _t("The room upgrade could not be completed")),
});
}).finally(() => {
this.setState({busy: false});
});
},
render: function() {
const BaseDialog = sdk.getComponent('views.dialogs.BaseDialog');
const DialogButtons = sdk.getComponent('views.elements.DialogButtons');
const Spinner = sdk.getComponent('views.elements.Spinner');
let buttons;
if (this.state.busy) {
buttons = <Spinner />;
} else {
buttons = <DialogButtons
primaryButton={_t(
'Upgrade this room to version %(version)s',
{version: this._targetVersion},
)}
primaryButtonClass="danger"
hasCancel={true}
onPrimaryButtonClick={this._onUpgradeClick}
focus={this.props.focus}
onCancel={this._onCancelClick}
/>;
}
return (
<BaseDialog className="mx_RoomUpgradeDialog"
title={_t("Upgrade Room Version")}
contentId='mx_Dialog_content'
onFinished={this.props.onFinished}
hasCancel={true}
>
<p>
{_t(
"Upgrading this room requires closing down the current " +
"instance of the room and creating a new room it its place. " +
"To give room members the best possible experience, we will:",
)}
</p>
<ol>
<li>{_t("Create a new room with the same name, description and avatar")}</li>
<li>{_t("Update any local room aliases to point to the new room")}</li>
<li>{_t("Stop users from speaking in the old version of the room, and post a message advising users to move to the new room")}</li>
<li>{_t("Put a link back to the old room at the start of the new room so people can see old messages")}</li>
</ol>
{buttons}
</BaseDialog>
);
},
});
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'pablo'
import subprocess
import os
import matrix
import count
import sort
import random
from threading import Thread
threads = []
num_threads = 20
def my_function(another_function, param):
"""
Llama a una función dada por parámetro (param1) y le pasa el segundo parámetro a la misma (param2)
:param another_function:
:param param:
"""
another_function(param)
def thread_me(name, lim):
"""
Dado un nombre de una función y un límite (o lista) este método crea un hilo con estos dos argumentos.
El hilo llama a my_function, es inicializado e introducido a una lista de hilos.
:param name:
:param lim:
"""
for j in range(num_threads):
t = Thread(target=my_function, args=(name, lim))
t.start()
threads.append(t)
def benchmark():
"""
Llama a la función thread_me pasando la función de carga deseada implementadas en las librerías de count, sort y matrix.
En el caso de sort generaremos una lista de números aleatoria para que la ordene
"""
# count
thread_me(count.fact, 30)
thread_me(count.fib, 30)
# sort
n = 1000
list1 = []
for i in range(0, n):
list1.append(random.randint(0, n-1))
thread_me(sort.sort, list1)
thread_me(sort.merge_sort, list1)
# matrix
thread_me(matrix.m_sum, 20)
thread_me(matrix.m_prod, 20)
thread_me(matrix.m_times, 20)
def avg():
results_files = ['fib', 'fact', 'sort', 'merge_sort', 'm_sum', 'm_prod', 'm_times' ]
path = str(os.path.dirname(os.path.abspath(__file__))) + "/results"
for result in results_files:
# awk script to compute the average
command = "awk < results_" + str(result) + \
" '{ sum += $3; n++ } END { if (n > 0) print $1 \" : \" sum / n \" ms \"; }' >> " + path + ".txt "
subprocess.call(command,shell=True)
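# For example, for the 'fib' results file the generated command expands to:
#   awk < results_fib '{ sum += $3; n++ } END { if (n > 0) print $1 " : " sum / n " ms "; }' >> <script_dir>/results.txt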
if __name__ == "__main__":
# Start by calling the benchmark function 5 times to collect samples
print "Starting benchmark..."
for i in range(0, 5):
benchmark()
# Reaper: join the threads started by benchmark()
for t in threads:
t.join()
print "Terminando hilos"
# finally, average the generated result files
avg()
print "Proceso finalizado. Consulte los resultados en \"results.txt\""
|
import {
Avatar,
Box,
Card,
CardContent,
Grid,
Typography
} from '@mui/material';
import ArrowDownwardIcon from '@mui/icons-material/ArrowDownward';
import MoneyIcon from '@mui/icons-material/Money';
import { red } from '@mui/material/colors';
const Budget = (props) => (
<Card
sx={{ height: '100%' }}
{...props}
>
<CardContent>
<Grid
container
spacing={3}
sx={{ justifyContent: 'space-between' }}
>
<Grid item>
<Typography
color="textSecondary"
gutterBottom
variant="h6"
>
BUDGET
</Typography>
<Typography
color="textPrimary"
variant="h3"
>
$24,000
</Typography>
</Grid>
<Grid item>
<Avatar
sx={{
backgroundColor: red[600],
height: 56,
width: 56
}}
>
<MoneyIcon />
</Avatar>
</Grid>
</Grid>
<Box
sx={{
pt: 2,
display: 'flex',
alignItems: 'center'
}}
>
<ArrowDownwardIcon sx={{ color: red[900] }} />
<Typography
sx={{
color: red[900],
mr: 1
}}
variant="body2"
>
12%
</Typography>
<Typography
color="textSecondary"
variant="caption"
>
Since last month
</Typography>
</Box>
</CardContent>
</Card>
);
export default Budget;
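// Usage sketch: the component takes no required props; any extra props are
// spread onto the underlying Card.
//
// <Budget />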
|
// rayTracing.js is the entrypoint to the raytracing code. It will be separate from canvasRenderer.js as that will allow it to be used with ascii renderers etc.
import { Scene } from './scene.js';
import { SphereObject } from './sphereObject.js';
import { PlaneObject } from './planeObject.js';
import { Vector } from './vector.js';
import { Camera } from './camera.js';
import { Matrix } from './matrix.js';
import { DirectionalLight, PointLight } from './light.js';
import { ColorVector } from './colorVector.js';
import { reflectVectorInPlane } from './rayMath.js';
import { Material } from './material.js';
import { sphericalMap } from './textureMapping.js';
import { pointAroundCircle } from './circleMath.js';
export class RayTracing {
constructor (buffer, width, height) {
this.buffer = buffer;
this.width = width;
this.height = height;
this.NUM_RAY_BOUNCES = 3;
// constructor cannot be async so we need a new function:
(async () => {
this.skyMaterial = await Material.newMaterial('resources/sky.jpg', 1, -1, 0, 0.6);
await this.createScene();
const t0 = performance.now();
this.renderScene();
const t1 = performance.now();
console.log(`Rendered in ${t1 - t0}ms`);
})();
}
raycast (origin, direction) {
let closest = null;
for (const obj of this.scene.objects) {
const hit = obj.rayIntersection({ origin: origin, direction: direction });
if (hit && (closest === null || hit.hits[0].distance < closest.hits[0].distance)) {
closest = hit;
}
}
return closest;
}
raytrace (origin, direction, maxBounces, hits = []) {
const hit = this.raycast(origin, direction);
if (hit && maxBounces >= 0) {
hits.push(hit);
const hitPoint = hit.hits[0].point;
const reflectedVector = reflectVectorInPlane(direction, hit.object.surfaceNormal(hitPoint));
return this.raytrace(hitPoint, reflectedVector, maxBounces - 1, hits);
} else {
return { hits, direction };
}
}
computeLight (point, surfaceNorm, vectToCamera, specular) {
let totalIllum = 0;
// Check how much light every scene light contributes to the point
for (const light of this.scene.lights) {
const vectToLight = (light instanceof PointLight) ? light.pos.sub(point) : light.dir;
// Check not in shadow by doing a raycast in this direction and ensuring nothing is hit:
const hit = this.raycast(point, vectToLight);
if (hit && ((light instanceof DirectionalLight) || hit.hits[0].distance < vectToLight.magnitude)) {
continue;
}
// Taken from https://gabrielgambetta.com/computer-graphics-from-scratch/03-light.html
// Diffuse reflection
const mattScale = surfaceNorm.dot(vectToLight) / (vectToLight.magnitude);
if (mattScale > 0) {
totalIllum += light.intensity * mattScale;
}
// Specular reflection
if (specular !== -1) {
const reflectedLightVect = (surfaceNorm.mul(2 * surfaceNorm.dot(vectToLight))).sub(vectToLight);
const reflectedDotCamera = vectToCamera.dot(reflectedLightVect);
if (reflectedDotCamera > 0) {
const specScale = (reflectedDotCamera / (vectToCamera.magnitude * reflectedLightVect.magnitude)) ** specular;
totalIllum += light.intensity * specScale;
}
}
}
return totalIllum;
}
renderScene () {
this.camera.iterateDirectionVectors(this.width, this.height, (dir, sampleNum) => {
// Finds where the original ray would have hit the focus plane
const focusPoint = this.camera.pos.add(dir.mul(this.camera.focusDist / this.camera.forward.dot(dir)));
const startPoint = this.startPointForSample(sampleNum);
const toFocus = focusPoint.sub(startPoint).normalized();
const { hits, direction } = this.raytrace(startPoint, toFocus, this.NUM_RAY_BOUNCES);
let partialColor = this.skyMaterial.colorAtUV(sphericalMap(direction));
hits.reverse().forEach((hit) => {
const hitColor = this.unlitColorForHit(hit);
const illumination = this.illuminationForHit(hit);
partialColor = hitColor.mul(illumination).lerp(partialColor, hit.object.reflectivity);
});
return partialColor;
}, (x, y, color) => {
// Separate pixel colouring so color can be accumulated to prevent rounding errors
this.buffer[y * this.width + x] = color.getReverseHexColor();
});
}
startPointForSample (sampleNum) {
const pointOnCircle = pointAroundCircle(sampleNum / this.camera.SAMPLECOUNT);
const hScaled = this.camera.rotMatrix.vectMul(this.camera.hRadius).mul(pointOnCircle.x * Math.random());
const vScaled = this.camera.rotMatrix.vectMul(this.camera.vRadius).mul(pointOnCircle.y * Math.random());
return this.camera.pos.add(hScaled).add(vScaled);
}
illuminationForHit (hit) {
const point = hit.hits[0].point;
const hitToCamera = this.camera.pos.sub(point);
const surfaceNorm = hit.object.surfaceNormal(point);
return this.computeLight(point, surfaceNorm, hitToCamera, hit.object.specular);
}
unlitColorForHit (hit) {
// Index 0 always has the lowest distance
const point = hit.hits[0].point;
return hit.object.colorAtPoint(point);
}
async createScene () {
const earthMat = await Material.newMaterial('resources/earth.jpg', 1, 10, 0, 0.25);
const poolBallMat = await Material.newMaterial('resources/poolball.jpg', 1, 10, 0.05);
const bricksMat = await Material.newMaterial('resources/bricks.jpg', 1, -1, 0);
const planeMat = await Material.newMaterial('resources/checkerboard.png', 1, -1, 0.5);
this.scene = new Scene();
const sphere1 = new SphereObject(new Vector(1, 0, 0), 0.5, bricksMat);
const sphere2 = new SphereObject(new Vector(3, 1, 0.5), 0.5, poolBallMat);
const sphere3 = new SphereObject(new Vector(6, -1, 1.5), 0.5, earthMat);
const sphere4 = new SphereObject(new Vector(4, 0.2, 1.5), 0.5, Material.newColorMaterial(ColorVector.white, 100, 0.25));
const plane1 = new PlaneObject(new Vector(0, 0, -1), new Vector(0, 0, 1), Infinity, planeMat);
const light1 = new PointLight(new Vector(2, 0, 2.5), 0.75);
const light2 = new PointLight(new Vector(1, 0, 2), 1);
this.scene.addObject(sphere1);
this.scene.addObject(sphere2);
this.scene.addObject(sphere3);
this.scene.addObject(sphere4);
this.scene.addObject(plane1);
this.scene.addLight(light1);
this.scene.addLight(light2);
const FOV = (60 / 360) * 2 * Math.PI;
this.camera = new Camera(new Vector(-2, 0, 1), Matrix.yawPitchRoll(0, 0, 0), FOV, 0);
}
}
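// Usage sketch (hypothetical wiring; canvasRenderer.js is expected to do the
// equivalent with a real drawing surface):
//
// const width = 320;
// const height = 240;
// const buffer = new Uint32Array(width * height); // one packed colour per pixel
// new RayTracing(buffer, width, height); // renders asynchronously once materials load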
|
module.exports={A:{A:{"2":"H C G E A B EB"},B:{"2":"D g w J L N I"},C:{"2":"0 1 2 4 5 6 7 8 YB BB F K H C G E A B D g w J L N I O P Q R S T U V W X Y Z z b c d e f M h i j k l m n o p q r s t u v x y WB QB"},D:{"1":"0 1 2 4 5 6 7 8 V W X Y Z z b c d e f M h i j k l m n o p q r s t u v x y KB aB FB DB GB HB IB","16":"F K H C G E A B D g w J L N I O P Q R S T U"},E:{"1":"H C G E A B LB MB NB OB PB a RB","2":"F JB CB","16":"K"},F:{"1":"9 D J L N I O P Q R S T U V W X Y Z z b c d e f M h i j k l m n o p q r s t u v XB","16":"E B SB TB UB VB a AB"},G:{"1":"G dB eB fB gB hB iB jB kB","16":"3 CB ZB bB cB"},H:{"16":"lB"},I:{"1":"3 F y pB qB rB","16":"BB mB nB oB"},J:{"16":"C A"},K:{"16":"9 A B D M a AB"},L:{"1":"DB"},M:{"2":"x"},N:{"16":"A B"},O:{"16":"sB"},P:{"1":"F K tB"},Q:{"1":"uB"},R:{"1":"vB"}},B:5,C:"DOMFocusIn & DOMFocusOut events"};
|
/*
Copyright 2017 Carmilla Mina Jankovic
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
'use strict';
const express = require('express');
const objectValidator = require('../../utils/objectValidator');
const userManager = require('../../managers/users');
const roomManager = require('../../managers/rooms');
const aliasManager = require('../../managers/aliases');
const restErrorChecker = require('../../helpers/restErrorChecker');
const errorCreator = require('../../error/errorCreator');
const { dbConfig } = require('../../config/defaults/config');
const router = new express.Router();
/**
* @param {Object} io Socket.io
* @returns {Object} Router
*/
function handle(io) {
/**
* @api {get} /users Get users.
* @apiVersion 8.0.0
* @apiName GetUsers
* @apiGroup Users
*
* @apiHeader {string} Authorization Your JSON Web Token.
*
* @apiDescription Get users.
*
* @apiParam {boolean} [includeInactive] [Query] Should banned and unverified users be in the result?
*
* @apiSuccess {Object} data
* @apiSuccess {Object} data.users Found users.
*/
router.get('/', (request, response) => {
const { authorization: token } = request.headers;
const { includeInactive } = request.query;
userManager.getUsersByUser({
token,
includeInactive,
callback: ({ error, data }) => {
if (error) {
restErrorChecker.checkAndSendError({ response, error });
return;
}
response.json({ data });
},
});
});
/**
* @api {post} /users/:userId/password Update a user's password.
* @apiVersion 8.0.0
* @apiName ChangePassword
* @apiGroup Users
*
* @apiHeader {string} Authorization Your JSON Web Token.
*
* @apiDescription Update a user's password.
*
* @apiParam {string} userId Id of the user that will get a new password.
*
* @apiParam {Object} data
* @apiParam {string} data.password New password.
*
* @apiSuccess {Object} data
* @apiSuccess {Object[]} data.success Was the password properly changed?
*/
router.post('/:userId/password', (request, response) => {
const sentData = request.body.data;
if (!objectValidator.isValidData(request.params, { userId: true })) {
restErrorChecker.checkAndSendError({ response, error: new errorCreator.InvalidData({ expected: '{ userId }' }) });
return;
}
const { authorization: token } = request.headers;
const { password } = request.body.data;
const { userId } = request.params;
userManager.changePassword({
password,
userId,
token,
callback: ({ error, data }) => {
if (error) {
sentData.password = typeof sentData.password !== 'undefined';
restErrorChecker.checkAndSendError({ response, error, sentData });
return;
}
response.json({ data });
},
});
});
/**
* @api {post} /users Create a user.
* @apiVersion 8.0.0
* @apiName CreateUser
* @apiGroup Users
*
* @apiHeader {string} Authorization Your JSON Web Token.
*
* @apiDescription Create a user.
*
* @apiParam {Object} data
* @apiParam {Object} data.user User to create.
* @apiParam {Object} [data.options] Update options.
*
* @apiSuccess {Object} data
* @apiSuccess {Object} data.user Created user.
*/
router.post('/', (request, response) => {
const sentData = request.body.data;
if (!objectValidator.isValidData(request.body, { data: { user: { username: true, password: true } } })) {
sentData.user.password = typeof sentData.user.password !== 'undefined';
restErrorChecker.checkAndSendError({ response, error: new errorCreator.InvalidData({ expected: '{ data: { user: { username, password } } }' }), sentData });
return;
}
const { authorization: token } = request.headers;
const { user, options } = request.body.data;
user.registerDevice = dbConfig.DeviceTypes.RESTAPI;
userManager.createUser({
user,
token,
io,
options,
callback: ({ error, data }) => {
if (error) {
sentData.user.password = typeof sentData.user.password !== 'undefined';
restErrorChecker.checkAndSendError({ response, error, sentData });
return;
}
response.json({ data });
},
});
});
/**
* @api {put} /users/:userId Update a user.
* @apiVersion 8.0.0
* @apiName UpdateUser
* @apiGroup Users
*
* @apiHeader {string} Authorization Your JSON Web Token.
*
* @apiDescription Update a user.
*
* @apiParam {string} userIdToUpdate [Url] Id of the user to update.
*
* @apiParam {Object} data Body parameters.
* @apiParam {Object} data.user User parameters to update.
* @apiParam {Object} [data.options] Update options.
*
* @apiSuccess {Object} data
* @apiSuccess {User} data.user Updated user.
*/
router.put('/:userId', (request, response) => {
if (!objectValidator.isValidData(request.params, { userId: true })) {
restErrorChecker.checkAndSendError({ response, error: new errorCreator.InvalidData({ expected: '{ userId }' }) });
return;
}
if (!objectValidator.isValidData(request.body, { data: { user: true } })) {
restErrorChecker.checkAndSendError({ response, error: new errorCreator.InvalidData({ expected: '{ data: { user } }' }), sentData: request.body.data });
return;
}
const {
user,
options,
} = request.body.data;
const { userId } = request.params;
const { authorization: token } = request.headers;
userManager.updateUser({
user,
options,
io,
userId,
token,
callback: ({ error, data }) => {
if (error) {
restErrorChecker.checkAndSendError({ response, error, sentData: request.body.data });
return;
}
response.json({ data });
},
});
});
/**
* @api {get} /users/:userId Get a user.
* @apiVersion 8.0.0
* @apiName GetUser
* @apiGroup Users
*
* @apiHeader {string} Authorization Your JSON Web Token.
*
* @apiDescription Get a user.
*
* @apiParam {string} userId [Url] Id of the user to get.
*
* @apiSuccess {Object} data
* @apiSuccess {Object} data.user Found user.
*/
router.get('/:userId', (request, response) => {
if (!objectValidator.isValidData(request.params, { userId: true })) {
restErrorChecker.checkAndSendError({ response, error: new errorCreator.InvalidData({ expected: '{ userId }' }) });
return;
}
const { authorization: token } = request.headers;
const { userId } = request.params;
userManager.getUserById({
token,
userId,
callback: ({ error, data }) => {
if (error) {
restErrorChecker.checkAndSendError({ response, error });
return;
}
response.json({ data });
},
});
});
/**
* @api {put} /users/:userId/rooms/:roomId/follow Follow a room.
* @apiVersion 8.0.0
* @apiName FollowRoom
* @apiGroup Users
*
* @apiHeader {string} Authorization Your JSON Web Token.
*
* @apiDescription Follow a room.
*
* @apiParam {Object} data
* @apiParam {string} [data.password] Password for the room.
* @apiParam {string} [data.aliasId] Id of the alias used to follow the room.
*
* @apiSuccess {Object} data
* @apiSuccess {Object} data.room Followed room.
*/
router.put('/:userId/rooms/:roomId/follow', (request, response) => {
const sentData = request.body.data;
if (!objectValidator.isValidData(request.params, { userId: true, roomId: true })) {
sentData.password = typeof sentData.password !== 'undefined';
restErrorChecker.checkAndSendError({ response, error: new errorCreator.InvalidData({ expected: '{ userId, roomId }' }), sentData });
return;
}
const { password, aliasId } = request.body.data;
const { roomId } = request.params;
const { authorization: token } = request.headers;
roomManager.followRoom({
io,
token,
roomId,
password,
aliasId,
callback: ({ error, data }) => {
if (error) {
sentData.password = typeof sentData.password !== 'undefined';
restErrorChecker.checkAndSendError({ response, error, sentData });
return;
}
response.json({ data });
},
});
});
/**
* @api {put} /users/:userId/rooms/:roomId/unfollow Unfollow a room.
* @apiVersion 8.0.0
* @apiName UnfollowRoom
* @apiGroup Users
*
* @apiHeader {string} Authorization Your JSON Web Token
*
* @apiDescription Unfollow a room.
*
* @apiParam {string} userId [Url] Id of the user that will unfollow the room.
* @apiParam {string} roomId [Url] Id of the room to unfollow.
*
* @apiParam {Object} data
* @apiParam {string} [data.aliasId] Id of the alias that will unfollow the room. It overrides userId.
*
* @apiSuccess {Object} data
* @apiSuccess {Object} data.room Room that was unfollowed.
*/
router.put('/:userId/rooms/:roomId/unfollow', (request, response) => {
if (!objectValidator.isValidData(request.params, { userId: true, roomId: true })) {
restErrorChecker.checkAndSendError({ response, error: new errorCreator.InvalidData({ expected: 'params: { userId, roomId }' }), sentData: request.body.data });
return;
}
const { roomId, userId } = request.params;
const { authorization: token } = request.headers;
const { aliasId } = request.body.data;
roomManager.unfollowRoom({
io,
token,
userId,
roomId,
aliasId,
callback: ({ error, data }) => {
if (error) {
restErrorChecker.checkAndSendError({ response, error, sentData: request.body.data });
return;
}
response.json({ data });
},
});
});
/**
* @api {get} /users/:userId/aliases Get aliases from a user.
* @apiVersion 8.0.0
* @apiName GetUserAliases
* @apiGroup Users
*
* @apiHeader {string} Authorization Your JSON Web Token
*
* @apiDescription Get aliases from a user.
*
* @apiSuccess {Object} data
* @apiSuccess {Object} data.aliases Found aliases.
*/
router.get('/:userId/aliases', (request, response) => {
if (!objectValidator.isValidData(request.params, { userId: true })) {
restErrorChecker.checkAndSendError({ response, error: new errorCreator.InvalidData({ expected: '{ userId }' }), sentData: request.body.data });
return;
}
const { userId } = request.params;
const { authorization: token } = request.headers;
aliasManager.getAliasesByUser({
userId,
token,
callback: ({ error, data }) => {
if (error) {
restErrorChecker.checkAndSendError({ response, error, sentData: request.body.data });
return;
}
response.json({ data });
},
});
});
/**
* @api {put} /users/:userIdToVerify/verify Verify a user.
* @apiVersion 8.0.0
* @apiName VerifyUser
* @apiGroup Users
*
* @apiHeader {string} Authorization Your JSON Web Token
*
* @apiDescription Verify a user and allow it to log in.
*
* @apiParam {string} userIdToVerify [Url] Id of the user to verify.
*
* @apiSuccess {Object} data
* @apiSuccess {Object} data.user Verified user.
*/
router.put('/:userIdToVerify/verify', (request, response) => {
if (!objectValidator.isValidData(request.params, { userIdToVerify: true })) {
restErrorChecker.checkAndSendError({ response, error: new errorCreator.InvalidData({ expected: '{ userId }' }), sentData: request.body.data });
return;
}
const { userIdToVerify } = request.params;
const { authorization: token } = request.headers;
userManager.verifyUser({
userIdToVerify,
token,
io,
callback: ({ error, data }) => {
if (error) {
restErrorChecker.checkAndSendError({ response, error, sentData: request.body.data });
return;
}
response.json({ data });
},
});
});
return router;
}
module.exports = handle;
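// Usage sketch (hypothetical wiring; the require path and mount point are
// placeholders):
//
// const usersRouter = require('./routes/rest/users')(io);
// app.use('/api/users', usersRouter);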
|
import React from 'react';
import { StyleSheet, Text, View } from 'react-native';
export default class App extends React.Component {
render() {
return (
<View style={styles.container}>
<Text>Open up App.js to start working on your app!</Text>
<Text>Changes you make will automatically reload.</Text>
<Text>Shake your phone to open the developer menu.</Text>
</View>
);
}
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: '#aaa',
alignItems: 'center',
justifyContent: 'center',
},
});
|
/*-------*/
var Script = function () {
$(function() {
});
$('').click(function () {
});
}();
|
const { mailer } = require('../models/Mailer.js')
/**
* GET /contact
* Contact form page.
*/
exports.getContact = (req, res, next) => {
res.render('contact', {
title: 'Contact'
});
};
/**
* POST /contact
* Send a contact form via Nodemailer.
*/
exports.postContact = (req, res) => {
req.assert('name', 'Name cannot be blank')
.notEmpty();
req.assert('email', 'Email is not valid')
.isEmail();
req.assert('message', 'Message cannot be blank')
.notEmpty();
const errors = req.validationErrors();
if (errors) {
req.flash('errors', errors);
return res.redirect('/contact');
}
const mailOptions = {
to: '[email protected]'
, from: `[email protected]`
, subject: `Contact Form | ${req.body.email}`
, text: `${req.body.name} <${req.body.email}>\n\n ${req.body.message}`
};
mailer.sendMail(mailOptions, (err) => {
if (err) {
req.flash('errors', {
msg: err.message
});
return res.redirect('/contact');
}
req.flash('success', {
msg: 'Email has been sent successfully!'
});
res.redirect('/contact');
});
};
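// Usage sketch (hypothetical route wiring in the app entrypoint):
//
// const contactController = require('./controllers/contact');
// app.get('/contact', contactController.getContact);
// app.post('/contact', contactController.postContact);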
|
from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
"""serializer for the users object"""
class Meta:
model = get_user_model()
fields = (
'email',
'password',
'name'
)
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
"""create a new user with encrypted password and return it"""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
"""update a user, setting the password correctly and return it"""
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
"""Serializer for the user authetication object"""
email = serializers.CharField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False
)
def validate(self, attrs):
"""validate and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = _('Unable to authenticate with provided credentials')
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs
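# Usage sketch (plain DRF serializer usage; field values are placeholders):
#
#     serializer = UserSerializer(data={
#         'email': 'user@example.com',
#         'password': 'longenough',
#         'name': 'Test User',
#     })
#     serializer.is_valid(raise_exception=True)
#     user = serializer.save()  # calls create(), which hashes the password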
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDatasets(PythonPackage):
"""Datasets is a lightweight library providing two main
features: one-line dataloaders for many public datasets and
efficient data pre-processing."""
homepage = "https://github.com/huggingface/datasets"
pypi = "datasets/datasets-1.8.0.tar.gz"
version('1.8.0', sha256='d57c32bb29e453ee7f3eb0bbca3660ab4dd2d0e4648efcfa987432624cab29d3')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:3+parquet', type=('build', 'run'))
depends_on('py-dill', type=('build', 'run'))
depends_on('py-pandas', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:4.49', type=('build', 'run'))
depends_on('py-dataclasses', when='^python@:3.6', type=('build', 'run'))
depends_on('py-xxhash', type=('build', 'run'))
depends_on('py-multiprocess', type=('build', 'run'))
depends_on('py-importlib-metadata', when='^python@:3.7', type=('build', 'run'))
depends_on('py-huggingface-hub@:0.0', type=('build', 'run'))
depends_on('py-packaging', type=('build', 'run'))
depends_on('py-fsspec@:0.8.0', when='^python@:3.7', type=('build', 'run'))
depends_on('py-fsspec', when='^[email protected]:', type=('build', 'run'))
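# Usage sketch (standard Spack workflow for this package):
#
#     spack install py-datasets
#     spack load py-datasets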
|
(function() {
"use strict";
angular
.module('tornano')
.controller('builderController', BuilderController);
BuilderController.$inject = ['$scope', '$rootScope', '$http'];
function BuilderController($scope, $rootScope, $http) {
}
})(); |
describe('utils/matter', function () {
let pInst;
beforeEach(function () {
pInst = new p5(function () {});
});
afterEach(function () {
pInst.remove();
});
describe('createMatter()', function () {
it('Returns a Matter object', function () {
let m = pInst.createMatter();
const keys = ['name', 'version', 'uses', 'used', 'use',
'before', 'after', 'Body', 'Composite', 'World', 'Contact',
'Detector', 'Grid', 'Pairs', 'Pair', 'Query', 'Resolver',
'SAT', 'Constraint', 'MouseConstraint', 'Common', 'Engine',
'Events', 'Mouse', 'Runner', 'Sleeping', 'Plugin', 'Bodies',
'Composites', 'Axes', 'Bounds', 'Svg', 'Vector', 'Vertices',
'Render', 'RenderPixi'];
expect(m).to.have.all.keys(keys);
});
});
});
|
import React from 'react';
import PropTypes from 'prop-types';
import styled from 'styled-components';
import form from '../common-components/form.scss';
// const InputElement = styled.input`
// font-size: 1rem;
// width: 100%;
// vertical-align: top;
// margin: 0;
// outline: none;
// -webkit-appearance: none;
// tap-highlight-color: rgba(255, 255, 255, 0);
// line-height: 1.21428571em;
// padding: .67857143em 1em;
// background: #fff;
// border: 1px solid #E3E3E3;
// color: rgba(0, 0, 0, 0.87);
// border-radius: 25px;
// box-shadow: 0 0 0 0 transparent inset;
//
// &:focus {
// border: 1px solid #00a65a;
// box-shadow: none;
// outline: 0 !important;
// color: #2c2c2c;
// transition: all 0.30s ease-in-out;
// }
// `;
//
// const LabelElement = styled.label`
// font-size: 1rem;
// `;
const Input = (props) => (
<div className={props.className}>
<label>{props.label} </label>
<input type={props.type} required={props.required} className={props.inputClassName} multiple={props.multiple}
accept={props.accept} checked={props.checked} value={props.value} disabled={props.disabled} min={props.min}
onChange={props.onChange} onFocus={props.onFocus} placeholder={props.placeholder} />
</div>
);
Input.defaultProps = {
type: 'text'
};
Input.propTypes = {
type: PropTypes.string.isRequired,
className: PropTypes.string,
checked: PropTypes.bool,
onChange: PropTypes.func.isRequired,
onFocus: PropTypes.func
};
export default Input;
|
import {build, files} from '$service-worker'
import {CACHE_NAME} from './constants'
export default (event) => {
event.waitUntil(
caches
.open(CACHE_NAME)
.then((cache) =>
Promise.all([
cache.addAll(['/']), // cache root page
cache.addAll(build),
cache.addAll(files)
])
)
.then(() => self.skipWaiting())
)
}
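// Usage sketch (assumed wiring in the service worker entrypoint; the module
// path is a placeholder):
//
// import handleInstall from './install'
// self.addEventListener('install', handleInstall)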
|
/* eslint-env browser */
import React, { Component } from "react";
import { propTypes, defaultProps } from "./video-prop-types";
import SkySportsBanner from "./sky-sports-banner";
const css = `
.video-js .vjs-big-play-button {
width: 70px;
height: 70px;
margin-top: -35px;
margin-left: -35px;
background: rgba(0, 0, 0, 0.4);
line-height: 65px;
border-radius: 0;
border-style: solid;
border-width: 3px;
border-color: white;
}
.video-js .vjs-big-play-button:before {
font-size: 60px;
left: -2px;
}
.video-js .vjs-dock-text {
visibility: hidden;
}
.video-js .vjs-poster {
background-size: cover;
}
.video-js .vjs-tech {
position: relative;
}
`;
class InlineVideoPlayer extends Component {
static index = 0;
static scriptLoadError = false;
static activePlayers = [];
static brightcoveSDKLoadedStarted = false;
static brightcoveSDKHasLoaded() {
return !!(window.bc && window.videojs);
}
static appendScript(s) {
document.body.appendChild(s);
}
static attachStyles() {
const styleTag = document.createElement("style");
styleTag.type = "text/css";
const cssText = document.createTextNode(css);
styleTag.appendChild(cssText);
document.head.appendChild(styleTag);
}
constructor(props) {
super(props);
this.state = {
error: null,
showSkyBanner: props.skySports
};
InlineVideoPlayer.index += 1;
this.id = `${props.videoId}-${props.accountId}-${InlineVideoPlayer.index}`;
}
componentDidMount() {
if (InlineVideoPlayer.scriptLoadError) {
this.handleError(InlineVideoPlayer.scriptLoadError);
}
this.loadBrightcoveSDKIfRequired();
InlineVideoPlayer.activePlayers.push(this);
if (InlineVideoPlayer.brightcoveSDKHasLoaded()) {
this.initBrightcove();
}
}
componentWillUnmount() {
InlineVideoPlayer.activePlayers.splice(
InlineVideoPlayer.activePlayers.indexOf(this)
);
if (this.player) {
this.player.dispose();
this.player = null;
}
}
handleError = () => {
this.setState({ error: true });
};
handlePlay = () => {
this.hideSkyBanner();
InlineVideoPlayer.activePlayers.forEach(video => {
if (video !== this && video.player) {
video.player.pause();
}
});
};
hideSkyBanner = () => {
this.setState({ showSkyBanner: false });
};
loadBrightcoveSDKIfRequired() {
if (!InlineVideoPlayer.brightcoveSDKLoadedStarted) {
InlineVideoPlayer.brightcoveSDKLoadedStarted = true;
const s = this.createBrightcoveScript();
s.onload = () => {
InlineVideoPlayer.activePlayers.forEach(player => player.initVideojs());
};
s.onerror = () => {
InlineVideoPlayer.scriptLoadError = "Brightcove script failed to load";
InlineVideoPlayer.activePlayers.forEach(player => player.handleError());
};
InlineVideoPlayer.appendScript(s);
InlineVideoPlayer.attachStyles();
}
}
createBrightcoveScript() {
const { accountId, playerId } = this.props;
const s = document.createElement("script");
s.src = `//players.brightcove.net/${accountId}/${playerId}_default/index.min.js`;
return s;
}
initVideojs() {
this.player = window.videojs(this.id);
this.player.ready(() => {
this.player.contextmenu({ disabled: true });
});
this.player.on("error", this.handleError);
this.player.on("play", this.handlePlay);
}
initBrightcove() {
window.bc(document.getElementById(this.id));
this.initVideojs();
}
render() {
const { width, height, poster, videoId, accountId, playerId } = this.props;
const { error, showSkyBanner } = this.state;
if (error) {
throw new Error(); // caught by parent ErrorView
}
return (
/* eslint jsx-a11y/media-has-caption: "off" */
// Added a wrapping div as brightcove adds siblings to the video tag
<div data-testid="video-component" style={{ height, width }}>
<div style={{ position: "relative" }}>
{showSkyBanner && <SkySportsBanner />}
<video
id={this.id}
style={{ height, width }}
{...(poster ? { poster: poster.uri } : {})}
className="video-js"
controls
data-account={accountId}
data-application-id
data-embed="default"
data-player={playerId}
data-video-id={videoId}
/>
</div>
</div>
);
}
}
InlineVideoPlayer.defaultProps = defaultProps;
InlineVideoPlayer.propTypes = propTypes;
export default InlineVideoPlayer;
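// Usage sketch (prop values are placeholders; real ids come from Brightcove):
//
// <InlineVideoPlayer
//   accountId="1234567890"
//   playerId="default"
//   videoId="9876543210"
//   width={640}
//   height={360}
//   poster={{ uri: "https://example.com/poster.jpg" }}
// />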
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-position, redefined-outer-name, range-builtin-not-iterating, too-many-locals, try-except-raise, import-outside-toplevel, line-too-long
"""
Reco class for performing reconstructions
"""
from __future__ import absolute_import, division, print_function
__all__ = [
"METHODS",
"CRS_STOP_FLAGS",
"REPORT_AFTER",
"CART_DIMS",
"Reco",
"get_multinest_meta",
"main",
]
__author__ = "J.L. Lanfranchi, P. Eller"
__license__ = """Copyright 2017-2018 Justin L. Lanfranchi and Philipp Eller
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from argparse import ArgumentParser
from collections import OrderedDict
from os.path import abspath, dirname, isdir, isfile, join
from shutil import rmtree
import sys
from tempfile import mkdtemp
import time
import traceback
import numpy as np
from six import string_types
if __name__ == "__main__" and __package__ is None:
RETRO_DIR = dirname(dirname(abspath(__file__)))
if RETRO_DIR not in sys.path:
sys.path.append(RETRO_DIR)
from retro import __version__, MissingOrInvalidPrefitError, init_obj
from retro.hypo.discrete_cascade_kernels import SCALING_CASCADE_ENERGY
from retro.hypo.discrete_muon_kernels import pegleg_eval
from retro.priors import (
EXT_IC,
PRI_COSINE,
PRI_TIME_RANGE,
PRI_UNIFORM,
PRISPEC_OSCNEXT_PREFIT_TIGHT,
PRISPEC_OSCNEXT_CRS,
PRISPEC_OSCNEXT_CRS_MN,
Bound,
get_prior_func,
)
from retro.retro_types import EVT_DOM_INFO_T, EVT_HIT_INFO_T, FitStatus
from retro.tables.pexp_5d import generate_pexp_and_llh_functions
from retro.utils.geom import (
rotate_points,
add_vectors,
)
from retro.utils.get_arg_names import get_arg_names
from retro.utils.misc import sort_dict
from retro.utils.stats import estimate_from_llhp
LLH_FUDGE_SUMMAND = -1000
METHODS = set(
[
"multinest",
"crs",
"crs_prefit",
"mn8d",
"emily_ref",
"emily_test",
"emily_crs_ref",
"emily_crs_test",
"stopping_atm_muon_crs",
"dn8d",
"nlopt",
"scipy",
"skopt",
"experimental_trackfit",
"fast",
"test",
"truth",
]
)
CRS_STOP_FLAGS = {
0: "max iterations reached",
1: "llh stddev below threshold",
2: "no improvement",
3: "all parameter stddevs below thresholds",
}
# TODO: make following args to `__init__` or `run`
REPORT_AFTER = 100
CART_DIMS = ("x", "y", "z", "time")
EMILY_CRS_SETTINGS = dict(
n_live=250,
max_iter=100000,
max_noimprovement=1000,
min_llh_std=0.5,
stdthresh=dict(x=2.5, y=2.5, z=2, time=10),
use_sobol=True,
seed=0,
)
class StandaloneEvents(object):
"""
Standalone events class to iteratively run recos, as opposed to an I3Tray module
Parameters
----------
events_kw : mapping
As returned by `retro.init_obj.parse_args`
"""
def __init__(self, events_kw):
# We don't want to specify 'recos' so that new recos are automatically
# found by `init_obj.get_events` function
events_kw.pop("recos", None)
self.events_kw = sort_dict(events_kw)
# Replace None values for `start` and `step` for fewer branches in
# subsequent logic (i.e., these will always be integers)
self.events_start = 0 if events_kw["start"] is None else events_kw["start"]
self.events_step = 1 if events_kw["step"] is None else events_kw["step"]
# Nothing we can do about None for `stop` since we don't know how many
# events there are in total.
self.events_stop = events_kw["stop"]
self.event_counter = 0
self.attrs = OrderedDict([("events_kw", self.events_kw)])
@property
def events(self):
"""Iterator over events.
Yields
------
event : OrderedDict
Each event has attribute `.meta` (see retro.init_obj.get_events)
but this gets populated with additional information within this
method: "recodir" and "prefix".
"""
# do initialization here so any new recos are automatically detected
events = init_obj.get_events(**self.events_kw)
for event in events:
event.meta["prefix"] = join(
event.meta["events_root"],
"recos",
"evt{}.".format(event.meta["event_idx"]),
)
self.event_counter += 1
print(
'Reconstructing event #{} (index {} in dir "{}")'.format(
self.event_counter,
event.meta["event_idx"],
event.meta["events_root"],
)
)
yield event
class Reco(object):
"""
    Setup tables, get events, run reconstructions on them, and optionally store
results to disk.
Note that "recipes" for different reconstructions are defined in the
`Reco.run` method.
Parameters
----------
dom_tables_kw, tdi_tables_kw : mappings
As returned by `retro.init_obj.parse_args`
debug : bool
"""
def __init__(
self,
dom_tables_kw,
tdi_tables_kw,
debug=False,
):
self.debug = bool(debug)
self.dom_tables_kw = sort_dict(dom_tables_kw)
self.tdi_tables_kw = sort_dict(tdi_tables_kw)
self.attrs = OrderedDict(
[
("dom_tables_kw", self.dom_tables_kw),
("tdi_tables_kw", self.tdi_tables_kw),
]
)
self.dom_tables = init_obj.setup_dom_tables(**dom_tables_kw)
self.tdi_tables, self.tdi_metas = init_obj.setup_tdi_tables(**tdi_tables_kw)
self.pexp, self.get_llh, _ = generate_pexp_and_llh_functions(
dom_tables=self.dom_tables,
tdi_tables=self.tdi_tables,
tdi_metas=self.tdi_metas,
)
self.event = None
self.hypo_handler = None
self.prior = None
self.priors_used = None
self.loglike = None
self.n_params = None
self.n_opt_params = None
def __call__(
self,
frame,
methods,
reco_pulse_series_name,
hit_charge_quant,
min_hit_charge,
seeding_recos,
triggers,
additional_keys,
filter,
point_estimator,
):
"""Method to run Retro reconstructions as part of an I3Tray module
Parameters
----------
frame : icecube.icetray.I3Frame
this will be provided by the tray automatically
methods : string or sequence of strings
reco methods to be performed, e.g. `["crs_prefit"]`
reco_pulse_series_name : string
name of pulse series to use, e.g. `"SRTTWOfflinePulsesDC"`
hit_charge_quant : scalar >= 0
modify pulses in pulse series by quantizing their charge; specify 0
to disable quantization.
min_hit_charge : scalar >= 0
remove all pulses with charge less than this value (after
quantization has been applied); specify 0 to keep all pulses
seeding_recos : list of strings
recos to load for seeding / constructing priors
triggers : list of strings
for purposes of obtaining time window, if this is used by a reco.
E.g., `["I3TriggerHierarchy"]`
additional_keys : list of strings
additional keys from the frame to load into the retro events object
filter : string
            expression as passed to method `_reco_event`, e.g.
            `'event["header"]["L5_oscNext_bool"] and len(event["hits"]) >= 8'`
point_estimator : string in {"max", "mean", "median"}
which point estimator to use for I3Particles output
Usage
-----
Instantiate an object of this class and hand it directly to IceTray, e.g. ::
my_reco = Reco(**kwargs)
tray = I3Tray()
tray.AddModule("I3Reader", ...)
tray.Add(my_reco, "retro", **more_kwargs)
tray.AddModule("I3Writer", ...)
"""
from icecube.icetray import I3Int
from retro.i3processing.extract_events import (
I3EVENTHEADER_SPECS,
extract_metadata_from_frame,
extract_photons,
extract_pulses,
extract_reco,
extract_trigger_hierarchy,
get_frame_item,
)
from retro.i3processing.retro_recos_to_i3files import (
make_i3_particles, extract_all_reco_info, setitem_pframe
)
event = OrderedDict()
header_info = get_frame_item(
frame=frame,
key="I3EventHeader",
specs=I3EVENTHEADER_SPECS,
allow_missing=False,
)
event["header"] = extract_metadata_from_frame(frame)
for key, val in header_info.items():
event["header"][key] = val
event["pulses"] = OrderedDict()
event["recos"] = OrderedDict()
event["triggers"] = OrderedDict()
        # Populate additional metadata keys and attributes that downstream
        # code expects to find on the event
meta = OrderedDict(
[
("events_root", None),
("num_events", None),
("event_idx", None),
("agg_event_idx", None),
]
)
event.meta = meta
event.meta["event_idx"] = None
event.meta["agg_event_idx"] = None
pulses_list, time_range = extract_pulses(frame, reco_pulse_series_name)
event["pulses"][reco_pulse_series_name] = pulses_list
event["pulses"][reco_pulse_series_name + "TimeRange"] = time_range
if seeding_recos is not None:
for seed_reco_name in seeding_recos:
event["recos"][seed_reco_name] = extract_reco(frame, seed_reco_name)
if triggers is not None:
for trigger_hierarchy_name in triggers:
event["triggers"][trigger_hierarchy_name] = extract_trigger_hierarchy(
frame, trigger_hierarchy_name
)
if additional_keys is not None:
for frame_key in additional_keys:
event["header"][frame_key] = frame[frame_key].value
hits_array, hits_indexer, hits_summary = init_obj.get_hits(
event=event,
path=["pulses", reco_pulse_series_name],
hit_charge_quant=hit_charge_quant,
min_hit_charge=min_hit_charge,
angsens_model=None,
)
event["hits"] = hits_array
event["hits_indexer"] = hits_indexer
event["hits_summary"] = hits_summary
# TODO: include this in future? if so, make sure a parallel item exists
# in .npy-based recos
#setitem_pframe(
# frame=frame,
# key="retro_num_hits__{}".format(reco_pulse_series_name),
# val=I3Int(int(len(event["hits"]))),
# overwrite=True,
#)
if isinstance(methods, string_types):
methods = [methods]
for method in methods:
if method not in METHODS:
raise ValueError(
'Unrecognized `method` "{}"; must be one of {}'.format(
method, METHODS
)
)
if len(set(methods)) != len(methods):
raise ValueError("Same reco specified multiple times")
for method in methods:
reco_name = "retro_" + method
try:
fit_status = self._reco_event(
event,
method=method,
save_llhp=False,
filter=filter,
save_estimate=False,
)
except MissingOrInvalidPrefitError:
fit_status = FitStatus.MissingSeed
# Do not populate recos that were not performed
if fit_status == FitStatus.NotSet:
continue
# Only populate single field {reco_name}__fit_status for special
# not-run cases
if fit_status in (FitStatus.Skipped, FitStatus.MissingSeed):
setitem_pframe(
frame=frame,
key=reco_name + "__fit_status",
val=I3Int(fit_status),
)
continue
# Any other fit_status: fully populate particles, etc.
particles_identifiers = make_i3_particles(
event["recos"][reco_name][0],
point_estimator=point_estimator,
)
all_reco_info = extract_all_reco_info(
event["recos"][reco_name][0],
reco_name=reco_name,
)
for particle, identifier in particles_identifiers:
key = "__".join([reco_name, point_estimator, identifier])
setitem_pframe(frame, key, particle, overwrite=True)
for key, val in all_reco_info.items():
setitem_pframe(frame, key, val, overwrite=True)
def setup_hypo(self, **kwargs):
"""Setup hypothesis and record `n_params` and `n_opt_params`
corresponding to the hypothesis.
Parameters
----------
**kwargs
Passed to `retro.init_obj.setup_discrete_hypo`
"""
self.hypo_handler = init_obj.setup_discrete_hypo(**kwargs)
self.n_params = self.hypo_handler.n_params
self.n_opt_params = self.hypo_handler.n_opt_params
def _reco_event(self, event, method, save_llhp, filter, save_estimate):
"""Recipes for performing different kinds of reconstructions.
Parameters
----------
method : str
save_llhp : bool
filter : str or None
Filter to apply for selecting events to reconstruct. String is
passed through `eval` and must produce a scalar value interpretable
via `bool(eval(filter))`. Current event is accessible via the name
`event` and numpy is named `np`. E.g. .. ::
filter='event["header"]["L5_oscNext_bool"] and len(event["hits"]) >= 8'
save_estimate : bool
save estimate to npy file; set to False if calling as part of an
icetray module
Returns
-------
fit_status : FitStatus
Fit status from the reconstruction. Note that `FitStatus.NotSet`
is returned if, e.g., the `filter` expression evaluates to `False`
"""
self.event = event
if filter is not None:
assert isinstance(filter, string_types)
filter = filter.strip()
print("filter: '{}'".format(filter))
print("Running {} reconstruction on event".format(method))
if filter and not eval(filter): # pylint: disable=eval-used
print(
"filter evaluates to False; skipping event (index {})".format(
event.meta["event_idx"]
)
)
fit_status = FitStatus.Skipped
if save_estimate:
self.write_status_npy(event=event, method=method, fit_status=fit_status)
return fit_status
# simple 1-stage recos
if method in ("multinest", "test", "truth", "crs", "scipy", "nlopt", "skopt"):
self.setup_hypo(
cascade_kernel="scaling_aligned_one_dim",
track_kernel="pegleg",
track_time_step=1.0,
)
self.generate_prior_method(**PRISPEC_OSCNEXT_PREFIT_TIGHT)
param_values = []
log_likelihoods = []
aux_values = []
t_start = []
self.generate_loglike_method(
param_values=param_values,
log_likelihoods=log_likelihoods,
aux_values=aux_values,
t_start=t_start,
)
if method == "test":
run_info, fit_meta = self.run_test(seed=0)
if method == "truth":
run_info, fit_meta = self.run_with_truth()
elif method == "crs":
run_info, fit_meta = self.run_crs(
n_live=250,
max_iter=20000,
max_noimprovement=5000,
min_llh_std=0.1,
stdthresh=dict(x=1, y=1, z=1, time=3),
use_sobol=True,
seed=0,
)
elif method == "multinest":
run_info, fit_meta = self.run_multinest(
importance_sampling=True,
max_modes=1,
const_eff=True,
n_live=160,
evidence_tol=0.5,
sampling_eff=0.3,
max_iter=10000,
seed=0,
)
elif method == "scipy":
run_info, fit_meta = self.run_scipy(
method="differential_evolution", eps=0.02
)
elif method == "nlopt":
run_info, fit_meta = self.run_nlopt()
elif method == "skopt":
run_info, fit_meta = self.run_skopt()
llhp = self.make_llhp(
method=method,
log_likelihoods=log_likelihoods,
param_values=param_values,
aux_values=aux_values,
save=save_llhp,
)
self.make_estimate(
method=method,
llhp=llhp,
remove_priors=True,
run_info=run_info,
fit_meta=fit_meta,
save=save_estimate,
)
elif method == "fast":
self.setup_hypo(
cascade_kernel="scaling_aligned_point_ckv",
track_kernel="pegleg",
track_time_step=3.0,
)
self.generate_prior_method(**PRISPEC_OSCNEXT_PREFIT_TIGHT)
param_values = []
log_likelihoods = []
aux_values = []
t_start = []
self.generate_loglike_method(
param_values=param_values,
log_likelihoods=log_likelihoods,
aux_values=aux_values,
t_start=t_start,
)
run_info, fit_meta = self.run_crs(
n_live=160,
max_iter=10000,
max_noimprovement=1000,
min_llh_std=0.5,
stdthresh=dict(x=5, y=5, z=5, time=15),
use_sobol=True,
seed=0,
)
llhp = self.make_llhp(
method=method,
log_likelihoods=log_likelihoods,
param_values=param_values,
aux_values=aux_values,
save=save_llhp,
)
self.make_estimate(
method=method,
llhp=llhp,
remove_priors=False,
run_info=run_info,
fit_meta=fit_meta,
save=save_estimate,
)
elif method == "stopping_atm_muon_crs":
self.setup_hypo(
track_kernel="stopping_table_energy_loss", track_time_step=3.0
)
self.generate_prior_method(
x=dict(kind=PRI_UNIFORM, extents=EXT_IC["x"]),
y=dict(kind=PRI_UNIFORM, extents=EXT_IC["y"]),
z=dict(kind=PRI_UNIFORM, extents=EXT_IC["z"]),
time=dict(kind=PRI_TIME_RANGE),
track_zenith=dict(
kind=PRI_COSINE, extents=((0, Bound.ABS), (np.pi / 2, Bound.ABS))
),
)
param_values = []
log_likelihoods = []
aux_values = []
t_start = []
self.generate_loglike_method(
param_values=param_values,
log_likelihoods=log_likelihoods,
aux_values=aux_values,
t_start=t_start,
)
run_info, fit_meta = self.run_crs(
n_live=160,
max_iter=10000,
max_noimprovement=1000,
min_llh_std=0.,
stdthresh=dict(x=5, y=5, z=4, time=20),
use_sobol=True,
seed=0,
)
llhp = self.make_llhp(
method=method,
log_likelihoods=log_likelihoods,
param_values=param_values,
aux_values=aux_values,
save=save_llhp,
)
self.make_estimate(
method=method,
llhp=llhp,
remove_priors=False,
run_info=run_info,
fit_meta=fit_meta,
save=save_estimate,
)
elif method == "crs_prefit":
self.setup_hypo(
cascade_kernel="scaling_aligned_point_ckv",
track_kernel="pegleg",
track_time_step=3.0,
)
self.generate_prior_method(**PRISPEC_OSCNEXT_PREFIT_TIGHT)
param_values = []
log_likelihoods = []
aux_values = []
t_start = []
self.generate_loglike_method(
param_values=param_values,
log_likelihoods=log_likelihoods,
aux_values=aux_values,
t_start=t_start,
)
run_info, fit_meta = self.run_crs(
n_live=160,
max_iter=10000,
max_noimprovement=1000,
min_llh_std=0.5,
stdthresh=dict(x=5, y=5, z=4, time=20),
use_sobol=True,
seed=0,
)
llhp = self.make_llhp(
method=method,
log_likelihoods=log_likelihoods,
param_values=param_values,
aux_values=aux_values,
save=save_llhp,
)
self.make_estimate(
method=method,
llhp=llhp,
remove_priors=False,
run_info=run_info,
fit_meta=fit_meta,
save=save_estimate,
)
elif method == "emily_crs_ref":
self.setup_hypo(
cascade_kernel="scaling_aligned_one_dim",
track_kernel="table_energy_loss",
track_time_step=1.0,
)
self.generate_prior_method(**PRISPEC_OSCNEXT_PREFIT_TIGHT)
param_values = []
log_likelihoods = []
aux_values = []
t_start = []
self.generate_loglike_method(
param_values=param_values,
log_likelihoods=log_likelihoods,
aux_values=aux_values,
t_start=t_start,
)
run_info, fit_meta = self.run_crs(**EMILY_CRS_SETTINGS)
llhp = self.make_llhp(
method=method,
log_likelihoods=log_likelihoods,
param_values=param_values,
aux_values=aux_values,
save=save_llhp,
)
self.make_estimate(
method=method,
llhp=llhp,
remove_priors=False,
run_info=run_info,
fit_meta=fit_meta,
save=save_estimate,
)
elif method == "emily_crs_test":
self.setup_hypo(
cascade_kernel="scaling_aligned_one_dim",
track_kernel="table_energy_loss_secondary_light",
track_time_step=1.0,
)
self.generate_prior_method(**PRISPEC_OSCNEXT_PREFIT_TIGHT)
param_values = []
log_likelihoods = []
aux_values = []
t_start = []
self.generate_loglike_method(
param_values=param_values,
log_likelihoods=log_likelihoods,
aux_values=aux_values,
t_start=t_start,
)
run_info, fit_meta = self.run_crs(**EMILY_CRS_SETTINGS)
llhp = self.make_llhp(
method=method,
log_likelihoods=log_likelihoods,
param_values=param_values,
aux_values=aux_values,
save=save_llhp,
)
self.make_estimate(
method=method,
llhp=llhp,
remove_priors=False,
run_info=run_info,
fit_meta=fit_meta,
save=save_estimate,
)
elif method == "mn8d":
self.setup_hypo(
cascade_kernel="scaling_aligned_one_dim",
track_kernel="pegleg",
track_time_step=1.0,
)
self.generate_prior_method(**PRISPEC_OSCNEXT_CRS_MN)
param_values = []
log_likelihoods = []
aux_values = []
t_start = []
self.generate_loglike_method(
param_values=param_values,
log_likelihoods=log_likelihoods,
aux_values=aux_values,
t_start=t_start,
)
run_info, fit_meta = self.run_multinest(
importance_sampling=True,
max_modes=1,
const_eff=True,
n_live=250,
evidence_tol=0.02,
sampling_eff=0.5,
max_iter=10000,
seed=0,
)
llhp = self.make_llhp(
method=method,
log_likelihoods=log_likelihoods,
param_values=param_values,
aux_values=aux_values,
save=save_llhp,
)
self.make_estimate(
method=method,
llhp=llhp,
remove_priors=True,
run_info=run_info,
fit_meta=fit_meta,
save=save_estimate,
)
elif method == "emily_ref":
self.setup_hypo(
cascade_kernel="scaling_aligned_one_dim",
track_kernel="table_energy_loss",
track_time_step=1.0,
)
self.generate_prior_method(**PRISPEC_OSCNEXT_CRS_MN)
param_values = []
log_likelihoods = []
aux_values = []
t_start = []
self.generate_loglike_method(
param_values=param_values,
log_likelihoods=log_likelihoods,
aux_values=aux_values,
t_start=t_start,
)
run_info, fit_meta = self.run_multinest(
importance_sampling=True,
max_modes=1,
const_eff=True,
n_live=250,
evidence_tol=0.02,
sampling_eff=0.5,
max_iter=10000,
seed=0,
)
llhp = self.make_llhp(
method=method,
log_likelihoods=log_likelihoods,
param_values=param_values,
aux_values=aux_values,
save=save_llhp,
)
self.make_estimate(
method=method,
llhp=llhp,
remove_priors=True,
run_info=run_info,
fit_meta=fit_meta,
save=save_estimate,
)
elif method == "emily_test":
self.setup_hypo(
cascade_kernel="scaling_aligned_one_dim",
track_kernel="table_energy_loss_secondary_light",
track_time_step=1.0,
)
self.generate_prior_method(**PRISPEC_OSCNEXT_CRS_MN)
param_values = []
log_likelihoods = []
aux_values = []
t_start = []
self.generate_loglike_method(
param_values=param_values,
log_likelihoods=log_likelihoods,
aux_values=aux_values,
t_start=t_start,
)
run_info, fit_meta = self.run_multinest(
importance_sampling=True,
max_modes=1,
const_eff=True,
n_live=250,
evidence_tol=0.02,
sampling_eff=0.5,
max_iter=10000,
seed=0,
)
llhp = self.make_llhp(
method=method,
log_likelihoods=log_likelihoods,
param_values=param_values,
aux_values=aux_values,
save=save_llhp,
)
self.make_estimate(
method=method,
llhp=llhp,
remove_priors=True,
run_info=run_info,
fit_meta=fit_meta,
save=save_estimate,
)
elif method == "dn8d":
self.setup_hypo(
cascade_kernel="scaling_aligned_one_dim",
track_kernel="pegleg",
track_time_step=1.0,
)
self.generate_prior_method(return_cube=True, **PRISPEC_OSCNEXT_CRS_MN)
#self.generate_prior_method(return_cube=True, **PRISPEC_OSCNEXT_PREFIT_TIGHT)
param_values = []
log_likelihoods = []
aux_values = []
t_start = []
self.generate_loglike_method(
param_values=param_values,
log_likelihoods=log_likelihoods,
aux_values=aux_values,
t_start=t_start,
)
run_info, fit_meta = self.run_dynesty(
n_live=100,
maxiter=2000,
maxcall=10000,
dlogz=0.1,
)
llhp = self.make_llhp(
method=method,
log_likelihoods=log_likelihoods,
param_values=param_values,
aux_values=aux_values,
save=save_llhp,
)
self.make_estimate(
method=method,
llhp=llhp,
remove_priors=True,
run_info=run_info,
fit_meta=fit_meta,
save=save_estimate,
)
else:
raise ValueError("Unknown `Method` {}".format(method))
        return FitStatus.OK
def _print_non_fatal_exception(self, method):
"""Print to stderr a detailed message about a failure in reconstruction
that is non-fatal.
Parameters
----------
method : str
The name of the function, e.g. "run_crs" or "run_multinest"
"""
id_fields = ["run_id", "sub_run_id", "event_id", "sub_event_id"]
id_str = ", ".join(
"{} {}".format(f, self.event["header"][f]) for f in id_fields
)
sys.stderr.write(
"ERROR! Reco function {method} failed on event index {idx} ({id_str}) in"
' path "{fpath}". Recording reco failure and continuing to next event)'
"\n{tbk}\n".format(
method=method,
idx=self.event.meta["event_idx"],
fpath=self.event.meta["events_root"],
id_str=id_str,
tbk="".join(traceback.format_exc()),
)
)
def run(
self,
event,
methods,
redo_failed=False,
redo_all=False,
save_llhp=False,
filter=None, # pylint: disable=redefined-builtin
):
"""Run reconstruction(s) on events.
Intended to run Retro reconstructions in standalone mode (i.e., not as
an icetray module). For the same operation meant to be called as part
of an icetray module, see the `__call__` method.
Parameters
----------
event : event
methods : string or iterable thereof
Each must be one of `METHODS`
redo_failed : bool, optional
            If `True`, reconstruct not only events that haven't yet been
            reconstructed with each method (the default behavior), but also
            re-reconstruct events whose `fit_status` indicates a failure
            (i.e., all events will be reconstructed using a given method
            unless they have `fit_status == FitStatus.OK` for that method).
            Default is False.
redo_all : bool, optional
            If `True`, reconstruct all events with all `methods`, regardless
            of whether they have been reconstructed with these methods
            previously.
save_llhp : bool, optional
Save likelihood values & corresponding parameter values within a
LLH range of the max LLH (this takes up a lot of disk space and
            creates a lot of files; use with caution when running jobs en masse)
filter : str or None
Filter to apply for selecting events to reconstruct. String is
passed through `eval` and must produce a scalar value interpretable
via `bool(eval(filter))`. Current event is accessible via the name
`event` and numpy is named `np`. E.g. .. ::
filter='event["header"]["L5_oscNext_bool"]'
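        Usage
        -----
        In standalone mode (mirroring what `main` does below), e.g. ::
            my_events = StandaloneEvents(events_kw)
            my_reco = Reco(**split_kwargs)
            for event in my_events.events:
                my_reco.run(event, methods="crs_prefit")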
"""
if isinstance(methods, string_types):
methods = [methods]
for method in methods:
if method not in METHODS:
raise ValueError(
'Unrecognized `method` "{}"; must be one of {}'.format(
method, METHODS
)
)
if len(set(methods)) != len(methods):
raise ValueError("Same reco specified multiple times")
for method in methods:
reco_name = "retro_" + method
fit_status_outf = join(
event.meta["events_root"],
"recos",
"{}__fit_status.npy".format(reco_name),
)
if isfile(fit_status_outf):
fit_statuses = np.load(fit_status_outf, mmap_mode="r+")
try:
fit_status = fit_statuses[event.meta["event_idx"]]
finally:
del fit_statuses
if fit_status != FitStatus.NotSet:
if redo_all:
print(
'Method "{}" already run on event but redoing'.format(
method
)
)
elif redo_failed and fit_status != FitStatus.OK:
print(
'Method "{}" already run on event and failed'
" previously; retrying".format(method)
)
else:
print(
'Method "{}" already run on event; skipping'.format(
method
)
)
continue
print('Running "{}" reconstruction'.format(method))
try:
self._reco_event(
event,
method=method,
save_llhp=save_llhp,
filter=filter,
save_estimate=True,
)
except MissingOrInvalidPrefitError as error:
print(
'ERROR: event idx {}, reco method {}: "{}"; ignoring'
" and moving to next event".format(
event.meta["event_idx"], method, error
)
)
self.write_status_npy(
event=event,
method=method,
fit_status=FitStatus.MissingSeed,
)
def generate_prior_method(self, return_cube=False, **kwargs):
"""Generate the prior transform method `self.prior` and info
        `self.priors_used` for a given event. Optionally plots the priors to
        the current working directory if `self.debug` is True.
Call, e.g., via:
            self.generate_prior_method(
                x=dict(
                    kind=PRI_OSCNEXT_L5_V1_PREFIT,
                    extents=((-100, Bound.REL), (100, Bound.REL)),
                ),
                y=dict(
                    kind=PRI_OSCNEXT_L5_V1_PREFIT,
                    extents=((-100, Bound.REL), (100, Bound.REL)),
                ),
                z=dict(
                    kind=PRI_OSCNEXT_L5_V1_PREFIT,
                    extents=((-50, Bound.REL), (50, Bound.REL)),
                ),
                time=dict(
                    kind=PRI_OSCNEXT_L5_V1_PREFIT,
                    extents=((-1000, Bound.REL), (1000, Bound.REL)),
                ),
                azimuth=dict(kind=PRI_OSCNEXT_L5_V1_PREFIT),
                zenith=dict(kind=PRI_OSCNEXT_L5_V1_PREFIT),
            )
Parameters
----------
return_cube : bool
            If True, the generated `prior` function explicitly returns the
            transformed cube (in addition to modifying `cube` in place)
**kwargs
Prior definitions; anything unspecified falls back to a default
(since all params must have priors, including ranges, for e.g.
MultiNest and CRS).
"""
prior_funcs = []
self.priors_used = OrderedDict()
miscellany = []
for dim_num, dim_name in enumerate(self.hypo_handler.opt_param_names):
spec = kwargs.get(dim_name, {})
prior_func, prior_def, misc = get_prior_func(
dim_num=dim_num, dim_name=dim_name, event=self.event, **spec
)
prior_funcs.append(prior_func)
self.priors_used[dim_name] = prior_def
miscellany.append(misc)
def prior(cube, ndim=None, nparams=None): # pylint: disable=unused-argument, inconsistent-return-statements
"""Apply `prior_funcs` to the hypercube to map values from the unit
hypercube onto values in the physical parameter space.
The result overwrites the values in `cube`.
Parameters
----------
cube
ndim
nparams
"""
for prior_func in prior_funcs:
prior_func(cube)
if return_cube:
return cube
self.prior = prior
if self.debug:
# -- Plot priors and save to png's in current dir -- #
import matplotlib as mpl
mpl.use("agg", warn=False)
import matplotlib.pyplot as plt
n_opt_params = len(self.hypo_handler.opt_param_names)
rand = np.random.RandomState(0)
cube = rand.rand(n_opt_params, int(1e5))
self.prior(cube)
nx = int(np.ceil(np.sqrt(n_opt_params)))
ny = int(np.ceil(n_opt_params / nx))
fig, axes = plt.subplots(ny, nx, figsize=(6 * nx, 4 * ny))
axit = iter(axes.flat)
for dim_num, dim_name in enumerate(self.hypo_handler.opt_param_names):
ax = next(axit)
ax.hist(cube[dim_num], bins=100)
misc = miscellany[dim_num]
if "reco_val" in misc:
ylim = ax.get_ylim()
ax.plot([misc["reco_val"]] * 2, ylim, "k--", lw=1)
ax.set_ylim(ylim)
misc_strs = []
if "reco" in misc:
misc_strs.append(misc["reco"])
if "reco_val" in misc:
misc_strs.append("{:.2f}".format(misc["reco_val"]))
if (
"split_by_reco_param" in misc
and misc["split_by_reco_param"] is not None
):
misc_strs.append(
"split by {} = {:.2f}".format(
misc["split_by_reco_param"], misc["split_val"]
)
)
misc_str = ", ".join(misc_strs)
ax.set_title(
"{}: {} {}".format(
dim_name, self.priors_used[dim_name][0], misc_str
)
)
for ax in axit:
ax.axis("off")
fig.tight_layout()
plt_fpath_base = self.event.meta["prefix"] + "priors"
fig.savefig(plt_fpath_base + ".png", dpi=120)
def generate_loglike_method(
self, param_values, log_likelihoods, aux_values, t_start
):
"""Generate the LLH callback method `self.loglike` for a given event.
Parameters
----------
param_values : list
log_likelihoods : list
aux_values : list
t_start : list
Needs to be a list for `t_start` to be passed by reference (and
therefore universally accessible within all methods that require
knowing `t_start`).
"""
# -- Variables to be captured by `loglike` closure -- #
all_param_names = self.hypo_handler.all_param_names
opt_param_names = self.hypo_handler.opt_param_names
n_opt_params = self.hypo_handler.n_opt_params
fixed_params = self.hypo_handler.fixed_params
event = self.event
hits = event["hits"]
hits_indexer = event["hits_indexer"]
hypo_handler = self.hypo_handler
pegleg_muon_dt = hypo_handler.pegleg_kernel_kwargs.get("dt")
pegleg_muon_const_e_loss = True
dom_info = self.dom_tables.dom_info
sd_idx_table_indexer = self.dom_tables.sd_idx_table_indexer
if "truth" in event:
truth = event["truth"]
truth_info = OrderedDict(
[
("x", truth["x"]),
("y", truth["y"]),
("z", truth["z"]),
("time", truth["time"]),
("zenith", truth["zenith"]),
("azimuth", truth["azimuth"]),
("track_azimuth", truth["track_azimuth"]),
("track_zenith", truth["track_zenith"]),
("track_energy", truth["track_energy"]),
("energy", truth["energy"]),
("cascade_energy", truth["total_cascade_energy"]),
]
)
optional = [
("cscd_az", "total_cascade_azimuth"),
("cscd_zen", "total_cascade_zenith"),
("cscd_em_equiv_en", "total_cascade_em_equiv_energy"),
]
for label, key in optional:
if key in truth:
truth_info[label] = truth[key]
else:
truth_info = None
num_operational_doms = np.sum(dom_info["operational"])
# Array containing only DOMs operational during the event & info
# relevant to the hits these DOMs got (if any)
event_dom_info = np.zeros(shape=num_operational_doms, dtype=EVT_DOM_INFO_T)
# Array containing all relevant hit info for the event, including a
# pointer back to the index of the DOM in the `event_dom_info` array
event_hit_info = np.zeros(shape=hits.size, dtype=EVT_HIT_INFO_T)
# Copy 'time' and 'charge' over directly; add 'event_dom_idx' below
event_hit_info[["time", "charge"]] = hits[["time", "charge"]]
# Must be a list, not tuple:
copy_fields = [
"sd_idx",
"x",
"y",
"z",
"quantum_efficiency",
"noise_rate_per_ns",
]
print("all noise rate %.5f" % np.nansum(dom_info["noise_rate_per_ns"]))
print(
"DOMs with zero or NaN noise %i"
% np.count_nonzero(
np.isnan(dom_info["noise_rate_per_ns"])
| (dom_info["noise_rate_per_ns"] == 0)
)
)
# Fill `event_{hit,dom}_info` arrays only for operational DOMs
for dom_idx, this_dom_info in enumerate(dom_info[dom_info["operational"]]):
this_event_dom_info = event_dom_info[dom_idx : dom_idx + 1]
this_event_dom_info[copy_fields] = this_dom_info[copy_fields]
sd_idx = this_dom_info["sd_idx"]
this_event_dom_info["table_idx"] = sd_idx_table_indexer[sd_idx]
# Copy any hit info from `hits_indexer` and total charge from
# `hits` into `event_hit_info` and `event_dom_info` arrays
this_hits_indexer = hits_indexer[hits_indexer["sd_idx"] == sd_idx]
if len(this_hits_indexer) == 0:
this_event_dom_info["hits_start_idx"] = 0
this_event_dom_info["hits_stop_idx"] = 0
this_event_dom_info["total_observed_charge"] = 0
continue
start = this_hits_indexer[0]["offset"]
stop = start + this_hits_indexer[0]["num"]
event_hit_info[start:stop]["event_dom_idx"] = dom_idx
this_event_dom_info["hits_start_idx"] = start
this_event_dom_info["hits_stop_idx"] = stop
this_event_dom_info["total_observed_charge"] = np.sum(
hits[start:stop]["charge"]
)
print("this evt. noise rate %.5f" % np.sum(event_dom_info["noise_rate_per_ns"]))
print(
"DOMs with zero noise: %i"
% np.sum(event_dom_info["noise_rate_per_ns"] == 0)
)
        # set DOMs with (near-)zero noise to a minimum noise rate
noise = event_dom_info["noise_rate_per_ns"]
mask = noise < 1e-7
noise[mask] = 1e-7
print("this evt. noise rate %.5f" % np.sum(event_dom_info["noise_rate_per_ns"]))
print(
"DOMs with zero noise: %i"
% np.sum(event_dom_info["noise_rate_per_ns"] == 0)
)
print("min noise: ", np.min(noise))
print("mean noise: ", np.mean(noise))
assert np.sum(event_dom_info["quantum_efficiency"] <= 0) == 0, "negative QE"
assert np.sum(event_dom_info["total_observed_charge"]) > 0, "no charge"
assert np.isfinite(
np.sum(event_dom_info["total_observed_charge"])
), "non-finite charge"
def loglike(cube, ndim=None, nparams=None): # pylint: disable=unused-argument
"""Get log likelihood values.
Defined as a closure to capture particulars of the event and priors
without having to pass these as parameters to the function.
Note that this is called _after_ `prior` has been called, so `cube`
already contains the parameter values scaled to be in their
physical ranges.
Parameters
----------
cube
ndim : int, optional
nparams : int, optional
Returns
-------
llh : float
"""
t0 = time.time()
if len(t_start) == 0:
t_start.append(time.time())
hypo = OrderedDict(list(zip(opt_param_names, cube)))
generic_sources = hypo_handler.get_generic_sources(hypo)
pegleg_sources = hypo_handler.get_pegleg_sources(hypo)
scaling_sources = hypo_handler.get_scaling_sources(hypo)
get_llh_retval = self.get_llh(
generic_sources=generic_sources,
pegleg_sources=pegleg_sources,
scaling_sources=scaling_sources,
event_hit_info=event_hit_info,
event_dom_info=event_dom_info,
pegleg_stepsize=1,
)
llh, pegleg_idx, scalefactor = get_llh_retval[:3]
llh += LLH_FUDGE_SUMMAND
aux_values.append(get_llh_retval[3:])
assert np.isfinite(llh), "LLH not finite: {}".format(llh)
# assert llh <= 0, "LLH positive: {}".format(llh)
additional_results = []
if self.hypo_handler.pegleg_kernel:
pegleg_result = pegleg_eval(
pegleg_idx=pegleg_idx,
dt=pegleg_muon_dt,
const_e_loss=pegleg_muon_const_e_loss,
)
additional_results.append(pegleg_result)
if self.hypo_handler.scaling_kernel:
additional_results.append(scalefactor * SCALING_CASCADE_ENERGY)
result = (
tuple(cube[:n_opt_params])
+ tuple(fixed_params.values())
+ tuple(additional_results)
)
param_values.append(result)
log_likelihoods.append(llh)
n_calls = len(log_likelihoods)
t1 = time.time()
if n_calls % REPORT_AFTER == 0:
print("")
if truth_info:
msg = "truth: "
for key, val in zip(all_param_names, result):
try:
msg += " %s=%.1f" % (key, truth_info[key])
except KeyError:
pass
print(msg)
t_now = time.time()
best_idx = np.argmax(log_likelihoods)
best_llh = log_likelihoods[best_idx]
best_p = param_values[best_idx]
msg = "best llh = {:.3f} @ ".format(best_llh)
for key, val in zip(all_param_names, best_p):
msg += " %s=%.1f" % (key, val)
print(msg)
msg = "this llh = {:.3f} @ ".format(llh)
for key, val in zip(all_param_names, result):
msg += " %s=%.1f" % (key, val)
print(msg)
print("{} LLH computed".format(n_calls))
print(
"avg time per llh: {:.3f} ms".format(
(t_now - t_start[0]) / n_calls * 1000
)
)
print("this llh took: {:.3f} ms".format((t1 - t0) * 1000))
print("")
return llh
self.loglike = loglike
def make_llhp(self, method, log_likelihoods, param_values, aux_values, save):
"""Create a structured numpy array containing the reco information;
also add derived dimensions, and optionally save to disk.
Parameters
----------
method : str
log_likelihoods : array
param_values : array
aux_values : array
save : bool
Returns
-------
llhp : length-n_llhp array of dtype llhp_t
Note that llhp_t is derived from the defined parameter names.
"""
reco_name = "retro_" + method
# Setup LLHP dtype
dim_names = list(self.hypo_handler.all_param_names)
# add derived quantities
derived_dim_names = ["energy", "azimuth", "zenith"]
if "cascade_d_zenith" in dim_names and "cascade_d_azimuth" in dim_names:
derived_dim_names += ["cascade_zenith", "cascade_azimuth"]
aux_names = ["zero_dllh", "lower_dllh", "upper_dllh"]
all_dim_names = dim_names + derived_dim_names + aux_names
llhp_t = np.dtype([(field, np.float32) for field in ["llh"] + all_dim_names])
# dump
llhp = np.zeros(shape=len(param_values), dtype=llhp_t)
llhp["llh"] = log_likelihoods
llhp[dim_names] = param_values
llhp[aux_names] = aux_values
# create derived dimensions
if "energy" in derived_dim_names:
if "track_energy" in dim_names:
llhp["energy"] += llhp["track_energy"]
if "cascade_energy" in dim_names:
llhp["energy"] += llhp["cascade_energy"]
if "cascade_d_zenith" in dim_names and "cascade_d_azimuth" in dim_names:
# create cascade angles from delta angles
rotate_points(
p_theta=llhp["cascade_d_zenith"],
p_phi=llhp["cascade_d_azimuth"],
rot_theta=llhp["track_zenith"],
rot_phi=llhp["track_azimuth"],
q_theta=llhp["cascade_zenith"],
q_phi=llhp["cascade_azimuth"],
)
if "track_zenith" in all_dim_names and "track_azimuth" in all_dim_names:
if "cascade_zenith" in all_dim_names and "cascade_azimuth" in all_dim_names:
                # the resulting radius isn't needed, but an output array must
                # still be supplied to the function
r_out = np.empty(shape=llhp.shape, dtype=np.float32)
# combine angles:
add_vectors(
r1=llhp["track_energy"],
theta1=llhp["track_zenith"],
phi1=llhp["track_azimuth"],
r2=llhp["cascade_energy"],
theta2=llhp["cascade_zenith"],
phi2=llhp["cascade_azimuth"],
r3=r_out,
theta3=llhp["zenith"],
phi3=llhp["azimuth"],
)
else:
                # in this case there are no cascade angles
llhp["zenith"] = llhp["track_zenith"]
llhp["azimuth"] = llhp["track_azimuth"]
elif "cascade_zenith" in all_dim_names and "cascade_azimuth" in all_dim_names:
# in this case there are no track angles
llhp["zenith"] = llhp["cascade_zenith"]
llhp["azimuth"] = llhp["cascade_azimuth"]
if save:
fname = "{}.llhp".format(reco_name)
# NOTE: since each array can have different length and numpy
# doesn't handle "ragged" arrays nicely, forcing each llhp to be
# saved to its own file
llhp_outf = "{}{}.npy".format(self.event.meta["prefix"], fname)
llh = llhp["llh"]
cut_llhp = llhp[llh > np.max(llh) - 30]
print(
'Saving llhp within 30 LLH of max ({} llhp) to "{}"'.format(
len(cut_llhp), llhp_outf
)
)
np.save(llhp_outf, cut_llhp)
return llhp
def make_estimate(
self, method, llhp, remove_priors, run_info=None, fit_meta=None, save=True
):
"""Create estimate from llhp, attach result to `self.event`, and save to disk.
Parameters
----------
method : str
Reconstruction method used
llhp : length-n_llhp array of dtype llhp_t
remove_priors : bool
Remove effect of priors
fit_meta : mapping, optional
save : bool
store to npy file
Returns
-------
estimate : numpy struct array
"""
reco_name = "retro_" + method
estimate, _ = estimate_from_llhp(
llhp=llhp,
treat_dims_independently=False,
use_prob_weights=True,
priors_used=self.priors_used if remove_priors else None,
meta=fit_meta,
)
# Test if the LLH would be positive without LLH_FUDGE_SUMMAND
if estimate["max_llh"] > LLH_FUDGE_SUMMAND:
sys.stderr.write(
"\nWARNING: Would be positive LLH w/o LLH_FUDGE_SUMMAND: {}\n".format(
estimate["max_llh"]
)
)
if estimate.dtype.names and "fit_status" in estimate.dtype.names:
if estimate["fit_status"] not in (FitStatus.OK, FitStatus.PositiveLLH):
raise ValueError(
"Postive LLH *and* fit failed with fit_status = {}".format(
estimate["fit_status"]
)
)
estimate["fit_status"] = FitStatus.PositiveLLH
# Place reco in current event in case another reco depends on it
if "recos" not in self.event:
self.event["recos"] = OrderedDict()
self.event["recos"][reco_name] = estimate
if not save:
return
estimate_outf = join(
self.event.meta["events_root"],
"recos",
"{}.npy".format(reco_name),
)
if isfile(estimate_outf):
estimates = np.load(estimate_outf, mmap_mode="r+")
try:
estimates[self.event.meta["event_idx"]] = estimate
finally:
# ensure file handle is not left open
del estimates
else:
estimates = np.full(
shape=self.event.meta["num_events"],
fill_value=np.nan,
dtype=estimate.dtype,
)
# Filling with nan doesn't set correct "fit_status"
estimates["fit_status"] = FitStatus.NotSet
estimates[self.event.meta["event_idx"]] = estimate
np.save(estimate_outf, estimates)
self.write_status_npy(
event=self.event,
method=method,
fit_status=estimate["fit_status"],
)
def write_status_npy(self, event, method, fit_status):
"""Write fit status to a numpy npy file.
This allows for a fit to fail before useful information about the fit
is generated, yet the failure can be recorded.
Parameters
-----------
event
method : str
reconstruction method, e.g., "crs_prefit"
fit_status : retro.retro_types.FitStatus
"""
reco_name = "retro_" + method
fit_status_outf = join(
event.meta["events_root"],
"recos",
"{}__fit_status.npy".format(reco_name),
)
if isfile(fit_status_outf):
fit_statuses = np.load(fit_status_outf, mmap_mode="r+")
try:
fit_statuses[event.meta["event_idx"]] = fit_status
finally:
# ensure file handle is not left open
del fit_statuses
else:
fit_statuses = np.full(
shape=event.meta["num_events"],
fill_value=FitStatus.NotSet.value,
dtype=np.int8,
)
fit_statuses[event.meta["event_idx"]] = fit_status
np.save(fit_status_outf, fit_statuses)
def run_test(self, seed):
"""Random sampling instead of an actual minimizer"""
raise NotImplementedError("`run_test` not implemented") # TODO
t0 = time.time()
kwargs = OrderedDict()
for arg_name in get_arg_names(self.run_test)[1:]:
kwargs[arg_name] = locals()[arg_name]
rand = np.random.RandomState(seed=seed)
for i in range(100):
param_vals = rand.uniform(0, 1, self.n_opt_params)
self.prior(param_vals)
llh = self.loglike(param_vals)
run_info = OrderedDict([("method", "run_test"), ("kwargs", kwargs)])
fit_meta = OrderedDict(
[
("fit_status", np.int8(FitStatus.OK)),
("run_time", np.float32(time.time() - t0)),
]
)
return run_info, fit_meta
def run_with_truth(self, rand_dims=None, n_samples=10000, seed=0):
"""Run with all params set to truth except for the dimensions defined,
which will be randomized.
Parameters
----------
rand_dims : list, optional
Dimensions to randomly sample; all not specified are set to truth
n_samples : int
Number of samples to draw
"""
raise NotImplementedError("`run_with_truth` not implemented") # TODO
t0 = time.time()
if rand_dims is None:
rand_dims = []
kwargs = OrderedDict()
for arg_name in get_arg_names(self.run_with_truth)[1:]:
kwargs[arg_name] = locals()[arg_name]
truth = self.event["truth"]
true_params = np.zeros(self.n_opt_params)
for i, name in enumerate(self.hypo_handler.opt_param_names):
name = name.replace("cascade_", "total_cascade_")
true_params[i] = truth[name]
rand = np.random.RandomState(seed=seed)
if len(rand_dims) > 1:
for i in range(n_samples):
rand_params = rand.uniform(0, 1, self.n_opt_params)
self.prior(rand_params)
param_vals = np.zeros(self.n_opt_params)
param_vals[:] = true_params[:]
param_vals[rand_dims] = rand_params[rand_dims]
llh = self.loglike(param_vals)
else:
llh = self.loglike(true_params)
run_info = OrderedDict([("method", "run_with_truth"), ("kwargs", kwargs)])
fit_meta = OrderedDict(
[
("fit_status", np.int8(FitStatus.OK)),
("run_time", np.float32(time.time() - t0)),
]
)
return run_info, fit_meta
def run_crs(
self,
n_live,
max_iter,
max_noimprovement,
min_llh_std,
stdthresh,
use_sobol,
seed,
):
"""
At the moment Cartesian (standard) parameters and spherical parameters
are assumed to have particular names (i.e., spherical coordinates start
with "az" and "zen"). Furthermore, all Cartesian coordinates must come
first followed by the pairs of (azimuth, zenith) spherical coordinates;
e.g., "az_1", "zen_1", "az_2", "zen_2", etc.
Parameters
----------
n_live : int
Number of live points
max_iter : int
Maximum iterations
max_noimprovement : int
Maximum iterations with no improvement of best point
min_llh_std : float
Break if stddev of llh values across all livepoints drops below
this threshold
stdthresh : mapping
Break condition on stddev of Cartesian dimension(s), i.e.,
non-spherical dimensions (x, y, z, time, length, energies, etc.).
Specify -1 to set no condition on a dimension. Keys are dimension
names and values are the standard deviations for each dimension.
All specified dimensions must drop below the specified stddevs for
this break condition to be met.
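            E.g., ``stdthresh=dict(x=1, y=1, z=1, time=3)`` requires the x, y,
            and z stddevs to drop below 1 and the time stddev below 3 (in the
            parameters' native units, presumably meters and nanoseconds).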
use_sobol : bool
Use a Sobol sequence instead of numpy pseudo-random numbers. Seems
to do slightly better (but only small differences observed in tests
so far)
seed : int
Random seed
Returns
-------
        run_info : OrderedDict
        fit_meta : OrderedDict
"""
t0 = time.time()
from spherical_opt.spherical_opt import spherical_opt
if use_sobol:
from sobol import i4_sobol
rand = np.random.RandomState(seed=seed)
# Record kwargs user supplied (after translation & standardization)
kwargs = OrderedDict()
for arg_name in get_arg_names(self.run_crs)[1:]:
kwargs[arg_name] = locals()[arg_name]
run_info = OrderedDict(
[
("method", "run_crs"),
("method_description", "CRS2spherical+lm+sampling"),
("kwargs", kwargs),
]
)
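        # Indices (into `all_param_names`) of each (azimuth, zenith) pair;
        # `spherical_opt` treats each such pair as one spherical parameter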
spherical_pairs = []
cstdthresh = []
opt_param_properties = OrderedDict()
for idx, pname in enumerate(self.hypo_handler.opt_param_names):
if "azimuth" in pname:
opt_param_properties[pname] = dict(
is_cartesian=False, stdthresh_defined=False
)
p_zen = pname.replace("azimuth", "zenith")
assert self.hypo_handler.opt_param_names[idx + 1] == p_zen
assert pname not in stdthresh, "threshold on sph params not implemented yet"
spherical_pairs.append(
[
self.hypo_handler.all_param_names.index(pname),
self.hypo_handler.all_param_names.index(p_zen),
]
)
elif "zenith" in pname:
                # The (azimuth, zenith) pair is treated atomically within
                # `spherical_opt`, so ignore "zenith" since we already recorded
                # "azimuth"
assert pname not in stdthresh, "threshold on sph params not implemented yet"
else:
if pname in stdthresh:
thresh = stdthresh[pname]
cstdthresh.append(thresh)
opt_param_properties[pname] = dict(
is_cartesian=True, stdthresh_defined=thresh > 0
)
else:
cstdthresh.append(-1)
opt_param_properties[pname] = dict(
is_cartesian=True, stdthresh_defined=False
)
# Note we only kept "azimuth" part of spherical pairs above
params_std = np.full(
shape=1,
fill_value=np.nan,
dtype=[
(pname.replace("azimuth", "sph"), np.float32)
for pname in self.hypo_handler.opt_param_names
],
)
stdthresh_met_at_iter = np.full(
shape=1,
fill_value=-1,
dtype=[
(pname, np.int32)
for pname, ppties in opt_param_properties.items()
if ppties["stdthresh_defined"]
],
)
fit_meta = OrderedDict(
[
("fit_status", np.int8(FitStatus.NotSet)),
("iterations", np.int32(-1)),
("stopping_flag", np.int8(-1)),
("llh_std", np.float32(np.nan)),
("no_improvement_counter", np.int32(-1)),
("params_std", params_std),
("stdthresh_met_at_iter", stdthresh_met_at_iter),
("num_simplex_successes", np.int32(-1)),
("num_mutation_successes", np.int32(-1)),
("num_failures", np.int32(-1)),
("run_time", np.float32(np.nan)),
]
)
def func(x):
return -self.loglike(x)
try:
initial_points = []
# generate initial population
for i in range(n_live):
# Sobol seems to do slightly better than pseudo-random numbers
if use_sobol:
# Note we start at seed=1 since for n_live=1 this puts the
# first point in the middle of the range for all params (0.5),
# while seed=0 produces all zeros (the most extreme point
# possible, which will bias the distribution away from more
# likely values).
x, _ = i4_sobol(
dim_num=self.n_opt_params, # number of dimensions
seed=i + 1, # Sobol sequence number
)
else:
x = rand.uniform(0, 1, self.n_opt_params)
# Apply prior xforms to `param_vals` (contents are overwritten)
self.prior(x)
initial_points.append(x)
initial_points = np.vstack(initial_points)
fit = spherical_opt(
func=func,
method="CRS2",
initial_points=initial_points,
spherical_indices=spherical_pairs,
max_iter=max_iter,
max_noimprovement=max_noimprovement,
fstdthresh=min_llh_std,
cstdthresh=cstdthresh,
meta=True,
rand=rand,
)
# Populate the meaningful stats about cartesian stddevs to our
# output dict from the meta dict returned by spherical_opt.
cartesian_param_idx = 0
spherical_param_idx = 0
for pname, ppties in opt_param_properties.items():
if ppties["is_cartesian"]:
c_or_s_idx = cartesian_param_idx
cartesian_param_idx += 1
std_name = "cstd"
else:
                    # A single spherical param represents the (azimuth, zenith)
                    # pair; skip "zenith" since "azimuth" was already recorded
if "zenith" in pname:
continue
c_or_s_idx = spherical_param_idx
spherical_param_idx += 1
std_name = "sstd"
pname = pname.replace("azimuth", "sph")
# Record stds of all params, whether or not a threshold is
# defined
if std_name in fit["meta"]:
params_std[pname] = fit["meta"][std_name][c_or_s_idx]
# Only meaningful to record if a std threshold has been
# defined for the param
met_at_iter_name = "{}thresh_met_at_iter".format(std_name)
if ppties["stdthresh_defined"] and met_at_iter_name in fit["meta"]:
stdthresh_met_at_iter[pname] = fit["meta"][met_at_iter_name][c_or_s_idx]
fit_meta["fit_status"] = np.int8(
FitStatus.OK if fit["success"] else FitStatus.FailedToConverge
)
fit_meta["iterations"] = np.int32(fit["nit"])
fit_meta["stopping_flag"] = np.int8(fit["stopping_flag"])
fit_meta["llh_std"] = np.float32(fit["meta"]["fstd"])
fit_meta["no_improvement_counter"] = np.int32(fit["meta"]["no_improvement_counter"])
fit_meta["params_std"] = params_std
fit_meta["stdthresh_met_at_iter"] = stdthresh_met_at_iter
fit_meta["num_simplex_successes"] = np.int32(fit["meta"]["num_simplex_successes"])
fit_meta["num_mutation_successes"] = np.int32(fit["meta"]["num_mutation_successes"])
fit_meta["num_failures"] = np.int32(fit["meta"]["num_failures"])
fit_meta["run_time"] = np.float32(time.time() - t0)
except KeyboardInterrupt:
raise
except MissingOrInvalidPrefitError:
fit_meta["fit_status"] = FitStatus.MissingSeed
self._print_non_fatal_exception(method=run_info["method"])
except Exception:
self._print_non_fatal_exception(method=run_info["method"])
return run_info, fit_meta
def run_scipy(self, method, eps):
"""Use an optimizer from scipy"""
t0 = time.time()
from scipy import optimize
kwargs = OrderedDict()
for arg_name in get_arg_names(self.run_scipy)[1:]:
kwargs[arg_name] = locals()[arg_name]
run_info = OrderedDict([("method", "run_scipy"), ("kwargs", kwargs)])
# initial guess
x0 = 0.5 * np.ones(shape=self.n_opt_params)
def func(x, *args): # pylint: disable=unused-argument, missing-docstring
param_vals = np.copy(x)
self.prior(param_vals)
llh = self.loglike(param_vals)
del param_vals
return -llh
bounds = [(eps, 1 - eps)] * self.n_opt_params
settings = OrderedDict()
settings["eps"] = eps
fit_status = FitStatus.GeneralFailure
try:
if method == "differential_evolution":
optimize.differential_evolution(func, bounds=bounds, popsize=100)
else:
optimize.minimize(
func, x0, method=method, bounds=bounds, options=settings
)
fit_status = FitStatus.OK
except KeyboardInterrupt:
raise
except MissingOrInvalidPrefitError:
fit_status = FitStatus.MissingSeed
self._print_non_fatal_exception(method=run_info["method"])
except Exception:
self._print_non_fatal_exception(method=run_info["method"])
fit_meta = OrderedDict(
[
("fit_status", np.int8(fit_status)),
("run_time", np.float32(time.time() - t0)),
]
)
return run_info, fit_meta
def run_skopt(self):
"""Use an optimizer from scikit-optimize"""
t0 = time.time()
from skopt import gp_minimize # , forest_minimize
settings = OrderedDict(
[
("acq_func", "EI"), # acquisition function
("n_calls", 1000), # number of evaluations of f
("n_random_starts", 5), # number of random initialization
]
)
run_info = OrderedDict([("method", "run_skopt"), ("settings", settings)])
# initial guess
x0 = 0.5 * np.ones(shape=self.n_opt_params)
def func(x, *args): # pylint: disable=unused-argument, missing-docstring
param_vals = np.copy(x)
self.prior(param_vals)
llh = self.loglike(param_vals)
del param_vals
return -llh
bounds = [(0, 1)] * self.n_opt_params
fit_status = FitStatus.GeneralFailure
try:
_ = gp_minimize(
func, # function to minimize
bounds, # bounds on each dimension of x
x0=list(x0),
**settings
)
fit_status = FitStatus.OK
except KeyboardInterrupt:
raise
except MissingOrInvalidPrefitError:
fit_status = FitStatus.MissingSeed
self._print_non_fatal_exception(method=run_info["method"])
except Exception:
self._print_non_fatal_exception(method=run_info["method"])
fit_meta = OrderedDict(
[
("fit_status", np.int8(fit_status)),
("run_time", np.float32(time.time() - t0)),
]
)
return run_info, fit_meta
def run_nlopt(self):
"""Use an optimizer from nlopt"""
t0 = time.time()
import nlopt
def func(x, grad): # pylint: disable=unused-argument, missing-docstring
param_vals = np.copy(x)
self.prior(param_vals)
llh = self.loglike(param_vals)
del param_vals
return -llh
# bounds
lower_bounds = np.zeros(shape=self.n_opt_params)
upper_bounds = np.ones(shape=self.n_opt_params)
# for angles make bigger
for i, name in enumerate(self.hypo_handler.opt_param_names):
if "azimuth" in name:
lower_bounds[i] = -0.5
upper_bounds[i] = 1.5
if "zenith" in name:
lower_bounds[i] = -0.5
upper_bounds[i] = 1.5
# initial guess
x0 = 0.5 * np.ones(shape=self.n_opt_params)
# stepsize
dx = np.zeros(shape=self.n_opt_params)
for i in range(self.n_opt_params):
if "azimuth" in self.hypo_handler.opt_param_names[i]:
dx[i] = 0.001
elif "zenith" in self.hypo_handler.opt_param_names[i]:
dx[i] = 0.001
elif self.hypo_handler.opt_param_names[i] in ("x", "y"):
dx[i] = 0.005
elif self.hypo_handler.opt_param_names[i] == "z":
dx[i] = 0.002
elif self.hypo_handler.opt_param_names[i] == "time":
dx[i] = 0.01
# seed from several angles
# opt = nlopt.opt(nlopt.LN_NELDERMEAD, self.n_opt_params)
opt = nlopt.opt(nlopt.GN_CRS2_LM, self.n_opt_params)
ftol_abs = 0.1
# opt = nlopt.opt(nlopt.LN_PRAXIS, self.n_opt_params)
opt.set_lower_bounds([0.0] * self.n_opt_params)
opt.set_upper_bounds([1.0] * self.n_opt_params)
opt.set_min_objective(func)
opt.set_ftol_abs(ftol_abs)
settings = OrderedDict(
[("method", opt.get_algorithm_name()), ("ftol_abs", np.float32(ftol_abs))]
)
run_info = OrderedDict([("method", "run_nlopt"), ("settings", settings)])
fit_status = FitStatus.GeneralFailure
try:
# initial guess
angles = np.linspace(0, 1, 3)
angles = 0.5 * (angles[1:] + angles[:-1])
for zen in angles:
for az in angles:
x0 = 0.5 * np.ones(shape=self.n_opt_params)
for i in range(self.n_opt_params):
if "az" in self.hypo_handler.opt_param_names[i]:
x0[i] = az
elif "zen" in self.hypo_handler.opt_param_names[i]:
x0[i] = zen
x = opt.optimize(x0) # pylint: disable=unused-variable
# local_opt = nlopt.opt(nlopt.LN_NELDERMEAD, self.n_opt_params)
# local_opt.set_lower_bounds([0.]*self.n_opt_params)
# local_opt.set_upper_bounds([1.]*self.n_opt_params)
# local_opt.set_min_objective(func)
##local_opt.set_ftol_abs(0.5)
##local_opt.set_ftol_abs(100)
##local_opt.set_xtol_rel(10)
# local_opt.set_ftol_abs(1)
# global
# opt = nlopt.opt(nlopt.G_MLSL, self.n_opt_params)
# opt.set_lower_bounds([0.]*self.n_opt_params)
# opt.set_upper_bounds([1.]*self.n_opt_params)
# opt.set_min_objective(func)
# opt.set_local_optimizer(local_opt)
# opt.set_ftol_abs(10)
# opt.set_xtol_rel(1)
# opt.set_maxeval(1111)
# opt = nlopt.opt(nlopt.GN_ESCH, self.n_opt_params)
# opt = nlopt.opt(nlopt.GN_ISRES, self.n_opt_params)
# opt = nlopt.opt(nlopt.GN_CRS2_LM, self.n_opt_params)
# opt = nlopt.opt(nlopt.GN_DIRECT_L_RAND_NOSCAL, self.n_opt_params)
# opt = nlopt.opt(nlopt.LN_NELDERMEAD, self.n_opt_params)
# opt.set_lower_bounds(lower_bounds)
# opt.set_upper_bounds(upper_bounds)
# opt.set_min_objective(func)
# opt.set_ftol_abs(0.1)
# opt.set_population([x0])
# opt.set_initial_step(dx)
# local_opt.set_maxeval(10)
# x = opt.optimize(x0) # pylint: disable=unused-variable
# polish it up
# print("***************** polishing ******************")
# dx = np.ones(shape=self.n_opt_params) * 0.001
# dx[0] = 0.1
# dx[1] = 0.1
# local_opt = nlopt.opt(nlopt.LN_NELDERMEAD, self.n_opt_params)
# lower_bounds = np.clip(np.copy(x) - 0.1, 0, 1)
# upper_bounds = np.clip(np.copy(x) + 0.1, 0, 1)
# lower_bounds[0] = 0
# lower_bounds[1] = 0
# upper_bounds[0] = 0
# upper_bounds[1] = 0
# local_opt.set_lower_bounds(lower_bounds)
# local_opt.set_upper_bounds(upper_bounds)
# local_opt.set_min_objective(func)
# local_opt.set_ftol_abs(0.1)
# local_opt.set_initial_step(dx)
# x = opt.optimize(x)
fit_status = FitStatus.OK
except KeyboardInterrupt:
raise
except MissingOrInvalidPrefitError:
fit_status = FitStatus.MissingSeed
self._print_non_fatal_exception(method=run_info["method"])
except Exception:
self._print_non_fatal_exception(method=run_info["method"])
fit_meta = OrderedDict(
[
("fit_status", np.int8(fit_status)),
("run_time", np.float32(time.time() - t0)),
("ftol_abs", np.float32(opt.get_ftol_abs())),
("ftol_rel", np.float32(opt.get_ftol_rel())),
("xtol_abs", np.float32(opt.get_xtol_abs())),
("xtol_rel", np.float32(opt.get_xtol_rel())),
("maxeval", np.float32(opt.get_maxeval())),
("maxtime", np.float32(opt.get_maxtime())),
("stopval", np.float32(opt.get_stopval())),
]
)
return run_info, fit_meta
def run_dynesty(
self,
n_live,
maxiter,
maxcall,
dlogz,
):
"""Setup and run Dynesty on an event.
        Parameters
        ----------
        n_live : int
        maxiter : int
        maxcall : int
        dlogz : float
Returns
-------
run_info : OrderedDict
Metadata dict containing dynesty settings used and extra info returned by
dynesty
fit_meta : OrderedDict
"""
import dynesty
t0 = time.time()
kwargs = OrderedDict()
for arg_name in get_arg_names(self.run_dynesty)[1:]:
kwargs[arg_name] = locals()[arg_name]
dn_kwargs = OrderedDict(
[
("ndim", self.n_opt_params),
("nlive", n_live),
(
"periodic",
[i for i, p in enumerate(self.hypo_handler.all_param_names) if "az" in p.lower()],
),
]
)
sampler_kwargs = OrderedDict(
[
("maxiter", maxiter),
("maxcall", maxcall),
("dlogz", dlogz),
]
)
run_info = OrderedDict(
[
("method", "run_dynesty"),
("kwargs", kwargs),
("dn_kwargs", dn_kwargs),
("sampler_kwargs", sampler_kwargs),
]
)
fit_meta = OrderedDict()
fit_meta["fit_status"] = np.int8(FitStatus.NotSet)
sampler = dynesty.NestedSampler(
loglikelihood=self.loglike,
prior_transform=self.prior,
method="unif",
bound="single",
update_interval=1,
**dn_kwargs
)
print("sampler instantiated")
sampler.run_nested(**sampler_kwargs)
fit_meta["fit_status"] = np.int8(FitStatus.OK)
fit_meta["run_time"] = np.float32(time.time() - t0)
print(fit_meta)
return run_info, fit_meta
def run_multinest(
self,
importance_sampling,
max_modes,
const_eff,
n_live,
evidence_tol,
sampling_eff,
max_iter,
seed,
):
"""Setup and run MultiNest on an event.
See the README file from MultiNest for greater detail on parameters
        specific to MultiNest (parameters from `importance_sampling` on).
Parameters
----------
importance_sampling
max_modes
const_eff
n_live
evidence_tol
sampling_eff
max_iter
Note that this limit is the maximum number of sample replacements
and _not_ max number of likelihoods evaluated. A replacement only
occurs when a likelihood is found that exceeds the minimum
likelihood among the live points.
seed
Returns
-------
run_info : OrderedDict
Metadata dict containing MultiNest settings used and extra info returned by
MultiNest
fit_meta : OrderedDict
"""
t0 = time.time()
# Import pymultinest here; it's a less common dependency, so other
# functions/constants in this module will still be import-able w/o it.
import pymultinest
kwargs = OrderedDict()
for arg_name in get_arg_names(self.run_multinest)[1:]:
kwargs[arg_name] = locals()[arg_name]
mn_kwargs = OrderedDict(
[
("n_dims", self.n_opt_params),
("n_params", self.n_params),
("n_clustering_params", self.n_opt_params),
(
"wrapped_params",
["az" in p.lower() for p in self.hypo_handler.all_param_names],
),
("importance_nested_sampling", importance_sampling),
("multimodal", max_modes > 1),
("const_efficiency_mode", const_eff),
("n_live_points", n_live),
("evidence_tolerance", evidence_tol),
("sampling_efficiency", sampling_eff),
("null_log_evidence", -1e90),
("max_modes", max_modes),
("mode_tolerance", -1e90),
("seed", seed),
("log_zero", -1e100),
("max_iter", max_iter),
]
)
run_info = OrderedDict(
[("method", "run_multinest"), ("kwargs", kwargs), ("mn_kwargs", mn_kwargs)]
)
fit_status = FitStatus.GeneralFailure
tmpdir = mkdtemp()
outputfiles_basename = join(tmpdir, "")
mn_fit_meta = {}
try:
pymultinest.run(
LogLikelihood=self.loglike,
Prior=self.prior,
verbose=True,
outputfiles_basename=outputfiles_basename,
resume=False,
write_output=True,
n_iter_before_update=REPORT_AFTER,
**mn_kwargs
)
fit_status = FitStatus.OK
mn_fit_meta = get_multinest_meta(outputfiles_basename=outputfiles_basename)
except KeyboardInterrupt:
raise
except MissingOrInvalidPrefitError:
fit_status = FitStatus.MissingSeed
self._print_non_fatal_exception(method=run_info["method"])
except Exception:
self._print_non_fatal_exception(method=run_info["method"])
finally:
rmtree(tmpdir)
# TODO: If MultiNest fails in specific ways, set fit_status accordingly...
fit_meta = OrderedDict(
[
("fit_status", np.int8(fit_status)),
("logZ", np.float32(mn_fit_meta.pop("logZ", np.nan))),
("logZ_err", np.float32(mn_fit_meta.pop("logZ_err", np.nan))),
("ins_logZ", np.float32(mn_fit_meta.pop("ins_logZ", np.nan))),
("ins_logZ_err", np.float32(mn_fit_meta.pop("ins_logZ_err", np.nan))),
("run_time", np.float32(time.time() - t0)),
]
)
if mn_fit_meta:
sys.stderr.write(
"WARNING: Unrecorded MultiNest metadata: {}\n".format(
", ".join("{} = {}".format(k, v) for k, v in mn_fit_meta.items())
)
)
return run_info, fit_meta
def get_multinest_meta(outputfiles_basename):
"""Get metadata from files that MultiNest writes to disk.
Parameters
----------
outputfiles_basename : str
Returns
-------
fit_meta : OrderedDict
Contains "logZ", "logZ_err" and, if importance nested sampling was run,
"ins_logZ" and "ins_logZ_err"
"""
fit_meta = OrderedDict()
if isdir(outputfiles_basename):
stats_fpath = join(outputfiles_basename, "stats.dat")
else:
stats_fpath = outputfiles_basename + "stats.dat"
with open(stats_fpath, "r") as stats_f:
stats = stats_f.readlines()
logZ, logZ_err = None, None
ins_logZ, ins_logZ_err = None, None
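    # Lines of interest in stats.dat are assumed (based on the parsing below)
    # to look like:
    #   Nested Sampling Global Log-Evidence  :  -123.45 +/-  0.67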
for line in stats:
if logZ is None and line.startswith("Nested Sampling Global Log-Evidence"):
logZ, logZ_err = [float(x) for x in line.split(":")[1].split("+/-")]
elif ins_logZ is None and line.startswith(
"Nested Importance Sampling Global Log-Evidence"
):
ins_logZ, ins_logZ_err = [float(x) for x in line.split(":")[1].split("+/-")]
if logZ is not None:
fit_meta["logZ"] = np.float32(logZ)
fit_meta["logZ_err"] = np.float32(logZ_err)
if ins_logZ is not None:
fit_meta["ins_logZ"] = np.float32(ins_logZ)
fit_meta["ins_logZ_err"] = np.float32(ins_logZ_err)
return fit_meta
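# Illustrative sketch of the parsing above, assuming a (hypothetical) stats.dat
# line of the form:
#   Nested Sampling Global Log-Evidence           :  -123.456789  +/-  0.123456
# line.split(":")[1].split("+/-") gives [" -123.456789  ", "  0.123456"], so
# logZ = -123.456789 and logZ_err = 0.123456.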
def main(description=__doc__):
"""Script interface to Reco class and Reco.run(...) method"""
parser = ArgumentParser(description=description)
parser.add_argument(
"--methods",
required=True,
choices=METHODS,
nargs="+",
help="""Method(s) to use for performing reconstructions; performed in
order specified, so be sure to specify pre-fits / fits used as seeds
first""",
)
parser.add_argument(
"--redo-failed",
action="store_true",
help="""Whether to re-reconstruct events that have been reconstructed
but have `fit_status` set to non-zero (i.e., not `FitStatus.OK`), in
addition to reconstructing events with `fit_status` set to -1 (i.e.,
`FitStatus.NotSet`)""",
)
parser.add_argument(
"--redo-all",
action="store_true",
help="""Whether to reconstruct all events without existing
reconstructions AND re-reconstruct all events that have existing
reconstructions, regardless if their `fit_status` is OK or some form of
failure""",
)
parser.add_argument(
"--save-llhp",
action="store_true",
help="Whether to save LLHP within 30 LLH of max-LLH to disk",
)
parser.add_argument(
"--filter",
default=None,
help="""Filter to apply for selecting events to reconstruct. String is
passed through `eval` and must produce a scalar value interpretable via
`bool(eval(filter))`. Current event is accessible via the name `event`
and numpy is named `np`. E.g.,
--filter='event["header"]["L5_oscNext_bool"] and len(event["hits"]) >= 8'"""
)
split_kwargs = init_obj.parse_args(
dom_tables=True, tdi_tables=True, events=True, parser=parser
)
other_kw = split_kwargs.pop("other_kw")
events_kw = split_kwargs.pop("events_kw")
my_reco = Reco(**split_kwargs)
start_time = time.time()
my_events = StandaloneEvents(events_kw)
for event in my_events.events:
my_reco.run(event, **other_kw)
print("Total run time is {:.3f} s".format(time.time() - start_time))
if __name__ == "__main__":
main()
|
# Copyright 2020–2021 Cirq on IQM developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for mapping Cirq Operations to the IQM transfer format.
"""
from cirq.ops import (CZPowGate, MeasurementGate, Operation, PhasedXPowGate,
XPowGate, YPowGate)
from iqm_client.iqm_client import Instruction
class OperationNotSupportedError(RuntimeError):
"""Raised when a given operation is not supported by the IQM server."""
def map_operation(operation: Operation) -> Instruction:
"""Map a Cirq Operation to the IQM data transfer format.
Assumes the circuit has been transpiled so that it only contains operations natively supported by the
given IQM quantum architecture.
Args:
operation: a Cirq Operation
Returns:
Instruction: the converted operation
Raises:
        OperationNotSupportedError: When the circuit contains an unsupported operation.
"""
phased_rx_name = 'phased_rx'
qubits = [str(qubit) for qubit in operation.qubits]
if isinstance(operation.gate, PhasedXPowGate):
return Instruction(
name=phased_rx_name,
qubits=qubits,
args={
'angle_t': operation.gate.exponent / 2,
'phase_t': operation.gate.phase_exponent / 2
}
)
if isinstance(operation.gate, XPowGate):
return Instruction(
name=phased_rx_name,
qubits=qubits,
args={'angle_t': operation.gate.exponent / 2,
'phase_t': 0}
)
if isinstance(operation.gate, YPowGate):
return Instruction(
name=phased_rx_name,
qubits=qubits,
args={'angle_t': operation.gate.exponent / 2,
'phase_t': 0.25}
)
if isinstance(operation.gate, MeasurementGate):
if any(operation.gate.full_invert_mask()):
raise OperationNotSupportedError('Invert mask not supported')
return Instruction(
name='measurement',
qubits=qubits,
args={'key': operation.gate.key}
)
if isinstance(operation.gate, CZPowGate):
if operation.gate.exponent == 1.0:
return Instruction(
name='cz',
qubits=qubits,
args={}
)
raise OperationNotSupportedError(
f'CZPowGate exponent was {operation.gate.exponent}, but only 1 is natively supported.'
)
raise OperationNotSupportedError(f'{type(operation.gate)} not natively supported.')
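# Minimal usage sketch (assumes cirq is available; the qubit name is arbitrary):
#
#   import cirq
#   q = cirq.NamedQubit('QB1')
#   op = cirq.PhasedXPowGate(exponent=0.5, phase_exponent=0.25).on(q)
#   map_operation(op)
#   # -> Instruction(name='phased_rx', qubits=['QB1'],
#   #                args={'angle_t': 0.25, 'phase_t': 0.125})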
|
import React, { Component } from 'react';
export default class Resume extends Component {
render() {
let resumeData = this.props.resumeData;
return (
<section id="resume">
<div className="row education">
<div className="three columns header-col">
<h1><span>Education</span></h1>
</div>
<div className="nine columns main-col">
{
resumeData.education && resumeData.education.map((item)=>{
return(
<div className="row item" key={item.id}>
<div className="twelve columns">
<h3>{item.UniversityName}</h3>
<p className="info">
{item.specialization}
{/* <span>•</span> <em className="date">{item.startMonth} {item.startYear}</em></p> */}
<span>•</span> <em className="date">{item.startMonth} {item.startYear} - {item.MonthOfPassing} {item.YearOfPassing}</em></p>
<p>
{item.Achievements}
</p>
</div>
</div>
)
})
}
</div>
</div>
<div className="row work">
<div className="three columns header-col">
<h1><span>Work</span></h1>
</div>
<div className="nine columns main-col">
{
resumeData.work && resumeData.work.map((item) => {
return(
<div className="row item" key={item.id}>
<div className="twelve columns">
<h3>{item.CompanyName}</h3>
<p className="info">
{item.specialization}
<span>•</span> <em className="date">{item.startMonth} {item.startYear} - {item.MonthOfLeaving} {item.YearOfLeaving}</em></p>
<p>
{item.Achievements}
</p>
</div>
</div>
)
})
}
</div>
</div>
<div className="row skill">
<div className="three columns header-col">
<h1><span>Skills</span></h1>
</div>
<div className="nine columns main-col">
<p>
{resumeData.skillsDescription}
</p>
<div className="bars">
<ul className="skills">
{
resumeData.skills && resumeData.skills.map((item) => {
return(
<li key={item.id}>
<span className={`bar-expand ${item.skillname.toLowerCase()}` }>
</span><em>{item.name}</em>
</li>
)
})
}
</ul>
</div>
</div>
</div>
</section>
);
}
}
|
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import data_helpers
from tensorflow.contrib import learn
import csv
from sklearn import metrics
import yaml
from knx.util.logging import Timing
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
if x.ndim == 1:
x = x.reshape((1, -1))
max_x = np.max(x, axis=1).reshape((-1, 1))
exp_x = np.exp(x - max_x)
return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1))
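# Example: softmax(np.array([1.0, 2.0, 3.0])) ~= [[0.090, 0.245, 0.665]];
# each row of the output sums to 1.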
with open("config.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
# Parameters
# ==================================================
# Data Parameters
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 32, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", None, "Checkpoint directory from training run")
tf.flags.DEFINE_string("testdir", None, "Test directory")
tf.flags.DEFINE_string("traindir", None, "Train directory")
tf.flags.DEFINE_bool("dev", False, "Dev directory")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
classes = ['Adult', 'Car_accident', 'Death_tragedy', 'Hate_speech', 'Religion', 'Safe']
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import cPickle as pkl
with Timing('Loading vocab...'):
vocab_processor = pkl.load(open('vocab.pkl', 'rb'))
def get_x_vector_y_index(x_raw=None, y_labels=None, dir=None):
test_filenames = None
    if dir is not None:
x_raw, y_labels, test_filenames = data_helpers.load_data_and_labels(dir, used_onehot=False, return_filenames=True)
#Convert string label into int label
y_index = [classes.index(e) for e in y_labels]
# Map data into vocabulary
with Timing('Transform test x_raw...'):
x_vector = np.array(list(vocab_processor.transform(x_raw)))
    if dir is not None:
return x_vector, y_index, test_filenames
else:
return x_vector, y_index
def Get_all_preds(test_data):
with Timing("Evaluating..."):
# ==================================================
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement)
# log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
scores = graph.get_operation_by_name("output/scores").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
# Generate batches for one epoch
batches = data_helpers.batch_iter(list(test_data), FLAGS.batch_size, 1, shuffle=False)
# Collect the predictions here
all_predictions = []
all_probabilities = None
for x_test_batch in batches:
# batch_predictions_scores = sess.run([predictions, scores], {input_x: x_test_batch, dropout_keep_prob: 1.0})
batch_predictions_scores = sess.run([predictions, scores], {input_x: x_test_batch, dropout_keep_prob: 1.0})
all_predictions = np.concatenate([all_predictions, batch_predictions_scores[0]])
# probabilities = softmax(batch_predictions_scores[1])
# if all_probabilities is not None:
# all_probabilities = np.concatenate([all_probabilities, probabilities])
# else:
# all_probabilities = probabilities
return all_predictions
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, make_scorer
def Eval(y_test, preds):
with Timing("Evaluating metrics ..."):
f1 = f1_score(y_test, preds, pos_label=None, average='macro')
precision = precision_score(y_test, preds, pos_label=None, average='macro')
recall = recall_score(y_test, preds, pos_label=None, average='macro')
print "F1: " + str(f1)
print "Precision: " + str(precision)
print "Recall: " + str(recall)
if FLAGS.traindir is not None:
print '########################'
print 'Evaluating on train data'
print '########################'
x_train_t, y_train_t, _ = get_x_vector_y_index(dir=FLAGS.traindir)
preds = Get_all_preds(x_train_t)
print 'Check len', len(preds), len(y_train_t)
Eval(y_train_t, preds)
if FLAGS.dev:
print '########################'
print 'Evaluating on dev data'
print '########################'
with open('dev_set.pkl', "rb") as fp:
x_dev_t = pkl.load(fp)
y_dev_labels = pkl.load(fp)
y_dev_t = [classes.index(e) for e in y_dev_labels]
preds = Get_all_preds(x_dev_t)
print 'Check len', len(preds), len(y_dev_t)
Eval(y_dev_t, preds)
if FLAGS.testdir is not None:
print '########################'
print 'Evaluating on test data'
print '########################'
x_test, y_test, test_filenames = get_x_vector_y_index(dir=FLAGS.testdir)
preds = Get_all_preds(x_test)
print 'Check len', len(preds), len(y_test)
Eval(y_test, preds)
print(metrics.classification_report(y_test, preds))
print(metrics.confusion_matrix(y_test, preds))
    f = open('error-log.txt', 'wb')
    # write the header line once, then log each misclassified example
    f.write('True Label, Predict label, filename\n')
    for i in range(len(preds)):
        true_label = int(y_test[i])
        predict_label = int(preds[i])
        if true_label != predict_label:
            f.write('{0} {1} {2}\n'.format(classes[true_label], classes[predict_label], test_filenames[i]))
    f.close() |
import React, { useEffect, useState } from "react";
import {
TouchableOpacity,
StyleSheet,
TextInput,
View,
Image,
Dimensions,
} from "react-native";
import { Input, Button, Text, useTheme } from "react-native-elements";
import { BButton, BackCancelButtons } from "../../components/index";
import { AntDesign, Ionicons, MaterialIcons } from "@expo/vector-icons";
import Icon from "react-native-vector-icons/Feather";
import MapComponent from "../../components/MapComponent";
import { COLORS, makeFullAddress } from "../../constants";
import {
generateGeolocation,
getCurrentLocation,
getJob,
updateDeliveryStatus,
} from "../../firebase";
const { width, height } = Dimensions.get("window"); //Screen dimensions
const ASPECT_RATIO = width / height;
const LATITUDE_DELTA = 0.04; // Controls the zoom level of the map. Smaller means more zoomed in
const LONGITUDE_DELTA = LATITUDE_DELTA * ASPECT_RATIO; // Dependent on LATITUDE_DELTA
const DeliveryComplete = ({ navigation, route }) => {
const {
packageItem,
homeScreen,
receiverItem,
senderItem,
delivererItem,
user_type,
init_deliverer_coord = null, // only has a possible value if this is a deliverer
} = route.params;
const [updatedPackageItem, setUpdatedPackageItem] = useState(packageItem);
const [sourceLat, setSourceLat] = useState(
init_deliverer_coord?.latitude ||
updatedPackageItem.data.deliverer_location?.latitude
);
const [sourceLong, setSourceLong] = useState(
init_deliverer_coord?.longitude ||
updatedPackageItem.data.deliverer_location?.longitude
);
const dropOff_address = updatedPackageItem.data.destination_address;
const full_dropOff_address = makeFullAddress(dropOff_address);
const [destinationLat, destinationLong] = [
dropOff_address.address_coord.latitude,
dropOff_address.address_coord.longitude,
];
const hasLocationData = destinationLat && sourceLat;
const mapProps = hasLocationData
? {
source: { sourceLat: sourceLat, sourceLong: sourceLong },
dest: { destLat: destinationLat, destLong: destinationLong },
LATITUDE_DELTA: LATITUDE_DELTA,
LONGITUDE_DELTA: LONGITUDE_DELTA,
style: styles.map,
}
: null;
  useEffect(() => {
    // effect callbacks must be synchronous (they may only return a cleanup
    // function), so the async work lives in an inner helper
    const syncDeliveryLocation = async () => {
      // if this is a buyer/seller, retrieve the latest deliverer location
      if (user_type === "Buyer/Seller") {
        await getJob(packageItem.id).then((newPackageItem) => {
          setUpdatedPackageItem(newPackageItem);
          setSourceLat(newPackageItem.data.deliverer_location.latitude);
          setSourceLong(newPackageItem.data.deliverer_location.longitude);
        });
        return;
      }
      // if this is a deliverer, retrieve this user's current location and
      // update the delivery status and location
      await getCurrentLocation().then(async (loc) => {
        setSourceLat(loc.latitude);
        setSourceLong(loc.longitude);
        await updateDeliveryStatus(
          packageItem.id,
          4,
          {},
          generateGeolocation(loc.latitude, loc.longitude)
        );
        packageItem.data.status = 4;
      });
    };
    syncDeliveryLocation();
  }, []);
return (
<View style={styles.container}>
<View style={styles.headingContainer}>
<Text style={styles.lineone}>Delivery Complete!</Text>
</View>
{mapProps ? (
<MapComponent mapProps={{ ...mapProps, style: styles.map }} />
) : (
<Text style={styles.paragraph}>Loading Map...</Text>
)}
<View style={styles.bottomContainer}>
<Text style={styles.linetwo}>
{`Arrived at ${full_dropOff_address}`}
</Text>
<Icon.Button
name="phone"
backgroundColor={COLORS.white}
onPress={() => {}}
></Icon.Button>
<BButton
text="Return to Home"
onPress={() => navigation.replace(homeScreen)}
containerStyle={styles.button}
/>
</View>
</View>
);
};
export default DeliveryComplete;
const styles = StyleSheet.create({
container: {
flex: 1,
justifyContent: "center",
alignItems: "center",
},
lineone: {
fontSize: 30,
fontWeight: "bold",
textAlign: "center",
},
linetwo: {
paddingBottom: 25,
fontSize: 20,
fontWeight: "bold",
textAlign: "center",
},
linethree: {
paddingBottom: 25,
fontSize: 20,
fontWeight: "bold",
textAlign: "center",
},
button: {
width: 200,
marginHorizontal: 50,
marginVertical: 5,
},
buttonView: {
position: "absolute",
top: (5 / 6) * height,
},
buttonContainer: {},
buttonOutline: {},
buttonOutlineText: {},
buttonText: {
color: "blue",
},
input: {
padding: 15,
borderBottomWidth: 1,
borderColor: "rgba(0, 0, 0, .2)",
},
inputContainer: {},
headingContainer: {
position: "absolute",
top: 50,
},
bottomContainer: {
position: "absolute",
bottom: 25,
},
font: {
textAlign: "center",
maxWidth: 200,
fontWeight: "bold",
},
topleftbutton: {
position: "absolute",
left: 25,
top: 50,
},
toprightbutton: {
position: "absolute",
right: 25,
top: 50,
},
map: {
...StyleSheet.absoluteFillObject,
top: (1 / 7) * height,
bottom: (1 / 3) * height,
},
});
|
# Copyright (c)2014 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Wrapper around the requests library. Used for making all HTTP calls.
"""
import logging
import json
import requests
import pyrax
import pyrax.exceptions as exc
req_methods = {
"HEAD": requests.head,
"GET": requests.get,
"POST": requests.post,
"PUT": requests.put,
"DELETE": requests.delete,
"PATCH": requests.patch,
}
# NOTE: FIX THIS!!!
verify_ssl = False
def request(method, uri, *args, **kwargs):
"""
Handles all the common functionality required for API calls. Returns
the resulting response object.
Formats the request into a dict representing the headers
and body that will be used to make the API call.
"""
req_method = req_methods[method.upper()]
raise_exception = kwargs.pop("raise_exception", True)
raw_content = kwargs.pop("raw_content", False)
kwargs["headers"] = kwargs.get("headers", {})
http_log_req(method, uri, args, kwargs)
data = None
if "data" in kwargs:
# The 'data' kwarg is used when you don't want json encoding.
data = kwargs.pop("data")
elif "body" in kwargs:
if "Content-Type" not in kwargs["headers"]:
kwargs["headers"]["Content-Type"] = "application/json"
data = json.dumps(kwargs.pop("body"))
if data:
resp = req_method(uri, data=data, **kwargs)
else:
resp = req_method(uri, **kwargs)
if raw_content:
body = resp.content
else:
try:
body = resp.json()
except ValueError:
# No JSON in response
body = resp.content
http_log_resp(resp, body)
if resp.status_code >= 400 and raise_exception:
raise exc.from_response(resp, body)
return resp, body
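# Illustrative usage (the URI and payload are made up):
#   resp, body = request("POST", "https://api.example.com/v1/things",
#                        body={"name": "test"})
# Passing `body` JSON-encodes the payload and sets Content-Type to
# application/json; pass `data` instead to send a raw, pre-encoded payload.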
def http_log_req(method, uri, args, kwargs):
"""
When pyrax.get_http_debug() is True, outputs the equivalent `curl`
command for the API request being made.
"""
if not pyrax.get_http_debug():
return
string_parts = ["curl -i -X %s" % method]
for element in args:
string_parts.append("%s" % element)
for element in kwargs["headers"]:
header = "-H '%s: %s'" % (element, kwargs["headers"][element])
string_parts.append(header)
string_parts.append(uri)
log = logging.getLogger("pyrax")
log.debug("\nREQ: %s\n" % " ".join(string_parts))
if "body" in kwargs:
pyrax._logger.debug("REQ BODY: %s\n" % (kwargs["body"]))
if "data" in kwargs:
pyrax._logger.debug("REQ DATA: %s\n" % (kwargs["data"]))
def http_log_resp(resp, body):
"""
When pyrax.get_http_debug() is True, outputs the response received
from the API request.
"""
if not pyrax.get_http_debug():
return
log = logging.getLogger("pyrax")
log.debug("RESP: %s\n%s", resp, resp.headers)
if body:
log.debug("RESP BODY: %s", body)
|
/**
* EasyStar.js
* github.com/prettymuchbryce/EasyStarJS
* Licensed under the MIT license.
*
* Implementation By Bryce Neal (@prettymuchbryce)
**/
var EasyStar = {}
var Instance = require('./instance');
var Node = require('./node');
var Heap = require('heap');
const CLOSED_LIST = 0;
const OPEN_LIST = 1;
module.exports = EasyStar;
EasyStar.js = function() {
var STRAIGHT_COST = 1.0;
var DIAGONAL_COST = 1.4;
var syncEnabled = false;
var pointsToAvoid = {};
var collisionGrid;
var costMap = {};
var pointsToCost = {};
var directionalConditions = {};
var allowCornerCutting = true;
var iterationsSoFar;
var instances = [];
var iterationsPerCalculation = Number.MAX_VALUE;
var acceptableTiles;
var diagonalsEnabled = false;
/**
     * Sets the tiles which are deemed acceptable to pass through.
*
* @param {Array|Number} tiles An array of numbers that represent
* which tiles in your grid should be considered
* acceptable, or "walkable".
**/
this.setAcceptableTiles = function(tiles) {
if (tiles instanceof Array) {
// Array
acceptableTiles = tiles;
} else if (!isNaN(parseFloat(tiles)) && isFinite(tiles)) {
// Number
acceptableTiles = [tiles];
}
};
/**
* Enables sync mode for this EasyStar instance..
* if you're into that sort of thing.
**/
this.enableSync = function() {
syncEnabled = true;
};
/**
* Disables sync mode for this EasyStar instance.
**/
this.disableSync = function() {
syncEnabled = false;
};
/**
* Enable diagonal pathfinding.
*/
this.enableDiagonals = function() {
diagonalsEnabled = true;
}
/**
* Disable diagonal pathfinding.
*/
this.disableDiagonals = function() {
diagonalsEnabled = false;
}
/**
* Sets the collision grid that EasyStar uses.
*
* @param {Array} grid The collision grid that this EasyStar instance will read from.
* This should be a 2D Array of Numbers.
**/
this.setGrid = function(grid) {
collisionGrid = grid;
//Setup cost map
for (var y = 0; y < collisionGrid.length; y++) {
for (var x = 0; x < collisionGrid[0].length; x++) {
if (!costMap[collisionGrid[y][x]]) {
costMap[collisionGrid[y][x]] = 1
}
}
}
};
/**
* Sets the tile cost for a particular tile type.
*
     * @param {Number} tileType The tile type to set the cost for.
     * @param {Number} cost The multiplicative cost associated with the given tile.
**/
this.setTileCost = function(tileType, cost) {
costMap[tileType] = cost;
};
/**
     * Sets an additional cost for a particular point.
* Overrides the cost from setTileCost.
*
* @param {Number} x The x value of the point to cost.
* @param {Number} y The y value of the point to cost.
     * @param {Number} cost The multiplicative cost associated with the given point.
**/
this.setAdditionalPointCost = function(x, y, cost) {
pointsToCost[x + '_' + y] = cost;
};
/**
* Remove the additional cost for a particular point.
*
* @param {Number} x The x value of the point to stop costing.
* @param {Number} y The y value of the point to stop costing.
**/
this.removeAdditionalPointCost = function(x, y) {
delete pointsToCost[x + '_' + y];
}
/**
* Remove all additional point costs.
**/
this.removeAllAdditionalPointCosts = function() {
pointsToCost = {};
}
/**
* Sets a directional condition on a tile
*
* @param {Number} x The x value of the point.
* @param {Number} y The y value of the point.
* @param {Array.<String>} allowedDirections A list of all the allowed directions that can access
* the tile.
**/
this.setDirectionalCondition = function(x, y, allowedDirections) {
directionalConditions[x + '_' + y] = allowedDirections;
};
/**
* Remove all directional conditions
**/
this.removeAllDirectionalConditions = function() {
directionalConditions = {};
};
/**
* Sets the number of search iterations per calculation.
     * A lower number provides a slower result, but is more practical if you
* have a large tile-map and don't want to block your thread while
* finding a path.
*
     * @param {Number} iterations The number of searches to perform per calculate() call.
**/
this.setIterationsPerCalculation = function(iterations) {
iterationsPerCalculation = iterations;
};
/**
* Avoid a particular point on the grid,
* regardless of whether or not it is an acceptable tile.
*
* @param {Number} x The x value of the point to avoid.
* @param {Number} y The y value of the point to avoid.
**/
this.avoidAdditionalPoint = function(x, y) {
pointsToAvoid[x + "_" + y] = 1;
};
/**
* Stop avoiding a particular point on the grid.
*
* @param {Number} x The x value of the point to stop avoiding.
* @param {Number} y The y value of the point to stop avoiding.
**/
this.stopAvoidingAdditionalPoint = function(x, y) {
delete pointsToAvoid[x + "_" + y];
};
/**
* Enables corner cutting in diagonal movement.
**/
this.enableCornerCutting = function() {
allowCornerCutting = true;
};
/**
* Disables corner cutting in diagonal movement.
**/
this.disableCornerCutting = function() {
allowCornerCutting = false;
};
/**
* Stop avoiding all additional points on the grid.
**/
this.stopAvoidingAllAdditionalPoints = function() {
pointsToAvoid = {};
};
/**
* Find a path.
*
* @param {Number} startX The X position of the starting point.
* @param {Number} startY The Y position of the starting point.
* @param {Number} endX The X position of the ending point.
* @param {Number} endY The Y position of the ending point.
* @param {Function} callback A function that is called when your path
* is found, or no path is found.
*
**/
this.findPath = function(startX, startY, endX, endY, callback) {
// Wraps the callback for sync vs async logic
var callbackWrapper = function(result) {
if (syncEnabled) {
callback(result);
} else {
setTimeout(function() {
callback(result);
});
}
}
// No acceptable tiles were set
if (acceptableTiles === undefined) {
throw new Error("You can't set a path without first calling setAcceptableTiles() on EasyStar.");
}
// No grid was set
if (collisionGrid === undefined) {
throw new Error("You can't set a path without first calling setGrid() on EasyStar.");
}
// Start or endpoint outside of scope.
if (startX < 0 || startY < 0 || endX < 0 || endY < 0 ||
startX > collisionGrid[0].length-1 || startY > collisionGrid.length-1 ||
endX > collisionGrid[0].length-1 || endY > collisionGrid.length-1) {
throw new Error("Your start or end point is outside the scope of your grid.");
}
// Start and end are the same tile.
if (startX===endX && startY===endY) {
callbackWrapper([]);
return;
}
// End point is not an acceptable tile.
var endTile = collisionGrid[endY][endX];
var isAcceptable = false;
for (var i = 0; i < acceptableTiles.length; i++) {
if (endTile === acceptableTiles[i]) {
isAcceptable = true;
break;
}
}
if (isAcceptable === false) {
callbackWrapper(null);
return;
}
// Create the instance
var instance = new Instance();
instance.openList = new Heap(function(nodeA, nodeB) {
return nodeA.bestGuessDistance() - nodeB.bestGuessDistance();
});
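        // the open list is a min-heap ordered by each node's best-guess total
        // cost (presumably costSoFar plus the heuristic distance to the target)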
instance.isDoneCalculating = false;
instance.nodeHash = {};
instance.startX = startX;
instance.startY = startY;
instance.endX = endX;
instance.endY = endY;
instance.callback = callbackWrapper;
instance.openList.push(coordinateToNode(instance, instance.startX,
instance.startY, null, STRAIGHT_COST));
instances.push(instance);
};
/**
* This method steps through the A* Algorithm in an attempt to
* find your path(s). It will search 4-8 tiles (depending on diagonals) for every calculation.
* You can change the number of calculations done in a call by using
     * easystar.setIterationsPerCalculation().
**/
this.calculate = function() {
if (instances.length === 0 || collisionGrid === undefined || acceptableTiles === undefined) {
return;
}
for (iterationsSoFar = 0; iterationsSoFar < iterationsPerCalculation; iterationsSoFar++) {
if (instances.length === 0) {
return;
}
if (syncEnabled) {
// If this is a sync instance, we want to make sure that it calculates synchronously.
iterationsSoFar = 0;
}
// Couldn't find a path.
if (instances[0].openList.size() === 0) {
var ic = instances[0];
ic.callback(null);
instances.shift();
continue;
}
var searchNode = instances[0].openList.pop();
// Handles the case where we have found the destination
if (instances[0].endX === searchNode.x && instances[0].endY === searchNode.y) {
instances[0].isDoneCalculating = true;
var path = [];
path.push({x: searchNode.x, y: searchNode.y});
var parent = searchNode.parent;
while (parent!=null) {
path.push({x: parent.x, y:parent.y});
parent = parent.parent;
}
path.reverse();
var ic = instances[0];
var ip = path;
ic.callback(ip);
return
}
var tilesToSearch = [];
searchNode.list = CLOSED_LIST;
if (searchNode.y > 0) {
tilesToSearch.push({ instance: instances[0], searchNode: searchNode,
x: 0, y: -1, cost: STRAIGHT_COST * getTileCost(searchNode.x, searchNode.y-1)});
}
if (searchNode.x < collisionGrid[0].length-1) {
tilesToSearch.push({ instance: instances[0], searchNode: searchNode,
x: 1, y: 0, cost: STRAIGHT_COST * getTileCost(searchNode.x+1, searchNode.y)});
}
if (searchNode.y < collisionGrid.length-1) {
tilesToSearch.push({ instance: instances[0], searchNode: searchNode,
x: 0, y: 1, cost: STRAIGHT_COST * getTileCost(searchNode.x, searchNode.y+1)});
}
if (searchNode.x > 0) {
tilesToSearch.push({ instance: instances[0], searchNode: searchNode,
x: -1, y: 0, cost: STRAIGHT_COST * getTileCost(searchNode.x-1, searchNode.y)});
}
if (diagonalsEnabled) {
if (searchNode.x > 0 && searchNode.y > 0) {
if (allowCornerCutting ||
(isTileWalkable(collisionGrid, acceptableTiles, searchNode.x, searchNode.y-1) &&
isTileWalkable(collisionGrid, acceptableTiles, searchNode.x-1, searchNode.y))) {
tilesToSearch.push({ instance: instances[0], searchNode: searchNode,
x: -1, y: -1, cost: DIAGONAL_COST * getTileCost(searchNode.x-1, searchNode.y-1)});
}
}
if (searchNode.x < collisionGrid[0].length-1 && searchNode.y < collisionGrid.length-1) {
if (allowCornerCutting ||
(isTileWalkable(collisionGrid, acceptableTiles, searchNode.x, searchNode.y+1) &&
isTileWalkable(collisionGrid, acceptableTiles, searchNode.x+1, searchNode.y))) {
tilesToSearch.push({ instance: instances[0], searchNode: searchNode,
x: 1, y: 1, cost: DIAGONAL_COST * getTileCost(searchNode.x+1, searchNode.y+1)});
}
}
if (searchNode.x < collisionGrid[0].length-1 && searchNode.y > 0) {
if (allowCornerCutting ||
(isTileWalkable(collisionGrid, acceptableTiles, searchNode.x, searchNode.y-1) &&
isTileWalkable(collisionGrid, acceptableTiles, searchNode.x+1, searchNode.y))) {
tilesToSearch.push({ instance: instances[0], searchNode: searchNode,
x: 1, y: -1, cost: DIAGONAL_COST * getTileCost(searchNode.x+1, searchNode.y-1)});
}
}
if (searchNode.x > 0 && searchNode.y < collisionGrid.length-1) {
if (allowCornerCutting ||
(isTileWalkable(collisionGrid, acceptableTiles, searchNode.x, searchNode.y+1) &&
isTileWalkable(collisionGrid, acceptableTiles, searchNode.x-1, searchNode.y))) {
tilesToSearch.push({ instance: instances[0], searchNode: searchNode,
x: -1, y: 1, cost: DIAGONAL_COST * getTileCost(searchNode.x-1, searchNode.y+1)});
}
}
}
var isDoneCalculating = false;
// Search all of the surrounding nodes
for (var i = 0; i < tilesToSearch.length; i++) {
checkAdjacentNode(tilesToSearch[i].instance, tilesToSearch[i].searchNode,
tilesToSearch[i].x, tilesToSearch[i].y, tilesToSearch[i].cost);
if (tilesToSearch[i].instance.isDoneCalculating === true) {
isDoneCalculating = true;
break;
}
}
if (isDoneCalculating) {
instances.shift();
continue;
}
}
};
// Private methods follow
var checkAdjacentNode = function(instance, searchNode, x, y, cost) {
var adjacentCoordinateX = searchNode.x+x;
var adjacentCoordinateY = searchNode.y+y;
if (pointsToAvoid[adjacentCoordinateX + "_" + adjacentCoordinateY] === undefined &&
isTileWalkable(collisionGrid, acceptableTiles, adjacentCoordinateX, adjacentCoordinateY, searchNode)) {
var node = coordinateToNode(instance, adjacentCoordinateX,
adjacentCoordinateY, searchNode, cost);
if (node.list === undefined) {
node.list = OPEN_LIST;
instance.openList.push(node);
} else if (searchNode.costSoFar + cost < node.costSoFar) {
node.costSoFar = searchNode.costSoFar + cost;
node.parent = searchNode;
instance.openList.updateItem(node);
}
}
};
// Helpers
var isTileWalkable = function(collisionGrid, acceptableTiles, x, y, sourceNode) {
if (directionalConditions[x + "_" + y]) {
var direction = calculateDirection(sourceNode.x - x, sourceNode.y - y)
var directionIncluded = function () {
for (var i = 0; i < directionalConditions[x + "_" + y].length; i++) {
if (directionalConditions[x + "_" + y][i] === direction) return true
}
return false
}
if (!directionIncluded()) return false
}
for (var i = 0; i < acceptableTiles.length; i++) {
if (collisionGrid[y][x] === acceptableTiles[i]) {
return true;
}
}
return false;
};
/**
* -1, -1 | 0, -1 | 1, -1
* -1, 0 | SOURCE | 1, 0
* -1, 1 | 0, 1 | 1, 1
*/
var calculateDirection = function (diffX, diffY) {
        if (diffX === 0 && diffY === -1) return EasyStar.BOTTOM
        else if (diffX === 1 && diffY === -1) return EasyStar.BOTTOM_LEFT
        else if (diffX === 1 && diffY === 0) return EasyStar.LEFT
        else if (diffX === 1 && diffY === 1) return EasyStar.TOP_LEFT
        else if (diffX === 0 && diffY === 1) return EasyStar.TOP
        else if (diffX === -1 && diffY === 1) return EasyStar.TOP_RIGHT
        else if (diffX === -1 && diffY === 0) return EasyStar.RIGHT
        else if (diffX === -1 && diffY === -1) return EasyStar.BOTTOM_RIGHT
throw new Error('These differences are not valid: ' + diffX + ', ' + diffY)
};
var getTileCost = function(x, y) {
return pointsToCost[x + '_' + y] || costMap[collisionGrid[y][x]]
};
var coordinateToNode = function(instance, x, y, parent, cost) {
if (instance.nodeHash[x + "_" + y]!==undefined) {
return instance.nodeHash[x + "_" + y];
}
var simpleDistanceToTarget = getDistance(x, y, instance.endX, instance.endY);
if (parent!==null) {
var costSoFar = parent.costSoFar + cost;
} else {
costSoFar = 0;
}
var node = new Node(parent,x,y,costSoFar,simpleDistanceToTarget);
instance.nodeHash[x + "_" + y] = node;
return node;
};
var getDistance = function(x1,y1,x2,y2) {
if (diagonalsEnabled) {
// Octile distance
var dx = Math.abs(x1 - x2);
var dy = Math.abs(y1 - y2);
if (dx < dy) {
return DIAGONAL_COST * dx + dy;
} else {
return DIAGONAL_COST * dy + dx;
}
} else {
// Manhattan distance
var dx = Math.abs(x1 - x2);
var dy = Math.abs(y1 - y2);
return (dx + dy);
}
};
}
EasyStar.TOP = 'TOP'
EasyStar.TOP_RIGHT = 'TOP_RIGHT'
EasyStar.RIGHT = 'RIGHT'
EasyStar.BOTTOM_RIGHT = 'BOTTOM_RIGHT'
EasyStar.BOTTOM = 'BOTTOM'
EasyStar.BOTTOM_LEFT = 'BOTTOM_LEFT'
EasyStar.LEFT = 'LEFT'
EasyStar.TOP_LEFT = 'TOP_LEFT'
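// Minimal usage sketch (the grid, tile values and coordinates are illustrative):
//
//   var easystar = new EasyStar.js();
//   easystar.setGrid([[0, 0, 0],
//                     [1, 1, 0],
//                     [0, 0, 0]]);
//   easystar.setAcceptableTiles([0]);
//   easystar.findPath(0, 0, 0, 2, function(path) {
//       // path is null if no path was found, otherwise an array of {x, y} points
//   });
//   easystar.calculate(); // must be called (e.g. once per frame) to run the search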
|
#
# References
# 1. url https://matplotlib.org/gallery/animation
# /double_pendulum_animated_sgskip.html [20210205].
#
from numpy import sin, cos
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import matplotlib.animation as animation
G = 9.8 # acceleration due to gravity, in m/s^2
L1 = 1.0 # length of pendulum 1 in m
L2 = 1.0 # length of pendulum 2 in m
M1 = 1.0 # mass of pendulum 1 in kg
M2 = 1.0 # mass of pendulum 2 in kg
def derivs(state, t):
dydx = np.zeros_like(state)
dydx[0] = state[1]
del_ = state[2] - state[0]
den1 = (M1 + M2)*L1 - M2*L1*cos(del_)*cos(del_)
dydx[1] = (M2*L1*state[1]*state[1]*sin(del_)*cos(del_) +
M2*G*sin(state[2])*cos(del_) +
M2*L2*state[3]*state[3]*sin(del_) -
(M1 + M2)*G*sin(state[0]))/den1
dydx[2] = state[3]
den2 = (L2/L1)*den1
dydx[3] = (-M2*L2*state[3]*state[3]*sin(del_)*cos(del_) +
(M1 + M2)*G*sin(state[0])*cos(del_) -
(M1 + M2)*L1*state[1]*state[1]*sin(del_) -
(M1 + M2)*G*sin(state[2]))/den2
return dydx
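# Sanity check of the state layout assumed above
# (state = [theta1, omega1, theta2, omega2], angles in radians): with both
# pendulums hanging straight down at rest, derivs returns all zeros.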
# create a time array from 0..20 sampled at 0.05 second steps
dt = 0.05
t = np.arange(0.0, 20, dt)
# th1 and th2 are the initial angles (degrees)
# w1 and w2 are the initial angular velocities (degrees per second)
th1 = 120.0
w1 = 0.0
th2 = -10.0
w2 = 0.0
# initial state
state = np.radians([th1, w1, th2, w2])
# integrate your ODE using scipy.integrate.
y = integrate.odeint(derivs, state, t)
x1 = L1*sin(y[:, 0])
y1 = -L1*cos(y[:, 0])
x2 = L2*sin(y[:, 2]) + x1
y2 = -L2*cos(y[:, 2]) + y1
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-2, 2), ylim=(-2, 2))
ax.set_aspect('equal')
ax.grid()
line, = ax.plot([], [], 'o-', lw=2)
time_template = 'time = %.1fs'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
def init():
line.set_data([], [])
time_text.set_text('')
return line, time_text
def animate(i):
thisx = [0, x1[i], x2[i]]
thisy = [0, y1[i], y2[i]]
line.set_data(thisx, thisy)
time_text.set_text(time_template % (i*dt))
return line, time_text
ani = animation.FuncAnimation(fig, animate, np.arange(1, len(y)),
interval=25, blit=True, init_func=init)
# ani.save('double_pendulum.mp4', fps=15)
plt.show() |
import types
from struct import pack
from vertica_python.vertica.messages import *
class Message(object):
@classmethod
def _message_id(cls, message_id):
instance_message_id = message_id
def message_id(self):
return instance_message_id
setattr(cls, 'message_id', types.MethodType(message_id, cls))
def message_string(self, msg):
if isinstance(msg, list):
msg = ''.join(msg)
if hasattr(msg, 'bytesize'):
bytesize = msg.bytesize + 4
else:
bytesize = len(msg) + 4
message_size = pack('!I', bytesize)
if self.message_id() is not None:
msg_with_size = self.message_id() + message_size + msg
else:
msg_with_size = message_size + msg
return msg_with_size
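    # Example framing: for msg = b'abc' the length prefix is pack('!I', 7) ==
    # b'\x00\x00\x00\x07' (3 payload bytes plus the 4-byte length field
    # itself), optionally preceded by the one-byte message id.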
class BackendMessage(Message):
MessageIdMap = {}
@classmethod
def factory(cls, type_, data):
klass = cls.MessageIdMap[type_]
if klass is not None:
return klass(data)
else:
return messages.Unknown(type_, data)
@classmethod
def _message_id(cls, message_id):
        super(BackendMessage, cls)._message_id(message_id)
cls.MessageIdMap[message_id] = cls
class FrontendMessage(Message):
def to_bytes(self):
return self.message_string(b'')
|
testTransformSpecifToTTL = (specifData) => {
return transformSpecifToTTL("https://www.example.com",specifData)
};
transformSpecifToTTL = (baseUri, specifData) => {
let {id,dataTypes,propertyClasses,resourceClasses,statementClasses,resources,statements,hierarchies,files} = specifData;
let projectID = id;
let resultTtlString = defineTurtleVocabulary(baseUri, projectID)
+ transformProjectBaseInformations( specifData )
+ transformDatatypes(dataTypes)
+ transformPropertyClasses(propertyClasses)
+ transformResourceClasses(resourceClasses)
+ transformStatementClasses(statementClasses)
+ transformResources(resources)
+ transformStatements(statements)
+ transformHierarchies(hierarchies)
+ transformFiles(files);
return resultTtlString;
};
defineTurtleVocabulary = (baseUri, projectID) => {
let TtlString = tier0RdfEntry(`@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .`)
+ tier0RdfEntry(`@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .`)
+ tier0RdfEntry(`@prefix foaf: <http://xmlns.com/foaf/0.1/> .`)
+ tier0RdfEntry(`@prefix owl: <http://www.w3.org/2002/07/owl#> .`)
+ tier0RdfEntry(`@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .`)
+ tier0RdfEntry(`@prefix xs: <http://www.w3.org/2001/XMLSchema#> .`)
+ tier0RdfEntry(`@prefix dcterms: <http://purl.org/dc/terms/> .`)
+ tier0RdfEntry(`@prefix vann: <http://purl.org/vocab/vann/> .`)
+ emptyLine()
+ tier0RdfEntry(`@prefix meta: <http://specif.de/v1.0/schema/meta#> .`)
+ tier0RdfEntry(`@prefix SpecIF: <http://specif.de/v1.0/schema/core#> .`)
+ tier0RdfEntry(`@prefix FMC: <http://specif.de/v1.0/schema/fmc#> .`)
+ tier0RdfEntry(`@prefix IREB: <http://specif.de/v1.0/schema/ireb#> .`)
+ tier0RdfEntry(`@prefix SysML: <http://specif.de/v1.0/schema/sysml#> .`)
+ tier0RdfEntry(`@prefix oslc: <http://specif.de/v1.0/schema/oslc#> .`)
+ tier0RdfEntry(`@prefix oslc_rm: <http://specif.de/v1.0/schema/oslc_rm#> .`)
+ tier0RdfEntry(`@prefix HIS: <http://specif.de/v1.0/schema/HIS#> .`)
+ tier0RdfEntry(`@prefix BPMN: <http://specif.de/v1.0/schema/bpmn#> .`)
+ emptyLine()
+ tier0RdfEntry(`@prefix : <${baseUri}/${projectID}/> .`)
+ tier0RdfEntry(`@prefix this: <${baseUri}/${projectID}/> .`);
return TtlString;
};
transformProjectBaseInformations = (project) => {
let { id , title , description , $schema , generator , generatorVersion , rights , createdAt , createdBy } = project;
let baseProjectTtlString = emptyLine()
+ tier0RdfEntry(`this: a meta:Document ;`)
+ tier1RdfEntry(`meta:id '${escapeSpecialCharaters(id)}' ;`)
+ (title? tier1RdfEntry(`rdfs:label '${escapeSpecialCharaters(title)}' ;`) : '')
+ (description? tier1RdfEntry(`rdfs:comment '${escapeSpecialCharaters(description)}' ;`) : '')
+ tier1RdfEntry(`meta:schema <${$schema}> ;`)
+ (generator? tier1RdfEntry(`meta:generator '${escapeSpecialCharaters(generator)}' ;`) : '')
+ (generatorVersion? tier1RdfEntry(`meta:generatorVersion '${escapeSpecialCharaters(generatorVersion)}' ;`) : '');
if(rights){
baseProjectTtlString += (rights.title? tier1RdfEntry(`meta:rights-title '${escapeSpecialCharaters(rights.title)}' ;`) : '')
+ (rights.type? tier1RdfEntry(`meta:rights-type '${escapeSpecialCharaters(rights.type)}' ;`) : '')
+ (rights.url? tier1RdfEntry(`meta:rights-url '${escapeSpecialCharaters(rights.url)}' ;`) : '');
};
baseProjectTtlString += (createdAt? tier1RdfEntry(`dcterms:modified '${createdAt}' ;`) : '');
if(createdBy){
		baseProjectTtlString += (createdBy.familyName? tier1RdfEntry(`meta:createdBy-familyName '${escapeSpecialCharaters(createdBy.familyName)}' ;`) : '')
			+ (createdBy.givenName? tier1RdfEntry(`meta:createdBy-givenName '${escapeSpecialCharaters(createdBy.givenName)}' ;`) : '')
			+ (createdBy.email && createdBy.email.value? tier1RdfEntry(`meta:createdBy-email '${escapeSpecialCharaters(createdBy.email.value)}' ;`) : '')
			+ (createdBy.org && createdBy.org.organizationName? tier1RdfEntry(`meta:createdBy-org-organizationName '${escapeSpecialCharaters(createdBy.org.organizationName)}' ;`) : '');
};
baseProjectTtlString += ' .';
return baseProjectTtlString;
};
transformDatatypes = (dataTypes) => {
if (!isArrayWithContent(dataTypes)){
return '';
};
let dataTypesTtlString = '';
dataTypes.forEach( dataType => {
let {id , title , type , revision , maxLength , fractionDigits , minInclusive , maxInclusive , changedAt} = dataType;
dataTypesTtlString += emptyLine()
+ tier0RdfEntry(`this: meta:containsDataTypeMapping :${id} .`)
+ tier0RdfEntry(`:${id} a meta:DataTypeMapping , owl:Class ;`)
+ tier1RdfEntry(`meta:id '${escapeSpecialCharaters(id)}' ;`)
+ (title? tier1RdfEntry(`rdfs:label '${escapeSpecialCharaters(title)}' ;`) : '')
+ (type? tier1RdfEntry(`meta:type '${escapeSpecialCharaters(type)}' ; `) : '')
// + (type? tier1RdfEntry(`meta:vocabularyElement '${escapeSpecialCharaters(type)}' ;`) : '')
+ (revision? tier1RdfEntry(`meta:revision '${revision}' ;`) : '')
+ (maxLength? tier1RdfEntry(`meta:maxLength '${maxLength}' ;`) : '')
+ (fractionDigits? tier1RdfEntry(`meta:fractionDigits '${fractionDigits}' ;`) : '')
+ (minInclusive? tier1RdfEntry(`meta:minInclusive '${minInclusive}' ;`) : '')
+ (maxInclusive? tier1RdfEntry(`meta:maxInclusive '${maxInclusive}' ;`) : '')
+ tier1RdfEntry(`dcterms:modified '${escapeSpecialCharaters(changedAt)}' ;`)
+ ' .';
if(isArrayWithContent(dataType.values)){
dataType.values.forEach( enumValue => {
dataTypesTtlString += emptyLine()
+ tier0RdfEntry(`:${escapeSpecialCharaters(enumValue.id)} a :${escapeSpecialCharaters(dataType.title)} ;`)
+ tier1RdfEntry(`meta:id '${escapeSpecialCharaters(enumValue.id)}' ;`)
+ tier1RdfEntry(`rdfs:label '${escapeSpecialCharaters(enumValue.value)}' ;`)
+ ' .';
});
};
});
return dataTypesTtlString;
};
transformPropertyClasses = (propertyClasses) => {
if (!isArrayWithContent(propertyClasses)){
return '';
};
let propertyClassesTtlString = '';
propertyClasses.forEach(propertyClass => {
let {id , title , dataType , revision , changedAt} = propertyClass;
propertyClassesTtlString += emptyLine()
+ tier0RdfEntry(`this: meta:containsPropertyClassMapping :${id} .`)
+ tier0RdfEntry(`:${id} a meta:PropertyClassMapping ;`)
+ tier1RdfEntry(`meta:id '${escapeSpecialCharaters(id)}' ;`)
+ (title? tier1RdfEntry(`meta:title '${escapeSpecialCharaters(title)}' ; `) : '')
// + (title? tier1RdfEntry(`meta:vocabularyElement ${escapeSpecialCharaters(title)} ;`) : '')
+ tier1RdfEntry(`meta:dataType '${escapeSpecialCharaters(dataType)}' ;`)
+ (revision? tier1RdfEntry(`meta:revision '${escapeSpecialCharaters(revision)}' ;`) : '')
+ tier1RdfEntry(`dcterms:modified '${escapeSpecialCharaters(changedAt)}' ;`)
+ ' .';
});
return propertyClassesTtlString;
};
transformResourceClasses = (resourceClasses) => {
if (!isArrayWithContent(resourceClasses)){
return '';
};
let resourceClassesTtlString='';
resourceClasses.forEach( resourceClass => {
let {id , title , description , icon , instantiation , changedAt , revision , propertyClasses} = resourceClass;
resourceClassesTtlString += emptyLine()
+ tier0RdfEntry(`this: meta:containsResourceClassMapping :${id} .`)
+ tier0RdfEntry(`:${id} a meta:ResourceClassMapping ;`)
+ tier1RdfEntry(`meta:id '${escapeSpecialCharaters(id)}' ;`)
+ (title? tier1RdfEntry(`meta:title '${escapeSpecialCharaters(title)}';`):'')
// + (title? tier1RdfEntry(`meta:vocabularyElement '${escapeSpecialCharaters(title)}' ;`):'')
+ (description? tier1RdfEntry(`meta:description '${escapeSpecialCharaters(description)}' ;`):'')
+ (icon? tier1RdfEntry(`meta:icon '${escapeSpecialCharaters(icon)}' ;`):'')
+ tier1RdfEntry(`dcterms:modified '${escapeSpecialCharaters(changedAt)}' ;`)
+ (revision? tier1RdfEntry(`meta:revision '${escapeSpecialCharaters(revision)}' ;`):'')
+ (instantiation? extractRdfFromSpecifDataArray(`meta:instantiation`,instantiation) : '')
+ (propertyClasses? extractRdfFromSpecifDataArray(`meta:propertyClasses`,propertyClasses) : '')
+ ' .';
});
return resourceClassesTtlString;
};
transformStatementClasses = (statementClasses) => {
if (!isArrayWithContent(statementClasses)){
return '';
};
let statementClassesTtlString = '';
statementClasses.forEach( statementClass => {
let {id , title , description , revision , changedAt , instantiation , subjectClasses , objectClasses} = statementClass;
statementClassesTtlString += emptyLine()
+ tier0RdfEntry(`:${id} a meta:StatementClassMapping ;`)
+ tier0RdfEntry(`meta:id '${escapeSpecialCharaters(id)}' ;`)
+ (title? tier1RdfEntry(`rdfs:label '${escapeSpecialCharaters(title)}' ;`) : '')
// + (title? tier1RdfEntry(`meta:vocabularyElement '${escapeSpecialCharaters(title)}' ;`) : '')
+ (description? tier1RdfEntry(`rdfs:comment '${escapeSpecialCharaters(description)}' ;`) : '')
		+ (revision? tier1RdfEntry(`meta:revision '${revision}' ;`) : '')
+ tier1RdfEntry(`dcterms:modified '${escapeSpecialCharaters(changedAt)}' ;`)
+ (instantiation? extractRdfFromSpecifDataArray(`meta:instantiation`,instantiation) : '')
+ (subjectClasses? extractRdfFromSpecifDataArray(`meta:subjectClasses`,subjectClasses) : '')
+ (objectClasses? extractRdfFromSpecifDataArray(`meta:objectClasses `,objectClasses) : '')
+ ' .';
});
return statementClassesTtlString;
};
transformResources = (resources) => {
if (!isArrayWithContent(resources)){
return '';
};
let resourcesTtlString = ''
resources.forEach( resource => {
let {id , title , properties, class : resourceClass, revision , changedAt , changedBy} = resource;
resourcesTtlString += emptyLine()
+ tier0RdfEntry(`:${id} a IREB:Requirement ;`)
+ (title? tier1RdfEntry(`rdfs:label '${escapeSpecialCharaters(title)}' ;`) : '')
+ tier1RdfEntry(`meta:id '${escapeSpecialCharaters(id)}' ;`)
+ (resourceClass? tier1RdfEntry(`meta:PropertyClassMapping '${escapeSpecialCharaters(resourceClass)}' ;`) : '')
+ (revision? tier1RdfEntry(`meta:revision '${revision}' ;`) : '')
+ tier1RdfEntry(`dcterms:modified '${escapeSpecialCharaters(changedAt)}' ;`)
+ (changedBy? tier1RdfEntry(`meta:changedBy '${escapeSpecialCharaters(changedBy)}' ;`) : '');
if(isArrayWithContent(properties)){
properties.forEach( property => {
resourcesTtlString += tier1RdfEntry(`:${property.class} '${escapeSpecialCharaters(property.value)}' ;`);
});
};
resourcesTtlString += ' .';
});
return resourcesTtlString;
};
transformStatements = (statements) => {
if (!isArrayWithContent(statements)){
return '';
};
let statementsTtlString = '';
statements.forEach( statement => {
let {id , subject , class : statementClass , object , changedAt , changedBy , revision} = statement;
statementsTtlString += emptyLine()
+ tier0RdfEntry(`:${id} a meta:Statement ;`)
+ tier1RdfEntry(`meta:id '${escapeSpecialCharaters(id)}' ;`)
+ (subject? tier1RdfEntry(`rdf:subject :${subject} ;`) : '')
+ (statementClass? tier1RdfEntry(`rdf:predicate :${statementClass} ;`) : '')
+ (object? tier1RdfEntry(`rdf:object :${object} ;`) : '')
		+ tier1RdfEntry(`dcterms:modified '${escapeSpecialCharaters(changedAt)}' ;`)
+ (changedBy? tier1RdfEntry(`meta:changedBy '${escapeSpecialCharaters(changedBy)}' ;`) : '')
+ (revision? tier1RdfEntry(`meta:revision '${revision}' ;`) : '')
+ ' .';
});
return statementsTtlString;
};
transformHierarchies = (hierarchies) => {
if (!isArrayWithContent(hierarchies)){
return '';
};
let hierarchyTtlString = '';
hierarchies.forEach( node => {
hierarchyTtlString += transformNodes(node);
});
return hierarchyTtlString;
};
transformNodes = (hierarchyNode) => {
let {id ,resource ,revision ,changedAt ,nodes} = hierarchyNode;
let hierarchyNodeTtlString = emptyLine()
+ tier0RdfEntry(`:${id} a SpecIF:RC-Hierarchy ;`)
+ tier1RdfEntry(`meta:id '${escapeSpecialCharaters(id)}' ;`)
+ (resource? tier1RdfEntry(`meta:resource '${escapeSpecialCharaters(resource)}' ;`) : '')
+ (revision? tier1RdfEntry(`meta:revision '${revision}' ;`) : '')
+ tier1RdfEntry(`dcterms:modified '${escapeSpecialCharaters(changedAt)}' ;`);
if(isArrayWithContent(nodes)){
let NodeTtlString = tier1RdfEntry(`meta:nodes`);
nodes.forEach( node => {
NodeTtlString += tier2RdfEntry(`:${node.id} ,` );
});
hierarchyNodeTtlString += NodeTtlString.replace(/,([^,]*)$/, ';')
+ ` .`;
nodes.forEach( node => {
hierarchyNodeTtlString += transformNodes(node);
});
} else {
hierarchyNodeTtlString += ` .`;
};
return hierarchyNodeTtlString;
};
transformFiles = (files) => {
if (!isArrayWithContent(files)){
return '';
};
let filesTtlString = '';
files.forEach( file => {
let {id , title , type , changedAt} = file;
filesTtlString += emptyLine()
+ tier0RdfEntry(`:${id} a meta:File ;`)
+ tier1RdfEntry(`meta:id '${id}' ;`)
+ (title? tier1RdfEntry(`rdfs:label '${title}' ;`) : '')
+ (type? tier1RdfEntry(`meta:type '${type}' ;`) : '')
+ tier1RdfEntry(`dcterms:modified '${changedAt}' ;`)
+ ' .';
});
return filesTtlString;
};
/*
##########################################################################
########################## Tools #########################################
##########################################################################
*/
isArrayWithContent = (array) => {
return (Array.isArray(array) && array.length > 0);
};
extractRdfFromSpecifDataArray = (predicate, objectArray) => {
let TtlString = '';
if(isArrayWithContent(objectArray)){
TtlString = tier1RdfEntry(predicate);
objectArray.forEach( object => {
TtlString += tier2RdfEntry(`:${object} ,`);
});
TtlString=TtlString.replace(/,([^,]*)$/, ';');
};
return TtlString;
};
/*
########################## String #########################################
*/
tier0RdfEntry = (content) => {
return `\n${content}`;
};
tier1RdfEntry = (content) => {
return `\n\t${content}`;
};
tier2RdfEntry = (content) => {
return `\n\t\t${content}`;
};
tier3RdfEntry = (content) => {
return `\n\t\t\t${content}`;
};
emptyLine = () => {
return `\n`;
};
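// Example of the escaping below: escapeSpecialCharaters("it's a 'test'" + "\n" + "x")
// yields the characters  it\'s a \'test\'\nx  (quotes and newlines are escaped
// so values can sit inside single-quoted Turtle literals).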
escapeSpecialCharaters = (string) => {
return string.replace("\\","\\\\").replace(/\\([\s\S])|(')/g, "\\$1$2").replace(/\n/g, "\\n");
}; |
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var tf = require("@tensorflow/tfjs-core");
var tfjs_image_recognition_base_1 = require("tfjs-image-recognition-base");
function extractorsFactory(extractWeights, paramMappings) {
function extractDepthwiseConvParams(numChannels, mappedPrefix) {
var filters = tf.tensor4d(extractWeights(3 * 3 * numChannels), [
3,
3,
numChannels,
1,
]);
var batch_norm_scale = tf.tensor1d(extractWeights(numChannels));
var batch_norm_offset = tf.tensor1d(extractWeights(numChannels));
var batch_norm_mean = tf.tensor1d(extractWeights(numChannels));
var batch_norm_variance = tf.tensor1d(extractWeights(numChannels));
paramMappings.push(
{ paramPath: mappedPrefix + "/filters" },
{ paramPath: mappedPrefix + "/batch_norm_scale" },
{ paramPath: mappedPrefix + "/batch_norm_offset" },
{ paramPath: mappedPrefix + "/batch_norm_mean" },
{ paramPath: mappedPrefix + "/batch_norm_variance" }
);
return {
filters: filters,
batch_norm_scale: batch_norm_scale,
batch_norm_offset: batch_norm_offset,
batch_norm_mean: batch_norm_mean,
batch_norm_variance: batch_norm_variance,
};
}
function extractConvParams(
channelsIn,
channelsOut,
filterSize,
mappedPrefix,
isPointwiseConv
) {
var filters = tf.tensor4d(
extractWeights(channelsIn * channelsOut * filterSize * filterSize),
[filterSize, filterSize, channelsIn, channelsOut]
);
var bias = tf.tensor1d(extractWeights(channelsOut));
paramMappings.push(
{ paramPath: mappedPrefix + "/filters" },
{
paramPath:
mappedPrefix + "/" + (isPointwiseConv ? "batch_norm_offset" : "bias"),
}
);
return { filters: filters, bias: bias };
}
function extractPointwiseConvParams(
channelsIn,
channelsOut,
filterSize,
mappedPrefix
) {
var _a = extractConvParams(
channelsIn,
channelsOut,
filterSize,
mappedPrefix,
true
),
filters = _a.filters,
bias = _a.bias;
return {
filters: filters,
batch_norm_offset: bias,
};
}
function extractConvPairParams(channelsIn, channelsOut, mappedPrefix) {
var depthwise_conv = extractDepthwiseConvParams(
channelsIn,
mappedPrefix + "/depthwise_conv"
);
var pointwise_conv = extractPointwiseConvParams(
channelsIn,
channelsOut,
1,
mappedPrefix + "/pointwise_conv"
);
return { depthwise_conv: depthwise_conv, pointwise_conv: pointwise_conv };
}
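  // Note: each "conv pair" above is a depthwise-separable convolution as used
  // by MobileNetV1: a 3x3 depthwise conv (one filter per input channel)
  // followed by a 1x1 pointwise conv that mixes channels up to channelsOut.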
function extractMobilenetV1Params() {
var conv_0 = extractPointwiseConvParams(3, 32, 3, "mobilenetv1/conv_0");
var conv_1 = extractConvPairParams(32, 64, "mobilenetv1/conv_1");
var conv_2 = extractConvPairParams(64, 128, "mobilenetv1/conv_2");
var conv_3 = extractConvPairParams(128, 128, "mobilenetv1/conv_3");
var conv_4 = extractConvPairParams(128, 256, "mobilenetv1/conv_4");
var conv_5 = extractConvPairParams(256, 256, "mobilenetv1/conv_5");
var conv_6 = extractConvPairParams(256, 512, "mobilenetv1/conv_6");
var conv_7 = extractConvPairParams(512, 512, "mobilenetv1/conv_7");
var conv_8 = extractConvPairParams(512, 512, "mobilenetv1/conv_8");
var conv_9 = extractConvPairParams(512, 512, "mobilenetv1/conv_9");
var conv_10 = extractConvPairParams(512, 512, "mobilenetv1/conv_10");
var conv_11 = extractConvPairParams(512, 512, "mobilenetv1/conv_11");
var conv_12 = extractConvPairParams(512, 1024, "mobilenetv1/conv_12");
var conv_13 = extractConvPairParams(1024, 1024, "mobilenetv1/conv_13");
return {
conv_0: conv_0,
conv_1: conv_1,
conv_2: conv_2,
conv_3: conv_3,
conv_4: conv_4,
conv_5: conv_5,
conv_6: conv_6,
conv_7: conv_7,
conv_8: conv_8,
conv_9: conv_9,
conv_10: conv_10,
conv_11: conv_11,
conv_12: conv_12,
conv_13: conv_13,
};
}
function extractPredictionLayerParams() {
var conv_0 = extractPointwiseConvParams(
1024,
256,
1,
"prediction_layer/conv_0"
);
var conv_1 = extractPointwiseConvParams(
256,
512,
3,
"prediction_layer/conv_1"
);
var conv_2 = extractPointwiseConvParams(
512,
128,
1,
"prediction_layer/conv_2"
);
var conv_3 = extractPointwiseConvParams(
128,
256,
3,
"prediction_layer/conv_3"
);
var conv_4 = extractPointwiseConvParams(
256,
128,
1,
"prediction_layer/conv_4"
);
var conv_5 = extractPointwiseConvParams(
128,
256,
3,
"prediction_layer/conv_5"
);
var conv_6 = extractPointwiseConvParams(
256,
64,
1,
"prediction_layer/conv_6"
);
var conv_7 = extractPointwiseConvParams(
64,
128,
3,
"prediction_layer/conv_7"
);
var box_encoding_0_predictor = extractConvParams(
512,
12,
1,
"prediction_layer/box_predictor_0/box_encoding_predictor"
);
var class_predictor_0 = extractConvParams(
512,
9,
1,
"prediction_layer/box_predictor_0/class_predictor"
);
var box_encoding_1_predictor = extractConvParams(
1024,
24,
1,
"prediction_layer/box_predictor_1/box_encoding_predictor"
);
var class_predictor_1 = extractConvParams(
1024,
18,
1,
"prediction_layer/box_predictor_1/class_predictor"
);
var box_encoding_2_predictor = extractConvParams(
512,
24,
1,
"prediction_layer/box_predictor_2/box_encoding_predictor"
);
var class_predictor_2 = extractConvParams(
512,
18,
1,
"prediction_layer/box_predictor_2/class_predictor"
);
var box_encoding_3_predictor = extractConvParams(
256,
24,
1,
"prediction_layer/box_predictor_3/box_encoding_predictor"
);
var class_predictor_3 = extractConvParams(
256,
18,
1,
"prediction_layer/box_predictor_3/class_predictor"
);
var box_encoding_4_predictor = extractConvParams(
256,
24,
1,
"prediction_layer/box_predictor_4/box_encoding_predictor"
);
var class_predictor_4 = extractConvParams(
256,
18,
1,
"prediction_layer/box_predictor_4/class_predictor"
);
var box_encoding_5_predictor = extractConvParams(
128,
24,
1,
"prediction_layer/box_predictor_5/box_encoding_predictor"
);
var class_predictor_5 = extractConvParams(
128,
18,
1,
"prediction_layer/box_predictor_5/class_predictor"
);
var box_predictor_0 = {
box_encoding_predictor: box_encoding_0_predictor,
class_predictor: class_predictor_0,
};
var box_predictor_1 = {
box_encoding_predictor: box_encoding_1_predictor,
class_predictor: class_predictor_1,
};
var box_predictor_2 = {
box_encoding_predictor: box_encoding_2_predictor,
class_predictor: class_predictor_2,
};
var box_predictor_3 = {
box_encoding_predictor: box_encoding_3_predictor,
class_predictor: class_predictor_3,
};
var box_predictor_4 = {
box_encoding_predictor: box_encoding_4_predictor,
class_predictor: class_predictor_4,
};
var box_predictor_5 = {
box_encoding_predictor: box_encoding_5_predictor,
class_predictor: class_predictor_5,
};
return {
conv_0: conv_0,
conv_1: conv_1,
conv_2: conv_2,
conv_3: conv_3,
conv_4: conv_4,
conv_5: conv_5,
conv_6: conv_6,
conv_7: conv_7,
box_predictor_0: box_predictor_0,
box_predictor_1: box_predictor_1,
box_predictor_2: box_predictor_2,
box_predictor_3: box_predictor_3,
box_predictor_4: box_predictor_4,
box_predictor_5: box_predictor_5,
};
}
return {
extractMobilenetV1Params: extractMobilenetV1Params,
extractPredictionLayerParams: extractPredictionLayerParams,
};
}
function extractParams(weights) {
var paramMappings = [];
var _a =
tfjs_image_recognition_base_1.TfjsImageRecognitionBase.extractWeightsFactory(
weights
),
extractWeights = _a.extractWeights,
getRemainingWeights = _a.getRemainingWeights;
var _b = extractorsFactory(extractWeights, paramMappings),
extractMobilenetV1Params = _b.extractMobilenetV1Params,
extractPredictionLayerParams = _b.extractPredictionLayerParams;
var mobilenetv1 = extractMobilenetV1Params();
var prediction_layer = extractPredictionLayerParams();
var extra_dim = tf.tensor3d(extractWeights(5118 * 4), [1, 5118, 4]);
var output_layer = {
extra_dim: extra_dim,
};
paramMappings.push({ paramPath: "output_layer/extra_dim" });
if (getRemainingWeights().length !== 0) {
throw new Error(
"weights remaing after extract: " + getRemainingWeights().length
);
}
return {
params: {
mobilenetv1: mobilenetv1,
prediction_layer: prediction_layer,
output_layer: output_layer,
},
paramMappings: paramMappings,
};
}
exports.extractParams = extractParams;
//# sourceMappingURL=extractParams.js.map
|
from pgmpy.models import MarkovModel
from pgmpy.factors.discrete import JointProbabilityDistribution, DiscreteFactor
from itertools import combinations
from flyingsquid.helpers import *
import numpy as np
import math
from tqdm import tqdm
import sys
import random
class Mixin:
'''
Triplet algorithms as a Mixin. These algorithms recover the mean parameters
of the graphical model.
'''
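    # A minimal sketch of the identity these methods exploit (illustrative note,
    # assuming labeling functions lambda_i in {-1, +1} that are conditionally
    # independent given Y): E[lambda_i lambda_j] = E[lambda_i Y] * E[lambda_j Y],
    # so for a triplet (i, j, k) with pairwise moments M_ab = E[lambda_a lambda_b],
    #     |E[lambda_i Y]| = sqrt(|M_ij * M_ik / M_jk|)
    # which is the expression evaluated in _triplet_method_probabilities below.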
def _triplet_method_single_seed(self, expectations_to_estimate):
# create triplets for what we need, and return which moments we'll need to compute
exp_to_estimate_list = sorted(list(expectations_to_estimate))
if self.triplet_seed is not None:
random.shuffle(exp_to_estimate_list)
if self.triplets is None:
expectations_in_triplets = set()
triplets = []
for expectation in exp_to_estimate_list:
# if we're already computing it, don't need to add to a new triplet
if expectation in expectations_in_triplets:
continue
if not self.allow_abstentions:
Y_node = expectation[-1]
else:
Y_node = expectation[0][-1]
def check_triplet(triplet):
return (self._is_separator(triplet[0][:-1], triplet[1][:-1], Y_node) and
self._is_separator(triplet[0][:-1], triplet[2][:-1], Y_node) and
self._is_separator(triplet[1][:-1], triplet[2][:-1], Y_node))
triplet = [expectation]
found = False
# first try looking at the other expectations that we need to estimate
for first_node in exp_to_estimate_list:
if self.allow_abstentions:
# need to check if conditionals are the same
if (first_node in triplet or # skip if it's already in the triplet
first_node[0][-1] != Y_node or # skip if the Y values aren't the same
first_node[1] != expectation[1] or # skip if conditions are different
(len(first_node[0]) > 2 and len(expectation[0]) > 2) or # at most one item in the triplet can have length > 2
first_node in expectations_in_triplets or # we're already computing this
not self._is_separator(expectation[0][:-1], first_node[0][:-1], Y_node)): # not separated
continue
else:
if (first_node in triplet or # skip if it's already in the triplet
first_node[-1] != Y_node or # skip if the Y values aren't the same
(len(first_node) > 2 and len(expectation) > 2) or # at most one item in the triplet can have length > 2
first_node in expectations_in_triplets or # we're already computing this
not self._is_separator(expectation[:-1], first_node[:-1], Y_node)): # not separated
continue
triplet = [expectation, first_node]
# first try looking at the other expectations that we need to estimate
for second_node in exp_to_estimate_list:
if self.allow_abstentions:
if (second_node in triplet or # skip if it's already in the triplet
second_node[0][-1] != Y_node or # skip if the Y values aren't the same
second_node[1] != expectation[1] or # skip if conditions are different
(len(second_node[0]) > 2 and
any(len(exp[0]) > 2 for exp in triplet)) or # at most one item in the triplet can have length > 2
second_node in expectations_in_triplets or # we're already computing this
not all(self._is_separator(exp[0][:-1], second_node[0][:-1], Y_node) for exp in triplet)): # not separated
continue
else:
if (second_node in triplet or # skip if it's already in the triplet
second_node[-1] != Y_node or # skip if the Y values aren't the same
(len(second_node) > 2 and
any(len(exp) > 2 for exp in triplet)) or # at most one item in the triplet can have length > 2
second_node in expectations_in_triplets or # we're already computing this
not all(self._is_separator(exp[:-1], second_node[:-1], Y_node) for exp in triplet)): # not separated
continue
# we found a triplet!
triplet = [expectation, first_node, second_node]
found = True
break
if found:
break
# otherwise, try everything
for second_node in [
((node, Y_node), expectation[1]) if self.allow_abstentions else (node, Y_node)
for node in self.nodes
]:
if self.allow_abstentions:
if (second_node in triplet or # skip if it's already in the triplet
second_node[1] != expectation[1] or # skip if conditions are different
not all(self._is_separator(exp[0][:-1], second_node[0][:-1], Y_node) for exp in triplet)): # not separated
continue
else:
if (second_node in triplet or # skip if it's already in the triplet
not all(self._is_separator(exp[:-1], second_node[:-1], Y_node) for exp in triplet)): # not separated
continue
# we found a triplet!
triplet = [expectation, first_node, second_node]
found = True
break
if found:
break
if not found:
# try everything
for first_node in [
((node, Y_node), expectation[1]) if self.allow_abstentions else (node, Y_node)
for node in self.nodes if 'Y' not in node
]:
if self.allow_abstentions:
if (first_node in triplet or # skip if it's already in the triplet
first_node[0][0] in expectation[1] or # skip if the node is part of the condition
not self._is_separator(expectation[0][:-1], first_node[0][:-1], Y_node)): # not separated
continue
else:
if (first_node in triplet or # skip if it's already in the triplet
not self._is_separator(expectation[:-1], first_node[:-1], Y_node)): # not separated
continue
triplet = [expectation, first_node]
if found:
break
for second_node in [
((node, Y_node), expectation[1]) if self.allow_abstentions else (node, Y_node)
for node in self.nodes if 'Y' not in node
]:
if self.allow_abstentions:
if (second_node in triplet or # skip if it's already in the triplet
second_node[0][0] in expectation[1] or # skip if the node is part of the condition
not all(self._is_separator(exp[0][:-1], second_node[0][:-1], Y_node) for exp in triplet)): # not separated
continue
else:
if (second_node in triplet or # skip if it's already in the triplet
not all(self._is_separator(exp[:-1], second_node[:-1], Y_node) for exp in triplet)): # not separated
continue
# we found a triplet!
triplet = [expectation, first_node, second_node]
found = True
break
if found:
break
if found:
triplets.append(triplet)
for expectation in triplet:
expectations_in_triplets.add(expectation)
else:
triplets = self.triplets
all_moments = set()
abstention_probabilities = {}
for exp1, exp2, exp3 in triplets:
if self.allow_abstentions:
condition = exp1[1]
moments = [
tuple(sorted(exp1[0][:-1] + exp2[0][:-1])),
tuple(sorted(exp1[0][:-1] + exp3[0][:-1])),
tuple(sorted(exp2[0][:-1] + exp3[0][:-1]))
]
indices1 = tuple(sorted([ int(node.split('_')[1]) for node in exp1[0][:-1] ]))
indices2 = tuple(sorted([ int(node.split('_')[1]) for node in exp2[0][:-1] ]))
indices3 = tuple(sorted([ int(node.split('_')[1]) for node in exp3[0][:-1] ]))
if indices1 not in abstention_probabilities:
abstention_probabilities[indices1] = 0
if indices2 not in abstention_probabilities:
abstention_probabilities[indices2] = 0
if indices3 not in abstention_probabilities:
abstention_probabilities[indices3] = 0
else:
# first, figure out which moments we need to compute
moments = [
tuple(sorted(exp1[:-1] + exp2[:-1])),
tuple(sorted(exp1[:-1] + exp3[:-1])),
tuple(sorted(exp2[:-1] + exp3[:-1]))
]
for moment in moments:
indices = tuple(sorted([ int(node.split('_')[1]) for node in moment ]))
if indices not in all_moments:
all_moments.add(indices)
return triplets, all_moments, abstention_probabilities
def _triplet_method_mean_median(self, expectations_to_estimate, solve_method):
exp_to_estimate_list = sorted(list(expectations_to_estimate))
triplets = []
if self.triplets is None:
if self.fully_independent_case:
Y_node = 'Y'
all_nodes = [
((node, Y_node), '0') if self.allow_abstentions else (node, Y_node)
for node in self.nodes if 'Y' not in node
]
triplets = [
[i, j, k]
for i in all_nodes
for j in all_nodes if i != j
for k in all_nodes if i != k and k != j
] + [
[expectation, -1, -1] for expectation in exp_to_estimate_list
]
else:
for expectation in exp_to_estimate_list:
if not self.allow_abstentions:
Y_node = expectation[-1]
else:
Y_node = expectation[0][-1]
triplet = [expectation]
# try everything
for first_node in [
((node, Y_node), expectation[1]) if self.allow_abstentions else (node, Y_node)
for node in self.nodes if 'Y' not in node
]:
if self.allow_abstentions:
if (first_node in triplet or # skip if it's already in the triplet
first_node[0][0] in expectation[1] or # skip if the node is part of the condition
not self._is_separator(expectation[0][:-1], first_node[0][:-1], Y_node)): # not separated
continue
else:
if (first_node in triplet or # skip if it's already in the triplet
not self._is_separator(expectation[:-1], first_node[:-1], Y_node)): # not separated
continue
triplet = [expectation, first_node]
for second_node in [
((node, Y_node), expectation[1]) if self.allow_abstentions else (node, Y_node)
for node in self.nodes if 'Y' not in node
]:
if self.allow_abstentions:
if (second_node in triplet or # skip if it's already in the triplet
second_node[0][0] in expectation[1] or # skip if the node is part of the condition
not all(self._is_separator(exp[0][:-1], second_node[0][:-1], Y_node) for exp in triplet)): # not separated
continue
else:
if (second_node in triplet or # skip if it's already in the triplet
not all(self._is_separator(exp[:-1], second_node[:-1], Y_node) for exp in triplet)): # not separated
continue
if tuple([expectation, second_node, first_node]) in triplets:
continue
# we found a triplet!
triplet = [expectation, first_node, second_node]
triplets.append(tuple(triplet))
triplet = [expectation, first_node]
triplet = [expectation]
else:
triplets = self.triplets
all_moments = set()
abstention_probabilities = {}
if self.fully_independent_case:
all_nodes = list(range(self.m))
all_moments = set([
(i, j)
for i in all_nodes
for j in all_nodes if i != j
])
if self.allow_abstentions:
for node in all_nodes:
abstention_probabilities[tuple([node])] = 0
else:
for exp1, exp2, exp3 in triplets:
if self.allow_abstentions:
condition = exp1[1]
moments = [
tuple(sorted(exp1[0][:-1] + exp2[0][:-1])),
tuple(sorted(exp1[0][:-1] + exp3[0][:-1])),
tuple(sorted(exp2[0][:-1] + exp3[0][:-1]))
]
indices1 = tuple(sorted([ int(node.split('_')[1]) for node in exp1[0][:-1] ]))
indices2 = tuple(sorted([ int(node.split('_')[1]) for node in exp2[0][:-1] ]))
indices3 = tuple(sorted([ int(node.split('_')[1]) for node in exp3[0][:-1] ]))
if indices1 not in abstention_probabilities:
abstention_probabilities[indices1] = 0
if indices2 not in abstention_probabilities:
abstention_probabilities[indices2] = 0
if indices3 not in abstention_probabilities:
abstention_probabilities[indices3] = 0
else:
# first, figure out which moments we need to compute
moments = [
tuple(sorted(exp1[:-1] + exp2[:-1])),
tuple(sorted(exp1[:-1] + exp3[:-1])),
tuple(sorted(exp2[:-1] + exp3[:-1]))
]
for moment in moments:
indices = tuple(sorted([ int(node.split('_')[1]) for node in moment ]))
if indices not in all_moments:
all_moments.add(indices)
return triplets, all_moments, abstention_probabilities
def _triplet_method_preprocess(self, expectations_to_estimate, solve_method):
if solve_method == 'triplet':
return self._triplet_method_single_seed(expectations_to_estimate)
elif solve_method in [ 'triplet_mean', 'triplet_median' ]:
return self._triplet_method_mean_median(expectations_to_estimate, solve_method)
else:
            raise NotImplementedError('Unknown solve method {}'.format(solve_method))
def _triplet_method_probabilities(self, triplets, lambda_moment_vals, lambda_zeros,
abstention_probabilities, sign_recovery, solve_method):
expectation_values = {}
if solve_method == 'triplet':
pass
else:
# each triplet is constructed for the first value in the expectation
# get all the triplets with the same first value, and take the mean or median
expectation_value_candidates = {}
if self.fully_independent_case and solve_method in ['triplet_mean', 'triplet_median']:
second_moment = np.zeros((self.m, self.m))
for key in lambda_moment_vals:
i, j = key
second_moment[i][j] = lambda_moment_vals[(i, j)]
def all_triplet_vals(idx):
triplet_vals = []
for i in range(self.m):
if i == idx:
continue
for j in range(i):
if j == idx:
continue
val = math.sqrt(abs(
(second_moment[idx, i] * second_moment[idx, j] / second_moment[i, j])
if second_moment[i, j] != 0 else 0
))
triplet_vals.append(val)
return triplet_vals
all_vals = [all_triplet_vals(idx) for idx in range(self.m)]
expectations_to_estimate = [
expectation
for expectation, a, b in triplets if a == -1 and b == -1
]
for expectation in expectations_to_estimate:
if self.allow_abstentions:
idx = int(expectation[0][0].split('_')[1])
else:
idx = int(expectation[0].split('_')[1])
expectation_value_candidates[expectation] = all_vals[idx]
else:
for exp1, exp2, exp3 in triplets:
if self.allow_abstentions:
moments = [
tuple(sorted(exp1[0][:-1] + exp2[0][:-1])),
tuple(sorted(exp1[0][:-1] + exp3[0][:-1])),
tuple(sorted(exp2[0][:-1] + exp3[0][:-1]))
]
else:
# first, figure out which moments we need to compute
moments = [
tuple(sorted(exp1[:-1] + exp2[:-1])),
tuple(sorted(exp1[:-1] + exp3[:-1])),
tuple(sorted(exp2[:-1] + exp3[:-1]))
]
moment_vals = [
lambda_moment_vals[
tuple(sorted([ int(node.split('_')[1]) for node in moment ]))
]
for moment in moments
]
if solve_method == 'triplet':
expectation_values[exp1] = (
math.sqrt(abs(moment_vals[0] * moment_vals[1] / moment_vals[2])) if moment_vals[2] != 0 else 0)
expectation_values[exp2] = (
math.sqrt(abs(moment_vals[0] * moment_vals[2] / moment_vals[1])) if moment_vals[1] != 0 else 0)
expectation_values[exp3] = (
math.sqrt(abs(moment_vals[1] * moment_vals[2] / moment_vals[0])) if moment_vals[0] != 0 else 0)
else:
if exp1 not in expectation_value_candidates:
expectation_value_candidates[exp1] = []
exp_value = (
math.sqrt(abs(moment_vals[0] * moment_vals[1] / moment_vals[2])) if moment_vals[2] != 0 else 0)
expectation_value_candidates[exp1].append(exp_value)
if solve_method in ['triplet_mean', 'triplet_median']:
for exp in expectation_value_candidates:
if solve_method == 'triplet_mean':
agg_function = np.mean
if solve_method == 'triplet_median':
agg_function = np.median
expectation_values[exp] = agg_function(expectation_value_candidates[exp])
self.expectation_value_candidates = expectation_value_candidates
if sign_recovery == 'all_positive':
# all signs are already positive
pass
else:
print('{} sign recovery not implemented'.format(sign_recovery))
return
if self.allow_abstentions:
# probability is 0.5 * (1 + expectation - P(lambda part of factor is zero)) * P(conditional)
# P(conditional) is 1 if there is no conditional
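            # worked example with illustrative numbers: exp_value = 0.6,
            # abstention_prob = 0.1 and no conditional (condition_prob = 1) give
            #     0.5 * (1 + 0.6 - 0.1) * 1 = 0.75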
probabilities = {}
for expectation in sorted(list(expectation_values.keys())):
exp_value = expectation_values[expectation]
if expectation[1][0] == '0':
condition_prob = 1
else:
zero_condition = tuple(sorted([ int(node.split('_')[1]) for node in expectation[1] ]))
condition_prob = lambda_zeros[zero_condition]
lambda_factor = tuple(sorted([ int(node.split('_')[1]) for node in expectation[0][:-1] ]))
abstention_prob = abstention_probabilities[lambda_factor]
probabilities[expectation] = 0.5 * (1 + exp_value - abstention_prob) * condition_prob
else:
probabilities = {
expectation: 0.5 * (1 + expectation_values[expectation])
for expectation in sorted(list(expectation_values.keys()))
}
return probabilities, expectation_values
|
const fs = require('fs-extra')
const database = require('../database')
const prompts = require('prompts')
const config = require('../config')
function validateNotEmpty (value) {
if (value.trim() === '') {
return 'Must not be empty'
} else {
return true
}
}
module.exports = async function (dir) {
await fs.ensureDir(dir)
const db = await database.open(dir)
await database.migrate(db, dir)
const res = await prompts([
{
type: 'text',
name: 'discordToken',
message: 'What is your discord token?',
validate: validateNotEmpty
},
{
type: 'text',
name: 'commandChar',
      message: 'What character do you want to use for commands?',
validate: validateNotEmpty
}
])
const newConfig = {
discordToken: res.discordToken.trim(),
commandChar: res.commandChar.trim()
}
const { valid, errors } = await config.validate(newConfig)
if (!valid) {
console.log(errors)
return { code: 1 }
}
  await config.save(dir, newConfig)
  return { code: 0 }
}
|
import * as mars2d from "mars2d"
let map
let graphicLayer
let shortestPathLayer
let polygonZAM
let pointQD
let pointZD
/**
 * Initialize the map business logic; lifecycle hook (required).
 * The framework calls this automatically after the map finishes initializing.
 * @param {mars2d.Map} mapInstance the map instance
 * @returns {void}
*/
export function onMounted(mapInstance) {
  map = mapInstance // keep a reference to the map
  // create a vector data layer
graphicLayer = new mars2d.layer.GraphicLayer()
map.addLayer(graphicLayer)
  // layer for the point and line vector data
shortestPathLayer = new mars2d.layer.GraphicLayer()
map.addLayer(shortestPathLayer)
}
/**
 * Lifecycle hook that releases the current map business logic.
 * @returns {void}
*/
export function onUnmounted() {
map = null
}
// draw the obstacle polygon
export function drawPolygon() {
if (polygonZAM) {
polygonZAM.remove()
polygonZAM = null
}
graphicLayer.startDraw({
type: "polygon",
style: {
fillColor: "#66FF66",
fillOpacity: 0.4,
outline: true,
outlineWidth: 1,
outlineColor: "red"
},
success: (graphic) => {
polygonZAM = graphic
}
})
}
// draw the start point
export function startPoint() {
if (pointQD) {
pointQD.remove()
pointQD = null
}
graphicLayer.startDraw({
type: "point",
style: {
pixelSize: 5,
color: "red",
label: {
text: "起点",
font_size: 20,
color: "#ffffff",
outline: true,
outlineColor: "#000000",
pixelOffsetY: -20
}
},
success: (graphic) => {
pointQD = graphic
}
})
}
// draw the end point
export function endPoint() {
if (pointZD) {
pointZD.remove()
pointZD = null
}
graphicLayer.startDraw({
type: "point",
style: {
pixelSize: 5,
color: "red",
label: {
text: "终点",
font_size: 20,
color: "#ffffff",
outline: true,
outlineColor: "#000000",
pixelOffsetY: -20
}
},
success: (graphic) => {
pointZD = graphic
}
})
}
// compute the shortest path
export function shortestPath() {
if (!polygonZAM) {
globalMsg("请绘制障碍面")
return
}
if (!pointQD) {
globalMsg("请绘制起点")
return
}
if (!pointZD) {
globalMsg("请绘制终点")
return
}
shortestPathLayer.clear()
  const polygon = polygonZAM.toGeoJSON() // obstacle polygon
  const startPoint = pointQD.toGeoJSON() // start point
  const endPoint = pointZD.toGeoJSON() // end point
const options = {
obstacles: polygon
}
const path = turf.shortestPath(startPoint, endPoint, options)
const positions = path.geometry.coordinates
const latlngs = mars2d.PointTrans.coords2latlngs([positions])
  const polylineGraphic = new mars2d.graphic.Polyline({
    latlngs: latlngs,
    style: {
      color: "#55ff33"
    }
  })
  shortestPathLayer.addGraphic(polylineGraphic)
}
export function clearLayer() {
polygonZAM = null
pointQD = null
pointZD = null
graphicLayer.clear()
shortestPathLayer.clear()
}
|
const ExtractTextPlugin = require('extract-text-webpack-plugin');
const HtmlWebpackPlugin = require('html-webpack-plugin');
const path = require('path');
const paths = {
src: 'src',
dist: 'dist',
assets: 'assets',
};
const webpackConfig = {
entry: {
pre: path.join(__dirname, paths.src, 'scripts/pre.js'),
post: path.join(__dirname, paths.src, 'scripts/post.js'),
main: path.join(__dirname, paths.src, 'main.js'),
},
output: {
filename: `${paths.assets}/[name].js`,
path: path.resolve(paths.dist),
publicPath: '',
},
resolve: {
extensions: ['.js', '.json', '.vue'],
},
module: {
rules: [{
/* Enable Vue's Single File Components */
test: /\.vue$/,
use: {
loader: 'vue-loader',
options: {
loaders: {
            // Vue needs to be transpiled to ES5
js: 'babel-loader?presets[]=es2015,presets[]=stage-0',
}
},
}
}, {
test: /\.js$/,
exclude: /(node_modules|bower_components)/,
use: {
loader: 'babel-loader',
options: {
presets: ['stage-0']
}
}
}, {
test: /\.scss$/,
use: ExtractTextPlugin.extract({
fallback: 'style-loader',
// resolve-url-loader may be chained before sass-loader if necessary
use: ['css-loader', 'sass-loader']
})
}],
},
devServer: {
port: 8080,
host: '0.0.0.0',
disableHostCheck: true,
},
};
function getChunk(name) {
return webpackConfig.output.publicPath + name;
}
const getChunkJs = name => getChunk(webpackConfig.output.filename.replace(/\[(.+?)\]/g, name));
const getChunkCss = () => getChunk(`${paths.assets}/style.css`);
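// e.g. with the config above, getChunkJs('pre') yields 'assets/pre.js' and
// getChunkCss() yields 'assets/style.css' (illustrative, since publicPath is '')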
webpackConfig.plugins = [
new ExtractTextPlugin({
filename: `${paths.assets}/style.css`,
}),
new HtmlWebpackPlugin({
template: `${paths.src}/index.html`,
excludeChunks: ['pre', 'post'],
styles: getChunkCss(),
headChunk: getChunkJs('pre'),
bodyChunk: getChunkJs('post'),
}),
];
module.exports = webpackConfig;
|
# Copyright (c) 2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from __future__ import print_function
from collections import Mapping
import _m5.debug
from _m5.debug import SimpleFlag, CompoundFlag
from _m5.debug import schedBreak, setRemoteGDBPort
from m5.util import printList
def help():
print("Base Flags:")
for name in sorted(flags):
if name == 'All':
continue
flag = flags[name]
children = [c for c in flag.kids() ]
if not children:
print(" %s: %s" % (name, flag.desc()))
print()
print("Compound Flags:")
for name in sorted(flags):
if name == 'All':
continue
flag = flags[name]
children = [c for c in flag.kids() ]
if children:
print(" %s: %s" % (name, flag.desc()))
printList([ c.name() for c in children ], indent=8)
print()
class AllFlags(Mapping):
def __init__(self):
self._version = -1
self._dict = {}
def _update(self):
current_version = _m5.debug.getAllFlagsVersion()
if self._version == current_version:
return
self._dict.clear()
for name, flag in _m5.debug.allFlags().items():
self._dict[name] = flag
self._version = current_version
def __contains__(self, item):
self._update()
return item in self._dict
def __getitem__(self, item):
self._update()
return self._dict[item]
def __iter__(self):
self._update()
return iter(self._dict)
def __len__(self):
self._update()
return len(self._dict)
def keys(self):
self._update()
return self._dict.keys()
def values(self):
self._update()
return self._dict.values()
def items(self):
self._update()
return self._dict.items()
flags = AllFlags()
|
const yargs = require('yargs');
yargs.version('1.1.9');
yargs.command({
command: 'add',
describe: '- "Add a new note"',
builder: {
title: {
describe: 'Note title',
      demandOption: true, // without a title an error is raised; title is required
type: 'string'
},
body: {
describe: 'Note body',
demandOption: true,
type: 'string'
}
},
handler: function(argv){
console.log('Title: ' + argv.title);
console.log('Body: ' + argv.body);
}
});
yargs.command({
command: 'remove',
describe: '- "Remove a note"',
handler: function(){
console.log('Removing a note');
}
});
yargs.command({
command: 'read',
describe: '- "Read a note"',
handler: function(){
console.log('Reading a note');
}
});
yargs.command({
command: 'list',
describe: '- "List notes"',
handler: function(){
console.log('Listing out all notes');
}
});
yargs.parse(); // parse all yargs configuration and log the result to the console
//to run command help use: using-yargs --help
//to show version of product use: using-yargs --version
//to run command 'add' use: node using-yargs add --title="My title" --body="My body is very short"
//to run command 'remove' use: using-yargs remove
//to run command 'read' use: using-yargs read
//to run command 'list' use: using-yargs list |
function countLetter(str, letter) {
let count = 0;
for (let i = 0; i<str.length; i++)
if (str[i] == letter)
count++;
return count;
}
console.log(countLetter('hello', 'l')) |
"""
* Project Name: AngelDocs
* File Name: test_ignore_flag.py
* Programmer: Kai Prince
* Date: Mon, Apr 19, 2021
* Description: This file contains test for a --ignore flag.
"""
from pathlib import Path
from utils import make_ignore_matcher
import pytest
from main import build_docs, resolve_file_sources
@pytest.mark.parametrize(
("source_paths", "ignore_paths", "expected_paths", "expected_not_paths"),
[
[
["project"],
["project/module/**/*"],
["setup.md"],
["module/__init__.md", "module/file.md", "module/Readme.md"],
],
],
)
@pytest.mark.usefixtures("change_test_dir")
def test_ignore_flag(source_paths, ignore_paths, expected_paths, expected_not_paths):
""" Does not include files matching the ignore pattern in generated output. """
# Arrange
output_dir = "output"
expected = [f"{output_dir}/project/{path}" for path in expected_paths]
expected_not = [f"{output_dir}/project/{path}" for path in expected_not_paths]
# Act
files = resolve_file_sources(source_paths, ignore_paths=ignore_paths)
build_docs(files, output_dir)
# Assert
for path in expected:
assert Path(path).exists()
for path in expected_not:
assert not Path(path).exists()
@pytest.mark.parametrize(
("source_paths", "ignore_paths", "expected_paths", "expected_not_paths"),
[
[
["project"],
["project/module/**/*"],
["setup.py"],
["module/__init__.py", "module/file.py", "module/Readme.md"],
],
],
)
@pytest.mark.usefixtures("change_test_dir")
def test_resolve_ignored_files(
source_paths, ignore_paths, expected_paths, expected_not_paths
):
""" Ignores files matching the ignore pattern. """
# Arrange
output_dir = "output"
expected = [f"project/{path}" for path in expected_paths]
expected_not = [f"project/{path}" for path in expected_not_paths]
# Act
files = resolve_file_sources(source_paths, ignore_paths=ignore_paths)
build_docs(files, output_dir)
# Assert
for path in expected:
assert str(Path(path)) in files
for path in expected_not:
assert str(Path(path)) not in files
@pytest.mark.parametrize(
("pattern", "paths"),
[
[
".\\venv\\**\\*",
[
"venv\\Scripts\\symilar.exe",
"venv\\Scripts\\pythonw.exe",
"venv\\Lib\\site-packages\\_distutils_hack\\override.py",
],
],
[
".\\**\\*cache*\\**\\*",
[
"__pycache__\\main.cpython-39.pyc",
"__pycache__\\config.cpython-39.pyc",
"tests\\__pycache__\\conftest.cpython-39-pytest-6.2.2.pyc",
],
],
],
)
def test_pattern_match(pattern, paths):
""" Unit test for path matching algorithm. """
# Arrange
is_ignored = make_ignore_matcher([pattern])
# Act
ignored_paths = [path for path in paths if is_ignored(path)]
# Assert
for path in paths:
assert path in ignored_paths
|
import time
import mechanicalsoup as ms
from .cli import get_args
from .codeo import CodeoBrowser
from .utils import get_enviroment_vars, show_results
def main():
args = get_args()
with ms.StatefulBrowser() as browser:
cb = CodeoBrowser(browser)
username, password = get_enviroment_vars()
cb.login(username, password)
result = cb.submit_problem(args.url, args.file)
time.sleep(1.5)
show_results(result)
if __name__ == "__main__":
main()
|
//
// ASPX::sqloledb_1_sspi database driver code template
//
module.exports = require('./default'); |
import 'bootstrap';
export function configure(aurelia) {
aurelia.use
.standardConfiguration()
.developmentLogging();
  //The plugin below enables CSS animation.
  aurelia.use.plugin('aurelia-animator-css');
  //Anyone wanting to use HTMLImports to load views will need to install the following plugin.
  //aurelia.use.plugin('aurelia-html-import-template-loader')
aurelia.start().then(a => a.setRoot());
}
|
import json
from django.db.models import Count, Prefetch, QuerySet
from rest_framework import authentication, request, response, serializers, viewsets
from rest_framework.decorators import action
from posthog.auth import PersonalAPIKeyAuthentication, TemporaryTokenAuthentication
from posthog.models import Element, ElementGroup, Event, Filter, Team
class ElementSerializer(serializers.ModelSerializer):
class Meta:
model = Element
fields = [
"text",
"tag_name",
"attr_class",
"href",
"attr_id",
"nth_child",
"nth_of_type",
"attributes",
"order",
]
class ElementViewSet(viewsets.ModelViewSet):
queryset = Element.objects.all()
serializer_class = ElementSerializer
authentication_classes = [
TemporaryTokenAuthentication,
PersonalAPIKeyAuthentication,
authentication.SessionAuthentication,
authentication.BasicAuthentication,
]
def get_queryset(self) -> QuerySet:
queryset = super().get_queryset()
return queryset.filter(group__team=self.request.user.team)
@action(methods=["GET"], detail=False)
def stats(self, request: request.Request) -> response.Response:
team = self.request.user.team
filter = Filter(request=request)
events = (
Event.objects.filter(team=team, event="$autocapture")
.filter(filter.properties_to_Q(team_id=team.pk))
.filter(filter.date_filter_Q)
)
events = events.values("elements_hash").annotate(count=Count(1)).order_by("-count")[0:100]
groups = ElementGroup.objects.filter(
team=team, hash__in=[item["elements_hash"] for item in events]
).prefetch_related(Prefetch("element_set", queryset=Element.objects.order_by("order", "id")))
return response.Response(
[
{
"count": item["count"],
"hash": item["elements_hash"],
"elements": [
ElementSerializer(element).data
for element in [group for group in groups if group.hash == item["elements_hash"]][
0
].element_set.all()
],
}
for item in events
]
)
@action(methods=["GET"], detail=False)
def values(self, request: request.Request) -> response.Response:
key = request.GET.get("key")
params = []
where = ""
# Make sure key exists, otherwise could lead to sql injection lower down
if key not in self.serializer_class.Meta.fields:
return response.Response([])
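        # illustrative note: without this allowlist check, a request such as
        # ?key=text" OR 1=1 -- would be interpolated verbatim into the raw SQL
        # template below, because `key` is substituted via str.format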
if request.GET.get("value"):
where = ' AND "posthog_element"."{}" LIKE %s'.format(key)
params.append("%{}%".format(request.GET["value"]))
# This samples a bunch of elements with that property, and then orders them by most popular in that sample
# This is much quicker than trying to do this over the entire table
values = Element.objects.raw(
"""
SELECT
value, COUNT(1) as id
FROM (
SELECT
("posthog_element"."{key}") as "value"
FROM
"posthog_element"
INNER JOIN
"posthog_elementgroup" ON ("posthog_elementgroup".id="posthog_element"."group_id")
WHERE
("posthog_element"."{key}") IS NOT NULL {where} AND
("posthog_elementgroup"."team_id" = {team_id})
LIMIT 10000
) as "value"
GROUP BY value
ORDER BY id DESC
LIMIT 50;
""".format(
where=where, team_id=request.user.team.pk, key=key
),
params,
)
return response.Response([{"name": value.value} for value in values])
|
import { TypeCreator } from "@compas/code-gen";
export function extendWithAuth(app) {
const T = new TypeCreator("auth");
const R = T.router("/auth");
const session = T.object("session").keys({
id: T.uuid(),
createdAt: T.date(),
});
const tokenPair = T.object("tokenPair").keys({
accessToken: T.string(),
refreshToken: T.string(),
});
app.add(
R.get("/me", "me").response({
session,
}),
R.post("/login", "login")
.response(tokenPair)
.invalidations(R.invalidates("auth")),
R.post("/refresh", "refreshTokens")
.body({
refreshToken: T.string(),
})
.response(tokenPair)
.invalidations(R.invalidates("auth")),
R.post("/logout", "logout")
.response({ success: true })
.invalidations(R.invalidates("auth")),
);
}
|
import React from 'react'
const Footer = () => {
return (
<div className='footer'>
      <p>Copyright © 2018</p>
</div>
)
}
export default Footer |
"""
Annoying Cousin
Your cousin owns many books, all on paper, and is always lending them to his friends.
However, he often forgets whom he lent them to and has already lost several books because of this.
At the family lunch last weekend he would not stop talking and complaining about it until, tired of hearing his lamentations,
you decided to help him by developing a program to solve this problem.
Talking it over, you both decided the system must allow registering friends, including: name, phone number and e-mail,
and also registering books, with: title, summary, author, main character, genre (comedy, romance, adventure, etc.) and age rating.
It must be possible to register loans and obtain the list of lent books, including which friends are holding which books.
So, as a good developer, before starting to code you decided to model the system using a class diagram that:
1. Identifies and models the classes of the problem
2. Identifies and models the attributes and methods of the classes
3. Identifies and models the associations between the classes
"""
# Open in: https://mermaid.live/
classDiagram
direction RL
class Genero {
<<enumeration>>
Comedia
Romance
Terror
Aventura
}
Amigo "1" o-- "0..n" Livro
Livro "1" --> "1..*" Genero
class Amigo {
-id
-String nome
-String numero
-String email
-List~Livro~ livro
+getNome()
+setNome(nome)
+getNumero()
+setNumero(numero)
+getEmail()
+setEmail(email)
+addLivro()
+removeLivro()
+getListaLivros()
}
class Livro {
-id
-String titulo
-String resumo
-String autor
-String personagem_principal
-List~Genero~ genero
-int faixa_etaria
-id_amigo
+getStatusLivro()
+setStatusLivro(status)
+isEmprestado()
}
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Title : PyRogue XPM Cu Generator Module
#-----------------------------------------------------------------------------
# File : CuGenerator.py
# Created : 2019-06-24
#-----------------------------------------------------------------------------
# Description:
# PyRogue XPM Cu Generator Module
#-----------------------------------------------------------------------------
# This file is part of the rogue software platform. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue software platform, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
class CuGenerator(pr.Device):
def __init__( self,
name = "CuGenerator",
description = "XPM Cu Generator Module",
**kwargs):
super().__init__(name=name, description=description, **kwargs)
##############################
# Variables
##############################
self.add(pr.RemoteVariable(
name = "timeStamp",
description = "Received time stamp",
offset = 0x00,
bitSize = 64,
bitOffset = 0x00,
base = pr.UInt,
mode = "RO",
))
self.add(pr.RemoteVariable(
name = "pulseId",
description = "Received pulse ID",
offset = 0x08,
bitSize = 64,
bitOffset = 0x00,
base = pr.UInt,
mode = "RO",
))
self.add(pr.RemoteVariable(
name = "cuDelay",
description = "Retransmission delay in 186MHz clks",
offset = 0x10,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "cuBeamCode",
description = "Eventcode for Beam present translation",
offset = 0x14,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "cuFiducialIntv",
description = "Interval between last two Cu fiducials",
offset = 0x18,
bitSize = 19,
bitOffset = 0x00,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "cuFiducialIntvErr",
description = "Latched error from Cu fiducial interval",
offset = 0x1B,
bitSize = 1,
bitOffset = 0x7,
base = pr.UInt,
mode = "RW",
))
@self.command(name="ClearFiducialErr", description="Clear the fiducial error latch",)
def ClearFiducialErr():
self.cuFiducialIntv.set(0)
|
"""
HPCCM recipe for DALTON image (MPI)
Contents:
Ubuntu 18.04
GNU compilers (upstream)
Intel MKL as linear algebra backend
HDF5
OpenMPI
OFED/MOFED
PMI2 (SLURM)
UCX
Generating recipe (stdout):
$ hpccm --recipe recipe_dalton_mpi.py --format singularity --singularity-version=3.2
"""
import re
os_version = "18.04"
cmake_version = "3.20.2"
openmpi_version = "4.0.5"
dalton_version = "@_VERSION_@"
# Ubuntu base image
Stage0 += baseimage(image=f"ubuntu:{os_version}", _as="build")
# copy patches to apply
patches = ["dalton-py3.patch", "dalton-install.patch"]
Stage0 += copy(src=[f"patches/{p}" for p in patches], dest="/")
# GNU compilers
compiler = gnu()
Stage0 += compiler
Stage0 += packages(apt=["git", "ca-certificates"])
# (M)OFED
Stage0 += mlnx_ofed()
# UCX
Stage0 += ucx(cuda=False, ofed=True)
# PMI2
Stage0 += slurm_pmi2(version="20.11.7")
# OpenMPI (use UCX instead of IB directly)
Stage0 += openmpi(
cuda=False,
infiniband=False,
pmi="/usr/local/slurm-pmi2",
ucx="/usr/local/ucx",
toolchain=compiler.toolchain,
version=openmpi_version,
)
# CMake
Stage0 += cmake(eula=True, version=cmake_version)
# MKL
Stage0 += mkl(eula=True, mklvars=False)
# HDF5
Stage0 += hdf5(version="1.10.5", configure_opts=["--enable-fortran"])
# Python 3
Stage0 += python(python2=False, python3=True)
# DALTON
Stage0 += generic_cmake(
repository="https://gitlab.com/dalton/dalton",
branch=dalton_version,
recursive=True,
preconfigure=[f"git apply /{p}" for p in patches],
cmake_opts=[
"-D CMAKE_BUILD_TYPE=Release",
"-D CMAKE_Fortran_COMPILER=mpifort",
"-D CMAKE_C_COMPILER=mpicc",
"-D CMAKE_CXX_COMPILER=mpicxx",
"-D ENABLE_MPI=ON",
"-D ENABLE_PELIB=ON",
"-D ENABLE_PDE=ON",
"-D ENABLE_SRDFT=ON",
],
prefix="/usr/local/dalton",
)
# Runtime distributable stage
Stage1 += baseimage(image=f"ubuntu:{os_version}")
Stage1 += Stage0.runtime()
Stage1 += environment(variables={"PATH": "$PATH:/usr/local/dalton/bin"})
Stage1 += runscript(commands=["dalton"])
Stage1 += label(
metadata={
"Author": '"Radovan Bast and Roberto Di Remigio"',
"Version": f'"{dalton_version}"',
"Description": '"DALTON program (MPI version)"',
"Dependency": '"OpenMPI v4.0"',
}
)
help_str = f"""
%help
MPI-parallel build of DALTON using OpenMPI-{openmpi_version} on a
Ubuntu-{os_version} base image. Requires compatible OpenMPI version on the
host.
The image includes Mellanox OFED, UCX and PMI2 for compatibility with common
HPC environments with InfiniBand and SLURM.
To run with N processes you should launch the singularity execution with
mpirun/srun:
$ mpirun -np N singularity exec <image-name>.sif hf molecule
"""
Stage1 += raw(singularity=help_str)
|
export default function cssClass(...c) {
return c.join(" ");
}
function setLocalStorageArray(key, value) {
let val = window.localStorage.getItem(key);
if (val) {
try {
val = JSON.parse(val);
window.localStorage.setItem(
key,
JSON.stringify(Array.from(new Set([...val, value.toLowerCase()])))
);
} catch {
return Error("target is not object");
}
} else {
window.localStorage.setItem(key, `["${value.toLowerCase()}"]`);
}
}
function findLocalStorageArray(key, value) {
let val = window.localStorage.getItem(key);
if (val) {
try {
val = JSON.parse(val);
let reg = new RegExp(value, "i");
return val.filter((i) => reg.test(i));
} catch (r) {
return Error("target is not object", r);
}
} else {
return Error("target is not object");
}
}
function DeleteLocalStorageArray(key, value) {
let val = window.localStorage.getItem(key);
if (val) {
try {
      val = JSON.parse(val);
      // compare case-insensitively and re-serialize so the stored value stays valid JSON
      return window.localStorage.setItem(
        key,
        JSON.stringify(val.filter((i) => i.toLowerCase() !== value.toLowerCase()))
      );
} catch (r) {
return Error("target is not object", r);
}
} else {
return Error("target is not object");
}
}
export { setLocalStorageArray, findLocalStorageArray, DeleteLocalStorageArray };
|
'use strict';
Object.defineProperty(exports, "__esModule", { value: true });
const md_file_converter_1 = require("md-file-converter");
const model_impl_1 = require("./model-impl");
function makeUnConfiguredMapParsedDocument({ marked, getSlug }) {
return (conf) => {
return (mdParsedDocument) => {
function parseWithMarked(tokens) {
tokens.links = Object.create(null);
return marked.parser(tokens, conf.markedOptions);
}
if (mdParsedDocument.documentPaths.basename === 'SUMMARY') {
return md_file_converter_1.TargetDocument.createTargetDocument({
documentPaths: mdParsedDocument.documentPaths,
transformedData: marked.parser(mdParsedDocument.parsedTokensList, conf.markedOptions),
fmMetaData: mdParsedDocument.fmMetaData
});
}
else {
const mdParsedDocumentImpl = mdParsedDocument;
const qaFmMetaData = mdParsedDocumentImpl.fmMetaData;
const questionTitleToken = mdParsedDocumentImpl.questionTitleToken[0];
const qaContent = parseWithMarked(mdParsedDocumentImpl.parsedTokensList);
const qaTitleText = questionTitleToken.text;
const qaTitleTag = parseWithMarked(mdParsedDocumentImpl.questionTitleToken);
const sectionTitle = parseWithMarked(mdParsedDocumentImpl.sectionTitleToken);
const slugifiedQaName = getSlug(qaTitleText
.replace('<i>', '')
.replace('</i>', ''), { lang: 'fr' });
const slugifiedSectionName = getSlug(sectionTitle
.replace('<i>', '')
.replace('</i>', ''), { lang: 'fr' });
const transformedData = `<QA create_date="${qaFmMetaData.getCreateDate()}" last_update="${qaFmMetaData.getLastUpdateDate()}" name="${slugifiedQaName}">${qaTitleTag}<author name="${qaFmMetaData.author}"/><keywords>${qaFmMetaData.keywords}</keywords><answer>${qaContent}</answer></QA>`;
return model_impl_1.TargetDocumentImpl.createTargetDocumentImpl(md_file_converter_1.TargetDocument.createTargetDocument({
documentPaths: mdParsedDocumentImpl.documentPaths,
transformedData,
fmMetaData: qaFmMetaData
}), slugifiedQaName, slugifiedSectionName, sectionTitle);
}
};
};
}
exports.makeUnConfiguredMapParsedDocument = makeUnConfiguredMapParsedDocument;
|
# License: MIT
# Dynamic CGI serving using dynamic path imports for
# CGI supporting executable for Interpreted languages Embedded Distribution
# Contribution: 2018 Ganesh K. Bhat <[email protected]>
import os
dictions = {}
def return_statement():
for k, v in os.environ.items():
dictions[k] = v
return str(dictions)
print(return_statement())
print("Python Version of the page")
|
/**
* Created by zhangxiaojing on 2017/12/12.
*/
import React, {Component} from 'react';
import {connect} from 'react-redux';
import { Link } from 'react-router-dom';
import {Pagination} from 'nl-design';
import {ROOT_AVATAR} from '../actions/types';
import {fetchUserStrategy} from '../actions/strategy';
// import the ECharts core module
import echarts from 'echarts/lib/echarts';
// import the line chart
import 'echarts/lib/chart/line';
// import the tooltip component
import 'echarts/lib/component/tooltip';
class StrategyMy extends Component{
constructor(props) {
super(props);
this.state={
index:0,
pageNum:1,
pageSize:10,
desc:'id'
};
}
componentWillMount() {
const userId= localStorage.getItem('userId');
const pageNum=this.state.pageNum;
const pageSize=this.state.pageSize;
const desc=this.state.desc;
this.props.fetchUserStrategy({pageSize, pageNum, desc, userId});
}
handlePagination(pageNum) {
const userId= localStorage.getItem('userId');
const pageSize=this.state.pageSize;
const desc=this.state.desc;
        this.props.fetchUserStrategy({pageSize, pageNum, desc, userId});
}
componentDidUpdate() {
this.props.strategy_user.data.map((item, i)=> {
let myChart2 = echarts.init(document.getElementById(`main2${i}`));
let dataX=item.earningData.time;
let data=item.earningData.data;
let csiData=item.earningData.csiData;
            // render the chart
myChart2.setOption({
tooltip: {
trigger: 'axis',
formatter: '{b}<br/>{a0} {c0}%<br/>{a1} {c1}%'
},
grid: {
                    left: '3%', // distance between the chart and the container border
right: '4%',
top: '10%',
bottom: '0%',
containLabel: true
},
backgroundColor:"rgba(233, 240, 249, .3)",
xAxis : [
{
type : 'category',
data : dataX,
axisLine:{
lineStyle:{
color: "gainsboro"
}
},
axisLabel: {
show: true,
textStyle: {
color: '#252535'
}
}
}
],
yAxis : [
{
type : 'value',
axisLabel: {
formatter: '{value}%',
show: true,
textStyle: {
color: '#252535'
}
},
axisLine:{
lineStyle:{
color: "gainsboro"
}
}
}
],
series : [
{
name:'收益',
type:'line',
smooth:true,
itemStyle: {
normal: {
lineStyle:{
color:'rgb(170, 70, 67)'
}
}
},
data:data
},
{
name:'沪深300',
type:'line',
smooth:true,
itemStyle: {
normal: {
lineStyle:{
color:'rgb(69, 114, 167)'
}
}
},
data: csiData
}
]
});
});
}
handleClick (index) {
this.setState({index});
}
renderSelect(){
const selectItem=[
{title:"我的分享"},
{title:"我的订阅"}
];
return selectItem.map((item, index)=>{
return(
<li className={ index === this.state.index ? "active" : ""} onClick={ this.handleClick.bind(this, index)} key={index}>{item.title}</li>
);
});
}
renderTags(item){
return item.tags.map((item, index)=>{
return(
<div className="g-my-10" key={index}>
<span className="strategy-choiceness-tip g-px-5 g-py-5 g-mr-10">{item.tagName}</span>
</div>
);
});
}
renderList(){
const data= this.props.strategy_user && this.props.strategy_user.data;
if(data == null ||data == '' ){
return(
<div className="text-center h3 col-sm-12 g-py-10">
暂无数据
</div>
);
}
return this.props.strategy_user.data.map((item, index)=>{
return(
<li className="strate-all-content-item clearfix g-mt-20" key={index}>
<Link to={`/strategy/details/${item.id}`}>
<div className="col-sm-2 text-center">
<img style={{width:"70%"}} className="g-mt-30" src="/public/img/u158.png" alt=""/>
<span className="rank">{item.rank}</span>
</div>
<div className="col-sm-5">
<div className="strategy-choiceness-item clearfix" style={{padding:"20px 0"}}>
<div className="strategy-choiceness-title">
<span className="h4">{item.title}</span>
{this.renderTags(item)}
<div className="g-py-10 strategy-choiceness-user">
<div className="photo">
<img src={`${ROOT_AVATAR}/${item.imageUrl}`} alt=""/>
<span className="g-pl-5">{item.loginname}</span>
</div>
<span className="strategy-choiceness-title-line"></span>
</div>
<div className="strategy-choiceness-number row g-pt-10 text-center" style={{fontSize:"12px"}}>
<div className="col-sm-3" style={{padding:0}}>
<h5 className="g-pt-5">{(item.totalReturn).toFixed(2)}%</h5>
<h5 className="g-pt-5">累计收益</h5>
</div>
<div className="col-sm-3" style={{padding:0}}>
<h5 className="g-pt-5">{(item.annualizedReturn).toFixed(2)}%</h5>
<h5 className="g-pt-5">年化收益</h5>
</div>
<div className="col-sm-3" style={{padding:0}}>
<h5 className="g-pt-5">{(item.maxDrawdown).toFixed(2)}%</h5>
<h5 className="g-pt-5">最大回撤</h5>
</div>
<div className="col-sm-3" style={{padding:0}}>
<h5 className="g-pt-5">{new Date(item.beginTime).toLocaleDateString()}</h5>
<h5 className="g-pt-5">开始时间</h5>
</div>
</div>
</div>
</div>
</div>
<div className="col-sm-5">
<div className="strategy-chart g-mt-20" id={`main2${index}`} style={{height:"190px", width:"280px"}}>
<span className="loading"></span>
</div>
</div>
</Link>
</li>
);
});
}
render(){
const data= this.props.strategy_user && this.props.strategy_user.data;
const totalNum = this.props.strategy_user && this.props.strategy_user.rowCount;
return(
<div className="strategy-all-content clearfix">
<div className="strategy-all-content-filtrate g-py-20 clearfix">
<ul>
{this.renderSelect()}
</ul>
</div>
<div className="clearfix">
<ul className="clearfix">
{this.renderList()}
</ul>
</div>
<div className="g-my-30">
{data==null || data==''?'':<Pagination defaultPageSize={this.state.pageSize} total={totalNum} onChange={e => this.handlePagination(e)}/> }
</div>
</div>
);
}
}
function mapStateToProps(state) {
return {
strategy_user:state.strategy.strategy_user
};
}
export default connect(mapStateToProps, {fetchUserStrategy})(StrategyMy); |
// Article detail API class
import requestCore from '@/api/request-core'
class detailApi extends requestCore {
  // PBoC credit report submission endpoint
  creditRequest (arg, ...other) {
    this.post('Api/workplatform/creditrequestsubmit', arg)
  }
  // risk-control first-review detail endpoint
  getArtificalOneDetail (arg, ...other) {
    this.get('Api/workplatform/getworkinfo/', arg)
  }
  // application return endpoint
  giveup (arg, ...other) {
    this.post('Api/workflow/giveup', arg)
  }
  // credit-inquiry detail endpoint
  getInquireDetail (arg, ...other) {
    this.get('Api/workplatform/getworkinfo', arg)
  }
  // credit-inquiry processing endpoint
  submitInquire(arg, ...other){
    this.post('Api/workplatform/inquire', arg)
  }
  // audit history
  getAuditProcess(arg, ...other){
    this.get('Api/workplatform/listtasks', arg)
  }
  // submit first-review opinion
  auditView(arg, ...other){
    this.post('Api/workplatform/artificial', arg)
  }
  // fetch images
  getListImgs(arg, ...other) {
    this.get('Api/file/listimages', arg)
  }
  // packaged image download
  getImgDownload(arg, ...other) {
    this.post('Api/file/packagefile/', arg)
  }
  // Word document download
  getCreditDownload(arg, ...other) {
    this.post('system/file/wordcreate/', arg)
  }
  // data entry
  basicInfo(arg, ...other){
    this.post('Api/workplatform/inputrequest', arg)
  }
  // image entry within application lookup
  basicInfoImg(arg, ...other){
    this.post('Api/file/addworkimages', arg)
  }
  // upload endpoint for fetching the form
  uploadForm(arg, ...other){
    this.getH('',arg)
  }
  // order-copy registration endpoint
  copytask(arg, ...other){
    this.post('Api/workplatform/copytask', arg)
  }
  // request remittance
  applyRemittance(arg, ...other){
    this.post('Api/workplatform/applyremittance', arg)
  }
  // remittance review
  moneyAudit(arg, ...other){
    this.post('Api/workplatform/moneyaudit', arg)
  }
  courier (arg, ...other) { // courier registration
    this.post('Api/workplatform/courier', arg)
  }
  gpstask (arg, ...other) { // vehicle GPS registration
    this.post('Api/workplatform/gps', arg)
  }
  mortgagetask (arg, ...other) { // mortgage registration
    this.post('Api/workplatform/mortgage', arg)
  }
  // finance payout
  finance(arg, ...other) {
    this.post('Api/workplatform/finance', arg)
  }
  // repayment confirmation
  returnMoney (arg, ...other) {
    this.post('Api/workplatform/returnmoney', arg)
  }
  // FICO data query
  ficoquery(arg, ...other) {
    this.post('system/workplatform/ficopost', arg)
  }
  // Bairong data query
  bairongquery(arg, ...other) {
    this.post('Api/workplatform/bairongcredit', arg)
  }
  // change-password endpoint
  changePW (arg, ...other) {
    this.post('auth/changepasswordpost', arg)
  }
  // application modification
  changetask(arg, ...other) {
    this.post('Api/workplatform/editwork', arg)
  }
  // application supplement (sales)
  salesSupplement(arg, ...other) {
    this.post('Api/workplatform/salessupplement', arg)
  }
}
export default new detailApi()
|
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { OptionComponent } from './option.component';
import { OptionGroupComponent } from './option-group.component';
import { CheckboxModule } from './../../free/checkbox/checkbox.module';
import { SelectAllOptionComponent } from './select-all-option';
export class MdbOptionModule {
}
MdbOptionModule.decorators = [
{ type: NgModule, args: [{
imports: [CommonModule, CheckboxModule],
declarations: [OptionComponent, SelectAllOptionComponent, OptionGroupComponent],
exports: [OptionComponent, OptionGroupComponent, SelectAllOptionComponent],
},] }
];
|
var { PythonShell } = require("python-shell");
const date = require(`./Time`);
const cron = require("node-cron");
const shell = require("shelljs");
const time = require(`./Time`);
const chalk = require("chalk");
const os = require("os");
const path = require("path");
const fs = require("fs");
function update_bot() {
cron.schedule("00 03 * * *", () => {
console.log(
chalk.green(
`--------------------------------------------------------------------------------------------------`
)
);
console.log(
`Control Tower is updating ! @ ${time.hours()}:${time.minutes()}:${time.secondes()} - ${time.date()}/${time.month()}/${time.year()}`
);
shell.exec("git fetch --all");
shell.exec("git reset --hard origin/master");
console.log(
`Control Tower has updated ! @ ${time.hours()}:${time.minutes()}:${time.secondes()} - ${time.date()}/${time.month()}/${time.year()}`
);
});
}
function data_feed() {
cron.schedule("00 02 * * *", () => {
if (os.hostname() == "raspberrypi") {
options = {
pythonPath: "/usr/bin/python3",
mode: "text",
};
} else {
options = {
mode: "text",
};
}
let pyshell = new PythonShell(
path.join(__dirname, "../scrapper/main.py"),
options
);
pyshell.on("message", (res) => {
      var regex = /NewScrapper\/data\/.*\.json/gm; // must be defined before use
      if (res.match(regex)) {
//console.log("File Name : " + res.match(regex));
nameFile = res.replace("File Name : ", "");
} else {
console.log(res);
}
});
pyshell.end((err, code, signal) => {
if (err) {
throw err;
}
if (nameFile != null) {
nameFile = null;
finnish = true;
pyshell = new PythonShell(
path.join(__dirname, "../scrapper/Utils.py"),
options
);
pyshell.on("message", (res) => {
//console.log("File Name 2 : " + res);
});
pyshell.end((err, code, signal) => {
if (err) {
throw err;
}
});
      } else {
        // `message` is not in scope in this cron callback, so log instead of replying
        console.error("Error in commands");
      }
});
});
}
function lm(message) {
function fcdate(data, opt1) {
mydate = new Date(data);
var options = { month: "long" };
var curr_timestamp_now = new Date().getTime();
var curr_timestamp = mydate.getTime();
var curr_date = mydate.getDate();
if (opt1 == false) {
var curr_month = mydate.getMonth();
var curr_year = 1970 - mydate.getFullYear();
} else {
var curr_month = new Intl.DateTimeFormat("fr-FR", options).format(mydate);
var curr_year = mydate.getFullYear();
}
var curr_hour = mydate.getHours();
var curr_minute = mydate.getMinutes();
var curr_second = mydate.getSeconds();
return [
mydate,
curr_timestamp_now,
curr_timestamp,
curr_date,
curr_month,
curr_year,
curr_hour,
curr_minute,
curr_second,
];
}
const data = JSON.parse(
fs.readFileSync(
path.join(
__dirname,
`../../bot_essentials/compagnon_scripts/lm_data/${message.guild.id}.json`
)
)
);
  var allmember = [];
  var prstmember = [];
for (let [key, value] of message.guild.members.cache) {
allmember.push(key);
}
for (var key in data) {
prstmember.push(key);
}
  for (var j in allmember) {
if (!prstmember.includes(allmember[j])) {
message.channel.send(
`<@!${allmember[j]}>, I have no message registered \n__Please send a message__`
);
}
}
//console.log(data);
//console.log(allmember);
//console.log(prstmember);
  for (var i in data) {
var date = fcdate(data[i], true);
    var t_n = date[1]; // timestamp of now
    var t = date[2]; // timestamp of the message
    var r = date[1] - date[2];
//console.log(t_n);
//console.log(t);
//console.log(r);
//console.log(t_n/60)
//console.log(t/60)
//console.log(r/60)
//console.log(new Date(t_n))
//console.log(new Date(t))
//console.log(new Date(r))
//console.log(message.guild.member(i))
//console.log(i)
//console.log(message.guild.member(i).user.id)
//console.log(message.guild.member(i).nickname)
    var username;
    var mydatestr;
    if (message.guild.member(i) == null) {
      //console.log(message.guild.member(i))
    } else if (message.guild.member(i).nickname != null) {
      username = message.guild.member(i).nickname;
      mydatestr = `${username}(${i}) is ok`;
    } else {
      username = message.guild.member(i).user.username;
      mydatestr = `${username}(${i}) is ok`;
    }
    if (r >= 1296000000) {
      //console.log("Matched 15")
      var date = fcdate(date[1] - date[2], false);
      //console.log(`D:${date[3]}\nM:${date[4]}\nY:${date[5]}`)
      // report the largest elapsed unit first: years, then months, then days
      // (the original paired each condition with the wrong unit in the message)
      if (date[5] > 0) {
        var mydatestr = `${username} sent his last message a long long time ago (${date[5]} Year(s) >= 15 Days)`;
      } else if ((date[4] <= 12) && (date[4] > 0)) {
        var mydatestr = `${username} sent his last message a long long time ago (${date[4]} Month(s) >= 15 Days)`;
      } else if (date[3] <= 31) {
        var mydatestr = `${username} sent his last message a long long time ago (${date[3]} Day(s) >= 15 Days)`;
      }
      console.log(`D:${date[3]} M:${date[4]} Y:${date[5]} ${username} `);
      //message.channel.send(`${mydatestr}`);
    } else if (r >= 604800000) {
      //console.log("Matched 7")
      var date = fcdate(r, false);
      //console.log(`D:${date[3]}`)
      if (date[3] <= 31) {
        var mydatestr = `${username} sent his last message a long time ago (${date[3]} Day(s) >= 7 Days)`;
      }
      console.log(`D:${date[3]} M:${date[4]} Y:${date[5]} ${username} `);
      message.channel.send(`${mydatestr}`);
    }
//console.log(mydatestr);
}
}
module.exports = { update_bot, data_feed, lm };
/*
let pyshell = new PythonShell("NewScrapper/main.py");
dest = client.channels.cache.get(`802199511102783509`);
//client.channels.cache.get(`719108278461923369`).send();
dest.send(`📚 | D A T A I N C O M I N G`);
// sends a message to the Python script via stdin
// pyshell.send('hello');
pyshell.on("message", function (message) {
// received a message sent from the Python script (a simple "print" statement)
console.log(message);
});
// end the input stream and allow the process to exit
pyshell.end(function (err, code, signal) {
if (err) {
throw err;
}
//console.log('');
//console.log('The exit code was: ' + code);
//console.log('The exit signal was: ' + signal);
//console.log('');
console.log("execution of the python script is finished");
console.log("sending the file");
dest.send(
`📚 | D A T A :arrow_right: ${date.date()}-${date.month()}-${date.year()}`,
{
files: [
`NewScrapper/data/${date.date()}-${date.month()}-${date.year()}.json`,
],
}
);
});
console.log(`The command "am2d" was automatically & succesfully used `);
});
}
*/
|
const mongoose = require('mongoose')
const Schema = mongoose.Schema
const PlantSchema = new Schema({
name: {
type: String,
required: true,
unique: true,
},
litersPerWatering: {
type: Number,
required: true,
},
})
module.exports = mongoose.model('Plant', PlantSchema)
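// --- illustration only, not part of the original file ---
// A minimal usage sketch, assuming an existing mongoose connection;
// the plant data below is made up for demonstration.
async function examplePlantUsage() {
  const Plant = mongoose.model('Plant')
  const basil = await Plant.create({ name: 'Basil', litersPerWatering: 0.5 })
  console.log(`${basil.name} needs ${basil.litersPerWatering} L per watering`)
}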
|
const mongoose = require("mongoose");
const Schema = mongoose.Schema;
//Create Schema
const AvatarSchema = new Schema({
user: {
type: Schema.Types.ObjectId,
ref: "users"
},
avatar: {
type: String,
required: true
}
});
module.exports = Avatar = mongoose.model("avatars", AvatarSchema);
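// --- illustration only, not part of the original file ---
// A minimal sketch of reading an avatar with its user reference populated;
// it assumes a "users" model is registered elsewhere in the app.
async function exampleAvatarLookup(userId) {
  return Avatar.findOne({ user: userId }).populate("user");
}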
|
from math import hypot
import plotly.plotly as py
import pandas as pd
DEGREE_COLORS = {
'unknown': 'lightgreen',
'ms/phd': 'gold',
'phd': 'coral',
'ms': 'royalblue',
'undergrad': 'sienna'}
def _find_closest_city(cities, coord, max_distance=1):
closest_city = None
closest_distance = None
for city, city_coord in cities.items():
distance = hypot(city_coord[0] - coord[0], city_coord[1] - coord[1])
if distance < max_distance and (not closest_distance or distance < closest_distance):
closest_city = city
closest_distance = distance
return closest_city
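# --- illustration only, not part of the original file ---
# A quick sketch of the helper above with made-up coordinates: (0.2, 0.1)
# is within max_distance of city 'A' and far from 'B'.
# >>> _find_closest_city({'A': (0.0, 0.0), 'B': (3.0, 4.0)}, (0.2, 0.1))
# 'A'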
def plot_degree_count_city_bar_chart(jobs, city_coords, include_others=False):
"""
Plot a bar chart of all job degree requierments for cities
:param jobs: Dataframe with degree requirement information
:param city_coords: Dict of city names to their gps coordinates
:param include_others: Boolean to include jobs that aren't near cities as their own category
:return:
"""
degree_city = pd.DataFrame()
degree_city['degree_classification'] = jobs['degree_classification']
# Find the closest city to all job gps coordinates
degree_city['closest_city'] =\
jobs[['latitude', 'longitude']]\
.apply(lambda row: _find_closest_city(city_coords, row), axis=1)
if include_others:
degree_city['closest_city'] = degree_city['closest_city'].fillna('others')
# Get the total number of jobs for each city
degree_city['total'] = degree_city.groupby(['closest_city'])['degree_classification'].transform('count')
# Get the number of degrees per city per degree
degree_city_counts = degree_city.groupby(['degree_classification', 'closest_city']).size()
degree_city = degree_city.drop_duplicates().set_index(['degree_classification', 'closest_city'])
degree_city['count'] = degree_city_counts
# The order to show the degrees in the bar chart from left to right
ordered_degrees = [
degree for degree in ['undergrad', 'ms', 'ms/phd', 'phd', 'unknown'] if degree in degree_city['count']]
# Sort the bar graph from most to least number of jobs from top to bottom
degree_city = degree_city.sort_values('total')
# Prepare the data for the bar graph
plt_data = []
for degree in ordered_degrees:
plt_data.append({
'x': degree_city['count'][degree].index,
'y': degree_city['count'][degree],
'name': degree,
'orientation': 'v',
'marker': {'color': DEGREE_COLORS[degree]},
'type': 'bar'})
py.plot({'data': plt_data, 'layout': {'barmode': 'stack'}})
def plot_degree_map(jobs):
"""
Plot the degrees on a map of the United States
:param jobs: Dataframe with degree requirement information
:return:
"""
layout = {
        'title': 'Job Degree Requirements',
'showlegend': True,
'geo': {
'scope': 'usa',
'projection': {'type': 'albers usa'},
'showland': True,
'showlakes': True,
'landcolor': 'rgb(212, 212, 212)',
'subunitcolor': 'rgb(255, 255, 255)',
'countrycolor': 'rgb(255, 255, 255)',
'lakecolor': 'rgb(255, 255, 255)'}}
# Prepare data for the map
plot_data = []
for degree, data in jobs.groupby('degree_classification'):
plot_data.append({
'name': degree,
'type': 'scattergeo',
'locationmode': 'USA-states',
'lon': data['longitude'],
'lat': data['latitude'],
'text': data['jobtitle'],
'marker': {'color': DEGREE_COLORS[degree]}})
py.plot({'data': plot_data, 'layout': layout})
def plot_jobs_not_in_city_for_degree_requierments(jobs, city_coords):
"""
Plot jobs that are not near <city_coords>
:param jobs: Dataframe with degree requirement information
:param city_coords: Dict of city names to their gps coordinates
:return:
"""
layout = {
        'title': 'Job Degree Requirements',
'showlegend': True,
'geo': {
'scope': 'usa',
'projection': {'type': 'albers usa'},
'showland': True,
'showlakes': True,
'landcolor': 'rgb(212, 212, 212)',
'subunitcolor': 'rgb(255, 255, 255)',
'countrycolor': 'rgb(255, 255, 255)',
'lakecolor': 'rgb(255, 255, 255)'}}
# Drop jobs that are not with in a city
noncity_jobs = jobs[
jobs[['latitude', 'longitude']].apply(lambda row: not _find_closest_city(city_coords, row), axis=1)]
# Prepare data for the map
plot_data = []
for degree, job in noncity_jobs.groupby('degree_classification'):
plot_data.append({
'name': degree,
'type': 'scattergeo',
'locationmode': 'USA-states',
'lat': job['latitude'],
'lon': job['longitude'],
'marker': {'color': DEGREE_COLORS[degree]}})
py.plot({'data': plot_data, 'layout': layout})
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ListObjectsCommand = void 0;
const models_0_1 = require("../models/models_0");
const Aws_restXml_1 = require("../protocols/Aws_restXml");
const middleware_bucket_endpoint_1 = require("@aws-sdk/middleware-bucket-endpoint");
const middleware_serde_1 = require("@aws-sdk/middleware-serde");
const smithy_client_1 = require("@aws-sdk/smithy-client");
/**
* <p>Returns some or all (up to 1,000) of the objects in a bucket. You can use the request
* parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK
* response can contain valid or invalid XML. Be sure to design your application to parse the
* contents of the response and handle it appropriately.</p>
* <important>
* <p>This action has been revised. We recommend that you use the newer version, <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html">ListObjectsV2</a>, when developing applications. For backward compatibility,
* Amazon S3 continues to support <code>ListObjects</code>.</p>
* </important>
*
*
* <p>The following operations are related to <code>ListObjects</code>:</p>
* <ul>
* <li>
* <p>
* <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html">ListObjectsV2</a>
* </p>
* </li>
* <li>
* <p>
* <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
* </p>
* </li>
* <li>
* <p>
* <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
* </p>
* </li>
* <li>
* <p>
* <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
* </p>
* </li>
* <li>
* <p>
* <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html">ListBuckets</a>
* </p>
* </li>
* </ul>
*/
class ListObjectsCommand extends smithy_client_1.Command {
// Start section: command_properties
// End section: command_properties
constructor(input) {
// Start section: command_constructor
super();
this.input = input;
// End section: command_constructor
}
/**
* @internal
*/
resolveMiddleware(clientStack, configuration, options) {
this.middlewareStack.use(middleware_serde_1.getSerdePlugin(configuration, this.serialize, this.deserialize));
this.middlewareStack.use(middleware_bucket_endpoint_1.getBucketEndpointPlugin(configuration));
const stack = clientStack.concat(this.middlewareStack);
const { logger } = configuration;
const clientName = "S3Client";
const commandName = "ListObjectsCommand";
const handlerExecutionContext = {
logger,
clientName,
commandName,
inputFilterSensitiveLog: models_0_1.ListObjectsRequest.filterSensitiveLog,
outputFilterSensitiveLog: models_0_1.ListObjectsOutput.filterSensitiveLog,
};
const { requestHandler } = configuration;
return stack.resolve((request) => requestHandler.handle(request.request, options || {}), handlerExecutionContext);
}
serialize(input, context) {
return Aws_restXml_1.serializeAws_restXmlListObjectsCommand(input, context);
}
deserialize(output, context) {
return Aws_restXml_1.deserializeAws_restXmlListObjectsCommand(output, context);
}
}
exports.ListObjectsCommand = ListObjectsCommand;
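// --- illustration only, not part of the original file ---
// A minimal sketch of sending this command through an S3 client (AWS SDK v3);
// the region and bucket name are placeholders.
async function exampleListObjects() {
    const { S3Client } = require("@aws-sdk/client-s3");
    const client = new S3Client({ region: "us-east-1" });
    const output = await client.send(new ListObjectsCommand({ Bucket: "my-bucket" }));
    return output.Contents;
}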
//# sourceMappingURL=ListObjectsCommand.js.map |
export const getPosts = async (req, res, next) => {
const posts = [
{
id: 1,
title: 'Hello World!',
body: 'This is a hello world post',
},
{
id: 2,
title: 'Another Post!',
body: 'This is a another post',
},
];
return res.status(200).json({
data: {
success: true,
posts,
},
});
};
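// --- illustration only, not part of the original file ---
// A minimal sketch of wiring the controller above into an Express router;
// the '/posts' path is an assumption.
import { Router } from 'express';
export const examplePostsRouter = Router().get('/posts', getPosts);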
|
"use strict";
var LangSelect = /** @class */ (function () {
    // constructor method
function LangSelect(info_list) {
        this.info_list = info_list; // stores the array of Maps
        if (this.check_browser_cookie() == false) { // no cookie marking a previous rejection is set in the browser
            var prop = this.check_info_list(); // pull the entry from info_list that matches the user's primary language
            if (prop != null) { // if one was found
                this.recommend_site_change(prop); // display the HTML suggesting a site change
}
}
}
    // method that displays the HTML suggesting a site change
LangSelect.prototype.recommend_site_change = function (prop) {
var _this = this;
var body = document.getElementsByTagName("body")[0]; // id名で要素を抽出
// HTML挿入
body === null || body === void 0 ? void 0 : body.insertAdjacentHTML("beforeend", "\n<div class=\"lang-select\">\n <div class=\"message\">\n <p>" + prop["message"] + "</p>\n </div>\n <div class=\"change-site\">\n <a href=" + prop["url"] + ">" + prop["btn_message"] + "</a>\n </div>\n <div class=\"reject-button\">\n <button></button>\n </div>\n</div>");
var lang_select = document.getElementsByClassName("lang-select")[0];
        var div_change_site = lang_select.getElementsByTagName("div")[2]; // grab the reject-button DOM node
        div_change_site === null || div_change_site === void 0 ? void 0 : div_change_site.addEventListener('click', function () { _this.reject_recomend_event(); }); // add a click handler that calls this.reject_recomend_event
};
    // searches info_list for an entry whose language matches the browser's primary language
    // returns the matching entry if one exists, otherwise null
LangSelect.prototype.check_info_list = function () {
for (var _i = 0, _a = this.info_list; _i < _a.length; _i++) {
var info = _a[_i];
// navigator.language.split("-")[0]は言語コードと国コードが連結されている場合に言語コードのみを取り出す処理
if (info["lang"] == navigator.language.split("-")[0]) {
return info;
}
}
return null;
};
    // checks whether the cookie is present in the browser
    // returns true if the cookie (LangSelectRejectRecomend) exists, false otherwise
LangSelect.prototype.check_browser_cookie = function () {
        var cookie_list = document.cookie; // read the stored cookies
        var cookies = cookie_list.split(';'); // split on ';' into the cookies array
for (var _i = 0, cookies_1 = cookies; _i < cookies_1.length; _i++) {
var cookie = cookies_1[_i];
if (cookie.replace(/\s+/g, "") == "LangSelectRejectRecomend=true") {
return true;
}
}
return false;
};
    // sets the cookie in the browser
    // the expiry is currently one minute
LangSelect.prototype.embedding_cookie = function () {
var now = new Date();
now.setMinutes(now.getMinutes() + 1);
document.cookie = "LangSelectRejectRecomend=true;expires=" + now.toUTCString() + ";Path=/"; // クッキーはサイト全体で有効
};
    // removes the HTML that is being displayed
LangSelect.prototype.remove_lang_select = function () {
var _a;
var lang_select = document.getElementsByClassName("lang-select")[0];
        // remove the created <div class="lang-select"> element entirely
(_a = lang_select === null || lang_select === void 0 ? void 0 : lang_select.parentNode) === null || _a === void 0 ? void 0 : _a.removeChild(lang_select);
};
    // called when the dismiss button on the suggestion is pressed
LangSelect.prototype.reject_recomend_event = function () {
        this.embedding_cookie(); // set the cookie
        this.remove_lang_select(); // remove the suggestion HTML
};
return LangSelect;
}());
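// --- illustration only, not part of the original file ---
// A minimal sketch of instantiating the class above; the URL and texts are
// placeholders, but the property names (lang, message, url, btn_message)
// are the ones read by recommend_site_change. Left commented out because
// construction immediately touches the DOM and cookies.
// var selector = new LangSelect([
//     { lang: "en", message: "This site is also available in English.", url: "https://example.com/en/", btn_message: "Go to the English site" }
// ]);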
|
/* jshint node: true */
module.exports = function(environment) {
var ENV = {
modulePrefix: 'ui',
environment: environment,
baseURL: '/',
locationType: 'auto',
apiUrl: '/fixtures/data.json',
EmberENV: {
FEATURES: {
// Here you can enable experimental features on an ember canary build
// e.g. 'with-controller': true
}
},
APP: {
// Here you can pass flags/options to your application instance
// when it is created
}
};
if (environment === 'development') {
// ENV.APP.LOG_RESOLVER = true;
ENV.APP.LOG_ACTIVE_GENERATION = true;
// ENV.APP.LOG_TRANSITIONS = true;
// ENV.APP.LOG_TRANSITIONS_INTERNAL = true;
ENV.APP.LOG_VIEW_LOOKUPS = true;
}
if (environment === 'test') {
// Testem prefers this...
ENV.baseURL = '/';
ENV.locationType = 'auto';
// keep test console output quieter
ENV.APP.LOG_ACTIVE_GENERATION = false;
ENV.APP.LOG_VIEW_LOOKUPS = false;
ENV.APP.rootElement = '#ember-testing';
}
if (environment === 'production') {
ENV.apiUrl = '/data';
}
return ENV;
};
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide **PEP-compliant type hint tester** unit tests.
This submodule unit tests the public API of the private
:mod:`beartype._util.hint.pep.utilpeptest` submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from pytest import raises
# ....................{ TESTS }....................
# Fine-grained tests are intentionally performed *BEFORE* coarse-grained tests,
# dramatically improving readability of test failures.
# ....................{ TESTS ~ kind : typevar }....................
def test_is_hint_pep_typevars() -> None:
'''
Test the
:func:`beartype._util.hint.pep.utilpeptest.is_hint_pep_typevars`
tester.
'''
# Defer heavyweight imports.
from beartype._util.hint.pep.utilpeptest import is_hint_pep_typevars
from beartype_test.a00_unit.data.hint.data_hint import HINTS_NONPEP
from beartype_test.a00_unit.data.hint.pep.data_pep import (
HINTS_PEP_META)
# Assert that various "TypeVar"-centric types are correctly detected.
for hint_pep_meta in HINTS_PEP_META:
assert is_hint_pep_typevars(hint_pep_meta.hint) is (
hint_pep_meta.is_typevars)
# Assert that various "TypeVar"-agnostic types are correctly detected.
for nonhint_pep in HINTS_NONPEP:
assert is_hint_pep_typevars(nonhint_pep) is False
# ....................{ TESTS ~ typing }....................
def test_is_hint_pep_typing() -> None:
'''
Test the
:func:`beartype._util.hint.pep.utilpeptest.is_hint_pep_typing` tester.
'''
# Defer heavyweight imports.
from beartype._util.hint.pep.utilpeptest import (
is_hint_pep_typing)
from beartype_test.a00_unit.data.hint.data_hint import NOT_HINTS_PEP
from beartype_test.a00_unit.data.hint.pep.data_pep import (
HINTS_PEP_META)
# Assert this tester accepts PEP-compliant type hints defined by the
# "typing" module.
for hint_pep_meta in HINTS_PEP_META:
assert is_hint_pep_typing(hint_pep_meta.hint) is (
hint_pep_meta.is_typing)
# Assert this tester rejects non-PEP-compliant type hints.
for not_hint_pep in NOT_HINTS_PEP:
assert is_hint_pep_typing(not_hint_pep) is False
def test_is_hint_pep_type_typing() -> None:
'''
Test the
:func:`beartype._util.hint.pep.utilpeptest.is_hint_pep_type_typing`
tester.
'''
# Defer heavyweight imports.
from beartype._util.hint.pep.utilpeptest import (
is_hint_pep_type_typing)
from beartype_test.a00_unit.data.hint.data_hint import NOT_HINTS_PEP
from beartype_test.a00_unit.data.hint.pep.data_pep import (
HINTS_PEP_META)
# Assert this tester accepts PEP-compliant type hints defined by the
# "typing" module.
for hint_pep_meta in HINTS_PEP_META:
assert is_hint_pep_type_typing(hint_pep_meta.hint) is (
hint_pep_meta.is_type_typing)
# Assert this tester rejects non-PEP-compliant type hints.
for not_hint_pep in NOT_HINTS_PEP:
assert is_hint_pep_type_typing(not_hint_pep) is False
# ....................{ TESTS }....................
def test_is_hint_pep() -> None:
'''
Test the :func:`beartype._util.hint.pep.utilpeptest.is_hint_pep`
tester.
'''
# Defer heavyweight imports.
from beartype._util.hint.pep.utilpeptest import is_hint_pep
from beartype_test.a00_unit.data.hint.data_hint import NOT_HINTS_PEP
from beartype_test.a00_unit.data.hint.nonpep.data_nonpep import (
HINTS_NONPEP_META)
from beartype_test.a00_unit.data.hint.pep.data_pep import HINTS_PEP_META
# Assert this tester accepts PEP-compliant type hints.
for hint_pep_meta in HINTS_PEP_META:
assert is_hint_pep(hint_pep_meta.hint) is True
# Assert this tester rejects PEP-noncompliant type hints implemented by the
# "typing" module as normal types indistinguishable from non-"typing" types
# and thus effectively non-PEP-compliant for all practical intents.
for hint_nonpep_meta in HINTS_NONPEP_META:
assert is_hint_pep(hint_nonpep_meta.hint) is False
# Assert this tester rejects non-PEP-compliant type hints.
for not_hint_pep in NOT_HINTS_PEP:
assert is_hint_pep(not_hint_pep) is False
def test_is_hint_pep_args() -> None:
'''
Test the
:func:`beartype._util.hint.pep.utilpeptest.is_hint_pep_args`
tester.
'''
# Defer heavyweight imports.
from beartype._util.hint.pep.utilpeptest import is_hint_pep_args
from beartype_test.a00_unit.data.hint.data_hint import NOT_HINTS_PEP
from beartype_test.a00_unit.data.hint.pep.data_pep import (
HINTS_PEP_META)
# Assert this tester accepts PEP-compliant subscripted type hints.
for hint_pep_meta in HINTS_PEP_META:
assert is_hint_pep_args(hint_pep_meta.hint) is (
hint_pep_meta.is_args)
# Assert this tester rejects non-PEP-compliant type hints.
for not_hint_pep in NOT_HINTS_PEP:
assert is_hint_pep_args(not_hint_pep) is False
#FIXME: Implement us up, please.
# def test_is_hint_pep_uncached() -> None:
# '''
# Test the
# :func:`beartype._util.hint.pep.utilpeptest.is_hint_pep_uncached`
# tester.
# '''
#
# # Defer heavyweight imports.
# from beartype._util.hint.pep.utilpeptest import is_hint_pep_uncached
# from beartype._util.py.utilpyversion import IS_PYTHON_AT_LEAST_3_9
# from beartype_test.a00_unit.data.hint.data_hint import NOT_HINTS_PEP
# from beartype_test.a00_unit.data.hint.pep.data_pep import (
# HINTS_PEP_META)
#
# # Assert this tester accepts concrete PEP-compliant type hints.
# for hint_pep_meta in HINTS_PEP_META:
# # True only if we expect this hint to be non-self-cached, including.
# is_hint_pep_uncached_expected = (
# # If th
# hint_pep_meta.is_pep585_builtin or
# (
# IS_PYTHON_AT_LEAST_3_9 and
# hint_pep_meta
# )
# )
#
# assert is_hint_pep_uncached(hint_pep_meta.hint) is (
# is_hint_pep_uncached_expected)
#
# # Assert this tester accepts non-PEP-compliant type hints. What? Look,
# # folks. This tester should probably raise an exception when passed those
# # sort of hints, but this tester *CANNOT* by definition be memoized, which
# # means it needs to be fast despite being unmemoized, which means we treat
# # *ALL* objects other than a small well-known subset of non-self-cached
# # PEP-compliant type hints as self-cached PEP-compliant type hints. *shrug*
# for not_hint_pep in NOT_HINTS_PEP:
# assert is_hint_pep_uncached(not_hint_pep) is True
# ....................{ TESTS ~ supported }....................
def test_is_hint_pep_supported() -> None:
'''
Test the
:func:`beartype._util.hint.pep.utilpeptest.is_hint_pep_supported`
tester.
'''
# Defer heavyweight imports.
from beartype._util.hint.pep.utilpeptest import is_hint_pep_supported
from beartype_test.a00_unit.data.hint.data_hint import (
NOT_HINTS_UNHASHABLE, NOT_HINTS_PEP)
from beartype_test.a00_unit.data.hint.pep.data_pep import (
HINTS_PEP_META)
# Assert this tester:
# * Accepts supported PEP-compliant type hints.
# * Rejects unsupported PEP-compliant type hints.
for hint_pep_meta in HINTS_PEP_META:
assert is_hint_pep_supported(hint_pep_meta.hint) is (
hint_pep_meta.is_supported)
# Assert this tester rejects objects that are *NOT* PEP-noncompliant.
for not_hint_pep in NOT_HINTS_PEP:
assert is_hint_pep_supported(not_hint_pep) is False
# Assert this tester rejects unhashable objects.
for non_hint_unhashable in NOT_HINTS_UNHASHABLE:
assert is_hint_pep_supported(non_hint_unhashable) is False
def test_die_if_hint_pep_unsupported() -> None:
'''
Test the
:func:`beartype._util.hint.pep.utilpeptest.die_if_hint_pep_unsupported`
validator.
'''
# Defer heavyweight imports.
from beartype.roar import (
BeartypeDecorHintPepException,
BeartypeDecorHintPepUnsupportedException,
)
from beartype._util.hint.pep.utilpeptest import (
die_if_hint_pep_unsupported)
from beartype_test.a00_unit.data.hint.data_hint import (
NOT_HINTS_UNHASHABLE, NOT_HINTS_PEP)
from beartype_test.a00_unit.data.hint.pep.data_pep import (
HINTS_PEP_META)
# Assert this validator...
for hint_pep_meta in HINTS_PEP_META:
# Accepts supported PEP-compliant type hints.
if hint_pep_meta.is_supported:
die_if_hint_pep_unsupported(hint_pep_meta.hint)
# Rejects unsupported PEP-compliant type hints.
else:
with raises(BeartypeDecorHintPepUnsupportedException):
die_if_hint_pep_unsupported(hint_pep_meta.hint)
# Assert this validator rejects objects that are *NOT* PEP-noncompliant.
for not_hint_pep in NOT_HINTS_PEP:
with raises(BeartypeDecorHintPepException):
die_if_hint_pep_unsupported(not_hint_pep)
# Assert this validator rejects unhashable objects.
for non_hint_unhashable in NOT_HINTS_UNHASHABLE:
with raises(BeartypeDecorHintPepException):
die_if_hint_pep_unsupported(non_hint_unhashable)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkpush.endpoint import endpoint_data
class CompleteContinuouslyPushRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Push', '2016-08-01', 'CompleteContinuouslyPush','cps')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_MessageId(self):
return self.get_query_params().get('MessageId')
def set_MessageId(self,MessageId):
self.add_query_param('MessageId',MessageId)
def get_AppKey(self):
return self.get_query_params().get('AppKey')
def set_AppKey(self,AppKey):
self.add_query_param('AppKey',AppKey) |
// http://127.0.0.1:9001
// http://localhost:9001
//
const accountSid = 'AC667383016263c00b06c5ca55f60aa462';
const authToken = '1588492378244469c5bcda850c1d4045';
const client = require('twilio')(accountSid, authToken);
client.tokens.create({ttl: 3600 * 24 * 10}).then(token => console.log(token));
const fs = require('fs');
const path = require('path');
const url = require('url');
var httpServer = require('http');
const ioServer = require('socket.io');
const RTCMultiConnectionServer = require('./node_scripts/index.js');
var PORT = 9001;
var isUseHTTPs = false;
const jsonPath = {
config: 'config.json',
logs: 'logs.json'
};
const BASH_COLORS_HELPER = RTCMultiConnectionServer.BASH_COLORS_HELPER;
const getValuesFromConfigJson = RTCMultiConnectionServer.getValuesFromConfigJson;
const getBashParameters = RTCMultiConnectionServer.getBashParameters;
var config = getValuesFromConfigJson(jsonPath);
config = getBashParameters(config, BASH_COLORS_HELPER);
// if the user didn't modify the "PORT" value
// then read the value from "config.json"
if(PORT === 9001) {
PORT = config.port;
}
if(isUseHTTPs === false) {
isUseHTTPs = config.isUseHTTPs;
}
function serverHandler(request, response) {
// to make sure we always get valid info from json file
// even if external codes are overriding it
config = getValuesFromConfigJson(jsonPath);
config = getBashParameters(config, BASH_COLORS_HELPER);
response.writeHead(200, {
'Content-Type': 'text/plain'
});
response.write('RTCMultiConnection Socket.io Server.\n\n' + 'https://github.com/muaz-khan/RTCMultiConnection-Server\n\n' + 'npm install RTCMultiConnection-Server');
response.end();
}
var httpApp;
if (isUseHTTPs) {
httpServer = require('https');
// See how to use a valid certificate:
// https://github.com/muaz-khan/WebRTC-Experiment/issues/62
var options = {
key: null,
cert: null,
ca: null
};
var pfx = false;
if (!fs.existsSync(config.sslKey)) {
console.log(BASH_COLORS_HELPER.getRedFG(), 'sslKey:\t ' + config.sslKey + ' does not exist.');
} else {
pfx = config.sslKey.indexOf('.pfx') !== -1;
options.key = fs.readFileSync(config.sslKey);
}
if (!fs.existsSync(config.sslCert)) {
console.log(BASH_COLORS_HELPER.getRedFG(), 'sslCert:\t ' + config.sslCert + ' does not exist.');
} else {
options.cert = fs.readFileSync(config.sslCert);
}
if (config.sslCabundle) {
if (!fs.existsSync(config.sslCabundle)) {
console.log(BASH_COLORS_HELPER.getRedFG(), 'sslCabundle:\t ' + config.sslCabundle + ' does not exist.');
}
options.ca = fs.readFileSync(config.sslCabundle);
}
    if (pfx === true) {
        // the original referenced an undefined "sslKey" variable; reuse the
        // key file already read above as the pfx bundle
        options = {
            pfx: options.key
        };
    }
httpApp = httpServer.createServer(options, serverHandler);
} else {
httpApp = httpServer.createServer(serverHandler);
}
RTCMultiConnectionServer.beforeHttpListen(httpApp, config);
httpApp = httpApp.listen(process.env.PORT || PORT, process.env.IP || "0.0.0.0", function() {
RTCMultiConnectionServer.afterHttpListen(httpApp, config);
});
// --------------------------
// socket.io codes goes below
ioServer(httpApp).on('connection', function(socket) {
RTCMultiConnectionServer.addSocket(socket, config);
// ----------------------
// below code is optional
const params = socket.handshake.query;
if (!params.socketCustomEvent) {
params.socketCustomEvent = 'custom-message';
}
socket.on(params.socketCustomEvent, function(message) {
socket.broadcast.emit(params.socketCustomEvent, message);
});
});
|
'use strict';
const express = require('express');
const Cache = require('json-fetch-cache');
const {
logger, ah, platforms, titleCase, trimPlatform,
} = require('../lib/utilities');
const router = express.Router();
const rivenCaches = {};
const groupRivenData = (cacheStrData) => {
/* istanbul ignore if */ if (!cacheStrData.length) return {};
const stripped = cacheStrData
.replace(/NaN/g, 0)
.replace(/WARNING:.*\n/, '');
const parsed = JSON.parse(stripped);
const byType = {};
parsed.forEach((rivenD) => {
if (rivenD.compatibility === null) {
rivenD.compatibility = `Veiled ${rivenD.itemType}`;
}
rivenD.compatibility = titleCase(rivenD.compatibility.replace('<ARCHWING>', '').trim());
if (!byType[rivenD.itemType]) {
byType[rivenD.itemType] = {};
}
if (!byType[rivenD.itemType][rivenD.compatibility]) {
byType[rivenD.itemType][rivenD.compatibility] = {
rerolled: null,
unrolled: null,
};
}
byType[rivenD.itemType][rivenD.compatibility][rivenD.rerolled ? 'rerolled' : 'unrolled'] = rivenD;
});
return byType;
};
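// --- illustration only, not part of the original file ---
// The grouped shape produced above looks roughly like this (the weapon name
// is made up; itemType, compatibility, rerolled and unrolled are the real keys):
// {
//   'Rifle Riven Mod': {
//     'Soma': { rerolled: { /* riven data */ }, unrolled: { /* riven data */ } },
//   },
// }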
platforms.forEach((platform) => {
const rCache = new Cache(`https://n9e5v4d8.ssl.hwcdn.net/repos/weeklyRivens${platform.toUpperCase()}.json`, 604800000, {
parser: groupRivenData,
logger,
delayStart: true,
});
rCache.startUpdating();
rivenCaches[platform] = rCache;
});
router.use((req, res, next) => {
req.platform = trimPlatform(req.baseUrl);
if (req.platform === 'ns') req.platform = 'swi';
/* istanbul ignore if */
if (!platforms.includes(req.platform)) req.platform = req.header('platform') || 'pc';
next();
});
router.get('/', /* cache('1 week'), */ ah(async (req, res) => {
logger.silly(`Got ${req.originalUrl}`);
const rC = rivenCaches[req.platform];
res.json(await rC.getData());
}));
router.get('/search/:query', /* cache('10 hours'), */ ah(async (req, res) => {
logger.silly(`Got ${req.originalUrl}`);
const { query } = req.params;
const results = {};
const rCache = await rivenCaches[req.platform].getData();
Object.keys(rCache).forEach((type) => {
Object.keys(rCache[type]).forEach((compatibility) => {
if (compatibility.toLowerCase().includes(query.toLowerCase())) {
results[compatibility] = rCache[type][compatibility];
}
});
});
res.setHeader('Content-Language', req.language);
res.json(results);
}));
module.exports = router;
|
(window["webpackJsonpel-algoritmo-del-ritmo"]=window["webpackJsonpel-algoritmo-del-ritmo"]||[]).push([[77],{138:function(e,t){ace.define("ace/mode/matching_brace_outdent",["require","exports","module","ace/range"],(function(e,t,n){"use strict";var r=e("../range").Range,o=function(){};(function(){this.checkOutdent=function(e,t){return!!/^\s+$/.test(e)&&/^\s*\}/.test(t)},this.autoOutdent=function(e,t){var n=e.getLine(t).match(/^(\s*\})/);if(!n)return 0;var o=n[1].length,i=e.findMatchingBracket({row:t,column:o});if(!i||i.row==t)return 0;var a=this.$getIndent(e.getLine(i.row));e.replace(new r(t,0,t,o-1),a)},this.$getIndent=function(e){return e.match(/^\s*/)[0]}}).call(o.prototype),t.MatchingBraceOutdent=o})),ace.define("ace/mode/livescript",["require","exports","module","ace/tokenizer","ace/mode/matching_brace_outdent","ace/mode/text"],(function(e,t,n){var r,o,i,a;r="(?![\\d\\s])[$\\w\\xAA-\\uFFDC](?:(?!\\s)[$\\w\\xAA-\\uFFDC]|-[A-Za-z])*",t.Mode=o=function(t){var n,o=function(e,t){function n(){}n.prototype=(e.superclass=t).prototype,(e.prototype=new n).constructor=e,"function"==typeof t.extended&&t.extended(e);return e}(((function(e,t){var n={}.hasOwnProperty;for(var r in t)n.call(t,r)&&(e[r]=t[r]);return e}(i,t)).displayName="LiveScriptMode",i),t).prototype;function i(){var t;this.$tokenizer=new(e("../tokenizer").Tokenizer)(i.Rules),(t=e("../mode/matching_brace_outdent"))&&(this.$outdent=new t.MatchingBraceOutdent),this.$id="ace/mode/livescript"}return n=RegExp("(?:[({[=:]|[-~]>|\\b(?:e(?:lse|xport)|d(?:o|efault)|t(?:ry|hen)|finally|import(?:\\s*all)?|const|var|let|new|catch(?:\\s*"+r+")?))\\s*$"),o.getNextLineIndent=function(e,t,r){var o,i;return o=this.$getIndent(t),(i=this.$tokenizer.getLineTokens(t,e).tokens).length&&"comment"===i[i.length-1].type||"start"===e&&n.test(t)&&(o+=r),o},o.lineCommentStart="#",o.blockComment={start:"###",end:"###"},o.checkOutdent=function(e,t,n){var r;return null!=(r=this.$outdent)?r.checkOutdent(t,n):void 0},o.autoOutdent=function(e,t,n){var r;return null!=(r=this.$outdent)?r.autoOutdent(t,n):void 
0},i}(e("../mode/text").Mode),i="(?![$\\w]|-[A-Za-z]|\\s*:(?![:=]))",a={defaultToken:"string"},o.Rules={start:[{token:"keyword",regex:"(?:t(?:h(?:is|row|en)|ry|ypeof!?)|c(?:on(?:tinue|st)|a(?:se|tch)|lass)|i(?:n(?:stanceof)?|mp(?:ort(?:\\s+all)?|lements)|[fs])|d(?:e(?:fault|lete|bugger)|o)|f(?:or(?:\\s+own)?|inally|unction)|s(?:uper|witch)|e(?:lse|x(?:tends|port)|val)|a(?:nd|rguments)|n(?:ew|ot)|un(?:less|til)|w(?:hile|ith)|o[fr]|return|break|let|var|loop)"+i},{token:"constant.language",regex:"(?:true|false|yes|no|on|off|null|void|undefined)"+i},{token:"invalid.illegal",regex:"(?:p(?:ackage|r(?:ivate|otected)|ublic)|i(?:mplements|nterface)|enum|static|yield)"+i},{token:"language.support.class",regex:"(?:R(?:e(?:gExp|ferenceError)|angeError)|S(?:tring|yntaxError)|E(?:rror|valError)|Array|Boolean|Date|Function|Number|Object|TypeError|URIError)"+i},{token:"language.support.function",regex:"(?:is(?:NaN|Finite)|parse(?:Int|Float)|Math|JSON|(?:en|de)codeURI(?:Component)?)"+i},{token:"variable.language",regex:"(?:t(?:hat|il|o)|f(?:rom|allthrough)|it|by|e)"+i},{token:"identifier",regex:r+"\\s*:(?![:=])"},{token:"variable",regex:r},{token:"keyword.operator",regex:"(?:\\.{3}|\\s+\\?)"},{token:"keyword.variable",regex:"(?:@+|::|\\.\\.)",next:"key"},{token:"keyword.operator",regex:"\\.\\s*",next:"key"},{token:"string",regex:"\\\\\\S[^\\s,;)}\\]]*"},{token:"string.doc",regex:"'''",next:"qdoc"},{token:"string.doc",regex:'"""',next:"qqdoc"},{token:"string",regex:"'",next:"qstring"},{token:"string",regex:'"',next:"qqstring"},{token:"string",regex:"`",next:"js"},{token:"string",regex:"<\\[",next:"words"},{token:"string.regex",regex:"//",next:"heregex"},{token:"comment.doc",regex:"/\\*",next:"comment"},{token:"comment",regex:"#.*"},{token:"string.regex",regex:"\\/(?:[^[\\/\\n\\\\]*(?:(?:\\\\.|\\[[^\\]\\n\\\\]*(?:\\\\.[^\\]\\n\\\\]*)*\\])[^[\\/\\n\\\\]*)*)\\/[gimy$]{0,4}",next:"key"},{token:"constant.numeric",regex:"(?:0x[\\da-fA-F][\\da-fA-F_]*|(?:[2-9]|[12]\\d|3[0-6])r[\\da-zA-Z][\\da-zA-Z_]*|(?:\\d[\\d_]*(?:\\.\\d[\\d_]*)?|\\.\\d[\\d_]*)(?:e[+-]?\\d[\\d_]*)?[\\w$]*)"},{token:"lparen",regex:"[({[]"},{token:"rparen",regex:"[)}\\]]",next:"key"},{token:"keyword.operator",regex:"[\\^!|&%+\\-]+"},{token:"text",regex:"\\s+"}],heregex:[{token:"string.regex",regex:".*?//[gimy$?]{0,4}",next:"start"},{token:"string.regex",regex:"\\s*#{"},{token:"comment.regex",regex:"\\s+(?:#.*)?"},{defaultToken:"string.regex"}],key:[{token:"keyword.operator",regex:"[.?@!]+"},{token:"identifier",regex:r,next:"start"},{token:"text",regex:"",next:"start"}],comment:[{token:"comment.doc",regex:".*?\\*/",next:"start"},{defaultToken:"comment.doc"}],qdoc:[{token:"string",regex:".*?'''",next:"key"},a],qqdoc:[{token:"string",regex:'.*?"""',next:"key"},a],qstring:[{token:"string",regex:"[^\\\\']*(?:\\\\.[^\\\\']*)*'",next:"key"},a],qqstring:[{token:"string",regex:'[^\\\\"]*(?:\\\\.[^\\\\"]*)*"',next:"key"},a],js:[{token:"string",regex:"[^\\\\`]*(?:\\\\.[^\\\\`]*)*`",next:"key"},a],words:[{token:"string",regex:".*?\\]>",next:"key"},a]}}))}}]);
//# sourceMappingURL=77.b01fc3fd.chunk.js.map |
const request = require("supertest");
const app = require("./../src/app");
const Task = require("./../src/models/task");
const { userOneId, userOne, userTwoId, userTwo, taskOne, taskTwo, taskThree, setupDatabase } = require("./fixtures/db");
beforeEach(setupDatabase);
test("Should create task for user", async () => {
const response = await request(app)
.post("/tasks")
.set("Authorization", `Bearer ${userOne.tokens[0].token}`)
.send({
description: "From the test"
})
.expect(201);
const task = await Task.findById(response.body._id);
expect(task).not.toBeNull();
expect(task.completed).toEqual(false);
});
test("Should fetch user tasks", async () => {
const response = await request(app)
.get("/tasks")
.set("Authorization", `Bearer ${userOne.tokens[0].token}`)
.send()
.expect(200);
expect(response.body.length).toEqual(2);
});
test("Should not be able to delete other user's task", async () => {
const response = await request(app)
    .delete(`/tasks/${taskOne._id}`)
.set("Authorization", `Bearer ${userTwo.tokens[0].token}`)
.send()
.expect(404);
const task = await Task.findById(taskOne._id);
expect(task).not.toBeNull();
});
|
import * as cyclic from './cyclic.js'
import * as diverging from './diverging.js'
import * as gist from './gist.js'
import * as qualitative from './qualitative.js'
import * as seasons from './seasons.js'
import * as sequential from './sequential.js'
import * as uniform from './uniform.js'
import * as vega from './vega.js'
export {
cyclic,
diverging,
gist,
qualitative,
seasons,
sequential,
uniform,
vega
}
|
const Util = require('./util/Util');
module.exports = {
// "Root" classes (starting points)
Client: require('./client/Client'),
Shard: require('./sharding/Shard'),
ShardClientUtil: require('./sharding/ShardClientUtil'),
ShardingManager: require('./sharding/ShardingManager'),
WebhookClient: require('./client/WebhookClient'),
// Utilities
Collection: require('./util/Collection'),
Constants: require('./util/Constants'),
DiscordAPIError: require('./client/rest/DiscordAPIError'),
EvaluatedPermissions: require('./util/Permissions'),
Permissions: require('./util/Permissions'),
Snowflake: require('./util/Snowflake'),
SnowflakeUtil: require('./util/Snowflake'),
Util: Util,
util: Util,
version: require('../package').version,
// Shortcuts to Util methods
escapeMarkdown: Util.escapeMarkdown,
fetchRecommendedShards: Util.fetchRecommendedShards,
splitMessage: Util.splitMessage,
// Structures
Channel: require('./structures/Channel'),
ClientUser: require('./structures/ClientUser'),
ClientUserSettings: require('./structures/ClientUserSettings'),
Collector: require('./structures/interfaces/Collector'),
DMChannel: require('./structures/DMChannel'),
Emoji: require('./structures/Emoji'),
Game: require('./structures/Presence').Game,
GroupDMChannel: require('./structures/GroupDMChannel'),
Guild: require('./structures/Guild'),
GuildAuditLogs: require('./structures/GuildAuditLogs'),
GuildChannel: require('./structures/GuildChannel'),
GuildMember: require('./structures/GuildMember'),
Invite: require('./structures/Invite'),
Message: require('./structures/Message'),
MessageAttachment: require('./structures/MessageAttachment'),
MessageCollector: require('./structures/MessageCollector'),
MessageEmbed: require('./structures/MessageEmbed'),
MessageMentions: require('./structures/MessageMentions'),
MessageReaction: require('./structures/MessageReaction'),
OAuth2Application: require('./structures/OAuth2Application'),
ClientOAuth2Application: require('./structures/OAuth2Application'),
PartialGuild: require('./structures/PartialGuild'),
PartialGuildChannel: require('./structures/PartialGuildChannel'),
PermissionOverwrites: require('./structures/PermissionOverwrites'),
Presence: require('./structures/Presence').Presence,
ReactionEmoji: require('./structures/ReactionEmoji'),
ReactionCollector: require('./structures/ReactionCollector'),
Role: require('./structures/Role'),
TextChannel: require('./structures/TextChannel'),
User: require('./structures/User'),
VoiceChannel: require('./structures/VoiceChannel'),
Webhook: require('./structures/Webhook'),
};
if (require('os').platform() === 'browser') window.Discord = module.exports; // eslint-disable-line no-undef
|
//// [mergedInterfacesWithMultipleBases3.ts]
// merged interfaces behave as if all extends clauses from each declaration are merged together
// no errors expected
class C<T> {
a: T;
}
class C2<T> {
b: T;
}
class C3<T> {
c: T;
}
class C4<T> {
d: T;
}
interface A<T> extends C<string>, C3<string> {
y: T;
}
interface A<T> extends C<string>, C4<string> {
z: T;
}
class D implements A<boolean> {
a: string;
b: Date;
c: string;
d: string;
y: boolean;
z: boolean;
}
//// [mergedInterfacesWithMultipleBases3.js]
// merged interfaces behave as if all extends clauses from each declaration are merged together
// no errors expected
var C = /** @class */ (function () {
function C() {
}
return C;
}());
var C2 = /** @class */ (function () {
function C2() {
}
return C2;
}());
var C3 = /** @class */ (function () {
function C3() {
}
return C3;
}());
var C4 = /** @class */ (function () {
function C4() {
}
return C4;
}());
var D = /** @class */ (function () {
function D() {
}
return D;
}());
|
const fs = require('./fs');
const config=require('./config')
const abrTemplate = () => {
let line = `#EXTM3U\n#EXT-X-VERSION:3\n`
if(config.isLD)
line += `#EXT-X-STREAM-INF:BANDWIDTH=800000,RESOLUTION=640x360\n360p/index.m3u8\n`
if(config.isSD)
line += `#EXT-X-STREAM-INF:BANDWIDTH=1400000,RESOLUTION=842x480\n480p/index.m3u8\n`
if(config.isHD)
line += `#EXT-X-STREAM-INF:BANDWIDTH=2800000,RESOLUTION=1280x720\n720p/index.m3u8\n`
if(config.isUD)
line += `#EXT-X-STREAM-INF:BANDWIDTH=3500000,RESOLUTION=1920x1080\n1080p/index.m3u8\n`
return line
};
const createPlaylist = async (mediaRoot, name) => {
console.log('create abr playlist');
await fs.mkdir(`${mediaRoot}/${name}`, { recursive: true });
await fs.writeFile(`${mediaRoot}/${name}/index.m3u8`, abrTemplate());
};
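// --- illustration only, not part of the original file ---
// A minimal sketch of calling the helper above; the media root and stream
// name are placeholders. It would write <mediaRoot>/<name>/index.m3u8.
// createPlaylist('./media', 'live-stream-1').then(() => console.log('master playlist written'));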
module.exports = {
createPlaylist
}; |
class Genderizer:
    def __init__(self):
        pass
    #This will be dropped in elsewhere
    def checkName(self, name, year=1990, host="melville.seas.harvard.edu"):
        # urllib was split into submodules in Python 3
        from urllib.parse import urlencode
        from urllib.request import urlopen
        destination = "http://" + host + "/cgi-bin/genderizer.py"
        params = {'name': name, 'year': year}
        encoded_params = urlencode(params)
        destination = destination + '?' + encoded_params
        print(destination)
        response = urlopen(destination)
        femaleProb = float(response.read())
        if femaleProb < .05:
            return "Male"
        elif femaleProb > .95:
            return "Female"
        else:
            return "NA"
|
// @format
const path = require("path");
const reactDocgenTypescript = require("react-docgen-typescript").withCustomConfig(
"./tsconfig.base.json"
);
const typescriptPropsParser = reactDocgenTypescript.parse;
module.exports = {
title: "nteract components",
defaultExample: false,
propsParser: typescriptPropsParser,
resolver: require("react-docgen").resolver.findAllComponentDefinitions,
sections: [
{
name: "Introduction",
content: "doc/components.md"
},
{
name: "@nteract/presentational-components",
components: "packages/presentational-components/src/components/*.tsx"
},
{
name: "@nteract/outputs",
components: "packages/outputs/src/components/*.tsx"
},
{
name: "@nteract/outputs/media",
components: "packages/outputs/src/components/media/*.tsx",
content: "packages/outputs/src/components/media/index.md",
ignore: "packages/outputs/src/components/media/index.tsx"
},
// {
// name: "@mybinder/host-cache",
// components: "packages/host-cache/src/components/*.tsx"
// },
{
name: "@nteract/directory-listing",
components: "packages/directory-listing/src/components/*.tsx"
},
{
name: "@nteract/markdown",
content: "packages/markdown/examples.md"
},
{
name: "@nteract/mathjax",
content: "packages/mathjax/examples.md"
}
],
// For overriding the components styleguidist uses
styleguideComponents: {
LogoRenderer: path.join(__dirname, "styleguide-components", "logo.tsx")
},
compilerConfig: {
// Allow us to use {...props}
objectAssign: "Object.assign",
transforms: {
// whether template strings get transpiled (we don't want it to, so that we can use the native functionality)
templateString: false
}
},
template: {
body: {
raw: `
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-129108362-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-129108362-2');
</script>`
}
},
webpackConfig: {
node: {
fs: "empty",
child_process: "empty",
net: "empty"
},
resolve: {
extensions: [".ts", ".tsx"]
},
module: {
rules: [
{
test: /\.tsx?$/,
loader: "ts-loader",
options: {
compilerOptions: {
strict: true,
jsx: "react",
composite: true
},
projectReferences: true,
transpileOnly: true
}
}
]
}
}
};
|
"""\
flask-pytest
------------
Runs pytest in a background process when DEBUG is True.
Links
`````
* `Website <http://github.com/joeyespo/flask-pytest>`_
"""
import os
from setuptools import setup, find_packages
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setup(
name='flask-pytest',
version='0.0.5',
description='Runs pytest in a background process when DEBUG is True.',
long_description=__doc__,
author='Joe Esposito',
author_email='[email protected]',
url='http://github.com/joeyespo/flask-pytest',
license='MIT',
platforms='any',
packages=find_packages(),
package_data={'': ['LICENSE']},
install_requires=read('requirements.txt'),
)
|
function namePuppy() {
var parent = document.getElementById("textbox");
var lineBreak = document.createElement("BR");
var input = document.createElement("INPUT");
input.type = "text";
input.id = "pname";
input.name = "pname";
input.value = "Spot";
parent.appendChild(lineBreak);
parent.appendChild(input);
}
function finishNamingPuppy() {
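    // setPuppyName is assumed to be defined elsewhere in the page's scripts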
var puppyNameInput = document.getElementById("pname");
setPuppyName(puppyNameInput.value);
puppyNameInput.remove();
} |
cuenta = input("Do you want to open an account at the bank?:")
saldo = 0
class banco:
    def __init__(self, cuenta, saldo):  # define the constructor
        self.cuenta = cuenta
        self.saldo = saldo
    def respuesta(self):
        if self.cuenta == "no":
            print("Okay, thank you")
        if self.cuenta == "yes":
            self.saldo = 0
            abono = int(input("Enter the amount of money you want to deposit:"))
            self.saldo = abono
            print("Your balance is:", self.saldo)
    def actividad(self):
        act = input("Do you want to check, withdraw or deposit money?")
        if act == "no":
            print("Okay, thank you")
        if act == "check":
            print("Your balance is: ", self.saldo, "€")
        if act == "withdraw":
            retiro = float(input("Money to withdraw:"))
            self.saldo = self.saldo - retiro
            print("Your current balance is: ", self.saldo)
        if act == "deposit":
            ingreso = float(input("Money to deposit:"))
            self.saldo = self.saldo + ingreso
            print("Your balance is: ", self.saldo, "€")
total = banco(cuenta, saldo)
# the methods print their own output and return None, so they are called
# directly instead of being wrapped in print()
total.respuesta()
total.actividad()
import merge from "ember-metal/merge";
import { get } from "ember-metal/property_get";
export function routeArgs(targetRouteName, models, queryParams) {
var args = [];
if (typeof targetRouteName === 'string') {
args.push('' + targetRouteName);
}
args.push.apply(args, models);
args.push({ queryParams: queryParams });
return args;
}
export function getActiveTargetName(router) {
var handlerInfos = router.activeTransition ?
router.activeTransition.state.handlerInfos :
router.state.handlerInfos;
return handlerInfos[handlerInfos.length - 1].name;
}
export function stashParamNames(router, handlerInfos) {
if (handlerInfos._namesStashed) { return; }
// This helper exists because router.js/route-recognizer.js awkwardly
// keeps separate a handlerInfo's list of parameter names depending
// on whether a URL transition or named transition is happening.
// Hopefully we can remove this in the future.
var targetRouteName = handlerInfos[handlerInfos.length-1].name;
var recogHandlers = router.router.recognizer.handlersFor(targetRouteName);
var dynamicParent = null;
for (var i = 0, len = handlerInfos.length; i < len; ++i) {
var handlerInfo = handlerInfos[i];
var names = recogHandlers[i].names;
if (names.length) {
dynamicParent = handlerInfo;
}
handlerInfo._names = names;
var route = handlerInfo.handler;
route._stashNames(handlerInfo, dynamicParent);
}
handlerInfos._namesStashed = true;
}
/*
Stolen from Controller
*/
export function calculateCacheKey(prefix, _parts, values) {
var parts = _parts || [];
var suffixes = "";
for (var i = 0, len = parts.length; i < len; ++i) {
var part = parts[i];
var value = get(values, part);
suffixes += "::" + part + ":" + value;
}
return prefix + suffixes.replace(ALL_PERIODS_REGEX, '-');
}
var ALL_PERIODS_REGEX = /\./g;
/*
Controller-defined query parameters can come in three shapes:
Array
queryParams: ['foo', 'bar']
Array of simple objects where value is an alias
queryParams: [
{
'foo': 'rename_foo_to_this'
},
{
'bar': 'call_bar_this_instead'
}
]
Array of fully defined objects
queryParams: [
{
'foo': {
as: 'rename_foo_to_this'
},
}
{
'bar': {
as: 'call_bar_this_instead',
scope: 'controller'
}
}
]
This helper normalizes all three possible styles into the
'Array of fully defined objects' style.
*/
export function normalizeControllerQueryParams(queryParams) {
if (queryParams._qpMap) {
return queryParams._qpMap;
}
var qpMap = queryParams._qpMap = {};
for (var i = 0, len = queryParams.length; i < len; ++i) {
accumulateQueryParamDescriptors(queryParams[i], qpMap);
}
return qpMap;
}
function accumulateQueryParamDescriptors(_desc, accum) {
var desc = _desc;
var tmp;
if (typeof desc === 'string') {
tmp = {};
tmp[desc] = { as: null };
desc = tmp;
}
for (var key in desc) {
if (!desc.hasOwnProperty(key)) { return; }
var singleDesc = desc[key];
if (typeof singleDesc === 'string') {
singleDesc = { as: singleDesc };
}
tmp = accum[key] || { as: null, scope: 'model' };
merge(tmp, singleDesc);
accum[key] = tmp;
}
}
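// --- illustration only, not part of the original file ---
// A worked example of the normalization above: the mixed-style input
// ['foo', { bar: 'renamed_bar' }] normalizes to
// { foo: { as: null, scope: 'model' }, bar: { as: 'renamed_bar', scope: 'model' } }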
|
var net = require('net');
var readline = require('readline');
var inherits = require('util').inherits;
var spawn = require('child_process').spawn;
exports.port = 5858;
exports.start = function() {
if (process.argv.length < 3) {
console.error("Usage: node debug script.js");
process.exit(1);
}
var interface = new Interface();
process.on('uncaughtException', function (e) {
console.error("There was an internal error in Node's debugger. " +
"Please report this bug.");
console.error(e.message);
console.error(e.stack);
if (interface.child) interface.child.kill();
process.exit(1);
});
};
var args = process.argv.slice(2);
args.unshift('--debug-brk');
//
// Parser/Serializer for V8 debugger protocol
// http://code.google.com/p/v8/wiki/DebuggerProtocol
//
// Usage:
// p = new Protocol();
//
// p.onResponse = function(res) {
// // do stuff with response from V8
// };
//
// socket.setEncoding('utf8');
// socket.on('data', function(s) {
// // Pass strings into the protocol
// p.execute(s);
// });
//
//
function Protocol() {
this._newRes();
}
exports.Protocol = Protocol;
Protocol.prototype._newRes = function(raw) {
this.res = { raw: raw || '', headers: {} };
this.state = 'headers';
this.reqSeq = 1;
this.execute('');
};
Protocol.prototype.execute = function(d) {
var res = this.res;
res.raw += d;
switch (this.state) {
case 'headers':
var endHeaderIndex = res.raw.indexOf('\r\n\r\n');
if (endHeaderIndex < 0) break;
var lines = res.raw.slice(0, endHeaderIndex).split('\r\n');
for (var i = 0; i < lines.length; i++) {
var kv = lines[i].split(/: +/);
res.headers[kv[0]] = kv[1];
}
this.contentLength = +res.headers['Content-Length'];
this.bodyStartIndex = endHeaderIndex + 4;
this.state = 'body';
if (res.raw.length - this.bodyStartIndex < this.contentLength) break;
// pass thru
case 'body':
if (res.raw.length - this.bodyStartIndex >= this.contentLength) {
res.body =
res.raw.slice(this.bodyStartIndex,
this.bodyStartIndex + this.contentLength);
// JSON parse body?
res.body = res.body.length ? JSON.parse(res.body) : {};
// Done!
this.onResponse(res);
this._newRes(res.raw.slice(this.bodyStartIndex + this.contentLength));
}
break;
default:
throw new Error('Unknown state');
break;
}
};
Protocol.prototype.serialize = function(req) {
req.type = 'request';
req.seq = this.reqSeq++;
var json = JSON.stringify(req);
return 'Content-Length: ' + json.length + '\r\n\r\n' + json;
};
var NO_FRAME = -1;
function Client() {
net.Stream.call(this);
var protocol = this.protocol = new Protocol(this);
this._reqCallbacks = [];
var socket = this;
this.currentFrame = NO_FRAME;
this.currentSourceLine = -1;
this.currentSource = null;
this.handles = {};
this.scripts = {};
// Note that 'Protocol' requires strings instead of Buffers.
socket.setEncoding('utf8');
socket.on('data', function(d) {
protocol.execute(d);
});
protocol.onResponse = this._onResponse.bind(this);
}
inherits(Client, net.Stream);
exports.Client = Client;
Client.prototype._addHandle = function(desc) {
if (typeof desc != 'object' || typeof desc.handle != 'number') {
throw new Error('bad type');
}
this.handles[desc.handle] = desc;
if (desc.type == 'script') {
this._addScript(desc);
}
};
var natives = process.binding('natives');
Client.prototype._addScript = function(desc) {
this.scripts[desc.id] = desc;
if (desc.name) {
desc.isNative = (desc.name.replace('.js', '') in natives) ||
desc.name == 'node.js';
}
};
Client.prototype._removeScript = function(desc) {
this.scripts[desc.id] = undefined;
};
Client.prototype._onResponse = function(res) {
for (var i = 0; i < this._reqCallbacks.length; i++) {
var cb = this._reqCallbacks[i];
if (this._reqCallbacks[i].request_seq == res.body.request_seq) break;
}
var self = this;
var handled = false;
if (res.headers.Type == 'connect') {
// Request a list of scripts for our own storage.
self.reqScripts();
self.emit('ready');
handled = true;
} else if (res.body && res.body.event == 'break') {
this.emit('break', res.body);
handled = true;
} else if (res.body && res.body.event == 'afterCompile') {
this._addHandle(res.body.body.script);
handled = true;
} else if (res.body && res.body.event == 'scriptCollected') {
// ???
this._removeScript(res.body.body.script);
handled = true;
}
if (cb) {
this._reqCallbacks.splice(i, 1);
handled = true;
cb(res.body);
}
if (!handled) this.emit('unhandledResponse', res.body);
};
Client.prototype.req = function(req, cb) {
this.write(this.protocol.serialize(req));
cb.request_seq = req.seq;
this._reqCallbacks.push(cb);
};
Client.prototype.reqVersion = function(cb) {
this.req({ command: 'version' } , function(res) {
if (cb) cb(res.body.V8Version, res.running);
});
};
Client.prototype.reqLookup = function(refs, cb) {
var self = this;
// TODO: We have a cache of handle's we've already seen in this.handles
// This can be used if we're careful.
var req = {
command: 'lookup',
arguments: {
handles: refs
}
};
this.req(req, function(res) {
if (res.success) {
for (var ref in res.body) {
if (typeof res.body[ref] == 'object') {
self._addHandle(res.body[ref]);
}
}
}
if (cb) cb(res);
});
};
// This is like reqEval, except it will look up the expression in each of the
// scopes associated with the current frame.
Client.prototype.reqEval = function(expression, cb) {
var self = this;
if (this.currentFrame == NO_FRAME) {
// Only need to eval in global scope.
this.reqFrameEval(expression, NO_FRAME, cb);
return;
}
// Otherwise we need to get the current frame to see which scopes it has.
this.reqBacktrace(function (bt) {
var frame = bt.frames[self.currentFrame];
var evalFrames = frame.scopes.map(function(s) {
return bt.frames[s.index].index;
});
self._reqFramesEval(expression, evalFrames, cb);
});
};
// Finds the first scope in the array in which the expression evals.
Client.prototype._reqFramesEval = function(expression, evalFrames, cb) {
if (evalFrames.length == 0) {
// Just eval in global scope.
this.reqFrameEval(expression, NO_FRAME, cb);
return;
}
var self = this;
var i = evalFrames.shift();
this.reqFrameEval(expression, i, function(res) {
if (res.success) {
if (cb) cb(res);
} else {
self._reqFramesEval(expression, evalFrames, cb);
}
});
};
Client.prototype.reqFrameEval = function(expression, frame, cb) {
var self = this;
var req = {
command: 'evaluate',
arguments: { expression: expression }
};
if (frame == NO_FRAME) {
req.arguments.global = true;
} else {
req.arguments.frame = frame;
}
this.req(req, function(res) {
if (res.success) {
self._addHandle(res.body);
}
if (cb) cb(res);
});
};
// reqBacktrace(cb)
// TODO: from, to, bottom
Client.prototype.reqBacktrace = function(cb) {
this.req({ command: 'backtrace' } , function(res) {
if (cb) cb(res.body);
});
};
// Returns an array of objects like this:
//
// { handle: 11,
// type: 'script',
// name: 'node.js',
// id: 14,
// lineOffset: 0,
// columnOffset: 0,
// lineCount: 562,
// sourceStart: '(function(process) {\n\n ',
// sourceLength: 15939,
// scriptType: 2,
// compilationType: 0,
// context: { ref: 10 },
// text: 'node.js (lines: 562)' }
//
Client.prototype.reqScripts = function(cb) {
var self = this;
this.req({ command: 'scripts' } , function(res) {
for (var i = 0; i < res.body.length; i++) {
self._addHandle(res.body[i]);
}
if (cb) cb();
});
};
Client.prototype.reqContinue = function(cb) {
this.req({ command: 'continue' }, function(res) {
if (cb) cb(res);
});
};
Client.prototype.listbreakpoints = function(cb) {
this.req({ command: 'listbreakpoints' }, function(res) {
if (cb) cb(res);
});
};
Client.prototype.reqSource = function(from, to, cb) {
var req = {
command: 'source',
fromLine: from,
toLine: to
};
this.req(req, function(res) {
if (cb) cb(res.body);
});
};
// client.next(1, cb);
Client.prototype.step = function(action, count, cb) {
var req = {
command: 'continue',
arguments: { stepaction: action, stepcount: count }
};
this.req(req, function(res) {
if (cb) cb(res);
});
};
Client.prototype.mirrorObject = function(handle, cb) {
var self = this;
if (handle.type == 'object') {
// The handle looks something like this:
// { handle: 8,
// type: 'object',
// className: 'Object',
// constructorFunction: { ref: 9 },
// protoObject: { ref: 4 },
// prototypeObject: { ref: 2 },
// properties: [ { name: 'hello', propertyType: 1, ref: 10 } ],
// text: '#<an Object>' }
// For now ignore the className and constructor and prototype.
// TJ's method of object inspection would probably be good for this:
// https://groups.google.com/forum/?pli=1#!topic/nodejs-dev/4gkWBOimiOg
var propertyRefs = handle.properties.map(function(p) {
return p.ref;
});
this.reqLookup(propertyRefs, function(res) {
if (!res.success) {
console.error("problem with reqLookup");
if (cb) cb(handle);
return;
}
var mirror;
if (handle.className == 'Array') {
mirror = [];
} else {
mirror = {};
}
for (var i = 0; i < handle.properties.length; i++) {
var value = res.body[handle.properties[i].ref];
var mirrorValue = value.value ? value.value : value.text;
if (Array.isArray(mirror) &&
typeof handle.properties[i].name != 'number') {
// Skip the 'length' property.
continue;
}
mirror[handle.properties[i].name] = mirrorValue;
}
if (cb) cb(mirror);
});
} else if (handle.value) {
process.nextTick(function() {
cb(handle.value);
});
} else {
process.nextTick(function() {
cb(handle);
});
}
};
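// Illustrative result (values assumed): for the object handle sketched in the
// comment above, mirrorObject resolves the property refs through reqLookup and
// calls back with a plain mirror such as { hello: 'world' }; for handles whose
// className is 'Array', the mirror is a real array and the 'length' property
// is skipped.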
Client.prototype.fullTrace = function(cb) {
var self = this;
this.reqBacktrace(function(trace) {
var refs = [];
for (var i = 0; i < trace.frames.length; i++) {
var frame = trace.frames[i];
// looks like this:
// { type: 'frame',
// index: 0,
// receiver: { ref: 1 },
// func: { ref: 0 },
// script: { ref: 7 },
// constructCall: false,
// atReturn: false,
// debuggerFrame: false,
// arguments: [],
// locals: [],
// position: 160,
// line: 7,
// column: 2,
// sourceLineText: ' debugger;',
// scopes: [ { type: 1, index: 0 }, { type: 0, index: 1 } ],
// text: '#00 blah() /home/ryan/projects/node/test-debug.js line 8 column 3 (position 161)' }
refs.push(frame.script.ref);
refs.push(frame.func.ref);
refs.push(frame.receiver.ref);
}
self.reqLookup(refs, function(res) {
for (var i = 0; i < trace.frames.length; i++) {
var frame = trace.frames[i];
frame.script = res.body[frame.script.ref];
frame.func = res.body[frame.func.ref];
frame.receiver = res.body[frame.receiver.ref];
}
if (cb) cb(trace);
});
});
};
var commands = [
'backtrace',
'continue',
'help',
'info breakpoints',
'kill',
'list',
'next',
'print',
'quit',
'run',
'scripts',
'step',
'version',
];
var helpMessage = 'Commands: ' + commands.join(', ');
function SourceUnderline(sourceText, position) {
if (!sourceText) return;
// Create an underline with a caret pointing to the source position. If the
// source contains a tab character the underline will have a tab character in
// the same place otherwise the underline will have a space character.
var underline = '';
for (var i = 0; i < position; i++) {
if (sourceText[i] == '\t') {
underline += '\t';
} else {
underline += ' ';
}
}
underline += '^';
// Return the source line text with the underline beneath.
return sourceText + '\n' + underline;
}
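// Illustrative example (input assumed): SourceUnderline('\tvar x = 1;', 1)
// reuses the tab so the caret lines up in a terminal, returning
// '\tvar x = 1;\n\t^'.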
function SourceInfo(body) {
var result = '';
if (body.script) {
if (body.script.name) {
result += body.script.name;
} else {
result += '[unnamed]';
}
}
result += ':';
result += body.sourceLine + 1;
return result;
}
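// Illustrative example (body assumed): SourceInfo({ script: { name: 'app.js' },
// sourceLine: 6 }) returns 'app.js:7'; sourceLine is zero-based, so one is
// added for display.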
// This class is the readline-enabled debugger interface which is invoked on
// "node debug"
function Interface() {
var self = this;
var term = this.term =
readline.createInterface(process.stdin, process.stdout, function (line) {
return self.complete(line);
});
process.on('exit', function() {
self.killChild();
});
this.stdin = process.openStdin();
term.setPrompt('debug> ');
term.prompt();
this.quitting = false;
process.on('SIGINT', function() {
self.handleSIGINT();
});
term.on('SIGINT', function() {
self.handleSIGINT();
});
term.on('attemptClose', function() {
self.tryQuit();
});
term.on('line', function(cmd) {
// trim whitespace
cmd = cmd.replace(/^\s*/, '').replace(/\s*$/, '');
if (cmd.length) {
self._lastCommand = cmd;
self.handleCommand(cmd);
    } else if (self._lastCommand) {
      self.handleCommand(self._lastCommand);
    } else {
      term.prompt();
    }
  });
});
}
Interface.prototype.complete = function(line) {
// Match me with a command.
var matches = [];
// Remove leading whitespace
line = line.replace(/^\s*/, '');
for (var i = 0; i < commands.length; i++) {
if (commands[i].indexOf(line) >= 0) {
matches.push(commands[i]);
}
}
return [matches, line];
};
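// Illustrative example (input assumed): complete('ba') returns
// [['backtrace'], 'ba']. Matching is by substring rather than prefix, so
// complete('point') would match 'info breakpoints'.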
Interface.prototype.handleSIGINT = function() {
if (this.paused) {
this.child.kill('SIGINT');
} else {
this.tryQuit();
}
};
Interface.prototype.quit = function() {
if (this.quitting) return;
this.quitting = true;
this.killChild();
this.term.close();
process.exit(0);
};
Interface.prototype.tryQuit = function() {
var self = this;
if (self.child) {
self.quitQuestion(function(yes) {
if (yes) {
self.quit();
} else {
self.term.prompt();
}
});
} else {
self.quit();
}
};
Interface.prototype.pause = function() {
this.paused = true;
this.stdin.pause();
this.term.pause();
};
Interface.prototype.resume = function() {
if (!this.paused) return false;
this.paused = false;
this.stdin.resume();
this.term.resume();
this.term.prompt();
return true;
};
Interface.prototype.handleBreak = function(r) {
var result = '';
if (r.breakpoints) {
result += 'breakpoint';
if (r.breakpoints.length > 1) {
result += 's';
}
result += ' #';
for (var i = 0; i < r.breakpoints.length; i++) {
if (i > 0) {
result += ', #';
}
result += r.breakpoints[i];
}
} else {
result += 'break';
}
result += ' in ';
result += r.invocationText;
result += ', ';
result += SourceInfo(r);
result += '\n';
result += SourceUnderline(r.sourceLineText, r.sourceColumn);
this.client.currentSourceLine = r.sourceLine;
this.client.currentFrame = 0;
this.client.currentScript = r.script.name;
console.log(result);
if (!this.resume()) this.term.prompt();
};
function intChars(n) {
// TODO dumb:
if (n < 50) {
return 2;
} else if (n < 950) {
return 3;
} else if (n < 9950) {
return 4;
} else {
return 5;
}
}
function leftPad(n) {
var s = n.toString();
var nchars = intChars(n);
var nspaces = nchars - s.length;
for (var i = 0; i < nspaces; i++) {
s = ' ' + s;
}
return s;
}
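// Illustrative examples (values assumed): intChars(7) is 2 and intChars(100)
// is 3, so leftPad(7) yields ' 7' while leftPad(100) yields '100', keeping
// line numbers right-aligned in `list` output for typical script lengths.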
Interface.prototype.handleCommand = function(cmd) {
var self = this;
var client = this.client;
var term = this.term;
if (cmd == 'quit' || cmd == 'q' || cmd == 'exit') {
self._lastCommand = null;
self.tryQuit();
} else if (/^r(un)?/.test(cmd)) {
self._lastCommand = null;
if (self.child) {
self.restartQuestion(function(yes) {
if (!yes) {
self._lastCommand = null;
term.prompt();
} else {
console.log('restarting...');
self.killChild();
// XXX need to wait a little bit for the restart to work?
setTimeout(function() {
self.trySpawn();
}, 1000);
}
});
} else {
self.trySpawn();
}
} else if (/^help/.test(cmd)) {
console.log(helpMessage);
term.prompt();
} else if ('version' == cmd) {
if (!client) {
self.printNotConnected();
return;
}
client.reqVersion(function(v) {
console.log(v);
term.prompt();
});
} else if (/info +breakpoints/.test(cmd)) {
if (!client) {
self.printNotConnected();
return;
}
client.listbreakpoints(function(res) {
console.log(res);
term.prompt();
});
} else if ('l' == cmd || 'list' == cmd) {
if (!client) {
self.printNotConnected();
return;
}
var from = client.currentSourceLine - 5;
var to = client.currentSourceLine + 5;
client.reqSource(from, to, function(res) {
var lines = res.source.split('\n');
for (var i = 0; i < lines.length; i++) {
var lineno = res.fromLine + i + 1;
if (lineno < from || lineno > to) continue;
if (lineno == 1) {
// The first line needs to have the module wrapper filtered out of
// it.
var wrapper = require('module').wrapper[0];
lines[i] = lines[i].slice(wrapper.length);
}
if (lineno == 1 + client.currentSourceLine) {
var nchars = intChars(lineno);
var pointer = '';
for (var j = 0; j < nchars - 1; j++) {
pointer += '=';
}
pointer += '>';
console.log(pointer + ' ' + lines[i]);
} else {
console.log(leftPad(lineno) + ' ' + lines[i]);
}
}
term.prompt();
});
} else if (/^backtrace/.test(cmd) || /^bt/.test(cmd)) {
if (!client) {
self.printNotConnected();
return;
}
client.fullTrace(function(bt) {
if (bt.totalFrames == 0) {
console.log('(empty stack)');
} else {
var text = '';
var firstFrameNative = bt.frames[0].script.isNative;
for (var i = 0; i < bt.frames.length; i++) {
var frame = bt.frames[i];
if (!firstFrameNative && frame.script.isNative) break;
text += '#' + i + ' ';
if (frame.func.inferredName && frame.func.inferredName.length > 0) {
text += frame.func.inferredName + ' ';
}
text += require('path').basename(frame.script.name) + ':';
text += (frame.line + 1) + ':' + (frame.column + 1);
text += '\n';
}
console.log(text);
}
term.prompt();
});
} else if (cmd == 'scripts' || cmd == 'scripts full') {
if (!client) {
self.printNotConnected();
return;
}
self.printScripts(cmd.indexOf('full') > 0);
term.prompt();
} else if (/^c(ontinue)?/.test(cmd)) {
if (!client) {
self.printNotConnected();
return;
}
self.pause();
client.reqContinue(function() {
self.resume();
});
} else if (/^k(ill)?/.test(cmd)) {
if (!client) {
self.printNotConnected();
return;
}
// kill
if (self.child) {
self.killQuestion(function(yes) {
if (yes) {
self.killChild();
} else {
self._lastCommand = null;
}
});
} else {
self.term.prompt();
}
} else if (/^n(ext)?/.test(cmd)) {
if (!client) {
self.printNotConnected();
return;
}
client.step('next', 1, function(res) {
// Wait for break point. (disable raw mode?)
});
} else if (/^s(tep)?/.test(cmd)) {
if (!client) {
self.printNotConnected();
return;
}
client.step('in', 1, function(res) {
// Wait for break point. (disable raw mode?)
});
} else if (/^p(rint)?/.test(cmd)) {
if (!client) {
self.printNotConnected();
return;
}
var i = cmd.indexOf(' ');
if (i < 0) {
console.log('print [expression]');
term.prompt();
} else {
cmd = cmd.slice(i);
client.reqEval(cmd, function(res) {
if (!res.success) {
console.log(res.message);
term.prompt();
return;
}
client.mirrorObject(res.body, function(mirror) {
console.log(mirror);
term.prompt();
});
});
}
} else {
if (!/^\s*$/.test(cmd)) {
// If it's not all white-space print this error message.
console.log('Unknown command "%s". Try "help"', cmd);
}
term.prompt();
}
};
Interface.prototype.yesNoQuestion = function(prompt, cb) {
var self = this;
self.resume();
this.term.question(prompt, function(answer) {
if (/^y(es)?$/i.test(answer)) {
cb(true);
} else if (/^n(o)?$/i.test(answer)) {
cb(false);
} else {
console.log('Please answer y or n.');
self.yesNoQuestion(prompt, cb);
}
});
};
Interface.prototype.restartQuestion = function(cb) {
this.yesNoQuestion('The program being debugged has been started already.\n' +
'Start it from the beginning? (y or n) ', cb);
};
Interface.prototype.killQuestion = function(cb) {
this.yesNoQuestion('Kill the program being debugged? (y or n) ', cb);
};
Interface.prototype.quitQuestion = function(cb) {
this.yesNoQuestion('A debugging session is active. Quit anyway? (y or n) ',
cb);
};
Interface.prototype.killChild = function() {
if (this.child) {
this.child.kill();
this.child = null;
}
if (this.client) {
this.client.destroy();
this.client = null;
}
this.resume();
};
Interface.prototype.trySpawn = function(cb) {
var self = this;
this.killChild();
this.child = spawn(process.execPath, args, { customFds: [0, 1, 2] });
this.pause();
setTimeout(function() {
process.stdout.write('connecting...');
var client = self.client = new Client();
client.connect(exports.port);
client.once('ready', function() {
process.stdout.write('ok\r\n');
// since we did debug-brk, we're hitting a break point immediately
// continue before anything else.
client.reqContinue(function() {
if (cb) cb();
});
});
client.on('close', function() {
console.log('\nprogram terminated');
self.client = null;
self.killChild();
if (!self.quitting) self.term.prompt();
});
client.on('unhandledResponse', function(res) {
console.log('\r\nunhandled res:');
console.log(res);
self.term.prompt();
});
client.on('break', function(res) {
self.handleBreak(res.body);
});
}, 100);
};
Interface.prototype.printNotConnected = function() {
console.log("Program not running. Try 'run'.");
this.term.prompt();
};
// The displayNatives argument tells whether to list internal node scripts.
Interface.prototype.printScripts = function(displayNatives) {
var client = this.client;
var text = '';
for (var id in client.scripts) {
var script = client.scripts[id];
if (typeof script == 'object' && script.name) {
if (displayNatives ||
script.name == client.currentScript ||
!script.isNative) {
text += script.name == client.currentScript ? '* ' : ' ';
text += require('path').basename(script.name) + '\n';
}
}
}
process.stdout.write(text);
};
|
from typing import Any, Dict, List, Sequence, TypeVar, Union
import pandas as pd
from ..shopcore import shop_rest
ShopCore = TypeVar("ShopCore")  # Represents shop_pybind.ShopCore
ShopApi = Union[ShopCore, 'shop_rest.ShopRestNative']
IntStrFloat = Union[int, str, float]
DataFrameOrSeries = Union[pd.DataFrame, pd.Series]
CommandValues = Union[IntStrFloat, List[IntStrFloat]]
CommandOptions = Union[str, List[str]]
Message = Union[Dict[str, str], List[Dict[str, str]]]
XyType = Union[pd.Series, List[Dict[str, Any]]]  # XY curves: a pd.Series or a list of dicts
ShopDatatypes = Union[IntStrFloat, Sequence[IntStrFloat], DataFrameOrSeries, Sequence[DataFrameOrSeries], XyType, List[XyType]]
LpModelDatatypes = Sequence[Union[IntStrFloat, bool]] |
"""
OOPyCQL
-------------
An object oriented interface for the CypherQueries in Python.
"""
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
append_to_deps = []
try:
from functools import lru_cache
del lru_cache
except ImportError:
if sys.version_info.major < 3:
append_to_deps.append("functools32")
    if sys.version_info < (3, 4):
append_to_deps.append("pathlib")
__author__ = "Dom Weldon <[email protected]>"
__email__ = "[email protected]"
__license__ = "Apache License, Version 2.0"
__package__ = "oopycql"
__version__ = "1.1.5a2"
required_deps = ["six", "regex"] + append_to_deps
download_url = (
"https://github.com/domweldon/oopycql/archive/"
"{0}.tar.gz".format(__version__)
)
setup(
name="oopycql",
version=__version__,
url="https://github.com/DomWeldon/oopycql",
license="Apache License, Version 2.0",
author="Dom Weldon",
author_email="[email protected]",
    description="An object oriented interface for the cypher query language",
long_description=__doc__,
packages=["oopycql"],
zip_safe=False,
include_package_data=True,
platforms="any",
install_requires=required_deps,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Database",
],
keywords="graph database neo4j cypher cql",
test_suite="py.test",
tests_require=["pytest"],
setup_requires=["pytest-runner"],
python_requires=">=2.7",
download_url=download_url,
)
|
import React, { Component } from 'react'
import AnimatedCurve from './AnimatedCurve'
import styled from 'styled-components'
const Wrapper = styled.h1`
margin-bottom: 0;
margin-top: 0;
display: flex;
flex-direction: column;
height: calc(100vh - 70px);
justify-content: center;
align-items: center;
position: relative;
`
const Curve = styled.div`
position: absolute;
height: 95vh;
top: 0;
right: 0;
left: 0;
z-index: -1;
`
const Text = styled.div`
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
width: 100%;
padding: 25px;
box-sizing: border-box;
`
const Title = styled.div`
color: #b8d078;
font-weight: 300;
font-size: 3rem;
`
const SubTitle = styled.div`
font-size: 1.2rem;
color: #807a84;
padding-top: 15px;
font-weight: 300;
`
const Image = styled.img`
width: 48px;
margin: 0;
position: absolute;
bottom: 0;
`
class Splash extends Component {
render() {
return (
<Wrapper>
<Curve>
<AnimatedCurve fill="beige" />
</Curve>
<Text>
<Title>Beansprouty</Title>
<SubTitle>Vegan Traveling</SubTitle>
</Text>
<Image src="icons/freepik/sprout.svg" />
</Wrapper>
)
}
}
export default Splash
|
"use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
var KhmerChessBoard_1 = __importDefault(require("./KhmerChessBoard"));
var helper_1 = require("./test/helper");
var expect = chai.expect;
describe('KhmerChessBoard', function () {
var _this = this;
var kcb = new KhmerChessBoard_1.default();
before(function () {
(0, helper_1.init)(kcb);
});
afterEach(function () {
(0, helper_1.reset)(kcb);
});
it('should move with shadow', function () { return __awaiter(_this, void 0, void 0, function () {
var cell, targetCell, movedCells;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
kcb.pieceShadowManager.quickMove(false);
kcb.loadRen(helper_1.capturing.renStr);
kcb.playManager.play();
cell = kcb.boardManager.get(helper_1.capturing.fromIndex);
targetCell = kcb.boardManager.get(helper_1.capturing.toIndex);
kcb.boardManager.selectCell(cell);
kcb.boardManager.selectCell(targetCell);
return [4 /*yield*/, kcb.pieceShadowManager.resolveAnimation()];
case 1:
_a.sent();
movedCells = kcb.boardManager.movedCells;
expect(movedCells.length).to.eql(2);
return [2 /*return*/];
}
});
}); }).timeout(1e3 * 5);
});
/*
* Copyright (c) 2021, K4us
* Author: Raksa Eng <[email protected]>, K4us Net <[email protected]>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
**/
//# sourceMappingURL=PieceShadowManager.Spec.js.map |
import React from "react"
import PropTypes from "prop-types"
import styled, { ThemeProvider } from "styled-components"
import "fontsource-roboto/400.css"
import "fontsource-roboto/700.css"
import { lightTheme, darkTheme } from "../styles/theme"
import { useDarkMode } from "../hooks"
import GlobalStyle from "../styles/globalStyle"
import Header from "./header"
import Footer from "./footer"
import CookieBar from "../components/cookieBar"
import { useCookieBar } from "../../config"
// https://medium.com/@chrisfitkin/how-to-smooth-scroll-links-in-gatsby-3dc445299558
if (typeof window !== "undefined") {
require("smooth-scroll")('a[href*="#"]')
}
const StyledLayoutWrapper = styled.div`
width: 100%;
min-height: 100vh;
margin: 0 auto;
display: grid;
grid-template-rows: auto 1fr auto;
grid-template-columns: 100%;
`
const Layout = ({ children }) => {
// Enables dark mode if the user's OS has an active dark theme
const darkModeEnabled = useDarkMode()
const theme = darkModeEnabled ? darkTheme : lightTheme
return (
<StyledLayoutWrapper id="layout-wrapper" data-useCookieBar={useCookieBar}>
<ThemeProvider theme={theme}>
<GlobalStyle />
<Header />
<main id="main-content">{children}</main>
<Footer />
{useCookieBar && <CookieBar />}
</ThemeProvider>
</StyledLayoutWrapper>
)
}
Layout.propTypes = {
children: PropTypes.any,
}
export default Layout
|
import styled, { css } from 'styled-components';
import PropTypes from 'prop-types';
import BaseTheme from '../../style/themes/base';
import OptionsHelper from '../../utils/helpers/options-helper';
import CloseIconClassicStyling from './dismiss-button-classic.style';
import Link from '../link';
import StyledIcon from '../icon/icon.style';
import { isClassic } from '../../utils/helpers/style-helper';
const DismissButtonStyle = styled.div`
border: none;
position: absolute;
right: 16px;
${({ theme }) => !isClassic(theme) && css`
margin-top: -10px;
top: 50%;
`}
${StyledIcon} {
&:before {
color: ${({ theme }) => theme.colors.border};
}
&:hover:before{
color: ${({ theme }) => theme.colors.focusedIcon};
}
}
${CloseIconClassicStyling}
`;
const LinkStyle = styled(Link)`
${({ theme }) => !isClassic(theme) && css`
a:focus {
outline: none;
background-color: transparent;
span {
&:before {
outline: 2px solid ${theme.colors.focus};
outline-offset: 3px;
}
}
}
`}
.carbon-link__content {
display: none;
}
${StyledIcon} {
margin-right: 0;
top: -2px;
}
`;
DismissButtonStyle.defaultProps = {
variant: 'info',
roundedCorners: true,
theme: BaseTheme,
transparent: false
};
DismissButtonStyle.propTypes = {
variant: PropTypes.oneOf(OptionsHelper.colors),
border: PropTypes.bool,
roundedCorners: PropTypes.bool,
transparent: PropTypes.bool
};
LinkStyle.defaultProps = {
theme: BaseTheme
};
export { DismissButtonStyle, LinkStyle };
|
const _ = require('lodash')
const md5 = require('blueimp-md5')
const {
mdToJson,
jsonToMd
} = require('./mdParser/yaml')
const CmdHelper = require('./cmdHelper')
const {
MARKDOWN_CMD,
MOD_CMD_BEGIN,
MOD_CMD_END
} = require('./cmdHelper')
const ModAdaptor = require('./modAdaptor')
const blockHelper = {
buildJson(block) {
if (CmdHelper.isValidCmd(block.cmd)) {
if (CmdHelper.isOldCmd(block.cmd)) {
const targetCmd = CmdHelper.targetCmd(block.cmd)
block.data = ModAdaptor.transfer(block.md, block.cmd, targetCmd)
this.updateCmd(block, targetCmd)
this.buildMarkdown(block)
} else {
const data = this.isMarkdownMod(block)
? {
md: {
data: this.mdText(block)
}
}
: mdToJson(this.mdText(block))
block.data = data
}
} else {
block.data = {}
}
},
buildKey(block) {
block.key = md5(this.text(block), block.uuid)
block.modKey = md5(this.text(block), block.modType)
},
buildMarkdown(block) {
if (!block.data) return
let newMd = this.isMarkdownMod(block)
? block.data.md.data
: jsonToMd(block.data)
newMd = newMd.split('\n')
block.lengthDiff = newMd.length - block.md.length
block.md = newMd
this.buildKey(block)
},
isMarkdownMod(block) {
return block && block.cmd === MARKDOWN_CMD
},
isOnEdit(block, lineNumber) {
return (
lineNumber >= this.contentBegin(block) &&
lineNumber < this.contentBegin(block) + block.md.length
)
},
isOnCursor(block, beginLine) {
return CmdHelper.isMarkdownCmd(block.cmd)
? beginLine >= block.lineBegin && beginLine < block.lineBegin + block.md.length
: beginLine >= block.lineBegin && beginLine <= block.lineBegin + block.md.length + 1
},
updateCmd(block, cmd) {
block.cmd = cmd
block.modType = 'Mod' + block.cmd
this.buildKey(block)
},
updateUUID(block, uuid) {
block.uuid = uuid
this.buildKey(block)
},
updateJson(block, jsonData) {
block.data = _.cloneDeep(jsonData)
this.buildMarkdown(block)
},
updateJsonValue(block, key, value) {
block.data[key] = value
this.buildMarkdown(block)
},
updateMarkdown(block, mdLines) {
block.lengthDiff = mdLines.length - block.md.length
block.md = mdLines
this.buildJson(block)
this.buildKey(block)
},
addLine(block, line) {
block.md.push(line)
},
modifyBegin(block, diff) {
block.lineBegin += diff
},
contentBegin(block) {
return this.isMarkdownMod(block) ? block.lineBegin : block.lineBegin + 1
},
textLength(block) {
return this.lines(block).length
},
lines(block) {
if (block.cmd !== MARKDOWN_CMD) {
let headLine = MOD_CMD_BEGIN + block.cmd
let endLine = MOD_CMD_END
return _.flatten([headLine, block.md, endLine])
} else {
return block.md
}
},
endLine(block) {
return block.lineBegin + this.lines(block).length - 1
},
text(block) {
return this.lines(block).join('\n')
},
mdText(block) {
return block.md.join('\n')
}
}
module.exports = blockHelper
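// Illustrative example (cmd value assumed): for a block { cmd: 'Text', md: ['hello'] },
// blockHelper.lines(block) returns [MOD_CMD_BEGIN + 'Text', 'hello', MOD_CMD_END]
// and blockHelper.text(block) joins those lines with '\n'; a markdown block
// (cmd === MARKDOWN_CMD) passes its md lines through unchanged.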
|
const { Thought, User } = require('../models');
const thoughtController = {
createThought({ body }, res) { //create a new thought
Thought.create(body).then(({ _id }) => {
return User.findOneAndUpdate({ _id: body.userId }, { $push: { thoughts: _id } }, { new: true })
.populate({ path: 'thoughts', select: '-__v' })
}).then(dbUserData => {
if (!dbUserData) {
res.status(404).json({ message: 'No user found with this id!' });
return;
}
res.json(dbUserData);
}).catch(err => res.json(err));
},
getAllThoughts(req, res) { //get all thoughts
Thought.find({}).then(dbThoughtData => res.json(dbThoughtData))
.catch(err => { res.status(400).json(err); });
},
getThoughtById({ params }, res) { //get a single thought by id
Thought.findOne({ _id: params.id })
.then(dbUserData => res.json(dbUserData))
.catch(err => { res.sendStatus(400); });
},
updateThoughtById({ params, body }, res) { //update thought by id
Thought.findOneAndUpdate({ _id: params.id }, body, { new: true, runValidators: true })
.then(dbThoughtData => {
if (!dbThoughtData) {
res.status(404).json({ message: 'No thought found with this id!' });
return;
}
res.json(dbThoughtData);
})
.catch(err => res.status(400).json(err));
},
removeThoughtById({ params }, res) { //remove thought by id
Thought.findOneAndDelete({ _id: params.id })
.then(deletedThought => {
if (!deletedThought) {
return res.status(404).json({ message: 'No thought with this id!' });
}
return User.findOneAndUpdate({ thoughts: params.id }, { $pull: { thoughts: params.id } }, { new: true });
})
.then(dbUserData => {
if (!dbUserData) {
res.status(404).json({ message: 'No user found with this id!' });
return;
}
res.json(dbUserData);
})
.catch(err => res.json(err));
},
createReaction({ params, body }, res) { //create a reaction stored in a single thought's reactions array
Thought.findOneAndUpdate({ _id: params.thoughtId }, { $push: { reactions: body } }, { new: true, runValidators: true })
.then(dbThoughtData => {
if (!dbThoughtData) {
res.status(404).json({ message: 'No thought found with this id!' });
return;
}
res.json(dbThoughtData);
}).catch(err => res.json(err));
},
removeReaction({ params }, res) {
Thought.findOneAndUpdate({ _id: params.thoughtId }, { $pull: { reactions: { reactionId: params.reactionId } } }, { new: true })
.then(dbThoughtData => {
if (!dbThoughtData) {
res.status(404).json({ message: 'No thought found with this id!' });
return;
}
res.json(dbThoughtData);
}).catch(err => res.json(err));
}
};
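// Hypothetical route wiring (paths and router name assumed, not part of this file):
// const router = require('express').Router();
// router.route('/').get(thoughtController.getAllThoughts).post(thoughtController.createThought);
// router.route('/:id').get(thoughtController.getThoughtById).put(thoughtController.updateThoughtById);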
module.exports = thoughtController; |
import matplotlib.pyplot as plt
import sys,os
sys.path.append(os.path.realpath('../AI3603_HW1'))
from hybrid_a_star import hybrid_a_star
def main():
print(__file__ + " start!!")
# start and goal position
sx, sy, stheta = 0, 0, 0
gx, gy, gtheta = 4, 4, 0
current_map = [
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]
]
ox, oy = [], []
    for i in range(len(current_map)):
        for j in range(len(current_map[i])):
if current_map[i][j]:
ox.append(i)
oy.append(j)
plt.plot(ox, oy, ".k")
plt.plot(sx, sy, "xr")
plt.plot(gx, gy, "xb")
plt.grid(True)
plt.axis("equal")
hy_a_star = hybrid_a_star(-6, 6, -6, 6, current_map=current_map,
resolution=1, vehicle_length=2)
path, theta = hy_a_star.find_path((sx, sy, stheta), (gx, gy, gtheta))
rx, ry = [], []
for node in path:
rx.append(node[0])
ry.append(node[1])
plt.plot(rx, ry, "-r")
plt.savefig("out/test_hybrid_A_star.png")
if __name__ == '__main__':
main()
|
!function(e){const t=e.jv=e.jv||{};t.dictionary=Object.assign(t.dictionary||{},{"Bulleted List":"","Bulleted list styles toolbar":"",Circle:"Bunder",Decimal:"","Decimal with leading zero":"",Disc:"Kaset","List properties":"","Lower-latin":"","Lower–roman":"","Numbered List":"","Numbered list styles toolbar":"","Reversed order":"Dipunwangsul",Square:"Kotak","Start at":"Wiwit saking","Start index must be greater than 0.":"","To-do List":"","Toggle the circle list style":"","Toggle the decimal list style":"","Toggle the decimal with leading zero list style":"","Toggle the disc list style":"","Toggle the lower–latin list style":"","Toggle the lower–roman list style":"","Toggle the square list style":"","Toggle the upper–latin list style":"","Toggle the upper–roman list style":"","Upper-latin":"","Upper-roman":""})}(window.CKEDITOR_TRANSLATIONS||(window.CKEDITOR_TRANSLATIONS={})); |
import React, {Component} from 'react';
import PropTypes from 'prop-types';
import {View, Dimensions} from 'react-native';
import Animator from './Animator';
const SCREEN_HEIGHT = Dimensions.get('window').height;
export default class BottomDrawer extends Component {
static propTypes = {
/**
* Height of the drawer.
*/
containerHeight: PropTypes.number.isRequired,
/**
* The amount of offset to apply to the drawer's position.
* If the app uses a header and tab navigation, offset should equal
* the sum of those two components' heights.
*/
offset: PropTypes.number,
/**
* Set to true to have the drawer start in up position.
*/
startUp: PropTypes.bool,
/**
* How much the drawer's down display falls beneath the up display.
* Ex: if set to 20, the down display will be 20 points underneath the up display.
*/
downDisplay: PropTypes.number,
/**
* The background color of the drawer.
*/
backgroundColor: PropTypes.string,
/**
* Set to true to give the top of the drawer rounded edges.
*/
roundedEdges: PropTypes.bool,
/**
* Set to true to give the drawer a shadow.
*/
shadow: PropTypes.bool,
/**
* A callback function triggered when the drawer swiped into up position
*/
onExpanded: PropTypes.func,
/**
* A callback function triggered when the drawer swiped into down position
*/
onCollapsed: PropTypes.func,
/**
* Set bottom left border radius
*/
borderBottomLeftRadius: PropTypes.number,
/**
* Set bottom right border radius
*/
borderBottomRightRadius: PropTypes.number,
/**
* Set border radius
*/
borderRadius: PropTypes.number,
/**
* Set top left border radius
*/
borderTopLeftRadius: PropTypes.number,
/**
* Set top right border radius
*/
borderTopRightRadius: PropTypes.number,
/**
* Set all the way down positon
*/
alldownDisplay: PropTypes.number,
};
static defaultProps = {
offset: 0,
startUp: true,
backgroundColor: '#ffffff',
borderRadius: 0,
borderBottomLeftRadius: 0,
borderBottomRightRadius: 0,
borderTopLeftRadius: 0,
borderTopRightRadius: 0,
roundedEdges: true,
shadow: true,
onExpanded: () => {},
onCollapsed: () => {},
alldownDisplay: 0,
};
constructor(props) {
super(props);
/**
* TOGGLE_THRESHOLD is how much the user has to swipe the drawer
* before its position changes between up / down.
*/
this.TOGGLE_THRESHOLD = this.props.containerHeight / 11;
this.DOWN_DISPLAY =
this.props.downDisplay || this.props.containerHeight / 1.5;
/**
* UP_POSITION and DOWN_POSITION calculate the two (x,y) values for when
* the drawer is swiped into up position and down position.
*/
this.UP_POSITION = this._calculateUpPosition(
SCREEN_HEIGHT,
this.props.containerHeight,
this.props.offset
);
this.DOWN_POSITION = this._calculateDownPosition(
this.UP_POSITION,
this.DOWN_DISPLAY
);
this.ALL_DOWN_POSITION = {x: 0, y: this.props.alldownDisplay};
this.state = {
currentPosition: this.props.startUp
? this.UP_POSITION
: this.DOWN_POSITION,
};
}
render() {
return (
<Animator
currentPosition={this.state.currentPosition}
setCurrentPosition={position => this.setCurrentPosition(position)}
toggleThreshold={this.TOGGLE_THRESHOLD}
upPosition={this.UP_POSITION}
downPosition={this.DOWN_POSITION}
roundedEdges={this.props.roundedEdges}
shadow={this.props.shadow}
containerHeight={this.props.containerHeight}
backgroundColor={this.props.backgroundColor}
onExpanded={() => this.props.onExpanded()}
onCollapsed={() => this.props.onCollapsed()}
alldownPosition={this.ALL_DOWN_POSITION}
>
{this.props.children}
<View
style={{
height: Math.sqrt(SCREEN_HEIGHT),
backgroundColor: this.props.backgroundColor,
borderRadius: this.props.borderRadius,
borderBottomLeftRadius: this.props.borderBottomLeftRadius,
borderBottomRightRadius: this.props.borderBottomRightRadius,
borderTopLeftRadius: this.props.borderTopLeftRadius,
borderTopRightRadius: this.props.borderTopRightRadius,
}}
/>
</Animator>
);
}
setCurrentPosition(position) {
this.setState({currentPosition: position});
}
_calculateUpPosition(screenHeight, containerHeight, offset) {
return {
x: 0,
y: screenHeight - (containerHeight + offset),
};
}
_calculateDownPosition(upPosition, downDisplay) {
return {
x: 0,
y: upPosition.y + downDisplay,
};
}
}
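// Illustrative usage (values assumed, not from this file). With a screen
// height of 800, containerHeight 300 and offset 50, UP_POSITION comes out
// as {x: 0, y: 450}; with the default downDisplay of 300 / 1.5 = 200,
// DOWN_POSITION is {x: 0, y: 650}.
//
// <BottomDrawer containerHeight={300} offset={50} startUp={false}>
//   <View style={{height: 300}} />
// </BottomDrawer>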
|
module.exports = (app) => {
app.use('/tasks', require('./api/tasks'));
app.use('/books', require('./api/books'));
}; |
##########################################################################
# Copyright (c) 2009, 2010, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
import re
import debug, tests
from common import TestCommon, TimeoutError
from results import RawResults
class BulkTestCommon(TestCommon):
use_memcpy = None
def get_module_name(self):
return "bulkbench"
def get_build_targets(self, build, machine):
targets = super(BulkTestCommon, self).get_build_targets(build, machine)
targets.append('%s/sbin/%s' %
(machine.get_bootarch(), self.get_module_name()))
return targets
def run(self, build, machine, testdir):
if machine.get_ncores() == machine.get_cores_per_socket():
# single-socket machine, pick first and last core
sendcore = machine.get_coreids()[0]
recvcore = machine.get_coreids()[-1]
else:
# compute two core IDs on different sockets to benchmark between
sendcore = machine.get_coreids()[0]
# first core on 2nd socket
recvcore = machine.get_coreids()[machine.get_cores_per_socket()]
# Iterate over all bulk block sizes
for i in [2048]:
debug.log('running %s block size %d' % (self.name, i))
modules = self.get_modules(build, machine)
modules.add_module(self.get_module_name(),
["core=%d" % sendcore, i, "send", self.use_memcpy])
modules.add_module(self.get_module_name(),
["core=%d" % recvcore, i, "recv", self.use_memcpy])
self.boot(machine, modules)
for line in self.collect_data(machine):
yield line
def process_data(self, testdir, rawiter):
results = RawResults('buffersize')
data = []
for line in rawiter:
m = re.match(r"rawresult (\d+)", line)
if m:
data.append(2048 / int(m.group(1)))
results.add_group("2048", data)
return results
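# Illustrative arithmetic (line content assumed): a raw line "rawresult 4"
# makes process_data append 2048 / 4 = 512 to the "2048" group, i.e. the
# block size divided by the reported per-block measurement.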
@tests.add_test
class BulkThroughputTest(BulkTestCommon):
''' Bulk transport throughput microbenchmark '''
name = "bulk"
use_memcpy = "nomemcpy"
@tests.add_test
class BulkMemThroughputTest(BulkTestCommon):
''' Bulk transport throughput microbenchmark with memcpy on receiver '''
name = "bulk_memcpy"
use_memcpy = "memcpy"
|
exports.seed = async knex => {
await knex("foods").del();
await knex("foods").insert([
{
food_name: "Oatmeal",
qty: 1,
// date format YYYY-MM-DD
date: "2015-03-06",
children_id: 1,
category_id: 4
},
{
food_name: "Banana",
qty: 1,
// date format YYYY-MM-DD
date: "2015-03-07",
children_id: 1,
category_id: 2
},
{
food_name: "Chicken Nuggets",
qty: 1,
// date format YYYY-MM-DD
date: "2015-04-06",
children_id: 3,
category_id: 3
},
{
food_name: "Mac and Cheese",
qty: 1,
// date format YYYY-MM-DD
date: "2015-04-06",
children_id: 3,
category_id: 5
},
{
food_name: "Doritos",
qty: 1,
// date format YYYY-MM-DD
date: "2015-04-07",
children_id: 3,
category_id: 6
},
{
food_name: "Broccoli",
qty: 1,
// date format YYYY-MM-DD
date: "2015-08-07",
children_id: 3,
category_id: 1
}
]);
};
|
import os.path
import sys
# Enable running IDLE with idlelib in a non-standard location.
# This was once used to run development versions of IDLE.
# Because PEP 434 declared idle.py a public interface,
# removal should require deprecation.
idlelib_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if idlelib_dir not in sys.path:
sys.path.insert(0, idlelib_dir)
from idlelib.pyshell import main # This is subject to change
main()
|
"""
MIT License
Copyright (c) 2021 TheHamkerCat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import random
from datetime import datetime, timedelta
from pyrogram import filters
from wbb import app
from wbb.core.decorators.errors import capture_err
from wbb.utils.dbfunctions import get_couple, save_couple
__MODULE__ = "Shippering"
__HELP__ = "/detect_gay - To Choose Couple Of The Day"
# Date and time
def dt():
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M")
dt_list = dt_string.split(" ")
return dt_list
def dt_tom():
    # Naive "day + 1" string math breaks at month and year boundaries,
    # so compute tomorrow's date with a timedelta instead.
    return (datetime.now() + timedelta(days=1)).strftime("%d/%m/%Y")
today = str(dt()[0])
tomorrow = str(dt_tom())
@app.on_message(filters.command("detect_gay") & ~filters.edited)
@capture_err
async def couple(_, message):
if message.chat.type == "private":
return await message.reply_text(
"This command only works in groups."
)
try:
chat_id = message.chat.id
is_selected = await get_couple(chat_id, today)
if not is_selected:
list_of_users = []
async for i in app.iter_chat_members(message.chat.id):
if not i.user.is_bot:
list_of_users.append(i.user.id)
if len(list_of_users) < 2:
return await message.reply_text("Not enough users")
c1_id = random.choice(list_of_users)
c2_id = random.choice(list_of_users)
while c1_id == c2_id:
c1_id = random.choice(list_of_users)
c1_mention = (await app.get_users(c1_id)).mention
c2_mention = (await app.get_users(c2_id)).mention
couple_selection_message = f"""**Couple of the day:**
{c1_mention} + {c2_mention} = ❤️"""
await app.send_message(
message.chat.id, text=couple_selection_message
)
couple = {"c1_id": c1_id, "c2_id": c2_id}
await save_couple(chat_id, today, couple)
elif is_selected:
c1_id = int(is_selected["c1_id"])
c2_id = int(is_selected["c2_id"])
c1_name = (await app.get_users(c1_id)).first_name
c2_name = (await app.get_users(c2_id)).first_name
couple_selection_message = f"""Couple of the day:
[{c1_name}](tg://openmessage?user_id={c1_id}) + [{c2_name}](tg://openmessage?user_id={c2_id}) = ❤️"""
await app.send_message(
message.chat.id, text=couple_selection_message
)
except Exception as e:
print(e)
await message.reply_text(str(e))
|